conflict_resolution
<<<<<<< /** * * */ public static final class DirectCandidateGenerator extends CandidateGenerator { private final String field; private String preFilter; private String postFilter; private String suggestMode; private Float accuracy; private Integer size; private String sort; private String stringDistance; private Integer maxEdits; private Integer maxInspections; private Float maxTermFreq; private Integer prefixLength; private Integer minWordLength; private Float minDocFreq; /** * @param field Sets the field to fetch the candidate suggestions from. */ public DirectCandidateGenerator(String field) { super("direct_generator"); this.field = field; } /** * The global suggest mode controls what suggested terms are included or * controls for what suggest text tokens, terms should be suggested for. * Three possible values can be specified: * <ol> * <li><code>missing</code> - Only suggest terms in the suggest text * that aren't in the index. This is the default. * <li><code>popular</code> - Only suggest terms that occur in more docs * than the original suggest text term. * <li><code>always</code> - Suggest any matching suggest terms based on * tokens in the suggest text. * </ol> */ public DirectCandidateGenerator suggestMode(String suggestMode) { this.suggestMode = suggestMode; return this; } /** * Sets how similar the suggested terms at least need to be compared to * the original suggest text tokens. A value between 0 and 1 can be * specified. This value will be compared to the string distance result * of each candidate spelling correction. * <p> * Default is <tt>0.5</tt> */ public DirectCandidateGenerator accuracy(float accuracy) { this.accuracy = accuracy; return this; } /** * Sets the maximum suggestions to be returned per suggest text term. */ public DirectCandidateGenerator size(int size) { if (size <= 0) { throw new IllegalArgumentException("Size must be positive"); } this.size = size; return this; } /** * Sets how to sort the suggest terms per suggest text token. Two * possible values: * <ol> * <li><code>score</code> - Sort should first be based on score, then * document frequency and then the term itself. * <li><code>frequency</code> - Sort should first be based on document * frequency, then score and then the term itself. * </ol> * <p> * What the score is depends on the suggester being used. */ public DirectCandidateGenerator sort(String sort) { this.sort = sort; return this; } /** * Sets what string distance implementation to use for comparing how * similar suggested terms are. Five possible values can be specified: * <ol> * <li><code>internal</code> - This is the default and is based on * <code>damerau_levenshtein</code>, but highly optimized for comparing * string distance for terms inside the index. * <li><code>damerau_levenshtein</code> - String distance algorithm * based on the Damerau-Levenshtein algorithm. * <li><code>levenstein</code> - String distance algorithm based on * the Levenshtein edit distance algorithm. * <li><code>jarowinkler</code> - String distance algorithm based on * the Jaro-Winkler algorithm. * <li><code>ngram</code> - String distance algorithm based on character * n-grams. * </ol> */ public DirectCandidateGenerator stringDistance(String stringDistance) { this.stringDistance = stringDistance; return this; } /** * Sets the maximum edit distance candidate suggestions can have in * order to be considered as a suggestion. Can only be a value between 1 * and 2. Any other value results in a bad request error being thrown. * Defaults to <tt>2</tt>. 
*/ public DirectCandidateGenerator maxEdits(Integer maxEdits) { this.maxEdits = maxEdits; return this; } /** * A factor that is used to multiply with the size in order to inspect * more candidate suggestions. Can improve accuracy at the cost of * performance. Defaults to <tt>5</tt>. */ public DirectCandidateGenerator maxInspections(Integer maxInspections) { this.maxInspections = maxInspections; return this; } /** * Sets a maximum threshold in number of documents a suggest text token * can exist in order to be corrected. Can be a relative percentage * number (e.g. 0.4) or an absolute number to represent document * frequencies. If a value higher than 1 is specified then a fractional * value cannot be specified. Defaults to <tt>0.01</tt>. * <p> * This can be used to exclude high frequency terms from being * suggested. High frequency terms are usually spelled correctly; on top * of this, it also improves the suggest performance. */ public DirectCandidateGenerator maxTermFreq(float maxTermFreq) { this.maxTermFreq = maxTermFreq; return this; } /** * Sets the number of minimal prefix characters that must match in order * to be a candidate suggestion. Defaults to 1. Increasing this number * improves suggest performance. Usually misspellings don't occur in the * beginning of terms. */ public DirectCandidateGenerator prefixLength(int prefixLength) { this.prefixLength = prefixLength; return this; } /** * The minimum length a suggest text term must have in order to be * corrected. Defaults to <tt>4</tt>. */ public DirectCandidateGenerator minWordLength(int minWordLength) { this.minWordLength = minWordLength; return this; } /** * Sets a minimal threshold in number of documents a suggested term * should appear in. This can be specified as an absolute number or as a * relative percentage of the number of documents. This can improve quality * by only suggesting high frequency terms. Defaults to 0f and is not * enabled. If a value higher than 1 is specified then the number cannot * be fractional. */ public DirectCandidateGenerator minDocFreq(float minDocFreq) { this.minDocFreq = minDocFreq; return this; } /** * Sets a filter (analyzer) that is applied to each of the tokens passed to this candidate generator. * This filter is applied to the original token before candidates are generated. */ public DirectCandidateGenerator preFilter(String preFilter) { this.preFilter = preFilter; return this; } /** * Sets a filter (analyzer) that is applied to each of the generated tokens * before they are passed to the actual phrase scorer. 
*/ public DirectCandidateGenerator postFilter(String postFilter) { this.postFilter = postFilter; return this; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); if (field != null) { builder.field("field", field); } if (suggestMode != null) { builder.field("suggest_mode", suggestMode); } if (accuracy != null) { builder.field("accuracy", accuracy); } if (size != null) { builder.field("size", size); } if (sort != null) { builder.field("sort", sort); } if (stringDistance != null) { builder.field("string_distance", stringDistance); } if (maxEdits != null) { builder.field("max_edits", maxEdits); } if (maxInspections != null) { builder.field("max_inspections", maxInspections); } if (maxTermFreq != null) { builder.field("max_term_freq", maxTermFreq); } if (prefixLength != null) { builder.field("prefix_length", prefixLength); } if (minWordLength != null) { builder.field("min_word_length", minWordLength); } if (minDocFreq != null) { builder.field("min_doc_freq", minDocFreq); } if (preFilter != null) { builder.field("pre_filter", preFilter); } if (postFilter != null) { builder.field("post_filter", postFilter); } builder.endObject(); return builder; } } @Override public String getWriteableName() { return SUGGESTION_NAME; } @Override public void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalFloat(maxErrors); out.writeOptionalFloat(realWordErrorLikelihood); out.writeOptionalFloat(confidence); out.writeOptionalVInt(gramSize); // NORELEASE model.writeTo(); out.writeOptionalBoolean(forceUnigrams); out.writeOptionalVInt(tokenLimit); out.writeOptionalString(preTag); out.writeOptionalString(postTag); out.writeOptionalString(separator); if (collateQuery != null) { out.writeBoolean(true); collateQuery.writeTo(out); } else { out.writeBoolean(false); } out.writeMap(collateParams); out.writeOptionalBoolean(collatePrune); // NORELEASE write Map<String, List<CandidateGenerator>> generators = new HashMap<>(); } @Override public PhraseSuggestionBuilder doReadFrom(StreamInput in, String name) throws IOException { PhraseSuggestionBuilder builder = new PhraseSuggestionBuilder(name); builder.maxErrors = in.readOptionalFloat(); builder.realWordErrorLikelihood = in.readOptionalFloat(); builder.confidence = in.readOptionalFloat(); builder.gramSize = in.readOptionalVInt(); // NORELEASE read model builder.forceUnigrams = in.readOptionalBoolean(); builder.tokenLimit = in.readOptionalVInt(); builder.preTag = in.readOptionalString(); builder.postTag = in.readOptionalString(); builder.separator = in.readOptionalString(); if (in.readBoolean()) { builder.collateQuery = Template.readTemplate(in); } builder.collateParams = in.readMap(); builder.collatePrune = in.readOptionalBoolean(); // NORELEASE read Map<String, List<CandidateGenerator>> generators; return builder; } @Override protected boolean doEquals(PhraseSuggestionBuilder other) { return Objects.equals(maxErrors, other.maxErrors) && Objects.equals(separator, other.separator) && Objects.equals(realWordErrorLikelihood, other.realWordErrorLikelihood) && Objects.equals(confidence, other.confidence) && // NORELEASE Objects.equals(generator, other.generator) && Objects.equals(gramSize, other.gramSize) && // NORELEASE Objects.equals(model, other.model) && Objects.equals(forceUnigrams, other.forceUnigrams) && Objects.equals(tokenLimit, other.tokenLimit) && Objects.equals(preTag, other.preTag) && Objects.equals(postTag, other.postTag) && Objects.equals(collateQuery, other.collateQuery) && 
Objects.equals(collateParams, other.collateParams) && Objects.equals(collatePrune, other.collatePrune); } @Override protected int doHashCode() { return Objects.hash(maxErrors, separator, realWordErrorLikelihood, confidence, /** NORELEASE generators, */ gramSize, /** NORELEASE model, */ forceUnigrams, tokenLimit, preTag, postTag, collateQuery, collateParams, collatePrune); } ======= >>>>>>>
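The conflict above is dominated by Javadoc describing the DirectCandidateGenerator builder parameters. As a minimal, hypothetical sketch (not part of the conflict data; it only chains the setters shown above with illustrative values and assumes the class is directly accessible), the fluent API might be used like this:

    // Hypothetical usage of the builder documented above; field name and values are illustrative.
    DirectCandidateGenerator generator = new DirectCandidateGenerator("title") // field to draw candidates from
        .suggestMode("missing")    // only suggest terms that are absent from the index (the default)
        .accuracy(0.7f)            // require closer string-distance matches than the 0.5 default
        .size(5)                   // at most five candidate corrections per suggest text term
        .sort("frequency")         // prefer corrections that appear in many documents
        .maxEdits(2)               // the default (and maximum) edit distance
        .prefixLength(1)           // misspellings rarely occur in the first character
        .minWordLength(4);         // ignore very short terms

Each setter returns the generator itself, which is what makes this chaining style possible.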
<<<<<<< final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.seqNo(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime(), false); ======= final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime()); >>>>>>> final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.seqNo(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime());
<<<<<<< public RestGetSettingsAction(Settings settings, RestController controller, Client client) { super(settings, client); ======= public RestGetSettingsAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings) { super(settings, controller, client); this.indexScopedSettings = indexScopedSettings; >>>>>>> public RestGetSettingsAction(Settings settings, RestController controller, Client client, IndexScopedSettings indexScopedSettings) { super(settings, client); this.indexScopedSettings = indexScopedSettings;
<<<<<<< private void maybeUpdateSequenceNumber(Engine.Operation op) { if (op.origin() == Operation.Origin.PRIMARY) { op.updateSeqNo(seqNoService.generateSeqNo()); } } private static VersionValueSupplier NEW_VERSION_VALUE = (u, t) -> new VersionValue(u); @FunctionalInterface private interface VersionValueSupplier { VersionValue apply(long updatedVersion, long time); } private <T extends Engine.Operation> void maybeAddToTranslog( final T op, final long updatedVersion, final Function<T, Translog.Operation> toTranslogOp, final VersionValueSupplier toVersionValue) throws IOException { if (op.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location translogLocation = translog.add(toTranslogOp.apply(op)); op.setTranslogLocation(translogLocation); } versionMap.putUnderLock(op.uid().bytes(), toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); } ======= >>>>>>> <<<<<<< ======= final IndexResult indexResult; >>>>>>> <<<<<<< maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE); } finally { if (index.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { seqNoService.markSeqNoAsCompleted(index.seqNo()); } ======= indexResult.setTook(System.nanoTime() - index.startTime()); indexResult.freeze(); return indexResult; >>>>>>> indexResult.setTook(System.nanoTime() - index.startTime()); indexResult.freeze(); return indexResult; } finally { if (indexResult != null && indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { seqNoService.markSeqNoAsCompleted(indexResult.getSeqNo()); } <<<<<<< if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) return; maybeUpdateSequenceNumber(delete); final long updatedVersion = updateVersion(delete, currentVersion, expectedVersion); final boolean found = deleteIfFound(delete, currentVersion, deleted, versionValue); delete.updateVersion(updatedVersion, found); maybeAddToTranslog(delete, updatedVersion, Translog.Delete::new, DeleteVersionValue::new); } finally { if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { seqNoService.markSeqNoAsCompleted(delete.seqNo()); } ======= final DeleteResult deleteResult; if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) { // skip executing delete because of version conflict on recovery deleteResult = new DeleteResult(expectedVersion, true); } else { updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion); found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue); deleteResult = new DeleteResult(updatedVersion, found); location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY ? 
translog.add(new Translog.Delete(delete, deleteResult)) : null; versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); deleteResult.setTranslogLocation(location); } deleteResult.setTook(System.nanoTime() - delete.startTime()); deleteResult.freeze(); return deleteResult; >>>>>>> if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) { // skip executing delete because of version conflict on recovery deleteResult = new DeleteResult(expectedVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, true); } else { final long seqNo; if (delete.origin() == Operation.Origin.PRIMARY) { seqNo = seqNoService.generateSeqNo(); } else { seqNo = delete.seqNo(); } updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion); found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue); deleteResult = new DeleteResult(updatedVersion, seqNo, found); location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY ? translog.add(new Translog.Delete(delete, deleteResult)) : null; versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); deleteResult.setTranslogLocation(location); } deleteResult.setTook(System.nanoTime() - delete.startTime()); deleteResult.freeze(); return deleteResult; } finally { if (deleteResult != null && deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { seqNoService.markSeqNoAsCompleted(deleteResult.getSeqNo()); }
<<<<<<< @Override public Optional<EngineFactory> getEngineFactory(IndexSettings indexSettings) { List<Optional<EngineFactory>> enginePlugins = filterPlugins(EnginePlugin.class).stream() .map(p -> p.getEngineFactory(indexSettings)) .collect(Collectors.toList()); if (enginePlugins.size() == 0) { return Optional.empty(); } else if (enginePlugins.size() == 1) { return enginePlugins.stream().findFirst().get(); } else { throw new IllegalStateException("Only one EngineFactory plugin allowed"); } } ======= @Override public void close() throws IOException { IOUtils.close(plugins); } >>>>>>> @Override public void close() throws IOException { IOUtils.close(plugins); } @Override public Optional<EngineFactory> getEngineFactory(IndexSettings indexSettings) { List<Optional<EngineFactory>> enginePlugins = filterPlugins(EnginePlugin.class).stream() .map(p -> p.getEngineFactory(indexSettings)) .collect(Collectors.toList()); if (enginePlugins.size() == 0) { return Optional.empty(); } else if (enginePlugins.size() == 1) { return enginePlugins.stream().findFirst().get(); } else { throw new IllegalStateException("Only one EngineFactory plugin allowed"); } }
<<<<<<< import java.io.PrintWriter; import java.io.Writer; ======= import java.io.PrintWriter; >>>>>>> import java.io.PrintWriter; <<<<<<< /** Returns a Writer which can be used to write to the terminal directly. */ public abstract PrintWriter getWriter(); /** Print a message directly to the terminal. */ protected abstract void doPrint(String msg); ======= /** Returns a Writer which can be used to write to the terminal directly. */ public abstract PrintWriter getWriter(); >>>>>>> /** Returns a Writer which can be used to write to the terminal directly. */ public abstract PrintWriter getWriter(); <<<<<<< public PrintWriter getWriter() { return console.writer(); } @Override public void doPrint(String msg) { console.printf("%s", msg); console.flush(); ======= public PrintWriter getWriter() { return console.writer(); >>>>>>> public PrintWriter getWriter() { return console.writer(); <<<<<<< private static final PrintWriter writer = new PrintWriter(System.out); ======= private final PrintWriter writer = newWriter(); SystemTerminal() { super(System.lineSeparator()); } @SuppressForbidden(reason = "Writer for System.out") private static PrintWriter newWriter() { return new PrintWriter(System.out); } >>>>>>> private static final PrintWriter writer = newWriter(); SystemTerminal() { super(System.lineSeparator()); } @SuppressForbidden(reason = "Writer for System.out") private static PrintWriter newWriter() { return new PrintWriter(System.out); }
<<<<<<< successfulShards.incrementAndGet(); // mark primary as successful decPendingAndFinishIfNeeded(); ======= >>>>>>>
<<<<<<< import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.util.Arrays; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.concurrent.ExecutionException; ======= import java.util.Arrays; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Locale; >>>>>>> import java.lang.reflect.Proxy; import java.util.Arrays; import java.util.EnumSet; import java.util.List; import java.util.concurrent.ExecutionException;
<<<<<<< ======= import org.elasticsearch.index.mapper.core.NumberFieldMapper; >>>>>>> <<<<<<< import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; ======= >>>>>>> import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
<<<<<<< import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.suggest.stats.SuggestStats; ======= >>>>>>> import org.elasticsearch.index.shard.IndexingStats;
<<<<<<< import org.elasticsearch.plugins.MapperPlugin; ======= import org.elasticsearch.plugins.AnalysisPlugin; >>>>>>> import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.AnalysisPlugin;
<<<<<<< import java.util.stream.Collectors; ======= import java.util.function.Supplier; >>>>>>> import java.util.function.Supplier; import java.util.stream.Collectors; <<<<<<< @Override protected NamedXContentRegistry xContentRegistry() { return namedXContentRegistry; } public final void testFromXContent() throws IOException { final NamedXContentRegistry xContentRegistry = xContentRegistry(); final T aggregation = createTestInstance(); //norelease Remove this assumption when all aggregations can be parsed back. assumeTrue("This test does not support the aggregation type yet", getNamedXContents().stream().filter(entry -> entry.name.match(aggregation.getType())).count() > 0); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); final boolean humanReadable = randomBoolean(); final XContentType xContentType = randomFrom(XContentType.values()); final BytesReference originalBytes = toShuffledXContent(aggregation, xContentType, params, humanReadable); Aggregation parsedAggregation; try (XContentParser parser = xContentType.xContent().createParser(xContentRegistry, originalBytes)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); String currentName = parser.currentName(); int i = currentName.indexOf(InternalAggregation.TYPED_KEYS_DELIMITER); String aggType = currentName.substring(0, i); String aggName = currentName.substring(i + 1); parsedAggregation = parser.namedObject(Aggregation.class, aggType, aggName); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); assertEquals(aggregation.getName(), parsedAggregation.getName()); assertEquals(aggregation.getMetaData(), parsedAggregation.getMetaData()); assertTrue(parsedAggregation instanceof ParsedAggregation); assertEquals(aggregation.getType(), ((ParsedAggregation) parsedAggregation).getType()); final BytesReference parsedBytes = toXContent((ToXContent) parsedAggregation, xContentType, params, humanReadable); assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); assertFromXContent(aggregation, (ParsedAggregation) parsedAggregation); } } //norelease TODO make abstract protected void assertFromXContent(T aggregation, ParsedAggregation parsedAggregation) { } ======= /** * @return a random {@link DocValueFormat} that can be used in aggregations which * compute numbers. */ protected static DocValueFormat randomNumericDocValueFormat() { final List<Supplier<DocValueFormat>> formats = new ArrayList<>(3); formats.add(() -> DocValueFormat.RAW); formats.add(() -> new DocValueFormat.Decimal(randomFrom("###.##", "###,###.##"))); return randomFrom(formats).get(); } >>>>>>> @Override protected NamedXContentRegistry xContentRegistry() { return namedXContentRegistry; } public final void testFromXContent() throws IOException { final NamedXContentRegistry xContentRegistry = xContentRegistry(); final T aggregation = createTestInstance(); //norelease Remove this assumption when all aggregations can be parsed back. 
assumeTrue("This test does not support the aggregation type yet", getNamedXContents().stream().filter(entry -> entry.name.match(aggregation.getType())).count() > 0); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); final boolean humanReadable = randomBoolean(); final XContentType xContentType = randomFrom(XContentType.values()); final BytesReference originalBytes = toShuffledXContent(aggregation, xContentType, params, humanReadable); Aggregation parsedAggregation; try (XContentParser parser = xContentType.xContent().createParser(xContentRegistry, originalBytes)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); String currentName = parser.currentName(); int i = currentName.indexOf(InternalAggregation.TYPED_KEYS_DELIMITER); String aggType = currentName.substring(0, i); String aggName = currentName.substring(i + 1); parsedAggregation = parser.namedObject(Aggregation.class, aggType, aggName); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); assertEquals(aggregation.getName(), parsedAggregation.getName()); assertEquals(aggregation.getMetaData(), parsedAggregation.getMetaData()); assertTrue(parsedAggregation instanceof ParsedAggregation); assertEquals(aggregation.getType(), ((ParsedAggregation) parsedAggregation).getType()); final BytesReference parsedBytes = toXContent((ToXContent) parsedAggregation, xContentType, params, humanReadable); assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); assertFromXContent(aggregation, (ParsedAggregation) parsedAggregation); } } //norelease TODO make abstract protected void assertFromXContent(T aggregation, ParsedAggregation parsedAggregation) { } /** * @return a random {@link DocValueFormat} that can be used in aggregations which * compute numbers. */ protected static DocValueFormat randomNumericDocValueFormat() { final List<Supplier<DocValueFormat>> formats = new ArrayList<>(3); formats.add(() -> DocValueFormat.RAW); formats.add(() -> new DocValueFormat.Decimal(randomFrom("###.##", "###,###.##"))); return randomFrom(formats).get(); }
<<<<<<< .put("path.conf", getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(HUNSPELL_IGNORE_CASE.getKey(), true) ======= .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD, randomBoolean()) .put(HUNSPELL_IGNORE_CASE, true) >>>>>>> .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(HUNSPELL_IGNORE_CASE.getKey(), true) <<<<<<< .put("path.conf", getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(HUNSPELL_IGNORE_CASE.getKey(), true) ======= .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD, randomBoolean()) .put(HUNSPELL_IGNORE_CASE, true) >>>>>>> .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(HUNSPELL_IGNORE_CASE.getKey(), true) <<<<<<< .put("path.conf", getDataPath("/indices/analyze/no_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) ======= .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/no_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD, randomBoolean()) >>>>>>> .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/no_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) <<<<<<< .put("path.conf", getDataPath("/indices/analyze/two_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) ======= .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/two_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD, randomBoolean()) >>>>>>> .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/two_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean())
<<<<<<< import org.elasticsearch.common.settings.ClusterSettings; ======= import org.elasticsearch.common.Randomness; >>>>>>> import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Randomness;
<<<<<<< import org.elasticsearch.index.query.QueryShardException; ======= import org.elasticsearch.index.query.QueryParsingException; import org.elasticsearch.search.internal.SearchContext; >>>>>>> import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.search.internal.SearchContext; <<<<<<< public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException { ======= public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) throws IOException, QueryParsingException { >>>>>>> public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) throws IOException, QueryShardException {
<<<<<<< public void testOptimize() { String optimizeShardAction = OptimizeAction.NAME + "[n]"; interceptTransportActions(optimizeShardAction); ======= @Test public void testForceMerge() { String mergeShardAction = ForceMergeAction.NAME + "[n]"; interceptTransportActions(mergeShardAction); >>>>>>> public void testForceMerge() { String mergeShardAction = ForceMergeAction.NAME + "[n]"; interceptTransportActions(mergeShardAction);
<<<<<<< private volatile Map<Integer, IndexShardInjectorPair> shards = emptyMap(); private static class IndexShardInjectorPair { private final IndexShard indexShard; private final Injector injector; public IndexShardInjectorPair(IndexShard indexShard, Injector injector) { this.indexShard = indexShard; this.injector = injector; } public IndexShard getIndexShard() { return indexShard; } public Injector getInjector() { return injector; } } ======= private final IndexServicesProvider indexServicesProvider; private final IndexStore indexStore; private volatile ImmutableMap<Integer, IndexShard> shards = ImmutableMap.of(); >>>>>>> private final IndexServicesProvider indexServicesProvider; private final IndexStore indexStore; private volatile Map<Integer, IndexShard> shards = emptyMap(); <<<<<<< HashMap<Integer, IndexShardInjectorPair> newShards = new HashMap<>(shards); IndexShardInjectorPair indexShardInjectorPair = newShards.remove(shardId); indexShard = indexShardInjectorPair.getIndexShard(); shardInjector = indexShardInjectorPair.getInjector(); shards = unmodifiableMap(newShards); closeShardInjector(reason, sId, shardInjector, indexShard); ======= HashMap<Integer, IndexShard> tmpShardsMap = new HashMap<>(shards); indexShard = tmpShardsMap.remove(shardId); shards = ImmutableMap.copyOf(tmpShardsMap); closeShard(reason, sId, indexShard, indexShard.store()); >>>>>>> HashMap<Integer, IndexShard> newShards = new HashMap<>(shards); indexShard = newShards.remove(shardId); shards = unmodifiableMap(newShards); closeShard(reason, sId, indexShard, indexShard.store());
<<<<<<< AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException { return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); ======= AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException { final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(); return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); >>>>>>> AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException { final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter(); return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); <<<<<<< AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException { return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); ======= AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException { final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(); return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); >>>>>>> AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException { final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? 
null : includeExclude.convertToOrdinalsFilter(); return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); <<<<<<< AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException { return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); ======= AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException { final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(); return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); >>>>>>> AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException { final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(); return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
<<<<<<< public void onModule(SettingsModule module) { module.registerSetting(INDEX_TEST_SEED_SETTING); ======= @Override public String name() { return "test-seed-plugin"; } @Override public String description() { return "a test plugin that registers index.tests.seed as an index setting"; } @Override public List<Setting<?>> getSettings() { return Arrays.asList(INDEX_TEST_SEED_SETTING); >>>>>>> @Override public List<Setting<?>> getSettings() { return Arrays.asList(INDEX_TEST_SEED_SETTING);
<<<<<<< import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.ExplainLifecycleRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.StartILMRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.StopILMRequest; import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; import org.elasticsearch.protocol.xpack.license.PutLicenseRequest; ======= >>>>>>> import org.elasticsearch.protocol.xpack.indexlifecycle.ExplainLifecycleRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.StartILMRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.StopILMRequest; <<<<<<< static Request cancelTasks(CancelTasksRequest cancelTasksRequest) { Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel"); Params params = new Params(request); params.withTimeout(cancelTasksRequest.getTimeout()) .withTaskId(cancelTasksRequest.getTaskId()) .withNodes(cancelTasksRequest.getNodes()) .withParentTaskId(cancelTasksRequest.getParentTaskId()) .withActions(cancelTasksRequest.getActions()); return request; } ======= >>>>>>> <<<<<<< static Request putLifecyclePolicy(PutLifecyclePolicyRequest putLifecycleRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs(putLifecycleRequest.getName()) .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); Params params = new Params(request); params.withMasterTimeout(putLifecycleRequest.masterNodeTimeout()); params.withTimeout(putLifecycleRequest.timeout()); request.setEntity(createEntity(putLifecycleRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } static Request deleteLifecyclePolicy(DeleteLifecyclePolicyRequest deleteLifecyclePolicyRequest) { Request request = new Request(HttpDelete.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs(deleteLifecyclePolicyRequest.getLifecyclePolicy()) .build()); Params params = new Params(request); params.withMasterTimeout(deleteLifecyclePolicyRequest.masterNodeTimeout()); params.withTimeout(deleteLifecyclePolicyRequest.timeout()); return request; } static Request setIndexLifecyclePolicy(SetIndexLifecyclePolicyRequest setPolicyRequest) { String[] indices = setPolicyRequest.indices() == null ? 
Strings.EMPTY_ARRAY : setPolicyRequest.indices(); Request request = new Request(HttpPut.METHOD_NAME, new EndpointBuilder() .addCommaSeparatedPathParts(indices) .addPathPartAsIs("_ilm") .addPathPart(setPolicyRequest.policy()) .build()); Params params = new Params(request); params.withIndicesOptions(setPolicyRequest.indicesOptions()); params.withMasterTimeout(setPolicyRequest.masterNodeTimeout()); return request; } static Request startILM(StartILMRequest startILMRequest) { Request request = new Request(HttpPost.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs("start") .build()); Params params = new Params(request); params.withMasterTimeout(startILMRequest.masterNodeTimeout()); params.withTimeout(startILMRequest.timeout()); return request; } static Request stopILM(StopILMRequest stopILMRequest) { Request request = new Request(HttpPost.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs("stop") .build()); Params params = new Params(request); params.withMasterTimeout(stopILMRequest.masterNodeTimeout()); params.withTimeout(stopILMRequest.timeout()); return request; } static Request lifecycleManagementStatus(LifecycleManagementStatusRequest lifecycleManagementStatusRequest){ Request request = new Request(HttpGet.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs("status") .build()); Params params = new Params(request); params.withMasterTimeout(lifecycleManagementStatusRequest.masterNodeTimeout()); params.withTimeout(lifecycleManagementStatusRequest.timeout()); return request; } static Request explainLifecycle(ExplainLifecycleRequest explainLifecycleRequest) { String[] indices = explainLifecycleRequest.indices() == null ? Strings.EMPTY_ARRAY : explainLifecycleRequest.indices(); Request request = new Request(HttpGet.METHOD_NAME, new EndpointBuilder() .addCommaSeparatedPathParts(indices) .addPathPartAsIs("_ilm") .addPathPartAsIs("explain") .build()); Params params = new Params(request); params.withIndicesOptions(explainLifecycleRequest.indicesOptions()); params.withMasterTimeout(explainLifecycleRequest.masterNodeTimeout()); return request; } static Request putLicense(PutLicenseRequest putLicenseRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") .addPathPartAsIs("license") .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); Params parameters = new Params(request); parameters.withTimeout(putLicenseRequest.timeout()); parameters.withMasterTimeout(putLicenseRequest.masterNodeTimeout()); if (putLicenseRequest.isAcknowledge()) { parameters.putParam("acknowledge", "true"); } request.setJsonEntity(putLicenseRequest.getLicenseDefinition()); return request; } static Request getLicense(GetLicenseRequest getLicenseRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") .addPathPartAsIs("license") .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); Params parameters = new Params(request); parameters.withLocal(getLicenseRequest.local()); return request; } static Request deleteLicense(DeleteLicenseRequest deleteLicenseRequest) { Request request = new Request(HttpDelete.METHOD_NAME, "/_xpack/license"); Params parameters = new Params(request); parameters.withTimeout(deleteLicenseRequest.timeout()); parameters.withMasterTimeout(deleteLicenseRequest.masterNodeTimeout()); return request; } ======= >>>>>>> static Request putLifecyclePolicy(PutLifecyclePolicyRequest putLifecycleRequest) throws IOException { String endpoint = new EndpointBuilder() 
.addPathPartAsIs("_ilm") .addPathPartAsIs(putLifecycleRequest.getName()) .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); Params params = new Params(request); params.withMasterTimeout(putLifecycleRequest.masterNodeTimeout()); params.withTimeout(putLifecycleRequest.timeout()); request.setEntity(createEntity(putLifecycleRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } static Request deleteLifecyclePolicy(DeleteLifecyclePolicyRequest deleteLifecyclePolicyRequest) { Request request = new Request(HttpDelete.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs(deleteLifecyclePolicyRequest.getLifecyclePolicy()) .build()); Params params = new Params(request); params.withMasterTimeout(deleteLifecyclePolicyRequest.masterNodeTimeout()); params.withTimeout(deleteLifecyclePolicyRequest.timeout()); return request; } static Request setIndexLifecyclePolicy(SetIndexLifecyclePolicyRequest setPolicyRequest) { String[] indices = setPolicyRequest.indices() == null ? Strings.EMPTY_ARRAY : setPolicyRequest.indices(); Request request = new Request(HttpPut.METHOD_NAME, new EndpointBuilder() .addCommaSeparatedPathParts(indices) .addPathPartAsIs("_ilm") .addPathPart(setPolicyRequest.policy()) .build()); Params params = new Params(request); params.withIndicesOptions(setPolicyRequest.indicesOptions()); params.withMasterTimeout(setPolicyRequest.masterNodeTimeout()); return request; } static Request startILM(StartILMRequest startILMRequest) { Request request = new Request(HttpPost.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs("start") .build()); Params params = new Params(request); params.withMasterTimeout(startILMRequest.masterNodeTimeout()); params.withTimeout(startILMRequest.timeout()); return request; } static Request stopILM(StopILMRequest stopILMRequest) { Request request = new Request(HttpPost.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs("stop") .build()); Params params = new Params(request); params.withMasterTimeout(stopILMRequest.masterNodeTimeout()); params.withTimeout(stopILMRequest.timeout()); return request; } static Request lifecycleManagementStatus(LifecycleManagementStatusRequest lifecycleManagementStatusRequest){ Request request = new Request(HttpGet.METHOD_NAME, new EndpointBuilder() .addPathPartAsIs("_ilm") .addPathPartAsIs("status") .build()); Params params = new Params(request); params.withMasterTimeout(lifecycleManagementStatusRequest.masterNodeTimeout()); params.withTimeout(lifecycleManagementStatusRequest.timeout()); return request; } static Request explainLifecycle(ExplainLifecycleRequest explainLifecycleRequest) { String[] indices = explainLifecycleRequest.indices() == null ? Strings.EMPTY_ARRAY : explainLifecycleRequest.indices(); Request request = new Request(HttpGet.METHOD_NAME, new EndpointBuilder() .addCommaSeparatedPathParts(indices) .addPathPartAsIs("_ilm") .addPathPartAsIs("explain") .build()); Params params = new Params(request); params.withIndicesOptions(explainLifecycleRequest.indicesOptions()); params.withMasterTimeout(explainLifecycleRequest.masterNodeTimeout()); return request; }
<<<<<<< items[i] = new BulkItemResponse(i, "delete", new DeleteResponse(new ShardId("test", 0), "type", String.valueOf(i), i, 1, delete)); ======= items[i] = new BulkItemResponse(i, "delete", new DeleteResponse(new ShardId("test", "_na_", 0), "type", String.valueOf(i), 1, delete)); >>>>>>> items[i] = new BulkItemResponse(i, "delete", new DeleteResponse(new ShardId("test", "_na_", 0), "type", String.valueOf(i), i, 1, delete)); <<<<<<< items[i] = new BulkItemResponse(i, "delete", new DeleteResponse(new ShardId("test-" + index, 0), "type", String.valueOf(i), i, 1, delete)); ======= items[i] = new BulkItemResponse(i, "delete", new DeleteResponse(new ShardId("test-" + index, "_na_", 0), "type", String.valueOf(i), 1, delete)); >>>>>>> items[i] = new BulkItemResponse(i, "delete", new DeleteResponse(new ShardId("test-" + index, "_na_", 0), "type", String.valueOf(i), i, 1, delete));
<<<<<<< ======= import org.elasticsearch.common.Strings; import org.elasticsearch.common.cli.CliTool; import org.elasticsearch.common.cli.CliToolTestCase; import org.elasticsearch.common.cli.MockTerminal; import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.shield.authc.esusers.FileUserRolesStore; import org.elasticsearch.shield.authc.support.Hasher; import org.elasticsearch.shield.authc.support.SecuredStringTests; >>>>>>>
<<<<<<< ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, null, 1, true, ShardRoutingState.UNASSIGNED, 1, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); ======= ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, null, true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); >>>>>>> ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, null, 1, true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
<<<<<<< import com.forgeessentials.commons.IReconstructData; import com.forgeessentials.data.api.ClassContainer; import com.forgeessentials.data.api.TypeData; import com.forgeessentials.data.api.TypeMultiValInfo; ======= import java.util.HashMap; import java.util.HashSet; import java.util.Set; >>>>>>> import com.forgeessentials.commons.IReconstructData; import com.forgeessentials.data.api.ClassContainer; import com.forgeessentials.data.api.TypeData; import com.forgeessentials.data.api.TypeMultiValInfo; import java.util.HashMap; import java.util.HashSet; import java.util.Set;
<<<<<<< } catch (Throwable e) { // nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed. // some operations were already performed and have a seqno assigned. we shouldn't just reindex them ======= } catch (Exception e) { >>>>>>> } catch (Exception e) { // nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed. // some operations were already performed and have a seqno assigned. we shouldn't just reindex them <<<<<<< } catch (Throwable e) { // nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed. // some operations were already performed and have a seqno assigned. we shouldn't just reindex them ======= } catch (Exception e) { >>>>>>> } catch (Exception e) { // nocommit: since we now have RetryOnPrimaryException, retrying doesn't always mean the shard is closed. // some operations were already performed and have a seqno assigned. we shouldn't just reindex them <<<<<<< new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t))); ======= new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); >>>>>>> new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); <<<<<<< new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t))); ======= new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); >>>>>>> new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); <<<<<<< new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t))); ======= new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); >>>>>>> new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
<<<<<<< public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List<Reducer> reducers, Map<String, Object> metaData, FilterCachingPolicy filterCachingPolicy) throws IOException { super(name, factories, aggregationContext, parentAggregator, reducers, metaData); ======= public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, Map<String, Object> metaData, QueryCachingPolicy filterCachingPolicy) throws IOException { super(name, factories, aggregationContext, parentAggregator, metaData); >>>>>>> public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List<Reducer> reducers, Map<String, Object> metaData, QueryCachingPolicy filterCachingPolicy) throws IOException { super(name, factories, aggregationContext, parentAggregator, reducers, metaData); <<<<<<< return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData, filterCachingPolicy); ======= return new NestedAggregator(name, factories, objectMapper, context, parent, metaData, queryCachingPolicy); >>>>>>> return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData, queryCachingPolicy);
<<<<<<< ObjectMapper parent = mapper; for (int i = 0; i < paths.length-1; i++) { String currentPath = context.path().pathAsText(paths[i]); FieldMapper existingFieldMapper = context.docMapper().mappers().getMapper(currentPath); if (existingFieldMapper != null) { throw new MapperParsingException( "Could not dynamically add mapping for field [{}]. Existing mapping for [{}] must be of type object but found [{}].", null, String.join(".", paths), currentPath, existingFieldMapper.fieldType.typeName()); } mapper = context.docMapper().objectMappers().get(currentPath); if (mapper == null) { // One mapping is missing, check if we are allowed to create a dynamic one. ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context); switch (dynamic) { case STRICT: throw new StrictDynamicMappingException(parent.fullPath(), paths[i]); case TRUE: Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], XContentFieldType.OBJECT); if (builder == null) { builder = new ObjectMapper.Builder(paths[i]).enabled(true); } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to` or dots in field names"); } context.addDynamicMapper(mapper); break; case FALSE: // Should not dynamically create any more mappers so return the last mapper return new Tuple<>(pathsAdded, parent); ======= ObjectMapper parent = mapper; for (int i = 0; i < paths.length-1; i++) { String currentPath = context.path().pathAsText(paths[i]); Mapper existingFieldMapper = context.docMapper().mappers().getMapper(currentPath); if (existingFieldMapper != null) { throw new MapperParsingException( "Could not dynamically add mapping for field [{}]. Existing mapping for [{}] must be of type object but found [{}].", null, String.join(".", paths), currentPath, existingFieldMapper.typeName()); } mapper = context.docMapper().objectMappers().get(currentPath); if (mapper == null) { // One mapping is missing, check if we are allowed to create a dynamic one. ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context); switch (dynamic) { case STRICT: throw new StrictDynamicMappingException(parent.fullPath(), paths[i]); case TRUE: Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], XContentFieldType.OBJECT); if (builder == null) { builder = new ObjectMapper.Builder(paths[i]).enabled(true); } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to` or dots in field names"); } context.addDynamicMapper(mapper); break; case FALSE: // Should not dynamically create any more mappers so return the last mapper return new Tuple<>(pathsAdded, parent); >>>>>>> ObjectMapper parent = mapper; for (int i = 0; i < paths.length-1; i++) { String currentPath = context.path().pathAsText(paths[i]); Mapper existingFieldMapper = context.docMapper().mappers().getMapper(currentPath); if (existingFieldMapper != null) { throw new MapperParsingException( "Could not dynamically add mapping for field [{}]. 
Existing mapping for [{}] must be of type object but found [{}].", null, String.join(".", paths), currentPath, existingFieldMapper.typeName()); } mapper = context.docMapper().objectMappers().get(currentPath); if (mapper == null) { // One mapping is missing, check if we are allowed to create a dynamic one. ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context); switch (dynamic) { case STRICT: throw new StrictDynamicMappingException(parent.fullPath(), paths[i]); case TRUE: Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], XContentFieldType.OBJECT); if (builder == null) { builder = new ObjectMapper.Builder(paths[i]).enabled(true); } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to` or dots in field names"); } context.addDynamicMapper(mapper); break; case FALSE: // Should not dynamically create any more mappers so return the last mapper return new Tuple<>(pathsAdded, parent);
<<<<<<< import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesRanksTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; ======= >>>>>>> import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles;
<<<<<<< import org.elasticsearch.common.io.ThrowableObjectInputStream; import org.elasticsearch.common.io.stream.*; ======= import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; >>>>>>> import org.elasticsearch.common.io.stream.*; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput;
<<<<<<< public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER); public static final Setting<Boolean> NODE_DATA_SETTING = Setting.boolSetting("node.data", true, false, Setting.Scope.CLUSTER); public static final Setting<Boolean> NODE_MASTER_SETTING = Setting.boolSetting("node.master", true, false, Setting.Scope.CLUSTER); public static final Setting<Boolean> NODE_LOCAL_SETTING = Setting.boolSetting("node.local", false, false, Setting.Scope.CLUSTER); public static final Setting<String> NODE_MODE_SETTING = new Setting<>("node.mode", "network", Function.identity(), false, Setting.Scope.CLUSTER); public static final Setting<Boolean> NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, false, Setting.Scope.CLUSTER); public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString("node.name", false, Setting.Scope.CLUSTER); // this sucks that folks can mistype data, master or ingest and get away with it. ======= public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, Property.NodeScope); public static final Setting<Boolean> NODE_CLIENT_SETTING = Setting.boolSetting("node.client", false, Property.NodeScope); public static final Setting<Boolean> NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope); public static final Setting<Boolean> NODE_MASTER_SETTING = Setting.boolSetting("node.master", true, Property.NodeScope); public static final Setting<Boolean> NODE_LOCAL_SETTING = Setting.boolSetting("node.local", false, Property.NodeScope); public static final Setting<String> NODE_MODE_SETTING = new Setting<>("node.mode", "network", Function.identity(), Property.NodeScope); public static final Setting<Boolean> NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, Property.NodeScope); public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString("node.name", Property.NodeScope); // this sucks that folks can mistype client etc and get away with it. >>>>>>> public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, Property.NodeScope); public static final Setting<Boolean> NODE_CLIENT_SETTING = Setting.boolSetting("node.client", false, Property.NodeScope); public static final Setting<Boolean> NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope); public static final Setting<Boolean> NODE_MASTER_SETTING = Setting.boolSetting("node.master", true, Property.NodeScope); public static final Setting<Boolean> NODE_LOCAL_SETTING = Setting.boolSetting("node.local", false, Property.NodeScope); public static final Setting<String> NODE_MODE_SETTING = new Setting<>("node.mode", "network", Function.identity(), Property.NodeScope); public static final Setting<Boolean> NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, Property.NodeScope); public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString("node.name", Property.NodeScope); // this sucks that folks can mistype data, master or ingest and get away with it.
<<<<<<<
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.seqno.LocalCheckpointService;
=======
>>>>>>>
import org.elasticsearch.index.seqno.LocalCheckpointService;
<<<<<<< public static final int SERIALIZATION_FORMAT = 7; private String id; private String type; private long seqNo = -1; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; private BytesReference source; private String routing; private String parent; private long timestamp; private long ttl; public Index() { ======= public static final int SERIALIZATION_FORMAT = 6; // since 2.0-beta1 and 1.1 private final String id; private final String type; private final long version; private final VersionType versionType; private final BytesReference source; private final String routing; private final String parent; private final long timestamp; private final long ttl; public Index(StreamInput in) throws IOException { final int format = in.readVInt(); // SERIALIZATION_FORMAT assert format == SERIALIZATION_FORMAT : "format was: " + format; id = in.readString(); type = in.readString(); source = in.readBytesReference(); routing = in.readOptionalString(); parent = in.readOptionalString(); this.version = in.readLong(); this.timestamp = in.readLong(); this.ttl = in.readLong(); this.versionType = VersionType.fromValue(in.readByte()); assert versionType.validateVersionForWrites(this.version); >>>>>>> public static final int SERIALIZATION_FORMAT = 7; private String id; private String type; private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; private BytesReference source; private String routing; private String parent; private long timestamp; private long ttl; public Index(StreamInput in) throws IOException { final int format = in.readVInt(); // SERIALIZATION_FORMAT assert format >= SERIALIZATION_FORMAT - 1 : "format was: " + format; id = in.readString(); type = in.readString(); source = in.readBytesReference(); routing = in.readOptionalString(); parent = in.readOptionalString(); this.version = in.readLong(); this.timestamp = in.readLong(); this.ttl = in.readLong(); this.versionType = VersionType.fromValue(in.readByte()); assert versionType.validateVersionForWrites(this.version); if (format >= 7) { seqNo = in.readVLong(); } <<<<<<< this.seqNo = 0; this.version = 0; ======= version = Versions.MATCH_ANY; versionType = VersionType.INTERNAL; routing = null; parent = null; timestamp = 0; ttl = 0; >>>>>>> this.seqNo = 0; this.version = 0; version = Versions.MATCH_ANY; versionType = VersionType.INTERNAL; routing = null; parent = null; timestamp = 0; ttl = 0; <<<<<<< public void readFrom(StreamInput in) throws IOException { int version = in.readVInt(); // version id = in.readString(); type = in.readString(); source = in.readBytesReference(); try { if (version >= 1) { if (in.readBoolean()) { routing = in.readString(); } } if (version >= 2) { if (in.readBoolean()) { parent = in.readString(); } } if (version >= 3) { this.version = in.readLong(); } if (version >= 4) { this.timestamp = in.readLong(); } if (version >= 5) { this.ttl = in.readLong(); } if (version >= 6) { this.versionType = VersionType.fromValue(in.readByte()); } if (version >= 7) { this.seqNo = in.readVLong(); } } catch (Exception e) { throw new ElasticsearchException("failed to read [" + type + "][" + id + "]", e); } assert versionType.validateVersionForWrites(version); } @Override ======= >>>>>>> <<<<<<< this(delete.uid(), delete.seqNo(), delete.version(), delete.versionType()); ======= this.uid = delete.uid(); this.version = delete.version(); this.versionType = delete.versionType(); >>>>>>> 
this(delete.uid(), delete.seqNo(), delete.version(), delete.versionType()); <<<<<<< this(uid, 0, 0, VersionType.EXTERNAL); ======= this(uid, Versions.MATCH_ANY, VersionType.INTERNAL); >>>>>>> this(uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL); <<<<<<< public void readFrom(StreamInput in) throws IOException { int version = in.readVInt(); // version uid = new Term(in.readString(), in.readString()); if (version >= 1) { this.version = in.readLong(); } if (version >= 2) { this.versionType = VersionType.fromValue(in.readByte()); } if (version >= 3) { this.seqNo = in.readVLong(); } assert versionType.validateVersionForWrites(version); } @Override ======= >>>>>>>
<<<<<<<
import static java.util.Collections.singleton;
=======
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
>>>>>>>
import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
<<<<<<< import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Module; ======= import org.elasticsearch.client.FilterClient; >>>>>>> <<<<<<< import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; ======= >>>>>>> <<<<<<< private static final List<RequestAndHeaders> requests = new CopyOnWriteArrayList<>(); ======= >>>>>>> <<<<<<< requests.clear(); ======= ActionRecordingPlugin.clear(); >>>>>>> <<<<<<< List<RequestAndHeaders> searchRequests = getRequests(SearchRequest.class); ======= List<SearchRequest> searchRequests = ActionRecordingPlugin.requestsOfType(SearchRequest.class); >>>>>>> <<<<<<< private List<RequestAndHeaders> getRequests(Class<?> clazz) { List<RequestAndHeaders> results = new ArrayList<>(); for (RequestAndHeaders request : requests) { if (request.request.getClass().equals(clazz)) { results.add(request); } } return results; } private void assertRequestsContainHeader(Class<? extends ActionRequest> clazz) { List<RequestAndHeaders> classRequests = getRequests(clazz); for (RequestAndHeaders request : classRequests) { assertRequestContainsHeader(request.request, request.headers); ======= private void assertRequestsContainHeader(Class<? extends ActionRequest<?>> clazz) { List<? extends ActionRequest<?>> classRequests = ActionRecordingPlugin.requestsOfType(clazz); for (ActionRequest<?> request : classRequests) { assertRequestContainsHeader(request); >>>>>>> <<<<<<< List<RequestAndHeaders> getRequests = getRequests(GetRequest.class); ======= List<GetRequest> getRequests = ActionRecordingPlugin.requestsOfType(GetRequest.class); >>>>>>> <<<<<<< private void assertRequestContainsHeader(ActionRequest request, Map<String, String> context) { ======= private void assertRequestContainsHeader(ActionRequest<?> request) { >>>>>>> <<<<<<< return internalCluster().transportClient().filterWithHeader(Collections.singletonMap(randomHeaderKey, randomHeaderValue)); ======= Client transportClient = internalCluster().transportClient(); FilterClient filterClient = new FilterClient(transportClient) { @Override protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute( Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) { request.putHeader(randomHeaderKey, randomHeaderValue); super.doExecute(action, request, listener); } }; return filterClient; >>>>>>> <<<<<<< public static class ActionLoggingPlugin extends Plugin { @Override public String name() { return "test-action-logging"; } @Override public String description() { return "Test action logging"; } @Override public Collection<Module> nodeModules() { return Collections.<Module>singletonList(new ActionLoggingModule()); } public void onModule(ActionModule module) { module.registerFilter(LoggingFilter.class); } } public static class ActionLoggingModule extends AbstractModule { @Override protected void configure() { bind(LoggingFilter.class).asEagerSingleton(); } } public static class LoggingFilter extends ActionFilter.Simple { private final ThreadPool threadPool; @Inject public LoggingFilter(Settings settings, ThreadPool pool) { super(settings); this.threadPool = pool; } @Override public int order() { return 999; } @Override protected boolean apply(String action, ActionRequest request, ActionListener listener) { requests.add(new RequestAndHeaders(threadPool.getThreadContext().getHeaders(), 
request)); return true; } @Override protected boolean apply(String action, ActionResponse response, ActionListener listener) { return true; } } private static class RequestAndHeaders { final Map<String, String> headers; final ActionRequest request; private RequestAndHeaders(Map<String, String> headers, ActionRequest request) { this.headers = headers; this.request = request; } } ======= >>>>>>>
<<<<<<< final Index index = new Index("test", "_na_"); ShardRouting test_0 = ShardRouting.newUnassigned(index, 0, null, 1, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); ======= final Index index = new Index("test", "0xdeadbeef"); ShardRouting test_0 = ShardRouting.newUnassigned(index, 0, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); >>>>>>> final Index index = new Index("test", "0xdeadbeef"); ShardRouting test_0 = ShardRouting.newUnassigned(index, 0, null, 1, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); <<<<<<< ShardStats[] stats = new ShardStats[]{ new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, "0xdeadbeef", test_0.shardId()), commonStats0, null, null), new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, "0xdeadbeef", test_1.shardId()), commonStats1, null, null) ======= ShardStats[] stats = new ShardStats[] { new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0 , null), new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1 , null) >>>>>>> ShardStats[] stats = new ShardStats[] { new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0 , null, null), new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1 , null, null)
<<<<<<< .extendArray("plugin.types", MockRepository.Plugin.class.getName()).build(); ======= // Rebalancing is causing some checks after restore to randomly fail // due to https://github.com/elastic/elasticsearch/issues/9421 .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) .extendArray("plugin.types", MockRepositoryPlugin.class.getName()).build(); >>>>>>> // Rebalancing is causing some checks after restore to randomly fail // due to https://github.com/elastic/elasticsearch/issues/9421 .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) .extendArray("plugin.types", MockRepository.Plugin.class.getName()).build();
<<<<<<<
import com.forgeessentials.util.FunctionHelper;
import com.forgeessentials.util.UserIdent;
import com.forgeessentials.commons.selections.WorldArea;
import com.forgeessentials.commons.selections.WorldPoint;
import net.minecraft.server.MinecraftServer;
=======
>>>>>>>
import com.forgeessentials.util.FunctionHelper;
import com.forgeessentials.util.UserIdent;
import com.forgeessentials.commons.selections.WorldArea;
import com.forgeessentials.commons.selections.WorldPoint;
import net.minecraft.server.MinecraftServer;
<<<<<<<
result = shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id, VersionType.EXTERNAL);
=======
return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id);
>>>>>>>
result = shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id);
<<<<<<< apiName.startsWith("watcher.") == false && apiName.startsWith("index_lifecycle.") == false) { ======= apiName.startsWith("watcher.") == false && apiName.startsWith("migration.") == false) { >>>>>>> apiName.startsWith("watcher.") == false && apiName.startsWith("migration.") == false && apiName.startsWith("index_lifecycle.") == false) {
<<<<<<<
public class SimpleQueryStringBuilder extends QueryBuilder {
    public static final String NAME = "simple_query_string";
=======
public class SimpleQueryStringBuilder extends QueryBuilder implements BoostableQueryBuilder<SimpleQueryStringBuilder> {
>>>>>>>
public class SimpleQueryStringBuilder extends QueryBuilder implements BoostableQueryBuilder<SimpleQueryStringBuilder> {
    public static final String NAME = "simple_query_string";
<<<<<<<
    @Override
    public String queryId() { return NAME; }
=======
>>>>>>>
    @Override
    public String queryId() { return NAME; }
<<<<<<<
=======
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
>>>>>>>
import org.elasticsearch.protocol.xpack.license.GetLicenseRequest;
<<<<<<<
import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest;
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
=======
>>>>>>>
import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest;
import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
<<<<<<<
private FetchPhase fetchPhase;
=======
private final QueryShardContext queryShardContext;
>>>>>>>
private final QueryShardContext queryShardContext;
private FetchPhase fetchPhase;
<<<<<<<
IndexSettings.INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL,
LocalCheckpointService.SETTINGS_BIT_ARRAYS_SIZE,
=======
IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD,
>>>>>>>
IndexSettings.INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL,
LocalCheckpointService.SETTINGS_BIT_ARRAYS_SIZE,
IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD,
<<<<<<<
public boolean canConsoleUseCommand() { return false; }

@Override
public List<String> getTabCompletionOptions(MinecraftServer server, ICommandSender sender, String[] args, BlockPos pos)
=======
public List<String> addTabCompletionOptions(ICommandSender sender, String[] args, BlockPos pos)
>>>>>>>
public List<String> getTabCompletionOptions(MinecraftServer server, ICommandSender sender, String[] args, BlockPos pos)
<<<<<<<
List<String> names = new ArrayList<>();
for (Item i : GameRegistry.findRegistry(Item.class))
=======
List<String> names = new ArrayList<>();
for (Item i : GameData.getItemRegistry().typeSafeIterable())
>>>>>>>
List<String> names = new ArrayList<>();
for (Item i : GameRegistry.findRegistry(Item.class))
<<<<<<<
public NotQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public NotQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
throw new QueryParsingException(parseContext, "query is required when using `not` query");
=======
throw new ParsingException(parseContext, "filter is required when using `not` query");
>>>>>>>
throw new ParsingException(parseContext, "query is required when using `not` query");
<<<<<<< promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true); ======= promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true); >>>>>>> promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true); <<<<<<< promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true); ======= promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true); >>>>>>> promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true); <<<<<<< promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true); ======= promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true); >>>>>>> promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true); <<<<<<< promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true, true); ======= promote = AnalyzerCaster.promoteNumeric(definition, last.after, expression.actual, true); >>>>>>> promote = AnalyzerCaster.promoteNumeric(last.after, expression.actual, true); <<<<<<< promote = AnalyzerCaster.promoteNumeric(last.after, false, true); ======= promote = AnalyzerCaster.promoteNumeric(definition, last.after, false); >>>>>>> promote = AnalyzerCaster.promoteNumeric(last.after, false); <<<<<<< promote = AnalyzerCaster.promoteNumeric(last.after, false, true); ======= promote = AnalyzerCaster.promoteNumeric(definition, last.after, false); >>>>>>> promote = AnalyzerCaster.promoteNumeric(last.after, false); <<<<<<< promote = AnalyzerCaster.promoteNumeric(last.after, false, true); ======= promote = AnalyzerCaster.promoteNumeric(definition, last.after, false); >>>>>>> promote = AnalyzerCaster.promoteNumeric(last.after, false); <<<<<<< there = AnalyzerCaster.getLegalCast(location, last.after, promote, false); back = AnalyzerCaster.getLegalCast(location, promote, last.after, true); ======= there = AnalyzerCaster.getLegalCast(definition, location, last.after, promote, false, false); back = AnalyzerCaster.getLegalCast(definition, location, promote, last.after, true, false); >>>>>>> there = AnalyzerCaster.getLegalCast(location, last.after, promote, false, false); back = AnalyzerCaster.getLegalCast(location, promote, last.after, true, false); <<<<<<< expression.write(adapter); adapter.writeBinaryInstruction(location, promote, operation); ======= expression.write(settings, definition, adapter); adapter.writeBinaryInstruction(location, promote, operation); >>>>>>> expression.write(adapter); adapter.writeBinaryInstruction(location, promote, operation);
<<<<<<<
/**
 * Writes a {@link QueryBuilder} to the current stream
 */
public void writeQuery(QueryBuilder queryBuilder) throws IOException {
    writeNamedWriteable(queryBuilder);
}

/**
 * Writes a {@link ScoreFunctionBuilder} to the current stream
 */
public void writeScoreFunction(ScoreFunctionBuilder<?> scoreFunctionBuilder) throws IOException {
    writeNamedWriteable(scoreFunctionBuilder);
}
=======
/**
 * Writes the given {@link GeoPoint} to the stream
 */
public void writeGeoPoint(GeoPoint geoPoint) throws IOException {
    writeDouble(geoPoint.lat());
    writeDouble(geoPoint.lon());
}
>>>>>>>
/**
 * Writes a {@link QueryBuilder} to the current stream
 */
public void writeQuery(QueryBuilder queryBuilder) throws IOException {
    writeNamedWriteable(queryBuilder);
}

/**
 * Writes a {@link ScoreFunctionBuilder} to the current stream
 */
public void writeScoreFunction(ScoreFunctionBuilder<?> scoreFunctionBuilder) throws IOException {
    writeNamedWriteable(scoreFunctionBuilder);
}

/**
 * Writes the given {@link GeoPoint} to the stream
 */
public void writeGeoPoint(GeoPoint geoPoint) throws IOException {
    writeDouble(geoPoint.lat());
    writeDouble(geoPoint.lon());
}
<<<<<<< import java.io.Closeable; import java.io.IOException; import java.util.HashMap; import java.util.Map; ======= import java.io.Closeable; import java.io.IOException; >>>>>>> import java.io.Closeable; import java.io.IOException; import java.util.HashMap; import java.util.Map; <<<<<<< import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; ======= >>>>>>> <<<<<<< public void setHttpServer(@Nullable HttpServer httpServer) { this.httpServer = httpServer; } public synchronized void putAttribute(String key, String value) { Map<String, String> newServiceAttributes = new HashMap<>(serviceAttributes); newServiceAttributes.put(key, value); serviceAttributes = unmodifiableMap(newServiceAttributes); } public synchronized void removeAttribute(String key) { Map<String, String> newServiceAttributes = new HashMap<>(serviceAttributes); newServiceAttributes.remove(key); serviceAttributes = unmodifiableMap(newServiceAttributes); } /** * Attributes different services in the node can add to be reported as part of the node info (for example). */ public Map<String, String> attributes() { return this.serviceAttributes; } ======= // can not use constructor injection or there will be a circular dependency @Inject(optional = true) public void setScriptService(ScriptService scriptService) { this.scriptService = scriptService; this.ingestService.buildProcessorsFactoryRegistry(scriptService, clusterService); } >>>>>>>
<<<<<<< import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; ======= >>>>>>> import org.elasticsearch.ElasticsearchException; <<<<<<< ======= import java.util.Objects; import java.util.Queue; >>>>>>> <<<<<<< @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); if (newClusterState != null) { try { publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, t); } catch (Throwable unexpected) { logger.error("unexpected exception while failing [{}]", unexpected, source); } } ======= /** * Picks the cluster state with highest version with the same master from the queue. All cluster states with * lower versions are ignored. If a cluster state with a different master is seen the processing logic stops and the * last processed state is returned. */ static ClusterState selectNextStateToProcess(Queue<ProcessClusterState> processNewClusterStates) { // try and get the state with the highest version out of all the ones with the same master node id ProcessClusterState stateToProcess = processNewClusterStates.poll(); if (stateToProcess == null) { return null; } stateToProcess.processed = true; while (true) { ProcessClusterState potentialState = processNewClusterStates.peek(); // nothing else in the queue, bail if (potentialState == null) { break; } // if its not from the same master, then bail if (!Objects.equals(stateToProcess.clusterState.nodes().masterNodeId(), potentialState.clusterState.nodes().masterNodeId())) { break; } // we are going to use it for sure, poll (remove) it potentialState = processNewClusterStates.poll(); if (potentialState == null) { // might happen if the queue is drained break; >>>>>>> @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); if (newClusterState != null) { try { publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, t); } catch (Throwable unexpected) { logger.error("unexpected exception while failing [{}]", unexpected, source); } }
<<<<<<<
public QueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
public QueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {
<<<<<<<
if (indices.isEmpty() == false) {
    throw new QueryParsingException(parseContext, "[indices] indices or index already specified");
=======
if (indicesFound) {
    throw new ParsingException(parseContext, "[indices] indices or index already specified");
>>>>>>>
if (indices.isEmpty() == false) {
    throw new ParsingException(parseContext, "[indices] indices or index already specified");
<<<<<<<
if (indices.isEmpty() == false) {
    throw new QueryParsingException(parseContext, "[indices] indices or index already specified");
=======
if (indicesFound) {
    throw new ParsingException(parseContext, "[indices] indices or index already specified");
>>>>>>>
if (indices.isEmpty() == false) {
    throw new ParsingException(parseContext, "[indices] indices or index already specified");
<<<<<<< import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; ======= import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.index.Index; >>>>>>> import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.index.Index; <<<<<<< final ShardRouting initShard; final ShardRouting startedShard; final ShardRouting relocatingShard; final IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder("test"); if (randomBoolean()) { initShard = TestShardRouting.newShardRouting("test", 0, "node1", 1, true, ShardRoutingState.INITIALIZING, 1); ShardRouting replica = TestShardRouting.newShardRouting("test", 0, null, 1, false, ShardRoutingState.UNASSIGNED, 1); indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).addShard(replica).build()); } else { ShardRouting primaryShard = TestShardRouting.newShardRouting("test", 0, "node2", 1, true, ShardRoutingState.STARTED, 1); initShard = TestShardRouting.newShardRouting("test", 0, "node1", 1, false, ShardRoutingState.INITIALIZING, 1); indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(primaryShard).addShard(initShard).build()); } if (randomBoolean()) { startedShard = TestShardRouting.newShardRouting("test", 1, "node2", 1, true, ShardRoutingState.STARTED, 1); ShardRouting replica = TestShardRouting.newShardRouting("test", 1, null, 1, false, ShardRoutingState.UNASSIGNED, 1); indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(startedShard).addShard(replica).build()); } else { ShardRouting primaryShard = TestShardRouting.newShardRouting("test", 1, "node1", 1, true, ShardRoutingState.STARTED, 1); startedShard = TestShardRouting.newShardRouting("test", 1, "node2", 1, false, ShardRoutingState.STARTED, 1); indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(primaryShard).addShard(startedShard).build()); } if (randomBoolean()) { relocatingShard = TestShardRouting.newShardRouting("test", 2, "node1", "node2", 1, true, ShardRoutingState.RELOCATING, 1); ShardRouting replica = TestShardRouting.newShardRouting("test", 2, null, 1, false, ShardRoutingState.UNASSIGNED, 1); indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).addShard(replica).build()); } else { ShardRouting primaryShard = TestShardRouting.newShardRouting("test", 2, "node3", 1, true, ShardRoutingState.STARTED, 1); relocatingShard = TestShardRouting.newShardRouting("test", 2, "node1", "node2", 1, false, ShardRoutingState.RELOCATING, 1); indexRoutingTable.addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()) 
.addShard(primaryShard).addShard(relocatingShard).build()); } stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTable).build()); ======= final ShardRouting initShard = TestShardRouting.newShardRouting(index, 0, "node1", true, ShardRoutingState.INITIALIZING); final ShardRouting startedShard = TestShardRouting.newShardRouting(index, 1, "node2", true, ShardRoutingState.STARTED); final ShardRouting relocatingShard = TestShardRouting.newShardRouting(index, 2, "node1", "node2", true, ShardRoutingState.RELOCATING); stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index) .addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).build()) .addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(startedShard).build()) .addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).build())).build()); >>>>>>> final ShardRouting initShard = TestShardRouting.newShardRouting(index, 0, "node1", 1, true, ShardRoutingState.INITIALIZING); final ShardRouting startedShard = TestShardRouting.newShardRouting(index, 1, "node2", 1, true, ShardRoutingState.STARTED); final ShardRouting relocatingShard = TestShardRouting.newShardRouting(index, 2, "node1", "node2", 1, true, ShardRoutingState.RELOCATING); stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index) .addIndexShard(new IndexShardRoutingTable.Builder(initShard.shardId()).addShard(initShard).build()) .addIndexShard(new IndexShardRoutingTable.Builder(startedShard.shardId()).addShard(startedShard).build()) .addIndexShard(new IndexShardRoutingTable.Builder(relocatingShard.shardId()).addShard(relocatingShard).build())).build()); <<<<<<< TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primaryTerm(), initShard.primary(), ShardRoutingState.INITIALIZING, initShard.allocationId(), randomInt())), false); ======= TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(), ShardRoutingState.INITIALIZING, initShard.allocationId())), false); >>>>>>> TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primaryTerm(), initShard.primary(), ShardRoutingState.INITIALIZING, initShard.allocationId())), false); <<<<<<< TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primaryTerm(), initShard.primary(), ShardRoutingState.INITIALIZING, 1)), false); ======= TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primary(), ShardRoutingState.INITIALIZING)), false); >>>>>>> TestShardRouting.newShardRouting(initShard.index(), initShard.id(), initShard.currentNodeId(), initShard.relocatingNodeId(), initShard.primaryTerm(), initShard.primary(), ShardRoutingState.INITIALIZING)), false); <<<<<<< TestShardRouting.newShardRouting(initShard.index(), initShard.id(), "some_node", initShard.currentNodeId(), initShard.primaryTerm(), initShard.primary(), ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId())) , 1)), false); ======= TestShardRouting.newShardRouting(initShard.index(), initShard.id(), "some_node", initShard.currentNodeId(), 
initShard.primary(), ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId())))), false); >>>>>>> TestShardRouting.newShardRouting(initShard.index(), initShard.id(), "some_node", initShard.currentNodeId(), initShard.primaryTerm(), initShard.primary(), ShardRoutingState.INITIALIZING, AllocationId.newTargetRelocation(AllocationId.newRelocation(initShard.allocationId())))), false); <<<<<<< TestShardRouting.newShardRouting(startedShard.index(), startedShard.id(), startedShard.currentNodeId(), startedShard.relocatingNodeId(), startedShard.primaryTerm(), startedShard.primary(), ShardRoutingState.INITIALIZING, startedShard.allocationId(), 1)), false); ======= TestShardRouting.newShardRouting(startedShard.index(), startedShard.id(), startedShard.currentNodeId(), startedShard.relocatingNodeId(), startedShard.primary(), ShardRoutingState.INITIALIZING, startedShard.allocationId())), false); >>>>>>> TestShardRouting.newShardRouting(startedShard.index(), startedShard.id(), startedShard.currentNodeId(), startedShard.relocatingNodeId(), startedShard.primaryTerm(), startedShard.primary(), ShardRoutingState.INITIALIZING, startedShard.allocationId())), false); <<<<<<< TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(), ShardRoutingState.INITIALIZING, targetAllocationId, randomInt())), false); ======= TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), ShardRoutingState.INITIALIZING, targetAllocationId)), false); >>>>>>> TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(), ShardRoutingState.INITIALIZING, targetAllocationId)), false); <<<<<<< TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(), ShardRoutingState.INITIALIZING, relocatingShard.version()))); ======= TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), ShardRoutingState.INITIALIZING))); >>>>>>> TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(), ShardRoutingState.INITIALIZING))); <<<<<<< TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), relocatingShard.primary(), ShardRoutingState.INITIALIZING, relocatingShard.allocationId(), randomInt())), false); ======= TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primary(), ShardRoutingState.INITIALIZING, relocatingShard.allocationId())), false); >>>>>>> TestShardRouting.newShardRouting(relocatingShard.index(), relocatingShard.id(), relocatingShard.relocatingNodeId(), relocatingShard.currentNodeId(), relocatingShard.primaryTerm(), 
relocatingShard.primary(), ShardRoutingState.INITIALIZING, relocatingShard.allocationId())), false);
<<<<<<<
=======
@Override
public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
    throw new UnsupportedOperationException("can't snapshot the directory as the primary may change it underneath us");
}

@Override
protected void onNewEngine(Engine newEngine) {
    // nothing to do here - the superclass sets the translog on some listeners but we don't have such a thing
}
>>>>>>>
@Override
public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
    throw new UnsupportedOperationException("can't snapshot the directory as the primary may change it underneath us");
}

@Override
protected void onNewEngine(Engine newEngine) {
    // nothing to do here - the superclass sets the translog on some listeners but we don't have such a thing
}
<<<<<<<
=======
import org.elasticsearch.index.mapper.FieldMapper;
>>>>>>>
<<<<<<<
TermQueryBuilder termQuery = new TermQueryBuilder(fieldName, value);
if (boost != 1.0f) {
    termQuery.boost(boost);
=======
if (value == null) {
    throw new QueryParsingException(parseContext, "No value specified for term query");
}
Query query = null;
FieldMapper mapper = parseContext.fieldMapper(fieldName);
if (mapper != null) {
    query = mapper.termQuery(value, parseContext);
}
if (query == null) {
    query = new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)));
>>>>>>>
TermQueryBuilder termQuery = new TermQueryBuilder(fieldName, value);
if (boost != 1.0f) {
    termQuery.boost(boost);
<<<<<<<
public void onModule(ScriptModule module) {
    module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(GroovyScriptEngineService.class, GroovyScriptEngineService.NAME));
=======
@Override
public String name() {
    return "lang-groovy";
}

@Override
public String description() {
    return "Groovy scripting integration for Elasticsearch";
}

@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
    return new GroovyScriptEngineService(settings);
>>>>>>>
@Override
public ScriptEngineService getScriptEngineService(Settings settings) {
    return new GroovyScriptEngineService(settings);
<<<<<<<
import com.google.common.collect.Maps;
=======
>>>>>>>
<<<<<<<
Map<String, QueryParser<?>> queryParsers = Maps.newHashMap();
for (QueryParser<?> queryParser : injectedQueryParsers) {
=======
Map<String, QueryParser> queryParsers = new HashMap<>();
for (QueryParser queryParser : injectedQueryParsers) {
>>>>>>>
Map<String, QueryParser<?>> queryParsers = new HashMap<>();
for (QueryParser<?> queryParser : injectedQueryParsers) {
<<<<<<< import java.util.List; import net.minecraft.command.CommandException; ======= >>>>>>> import net.minecraft.command.CommandException; <<<<<<< public void processCommand(ICommandSender sender, String[] args) throws CommandException ======= public PermissionLevel getPermissionLevel() >>>>>>> public PermissionLevel getPermissionLevel() <<<<<<< public List<String> addTabCompletionOptions(ICommandSender sender, String[] args, BlockPos pos) ======= public void parse(CommandParserArgs arguments) >>>>>>> public void parse(CommandParserArgs arguments) throws CommandException <<<<<<< return null; ======= arguments.tabComplete("reload", "modules", "mixin"); String subCmd = arguments.remove().toLowerCase(); if (arguments.isTabCompletion) return; switch (subCmd) { case "reload": ModuleLauncher.instance.reloadConfigs(); arguments.confirm("Reloaded configs. (may not work for all settings)"); break; case "modules": arguments.confirm("Loaded FE modules: " + StringUtils.join(ModuleLauncher.getModuleList(), ", ")); break; case "mixin": arguments.notify("Injected patches:"); for (String patch : FEMixinConfig.getInjectedPatches()) arguments.confirm("- " + patch); break; default: throw new TranslatedCommandException(FEPermissions.MSG_UNKNOWN_SUBCOMMAND, subCmd); } >>>>>>> arguments.tabComplete("reload", "modules", "mixin"); String subCmd = arguments.remove().toLowerCase(); if (arguments.isTabCompletion) return; switch (subCmd) { case "reload": ModuleLauncher.instance.reloadConfigs(); arguments.confirm("Reloaded configs. (may not work for all settings)"); break; case "modules": arguments.confirm("Loaded FE modules: " + StringUtils.join(ModuleLauncher.getModuleList(), ", ")); break; case "mixin": arguments.notify("Injected patches:"); for (String patch : FEMixinConfig.getInjectedPatches()) arguments.confirm("- " + patch); break; default: throw new TranslatedCommandException(FEPermissions.MSG_UNKNOWN_SUBCOMMAND, subCmd); }
<<<<<<<
import org.elasticsearch.marvel.test.MarvelIntegTestCase;
import org.elasticsearch.shield.authc.support.SecuredString;
=======
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.xpack.security.authc.support.SecuredString;
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
import org.elasticsearch.test.rest.client.http.HttpResponse;
>>>>>>>
import org.elasticsearch.marvel.test.MarvelIntegTestCase;
import org.elasticsearch.xpack.security.authc.support.SecuredString;
<<<<<<<
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
=======
import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
import static org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase.SecuritySettings.TEST_PASSWORD;
import static org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase.SecuritySettings.TEST_USERNAME;
>>>>>>>
import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
<<<<<<<
=======
if (body != null) {
    requestBuilder.body(body);
}
if (securityEnabled()) {
    requestBuilder.addHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue(TEST_USERNAME, new SecuredString(TEST_PASSWORD.toCharArray())));
}
return requestBuilder.execute();
>>>>>>>
<<<<<<< public static final String NAME = "script"; private final String script; ======= private Script script; @Deprecated private String scriptString; >>>>>>> private Script script; public static final String NAME = "script"; @Deprecated private String scriptString; <<<<<<< static final ScriptQueryBuilder PROTOTYPE = new ScriptQueryBuilder(null); public ScriptQueryBuilder(String script) { ======= public ScriptQueryBuilder(Script script) { >>>>>>> static final ScriptQueryBuilder PROTOTYPE = new ScriptQueryBuilder((Script) null); public ScriptQueryBuilder(Script script) { <<<<<<< protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); builder.field("script", script); if (this.params != null) { builder.field("params", this.params); } if (this.lang != null) { builder.field("lang", lang); ======= protected void doXContent(XContentBuilder builder, Params builderParams) throws IOException { builder.startObject(ScriptQueryParser.NAME); if (script != null) { builder.field(ScriptField.SCRIPT.getPreferredName(), script); } else { if (this.scriptString != null) { builder.field("script", scriptString); } if (this.params != null) { builder.field("params", this.params); } if (this.lang != null) { builder.field("lang", lang); } >>>>>>> protected void doXContent(XContentBuilder builder, Params builderParams) throws IOException { builder.startObject(NAME); if (script != null) { builder.field(ScriptField.SCRIPT.getPreferredName(), script); } else { if (this.scriptString != null) { builder.field("script", scriptString); } if (this.params != null) { builder.field("params", this.params); } if (this.lang != null) { builder.field("lang", lang); }
<<<<<<<
orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL);
orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment
=======
orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id");
getTranslog(orgReplica).rollGeneration(); // isolate the delete in it's own generation
>>>>>>>
orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id");
orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment
<<<<<<<
public static final String MANAGE_ENRICH = "manage_enrich";

public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_ML,
    MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, MANAGE_SECURITY, MANAGE_SAML,
    MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM, MANAGE_ENRICH };
=======
public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_DATA_FRAME_TRANSFORMS, MONITOR_ML, MONITOR_WATCHER,
    MONITOR_ROLLUP, MANAGE, MANAGE_DATA_FRAME_TRANSFORMS, MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES,
    MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR,
    READ_CCR, MANAGE_ILM, READ_ILM};
>>>>>>>
public static final String MANAGE_ENRICH = "manage_enrich";

public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_DATA_FRAME_TRANSFORMS, MONITOR_ML, MONITOR_WATCHER,
    MONITOR_ROLLUP, MANAGE, MANAGE_DATA_FRAME_TRANSFORMS, MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES,
    MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR,
    READ_CCR, MANAGE_ILM, READ_ILM, MANAGE_ENRICH };
<<<<<<<
// TODO: A follow-up to make resync using soft-deletes
Translog.Snapshot snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo);
=======
>>>>>>>
<<<<<<<
import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedAggregation;
=======
>>>>>>>
import org.elasticsearch.search.aggregations.ParsedAggregation;
<<<<<<< private long version; private long seqNo; ======= private final long version; >>>>>>> private final long version; private final long seqNo; <<<<<<< public void updateVersion(long version) { this.version = version; } public long seqNo() { return seqNo; } public void updateSeqNo(long seqNo) { this.seqNo = seqNo; } public void setTranslogLocation(Translog.Location location) { this.location = location; } public Translog.Location getTranslogLocation() { return this.location; } public int sizeInBytes() { if (location != null) { return location.size; } else { return estimatedSizeInBytes(); } } protected abstract int estimatedSizeInBytes(); ======= public abstract int estimatedSizeInBytes(); >>>>>>> public long seqNo() { return seqNo; } public abstract int estimatedSizeInBytes(); <<<<<<< @Override public void updateVersion(long version) { super.updateVersion(version); this.doc.version().setLongValue(version); } @Override public void updateSeqNo(long seqNo) { super.updateSeqNo(seqNo); this.doc.seqNo().setLongValue(seqNo); } ======= >>>>>>> <<<<<<< public Delete(String type, String id, Term uid, long seqNo, long version, VersionType versionType, Origin origin, long startTime, boolean found) { super(uid, seqNo, version, versionType, origin, startTime); ======= public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime) { super(uid, version, versionType, origin, startTime); >>>>>>> public Delete(String type, String id, Term uid, long seqNo, long version, VersionType versionType, Origin origin, long startTime) { super(uid, seqNo, version, versionType, origin, startTime); <<<<<<< this(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), false); ======= this(type, id, uid, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); } public Delete(Delete template, VersionType versionType) { this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime()); >>>>>>> this(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); } public Delete(Delete template, VersionType versionType) { this(template.type(), template.id(), template.uid(), template.seqNo(), template.version(), versionType, template.origin(), template.startTime());
<<<<<<<
import org.elasticsearch.index.query.QueryShardException;
=======
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.search.internal.SearchContext;
>>>>>>>
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.search.internal.SearchContext;
<<<<<<<
public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryShardException {
=======
public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) throws IOException, QueryParsingException {
>>>>>>>
public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) throws IOException, QueryShardException {
<<<<<<< innerTestSettings(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.fileChunkSize().bytesAsInt()); } }); innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.translogOps()); } }); innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE_SETTING.getKey(), randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.translogSize().bytesAsInt()); } }); innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { ======= innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, randomIntBetween(1, 200), new Validator() { >>>>>>> innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { <<<<<<< innerTestSettings(RecoverySettings.INDICES_RECOVERY_COMPRESS_SETTING.getKey(), false, new Validator() { @Override public void validate(RecoverySettings recoverySettings, boolean expectedValue) { assertEquals(expectedValue, recoverySettings.compress()); } }); ======= >>>>>>>
<<<<<<< public void processCommandPlayer(EntityPlayerMP sender, String[] args) throws CommandException ======= public boolean canConsoleUseCommand() { return true; } @Override public PermissionLevel getPermissionLevel() { return PermissionLevel.OP; } @Override public String getCommandUsage(ICommandSender sender) { return "/kill <player> Commit suicide or kill other players (with special permission)."; } @Override public String getPermissionNode() { return ModuleCommands.PERM + ".kill"; } @Override public void registerExtraPermissions() { APIRegistry.perms.registerPermission(getPermissionNode() + ".others", PermissionLevel.OP); } @Override public void processCommandPlayer(EntityPlayerMP sender, String[] args) >>>>>>> public boolean canConsoleUseCommand() { return true; } @Override public PermissionLevel getPermissionLevel() { return PermissionLevel.OP; } @Override public String getCommandUsage(ICommandSender sender) { return "/kill <player> Commit suicide or kill other players (with special permission)."; } @Override public String getPermissionNode() { return ModuleCommands.PERM + ".kill"; } @Override public void registerExtraPermissions() { APIRegistry.perms.registerPermission(getPermissionNode() + ".others", PermissionLevel.OP); } @Override public void processCommandPlayer(EntityPlayerMP sender, String[] args) throws CommandException <<<<<<< public boolean canConsoleUseCommand() { return true; } @Override public void registerExtraPermissions() { APIRegistry.perms.registerPermission(getPermissionNode() + ".others", PermissionLevel.OP); } @Override public List<String> addTabCompletionOptions(ICommandSender sender, String[] args, BlockPos pos) ======= public List<String> addTabCompletionOptions(ICommandSender sender, String[] args) >>>>>>> public List<String> addTabCompletionOptions(ICommandSender sender, String[] args, BlockPos pos)
<<<<<<<
return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats());
=======
return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats());
>>>>>>>
return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats(), indexShard.seqNoStats());
<<<<<<<
IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent,
SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
=======
IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, Aggregator parent,
SignificantTermsAggregatorFactory termsAggFactory, Map<String, Object> metaData) throws IOException {
>>>>>>>
IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, Aggregator parent,
SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
<<<<<<<
public static final Setting<ByteSizeValue> INDICES_RECOVERY_FILE_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.file_chunk_size", new ByteSizeValue(512, ByteSizeUnit.KB), true, Setting.Scope.CLUSTER);
public static final Setting<Integer> INDICES_RECOVERY_TRANSLOG_OPS_SETTING = Setting.intSetting("indices.recovery.translog_ops", 1000, true, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> INDICES_RECOVERY_TRANSLOG_SIZE_SETTING = Setting.byteSizeSetting("indices.recovery.translog_size", new ByteSizeValue(512, ByteSizeUnit.KB), true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> INDICES_RECOVERY_COMPRESS_SETTING = Setting.boolSetting("indices.recovery.compress", true, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER);
=======
public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";
>>>>>>>
public static final Setting<Integer> INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER);
<<<<<<< import org.elasticsearch.index.mapper.VersionFieldMapper; ======= import org.elasticsearch.index.seqno.SeqNoStats; >>>>>>> import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.seqno.SeqNoStats; <<<<<<< public void testSupplyTombstoneDoc() throws Exception { IndexShard shard = newStartedShard(); String id = randomRealisticUnicodeOfLengthBetween(1, 10); ParsedDocument deleteTombstone = shard.getEngine().config().getTombstoneDocSupplier().newDeleteTombstoneDoc("doc", id); assertThat(deleteTombstone.docs(), hasSize(1)); ParseContext.Document deleteDoc = deleteTombstone.docs().get(0); assertThat(deleteDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), containsInAnyOrder(IdFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME)); assertThat(deleteDoc.getField(IdFieldMapper.NAME).binaryValue(), equalTo(Uid.encodeId(id))); assertThat(deleteDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); final String reason = randomUnicodeOfLength(200); ParsedDocument noopTombstone = shard.getEngine().config().getTombstoneDocSupplier().newNoopTombstoneDoc(reason); assertThat(noopTombstone.docs(), hasSize(1)); ParseContext.Document noopDoc = noopTombstone.docs().get(0); assertThat(noopDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), containsInAnyOrder(VersionFieldMapper.NAME, SourceFieldMapper.NAME, SeqNoFieldMapper.TOMBSTONE_NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME)); assertThat(noopDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); assertThat(noopDoc.getField(SourceFieldMapper.NAME).binaryValue(), equalTo(new BytesRef(reason))); closeShards(shard); } public void testSearcherIncludesSoftDeletes() throws Exception { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build(); IndexMetaData metaData = IndexMetaData.builder("test") .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(shard); indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); indexDoc(shard, "test", "1", "{\"foo\" : \"baz\"}"); deleteDoc(shard, "test", "0"); shard.refresh("test"); try (Engine.Searcher searcher = shard.acquireSearcher("test")) { IndexSearcher searchWithSoftDeletes = new IndexSearcher(Lucene.wrapAllDocsLive(searcher.getDirectoryReader())); assertThat(searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10).totalHits, equalTo(0L)); assertThat(searchWithSoftDeletes.search(new TermQuery(new Term("foo", "bar")), 10).totalHits, equalTo(1L)); assertThat(searcher.searcher().search(new TermQuery(new Term("foo", "baz")), 10).totalHits, equalTo(1L)); assertThat(searchWithSoftDeletes.search(new TermQuery(new Term("foo", "baz")), 10).totalHits, equalTo(1L)); } closeShards(shard); } ======= public void testOnCloseStats() throws IOException { final IndexShard indexShard = newStartedShard(true); for (int i = 0; i < 3; i++) { indexDoc(indexShard, "_doc", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); 
indexShard.refresh("test"); // produce segments } // check stats on closed and on opened shard if (randomBoolean()) { closeShards(indexShard); expectThrows(AlreadyClosedException.class, () -> indexShard.seqNoStats()); expectThrows(AlreadyClosedException.class, () -> indexShard.commitStats()); expectThrows(AlreadyClosedException.class, () -> indexShard.storeStats()); } else { final SeqNoStats seqNoStats = indexShard.seqNoStats(); assertThat(seqNoStats.getLocalCheckpoint(), equalTo(2L)); final CommitStats commitStats = indexShard.commitStats(); assertThat(commitStats.getGeneration(), equalTo(2L)); final StoreStats storeStats = indexShard.storeStats(); assertThat(storeStats.sizeInBytes(), greaterThan(0L)); closeShards(indexShard); } } >>>>>>> public void testOnCloseStats() throws IOException { final IndexShard indexShard = newStartedShard(true); for (int i = 0; i < 3; i++) { indexDoc(indexShard, "_doc", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); indexShard.refresh("test"); // produce segments } // check stats on closed and on opened shard if (randomBoolean()) { closeShards(indexShard); expectThrows(AlreadyClosedException.class, () -> indexShard.seqNoStats()); expectThrows(AlreadyClosedException.class, () -> indexShard.commitStats()); expectThrows(AlreadyClosedException.class, () -> indexShard.storeStats()); } else { final SeqNoStats seqNoStats = indexShard.seqNoStats(); assertThat(seqNoStats.getLocalCheckpoint(), equalTo(2L)); final CommitStats commitStats = indexShard.commitStats(); assertThat(commitStats.getGeneration(), equalTo(2L)); final StoreStats storeStats = indexShard.storeStats(); assertThat(storeStats.sizeInBytes(), greaterThan(0L)); closeShards(indexShard); } } public void testSupplyTombstoneDoc() throws Exception { IndexShard shard = newStartedShard(); String id = randomRealisticUnicodeOfLengthBetween(1, 10); ParsedDocument deleteTombstone = shard.getEngine().config().getTombstoneDocSupplier().newDeleteTombstoneDoc("doc", id); assertThat(deleteTombstone.docs(), hasSize(1)); ParseContext.Document deleteDoc = deleteTombstone.docs().get(0); assertThat(deleteDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), containsInAnyOrder(IdFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME)); assertThat(deleteDoc.getField(IdFieldMapper.NAME).binaryValue(), equalTo(Uid.encodeId(id))); assertThat(deleteDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); final String reason = randomUnicodeOfLength(200); ParsedDocument noopTombstone = shard.getEngine().config().getTombstoneDocSupplier().newNoopTombstoneDoc(reason); assertThat(noopTombstone.docs(), hasSize(1)); ParseContext.Document noopDoc = noopTombstone.docs().get(0); assertThat(noopDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), containsInAnyOrder(VersionFieldMapper.NAME, SourceFieldMapper.NAME, SeqNoFieldMapper.TOMBSTONE_NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME)); assertThat(noopDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); assertThat(noopDoc.getField(SourceFieldMapper.NAME).binaryValue(), equalTo(new BytesRef(reason))); closeShards(shard); } public void testSearcherIncludesSoftDeletes() throws Exception { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build(); IndexMetaData metaData = IndexMetaData.builder("test") .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(shard); indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); indexDoc(shard, "test", "1", "{\"foo\" : \"baz\"}"); deleteDoc(shard, "test", "0"); shard.refresh("test"); try (Engine.Searcher searcher = shard.acquireSearcher("test")) { IndexSearcher searchWithSoftDeletes = new IndexSearcher(Lucene.wrapAllDocsLive(searcher.getDirectoryReader())); assertThat(searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10).totalHits, equalTo(0L)); assertThat(searchWithSoftDeletes.search(new TermQuery(new Term("foo", "bar")), 10).totalHits, equalTo(1L)); assertThat(searcher.searcher().search(new TermQuery(new Term("foo", "baz")), 10).totalHits, equalTo(1L)); assertThat(searchWithSoftDeletes.search(new TermQuery(new Term("foo", "baz")), 10).totalHits, equalTo(1L)); } closeShards(shard); }
<<<<<<< verify(completionHandler, times(1)).accept(Thread.currentThread(), null); ======= verify(completionHandler, times(1)).accept(null); for (DocWriteRequest<?> docWriteRequest : bulkRequest.requests()) { IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(docWriteRequest); assertThat(indexRequest, notNullValue()); assertThat(indexRequest.getContentType(), equalTo(xContentType)); } >>>>>>> verify(completionHandler, times(1)).accept(Thread.currentThread(), null); for (DocWriteRequest<?> docWriteRequest : bulkRequest.requests()) { IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(docWriteRequest); assertThat(indexRequest, notNullValue()); assertThat(indexRequest.getContentType(), equalTo(xContentType)); }
<<<<<<< final ShardId shardId = new ShardId(index, ++shardIndex); final int primaryTerm = randomInt(200); ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), primaryTerm, true, ShardRoutingState.STARTED, 1); ======= final ShardId shardId = new ShardId(index, "_na_", ++shardIndex); ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED); >>>>>>> final ShardId shardId = new ShardId(index, "_na_", ++shardIndex); final int primaryTerm = randomInt(200); ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), primaryTerm, true, ShardRoutingState.STARTED);
<<<<<<< import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; ======= >>>>>>> <<<<<<< import static java.util.Collections.emptyList; import static java.util.stream.Collectors.toList; ======= >>>>>>> <<<<<<< public abstract class MapperTestCase extends ESTestCase { protected static final Settings SETTINGS = Settings.builder().put("index.version.created", Version.CURRENT).build(); protected Collection<? extends Plugin> getPlugins() { return emptyList(); } protected Settings getIndexSettings() { return SETTINGS; } protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) { return new IndexAnalyzers( Map.of("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer())), Map.of(), Map.of() ); } protected final String randomIndexOptions() { return randomFrom(new String[] { "docs", "freqs", "positions", "offsets" }); } protected final DocumentMapper createDocumentMapper(XContentBuilder mappings) throws IOException { return createMapperService(mappings).documentMapper(); } protected final MapperService createMapperService(XContentBuilder mappings) throws IOException { return createMapperService(getIndexSettings(), mappings); } /** * Create a {@link MapperService} like we would for an index. 
*/ protected final MapperService createMapperService(Settings settings, XContentBuilder mapping) throws IOException { IndexMetadata meta = IndexMetadata.builder("index") .settings(Settings.builder().put("index.version.created", Version.CURRENT)) .numberOfReplicas(0) .numberOfShards(1) .build(); IndexSettings indexSettings = new IndexSettings(meta, Settings.EMPTY); MapperRegistry mapperRegistry = new IndicesModule( getPlugins().stream().filter(p -> p instanceof MapperPlugin).map(p -> (MapperPlugin) p).collect(toList()) ).getMapperRegistry(); ScriptModule scriptModule = new ScriptModule( Settings.EMPTY, getPlugins().stream().filter(p -> p instanceof ScriptPlugin).map(p -> (ScriptPlugin) p).collect(toList()) ); ScriptService scriptService = new ScriptService(Settings.EMPTY, scriptModule.engines, scriptModule.contexts); SimilarityService similarityService = new SimilarityService(indexSettings, scriptService, Map.of()); MapperService mapperService = new MapperService( indexSettings, createIndexAnalyzers(indexSettings), xContentRegistry(), similarityService, mapperRegistry, () -> { throw new UnsupportedOperationException(); }, () -> true, scriptService ); merge(mapperService, mapping); return mapperService; } protected final void withLuceneIndex( MapperService mapperService, CheckedConsumer<RandomIndexWriter, IOException> builder, CheckedConsumer<IndexReader, IOException> test ) throws IOException { try ( Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, new IndexWriterConfig(mapperService.indexAnalyzer())) ) { builder.accept(iw); try (IndexReader reader = iw.getReader()) { test.accept(reader); } } } protected final SourceToParse source(CheckedConsumer<XContentBuilder, IOException> build) throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().startObject(); build.accept(builder); builder.endObject(); return new SourceToParse("test", "1", BytesReference.bytes(builder), XContentType.JSON); } /** * Merge a new mapping into the one in the provided {@link MapperService}. 
*/ protected final void merge(MapperService mapperService, XContentBuilder mapping) throws IOException { mapperService.merge(null, new CompressedXContent(BytesReference.bytes(mapping)), MergeReason.MAPPING_UPDATE); } protected final XContentBuilder mapping(CheckedConsumer<XContentBuilder, IOException> buildFields) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties"); buildFields.accept(builder); return builder.endObject().endObject().endObject(); } protected final XContentBuilder fieldMapping(CheckedConsumer<XContentBuilder, IOException> buildField) throws IOException { return mapping(b -> { b.startObject("field"); buildField.accept(b); b.endObject(); }); } QueryShardContext createQueryShardContext(MapperService mapperService) { QueryShardContext queryShardContext = mock(QueryShardContext.class); when(queryShardContext.getMapperService()).thenReturn(mapperService); when(queryShardContext.fieldMapper(anyString())).thenAnswer(inv -> mapperService.fieldType(inv.getArguments()[0].toString())); when(queryShardContext.getIndexAnalyzers()).thenReturn(mapperService.getIndexAnalyzers()); when(queryShardContext.getSearchQuoteAnalyzer(anyObject())).thenCallRealMethod(); when(queryShardContext.getSearchAnalyzer(anyObject())).thenCallRealMethod(); when(queryShardContext.getIndexSettings()).thenReturn(mapperService.getIndexSettings()); when(queryShardContext.simpleMatchToIndexNames(anyObject())).thenAnswer( inv -> mapperService.simpleMatchToFullName(inv.getArguments()[0].toString()) ); return queryShardContext; } ======= public abstract class MapperTestCase extends MapperServiceTestCase { >>>>>>> public abstract class MapperTestCase extends MapperServiceTestCase {
<<<<<<< ======= import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedDocValuesField; >>>>>>> import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedDocValuesField; <<<<<<< protected ParentFieldMapper(MappedFieldType fieldType, String type, @Nullable Settings fieldDataSettings, Settings indexSettings) { super(fieldType, false, fieldDataSettings, indexSettings); ======= protected ParentFieldMapper(String name, String indexName, String type, @Nullable Settings fieldDataSettings, Settings indexSettings) { super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0), Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, null, null, fieldDataSettings, indexSettings); >>>>>>> protected ParentFieldMapper(MappedFieldType fieldType, String type, @Nullable Settings fieldDataSettings, Settings indexSettings) { super(fieldType, false, fieldDataSettings, indexSettings); <<<<<<< fields.add(new Field(fieldType.names().indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType)); ======= fields.add(new Field(names.indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType)); if (hasDocValues()) { fields.add(createJoinField(type, parentId)); } >>>>>>> fields.add(new Field(fieldType.names().indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType)); if (fieldType.hasDocValues()) { fields.add(createJoinField(type, parentId)); } <<<<<<< fields.add(new Field(fieldType.names().indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType)); ======= fields.add(new Field(names.indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType)); if (hasDocValues()) { fields.add(createJoinField(type, parentId)); } >>>>>>> fields.add(new Field(fieldType.names().indexName(), Uid.createUid(context.stringBuilder(), type, parentId), fieldType)); if (fieldType.hasDocValues()) { fields.add(createJoinField(type, parentId)); }
<<<<<<< public class HasParentQueryParser extends BaseQueryParserTemp { ======= import static org.elasticsearch.index.query.HasChildQueryParser.joinUtilHelper; public class HasParentQueryParser implements QueryParser { >>>>>>> import static org.elasticsearch.index.query.HasChildQueryParser.joinUtilHelper; public class HasParentQueryParser extends BaseQueryParserTemp {
<<<<<<< this(copy, copy.version(), copy.primaryTerm()); } public ShardRouting(ShardRouting copy, long version) { this(copy, version, copy.primaryTerm()); } public ShardRouting(ShardRouting copy, long version, long primaryTerm) { this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), primaryTerm, copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize()); ======= this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize()); >>>>>>> this(copy, copy.primaryTerm()); } public ShardRouting(ShardRouting copy, long primaryTerm) { this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), primaryTerm, copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize()); <<<<<<< ShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, long primaryTerm, boolean primary, ShardRoutingState state, long version, ======= ShardRouting(Index index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, >>>>>>> ShardRouting(Index index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, long primaryTerm, boolean primary, ShardRoutingState state, <<<<<<< public static ShardRouting newUnassigned(String index, int shardId, RestoreSource restoreSource, long primaryTerm, boolean primary, UnassignedInfo unassignedInfo) { return new ShardRouting(index, shardId, null, null, restoreSource, primaryTerm, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE); ======= public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) { return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE); >>>>>>> public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, long primaryTerm, boolean primary, UnassignedInfo unassignedInfo) { return new ShardRouting(index, shardId, null, null, restoreSource, primaryTerm, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE); <<<<<<< * Initializes an unassigned shard on a node. If the shard is primary, its term is incremented. ======= * Initializes an unassigned shard on a node. * * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. >>>>>>> * Initializes an unassigned shard on a node. If the shard is primary, its term is incremented. * * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
<<<<<<< allocationId = AllocationId.newInitializing(); if (primary) { primaryTerm++; } ======= if (existingAllocationId == null) { allocationId = AllocationId.newInitializing(); } else { allocationId = AllocationId.newInitializing(existingAllocationId); } >>>>>>> if (primary) { primaryTerm++; } if (existingAllocationId == null) { allocationId = AllocationId.newInitializing(); } else { allocationId = AllocationId.newInitializing(existingAllocationId); } <<<<<<< sb.append(", v[").append(version).append("]"); sb.append(", t[").append(primaryTerm).append("]"); ======= >>>>>>> sb.append(", t[").append(primaryTerm).append("]");
<<<<<<< /** * Returns a snapshot with operations having a sequence number equal to or greater than <code>minSeqNo</code>. */ public Snapshot newSnapshotFrom(long minSeqNo) throws IOException { return getSnapshotBetween(minSeqNo, Long.MAX_VALUE); } /** * Returns a snapshot with operations having a sequence number equal to or greater than <code>minSeqNo</code> and * equal to or lesser than <code>maxSeqNo</code>. */ public Snapshot getSnapshotBetween(long minSeqNo, long maxSeqNo) throws IOException { ======= /** * Reads and returns the operation from the given location if the generation it references is still available. Otherwise * this method will return <code>null</code>. */ public Operation readOperation(Location location) throws IOException { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); if (location.generation < getMinFileGeneration()) { return null; } if (current.generation == location.generation) { // no need to fsync here the read operation will ensure that buffers are written to disk // if they are still in RAM and we are reading onto that position return current.read(location); } else { // read backwards - it's likely we need to read on that is recent for (int i = readers.size() - 1; i >= 0; i--) { TranslogReader translogReader = readers.get(i); if (translogReader.generation == location.generation) { return translogReader.read(location); } } } } return null; } public Snapshot newSnapshotFromMinSeqNo(long minSeqNo) throws IOException { >>>>>>> /** * Reads and returns the operation from the given location if the generation it references is still available. Otherwise * this method will return <code>null</code>. */ public Operation readOperation(Location location) throws IOException { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); if (location.generation < getMinFileGeneration()) { return null; } if (current.generation == location.generation) { // no need to fsync here the read operation will ensure that buffers are written to disk // if they are still in RAM and we are reading onto that position return current.read(location); } else { // read backwards - it's likely we need to read on that is recent for (int i = readers.size() - 1; i >= 0; i--) { TranslogReader translogReader = readers.get(i); if (translogReader.generation == location.generation) { return translogReader.read(location); } } } } return null; } public Snapshot newSnapshotFromMinSeqNo(long minSeqNo) throws IOException { /** * Returns a snapshot with operations having a sequence number equal to or greater than <code>minSeqNo</code>. */ public Snapshot newSnapshotFrom(long minSeqNo) throws IOException { return getSnapshotBetween(minSeqNo, Long.MAX_VALUE); } /** * Returns a snapshot with operations having a sequence number equal to or greater than <code>minSeqNo</code> and * equal to or lesser than <code>maxSeqNo</code>. */ public Snapshot getSnapshotBetween(long minSeqNo, long maxSeqNo) throws IOException {
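The readOperation(Location) hunk above resolves a translog location by first ruling out generations that have already been trimmed, then checking the generation currently being written, then scanning the sealed readers newest-first. A minimal plain-Java sketch of that lookup order follows; the ReadByGenerationSketch/Generation names and String payloads are illustrative stand-ins, not part of the Elasticsearch code in the hunk, and the sketch omits the read lock and ensureOpen() call the real method takes.

import java.util.ArrayList;
import java.util.List;

// Illustrative analogue of the lookup order in Translog#readOperation, using plain Java types.
public class ReadByGenerationSketch {
    static final class Generation {
        final long id;
        final String payload;
        Generation(long id, String payload) { this.id = id; this.payload = payload; }
    }

    private final List<Generation> readers = new ArrayList<>(); // sealed generations, oldest first
    private Generation current;                                 // the generation still being written
    private long minFileGeneration;                             // generations below this were trimmed away

    String read(long generation) {
        if (generation < minFileGeneration) {
            return null;                                        // the referenced generation is gone
        }
        if (current != null && current.id == generation) {
            return current.payload;                             // common case: still in the current writer
        }
        for (int i = readers.size() - 1; i >= 0; i--) {         // newest first, as in the hunk
            Generation reader = readers.get(i);
            if (reader.id == generation) {
                return reader.payload;
            }
        }
        return null;
    }
}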
<<<<<<< import org.elasticsearch.index.seqno.SequenceNumbersService; ======= import org.elasticsearch.index.shard.DocsStats; >>>>>>> import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.DocsStats; <<<<<<< if (index.origin() == Operation.Origin.RECOVERY) { return false; } else { ======= if (!index.origin().isRecovery()) { >>>>>>> if (!index.origin().isRecovery()) { <<<<<<< Translog.Location translogLocation = translog.add(new Translog.Delete(delete)); versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), translogLocation)); delete.setTranslogLocation(translogLocation); } finally { if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { seqNoService.markSeqNoAsCompleted(delete.seqNo()); } ======= if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location translogLocation = translog.add(new Translog.Delete(delete)); delete.setTranslogLocation(translogLocation); versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), delete.getTranslogLocation())); } else { // we do not replay in to the translog, so there is no // translog location; that is okay because real-time // gets are not possible during recovery and we will // flush when the recovery is complete versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), null)); } >>>>>>> if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location translogLocation = translog.add(new Translog.Delete(delete)); delete.setTranslogLocation(translogLocation); versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), delete.getTranslogLocation())); } else { // we do not replay in to the translog, so there is no // translog location; that is okay because real-time // gets are not possible during recovery and we will // flush when the recovery is complete versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), null)); } } finally { if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { seqNoService.markSeqNoAsCompleted(delete.seqNo()); } <<<<<<< @Override public SequenceNumbersService seqNoService() { return seqNoService; } ======= @Override public DocsStats getDocStats() { final int numDocs = indexWriter.numDocs(); final int maxDoc = indexWriter.maxDoc(); return new DocsStats(numDocs, maxDoc-numDocs); } >>>>>>> @Override public SequenceNumbersService seqNoService() { return seqNoService; } @Override public DocsStats getDocStats() { final int numDocs = indexWriter.numDocs(); final int maxDoc = indexWriter.maxDoc(); return new DocsStats(numDocs, maxDoc-numDocs); }
<<<<<<< 1, randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); ======= randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); >>>>>>> 1, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); <<<<<<< List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); ======= List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); >>>>>>> List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); <<<<<<< shards.add(TestShardRouting.newShardRouting(indexMeta.name, randomIntBetween(1, 5), null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, randomIntBetween(0, 100), new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); ======= shards.add(TestShardRouting.newShardRouting(indexMeta.name, randomIntBetween(1, 5), null, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); >>>>>>> shards.add(TestShardRouting.newShardRouting(indexMeta.name, randomIntBetween(1, 5), null, null, null, 1, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")));
<<<<<<< import java.util.List; import java.util.Queue; import net.minecraft.command.CommandException; ======= >>>>>>> import net.minecraft.command.CommandException; <<<<<<< import com.forgeessentials.util.CommandParserArgs; ======= import com.forgeessentials.core.misc.Translator; import com.forgeessentials.util.CommandParserArgs; >>>>>>> import com.forgeessentials.core.misc.Translator; import com.forgeessentials.util.CommandParserArgs; <<<<<<< private static void parseList(ICommandSender sender, WorldZone worldZone, Queue<String> args) throws CommandException ======= public static void parseList(CommandParserArgs arguments) >>>>>>> public static void parseList(CommandParserArgs arguments) throws CommandException <<<<<<< private void parseDefine(ICommandSender sender, WorldZone worldZone, Queue<String> args, boolean redefine) throws CommandException ======= public static void parseDefine(CommandParserArgs arguments, boolean redefine) >>>>>>> public static void parseDefine(CommandParserArgs arguments, boolean redefine) throws CommandException <<<<<<< private void parseInfo(ICommandSender sender, WorldZone worldZone, Queue<String> args) throws CommandException ======= public static void parseSelect(CommandParserArgs arguments) >>>>>>> public static void parseSelect(CommandParserArgs arguments) throws CommandException <<<<<<< private void parseEntryExitMessage(ICommandSender sender, WorldZone worldZone, Queue<String> args, boolean isEntry) throws CommandException ======= public static void parseInfo(CommandParserArgs arguments) >>>>>>> public static void parseInfo(CommandParserArgs arguments) throws CommandException <<<<<<< private static void help(ICommandSender sender) { ChatOutputHandler.chatConfirmation(sender, "/zone list [page]: Lists all zones"); ChatOutputHandler.chatConfirmation(sender, "/zone info <zone>|here: Zone information"); ChatOutputHandler.chatConfirmation(sender, "/zone define|redefine <zone-name>: define or redefine a zone."); ChatOutputHandler.chatConfirmation(sender, "/zone delete <zone-id>: Delete a zone."); ChatOutputHandler.chatConfirmation(sender, "/zone entry|exit <zone-id> <message|clear>: Set the zone entry/exit message."); } ======= public static void parseEntryExitMessage(CommandParserArgs arguments, boolean isEntry) { arguments.checkPermission(PERM_SETTINGS); if (arguments.isEmpty()) throw new TranslatedCommandException(FEPermissions.MSG_NOT_ENOUGH_ARGUMENTS); tabCompleteArea(arguments); String areaName = arguments.remove(); >>>>>>> public static void parseEntryExitMessage(CommandParserArgs arguments, boolean isEntry) throws CommandException { arguments.checkPermission(PERM_SETTINGS); if (arguments.isEmpty()) throw new TranslatedCommandException(FEPermissions.MSG_NOT_ENOUGH_ARGUMENTS); tabCompleteArea(arguments); String areaName = arguments.remove(); <<<<<<< @Override public String getCommandUsage(ICommandSender sender) { return "/zone: Displays command help"; ======= arguments.tabComplete("clear"); String msg = arguments.toString(); if (msg.equalsIgnoreCase("clear")) msg = null; if (arguments.isTabCompletion) return; areaZone.setGroupPermissionProperty(Zone.GROUP_DEFAULT, isEntry ? FEPermissions.ZONE_ENTRY_MESSAGE : FEPermissions.ZONE_EXIT_MESSAGE, msg); >>>>>>> arguments.tabComplete("clear"); String msg = arguments.toString(); if (msg.equalsIgnoreCase("clear")) msg = null; if (arguments.isTabCompletion) return; areaZone.setGroupPermissionProperty(Zone.GROUP_DEFAULT, isEntry ? FEPermissions.ZONE_ENTRY_MESSAGE : FEPermissions.ZONE_EXIT_MESSAGE, msg);
<<<<<<< public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval"; ======= /** If we see no indexing operations after this much time for a given shard, we consider that shard inactive (default: 5 minutes). */ public static final String INDEX_SHARD_INACTIVE_TIME_SETTING = "index.shard.inactive_time"; private static final String INDICES_INACTIVE_TIME_SETTING = "indices.memory.shard_inactive_time"; >>>>>>> public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval"; <<<<<<< ======= // We start up inactive active.set(false); >>>>>>> <<<<<<< private void markLastWrite(Engine.Operation op) { lastWriteNS = op.startTime(); active.set(true); ======= private void markLastWrite() { if (active.getAndSet(true) == false) { indexEventListener.onShardActive(this); } >>>>>>> private void markLastWrite() { active.set(true); <<<<<<< logger.debug("shard is now inactive"); indicesLifecycle.onShardInactive(this); ======= updateBufferSize(IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER); logger.debug("marking shard as inactive (inactive_time=[{}]) indexing wise", inactiveTime); indexEventListener.onShardInactive(this); >>>>>>> logger.debug("shard is now inactive"); indexEventListener.onShardInactive(this); <<<<<<< /** * Asynchronously refreshes the engine for new search operations to reflect the latest * changes. */ public void refreshAsync(final String reason) { engineConfig.getThreadPool().executor(ThreadPool.Names.REFRESH).execute(new Runnable() { @Override public void run() { try { refresh(reason); } catch (EngineClosedException ex) { // ignore } } }); } final class EngineRefresher implements Runnable { ======= public IndexEventListener getIndexEventListener() { return indexEventListener; } public TimeValue getInactiveTime() { return inactiveTime; } class EngineRefresher implements Runnable { >>>>>>> public IndexEventListener getIndexEventListener() { return indexEventListener; } /** * Asynchronously refreshes the engine for new search operations to reflect the latest * changes. */ public void refreshAsync(final String reason) { engineConfig.getThreadPool().executor(ThreadPool.Names.REFRESH).execute(new Runnable() { @Override public void run() { try { refresh(reason); } catch (EngineClosedException ex) { // ignore } } }); } final class EngineRefresher implements Runnable {
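The refreshAsync(String) method kept in the resolution above simply hands the refresh off to the REFRESH thread pool and ignores EngineClosedException if the engine went away before the task ran. A small self-contained sketch of that pattern with a plain ExecutorService follows; the class name, the use of IllegalStateException as a stand-in for EngineClosedException, and the main method are assumptions for illustration, not the shard's actual code.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for IndexShard#refreshAsync: run the refresh on a background
// executor and ignore the case where the engine was closed concurrently.
public class RefreshAsyncSketch {
    private final ExecutorService refreshExecutor = Executors.newSingleThreadExecutor();

    public void refreshAsync(final String reason, final Runnable refresh) {
        refreshExecutor.execute(() -> {
            try {
                refresh.run();                    // the actual engine refresh for the given reason
            } catch (IllegalStateException ex) {
                // stands in for EngineClosedException: the engine closed while the task was queued, so ignore it
            }
        });
    }

    public static void main(String[] args) throws InterruptedException {
        RefreshAsyncSketch sketch = new RefreshAsyncSketch();
        sketch.refreshAsync("test", () -> System.out.println("refreshed"));
        sketch.refreshExecutor.shutdown();
        sketch.refreshExecutor.awaitTermination(5, TimeUnit.SECONDS);
    }
}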
<<<<<<< public Collection<Module> nodeModules() { return Collections.singletonList(new GceModule(settings)); ======= public Collection<Module> createGuiceModules() { return Collections.singletonList(new GceModule()); >>>>>>> public Collection<Module> createGuiceModules() { return Collections.singletonList(new GceModule(settings)); <<<<<<< public Collection<Class<? extends LifecycleComponent>> nodeServices() { logger.debug("Register gce compute and metadata services"); Collection<Class<? extends LifecycleComponent>> services = new ArrayList<>(); services.add(GceModule.getComputeServiceImpl()); services.add(GceModule.getMetadataServiceImpl()); return services; ======= public Collection<Class<? extends LifecycleComponent>> getGuiceServiceClasses() { return Collections.singletonList(GceModule.getComputeServiceImpl()); >>>>>>> public Collection<Class<? extends LifecycleComponent>> getGuiceServiceClasses() { logger.debug("Register gce compute and metadata services"); return Arrays.asList( GceModule.getComputeServiceImpl(), GceModule.getMetadataServiceImpl());
<<<<<<< primaryResponse.v2().primaryTerm(primary.primaryTerm()); logger.trace("operation completed on primary [{}]", primary); ======= if (logger.isTraceEnabled()) { logger.trace("operation completed on primary [{}], action [{}], request [{}], cluster state version [{}]", primary, actionName, por.request, observer.observedState().version()); } >>>>>>> primaryResponse.v2().primaryTerm(primary.primaryTerm()); if (logger.isTraceEnabled()) { logger.trace("operation completed on primary [{}], action [{}], request [{}], cluster state version [{}]", primary, actionName, por.request, observer.observedState().version()); }
<<<<<<< Class<? extends ParsedPercentiles> parsedClass = implementationClass(); assertTrue(parsedClass != null && parsedClass.isInstance(parsedAggregation)); ======= public static double[] randomPercents() { List<Double> randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d); double[] percents = new double[randomCdfValues.size()]; for (int i = 0; i < randomCdfValues.size(); i++) { percents[i] = randomCdfValues.get(i); } return percents; >>>>>>> Class<? extends ParsedPercentiles> parsedClass = implementationClass(); assertTrue(parsedClass != null && parsedClass.isInstance(parsedAggregation)); } public static double[] randomPercents() { List<Double> randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d); double[] percents = new double[randomCdfValues.size()]; for (int i = 0; i < randomCdfValues.size(); i++) { percents[i] = randomCdfValues.get(i); } return percents;
<<<<<<< ======= import org.apache.lucene.search.*; import org.apache.lucene.search.join.ScoreMode; >>>>>>> <<<<<<< } else if (parseContext.parseFieldMatcher().match(currentFieldName, SCORE_FIELD)) { // deprecated we use a boolean now String scoreTypeValue = parser.text(); if ("score".equals(scoreTypeValue)) { score = true; } else if ("none".equals(scoreTypeValue)) { score = false; } } else if ("score".equals(currentFieldName)) { score = parser.booleanValue(); ======= } else if ("score_mode".equals(currentFieldName) || "scoreMode".equals(currentFieldName)) { String scoreModeValue = parser.text(); if ("score".equals(scoreModeValue)) { score = true; } else if ("none".equals(scoreModeValue)) { score = false; } >>>>>>> } else if (parseContext.parseFieldMatcher().match(currentFieldName, SCORE_FIELD)) { String scoreModeValue = parser.text(); if ("score".equals(scoreModeValue)) { score = true; } else if ("none".equals(scoreModeValue)) { score = false; } else { throw new QueryParsingException(parseContext, "[has_parent] query does not support [" + scoreModeValue + "] as an option for score_mode"); } } else if ("score".equals(currentFieldName)) { score = parser.booleanValue(); <<<<<<< @Override public HasParentQueryBuilder getBuilderPrototype() { return PROTOTYPE; ======= static Query createParentQuery(Query innerQuery, String parentType, boolean score, QueryParseContext parseContext, InnerHitsSubSearchContext innerHits) throws IOException { DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { throw new QueryParsingException(parseContext, "[has_parent] query configured 'parent_type' [" + parentType + "] is not a valid type"); } if (innerHits != null) { ParsedQuery parsedQuery = new ParsedQuery(innerQuery, parseContext.copyNamedQueries()); InnerHitsContext.ParentChildInnerHits parentChildInnerHits = new InnerHitsContext.ParentChildInnerHits(innerHits.getSubSearchContext(), parsedQuery, null, parseContext.mapperService(), parentDocMapper); String name = innerHits.getName() != null ? innerHits.getName() : parentType; parseContext.addInnerHits(name, parentChildInnerHits); } Set<String> parentTypes = new HashSet<>(5); parentTypes.add(parentDocMapper.type()); ParentChildIndexFieldData parentChildIndexFieldData = null; for (DocumentMapper documentMapper : parseContext.mapperService().docMappers(false)) { ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper(); if (parentFieldMapper.active()) { DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type()); parentChildIndexFieldData = parseContext.getForField(parentFieldMapper.fieldType()); if (parentTypeDocumentMapper == null) { // Only add this, if this parentFieldMapper (also a parent) isn't a child of another parent. 
parentTypes.add(parentFieldMapper.type()); } } } if (parentChildIndexFieldData == null) { throw new QueryParsingException(parseContext, "[has_parent] no _parent field configured"); } Query parentTypeQuery = null; if (parentTypes.size() == 1) { DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypes.iterator().next()); if (documentMapper != null) { parentTypeQuery = documentMapper.typeFilter(); } } else { BooleanQuery.Builder parentsFilter = new BooleanQuery.Builder(); for (String parentTypeStr : parentTypes) { DocumentMapper documentMapper = parseContext.mapperService().documentMapper(parentTypeStr); if (documentMapper != null) { parentsFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD); } } parentTypeQuery = parentsFilter.build(); } if (parentTypeQuery == null) { return null; } // wrap the query with type query innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter()); Query childrenFilter = Queries.not(parentTypeQuery); ScoreMode scoreMode = score ? ScoreMode.Max : ScoreMode.None; return joinUtilHelper(parentType, parentChildIndexFieldData, childrenFilter, scoreMode, innerQuery, 0, Integer.MAX_VALUE); >>>>>>> @Override public HasParentQueryBuilder getBuilderPrototype() { return PROTOTYPE;
<<<<<<< public synchronized AmazonS3 client() { String endpoint = getDefaultEndpoint(); String account = settings.get(CLOUD_S3.KEY, settings.get(CLOUD_AWS.KEY)); String key = settings.get(CLOUD_S3.SECRET, settings.get(CLOUD_AWS.SECRET)); return getClient(endpoint, null, account, key, null, null); } @Override public AmazonS3 client(String endpoint, String protocol, String region, String account, String key) { return client(endpoint, protocol, region, account, key, null, null); } @Override public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries, Boolean pathStyleAccess) { if (region != null && endpoint == null) { endpoint = getEndpoint(region); logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint); } else if (endpoint == null) { endpoint = getDefaultEndpoint(); } if (account == null || key == null) { account = settings.get(CLOUD_S3.KEY, settings.get(CLOUD_AWS.KEY)); key = settings.get(CLOUD_S3.SECRET, settings.get(CLOUD_AWS.SECRET)); ======= public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries) { if (Strings.isNullOrEmpty(endpoint)) { // We need to set the endpoint based on the region if (region != null) { endpoint = getEndpoint(region); logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint); } else { // No region has been set so we will use the default endpoint endpoint = getDefaultEndpoint(); } >>>>>>> public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, Boolean pathStyleAccess) { if (Strings.isNullOrEmpty(endpoint)) { // We need to set the endpoint based on the region if (region != null) { endpoint = getEndpoint(region); logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint); } else { // No region has been set so we will use the default endpoint endpoint = getDefaultEndpoint(); } <<<<<<< private synchronized AmazonS3 getClient(String endpoint, String protocol, String account, String key, Integer maxRetries, Boolean pathStyleAccess) { Tuple<String, String> clientDescriptor = new Tuple<String, String>(endpoint, account); ======= private synchronized AmazonS3 getClient(String endpoint, Protocol protocol, String account, String key, Integer maxRetries) { Tuple<String, String> clientDescriptor = new Tuple<>(endpoint, account); >>>>>>> private synchronized AmazonS3 getClient(String endpoint, Protocol protocol, String account, String key, Integer maxRetries, Boolean pathStyleAccess) { Tuple<String, String> clientDescriptor = new Tuple<>(endpoint, account);
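The resolved client(...)/getClient(...) methods above feed an endpoint (derived from the region when none is configured), a Protocol, credentials and an optional retry count into an AWS SDK 1.x client, caching clients per endpoint/account via the Tuple key. As a rough, hedged sketch of how those inputs map onto the SDK (assuming the standard aws-java-sdk-s3 1.x API; the class and method names below are illustrative, and the real service applies further configuration and caching not shown):

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

// Illustrative sketch only; not the plugin's actual implementation.
public class S3ClientSketch {
    public static AmazonS3 build(String endpoint, Protocol protocol, String account, String key, Integer maxRetries) {
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(protocol);            // http or https, as resolved from the plugin settings
        if (maxRetries != null) {
            config.setMaxErrorRetry(maxRetries); // only override the SDK default when explicitly configured
        }
        AmazonS3Client client = new AmazonS3Client(new BasicAWSCredentials(account, key), config);
        if (endpoint != null) {
            client.setEndpoint(endpoint);        // region-derived endpoint, or the default endpoint
        }
        return client;
    }
}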
<<<<<<< @Override public void testMustRewrite() throws IOException { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); super.testMustRewrite(); } public void testRewrite() throws IOException { FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(new WrapperQueryBuilder(new TermQueryBuilder("foo", "bar").toString())); FunctionScoreQueryBuilder rewrite = (FunctionScoreQueryBuilder) functionScoreQueryBuilder.rewrite(queryShardContext()); assertNotSame(functionScoreQueryBuilder, rewrite); assertEquals(rewrite.query(), new TermQueryBuilder("foo", "bar")); } public void testRewriteWithFunction() throws IOException { TermQueryBuilder secondFunction = new TermQueryBuilder("tq", "2"); QueryBuilder queryBuilder = randomBoolean() ? new WrapperQueryBuilder(new TermQueryBuilder("foo", "bar").toString()) : new TermQueryBuilder("foo", "bar"); FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(queryBuilder, new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ new FunctionScoreQueryBuilder.FilterFunctionBuilder(new WrapperQueryBuilder(new TermQueryBuilder("tq", "1").toString()), new RandomScoreFunctionBuilder()), new FunctionScoreQueryBuilder.FilterFunctionBuilder(secondFunction, new RandomScoreFunctionBuilder()) }); FunctionScoreQueryBuilder rewrite = (FunctionScoreQueryBuilder) functionScoreQueryBuilder.rewrite(queryShardContext()); assertNotSame(functionScoreQueryBuilder, rewrite); assertEquals(rewrite.query(), new TermQueryBuilder("foo", "bar")); assertEquals(rewrite.filterFunctionBuilders()[0].getFilter(), new TermQueryBuilder("tq", "1")); assertSame(rewrite.filterFunctionBuilders()[1].getFilter(), secondFunction); } ======= public void testQueryMalformedArrayNotSupported() throws IOException { String json = "{\n" + " \"function_score\" : {\n" + " \"not_supported\" : []\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch (ParsingException e) { assertThat(e.getMessage(), containsString("array [not_supported] is not supported")); } } public void testQueryMalformedFieldNotSupported() throws IOException { String json = "{\n" + " \"function_score\" : {\n" + " \"not_supported\" : \"value\"\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch (ParsingException e) { assertThat(e.getMessage(), containsString("field [not_supported] is not supported")); } } public void testMalformedQueryFunctionFieldNotSupported() throws IOException { String json = "{\n" + " \"function_score\" : {\n" + " \"functions\" : [ {\n" + " \"not_supported\" : 23.0\n" + " }\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch (ParsingException e) { assertThat(e.getMessage(), containsString("field [not_supported] is not supported")); } } public void testMalformedQuery() throws IOException { //verify that an error is thrown rather than setting the query twice (https://github.com/elastic/elasticsearch/issues/16583) String json = "{\n" + " \"function_score\":{\n" + " \"query\":{\n" + " \"bool\":{\n" + " \"must\":{\"match\":{\"field\":\"value\"}}" + " },\n" + " \"ignored_field_name\": {\n" + " {\"match\":{\"field\":\"value\"}}\n" + " }\n" + " }\n" + " }\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch(ParsingException e) { assertThat(e.getMessage(), containsString("[query] is already defined.")); } } >>>>>>> @Override public void testMustRewrite() throws IOException { assumeTrue("test runs only when at 
least a type is registered", getCurrentTypes().length > 0); super.testMustRewrite(); } public void testRewrite() throws IOException { FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(new WrapperQueryBuilder(new TermQueryBuilder("foo", "bar").toString())); FunctionScoreQueryBuilder rewrite = (FunctionScoreQueryBuilder) functionScoreQueryBuilder.rewrite(queryShardContext()); assertNotSame(functionScoreQueryBuilder, rewrite); assertEquals(rewrite.query(), new TermQueryBuilder("foo", "bar")); } public void testRewriteWithFunction() throws IOException { TermQueryBuilder secondFunction = new TermQueryBuilder("tq", "2"); QueryBuilder queryBuilder = randomBoolean() ? new WrapperQueryBuilder(new TermQueryBuilder("foo", "bar").toString()) : new TermQueryBuilder("foo", "bar"); FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(queryBuilder, new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ new FunctionScoreQueryBuilder.FilterFunctionBuilder(new WrapperQueryBuilder(new TermQueryBuilder("tq", "1").toString()), new RandomScoreFunctionBuilder()), new FunctionScoreQueryBuilder.FilterFunctionBuilder(secondFunction, new RandomScoreFunctionBuilder()) }); FunctionScoreQueryBuilder rewrite = (FunctionScoreQueryBuilder) functionScoreQueryBuilder.rewrite(queryShardContext()); assertNotSame(functionScoreQueryBuilder, rewrite); assertEquals(rewrite.query(), new TermQueryBuilder("foo", "bar")); assertEquals(rewrite.filterFunctionBuilders()[0].getFilter(), new TermQueryBuilder("tq", "1")); assertSame(rewrite.filterFunctionBuilders()[1].getFilter(), secondFunction); } public void testQueryMalformedArrayNotSupported() throws IOException { String json = "{\n" + " \"function_score\" : {\n" + " \"not_supported\" : []\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch (ParsingException e) { assertThat(e.getMessage(), containsString("array [not_supported] is not supported")); } } public void testQueryMalformedFieldNotSupported() throws IOException { String json = "{\n" + " \"function_score\" : {\n" + " \"not_supported\" : \"value\"\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch (ParsingException e) { assertThat(e.getMessage(), containsString("field [not_supported] is not supported")); } } public void testMalformedQueryFunctionFieldNotSupported() throws IOException { String json = "{\n" + " \"function_score\" : {\n" + " \"functions\" : [ {\n" + " \"not_supported\" : 23.0\n" + " }\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch (ParsingException e) { assertThat(e.getMessage(), containsString("field [not_supported] is not supported")); } } public void testMalformedQuery() throws IOException { //verify that an error is thrown rather than setting the query twice (https://github.com/elastic/elasticsearch/issues/16583) String json = "{\n" + " \"function_score\":{\n" + " \"query\":{\n" + " \"bool\":{\n" + " \"must\":{\"match\":{\"field\":\"value\"}}" + " },\n" + " \"ignored_field_name\": {\n" + " {\"match\":{\"field\":\"value\"}}\n" + " }\n" + " }\n" + " }\n" + " }\n" + "}"; try { parseQuery(json); fail("parse should have failed"); } catch(ParsingException e) { assertThat(e.getMessage(), containsString("[query] is already defined.")); } }
<<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, right.actual, true); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, right.actual, true); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, right.actual, true); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, right.actual, true); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, true); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, false, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, false); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, false); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, false, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, false); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, false); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, false, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, false); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, false); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, false, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, right.actual, false); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, false); <<<<<<< final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, false, true); ======= final Type promote = AnalyzerCaster.promoteNumeric(definition, left.actual, right.actual, false); >>>>>>> final Type promote = AnalyzerCaster.promoteNumeric(left.actual, right.actual, false);
<<<<<<< // TODO: We haven't had timestamp for Index operations in Lucene yet, we need to loosen this check without timestamp. // We don't store versionType in Lucene index, we need to exclude it from this check final boolean sameOp; if (newOp instanceof Translog.Index && prvOp instanceof Translog.Index) { final Translog.Index o1 = (Translog.Index) newOp; final Translog.Index o2 = (Translog.Index) prvOp; sameOp = Objects.equals(o1.id(), o2.id()) && Objects.equals(o1.type(), o2.type()) && Objects.equals(o1.source(), o2.source()) && Objects.equals(o1.routing(), o2.routing()) && o1.primaryTerm() == o2.primaryTerm() && o1.seqNo() == o2.seqNo() && o1.version() == o2.version(); } else if (newOp instanceof Translog.Delete && prvOp instanceof Translog.Delete) { final Translog.Delete o1 = (Translog.Delete) newOp; final Translog.Delete o2 = (Translog.Delete) prvOp; sameOp = Objects.equals(o1.id(), o2.id()) && Objects.equals(o1.type(), o2.type()) && o1.primaryTerm() == o2.primaryTerm() && o1.seqNo() == o2.seqNo() && o1.version() == o2.version(); } else { sameOp = false; } if (sameOp == false) { throw new AssertionError( "seqNo [" + seqNo + "] was processed twice in generation [" + generation + "], with different data. " + "prvOp [" + prvOp + "], newOp [" + newOp + "]", previous.v2()); } ======= if (newOp.equals(prvOp) == false) { throw new AssertionError( "seqNo [" + seqNo + "] was processed twice in generation [" + generation + "], with different data. " + "prvOp [" + prvOp + "], newOp [" + newOp + "]", previous.v2()); } >>>>>>> // TODO: We haven't had timestamp for Index operations in Lucene yet, we need to loosen this check without timestamp. // We don't store versionType in Lucene index, we need to exclude it from this check final boolean sameOp; if (newOp instanceof Translog.Index && prvOp instanceof Translog.Index) { final Translog.Index o1 = (Translog.Index) prvOp; final Translog.Index o2 = (Translog.Index) newOp; sameOp = Objects.equals(o1.id(), o2.id()) && Objects.equals(o1.type(), o2.type()) && Objects.equals(o1.source(), o2.source()) && Objects.equals(o1.routing(), o2.routing()) && o1.primaryTerm() == o2.primaryTerm() && o1.seqNo() == o2.seqNo() && o1.version() == o2.version(); } else if (newOp instanceof Translog.Delete && prvOp instanceof Translog.Delete) { final Translog.Delete o1 = (Translog.Delete) newOp; final Translog.Delete o2 = (Translog.Delete) prvOp; sameOp = Objects.equals(o1.id(), o2.id()) && Objects.equals(o1.type(), o2.type()) && o1.primaryTerm() == o2.primaryTerm() && o1.seqNo() == o2.seqNo() && o1.version() == o2.version(); } else { sameOp = false; } if (sameOp == false) { throw new AssertionError( "seqNo [" + seqNo + "] was processed twice in generation [" + generation + "], with different data. " + "prvOp [" + prvOp + "], newOp [" + newOp + "]", previous.v2()); }
<<<<<<< InputStream synonyms = getClass().getResourceAsStream("synonyms.txt"); InputStream synonymsWordnet = getClass().getResourceAsStream("synonyms_wordnet.txt"); Path home = createTempDir(); Path config = home.resolve("config"); Files.createDirectory(config); Files.copy(synonyms, config.resolve("synonyms.txt")); Files.copy(synonymsWordnet, config.resolve("synonyms_wordnet.txt")); ======= String json = "/org/elasticsearch/index/analysis/synonyms/synonyms.json"; >>>>>>> InputStream synonyms = getClass().getResourceAsStream("synonyms.txt"); InputStream synonymsWordnet = getClass().getResourceAsStream("synonyms_wordnet.txt"); Path home = createTempDir(); Path config = home.resolve("config"); Files.createDirectory(config); Files.copy(synonyms, config.resolve("synonyms.txt")); Files.copy(synonymsWordnet, config.resolve("synonyms_wordnet.txt")); String json = "/org/elasticsearch/index/analysis/synonyms/synonyms.json"; <<<<<<< loadFromClasspath("org/elasticsearch/index/analysis/synonyms/synonyms.json") .put("path.home", home) ======= loadFromStream(json, getClass().getResourceAsStream(json)) .put("path.home", createTempDir().toString()) >>>>>>> loadFromStream(json, getClass().getResourceAsStream(json)) .put("path.home", home)
<<<<<<< public void onModule(SettingsModule settingsModule) { ======= @Override public String name() { return "mapper-attachments"; } @Override public String description() { return "Adds the attachment type allowing to parse different attachment formats"; } @Override public List<Setting<?>> getSettings() { >>>>>>> @Override public List<Setting<?>> getSettings() {
<<<<<<<
import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
=======
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
import org.elasticsearch.index.mapper.StringFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.mapper.TTLFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
>>>>>>>
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.StringFieldMapper;
import org.elasticsearch.index.mapper.TTLFieldMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.mapper.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.mapper.internal.SeqNoFieldMapper;
import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
<<<<<<<
    public static EventHandler eventHandler = new EventHandler();
=======
    public static CommandsEventHandler eventHandler = new CommandsEventHandler();

    private static MCStatsHelper mcstats = new MCStatsHelper();
>>>>>>>
    public static CommandsEventHandler eventHandler = new CommandsEventHandler();
<<<<<<<
=======
import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
>>>>>>>
<<<<<<<
=======
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.elasticsearch.common.ParsingException;
>>>>>>>
import org.elasticsearch.common.ParsingException;
<<<<<<<
=======
import org.elasticsearch.index.mapper.MappedFieldType;
>>>>>>>
<<<<<<<
    public FieldMaskingSpanQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
    public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
    public FieldMaskingSpanQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
        QueryBuilder query = parseContext.parseInnerQueryBuilder();
        if (!(query instanceof SpanQueryBuilder)) {
            throw new QueryParsingException(parseContext, "[field_masking_span] query must be of type span query");
=======
        Query query = parseContext.parseInnerQuery();
        if (!(query instanceof SpanQuery)) {
            throw new ParsingException(parseContext, "[field_masking_span] query] must be of type span query");
>>>>>>>
        QueryBuilder query = parseContext.parseInnerQueryBuilder();
        if (!(query instanceof SpanQueryBuilder)) {
            throw new ParsingException(parseContext, "[field_masking_span] query must be of type span query");
<<<<<<<
    public String getPermissionNode() {
        return TeleportModule.PERM_HOME;
    }

    @Override
    public List<String> addTabCompletionOptions(ICommandSender sender, String[] args, BlockPos pos)
=======
    public List<String> addTabCompletionOptions(ICommandSender sender, String[] args)
>>>>>>>
    public List<String> addTabCompletionOptions(ICommandSender sender, String[] args, BlockPos pos)
<<<<<<<
=======
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.lucene.BytesRefs;
>>>>>>>
import org.elasticsearch.common.ParsingException;
<<<<<<<
    public RangeQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, QueryParsingException {
=======
    public Query parse(QueryParseContext parseContext) throws IOException, ParsingException {
>>>>>>>
    public RangeQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
<<<<<<<
        RangeQueryBuilder rangeQuery = new RangeQueryBuilder(fieldName);
        rangeQuery.from(from);
        rangeQuery.to(to);
        rangeQuery.includeLower(includeLower);
        rangeQuery.includeUpper(includeUpper);
        if (timeZone != null) {
            rangeQuery.timeZone(timeZone);
=======
        Query query = null;
        MappedFieldType mapper = parseContext.fieldMapper(fieldName);
        if (mapper != null) {
            if (mapper instanceof DateFieldMapper.DateFieldType) {
                query = ((DateFieldMapper.DateFieldType) mapper).rangeQuery(from, to, includeLower, includeUpper, timeZone, forcedDateParser);
            } else {
                if (timeZone != null) {
                    throw new ParsingException(parseContext, "[range] time_zone can not be applied to non date field [" + fieldName + "]");
                }
                //LUCENE 4 UPGRADE Mapper#rangeQuery should use bytesref as well?
                query = mapper.rangeQuery(from, to, includeLower, includeUpper);
            }
>>>>>>>
        RangeQueryBuilder rangeQuery = new RangeQueryBuilder(fieldName);
        rangeQuery.from(from);
        rangeQuery.to(to);
        rangeQuery.includeLower(includeLower);
        rangeQuery.includeUpper(includeUpper);
        if (timeZone != null) {
            rangeQuery.timeZone(timeZone);
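// A minimal sketch (not taken from the record above) of the builder chain that the resolved
// hunk keeps: the parser assembles a RangeQueryBuilder rather than producing a Lucene Query
// directly. The field name, bounds, and time zone below are illustrative assumptions.
import org.elasticsearch.index.query.RangeQueryBuilder;

public class RangeQuerySketch {
    public static void main(String[] args) {
        RangeQueryBuilder rangeQuery = new RangeQueryBuilder("timestamp"); // hypothetical field
        rangeQuery.from("2016-01-01");   // lower bound, as forwarded by the parser
        rangeQuery.to("now");            // upper bound
        rangeQuery.includeLower(true);
        rangeQuery.includeUpper(false);
        rangeQuery.timeZone("+01:00");   // only applied when a time_zone was parsed
    }
}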
<<<<<<<
        expression.analyze(variables);
        arguments.set(argument, expression.cast(variables));
=======
        expression.internal = true;
        expression.analyze(settings, definition, variables);
        arguments.set(argument, expression.cast(settings, definition, variables));
>>>>>>>
        expression.internal = true;
        expression.analyze(variables);
        arguments.set(argument, expression.cast(variables));
<<<<<<<
=======
import org.apache.lucene.search.Query;
>>>>>>>
<<<<<<<
/**
 * Parser for query filter
 * @deprecated use any query instead directly, possible since queries and filters are merged.
 */
=======
// TODO: remove when https://github.com/elastic/elasticsearch/issues/13326 is fixed
>>>>>>>
/**
 * Parser for query filter
 * @deprecated use any query instead directly, possible since queries and filters are merged.
 */
// TODO: remove when https://github.com/elastic/elasticsearch/issues/13326 is fixed
<<<<<<<
    public QueryFilterBuilder getBuilderPrototype() {
        return QueryFilterBuilder.PROTOTYPE;
=======
    public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
        return parseContext.parseInnerQuery();
>>>>>>>
    public QueryFilterBuilder getBuilderPrototype() {
        return QueryFilterBuilder.PROTOTYPE;
<<<<<<<
    public static class CustomSignificanceHeuristicPlugin extends Plugin {
=======
    public static class CustomSignificanceHeuristicPlugin extends Plugin implements ScriptPlugin {

        @Override
        public String name() {
            return "test-plugin-significance-heuristic";
        }

        @Override
        public String description() {
            return "Significance heuristic plugin";
        }
>>>>>>>
    public static class CustomSignificanceHeuristicPlugin extends Plugin implements ScriptPlugin {