conflict_resolution (string, length 27 to 16k)
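Each row of this dump records one Git merge conflict together with the resolution that was eventually committed: the two conflicting sides appear between the standard <<<<<<<, =======, and >>>>>>> markers, and the lines that follow the >>>>>>> marker (up to the next conflict marker or the end of the row) hold the resolved text. The sketch below is a minimal, assumed way of splitting a row into those three parts; the class ConflictRowParser and its helper type are hypothetical and are not part of the dataset itself.

import java.util.ArrayList;
import java.util.List;

// Minimal sketch (assumed, not part of the dataset): split the first conflict hunk of a
// conflict_resolution row into its "ours" side, its "theirs" side, and the resolution text.
public final class ConflictRowParser {

	public static final class ParsedHunk {
		public final String ours;
		public final String theirs;
		public final String resolution;

		ParsedHunk(final String ours, final String theirs, final String resolution) {
			this.ours = ours;
			this.theirs = theirs;
			this.resolution = resolution;
		}
	}

	public static ParsedHunk parseFirstHunk(final String row) {
		final List<String> ours = new ArrayList<>();
		final List<String> theirs = new ArrayList<>();
		final List<String> resolution = new ArrayList<>();
		int state = 0; // 0 = before the hunk, 1 = ours, 2 = theirs, 3 = resolution
		for (final String line : row.split("\n", -1)) {
			if ((state == 0) && line.startsWith("<<<<<<<")) {
				state = 1;
			}
			else if ((state == 1) && line.startsWith("=======")) {
				state = 2;
			}
			else if ((state == 2) && line.startsWith(">>>>>>>")) {
				state = 3;
			}
			else if ((state == 3) && line.startsWith("<<<<<<<")) {
				// the next hunk begins, so the first hunk's resolution ends here
				break;
			}
			else if (state == 1) {
				ours.add(line);
			}
			else if (state == 2) {
				theirs.add(line);
			}
			else if (state == 3) {
				resolution.add(line);
			}
		}
		return new ParsedHunk(
				String.join("\n", ours),
				String.join("\n", theirs),
				String.join("\n", resolution));
	}
}

Rows that contain several hunks, such as the first row below, would be handled by running the same state machine in a loop instead of breaking at the next <<<<<<< marker.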
<<<<<<<
dependency.addEvidence(EvidenceType.PRODUCT, name, "Project", group, Confidence.HIGH);
dependency.addEvidence(EvidenceType.VENDOR, name, "Project", group, Confidence.HIGH);
=======
dependency.getProductEvidence().addEvidence(name, "Project",
group, Confidence.HIGH);
dependency.setName(group);
>>>>>>>
dependency.addEvidence(EvidenceType.PRODUCT, name, "Project", group, Confidence.HIGH);
dependency.addEvidence(EvidenceType.VENDOR, name, "Project", group, Confidence.HIGH);
dependency.setName(group);
<<<<<<<
final String source = currentDep.getDisplayFileName();
currentDep.addEvidence(EvidenceType.PRODUCT, source, "Product", product, Confidence.MEDIUM);
currentDep.addEvidence(EvidenceType.VENDOR, source, "Vendor", product, Confidence.MEDIUM);
currentDep.addEvidence(EvidenceType.VERSION, source, "Version", version, Confidence.MEDIUM);
=======
final String source = currentDep.getFileName();
currentDep.getProductEvidence().addEvidence(source, "Product",
product, Confidence.MEDIUM);
currentDep.getVersionEvidence().addEvidence(source, "Version",
version, Confidence.MEDIUM);
currentDep.setName(product);
currentDep.setVersion(version);
>>>>>>>
final String source = currentDep.getFileName();
currentDep.addEvidence(EvidenceType.PRODUCT, source, "Product", product, Confidence.MEDIUM);
currentDep.addEvidence(EvidenceType.VENDOR, source, "Vendor", product, Confidence.MEDIUM);
currentDep.addEvidence(EvidenceType.VERSION, source, "Version", version, Confidence.MEDIUM);
currentDep.setName(product);
currentDep.setVersion(version); |
<<<<<<<
public abstract class BaseSecondaryIndexDataStore implements
=======
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class BaseSecondaryIndexDataStore<MutationType> implements
>>>>>>>
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class BaseSecondaryIndexDataStore implements
<<<<<<<
private final static Logger LOGGER = Logger.getLogger(BaseSecondaryIndexDataStore.class);
protected final Map<String, Writer> writerCache = new HashMap<>();
=======
private final static Logger LOGGER = LoggerFactory.getLogger(BaseSecondaryIndexDataStore.class);
protected final Map<String, Writer<MutationType>> writerCache = new HashMap<>();
>>>>>>>
private final static Logger LOGGER = LoggerFactory.getLogger(BaseSecondaryIndexDataStore.class);
protected final Map<String, Writer> writerCache = new HashMap<>(); |
<<<<<<<
import mil.nga.giat.geowave.core.index.sfc.tiered.TieredSFCIndexFactory;
import mil.nga.giat.geowave.core.store.cli.remote.options.IndexPluginOptions.BaseIndexBuilder;
=======
import mil.nga.giat.geowave.core.index.sfc.xz.XZHierarchicalIndexFactory;
>>>>>>>
import mil.nga.giat.geowave.core.index.sfc.xz.XZHierarchicalIndexFactory;
import mil.nga.giat.geowave.core.store.cli.remote.options.IndexPluginOptions.BaseIndexBuilder; |
<<<<<<<
import mil.nga.giat.geowave.adapter.vector.plugin.GeoWaveGTDataStore;
import mil.nga.giat.geowave.adapter.vector.stats.FeatureBoundingBoxStatistics;
import mil.nga.giat.geowave.adapter.vector.stats.FeatureTimeRangeStatistics;
import mil.nga.giat.geowave.core.geotime.store.query.TemporalRange;
import mil.nga.giat.geowave.core.index.ByteArrayId;
import mil.nga.giat.geowave.core.store.CloseableIterator;
import mil.nga.giat.geowave.core.store.adapter.statistics.DataStatistics;
import mil.nga.giat.geowave.core.store.adapter.statistics.DataStatisticsStore;
import mil.nga.giat.geowave.core.store.operations.remote.options.DataStorePluginOptions;
=======
>>>>>>>
import mil.nga.giat.geowave.adapter.vector.plugin.GeoWaveGTDataStore;
import mil.nga.giat.geowave.adapter.vector.stats.FeatureBoundingBoxStatistics;
import mil.nga.giat.geowave.adapter.vector.stats.FeatureTimeRangeStatistics;
import mil.nga.giat.geowave.core.geotime.store.query.TemporalRange;
import mil.nga.giat.geowave.core.index.ByteArrayId;
import mil.nga.giat.geowave.core.store.CloseableIterator;
import mil.nga.giat.geowave.core.store.adapter.statistics.DataStatistics;
import mil.nga.giat.geowave.core.store.adapter.statistics.DataStatisticsStore;
import mil.nga.giat.geowave.core.store.operations.remote.options.DataStorePluginOptions;
<<<<<<<
public static Envelope getGeoBounds(
final DataStorePluginOptions dataStorePlugin,
final ByteArrayId adapterId ) {
final DataStatisticsStore statisticsStore = dataStorePlugin.createDataStatisticsStore();
final CloseableIterator<DataStatistics<?>> statsIt = statisticsStore.getDataStatistics(adapterId);
while (statsIt.hasNext()) {
final DataStatistics stats = statsIt.next();
if (stats instanceof FeatureBoundingBoxStatistics) {
final FeatureBoundingBoxStatistics bbStats = (FeatureBoundingBoxStatistics) stats;
return new Envelope(
bbStats.getMinX(),
bbStats.getMaxX(),
bbStats.getMinY(),
bbStats.getMaxY());
}
}
return null;
}
=======
public static Envelope getGeoBounds(
final DataStorePluginOptions dataStorePlugin,
final ByteArrayId adapterId,
final String geomField ) {
final DataStatisticsStore statisticsStore = dataStorePlugin.createDataStatisticsStore();
ByteArrayId geoStatId = FeatureBoundingBoxStatistics.composeId(geomField);
DataStatistics<?> geoStat = statisticsStore.getDataStatistics(
adapterId,
geoStatId,
null);
if (geoStat != null) {
if (geoStat instanceof FeatureBoundingBoxStatistics) {
final FeatureBoundingBoxStatistics bbStats = (FeatureBoundingBoxStatistics) geoStat;
return new Envelope(
bbStats.getMinX(),
bbStats.getMaxX(),
bbStats.getMinY(),
bbStats.getMaxY());
}
}
return null;
}
>>>>>>>
public static Envelope getGeoBounds(
final DataStorePluginOptions dataStorePlugin,
final ByteArrayId adapterId,
final String geomField ) {
final DataStatisticsStore statisticsStore = dataStorePlugin.createDataStatisticsStore();
ByteArrayId geoStatId = FeatureBoundingBoxStatistics.composeId(geomField);
DataStatistics<?> geoStat = statisticsStore.getDataStatistics(
adapterId,
geoStatId,
null);
if (geoStat != null) {
if (geoStat instanceof FeatureBoundingBoxStatistics) {
final FeatureBoundingBoxStatistics bbStats = (FeatureBoundingBoxStatistics) geoStat;
return new Envelope(
bbStats.getMinX(),
bbStats.getMaxX(),
bbStats.getMinY(),
bbStats.getMaxY());
}
}
return null;
} |
<<<<<<<
package mil.nga.giat.geowave.core.store.operations.remote.options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.ParametersDelegate;
import mil.nga.giat.geowave.core.cli.api.DefaultPluginOptions;
import mil.nga.giat.geowave.core.cli.api.PluginOptions;
import mil.nga.giat.geowave.core.index.ByteArrayId;
import mil.nga.giat.geowave.core.index.CompoundIndexStrategy;
import mil.nga.giat.geowave.core.index.simple.HashKeyIndexStrategy;
import mil.nga.giat.geowave.core.index.simple.RoundRobinKeyIndexStrategy;
import mil.nga.giat.geowave.core.store.index.CustomIdIndex;
import mil.nga.giat.geowave.core.store.index.PrimaryIndex;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeOptions;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeProviderSpi;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeRegistry;
/**
* This class is responsible for loading index SPI plugins and populating
* parameters delegate with relevant options for that index.
*/
public class IndexPluginOptions extends
DefaultPluginOptions implements
PluginOptions
{
public static final String INDEX_PROPERTY_NAMESPACE = "index";
public static final String DEFAULT_PROPERTY_NAMESPACE = "indexdefault";
private final static Logger LOGGER = LoggerFactory.getLogger(IndexPluginOptions.class);
private String indexType;
@Parameter(names = {
"-np",
"--numPartitions"
}, description = "The number of partitions. Default partitions will be 1.")
protected int numPartitions = 1;
@Parameter(names = {
"-ps",
"--partitionStrategy"
}, description = "The partition strategy to use. Default will be none.")
protected PartitionStrategy partitionStrategy = PartitionStrategy.NONE;
// This is the plugin loaded from SPI based on "type"
private DimensionalityTypeProviderSpi indexPlugin = null;
// These are the options loaded from indexPlugin based on "type"
@ParametersDelegate
private DimensionalityTypeOptions indexOptions = null;
/**
* Constructor
*/
public IndexPluginOptions() {
}
@Override
public void selectPlugin(
String qualifier ) {
// Load the Index options.
indexType = qualifier;
if (qualifier != null) {
indexPlugin = DimensionalityTypeRegistry.getSelectedDimensionalityProvider(qualifier);
if (indexPlugin == null) {
throw new ParameterException(
"Unknown index type specified");
}
indexOptions = indexPlugin.getOptions();
}
else {
indexPlugin = null;
indexOptions = null;
}
}
@Override
public String getType() {
return indexType;
}
public int getNumPartitions() {
return numPartitions;
}
public PartitionStrategy getPartitionStrategy() {
return partitionStrategy;
}
public DimensionalityTypeProviderSpi getIndexPlugin() {
return indexPlugin;
}
public PrimaryIndex createPrimaryIndex() {
PrimaryIndex index = indexPlugin.createPrimaryIndex();
return wrapIndexWithOptions(index);
}
private PrimaryIndex wrapIndexWithOptions(
final PrimaryIndex index ) {
PrimaryIndex retVal = index;
if ((numPartitions > 1) && partitionStrategy.equals(PartitionStrategy.HASH)) {
retVal = new CustomIdIndex(
new CompoundIndexStrategy(
new HashKeyIndexStrategy(
index.getIndexStrategy().getOrderedDimensionDefinitions(),
numPartitions),
index.getIndexStrategy()),
index.getIndexModel(),
new ByteArrayId(
index.getId().getString() + "_" + PartitionStrategy.HASH.name() + "_" + numPartitions));
}
else if (numPartitions > 1) {
// default to round robin partitioning (none is not valid if there
// are more than 1 partition)
if (partitionStrategy.equals(PartitionStrategy.NONE)) {
LOGGER.warn("Partition strategy is necessary when using more than 1 partition, defaulting to 'round_robin' partitioning.");
}
retVal = new CustomIdIndex(
new CompoundIndexStrategy(
new RoundRobinKeyIndexStrategy(
numPartitions),
index.getIndexStrategy()),
index.getIndexModel(),
new ByteArrayId(
index.getId().getString() + "_" + PartitionStrategy.ROUND_ROBIN.name() + "_" + numPartitions));
}
if ((getType() != null) && (getType().length() > 0)) {
retVal = new CustomIdIndex(
retVal.getIndexStrategy(),
retVal.getIndexModel(),
new ByteArrayId(
getType()));
}
return retVal;
}
public static String getIndexNamespace(
String name ) {
return String.format(
"%s.%s",
INDEX_PROPERTY_NAMESPACE,
name);
}
public static enum PartitionStrategy {
NONE,
HASH,
ROUND_ROBIN;
}
}
=======
package mil.nga.giat.geowave.core.store.operations.remote.options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.ParametersDelegate;
import mil.nga.giat.geowave.core.cli.api.DefaultPluginOptions;
import mil.nga.giat.geowave.core.cli.api.PluginOptions;
import mil.nga.giat.geowave.core.index.ByteArrayId;
import mil.nga.giat.geowave.core.index.CompoundIndexStrategy;
import mil.nga.giat.geowave.core.index.simple.HashKeyIndexStrategy;
import mil.nga.giat.geowave.core.index.simple.RoundRobinKeyIndexStrategy;
import mil.nga.giat.geowave.core.store.index.CustomIdIndex;
import mil.nga.giat.geowave.core.store.index.PrimaryIndex;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeOptions;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeProviderSpi;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeRegistry;
/**
* This class is responsible for loading index SPI plugins and populating
* parameters delegate with relevant options for that index.
*/
public class IndexPluginOptions extends
DefaultPluginOptions implements
PluginOptions
{
public static final String INDEX_PROPERTY_NAMESPACE = "index";
public static final String DEFAULT_PROPERTY_NAMESPACE = "indexdefault";
private final static Logger LOGGER = LoggerFactory.getLogger(IndexPluginOptions.class);
private String indexType;
@Parameter(names = {
"--indexName"
}, description = "A custom name can be given to this index. Default name will be the based on configuration parameters.")
protected String nameOverride = null;
@Parameter(names = {
"-np",
"--numPartitions"
}, description = "The number of partitions. Default partitions will be 1.")
protected int numPartitions = 1;
@Parameter(names = {
"-ps",
"--partitionStrategy"
}, description = "The partition strategy to use. Default will be none.")
protected PartitionStrategy partitionStrategy = PartitionStrategy.NONE;
// This is the plugin loaded from SPI based on "type"
private DimensionalityTypeProviderSpi indexPlugin = null;
// These are the options loaded from indexPlugin based on "type"
@ParametersDelegate
private DimensionalityTypeOptions indexOptions = null;
/**
* Constructor
*/
public IndexPluginOptions() {
}
@Override
public void selectPlugin(
String qualifier ) {
// Load the Index options.
indexType = qualifier;
if (qualifier != null) {
indexPlugin = DimensionalityTypeRegistry.getSelectedDimensionalityProvider(qualifier);
if (indexPlugin == null) {
throw new ParameterException(
"Unknown index type specified");
}
indexOptions = indexPlugin.getOptions();
}
else {
indexPlugin = null;
indexOptions = null;
}
}
@Override
public String getType() {
return indexType;
}
public int getNumPartitions() {
return numPartitions;
}
public String getNameOverride() {
return nameOverride;
}
public PartitionStrategy getPartitionStrategy() {
return partitionStrategy;
}
public DimensionalityTypeProviderSpi getIndexPlugin() {
return indexPlugin;
}
public PrimaryIndex createPrimaryIndex() {
PrimaryIndex index = indexPlugin.createPrimaryIndex();
return wrapIndexWithOptions(index);
}
private PrimaryIndex wrapIndexWithOptions(
final PrimaryIndex index ) {
PrimaryIndex retVal = index;
if ((numPartitions > 1) && partitionStrategy.equals(PartitionStrategy.HASH)) {
retVal = new CustomIdIndex(
new CompoundIndexStrategy(
new HashKeyIndexStrategy(
index.getIndexStrategy().getOrderedDimensionDefinitions(),
numPartitions),
index.getIndexStrategy()),
index.getIndexModel(),
new ByteArrayId(
index.getId().getString() + "_" + PartitionStrategy.HASH.name() + "_" + numPartitions));
}
else if (numPartitions > 1) {
// default to round robin partitioning (none is not valid if there
// are more than 1 partition)
if (partitionStrategy.equals(PartitionStrategy.NONE)) {
LOGGER
.warn("Partition strategy is necessary when using more than 1 partition, defaulting to 'round_robin' partitioning.");
}
retVal = new CustomIdIndex(
new CompoundIndexStrategy(
new RoundRobinKeyIndexStrategy(
numPartitions),
index.getIndexStrategy()),
index.getIndexModel(),
new ByteArrayId(
index.getId().getString() + "_" + PartitionStrategy.ROUND_ROBIN.name() + "_"
+ numPartitions));
}
if ((getNameOverride() != null) && (getNameOverride().length() > 0)) {
retVal = new CustomIdIndex(
retVal.getIndexStrategy(),
retVal.getIndexModel(),
new ByteArrayId(
getNameOverride()));
}
return retVal;
}
public static String getIndexNamespace(
String name ) {
return String.format(
"%s.%s",
INDEX_PROPERTY_NAMESPACE,
name);
}
public static enum PartitionStrategy {
NONE,
HASH,
ROUND_ROBIN;
}
}
>>>>>>>
package mil.nga.giat.geowave.core.store.operations.remote.options;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.ParametersDelegate;
import mil.nga.giat.geowave.core.cli.api.DefaultPluginOptions;
import mil.nga.giat.geowave.core.cli.api.PluginOptions;
import mil.nga.giat.geowave.core.index.ByteArrayId;
import mil.nga.giat.geowave.core.index.CompoundIndexStrategy;
import mil.nga.giat.geowave.core.index.simple.HashKeyIndexStrategy;
import mil.nga.giat.geowave.core.index.simple.RoundRobinKeyIndexStrategy;
import mil.nga.giat.geowave.core.store.index.CustomIdIndex;
import mil.nga.giat.geowave.core.store.index.PrimaryIndex;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeOptions;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeProviderSpi;
import mil.nga.giat.geowave.core.store.spi.DimensionalityTypeRegistry;
/**
* This class is responsible for loading index SPI plugins and populating
* parameters delegate with relevant options for that index.
*/
public class IndexPluginOptions extends
DefaultPluginOptions implements
PluginOptions
{
public static final String INDEX_PROPERTY_NAMESPACE = "index";
public static final String DEFAULT_PROPERTY_NAMESPACE = "indexdefault";
private final static Logger LOGGER = LoggerFactory.getLogger(IndexPluginOptions.class);
private String indexType;
@Parameter(names = {
"--indexName"
}, description = "A custom name can be given to this index. Default name will be the based on configuration parameters.")
protected String nameOverride = null;
@Parameter(names = {
"-np",
"--numPartitions"
}, description = "The number of partitions. Default partitions will be 1.")
protected int numPartitions = 1;
@Parameter(names = {
"-ps",
"--partitionStrategy"
}, description = "The partition strategy to use. Default will be none.")
protected PartitionStrategy partitionStrategy = PartitionStrategy.NONE;
// This is the plugin loaded from SPI based on "type"
private DimensionalityTypeProviderSpi indexPlugin = null;
// These are the options loaded from indexPlugin based on "type"
@ParametersDelegate
private DimensionalityTypeOptions indexOptions = null;
/**
* Constructor
*/
public IndexPluginOptions() {
}
@Override
public void selectPlugin(
String qualifier ) {
// Load the Index options.
indexType = qualifier;
if (qualifier != null) {
indexPlugin = DimensionalityTypeRegistry.getSelectedDimensionalityProvider(qualifier);
if (indexPlugin == null) {
throw new ParameterException(
"Unknown index type specified");
}
indexOptions = indexPlugin.getOptions();
}
else {
indexPlugin = null;
indexOptions = null;
}
}
@Override
public String getType() {
return indexType;
}
public int getNumPartitions() {
return numPartitions;
}
public String getNameOverride() {
return nameOverride;
}
public PartitionStrategy getPartitionStrategy() {
return partitionStrategy;
}
public DimensionalityTypeProviderSpi getIndexPlugin() {
return indexPlugin;
}
public PrimaryIndex createPrimaryIndex() {
PrimaryIndex index = indexPlugin.createPrimaryIndex();
return wrapIndexWithOptions(index);
}
private PrimaryIndex wrapIndexWithOptions(
final PrimaryIndex index ) {
PrimaryIndex retVal = index;
if ((numPartitions > 1) && partitionStrategy.equals(PartitionStrategy.HASH)) {
retVal = new CustomIdIndex(
new CompoundIndexStrategy(
new HashKeyIndexStrategy(
index.getIndexStrategy().getOrderedDimensionDefinitions(),
numPartitions),
index.getIndexStrategy()),
index.getIndexModel(),
new ByteArrayId(
index.getId().getString() + "_" + PartitionStrategy.HASH.name() + "_" + numPartitions));
}
else if (numPartitions > 1) {
// default to round robin partitioning (none is not valid if there
// are more than 1 partition)
if (partitionStrategy.equals(PartitionStrategy.NONE)) {
LOGGER.warn("Partition strategy is necessary when using more than 1 partition, defaulting to 'round_robin' partitioning.");
}
retVal = new CustomIdIndex(
new CompoundIndexStrategy(
new RoundRobinKeyIndexStrategy(
numPartitions),
index.getIndexStrategy()),
index.getIndexModel(),
new ByteArrayId(
index.getId().getString() + "_" + PartitionStrategy.ROUND_ROBIN.name() + "_" + numPartitions));
}
if ((getNameOverride() != null) && (getNameOverride().length() > 0)) {
retVal = new CustomIdIndex(
retVal.getIndexStrategy(),
retVal.getIndexModel(),
new ByteArrayId(
getNameOverride()));
}
return retVal;
}
public static String getIndexNamespace(
String name ) {
return String.format(
"%s.%s",
INDEX_PROPERTY_NAMESPACE,
name);
}
public static enum PartitionStrategy {
NONE,
HASH,
ROUND_ROBIN;
}
} |
<<<<<<<
import org.apache.log4j.Logger;
=======
import mil.nga.giat.geowave.core.index.ByteArrayId;
import mil.nga.giat.geowave.core.index.Mergeable;
import mil.nga.giat.geowave.core.store.adapter.statistics.AbstractDataStatistics;
import mil.nga.giat.geowave.core.store.adapter.statistics.DataStatistics;
import mil.nga.giat.geowave.core.store.base.DataStoreEntryInfo;
import net.sf.json.JSONException;
import net.sf.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
>>>>>>>
import mil.nga.giat.geowave.core.index.ByteArrayId;
import mil.nga.giat.geowave.core.index.Mergeable;
import mil.nga.giat.geowave.core.store.adapter.statistics.AbstractDataStatistics;
import mil.nga.giat.geowave.core.store.adapter.statistics.DataStatistics;
import mil.nga.giat.geowave.core.store.base.DataStoreEntryInfo;
import net.sf.json.JSONException;
import net.sf.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
<<<<<<<
final String fieldName,
final int precision ) {
=======
final String statisticsId,
int precision ) {
>>>>>>>
final String statisticsId,
final int precision ) { |
<<<<<<<
import mil.nga.giat.geowave.core.index.Coordinate;
import mil.nga.giat.geowave.core.index.CoordinateRange;
=======
import mil.nga.giat.geowave.core.index.ByteArrayRange;
>>>>>>>
import mil.nga.giat.geowave.core.index.CoordinateRange; |
<<<<<<<
import mil.nga.giat.geowave.test.mapreduce.KDERasterResizeIT;
=======
import mil.nga.giat.geowave.test.mapreduce.GeoWaveKMeansIT;
>>>>>>>
import mil.nga.giat.geowave.test.mapreduce.GeoWaveKMeansIT;
import mil.nga.giat.geowave.test.mapreduce.KDERasterResizeIT;
<<<<<<<
KDERasterResizeIT.class,
=======
GeoWaveKMeansIT.class,
// KDEMapReduceIT.class, //for now this is commented out, further
// investigation is required
>>>>>>>
KDERasterResizeIT.class,
GeoWaveKMeansIT.class, |
<<<<<<<
final GeowaveRowId rowId,
=======
boolean wholeRowEncoding,
final AccumuloRowId rowId,
>>>>>>>
final boolean wholeRowEncoding,
final GeowaveRowId rowId,
<<<<<<<
final GeowaveRowId rowId,
=======
boolean wholeRowEncoding,
final AccumuloRowId rowId,
>>>>>>>
final boolean wholeRowEncoding,
final GeowaveRowId rowId,
<<<<<<<
final GeowaveRowId rowId,
=======
boolean wholeRowEncoding,
final AccumuloRowId rowId,
>>>>>>>
final boolean wholeRowEncoding,
final GeowaveRowId rowId, |
<<<<<<<
idsFromAdapter.length + 4);
newSet[idsFromAdapter.length] = RowRangeHistogramStatistics.STATS_ID;
newSet[idsFromAdapter.length + 1] = IndexMetaDataSet.STATS_ID;
newSet[idsFromAdapter.length + 2] = DifferingFieldVisibilityEntryCount.STATS_ID;
newSet[idsFromAdapter.length + 3] = DuplicateEntryCount.STATS_ID;
=======
idsFromAdapter.length + 5);
newSet[idsFromAdapter.length] = RowRangeDataStatistics.STATS_TYPE;
newSet[idsFromAdapter.length + 1] = RowRangeHistogramStatistics.STATS_TYPE;
newSet[idsFromAdapter.length + 2] = IndexMetaDataSet.STATS_TYPE;
newSet[idsFromAdapter.length + 3] = DifferingFieldVisibilityEntryCount.STATS_TYPE;
newSet[idsFromAdapter.length + 4] = DuplicateEntryCount.STATS_TYPE;
>>>>>>>
idsFromAdapter.length + 4);
newSet[idsFromAdapter.length] = RowRangeHistogramStatistics.STATS_TYPE;
newSet[idsFromAdapter.length + 1] = IndexMetaDataSet.STATS_TYPE;
newSet[idsFromAdapter.length + 2] = DifferingFieldVisibilityEntryCount.STATS_TYPE;
newSet[idsFromAdapter.length + 3] = DuplicateEntryCount.STATS_TYPE;
<<<<<<<
final ByteArrayId statisticsId ) {
if (statisticsId.equals(RowRangeHistogramStatistics.STATS_ID)) {
=======
final ByteArrayId statisticsType ) {
if (statisticsType.equals(RowRangeDataStatistics.STATS_TYPE)) {
return new RowRangeDataStatistics(
index.getId());
}
if (statisticsType.equals(RowRangeHistogramStatistics.STATS_TYPE)) {
>>>>>>>
final ByteArrayId statisticsType ) {
if (statisticsType.equals(RowRangeHistogramStatistics.STATS_TYPE)) { |
<<<<<<<
import mil.nga.giat.geowave.core.store.data.visibility.DifferingFieldVisibilityEntryCount;
import mil.nga.giat.geowave.core.store.entities.GeoWaveRow;
=======
>>>>>>>
import mil.nga.giat.geowave.core.store.data.visibility.DifferingFieldVisibilityEntryCount;
import mil.nga.giat.geowave.core.store.entities.GeoWaveRow;
<<<<<<<
@Override
public <T> CloseableIterator<T> query(
=======
protected <T> CloseableIterator<T> internalQuery(
>>>>>>>
protected <T> CloseableIterator<T> internalQuery(
<<<<<<<
if (sanitizedQuery instanceof InsertionIdQuery) {
=======
if (delete) {
final DataStoreCallbackManager callbackCache = new DataStoreCallbackManager(
statisticsStore,
secondaryIndexDataStore,
queriedAdapters.add(adapter.getAdapterId()));
deleteCallbacks.add(callbackCache);
ScanCallback callback = queryOptions.getScanCallback();
final PrimaryIndex index = indexAdapterPair.getLeft();
queryOptions.setScanCallback(new ScanCallback<Object>() {
@Override
public void entryScanned(
DataStoreEntryInfo entryInfo,
Object entry ) {
if (callback != null) {
callback.entryScanned(
entryInfo,
entry);
}
callbackCache.getDeleteCallback(
(WritableDataAdapter<Object>) adapter,
index).entryDeleted(
entryInfo,
entry);
}
});
}
if (sanitizedQuery instanceof RowIdQuery) {
>>>>>>>
if (delete) {
final DataStoreCallbackManager callbackCache = new DataStoreCallbackManager(
statisticsStore,
secondaryIndexDataStore,
queriedAdapters.add(adapter.getAdapterId()));
deleteCallbacks.add(callbackCache);
ScanCallback callback = queryOptions.getScanCallback();
final PrimaryIndex index = indexAdapterPair.getLeft();
queryOptions.setScanCallback(new ScanCallback<Object>() {
@Override
public void entryScanned(
DataStoreEntryInfo entryInfo,
Object entry ) {
if (callback != null) {
callback.entryScanned(
entryInfo,
entry);
}
callbackCache.getDeleteCallback(
(WritableDataAdapter<Object>) adapter,
index).entryDeleted(
entryInfo,
entry);
}
});
}
if (sanitizedQuery instanceof InsertionIdQuery) {
<<<<<<<
=======
else if (sanitizedQuery instanceof DataIdQuery) {
final DataIdQuery idQuery = (DataIdQuery) sanitizedQuery;
if (idQuery.getAdapterId().equals(
adapter.getAdapterId())) {
results.add(getEntries(
indexAdapterPair.getLeft(),
idQuery.getDataIds(),
(DataAdapter<Object>) adapterStore.getAdapter(idQuery.getAdapterId()),
filter,
(ScanCallback<Object>) sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getAuthorizations(),
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
delete));
}
continue;
}
>>>>>>>
<<<<<<<
@Override
=======
@SuppressWarnings("unchecked")
protected CloseableIterator<Object> getEntries(
final PrimaryIndex index,
final List<ByteArrayId> dataIds,
final DataAdapter<Object> adapter,
final DedupeFilter dedupeFilter,
final ScanCallback<Object> callback,
final String[] authorizations,
final double[] maxResolutionSubsamplingPerDimension,
boolean delete )
throws IOException {
final String altIdxTableName = index.getId().getString() + ALT_INDEX_TABLE;
MemoryAdapterStore tempAdapterStore;
tempAdapterStore = new MemoryAdapterStore(
new DataAdapter[] {
adapter
});
if (baseOptions.isUseAltIndex() && baseOperations.tableExists(altIdxTableName)) {
final List<ByteArrayId> rowIds = getAltIndexRowIds(
altIdxTableName,
dataIds,
adapter.getAdapterId());
if (rowIds.size() > 0) {
final QueryOptions options = new QueryOptions();
options.setScanCallback(callback);
options.setAuthorizations(authorizations);
options.setMaxResolutionSubsamplingPerDimension(maxResolutionSubsamplingPerDimension);
options.setLimit(-1);
return queryRowIds(
adapter,
index,
rowIds,
dedupeFilter,
options,
tempAdapterStore,
delete);
}
}
else {
return getEntryRows(
index,
tempAdapterStore,
dataIds,
adapter,
callback,
dedupeFilter,
authorizations,
delete);
}
return new CloseableIterator.Empty();
}
>>>>>>>
@Override
<<<<<<<
=======
else if (query instanceof DataIdQuery) {
final DataIdQuery idQuery = (DataIdQuery) query;
dataIt = getEntries(
index,
idQuery.getDataIds(),
adapter,
null,
callback,
queryOptions.getAuthorizations(),
null,
true);
}
>>>>>>>
<<<<<<<
finally {
try {
if (idxDeleter != null) {
idxDeleter.close();
}
if (altIdxDeleter != null) {
altIdxDeleter.close();
}
}
catch (final Exception e) {
LOGGER.warn(
"Unable to close deleter",
e);
}
}
=======
}
>>>>>>>
finally {
try {
if (idxDeleter != null) {
idxDeleter.close();
}
if (altIdxDeleter != null) {
altIdxDeleter.close();
}
}
catch (final Exception e) {
LOGGER.warn(
"Unable to close deleter",
e);
}
}
<<<<<<<
final QueryOptions queryOptions ) {
return queryConstraints(
Collections.singletonList(adapter.getAdapterId()),
index,
new DataIdQuery(
dataIds),
dedupeFilter,
queryOptions,
tempAdapterStore);
}
protected CloseableIterator<Object> queryConstraints(
final List<ByteArrayId> adapterIdsToQuery,
final PrimaryIndex index,
final Query sanitizedQuery,
final DedupeFilter filter,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore ) {
final BaseConstraintsQuery constraintsQuery = new BaseConstraintsQuery(
this,
adapterIdsToQuery,
index,
sanitizedQuery,
filter,
sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getAggregation(),
sanitizedQueryOptions.getFieldIdsAdapterPair(),
IndexMetaDataSet.getIndexMetadata(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
DuplicateEntryCount.getDuplicateCounts(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
sanitizedQueryOptions.getAuthorizations());
return constraintsQuery.query(
baseOperations,
baseOptions,
tempAdapterStore,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
sanitizedQueryOptions.getLimit());
}
protected CloseableIterator<Object> queryRowPrefix(
final PrimaryIndex index,
final ByteArrayId partitionKey,
final ByteArrayId sortPrefix,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore,
final List<ByteArrayId> adapterIdsToQuery ) {
final BaseRowPrefixQuery<Object> prefixQuery = new BaseRowPrefixQuery<Object>(
this,
index,
partitionKey,
sortPrefix,
(ScanCallback<Object, ?>) sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getLimit(),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
sanitizedQueryOptions.getAuthorizations());
return prefixQuery.query(
baseOperations,
baseOptions,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
tempAdapterStore);
}
protected CloseableIterator<Object> queryInsertionId(
final DataAdapter<Object> adapter,
final PrimaryIndex index,
final InsertionIdQuery query,
final DedupeFilter filter,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore ) {
final DifferingFieldVisibilityEntryCount visibilityCounts = DifferingFieldVisibilityEntryCount
.getVisibilityCounts(
index,
Collections.singletonList(adapter.getAdapterId()),
statisticsStore,
sanitizedQueryOptions.getAuthorizations());
final BaseInsertionIdQuery<Object> q = new BaseInsertionIdQuery<Object>(
this,
adapter,
index,
query,
(ScanCallback<Object, ?>) sanitizedQueryOptions.getScanCallback(),
filter,
visibilityCounts,
sanitizedQueryOptions.getAuthorizations());
return q.query(
baseOperations,
baseOptions,
tempAdapterStore,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
sanitizedQueryOptions.getLimit());
}
protected <T> IndexWriter<T> createIndexWriter(
final WritableDataAdapter<T> adapter,
final PrimaryIndex index,
final DataStoreOperations baseOperations,
final DataStoreOptions baseOptions,
final IngestCallback<T> callback,
final Closeable closable ) {
return new BaseIndexWriter<T>(
adapter,
index,
baseOperations,
baseOptions,
callback,
closable);
}
protected <T> void initOnIndexWriterCreate(
final DataAdapter<T> adapter,
final PrimaryIndex index ) {}
=======
final String[] authorizations,
boolean delete );
protected abstract CloseableIterator<Object> queryConstraints(
List<ByteArrayId> adapterIdsToQuery,
PrimaryIndex index,
Query sanitizedQuery,
DedupeFilter filter,
QueryOptions sanitizedQueryOptions,
AdapterStore tempAdapterStore,
boolean delete );
protected abstract CloseableIterator<Object> queryRowPrefix(
PrimaryIndex index,
ByteArrayId rowPrefix,
QueryOptions sanitizedQueryOptions,
AdapterStore tempAdapterStore,
List<ByteArrayId> adapterIdsToQuery,
boolean delete );
protected abstract CloseableIterator<Object> queryRowIds(
DataAdapter<Object> adapter,
PrimaryIndex index,
List<ByteArrayId> rowIds,
DedupeFilter filter,
QueryOptions sanitizedQueryOptions,
AdapterStore tempAdapterStore,
boolean delete );
protected abstract <T> void addAltIndexCallback(
List<IngestCallback<T>> callbacks,
String indexName,
DataAdapter<T> adapter,
ByteArrayId primaryIndexId );
protected abstract IndexWriter createIndexWriter(
DataAdapter adapter,
PrimaryIndex index,
DataStoreOperations baseOperations,
DataStoreOptions baseOptions,
final IngestCallback callback,
final Closeable closable );
protected abstract void initOnIndexWriterCreate(
final DataAdapter adapter,
final PrimaryIndex index );
>>>>>>>
final QueryOptions queryOptions) {
return queryConstraints(
Collections.singletonList(adapter.getAdapterId()),
index,
new DataIdQuery(
dataIds),
dedupeFilter,
queryOptions,
tempAdapterStore);
}
protected CloseableIterator<Object> queryConstraints(
final List<ByteArrayId> adapterIdsToQuery,
final PrimaryIndex index,
final Query sanitizedQuery,
final DedupeFilter filter,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore ) {
final BaseConstraintsQuery constraintsQuery = new BaseConstraintsQuery(
this,
adapterIdsToQuery,
index,
sanitizedQuery,
filter,
sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getAggregation(),
sanitizedQueryOptions.getFieldIdsAdapterPair(),
IndexMetaDataSet.getIndexMetadata(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
DuplicateEntryCount.getDuplicateCounts(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
sanitizedQueryOptions.getAuthorizations());
return constraintsQuery.query(
baseOperations,
baseOptions,
tempAdapterStore,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
sanitizedQueryOptions.getLimit());
}
protected CloseableIterator<Object> queryRowPrefix(
final PrimaryIndex index,
final ByteArrayId partitionKey,
final ByteArrayId sortPrefix,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore,
final List<ByteArrayId> adapterIdsToQuery ) {
final BaseRowPrefixQuery<Object> prefixQuery = new BaseRowPrefixQuery<Object>(
this,
index,
partitionKey,
sortPrefix,
(ScanCallback<Object, ?>) sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getLimit(),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
sanitizedQueryOptions.getAuthorizations());
return prefixQuery.query(
baseOperations,
baseOptions,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
tempAdapterStore);
}
protected CloseableIterator<Object> queryInsertionId(
final DataAdapter<Object> adapter,
final PrimaryIndex index,
final InsertionIdQuery query,
final DedupeFilter filter,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore ) {
final DifferingFieldVisibilityEntryCount visibilityCounts = DifferingFieldVisibilityEntryCount
.getVisibilityCounts(
index,
Collections.singletonList(adapter.getAdapterId()),
statisticsStore,
sanitizedQueryOptions.getAuthorizations());
final BaseInsertionIdQuery<Object> q = new BaseInsertionIdQuery<Object>(
this,
adapter,
index,
query,
(ScanCallback<Object, ?>) sanitizedQueryOptions.getScanCallback(),
filter,
visibilityCounts,
sanitizedQueryOptions.getAuthorizations());
return q.query(
baseOperations,
baseOptions,
tempAdapterStore,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
sanitizedQueryOptions.getLimit());
}
protected <T> IndexWriter<T> createIndexWriter(
final WritableDataAdapter<T> adapter,
final PrimaryIndex index,
final DataStoreOperations baseOperations,
final DataStoreOptions baseOptions,
final IngestCallback<T> callback,
final Closeable closable ) {
return new BaseIndexWriter<T>(
adapter,
index,
baseOperations,
baseOptions,
callback,
closable);
}
protected <T> void initOnIndexWriterCreate(
final DataAdapter<T> adapter,
final PrimaryIndex index ) {} |
<<<<<<<
import mil.nga.giat.geowave.core.index.sfc.tiered.TieredSFCIndexFactory;
import mil.nga.giat.geowave.core.store.cli.remote.options.IndexPluginOptions.BaseIndexBuilder;
=======
import mil.nga.giat.geowave.core.index.sfc.xz.XZHierarchicalIndexFactory;
>>>>>>>
import mil.nga.giat.geowave.core.index.sfc.xz.XZHierarchicalIndexFactory;
import mil.nga.giat.geowave.core.store.cli.remote.options.IndexPluginOptions.BaseIndexBuilder; |
<<<<<<<
import mil.nga.giat.geowave.core.index.Coordinate;
=======
>>>>>>>
<<<<<<<
protected static List<SinglePartitionQueryRanges> getQueryRanges(
final BinnedNumericDataset[] binnedQueries,
final SpaceFillingCurve sfc,
final int maxRanges,
final byte tier ) {
final List<SinglePartitionQueryRanges> queryRanges = new ArrayList<SinglePartitionQueryRanges>();
int maxRangeDecompositionPerBin = maxRanges;
if ((maxRanges > 1) && (binnedQueries.length > 1)) {
maxRangeDecompositionPerBin = (int) Math.ceil((double) maxRanges / (double) binnedQueries.length);
}
for (final BinnedNumericDataset binnedQuery : binnedQueries) {
final RangeDecomposition rangeDecomp = sfc.decomposeRange(
binnedQuery,
true,
maxRangeDecompositionPerBin);
final byte[] tierAndBinId = ByteArrayUtils.combineArrays(
new byte[] {
tier
// we're assuming tiers only go to 127 (the max byte
// value)
},
binnedQuery.getBinId());
queryRanges.add(new SinglePartitionQueryRanges(
new ByteArrayId(
tierAndBinId),
Arrays.asList(rangeDecomp.getRanges())));
}
return queryRanges;
}
=======
>>>>>>>
<<<<<<<
synchronized private SinglePartitionInsertionIds getRowIds(
=======
public boolean tierExists(
Byte tierId ) {
return orderedSfcIndexToTierId.containsValue(tierId);
}
synchronized private List<ByteArrayId> getRowIds(
>>>>>>>
public boolean tierExists(
Byte tierId ) {
return orderedSfcIndexToTierId.containsValue(tierId);
}
synchronized private List<ByteArrayId> getRowIds(
<<<<<<<
if (rowCount.equals(BigInteger.ONE)) {
final byte[] tierAndBinId = ByteArrayUtils.combineArrays(
new byte[] {
tierId
},
index.getBinId());
final double[] maxValues = index.getMaxValuesPerDimension();
retVal.add(new ByteArrayId(
sfc.getId(maxValues)));
return new SinglePartitionInsertionIds(
new ByteArrayId(
tierAndBinId),
retVal);
=======
ByteArrayId singleId = BinnedSFCUtils.getSingleBinnedRowId(
rowCount,
tierId,
index,
sfc);
if (singleId != null) {
return Collections.singletonList(singleId);
>>>>>>>
ByteArrayId singleId = BinnedSFCUtils.getSingleBinnedRowId(
rowCount,
tierId,
index,
sfc);
if (singleId != null) {
return Collections.singletonList(singleId);
<<<<<<<
final InsertionIds ids ) {
for (final SinglePartitionInsertionIds partitionIds : ids.getPartitionKeys()) {
tierCounts[orderedTierIdToSfcIndex.get(
partitionIds.getPartitionKey().getBytes()[0]).intValue()] += partitionIds.getSortKeys().size();
=======
final List<ByteArrayId> ids ) {
for (final ByteArrayId id : ids) {
final byte first = id.getBytes()[0];
if (orderedTierIdToSfcIndex.containsKey(first)) {
tierCounts[orderedTierIdToSfcIndex.get(
first).intValue()]++;
}
>>>>>>>
final InsertionIds ids ) {
for (final SinglePartitionInsertionIds partitionIds : ids.getPartitionKeys()) {
final byte first = partitionIds.getPartitionKey().getBytes()[0];
if (orderedTierIdToSfcIndex.containsKey(first)) {
tierCounts[orderedTierIdToSfcIndex.get(
first).intValue()] += partitionIds.getSortKeys().size();
}
<<<<<<<
final InsertionIds ids ) {
for (final SinglePartitionInsertionIds partitionIds : ids.getPartitionKeys()) {
tierCounts[orderedTierIdToSfcIndex.get(
partitionIds.getPartitionKey().getBytes()[0]).intValue()] -= partitionIds.getSortKeys().size();
=======
final List<ByteArrayId> ids ) {
for (final ByteArrayId id : ids) {
final byte first = id.getBytes()[0];
if (orderedTierIdToSfcIndex.containsKey(first)) {
tierCounts[orderedTierIdToSfcIndex.get(
first).intValue()]--;
}
>>>>>>>
final InsertionIds ids ) {
for (final SinglePartitionInsertionIds partitionIds : ids.getPartitionKeys()) {
final byte first = partitionIds.getPartitionKey().getBytes()[0];
if (orderedTierIdToSfcIndex.containsKey(first)) {
tierCounts[orderedTierIdToSfcIndex.get(
first).intValue()] -= partitionIds.getSortKeys().size();
} |
<<<<<<<
import org.apache.log4j.Logger;
=======
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
>>>>>>>
import org.slf4j.Logger;
import org.slf4j.LoggerFactory; |
<<<<<<<
final String vendorString = result.getEvidence(EvidenceType.VENDOR).toString();
=======
final String vendorString = result.getVendorEvidence().toString();
assertEquals(RubyGemspecAnalyzer.DEPENDENCY_ECOSYSTEM, result.getEcosystem());
>>>>>>>
final String vendorString = result.getEvidence(EvidenceType.VENDOR).toString();
assertEquals(RubyGemspecAnalyzer.DEPENDENCY_ECOSYSTEM, result.getEcosystem());
<<<<<<<
assertTrue(result.size()>0);
=======
assertTrue(result.getEvidence().size()>0);
assertEquals(RubyGemspecAnalyzer.DEPENDENCY_ECOSYSTEM, result.getEcosystem());
assertEquals("pg",result.getName());
assertEquals("0.18.4",result.getVersion());
assertEquals("pg:0.18.4",result.getDisplayFileName());
>>>>>>>
assertTrue(result.size() > 0);
assertEquals(RubyGemspecAnalyzer.DEPENDENCY_ECOSYSTEM, result.getEcosystem());
assertEquals("pg", result.getName());
assertEquals("0.18.4", result.getVersion());
assertEquals("pg:0.18.4", result.getDisplayFileName()); |
<<<<<<<
import org.apache.log4j.Logger;
=======
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
>>>>>>>
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
<<<<<<<
private final static Logger LOGGER = Logger.getLogger(
HBaseSecondaryIndexDataStore.class);
=======
private final static Logger LOGGER = LoggerFactory.getLogger(HBaseSecondaryIndexDataStore.class);
private final BasicHBaseOperations hbaseOperations;
@SuppressWarnings("unused")
private final HBaseOptions hbaseOptions;
private DataStore dataStore = null;
>>>>>>>
private final static Logger LOGGER = LoggerFactory.getLogger(HBaseSecondaryIndexDataStore.class);
private final BasicHBaseOperations hbaseOperations;
@SuppressWarnings("unused")
private final HBaseOptions hbaseOptions;
private DataStore dataStore = null; |
<<<<<<<
private static final Logger LOGGER = Logger.getLogger(AbstractHBaseRowQuery.class);
protected final ScanCallback<T, ?> scanCallback;
=======
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractHBaseRowQuery.class);
protected final ScanCallback<T> scanCallback;
>>>>>>>
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractHBaseRowQuery.class);
protected final ScanCallback<T, ?> scanCallback; |
<<<<<<<
public static final Charset GEOWAVE_CHAR_SET = Charset.forName("ISO-8859-1");
public static final Charset UTF8_CHAR_SET = Charset.forName("UTF-8");
=======
private static final Logger LOGGER = LoggerFactory.getLogger(StringUtils.class);
public static final Charset UTF8_CHAR_SET = Charset.forName("ISO-8859-1");
>>>>>>>
public static final Charset GEOWAVE_CHAR_SET = Charset.forName("ISO-8859-1");
public static final Charset UTF8_CHAR_SET = Charset.forName("UTF-8");
private static final Logger LOGGER = LoggerFactory.getLogger(StringUtils.class); |
<<<<<<<
import mil.nga.giat.geowave.core.store.data.visibility.UniformVisibilityWriter;
import mil.nga.giat.geowave.core.store.entities.GeowaveRowId;
=======
import mil.nga.giat.geowave.core.store.adapter.statistics.DuplicateEntryCount;
import mil.nga.giat.geowave.core.store.data.visibility.DifferingFieldVisibilityEntryCount;
>>>>>>>
import mil.nga.giat.geowave.core.store.adapter.statistics.DuplicateEntryCount;
import mil.nga.giat.geowave.core.store.data.visibility.DifferingFieldVisibilityEntryCount;
import mil.nga.giat.geowave.core.store.entities.GeowaveRowId;
<<<<<<<
import mil.nga.giat.geowave.core.store.memory.DataStoreUtils;
=======
import mil.nga.giat.geowave.core.store.memory.MemoryAdapterStore;
import mil.nga.giat.geowave.core.store.query.DataIdQuery;
>>>>>>>
<<<<<<<
}
=======
final IngestCallbackList<T> callbacksList = new IngestCallbackList<T>(
callbacks);
writers[i] = new AccumuloIndexWriter(
adapter,
index,
accumuloOperations,
accumuloOptions,
callbacksList,
callbacksList);
>>>>>>>
}
<<<<<<<
protected CloseableIterator<Object> queryRowPrefix(
final PrimaryIndex index,
final ByteArrayId rowPrefix,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore ) {
final AccumuloRowPrefixQuery<Object> prefixQuery = new AccumuloRowPrefixQuery<Object>(
index,
rowPrefix,
(ScanCallback<Object>) sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getLimit(),
sanitizedQueryOptions.getAuthorizations());
return prefixQuery.query(
accumuloOperations,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
tempAdapterStore);
}
=======
public <T> CloseableIterator<T> query(
final QueryOptions queryOptions,
final Query query ) {
final List<CloseableIterator<Object>> results = new ArrayList<CloseableIterator<Object>>();
// all queries will use the same instance of the dedupe filter for
// client side filtering because the filter needs to be applied across
// indices
final QueryOptions sanitizedQueryOptions = (queryOptions == null) ? new QueryOptions() : queryOptions;
final Query sanitizedQuery = (query == null) ? new EverythingQuery() : query;
final DedupeFilter filter = new DedupeFilter();
MemoryAdapterStore tempAdapterStore;
try {
tempAdapterStore = new MemoryAdapterStore(
sanitizedQueryOptions.getAdaptersArray(adapterStore));
for (final Pair<PrimaryIndex, List<DataAdapter<Object>>> indexAdapterPair : sanitizedQueryOptions
.getAdaptersWithMinimalSetOfIndices(
tempAdapterStore,
indexMappingStore,
indexStore)) {
final List<ByteArrayId> adapterIdsToQuery = new ArrayList<>();
for (final DataAdapter<Object> adapter : indexAdapterPair.getRight()) {
if (sanitizedQuery instanceof RowIdQuery) {
final AccumuloRowIdsQuery<Object> q = new AccumuloRowIdsQuery<Object>(
adapter,
indexAdapterPair.getLeft(),
((RowIdQuery) sanitizedQuery).getRowIds(),
(ScanCallback<Object>) sanitizedQueryOptions.getScanCallback(),
filter,
sanitizedQueryOptions.getAuthorizations());
results.add(q.query(
accumuloOperations,
tempAdapterStore,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
-1));
continue;
}
else if (sanitizedQuery instanceof DataIdQuery) {
final DataIdQuery idQuery = (DataIdQuery) sanitizedQuery;
if (idQuery.getAdapterId().equals(
adapter.getAdapterId())) {
results.add(getEntries(
indexAdapterPair.getLeft(),
idQuery.getDataIds(),
(DataAdapter<Object>) adapterStore.getAdapter(idQuery.getAdapterId()),
filter,
(ScanCallback<Object>) sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getAuthorizations(),
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension()));
}
continue;
}
else if (sanitizedQuery instanceof PrefixIdQuery) {
final PrefixIdQuery prefixIdQuery = (PrefixIdQuery) sanitizedQuery;
final AccumuloRowPrefixQuery<Object> prefixQuery = new AccumuloRowPrefixQuery<Object>(
indexAdapterPair.getLeft(),
prefixIdQuery.getRowPrefix(),
(ScanCallback<Object>) sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getLimit(),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
indexAdapterPair.getLeft(),
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
sanitizedQueryOptions.getAuthorizations());
results.add(prefixQuery.query(
accumuloOperations,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
tempAdapterStore));
continue;
}
adapterIdsToQuery.add(adapter.getAdapterId());
}
// supports querying multiple adapters in a single index
// in one query instance (one scanner) for efficiency
if (adapterIdsToQuery.size() > 0) {
AccumuloConstraintsQuery accumuloQuery;
accumuloQuery = new AccumuloConstraintsQuery(
adapterIdsToQuery,
indexAdapterPair.getLeft(),
sanitizedQuery,
filter,
sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getAggregation(),
sanitizedQueryOptions.getFieldIdsAdapterPair(),
IndexMetaDataSet.getIndexMetadata(
indexAdapterPair.getLeft(),
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
DuplicateEntryCount.getDuplicateCounts(
indexAdapterPair.getLeft(),
adapterIdsToQuery,
statisticsStore,
queryOptions.getAuthorizations()),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
indexAdapterPair.getLeft(),
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
sanitizedQueryOptions.getAuthorizations());
results.add(accumuloQuery.query(
accumuloOperations,
tempAdapterStore,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
sanitizedQueryOptions.getLimit()));
}
}
}
catch (final IOException e1)
>>>>>>>
protected CloseableIterator<Object> queryRowPrefix(
final PrimaryIndex index,
final ByteArrayId rowPrefix,
final QueryOptions sanitizedQueryOptions,
final AdapterStore tempAdapterStore,
final List<ByteArrayId> adapterIdsToQuery ) {
final AccumuloRowPrefixQuery<Object> prefixQuery = new AccumuloRowPrefixQuery<Object>(
index,
rowPrefix,
(ScanCallback<Object>) sanitizedQueryOptions.getScanCallback(),
sanitizedQueryOptions.getLimit(),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
index,
adapterIdsToQuery,
statisticsStore,
sanitizedQueryOptions.getAuthorizations()),
sanitizedQueryOptions.getAuthorizations());
return prefixQuery.query(
accumuloOperations,
sanitizedQueryOptions.getMaxResolutionSubsamplingPerDimension(),
tempAdapterStore);
}
<<<<<<<
=======
@SuppressWarnings("unchecked")
private CloseableIterator<Object> getEntries(
final PrimaryIndex index,
final List<ByteArrayId> dataIds,
final DataAdapter<Object> adapter,
final DedupeFilter dedupeFilter,
final ScanCallback<Object> callback,
final String[] authorizations,
final double[] maxResolutionSubsamplingPerDimension )
throws IOException {
final String altIdxTableName = index.getId().getString() + AccumuloUtils.ALT_INDEX_TABLE;
MemoryAdapterStore tempAdapterStore;
tempAdapterStore = new MemoryAdapterStore(
new DataAdapter[] {
adapter
});
if (accumuloOptions.isUseAltIndex() && accumuloOperations.tableExists(altIdxTableName)) {
final List<ByteArrayId> rowIds = getAltIndexRowIds(
altIdxTableName,
dataIds,
adapter.getAdapterId());
if (rowIds.size() > 0) {
final AccumuloRowIdsQuery<Object> q = new AccumuloRowIdsQuery<Object>(
adapter,
index,
rowIds,
callback,
dedupeFilter,
authorizations);
return q.query(
accumuloOperations,
tempAdapterStore,
maxResolutionSubsamplingPerDimension,
-1);
}
}
else {
return getEntryRows(
index,
tempAdapterStore,
dataIds,
adapter,
callback,
dedupeFilter,
authorizations);
}
return new CloseableIterator.Empty();
}
>>>>>>>
<<<<<<<
new AccumuloEntryIteratorWrapper(
=======
new EntryIteratorWrapper(
visibilityCount.isAnyEntryDifferingFieldVisiblity(),
>>>>>>>
new AccumuloEntryIteratorWrapper(
visibilityCount.isAnyEntryDifferingFieldVisiblity(),
<<<<<<<
final ByteArrayId adapterId,
final String... authorizations ) {
=======
final ByteArrayId adapterId ) {
>>>>>>>
final ByteArrayId adapterId,
final String... authorizations ) {
<<<<<<<
=======
if (accumuloOptions.isUseAltIndex() && accumuloOperations.tableExists(tableName)) {
ScannerBase scanner = null;
for (final ByteArrayId dataId : dataIds) {
try {
scanner = accumuloOperations.createScanner(tableName);
((Scanner) scanner).setRange(Range.exact(new Text(
dataId.getBytes())));
scanner.fetchColumnFamily(new Text(
adapterId.getBytes()));
final Iterator<Map.Entry<Key, Value>> iterator = scanner.iterator();
while (iterator.hasNext()) {
result.add(new ByteArrayId(
iterator.next().getKey().getColumnQualifierData().getBackingArray()));
}
}
catch (final TableNotFoundException e) {
LOGGER.warn(
"Unable to query table '" + tableName + "'. Table does not exist.",
e);
}
finally {
if (scanner != null) {
scanner.close();
}
}
}
}
return result;
}
@Override
public boolean delete(
final QueryOptions queryOptions,
final Query query ) {
if (((query == null) || (query instanceof EverythingQuery)) && queryOptions.isAllAdapters()) {
try {
// TODO These interfaces should all provide remove and removeAll
// capabilities instead of having to clear the
// AbstractPersistence's cache manually
((AbstractAccumuloPersistence) indexStore).clearCache();
((AbstractAccumuloPersistence) adapterStore).clearCache();
((AbstractAccumuloPersistence) statisticsStore).clearCache();
((AccumuloSecondaryIndexDataStore) secondaryIndexDataStore).clearCache();
((AbstractAccumuloPersistence) indexMappingStore).clearCache();
accumuloOperations.deleteAll();
return true;
}
catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
LOGGER.error(
"Unable to delete all tables",
e);
}
return false;
}
final AtomicBoolean aOk = new AtomicBoolean(
true);
// keep a list of adapters that have been queried, to only low an
// adapter to be queried
// once
final Set<ByteArrayId> queriedAdapters = new HashSet<ByteArrayId>();
>>>>>>>
<<<<<<<
=======
};
CloseableIterator<?> dataIt = null;
if (query instanceof RowIdQuery) {
final AccumuloRowIdsQuery<Object> q = new AccumuloRowIdsQuery<Object>(
adapter,
index,
((RowIdQuery) query).getRowIds(),
callback,
null,
queryOptions.getAuthorizations());
dataIt = q.query(
accumuloOperations,
adapterStore,
null,
-1);
}
else if (query instanceof DataIdQuery) {
final DataIdQuery idQuery = (DataIdQuery) query;
dataIt = getEntries(
index,
idQuery.getDataIds(),
adapter,
null,
callback,
queryOptions.getAuthorizations(),
null);
}
else if (query instanceof PrefixIdQuery) {
dataIt = new AccumuloRowPrefixQuery<Object>(
index,
((PrefixIdQuery) query).getRowPrefix(),
callback,
null,
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
indexAdapterPair.getLeft(),
Collections.singletonList(adapter.getAdapterId()),
statisticsStore,
queryOptions.getAuthorizations()),
queryOptions.getAuthorizations()).query(
accumuloOperations,
null,
adapterStore);
}
else {
final List<ByteArrayId> adapterIds = Collections.singletonList(adapter.getAdapterId());
dataIt = new AccumuloConstraintsQuery(
adapterIds,
index,
query,
null,
callback,
null,
queryOptions.getFieldIdsAdapterPair(),
IndexMetaDataSet.getIndexMetadata(
indexAdapterPair.getLeft(),
adapterIds,
statisticsStore,
queryOptions.getAuthorizations()),
DuplicateEntryCount.getDuplicateCounts(
indexAdapterPair.getLeft(),
adapterIds,
statisticsStore,
queryOptions.getAuthorizations()),
DifferingFieldVisibilityEntryCount.getVisibilityCounts(
indexAdapterPair.getLeft(),
adapterIds,
statisticsStore,
queryOptions.getAuthorizations()),
queryOptions.getAuthorizations()).query(
accumuloOperations,
adapterStore,
null,
null);
}
while (dataIt.hasNext()) {
dataIt.next();
}
try {
dataIt.close();
>>>>>>>
<<<<<<<
=======
public IndexStore getIndexStore() {
return indexStore;
}
public AdapterStore getAdapterStore() {
return adapterStore;
}
public DataStatisticsStore getStatisticsStore() {
return statisticsStore;
}
public SecondaryIndexDataStore getSecondaryIndexDataStore() {
return secondaryIndexDataStore;
}
>>>>>>> |
<<<<<<<
import mil.nga.giat.geowave.core.store.operations.DataStoreOperations;
import mil.nga.giat.geowave.core.store.operations.Deleter;
import mil.nga.giat.geowave.core.store.operations.Writer;
=======
import mil.nga.giat.geowave.core.store.query.DistributableQuery;
>>>>>>>
import mil.nga.giat.geowave.core.store.operations.DataStoreOperations;
import mil.nga.giat.geowave.core.store.operations.Deleter;
import mil.nga.giat.geowave.core.store.operations.Writer;
import mil.nga.giat.geowave.core.store.query.DistributableQuery; |
<<<<<<<
import android.animation.Animator;
import android.animation.AnimatorSet;
import android.animation.ObjectAnimator;
import android.annotation.TargetApi;
=======
import android.annotation.SuppressLint;
>>>>>>>
import android.animation.Animator;
import android.animation.AnimatorSet;
import android.animation.ObjectAnimator;
import android.annotation.SuppressLint;
import android.annotation.TargetApi;
<<<<<<<
import com.vinaygaba.creditcardview.util.AndroidUtils;
=======
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
>>>>>>>
import com.vinaygaba.creditcardview.util.AndroidUtils;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
<<<<<<<
private static int CARD_FRONT = 0;
private static int CARD_BACK = 1;
=======
private static final boolean DEBUG = false;
private Context mContext;
>>>>>>>
private static int CARD_FRONT = 0;
private static int CARD_BACK = 1;
private static final boolean DEBUG = false;
private Context mContext;
<<<<<<<
private int mBrandLogoPosition = 1;
private int cardSide = CARD_FRONT;
=======
>>>>>>>
private int cardSide = CARD_FRONT;
<<<<<<<
chip = (ImageView)getChildAt(4);
validTill = (TextView)getChildAt(5);
expiryDate = (EditText)getChildAt(6);
mFlipBtn = (ImageButton)getChildAt(7);
=======
cardNumber = (EditText) findViewById(R.id.card_number);
cardName = (EditText) findViewById(R.id.card_name);
type = (ImageView) findViewById(R.id.card_logo);
brandLogo = (ImageView) findViewById(R.id.brand_logo);
chip = (ImageView) findViewById(R.id.chip);
validTill = (TextView) findViewById(R.id.valid_till);
expiryDate = (EditText) findViewById(R.id.expiry_date);
>>>>>>>
cardNumber = (EditText) findViewById(R.id.card_number);
cardName = (EditText) findViewById(R.id.card_name);
type = (ImageView) findViewById(R.id.card_logo);
brandLogo = (ImageView) findViewById(R.id.brand_logo);
chip = (ImageView) findViewById(R.id.chip);
validTill = (TextView) findViewById(R.id.valid_till);
expiryDate = (EditText) findViewById(R.id.expiry_date);
mFlipBtn = (ImageButton)findViewById(R.id.flip_btn); |
<<<<<<<
Logger.getLogger(ConnectionFactory.class.getName()).log(Level.FINE, "An error occurred closing the connection", ex);
=======
LOGGER.log(Level.FINE, "An error occured closing the connection", ex);
>>>>>>>
LOGGER.log(Level.FINE, "An error occurred closing the connection", ex);
<<<<<<<
Logger.getLogger(ConnectionFactory.class.getName()).log(Level.FINE, "An error occurred unloading the database driver", ex);
=======
LOGGER.log(Level.FINE, "An error occured unloading the databse driver", ex);
>>>>>>>
LOGGER.log(Level.FINE, "An error occurred unloading the database driver", ex);
<<<<<<<
Logger.getLogger(ConnectionFactory.class.getName()).log(Level.FINE,
"An unexpected throwable occurred unloading the database driver", unexpected);
=======
LOGGER.log(Level.FINE,
"An unexpected throwable occured unloading the databse driver", unexpected);
>>>>>>>
LOGGER.log(Level.FINE,
"An unexpected throwable occurred unloading the database driver", unexpected); |
<<<<<<<
=======
public static void writeIntLE(WritableByteChannel channel, int value) throws IOException {
ByteBuffer allocate = ByteBuffer.allocate(4);
allocate.order(ByteOrder.LITTLE_ENDIAN);
channel.write((ByteBuffer) allocate.putInt(value).flip());
}
>>>>>>>
public static void writeIntLE(WritableByteChannel channel, int value) throws IOException {
ByteBuffer allocate = ByteBuffer.allocate(4);
allocate.order(ByteOrder.LITTLE_ENDIAN);
channel.write((ByteBuffer) allocate.putInt(value).flip());
}
<<<<<<<
public static ByteBuffer duplicate(ByteBuffer bb) {
ByteBuffer out = ByteBuffer.allocate(bb.remaining());
out.put(bb.duplicate());
out.flip();
return out;
}
public static int find(List<ByteBuffer> catalog, ByteBuffer key) {
byte[] keyA = toArray(key);
for (int i = 0; i < catalog.size(); i++) {
if (Arrays.equals(toArray(catalog.get(i)), keyA))
return i;
}
return -1;
}
public static interface FileReaderListener {
void progress(int percentDone);
}
public static abstract class FileReader {
private int oldPd;
protected abstract void data(ByteBuffer data, long filePos);
protected abstract void done();
public void readFile(SeekableByteChannel ch, int bufferSize, FileReaderListener listener) throws IOException {
ByteBuffer buf = ByteBuffer.allocate(bufferSize);
long size = ch.size();
for (long pos = ch.position(); ch.read(buf) != -1; pos = ch.position()) {
buf.flip();
data(buf, pos);
buf.flip();
if (listener != null) {
int newPd = (int) (100 * pos / size);
if (newPd != oldPd)
listener.progress(newPd);
oldPd = newPd;
}
}
done();
}
public void readFile(File source, int bufferSize, FileReaderListener listener) throws IOException {
SeekableByteChannel ch = null;
try {
ch = NIOUtils.readableFileChannel(source);
readFile(ch, bufferSize, listener);
} finally {
NIOUtils.closeQuietly(ch);
}
}
}
public static byte getRel(ByteBuffer bb, int rel) {
return bb.get(bb.position() + rel);
}
public static ByteBuffer cloneBuffer(ByteBuffer pesBuffer) {
ByteBuffer res = ByteBuffer.allocate(pesBuffer.remaining());
res.put(pesBuffer.duplicate());
res.clear();
return res;
}
=======
public static ByteBuffer clone(ByteBuffer byteBuffer) {
ByteBuffer result = ByteBuffer.allocate(byteBuffer.remaining());
result.put(byteBuffer.duplicate());
result.flip();
return result;
}
>>>>>>>
public static ByteBuffer duplicate(ByteBuffer bb) {
ByteBuffer out = ByteBuffer.allocate(bb.remaining());
out.put(bb.duplicate());
out.flip();
return out;
}
public static int find(List<ByteBuffer> catalog, ByteBuffer key) {
byte[] keyA = toArray(key);
for (int i = 0; i < catalog.size(); i++) {
if (Arrays.equals(toArray(catalog.get(i)), keyA))
return i;
}
return -1;
}
public static interface FileReaderListener {
void progress(int percentDone);
}
public static abstract class FileReader {
private int oldPd;
protected abstract void data(ByteBuffer data, long filePos);
protected abstract void done();
public void readFile(SeekableByteChannel ch, int bufferSize, FileReaderListener listener) throws IOException {
ByteBuffer buf = ByteBuffer.allocate(bufferSize);
long size = ch.size();
for (long pos = ch.position(); ch.read(buf) != -1; pos = ch.position()) {
buf.flip();
data(buf, pos);
buf.flip();
if (listener != null) {
int newPd = (int) (100 * pos / size);
if (newPd != oldPd)
listener.progress(newPd);
oldPd = newPd;
}
}
done();
}
public void readFile(File source, int bufferSize, FileReaderListener listener) throws IOException {
SeekableByteChannel ch = null;
try {
ch = NIOUtils.readableFileChannel(source);
readFile(ch, bufferSize, listener);
} finally {
NIOUtils.closeQuietly(ch);
}
}
}
public static byte getRel(ByteBuffer bb, int rel) {
return bb.get(bb.position() + rel);
}
public static ByteBuffer cloneBuffer(ByteBuffer pesBuffer) {
ByteBuffer res = ByteBuffer.allocate(pesBuffer.remaining());
res.put(pesBuffer.duplicate());
res.clear();
return res;
}
public static ByteBuffer clone(ByteBuffer byteBuffer) {
ByteBuffer result = ByteBuffer.allocate(byteBuffer.remaining());
result.put(byteBuffer.duplicate());
result.flip();
return result;
} |
<<<<<<<
public void predictInFrame(Picture reference, int x, int y, int[][] mbPix, BitReader _in, int motionType,
=======
public void predictInFrame(Picture8Bit reference, int x, int y, int[][] mbPix, BitReader in, int motionType,
>>>>>>>
public void predictInFrame(Picture8Bit reference, int x, int y, int[][] mbPix, BitReader _in, int motionType, |
<<<<<<<
import org.jcodec.containers.mp4.boxes.AudioSampleEntry;
import org.jcodec.containers.mp4.boxes.Box;
=======
import org.jcodec.containers.mp4.boxes.ChannelBox;
>>>>>>>
import org.jcodec.containers.mp4.boxes.ChannelBox;
<<<<<<<
import org.jcodec.containers.mp4.boxes.PixelAspectExt;
import org.jcodec.containers.mp4.boxes.ProductionApertureBox;
=======
import org.jcodec.containers.mp4.boxes.PixelAspectExt;
>>>>>>>
import org.jcodec.containers.mp4.boxes.PixelAspectExt;
import org.jcodec.containers.mp4.boxes.ProductionApertureBox;
<<<<<<<
import org.jcodec.containers.mp4.boxes.VideoSampleEntry;
import org.jcodec.containers.mp4.muxer.FramesMP4MuxerTrack;
import org.jcodec.movtool.streaming.VirtualMovie.PacketChunk;
=======
import org.jcodec.containers.mp4.boxes.channel.ChannelUtils;
import org.jcodec.containers.mp4.muxer.MP4Muxer;
import org.jcodec.movtool.streaming.VirtualMP4Movie.PacketChunk;
>>>>>>>
import org.jcodec.containers.mp4.boxes.channel.ChannelUtils;
import org.jcodec.containers.mp4.muxer.FramesMP4MuxerTrack;
import org.jcodec.containers.mp4.muxer.MP4Muxer;
import org.jcodec.movtool.streaming.VirtualMP4Movie.PacketChunk;
<<<<<<<
public static ByteBuffer produceHeader(PacketChunk[] chunks, VirtualTrack[] tracks, long dataSize, Brand brand)
throws IOException {
=======
public static ByteBuffer produceHeader(PacketChunk[] chunks, VirtualTrack[] tracks, long dataSize)
throws IOException {
>>>>>>>
public static ByteBuffer produceHeader(PacketChunk[] chunks, VirtualTrack[] tracks, long dataSize, Brand brand)
throws IOException {
<<<<<<<
Size dd = new Size(0, 0), sd = new Size(0, 0);
if (se instanceof VideoSampleEntry) {
VideoSampleEntry vse = (VideoSampleEntry) se;
PixelAspectExt pasp = Box.findFirst(vse, PixelAspectExt.class, "pasp");
if (pasp == null)
sd = dd = new Size(vse.getWidth(), vse.getHeight());
else {
Rational r = pasp.getRational();
dd = new Size(r.multiplyS(vse.getWidth()), vse.getHeight());
sd = new Size(vse.getWidth(), vse.getHeight());
}
=======
Size dd = new Size(0, 0);
if (codecMeta instanceof VideoCodecMeta) {
dd = ((VideoCodecMeta) codecMeta).getSize();
>>>>>>>
Size dd = new Size(0, 0), sd = new Size(0, 0);
if (codecMeta instanceof VideoCodecMeta) {
VideoCodecMeta meta = (VideoCodecMeta) codecMeta;
Rational pasp = meta.getPasp();
if (pasp == null)
sd = dd = meta.getSize();
else {
sd = meta.getSize();
dd = new Size(pasp.multiplyS(sd.getWidth()), sd.getHeight());
}
<<<<<<<
TrackType tt = (se instanceof AudioSampleEntry) ? TrackType.SOUND : TrackType.VIDEO;
if (tt == TrackType.VIDEO) {
NodeBox tapt = new NodeBox(new Header("tapt"));
tapt.add(new ClearApertureBox(dd.getWidth(), dd.getHeight()));
tapt.add(new ProductionApertureBox(dd.getWidth(), dd.getHeight()));
tapt.add(new EncodedPixelBox(sd.getWidth(), sd.getHeight()));
trak.add(tapt);
}
=======
TrackType tt = (codecMeta instanceof AudioCodecMeta) ? TrackType.SOUND : TrackType.VIDEO;
>>>>>>>
TrackType tt = (codecMeta instanceof AudioCodecMeta) ? TrackType.SOUND : TrackType.VIDEO;
if (tt == TrackType.VIDEO) {
NodeBox tapt = new NodeBox(new Header("tapt"));
tapt.add(new ClearApertureBox(dd.getWidth(), dd.getHeight()));
tapt.add(new ProductionApertureBox(dd.getWidth(), dd.getHeight()));
tapt.add(new EncodedPixelBox(sd.getWidth(), sd.getHeight()));
trak.add(tapt);
}
<<<<<<<
stts.add(new TimeToSampleEntry(prevCount, prevDur));
=======
stts.add(new TimeToSampleEntry(prevCount, (int) Math.round(prevDur * timescale)));
>>>>>>>
stts.add(new TimeToSampleEntry(prevCount, prevDur));
<<<<<<<
private static void populateStblPCM(NodeBox stbl, PacketChunk[] chunks, int trackId, SampleEntry se)
throws IOException {
AudioSampleEntry ase = (AudioSampleEntry) se;
int frameSize = ase.calcFrameSize();
=======
private static void populateStblPCM(NodeBox stbl, PacketChunk[] chunks, int trackId, CodecMeta se)
throws IOException {
AudioCodecMeta ase = (AudioCodecMeta) se;
int frameSize = ase.getFrameSize();
>>>>>>>
private static void populateStblPCM(NodeBox stbl, PacketChunk[] chunks, int trackId, CodecMeta se)
throws IOException {
AudioCodecMeta ase = (AudioCodecMeta) se;
int frameSize = ase.getFrameSize(); |
<<<<<<<
=======
import java.nio.ByteBuffer;
>>>>>>>
<<<<<<<
import org.jcodec.codecs.prores.ProresDecoder;
=======
import org.jcodec.codecs.h264.H264Encoder;
import org.jcodec.codecs.h264.H264Utils;
import org.jcodec.codecs.h264.encode.H264FixedRateControl;
>>>>>>>
import org.jcodec.codecs.prores.ProresDecoder;
<<<<<<<
import org.jcodec.common.VideoDecoder;
=======
import org.jcodec.codecs.prores.ProresEncoder.Profile;
import org.jcodec.common.VideoDecoder;
import org.jcodec.common.VideoEncoder;
>>>>>>>
import org.jcodec.common.VideoDecoder;
<<<<<<<
public class Prores2AVCTrack extends Transcode2AVCTrack {
=======
public class Prores2AVCTrack extends TranscodeTrack {
>>>>>>>
public class Prores2AVCTrack extends Transcode2AVCTrack { |
<<<<<<<
import org.jcodec.common.AudioFormat;
import org.jcodec.containers.mp4.boxes.AudioSampleEntry;
=======
import javax.sound.sampled.AudioFormat;
>>>>>>>
import org.jcodec.common.AudioFormat; |
<<<<<<<
private final HashMap<String, HashMap<ShopChunk, HashMap<Location, Shop>>> shops = new HashMap<>();
private final Set<Shop> loadedShops = Sets.newHashSet();
=======
private Set<Shop> loadedShops = new CopyOnWriteArraySet<>();
>>>>>>>
private final Set<Shop> loadedShops = Sets.newHashSet();
<<<<<<<
/**
* Get all loaded shops.
*
* @return All loaded shops.
*/
@NotNull
public Set<Shop> getLoadedShops() {
return this.loadedShops;
}
/**
* Get a players all shops.
*
* @param playerUUID The player's uuid.
* @return The list have this player's all shops.
*/
public @NotNull List<Shop> getPlayerAllShops(@NotNull UUID playerUUID) {
return getAllShops().stream().filter(shop -> shop.getOwner().equals(playerUUID)).collect(Collectors.toList());
}
/**
* Get the all shops in the world.
*
* @param world The world you want get the shops.
* @return The list have this world all shops
*/
public @NotNull List<Shop> getShopsInWorld(@NotNull World world) {
return getAllShops().stream().filter(shop -> Objects.equals(shop.getLocation().getWorld(), world)).collect(Collectors.toList());
}
=======
>>>>>>> |
<<<<<<<
BusinessObjectDefinitionCreateRequest request =
createBusinessObjectDefinitionCreateRequest(NAMESPACE, BDEF_NAME, DATA_PROVIDER_NAME, BDEF_DESCRIPTION, BDEF_DISPLAY_NAME);
=======
BusinessObjectDefinitionCreateRequest request = businessObjectDefinitionServiceTestHelper
.createBusinessObjectDefinitionCreateRequest(NAMESPACE, BDEF_NAME, DATA_PROVIDER_NAME, BDEF_DESCRIPTION);
>>>>>>>
BusinessObjectDefinitionCreateRequest request = businessObjectDefinitionServiceTestHelper
.createBusinessObjectDefinitionCreateRequest(NAMESPACE, BDEF_NAME, DATA_PROVIDER_NAME, BDEF_DESCRIPTION, BDEF_DISPLAY_NAME); |
<<<<<<<
@Override
protected void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException {
final File file = dependency.getActualFile();
if (!file.isFile() || file.length() == 0) {
return;
}
try (JsonReader jsonReader = Json.createReader(FileUtils.openInputStream(file))) {
final JsonObject json = jsonReader.readObject();
if (json.containsKey("name")) {
final Object value = json.get("name");
if (value instanceof JsonString) {
final String valueString = ((JsonString) value).getString();
dependency.addEvidence(EvidenceType.PRODUCT, PACKAGE_JSON, "name", valueString, Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VENDOR, PACKAGE_JSON, "name_project",
String.format("%s_project", valueString), Confidence.LOW);
} else {
LOGGER.warn("JSON value not string as expected: {}", value);
}
}
addToEvidence(dependency, EvidenceType.PRODUCT, json, "description");
addToEvidence(dependency, EvidenceType.VENDOR, json, "author");
addToEvidence(dependency, EvidenceType.VERSION, json, "version");
dependency.setDisplayFileName(String.format("%s/%s", file.getParentFile().getName(), file.getName()));
} catch (JsonException e) {
LOGGER.warn("Failed to parse package.json file.", e);
} catch (IOException e) {
throw new AnalysisException("Problem occurred while reading dependency file.", e);
}
}
=======
@Override
protected void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException {
dependency.setEcosystem(DEPENDENCY_ECOSYSTEM);
final File file = dependency.getActualFile();
if (!file.isFile() || file.length() == 0) {
return;
}
try (JsonReader jsonReader = Json.createReader(FileUtils.openInputStream(file))) {
final JsonObject json = jsonReader.readObject();
final EvidenceCollection productEvidence = dependency.getProductEvidence();
final EvidenceCollection vendorEvidence = dependency.getVendorEvidence();
if (json.containsKey("name")) {
final Object value = json.get("name");
if (value instanceof JsonString) {
final String valueString = ((JsonString) value).getString();
productEvidence.addEvidence(PACKAGE_JSON, "name", valueString, Confidence.HIGHEST);
dependency.setName(valueString);
vendorEvidence.addEvidence(PACKAGE_JSON, "name_project", String.format("%s_project", valueString),
Confidence.LOW);
} else {
LOGGER.warn("JSON value not string as expected: {}", value);
}
}
addToEvidence(json, productEvidence, "description");
addToEvidence(json, vendorEvidence, "author");
final String version = addToEvidence(json, dependency.getVersionEvidence(), "version");
dependency.setVersion(version);
} catch (JsonException e) {
LOGGER.warn("Failed to parse package.json file.", e);
} catch (IOException e) {
throw new AnalysisException("Problem occurred while reading dependency file.", e);
}
}
>>>>>>>
@Override
protected void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException {
dependency.setEcosystem(DEPENDENCY_ECOSYSTEM);
final File file = dependency.getActualFile();
if (!file.isFile() || file.length() == 0) {
return;
}
try (JsonReader jsonReader = Json.createReader(FileUtils.openInputStream(file))) {
final JsonObject json = jsonReader.readObject();
if (json.containsKey("name")) {
final Object value = json.get("name");
if (value instanceof JsonString) {
final String valueString = ((JsonString) value).getString();
dependency.setName(valueString);
dependency.addEvidence(EvidenceType.PRODUCT, PACKAGE_JSON, "name", valueString, Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VENDOR, PACKAGE_JSON, "name_project",
String.format("%s_project", valueString), Confidence.LOW);
} else {
LOGGER.warn("JSON value not string as expected: {}", value);
}
}
addToEvidence(dependency, EvidenceType.PRODUCT, json, "description");
addToEvidence(dependency, EvidenceType.VENDOR, json, "author");
final String version = addToEvidence(dependency, EvidenceType.VERSION, json, "version");
dependency.setVersion(version);
} catch (JsonException e) {
LOGGER.warn("Failed to parse package.json file.", e);
} catch (IOException e) {
throw new AnalysisException("Problem occurred while reading dependency file.", e);
}
}
<<<<<<<
dep.addEvidence(t, PACKAGE_JSON, key, ((JsonString) value).getString(), Confidence.HIGHEST);
=======
evidenceStr = ((JsonString) value).getString();
collection.addEvidence(PACKAGE_JSON, key, evidenceStr, Confidence.HIGHEST);
>>>>>>>
evidenceStr = ((JsonString) value).getString();
dep.addEvidence(t, PACKAGE_JSON, key, evidenceStr, Confidence.HIGHEST);
<<<<<<<
dep.addEvidence(t, PACKAGE_JSON,
=======
evidenceStr = ((JsonString) subValue).getString();
collection.addEvidence(PACKAGE_JSON,
>>>>>>>
evidenceStr = ((JsonString) subValue).getString();
dep.addEvidence(t, PACKAGE_JSON, |
<<<<<<<
// Create and return the business object format object from the deleted entity.
BusinessObjectFormat deletedBusinessObjectFormat = businessObjectFormatHelper.createBusinessObjectFormatFromEntity(businessObjectFormatEntity);
=======
// Check if business object format being deleted is used as a descriptive format.
if (businessObjectFormatEntity.equals(businessObjectFormatEntity.getBusinessObjectDefinition().getDescriptiveBusinessObjectFormat()))
{
businessObjectFormatEntity.getBusinessObjectDefinition().setDescriptiveBusinessObjectFormat(null);
businessObjectDefinitionDao.saveAndRefresh(businessObjectFormatEntity.getBusinessObjectDefinition());
}
>>>>>>>
// Create and return the business object format object from the deleted entity.
BusinessObjectFormat deletedBusinessObjectFormat = businessObjectFormatHelper.createBusinessObjectFormatFromEntity(businessObjectFormatEntity);
// Check if business object format being deleted is used as a descriptive format.
if (businessObjectFormatEntity.equals(businessObjectFormatEntity.getBusinessObjectDefinition().getDescriptiveBusinessObjectFormat()))
{
businessObjectFormatEntity.getBusinessObjectDefinition().setDescriptiveBusinessObjectFormat(null);
businessObjectDefinitionDao.saveAndRefresh(businessObjectFormatEntity.getBusinessObjectDefinition());
} |
<<<<<<<
// Set the web client logger to warn level so we don't get unnecessary info level logging on the output.
setLogLevel(DataBridgeWebClient.class, LogLevel.WARN);
setLogLevel(DownloaderWebClient.class, LogLevel.WARN);
=======
>>>>>>>
// Set the web client logger to warn level so we don't get unnecessary info level logging on the output.
setLogLevel(DataBridgeWebClient.class, LogLevel.WARN);
setLogLevel(DownloaderWebClient.class, LogLevel.WARN); |
<<<<<<<
try {
conn = Downloader.getConnection(url);
conn.setRequestMethod("HEAD");
conn.connect();
timestamp = conn.getLastModified();
} catch (Exception ex) {
throw new DownloadFailedException("Error making HTTP HEAD request.", ex);
} finally {
if (conn != null) {
try {
conn.disconnect();
} finally {
conn = null;
=======
//TODO add the FPR protocol?
if ("file".equalsIgnoreCase(url.getProtocol())) {
File lastModifiedFile;
try {
// if (System.getProperty("os.name").toLowerCase().startsWith("windows")) {
// String filePath = url.toString();
// if (filePath.matches("file://[a-zA-Z]:.*")) {
// f = new File(filePath.substring(7));
// } else {
// f = new File(url.toURI());
// }
// } else {
lastModifiedFile = new File(url.toURI());
// }
} catch (URISyntaxException ex) {
final String msg = String.format("Unable to locate '%s'; is the cve.url-2.0.modified property set correctly?", url.toString());
throw new DownloadFailedException(msg);
}
timestamp = lastModifiedFile.lastModified();
} else {
HttpURLConnection conn = null;
try {
conn = Downloader.getConnection(url);
conn.setRequestMethod("HEAD");
conn.connect();
timestamp = conn.getLastModified();
} catch (Exception ex) {
throw new DownloadFailedException("Error making HTTP HEAD request.", ex);
} finally {
if (conn != null) {
try {
conn.disconnect();
} finally {
conn = null;
}
>>>>>>>
//TODO add the FPR protocol?
if ("file".equalsIgnoreCase(url.getProtocol())) {
File lastModifiedFile;
try {
lastModifiedFile = new File(url.toURI());
} catch (URISyntaxException ex) {
final String msg = String.format("Unable to locate '%s'; is the cve.url-2.0.modified property set correctly?", url.toString());
throw new DownloadFailedException(msg);
}
timestamp = lastModifiedFile.lastModified();
} else {
HttpURLConnection conn = null;
try {
conn = Downloader.getConnection(url);
conn.setRequestMethod("HEAD");
conn.connect();
timestamp = conn.getLastModified();
} catch (Exception ex) {
throw new DownloadFailedException("Error making HTTP HEAD request.", ex);
} finally {
if (conn != null) {
try {
conn.disconnect();
} finally {
conn = null;
} |
<<<<<<<
import org.finra.herd.dao.impl.S3DaoImpl;
=======
>>>>>>>
<<<<<<<
private static final Logger LOGGER = LoggerFactory.getLogger(UploaderControllerTest.class);
@Before
@Override
public void setup() throws Exception
{
super.setup();
// Set the web client logger to warn level so we don't get unnecessary info level logging on the output.
setLogLevel(DataBridgeWebClient.class, LogLevel.WARN);
setLogLevel(UploaderWebClient.class, LogLevel.WARN);
setLogLevel(S3DaoImpl.class, LogLevel.WARN);
}
=======
>>>>>>>
private static final Logger LOGGER = LoggerFactory.getLogger(UploaderControllerTest.class);
@Before
@Override
public void setup() throws Exception
{
super.setup();
// Set the web client logger to warn level so we don't get unnecessary info level logging on the output.
setLogLevel(DataBridgeWebClient.class, LogLevel.WARN);
setLogLevel(UploaderWebClient.class, LogLevel.WARN);
setLogLevel(S3DaoImpl.class, LogLevel.WARN);
}
<<<<<<<
runUpload(UploaderController.MIN_THREADS, Boolean.TRUE);
}
@Test
public void testPerformUploadWithAttributes() throws Exception
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
HashMap<String, String> attributes = new HashMap<>();
attributes.put("key1", "value1");
attributes.put("key2", "value2");
runUpload(UploaderController.MIN_THREADS, attributes);
}
@Test
public void testPerformUploadMinThreads() throws Exception
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
// Calling to run the upload using number of threads just below the low threshold,
// that should result in UploaderController adjusting the number of threads to MIN_THREADS value.
runUpload(UploaderController.MIN_THREADS - 1);
}
@Test
public void testPerformUploadMaxThreads() throws Exception
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
// Calling to run the upload using number of threads just above the upper threshold,
// that should result in UploaderController adjusting the number of threads to MAX_THREADS value.
runUpload(UploaderController.MAX_THREADS + 1);
}
@Test
public void testPerformUploadWithLoggerLevelSetToWarn() throws Exception
{
LogLevel origLoggerLevel = getLogLevel(UploaderController.class);
setLogLevel(UploaderController.class, LogLevel.WARN);
try
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
runUpload(UploaderController.MIN_THREADS);
}
finally
{
setLogLevel(UploaderController.class, origLoggerLevel);
}
=======
runUpload(UploaderController.MIN_THREADS, true, false);
>>>>>>>
runUpload(UploaderController.MIN_THREADS, true, false);
}
@Test
public void testPerformUploadWithAttributes() throws Exception
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
HashMap<String, String> attributes = new HashMap<>();
attributes.put("key1", "value1");
attributes.put("key2", "value2");
runUpload(UploaderController.MIN_THREADS, attributes);
}
@Test
public void testPerformUploadMinThreads() throws Exception
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
// Calling to run the upload using number of threads just below the low threshold,
// that should result in UploaderController adjusting the number of threads to MIN_THREADS value.
runUpload(UploaderController.MIN_THREADS - 1);
}
@Test
public void testPerformUploadMaxThreads() throws Exception
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
// Calling to run the upload using number of threads just above the upper threshold,
// that should result in UploaderController adjusting the number of threads to MAX_THREADS value.
runUpload(UploaderController.MAX_THREADS + 1);
}
@Test
public void testPerformUploadWithLoggerLevelSetToWarn() throws Exception
{
LogLevel origLoggerLevel = getLogLevel(UploaderController.class);
setLogLevel(UploaderController.class, LogLevel.WARN);
try
{
// Upload and register business object data parents.
uploadAndRegisterTestDataParents(uploaderWebClient);
runUpload(UploaderController.MIN_THREADS);
}
finally
{
setLogLevel(UploaderController.class, origLoggerLevel);
}
<<<<<<<
protected void runUpload(Integer numOfThreads, HashMap<String, String> attributes, Boolean createNewVersion, String hostname, String storageName)
throws Exception
=======
protected void runUpload(Integer numOfThreads, HashMap<String, String> attributes, Boolean createNewVersion, Boolean force, String hostname,
String storageName) throws Exception
>>>>>>>
protected void runUpload(Integer numOfThreads, HashMap<String, String> attributes, Boolean createNewVersion, Boolean force, String hostname,
String storageName) throws Exception |
<<<<<<<
private GreenhouseUserDetailsService userDetailsService;
=======
private AuthenticationManager authenticationManager;
>>>>>>>
private GreenhouseUserDetailsService userDetailsService;
private AuthenticationManager authenticationManager;
<<<<<<<
public SignupController(JdbcTemplate jdbcTemplate, GreenhouseUserDetailsService userDetailsService, GreenhouseUpdatesService updatesService) {
=======
public SignupController(JdbcTemplate jdbcTemplate, GreenhouseUpdatesService updatesService,
@Qualifier("org.springframework.security.authenticationManager") ProviderManager authenticationManager) {
>>>>>>>
public SignupController(JdbcTemplate jdbcTemplate, GreenhouseUpdatesService updatesService,
GreenhouseUserDetailsService userDetailsService,
@Qualifier("org.springframework.security.authenticationManager") ProviderManager authenticationManager) {
<<<<<<<
this.userDetailsService = userDetailsService;
=======
this.authenticationManager = authenticationManager;
>>>>>>>
this.userDetailsService = userDetailsService;
this.authenticationManager = authenticationManager; |
<<<<<<<
import org.rdfhdt.hdt.rdf.TripleWriter;
import org.rdfhdt.hdt.triples.TripleString;
import org.rdfhdt.hdt.util.StopWatch;
=======
import org.rdfhdt.hdt.triples.IteratorTripleString;
>>>>>>>
import org.rdfhdt.hdt.rdf.TripleWriter;
import org.rdfhdt.hdt.triples.TripleString; |
<<<<<<<
if(numTriples>0) {
bitY.append(true);
bitZ.append(true);
}
vectorY.aggresiveTrimToSize();
=======
vectorY.aggressiveTrimToSize();
>>>>>>>
vectorY.aggressiveTrimToSize();
<<<<<<<
if(isClosed) {
throw new IllegalStateException("Cannot search on BitmapTriples if it's already closed");
}
=======
if (getNumberOfElements() == 0 || pattern.isNoMatch()) {
return new EmptyTriplesIterator(order);
}
>>>>>>>
if(isClosed) {
throw new IllegalStateException("Cannot search on BitmapTriples if it's already closed");
}
if (getNumberOfElements() == 0 || pattern.isNoMatch()) {
return new EmptyTriplesIterator(order);
} |
<<<<<<<
public void populateHeaderStructure(String baseUri) {
if(baseUri==null || baseUri.length()==0) {
throw new IllegalArgumentException("baseURI cannot be empty");
}
if(isClosed) {
throw new IllegalStateException("Cannot add header to a closed HDT.");
}
=======
@Override
public void populateHeaderStructure(String baseUri) {
>>>>>>>
@Override
public void populateHeaderStructure(String baseUri) {
if(baseUri==null || baseUri.length()==0) {
throw new IllegalArgumentException("baseURI cannot be empty");
}
if(isClosed) {
throw new IllegalStateException("Cannot add header to a closed HDT.");
}
<<<<<<<
if(e instanceof FileNotFoundException) {
System.out.println("The .hdt.index doesn't exist, generating a new one.");
} else {
System.out.println("Error reading .hdt.index, generating a new one. The error was: "+e.getMessage());
e.printStackTrace();
}
=======
log.info("Could not read .hdt.index, Generating a new one.");
>>>>>>>
if(e instanceof FileNotFoundException) {
System.out.println("The .hdt.index doesn't exist, generating a new one.");
} else {
System.out.println("Error reading .hdt.index, generating a new one. The error was: "+e.getMessage());
e.printStackTrace();
}
<<<<<<<
System.err.println("Error writing index file.");
e2.printStackTrace();
} finally {
IOUtil.closeQuietly(out);
=======
log.warn("Could not save .hdt.index: {}", e2.toString());
>>>>>>>
System.err.println("Error writing index file.");
e2.printStackTrace();
} finally {
IOUtil.closeQuietly(out);
<<<<<<<
public boolean isMapped() {
return isMapped;
}
=======
// For debugging
@Override
public String toString() {
return String.format("HDT[file=%s,#triples=%d]", hdtFileName, triples.getNumberOfElements());
}
>>>>>>>
public boolean isMapped() {
return isMapped;
} |
<<<<<<<
import org.rdfhdt.hdt.dictionary.impl.FourSectionDictionary;
=======
import org.rdfhdt.hdt.enums.ResultEstimationType;
>>>>>>>
import org.rdfhdt.hdt.dictionary.impl.FourSectionDictionary;
import org.rdfhdt.hdt.enums.ResultEstimationType;
<<<<<<<
if(triple.isNoMatch()) {
throw new NotFoundException("String not found in dictionary");
=======
if(triple.getSubject()==-1 || triple.getPredicate()==-1 || triple.getObject()==-1) {
//throw new NotFoundException("String not found in dictionary");
return new IteratorTripleString() {
@Override
public TripleString next() {
return null;
}
@Override
public boolean hasNext() {
return false;
}
@Override
public TripleString previous() {
return null;
}
@Override
public ResultEstimationType numResultEstimation() {
return ResultEstimationType.EXACT;
}
@Override
public boolean hasPrevious() {
return false;
}
@Override
public void goToStart() {
}
@Override
public long estimatedNumResults() {
return 0;
}
};
>>>>>>>
if(triple.isNoMatch()) {
//throw new NotFoundException("String not found in dictionary");
return new IteratorTripleString() {
@Override
public TripleString next() {
return null;
}
@Override
public boolean hasNext() {
return false;
}
@Override
public ResultEstimationType numResultEstimation() {
return ResultEstimationType.EXACT;
}
@Override
public void goToStart() {
}
@Override
public long estimatedNumResults() {
return 0;
}
};
<<<<<<<
CountInputStream in=null;
=======
CountInputStream in =null;
>>>>>>>
CountInputStream in=null;
<<<<<<<
=======
//in.close();
>>>>>>>
<<<<<<<
if(this.hdtFileName!=null) {
FileOutputStream out=null;
try {
out = new FileOutputStream(versionName);
ci.clear();
triples.saveIndex(out, ci, listener);
out.close();
System.out.println("Index generated and saved in "+st.stopAndShow());
} catch (IOException e2) {
System.err.println("Error writing index file.");
e2.printStackTrace();
} finally {
IOUtil.closeQuietly(out);
}
=======
try {
out = new FileOutputStream(versionName);
ci.clear();
triples.saveIndex(out, ci, listener);
out.close();
} catch (IOException e2) {
} finally {
IOUtil.closeQuietly(out);
>>>>>>>
if(this.hdtFileName!=null) {
FileOutputStream out=null;
try {
out = new FileOutputStream(versionName);
ci.clear();
triples.saveIndex(out, ci, listener);
out.close();
System.out.println("Index generated and saved in "+st.stopAndShow());
} catch (IOException e2) {
System.err.println("Error writing index file.");
e2.printStackTrace();
} finally {
IOUtil.closeQuietly(out);
} |
<<<<<<<
import java.util.function.Predicate;
import org.apache.jena.atlas.lib.tuple.Tuple;
import org.apache.jena.atlas.logging.Log;
import org.rdfhdt.hdtjena.HDTGraph;
import org.rdfhdt.hdtjena.HDTJenaConstants;
import org.rdfhdt.hdtjena.bindings.HDTId;
=======
import java.util.function.Predicate;
>>>>>>>
import java.util.function.Predicate;
<<<<<<<
=======
import org.rdfhdt.hdtjena.HDTGraph;
import org.rdfhdt.hdtjena.HDTJenaConstants;
import org.rdfhdt.hdtjena.bindings.HDTId;
>>>>>>>
<<<<<<<
Predicate<Tuple<HDTId>> filter;
=======
final Predicate<Tuple<HDTId>> filter;
>>>>>>>
final Predicate<Tuple<HDTId>> filter; |
<<<<<<<
import java.io.FileNotFoundException;
=======
import java.io.BufferedInputStream;
import java.io.FileInputStream;
>>>>>>>
import java.io.FileNotFoundException;
<<<<<<<
=======
import java.util.zip.GZIPInputStream;
>>>>>>>
<<<<<<<
import org.rdfhdt.hdt.util.io.IOUtil;
=======
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
>>>>>>>
import org.rdfhdt.hdt.util.io.IOUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
<<<<<<<
e.printStackTrace();
throw new ParserException(e);
=======
log.error("Unexpected exception.", e);
throw new ParserException();
>>>>>>>
log.error("Unexpected exception.", e);
throw new ParserException(e);
<<<<<<<
triple.setAll(parsedTriple.getSubject().toString(), parsedTriple.getPredicate().toString(), parsedTriple.getObject().toString());
callback.processTriple(triple, 0);
=======
triple.setAll(
JenaNodeFormatter.format(parsedTriple.getSubject()),
JenaNodeFormatter.format(parsedTriple.getPredicate()),
JenaNodeFormatter.format(parsedTriple.getObject()));
callback.processTriple(triple, 0);
>>>>>>>
triple.setAll(
JenaNodeFormatter.format(parsedTriple.getSubject()),
JenaNodeFormatter.format(parsedTriple.getPredicate()),
JenaNodeFormatter.format(parsedTriple.getObject()));
callback.processTriple(triple, 0);
<<<<<<<
triple.setAll(quad.getSubject().toString(), quad.getPredicate().toString(), quad.getObject().toString());
callback.processTriple(triple, 0);
=======
triple.setAll(
JenaNodeFormatter.format(quad.getSubject()),
JenaNodeFormatter.format(quad.getPredicate()),
JenaNodeFormatter.format(quad.getObject()));
callback.processTriple(triple, 0);
>>>>>>>
triple.setAll(
JenaNodeFormatter.format(quad.getSubject()),
JenaNodeFormatter.format(quad.getPredicate()),
JenaNodeFormatter.format(quad.getObject()));
callback.processTriple(triple, 0); |
<<<<<<<
/**
* @return config
*/
public MyriaConfiguration getConfig() {
return CONFIG;
}
=======
/**
* @return the master catalog.
*/
public MasterCatalog getCatalog() {
return catalog;
}
>>>>>>>
/**
* @return config
*/
public MyriaConfiguration getConfig() {
return CONFIG;
}
/**
* @return the master catalog.
*/
public MasterCatalog getCatalog() {
return catalog;
} |
<<<<<<<
LOG.warning("An error occurred with the .NET AssemblyAnalyzer, please see the log for more details.");
LOG.fine("GrokAssembly.exe is not working properly");
=======
LOGGER.warning("An error occured with the .NET AssemblyAnalyzer, please see the log for more details.");
LOGGER.fine("GrokAssembly.exe is not working properly");
>>>>>>>
LOGGER.warning("An error occurred with the .NET AssemblyAnalyzer, please see the log for more details.");
LOGGER.fine("GrokAssembly.exe is not working properly");
<<<<<<<
LOG.warning("An error occurred with the .NET AssemblyAnalyzer; "
+ "this can be ignored unless you are scanning .NET DLLs. Please see the log for more details.");
LOG.log(Level.FINE, "Could not execute GrokAssembly {0}", e.getMessage());
throw new AnalysisException("An error occurred with the .NET AssemblyAnalyzer", e);
=======
LOGGER.warning("An error occured with the .NET AssemblyAnalyzer; "
+ "this can be ignored unless you are scanning .NET dlls. Please see the log for more details.");
LOGGER.log(Level.FINE, "Could not execute GrokAssembly {0}", e.getMessage());
throw new AnalysisException("An error occured with the .NET AssemblyAnalyzer", e);
>>>>>>>
LOGGER.warning("An error occured with the .NET AssemblyAnalyzer; "
+ "this can be ignored unless you are scanning .NET DLLs. Please see the log for more details.");
LOGGER.log(Level.FINE, "Could not execute GrokAssembly {0}", e.getMessage());
throw new AnalysisException("An error occured with the .NET AssemblyAnalyzer", e); |
<<<<<<<
inner.setStartIndices(ByteString.copyFrom(startIndicesBytes));
inner.setEndIndices(ByteString.copyFrom(endIndicesBytes));
return ColumnMessage.newBuilder().setType(ColumnMessage.Type.STRING).setStringColumn(inner).build();
=======
inner.setStartIndices(ByteString.copyFrom(startIndicesBytes.toByteArray()));
inner.setEndIndices(ByteString.copyFrom(endIndicesBytes.toByteArray()));
return ColumnMessage.newBuilder().setType(ColumnMessageType.STRING).setStringColumn(inner).build();
>>>>>>>
inner.setStartIndices(ByteString.copyFrom(startIndicesBytes.toByteArray()));
inner.setEndIndices(ByteString.copyFrom(endIndicesBytes.toByteArray()));
return ColumnMessage.newBuilder().setType(ColumnMessage.Type.STRING).setStringColumn(inner).build(); |
<<<<<<<
import org.slf4j.LoggerFactory;
import com.almworks.sqlite4java.SQLiteConnection;
import com.almworks.sqlite4java.SQLiteException;
=======
>>>>>>>
import com.almworks.sqlite4java.SQLiteConnection;
import com.almworks.sqlite4java.SQLiteException; |
<<<<<<<
private final int numTbl1Worker1 = 2000;
private final int numTbl1Worker2 = 2000;
=======
private final int numTbl1Worker1 = 500;
private final int numTbl1Worker2 = 600;
>>>>>>>
private final int numTbl1Worker1 = 2000;
private final int numTbl1Worker2 = 2000;
<<<<<<<
final SQLiteQueryScan scan1 = new SQLiteQueryScan("select * from " + testtableKeys.get(0), tableSchema);
final SQLiteQueryScan scan2 = new SQLiteQueryScan("select * from " + testtableKeys.get(0), tableSchema);
=======
final SQLiteQueryScan scan1 =
new SQLiteQueryScan(null, "select * from " + testtableKeys.get(0).toString("sqlite"), tableSchema);
final SQLiteQueryScan scan2 =
new SQLiteQueryScan(null, "select * from " + testtableKeys.get(0).toString("sqlite"), tableSchema);
>>>>>>>
final SQLiteQueryScan scan1 =
new SQLiteQueryScan("select * from " + testtableKeys.get(0).toString("sqlite"), tableSchema);
final SQLiteQueryScan scan2 =
new SQLiteQueryScan("select * from " + testtableKeys.get(0).toString("sqlite"), tableSchema);
<<<<<<<
scan[i] = new SQLiteQueryScan("select * from " + testtableKeys.get(i), tableSchema);
=======
scan[i] = new SQLiteQueryScan(null, "select * from " + testtableKeys.get(i).toString("sqlite"), tableSchema);
>>>>>>>
scan[i] = new SQLiteQueryScan("select * from " + testtableKeys.get(i).toString("sqlite"), tableSchema);
<<<<<<<
final CollectConsumer serverCollect =
new CollectConsumer(tableSchema, serverReceiveID, new int[] { WORKER_ID[0], WORKER_ID[1] });
final LinkedBlockingQueue<TupleBatch> receivedTupleBatches = new LinkedBlockingQueue<TupleBatch>();
final TBQueueExporter queueStore = new TBQueueExporter(receivedTupleBatches, serverCollect);
SinkRoot serverPlan = new SinkRoot(queueStore);
server.submitQueryPlan(serverPlan, workerPlans).sync();
TupleBatchBuffer actualResult = new TupleBatchBuffer(queueStore.getSchema());
while (!receivedTupleBatches.isEmpty()) {
tb = receivedTupleBatches.poll();
if (tb != null) {
tb.compactInto(actualResult);
}
}
final HashMap<Tuple, Integer> resultBag = TestUtils.tupleBatchToTupleBag(actualResult);
TestUtils.assertTupleBagEqual(expectedResult, resultBag);
=======
final Long queryId = 0L;
final CollectConsumer serverPlan = new CollectConsumer(tableSchema, serverReceiveID, WORKER_ID);
server.dispatchWorkerQueryPlans(queryId, workerPlans);
LOGGER.debug("Query dispatched to the workers");
TupleBatchBuffer result = server.startServerQuery(queryId, serverPlan);
>>>>>>>
final CollectConsumer serverCollect =
new CollectConsumer(tableSchema, serverReceiveID, new int[] { WORKER_ID[0], WORKER_ID[1] });
final LinkedBlockingQueue<TupleBatch> receivedTupleBatches = new LinkedBlockingQueue<TupleBatch>();
final TBQueueExporter queueStore = new TBQueueExporter(receivedTupleBatches, serverCollect);
SinkRoot serverPlan = new SinkRoot(queueStore);
server.submitQueryPlan(serverPlan, workerPlans).sync();
TupleBatchBuffer actualResult = new TupleBatchBuffer(queueStore.getSchema());
while (!receivedTupleBatches.isEmpty()) {
tb = receivedTupleBatches.poll();
if (tb != null) {
tb.compactInto(actualResult);
}
}
final HashMap<Tuple, Integer> resultBag = TestUtils.tupleBatchToTupleBag(actualResult);
TestUtils.assertTupleBagEqual(expectedResult, resultBag); |
<<<<<<<
final CollectConsumer cc =
new CollectConsumer(cp.getSchema(), worker2ReceiveID, new int[] { WORKER_ID[0], WORKER_ID[1] });
final BlockingSQLiteDataReceiver block2 = new BlockingSQLiteDataReceiver("temptable.db", "temptable", cc);
=======
final CollectConsumer cc = new CollectConsumer(cp, worker2ReceiveID, WORKER_ID);
final BlockingSQLiteDataReceiver block2 = new BlockingSQLiteDataReceiver(null, "temptable", cc);
>>>>>>>
final CollectConsumer cc = new CollectConsumer(cp.getSchema(), worker2ReceiveID, WORKER_ID);
final BlockingSQLiteDataReceiver block2 = new BlockingSQLiteDataReceiver(null, "temptable", cc); |
<<<<<<<
=======
/** The logger for this class. */
private static final Logger LOGGER = LoggerFactory.getLogger(LocalMultiwayConsumer.class.getName());
>>>>>>>
/** The logger for this class. */
private static final Logger LOGGER = LoggerFactory.getLogger(LocalMultiwayConsumer.class.getName()); |
<<<<<<<
@Type(name = "URI", value = UriSink.class),
@Type(name = "Pipe", value = PipeSink.class),
@Type(name = "Bytes", value = ByteSink.class)
})
=======
@Type(name = "Bytes", value = ByteArraySource.class),
@Type(name = "File", value = FileSource.class),
@Type(name = "URI", value = UriSource.class),
@Type(name = "Empty", value = EmptySource.class)
})
>>>>>>>
@Type(name = "Pipe", value = PipeSink.class),
@Type(name = "Bytes", value = ByteSink.class)
}) |
<<<<<<<
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
=======
import org.apache.mina.core.future.IoFutureListener;
import org.apache.mina.core.future.WriteFuture;
import org.apache.mina.core.service.IoHandler;
import org.apache.mina.core.service.IoHandlerAdapter;
import org.apache.mina.core.session.IdleStatus;
import org.apache.mina.core.session.IoSession;
import org.apache.mina.transport.socket.nio.NioSocketAcceptor;
import org.slf4j.LoggerFactory;
>>>>>>>
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.slf4j.LoggerFactory;
<<<<<<<
=======
protected final class ServerHandler extends IoHandlerAdapter {
// required in ParallelTest for instrument
final Thread mainThread;
public ServerHandler(final Thread mainThread) {
this.mainThread = mainThread;
}
@Override
public void exceptionCaught(final IoSession session, final Throwable cause) {
cause.printStackTrace();
// ParallelUtility.closeSession(session);
}
@Override
public void messageReceived(final IoSession session, final Object message) {
if (message instanceof TransportMessage) {
final TransportMessage tm = (TransportMessage) message;
if ((tm.getType() == TransportMessage.TransportMessageType.CONTROL)
&& (ControlMessage.ControlMessageType.CONNECT == tm.getControl().getType())) {
// connect request sent from other workers
final ControlMessage cm = tm.getControl();
// ControlProto.ExchangePairID epID = cm.getExchangePairID();
// ExchangePairID operatorID = ExchangePairID.fromExisting(epID.getExchangePairID());
// String senderID = epID.getWorkerID();
session.setAttribute("remoteID", cm.getRemoteID());
// session.setAttribute("operatorID", operatorID);
} else {
final Integer senderID = (Integer) session.getAttribute("remoteID");
if (senderID != null) {
final MessageWrapper mw = new MessageWrapper();
mw.senderID = senderID;
mw.message = tm;
// ExchangePairID operatorID = (ExchangePairID) session.getAttribute("operatorID");
messageBuffer.add(mw);
} else {
LOGGER.error("Error: message received from an unknown unit: " + message);
}
}
} else {
LOGGER.error("Error: Unknown message received: " + message);
}
}
@Override
public void sessionIdle(final IoSession session, final IdleStatus status) {
if (status.equals(IdleStatus.BOTH_IDLE)) {
session.close(false);
}
}
}
>>>>>>>
<<<<<<<
final ConcurrentHashMap<Integer, SocketInfo> workers;
final ServerBootstrap ipcServer;
private Channel ipcServerChannel;
final ConcurrentHashMap<Integer, HashMap<Integer, Integer>> workersAssignedToQuery;
final ConcurrentHashMap<Integer, BitSet> workersReceivedQuery;
=======
private final ConcurrentHashMap<Integer, SocketInfo> workers;
private final NioSocketAcceptor acceptor;
private final ServerHandler minaHandler;
private final ConcurrentHashMap<Integer, HashMap<Integer, Integer>> workersAssignedToQuery;
private final ConcurrentHashMap<Integer, BitSet> workersReceivedQuery;
>>>>>>>
final ConcurrentHashMap<Integer, SocketInfo> workers;
final ServerBootstrap ipcServer;
private Channel ipcServerChannel;
final ConcurrentHashMap<Integer, HashMap<Integer, Integer>> workersAssignedToQuery;
final ConcurrentHashMap<Integer, BitSet> workersReceivedQuery;
<<<<<<<
if (ch == null || !ch.isConnected()) {
System.out.println("Fail to connect the worker: " + worker + ". Continue cleaning");
=======
if (session == null) {
LOGGER.error("Fail to connect the worker: " + worker + ". Continue cleaning");
>>>>>>>
if (ch == null || !ch.isConnected()) {
LOGGER.error("Fail to connect the worker: " + worker + ". Continue cleaning"); |
<<<<<<<
final SQLiteQueryScan scan1 = new SQLiteQueryScan("select * from " + testtable1Key, schema);
final SQLiteQueryScan scan2 = new SQLiteQueryScan("select * from " + testtable2Key, schema);
=======
final SQLiteQueryScan scan1 =
new SQLiteQueryScan(null, "select * from " + testtable1Key.toString("sqlite"), schema);
final SQLiteQueryScan scan2 =
new SQLiteQueryScan(null, "select * from " + testtable2Key.toString("sqlite"), schema);
>>>>>>>
final SQLiteQueryScan scan1 = new SQLiteQueryScan("select * from " + testtable1Key.toString("sqlite"), schema);
final SQLiteQueryScan scan2 = new SQLiteQueryScan("select * from " + testtable2Key.toString("sqlite"), schema);
<<<<<<<
new SQLiteSQLProcessor("select * from " + temptable1Key + " inner join " + temptable2Key + " on "
+ temptable1Key + ".name=" + temptable2Key + ".name", outputSchema, new Operator[] { buffer1, buffer2 });
=======
new SQLiteSQLProcessor(null, "select * from " + temptable1Key.toString("sqlite") + " inner join "
+ temptable2Key.toString("sqlite") + " on " + temptable1Key.toString("sqlite") + ".name="
+ temptable2Key.toString("sqlite") + ".name", outputSchema, new Operator[] { buffer1, buffer2 });
>>>>>>>
new SQLiteSQLProcessor("select * from " + temptable1Key.toString("sqlite") + " inner join "
+ temptable2Key.toString("sqlite") + " on " + temptable1Key.toString("sqlite") + ".name="
+ temptable2Key.toString("sqlite") + ".name", outputSchema, new Operator[] { buffer1, buffer2 }); |
<<<<<<<
private final Operator[] children;
private final Schema outputSchema;
public Merge(final Schema schema, final Operator child1, final Operator child2) {
Preconditions.checkArgument(child1.getSchema().equals(child2.getSchema()));
Preconditions.checkArgument(child1.getSchema().equals(schema));
outputSchema = schema;
children = new Operator[2];
children[0] = child1;
children[1] = child2;
}
=======
private Operator child1, child2;
private final Schema outputSchema;
>>>>>>>
private final Operator[] children;
private final Schema outputSchema; |
<<<<<<<
FileUtils.write(gemFile, displayFileName); // unique contents to avoid dependency bundling
final Dependency dependency = new Dependency(gemFile);
=======
FileUtils.write(tempFile, displayFileName, Charset.defaultCharset()); // unique contents to avoid dependency bundling
final Dependency dependency = new Dependency(tempFile);
>>>>>>>
FileUtils.write(gemFile, displayFileName, Charset.defaultCharset()); // unique contents to avoid dependency bundling
final Dependency dependency = new Dependency(gemFile); |
<<<<<<<
final SQLiteQueryScan scanTable = new SQLiteQueryScan("select * from " + testtableKey, schema);
=======
final SQLiteQueryScan scanTable =
new SQLiteQueryScan(null, "select * from " + testtableKey.toString("sqlite"), schema);
>>>>>>>
final SQLiteQueryScan scanTable = new SQLiteQueryScan("select * from " + testtableKey.toString("sqlite"), schema); |
<<<<<<<
final SQLiteQueryScan scanCount = new SQLiteQueryScan("SELECT COUNT(*) FROM " + tuplesRRKey, countResultSchema);
=======
final SQLiteQueryScan scanCount =
new SQLiteQueryScan(null, "SELECT COUNT(*) FROM " + tuplesRRKey.toString("sqlite"), countResultSchema);
>>>>>>>
final SQLiteQueryScan scanCount =
new SQLiteQueryScan("SELECT COUNT(*) FROM " + tuplesRRKey.toString("sqlite"), countResultSchema); |
<<<<<<<
for (int column = 0; column < tb.numColumns(); ++column) {
uniqueTuples.put(column, tbColumns.get(column), inColumnRow);
}
tupleIndexList = new TIntArrayList(1);
=======
tupleIndexList = new TIntArrayList();
>>>>>>>
tupleIndexList = new TIntArrayList(1); |
<<<<<<<
import com.google.common.collect.ImmutableMap;
=======
import com.google.common.base.Preconditions;
>>>>>>>
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
<<<<<<<
=======
this.filename = filename;
>>>>>>>
this.filename = filename;
<<<<<<<
protected void init(final ImmutableMap<String, Object> execEnvVars) throws DbException {
=======
public void init() throws DbException {
buffer = new TupleBatchBuffer(getSchema());
if (filename != null) {
try {
inputStream = new FileInputStream(filename);
} catch (FileNotFoundException e) {
e.printStackTrace();
return;
}
}
Preconditions.checkArgument(inputStream != null, "FileScan input stream has not been set!");
>>>>>>>
protected void init(final ImmutableMap<String, Object> execEnvVars) throws DbException {
buffer = new TupleBatchBuffer(getSchema());
if (filename != null) {
try {
inputStream = new FileInputStream(filename);
} catch (FileNotFoundException e) {
e.printStackTrace();
return;
}
}
Preconditions.checkArgument(inputStream != null, "FileScan input stream has not been set!"); |
<<<<<<<
final SQLiteQueryScan scan = new SQLiteQueryScan("select distinct * from " + testtableKey, schema);
=======
final SQLiteQueryScan scan =
new SQLiteQueryScan(null, "select distinct * from " + testtableKey.toString("sqlite"), schema);
>>>>>>>
final SQLiteQueryScan scan =
new SQLiteQueryScan("select distinct * from " + testtableKey.toString("sqlite"), schema);
<<<<<<<
new SQLiteSQLProcessor("select distinct * from " + temptableKey, schema, new Operator[] { block2 });
=======
new SQLiteSQLProcessor(null, "select distinct * from " + temptableKey.toString("sqlite"), schema,
new Operator[] { block2 });
>>>>>>>
new SQLiteSQLProcessor("select distinct * from " + temptableKey.toString("sqlite"), schema,
new Operator[] { block2 }); |
<<<<<<<
// shuffleTestSQLite(args);
// sqliteInsertSpeedTest();
filesystemWriteTest();
=======
//shuffleTestSQLite(args);
dupElimTestSQLite(args);
>>>>>>>
// shuffleTestSQLite(args);
// sqliteInsertSpeedTest();
filesystemWriteTest();
//shuffleTestSQLite(args);
dupElimTestSQLite(args); |
<<<<<<<
=======
/**
* The working thread, which executes the child operator and send the tuples to the paired LocalMultiwayConsumer
* operator.
*/
class WorkingThread extends Thread {
/** Constructor, set the thread name. */
public WorkingThread() {
super();
setName("LocalMultiwayProducer-WorkingThread-" + getId());
}
@Override
public void run() {
final Channel channel = getConnectionPool().reserveLongTermConnection(selfWorkerID);
try {
final TupleBatchBuffer buffer = new TupleBatchBuffer(getSchema());
TupleBatch tup = null;
TransportMessage[] dms = null;
while (!child.eos()) {
while ((tup = child.next()) != null) {
tup.compactInto(buffer);
while ((dms = buffer.popFilledAsTM(getOperatorIDs())) != null) {
for (TransportMessage dm : dms) {
channel.write(dm);
}
}
}
while ((dms = buffer.popAnyAsTM(getOperatorIDs())) != null) {
for (TransportMessage dm : dms) {
channel.write(dm);
}
}
if (child.eoi()) {
for (ExchangePairID operatorID : getOperatorIDs()) {
channel.write(IPCUtils.eoiTM(operatorID));
}
child.setEOI(false);
}
}
for (final ExchangePairID operatorID : getOperatorIDs()) {
channel.write(IPCUtils.eosTM(operatorID));
}
} catch (final DbException e) {
e.printStackTrace();
} finally {
getConnectionPool().releaseLongTermConnection(channel);
}
}
}
>>>>>>>
<<<<<<<
public LocalMultiwayProducer(final Operator child, final ExchangePairID[] operatorIDs) {
super(child, operatorIDs);
=======
private transient WorkingThread runningThread;
/**
* The paired collect consumer address.
*/
private final int selfWorkerID;
private Operator child;
public LocalMultiwayProducer(final Operator child, final ExchangePairID[] operatorIDs, final int selfWorkerID) {
super(operatorIDs);
this.child = child;
this.selfWorkerID = selfWorkerID;
>>>>>>>
public LocalMultiwayProducer(final Operator child, final ExchangePairID[] operatorIDs) {
super(child, operatorIDs); |
<<<<<<<
=======
/** The logger for this class. */
private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(OperatorTestUsingSQLiteStorage.class);
>>>>>>>
/** The logger for this class. */
private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(OperatorTestUsingSQLiteStorage.class); |
<<<<<<<
public static File getAbsoluteDBFile(final int workerId) {
String fileName =
FilenameUtils.concat(DeploymentUtils.getPathToWorkerDir(workingDir, workerId), "worker_" + workerId
+ "_data.db");
return new File(fileName);
=======
public static void deleteTable(final int workerID, final RelationKey relationKey) throws IOException,
CatalogException {
try {
SQLiteUtils.deleteTable(getAbsoluteDBFile(workerID).getAbsolutePath(), relationKey);
} catch (SQLiteException e) {
throw new CatalogException(e);
}
}
public static boolean existsTable(final int workerID, final RelationKey relationKey) throws IOException,
CatalogException {
try {
return SQLiteUtils.existsTable(getAbsoluteDBFile(workerID).getAbsolutePath(), relationKey);
} catch (SQLiteException e) {
throw new CatalogException(e);
}
}
public static File getAbsoluteDBFile(final int workerID) throws CatalogException, FileNotFoundException {
final String workerDir = getWorkerFolder(workerID);
final WorkerCatalog wc = WorkerCatalog.open(FilenameUtils.concat(workerDir, "worker.catalog"));
final SQLiteInfo sqliteInfo =
(SQLiteInfo) ConnectionInfo.of(MyriaConstants.STORAGE_SYSTEM_SQLITE, wc
.getConfigurationValue(MyriaSystemConfigKeys.WORKER_STORAGE_DATABASE_CONN_INFO));
final File ret = new File(sqliteInfo.getDatabaseFilename());
wc.close();
return ret;
>>>>>>>
public static File getAbsoluteDBFile(final int workerId) {
String fileName =
FilenameUtils.concat(DeploymentUtils.getPathToWorkerDir(workingDir, workerId), "worker_" + workerId
+ "_data.db");
return new File(fileName);
}
public static void deleteTable(final int workerID, final RelationKey relationKey) throws IOException,
CatalogException {
try {
SQLiteUtils.deleteTable(getAbsoluteDBFile(workerID).getAbsolutePath(), relationKey);
} catch (SQLiteException e) {
throw new CatalogException(e);
}
}
public static boolean existsTable(final int workerID, final RelationKey relationKey) throws IOException,
CatalogException {
try {
return SQLiteUtils.existsTable(getAbsoluteDBFile(workerID).getAbsolutePath(), relationKey);
} catch (SQLiteException e) {
throw new CatalogException(e);
} |
<<<<<<<
groupAggs = new HashMap<SimpleArrayWrapper, Aggregator[]>();
=======
if (gfields == null || gfields.length == 0) {
this.gfields = new int[0];
groupBy = false;
groupAggs = null;
} else {
this.gfields = gfields;
groupBy = true;
groupAggs = new HashMap<SimpleArrayWrapper, Aggregator[]>();
}
>>>>>>>
groupAggs = new HashMap<SimpleArrayWrapper, Aggregator[]>();
this.gfields = gfields;
<<<<<<<
/**
* Returns the next tuple. If there is a group by field, then the first field is the field by which we are grouping,
* and the second field is the result of computing the aggregate, If there is no group by field, then the result tuple
* should contain one field representing the result of the aggregate. Should return null if there are no more tuples.
*
* @throws DbException if any error occurs.
* @throws InterruptedException if interrupted
* @return result TB.
*/
=======
>>>>>>>
/**
* Returns the next tuple. If there is a group by field, then the first field is the field by which we are grouping,
* and the second field is the result of computing the aggregate, If there is no group by field, then the result tuple
* should contain one field representing the result of the aggregate. Should return null if there are no more tuples.
*
* @throws DbException if any error occurs.
* @throws InterruptedException if interrupted
* @return result TB.
*/
<<<<<<<
}
// add the tuples into the aggregator
for (SimpleArrayWrapper saw : tmpMap.keySet()) {
Aggregator[] aggs = groupAggs.get(saw);
TupleBatchBuffer tbb = tmpMap.get(saw);
TupleBatch filledTb = null;
while ((filledTb = tbb.popAny()) != null) {
for (final Aggregator aggLocal : aggs) {
aggLocal.add(filledTb);
=======
// add the tuples into the aggregator
for (SimpleArrayWrapper saw : tmpMap.keySet()) {
Aggregator[] aggs = groupAggs.get(saw);
TupleBatchBuffer tbb = tmpMap.get(saw);
TupleBatch filledTb = null;
while ((filledTb = tbb.popAny()) != null) {
for (Aggregator aggregator : aggs) {
aggregator.add(filledTb);
}
>>>>>>>
}
// add the tuples into the aggregator
for (SimpleArrayWrapper saw : tmpMap.keySet()) {
Aggregator[] aggs = groupAggs.get(saw);
TupleBatchBuffer tbb = tmpMap.get(saw);
TupleBatch filledTb = null;
while ((filledTb = tbb.popAny()) != null) {
for (final Aggregator aggLocal : aggs) {
aggLocal.add(filledTb); |
<<<<<<<
import edu.washington.escience.myria.api.MyriaJsonMapperProvider;
import edu.washington.escience.myria.api.encoding.DatasetEncoding;
import edu.washington.escience.myria.api.encoding.DatasetStatus;
=======
>>>>>>>
import edu.washington.escience.myria.api.encoding.DatasetStatus;
<<<<<<<
public static String ingest(final RelationKey key, final Schema schema, final DataSource source,
@Nullable final Character delimiter, @Nullable final PartitionFunction pf) throws JsonProcessingException {
DatasetEncoding ingest = new DatasetEncoding();
ingest.relationKey = key;
ingest.schema = schema;
ingest.source = source;
if (delimiter != null) {
ingest.delimiter = delimiter;
}
if (pf != null) {
ingest.partitionFunction = pf;
}
return MyriaJsonMapperProvider.getWriter().writeValueAsString(ingest);
}
=======
>>>>>>> |
<<<<<<<
break;
default:
LOGGER.error("Unexpected control message received at master: {}", controlM);
}
break;
case QUERY:
final QueryMessage qm = m.getQueryMessage();
final SubQueryId subQueryId = new SubQueryId(qm.getQueryId(), qm.getSubqueryId());
MasterSubQuery mqp = executingSubQueries.get(subQueryId);
switch (qm.getType()) {
case QUERY_READY_TO_EXECUTE:
LOGGER.error("Query ready to execute");
mqp.queryReceivedByWorker(senderID);
break;
case QUERY_COMPLETE:
LOGGER.error("Query complete");
QueryReport qr = qm.getQueryReport();
if (qr.getSuccess()) {
LOGGER.error("Query success");
mqp.workerComplete(senderID);
} else {
LOGGER.error("Query failed before");
ObjectInputStream osis = null;
Throwable cause = null;
try {
osis = new ObjectInputStream(new ByteArrayInputStream(qr.getCause().toByteArray()));
cause = (Throwable) (osis.readObject());
} catch (IOException | ClassNotFoundException e) {
LOGGER.error("Error decoding failure cause", e);
}
LOGGER.error("Query failed after");
mqp.workerFail(senderID, cause);
LOGGER.error("Worker #{} failed in executing query #{}.", senderID, subQueryId, cause);
=======
}
break;
default:
LOGGER.error("Unexpected control message received at master: {}", controlM);
}
break;
case QUERY:
final QueryMessage qm = m.getQueryMessage();
final SubQueryId subQueryId = new SubQueryId(qm.getQueryId(), qm.getSubqueryId());
MasterSubQuery mqp = executingSubQueries.get(subQueryId);
switch (qm.getType()) {
case QUERY_READY_TO_EXECUTE:
mqp.queryReceivedByWorker(senderID);
break;
case QUERY_COMPLETE:
QueryReport qr = qm.getQueryReport();
if (qr.getSuccess()) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("Query #{} in Worker #{} succeeded.", subQueryId, senderID);
}
mqp.workerComplete(senderID);
} else {
ObjectInputStream osis = null;
Throwable cause = null;
try {
osis = new ObjectInputStream(new ByteArrayInputStream(qr.getCause().toByteArray()));
cause = (Throwable) (osis.readObject());
} catch (IOException | ClassNotFoundException e) {
LOGGER.error("Error decoding failure cause", e);
>>>>>>>
break;
default:
LOGGER.error("Unexpected control message received at master: {}", controlM);
}
break;
case QUERY:
final QueryMessage qm = m.getQueryMessage();
final SubQueryId subQueryId = new SubQueryId(qm.getQueryId(), qm.getSubqueryId());
MasterSubQuery mqp = executingSubQueries.get(subQueryId);
switch (qm.getType()) {
case QUERY_READY_TO_EXECUTE:
mqp.queryReceivedByWorker(senderID);
break;
case QUERY_COMPLETE:
QueryReport qr = qm.getQueryReport();
if (qr.getSuccess()) {
if (LOGGER.isInfoEnabled()) {
LOGGER.info("Query #{} in Worker #{} succeeded.", subQueryId, senderID);
}
mqp.workerComplete(senderID);
} else {
ObjectInputStream osis = null;
Throwable cause = null;
try {
osis = new ObjectInputStream(new ByteArrayInputStream(qr.getCause().toByteArray()));
cause = (Throwable) (osis.readObject());
} catch (IOException | ClassNotFoundException e) {
LOGGER.error("Error decoding failure cause", e);
}
mqp.workerFail(senderID, cause);
LOGGER.error("Worker #{} failed in executing query #{}.", senderID, subQueryId, cause); |
<<<<<<<
/**
* Count, always of long type.
* */
=======
private double stdev;
>>>>>>>
private double stdev;
/**
* Count, always of long type.
* */ |
<<<<<<<
/* Start the workers */
QueryFuture qf =
submitQuery("ingest " + relationKey.toString(), "ingest " + relationKey.toString(),
"ingest " + relationKey.toString(), new SingleQueryPlanWithArgs(scatter), workerPlans, false).sync();
if (qf == null) {
return null;
}
/* TODO(dhalperi) -- figure out how to populate the numTuples column. */
DatasetStatus status =
new DatasetStatus(relationKey, source.getSchema(), -1, qf.getQuery().getQueryID(), qf.getQuery()
.getExecutionStatistics().getEndTime());
return status;
=======
qf =
submitQuery("ingest " + relationKey.toString("sqlite"), "ingest " + relationKey.toString("sqlite"), "ingest "
+ relationKey.toString("sqlite"), new SubQueryPlan(scatter), workerPlans, false);
>>>>>>>
qf =
submitQuery("ingest " + relationKey.toString(), "ingest " + relationKey.toString(), "ingest "
+ relationKey.toString(), new SubQueryPlan(scatter), workerPlans, false);
<<<<<<<
QueryFuture qf =
submitQuery("import " + relationKey.toString(), "import " + relationKey.toString(),
"import " + relationKey.toString(), new SingleQueryPlanWithArgs(new SinkRoot(new EOSSource())),
workerPlans, false).sync();
if (qf == null) {
throw new DbException("Cannot import dataset right now, server is overloaded.");
=======
ListenableFuture<Query> qf =
submitQuery("import " + relationKey.toString("sqlite"), "import " + relationKey.toString("sqlite"), "import "
+ relationKey.toString("sqlite"), new SubQueryPlan(new SinkRoot(new EOSSource())), workerPlans, false);
Query queryState;
try {
queryState = qf.get();
} catch (ExecutionException e) {
throw new DbException("Error executing query", e.getCause());
>>>>>>>
ListenableFuture<Query> qf =
submitQuery("import " + relationKey.toString(), "import " + relationKey.toString(), "import "
+ relationKey.toString(), new SubQueryPlan(new SinkRoot(new EOSSource())), workerPlans, false);
Query queryState;
try {
queryState = qf.get();
} catch (ExecutionException e) {
throw new DbException("Error executing query", e.getCause());
<<<<<<<
SingleQueryPlanWithArgs workerPlan = new SingleQueryPlanWithArgs(producer);
Map<Integer, SingleQueryPlanWithArgs> workerPlans =
new HashMap<Integer, SingleQueryPlanWithArgs>(scanWorkers.size());
=======
/* Construct the workers' {@link SingleQueryPlanWithArgs}. */
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<Integer, SubQueryPlan>(scanWorkers.size());
>>>>>>>
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<>(scanWorkers.size());
<<<<<<<
SingleQueryPlanWithArgs workerPlan = new SingleQueryPlanWithArgs(producer);
Map<Integer, SingleQueryPlanWithArgs> workerPlans = new HashMap<>(actualWorkers.size());
=======
/* Construct the workers' {@link SingleQueryPlanWithArgs}. */
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<>(actualWorkers.size());
>>>>>>>
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<>(actualWorkers.size());
<<<<<<<
public QueryFuture startLogDataStream(final long queryId, final long fragmentId, final long start, final long end,
final TupleWriter writer) throws DbException {
=======
public ListenableFuture<Query> startHistogramDataStream(final long queryId, final long fragmentId,
final TupleWriter writer) throws DbException {
>>>>>>>
public QueryFuture startLogDataStream(final long queryId, final long fragmentId, final long start, final long end,
final TupleWriter writer) throws DbException {
/* Get the relation's schema, to make sure it exists. */
final QueryStatusEncoding queryStatus;
try {
queryStatus = catalog.getQuery(queryId);
} catch (CatalogException e) {
throw new DbException(e);
}
Preconditions.checkArgument(queryStatus != null, "query %s not found", queryId);
Preconditions.checkArgument(queryStatus.status == QueryStatusEncoding.Status.SUCCESS,
"query %s did not succeed (%s)", queryId, queryStatus.status);
Preconditions.checkArgument(queryStatus.profilingMode, "query %s was not run with profiling enabled", queryId);
Preconditions.checkArgument(start < end, "range cannot be negative");
final Schema schema =
new Schema(ImmutableList.of(Type.STRING_TYPE, Type.LONG_TYPE, Type.LONG_TYPE, Type.LONG_TYPE), ImmutableList
.of("opId", "startTime", "endTime", "numTuples"));
Set<Integer> actualWorkers = ((QueryEncoding) queryStatus.physicalPlan).getWorkers();
String queryString =
Joiner.on(' ').join("SELECT opid, starttime, endtime, numtuples FROM",
MyriaConstants.PROFILING_RELATION.toString(getDBMS()), "WHERE fragmentId =", fragmentId, "AND queryid =",
queryId, "AND endtime >", start, "AND starttime <", end, "ORDER BY starttime ASC");
DbQueryScan scan = new DbQueryScan(queryString, schema);
ImmutableList.Builder<Expression> emitExpressions = ImmutableList.builder();
emitExpressions.add(new Expression("workerId", new WorkerIdExpression()));
for (int column = 0; column < schema.numColumns(); column++) {
VariableExpression copy = new VariableExpression(column);
emitExpressions.add(new Expression(schema.getColumnName(column), copy));
}
Apply addWorkerId = new Apply(scan, emitExpressions.build());
final ExchangePairID operatorId = ExchangePairID.newID();
CollectProducer producer = new CollectProducer(addWorkerId, operatorId, MyriaConstants.MASTER_ID);
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<>(actualWorkers.size());
for (Integer worker : actualWorkers) {
workerPlans.put(worker, workerPlan);
}
final CollectConsumer consumer =
new CollectConsumer(addWorkerId.getSchema(), operatorId, ImmutableSet.copyOf(actualWorkers));
DataOutput output = new DataOutput(consumer, writer);
final SubQueryPlan masterPlan = new SubQueryPlan(output);
/* Submit the plan for the download. */
String planString =
Joiner.on('\0').join("download profiling data (query=", queryId, ", fragment=", fragmentId, ", range=[",
Joiner.on(", ").join(start, end), "]", ")");
try {
return submitQuery(planString, planString, planString, masterPlan, workerPlans, false);
} catch (CatalogException e) {
throw new DbException(e);
}
}
/**
* @param queryId query id.
* @param fragmentId the fragment id to return data for. All fragments, if < 0.
* @param start start of the histogram
* @param end the end of the histogram
* @param step the step size between min and max
* @param writer writer to get data.
* @return profiling logs for the query.
*
* @throws DbException if there is an error when accessing profiling logs.
*/
public QueryFuture startHistogramDataStream(final long queryId, final long fragmentId, final long start,
final long end, final long step, final TupleWriter writer) throws DbException {
<<<<<<<
SingleQueryPlanWithArgs workerPlan = new SingleQueryPlanWithArgs(producer);
Map<Integer, SingleQueryPlanWithArgs> workerPlans = new HashMap<>(actualWorkers.size());
=======
/* Construct the workers' {@link SingleQueryPlanWithArgs}. */
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<>(actualWorkers.size());
>>>>>>>
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<>(actualWorkers.size());
<<<<<<<
CollectProducer producer = new CollectProducer(scan, operatorId, MyriaConstants.MASTER_ID);
SingleQueryPlanWithArgs workerPlan = new SingleQueryPlanWithArgs(producer);
Map<Integer, SingleQueryPlanWithArgs> workerPlans = new HashMap<>(actualWorkers.size());
for (Integer worker : actualWorkers) {
workerPlans.put(worker, workerPlan);
}
/* Construct the master plan. */
final CollectConsumer consumer =
new CollectConsumer(scan.getSchema(), operatorId, ImmutableSet.copyOf(actualWorkers));
// Aggregate range on master
final Aggregate sumAggregate =
new Aggregate(consumer, new int[] { 0, 1 }, new int[] { Aggregator.AGG_OP_MIN, Aggregator.AGG_OP_MAX });
DataOutput output = new DataOutput(sumAggregate, writer);
final SingleQueryPlanWithArgs masterPlan = new SingleQueryPlanWithArgs(output);
=======
final StatefulApply hist = new StatefulApply(order, eb.build(), ib.build(), ub.build());
DataOutput output = new DataOutput(hist, writer);
final SubQueryPlan masterPlan = new SubQueryPlan(output);
>>>>>>>
CollectProducer producer = new CollectProducer(scan, operatorId, MyriaConstants.MASTER_ID);
SubQueryPlan workerPlan = new SubQueryPlan(producer);
Map<Integer, SubQueryPlan> workerPlans = new HashMap<>(actualWorkers.size());
for (Integer worker : actualWorkers) {
workerPlans.put(worker, workerPlan);
}
/* Construct the master plan. */
final CollectConsumer consumer =
new CollectConsumer(scan.getSchema(), operatorId, ImmutableSet.copyOf(actualWorkers));
// Aggregate range on master
final Aggregate sumAggregate =
new Aggregate(consumer, new int[] { 0, 1 }, new int[] { Aggregator.AGG_OP_MIN, Aggregator.AGG_OP_MAX });
DataOutput output = new DataOutput(sumAggregate, writer);
final SubQueryPlan masterPlan = new SubQueryPlan(output); |
<<<<<<<
@Override
public void begin(RepositorySessionDelegate receiver) {
receiver.beginCallback(RepoStatusCode.DONE);
}
@Override
public void finish(RepositorySessionDelegate receiver) {
receiver.finishCallback(RepoStatusCode.DONE);
}
=======
>>>>>>> |
<<<<<<<
=======
HashMap<Integer, IoHandler> handlers = new HashMap<Integer, IoHandler>();
handlers.put(0, handler);
IPCConnectionPool connectionPool = new IPCConnectionPool(0, computingUnits, handlers);
while ((tb = tbb.popAny()) != null) {
List<Column<?>> columns = tb.outputRawData();
final ColumnMessage[] columnProtos = new ColumnMessage[columns.size()];
int j = 0;
for (final Column<?> c : columns) {
columnProtos[j] = c.serializeToProto();
j++;
}
>>>>>>>
<<<<<<<
=======
// initialSession.close(false).awaitUninterruptibly();
LOGGER.debug("sent " + sent);
>>>>>>> |
<<<<<<<
QueryStatusEncoding status = server.getQueryManager().getQueryStatus(queryId);
assertEquals(Status.SUCCESS, status.status);
=======
QueryStatusEncoding status = server.getQueryStatus(queryId);
assertEquals(status.message, Status.SUCCESS, status.status);
>>>>>>>
QueryStatusEncoding status = server.getQueryManager().getQueryStatus(queryId);
assertEquals(status.message, Status.SUCCESS, status.status); |
<<<<<<<
final SQLiteQueryScan scan1 = new SQLiteQueryScan("select * from " + testtableKey, tableSchema);
=======
final SQLiteQueryScan scan1 =
new SQLiteQueryScan(null, "select * from " + testtableKey.toString("sqlite"), tableSchema);
>>>>>>>
final SQLiteQueryScan scan1 = new SQLiteQueryScan("select * from " + testtableKey.toString("sqlite"), tableSchema);
<<<<<<<
new LocalMultiwayProducer(scan1, new ExchangePairID[] { consumerID1, consumerID2 });
final LocalMultiwayConsumer multiConsumer1_1 = new LocalMultiwayConsumer(multiProducer1.getSchema(), consumerID1);
final LocalMultiwayConsumer multiConsumer1_2 = new LocalMultiwayConsumer(multiProducer1.getSchema(), consumerID2);
=======
new LocalMultiwayProducer(scan1, new ExchangePairID[] { consumerID1, consumerID2 }, WORKER_ID[0]);
final LocalMultiwayConsumer multiConsumer1_1 =
new LocalMultiwayConsumer(multiProducer1.getSchema(), consumerID1, WORKER_ID[0]);
final LocalMultiwayConsumer multiConsumer1_2 =
new LocalMultiwayConsumer(multiProducer1.getSchema(), consumerID2, WORKER_ID[0]);
>>>>>>>
new LocalMultiwayProducer(scan1, new ExchangePairID[] { consumerID1, consumerID2 });
final LocalMultiwayConsumer multiConsumer1_1 = new LocalMultiwayConsumer(multiProducer1.getSchema(), consumerID1);
final LocalMultiwayConsumer multiConsumer1_2 = new LocalMultiwayConsumer(multiProducer1.getSchema(), consumerID2);
<<<<<<<
new LocalMultiwayProducer(scan1, new ExchangePairID[] { consumerID1, consumerID2 });
final LocalMultiwayConsumer multiConsumer2_1 = new LocalMultiwayConsumer(multiProducer2.getSchema(), consumerID1);
final LocalMultiwayConsumer multiConsumer2_2 = new LocalMultiwayConsumer(multiProducer2.getSchema(), consumerID2);
=======
new LocalMultiwayProducer(scan1, new ExchangePairID[] { consumerID1, consumerID2 }, WORKER_ID[1]);
final LocalMultiwayConsumer multiConsumer2_1 =
new LocalMultiwayConsumer(multiProducer2.getSchema(), consumerID1, WORKER_ID[1]);
final LocalMultiwayConsumer multiConsumer2_2 =
new LocalMultiwayConsumer(multiProducer2.getSchema(), consumerID2, WORKER_ID[1]);
>>>>>>>
new LocalMultiwayProducer(scan1, new ExchangePairID[] { consumerID1, consumerID2 });
final LocalMultiwayConsumer multiConsumer2_1 = new LocalMultiwayConsumer(multiProducer2.getSchema(), consumerID1);
final LocalMultiwayConsumer multiConsumer2_2 = new LocalMultiwayConsumer(multiProducer2.getSchema(), consumerID2); |
<<<<<<<
import java.util.Arrays;
=======
import java.util.ArrayList;
>>>>>>>
import java.util.ArrayList;
import java.util.Arrays;
<<<<<<<
public final class LocalJoin extends Operator {
private class IndexedTuple {
private final int index;
private final TupleBatch tb;
public IndexedTuple(final TupleBatch tb, final int index) {
this.tb = tb;
this.index = index;
}
public boolean compareField(final IndexedTuple another, final int colIndx1, final int colIndx2) {
final Type type1 = tb.getSchema().getColumnType(colIndx1);
// type check in query plan?
final int rowIndx1 = index;
final int rowIndx2 = another.index;
switch (type1) {
case INT_TYPE:
return tb.getInt(colIndx1, rowIndx1) == another.tb.getInt(colIndx2, rowIndx2);
case DOUBLE_TYPE:
return tb.getDouble(colIndx1, rowIndx1) == another.tb.getDouble(colIndx2, rowIndx2);
case STRING_TYPE:
return tb.getString(colIndx1, rowIndx1).equals(another.tb.getString(colIndx2, rowIndx2));
case FLOAT_TYPE:
return tb.getFloat(colIndx1, rowIndx1) == another.tb.getFloat(colIndx2, rowIndx2);
case BOOLEAN_TYPE:
return tb.getBoolean(colIndx1, rowIndx1) == another.tb.getBoolean(colIndx2, rowIndx2);
case LONG_TYPE:
return tb.getLong(colIndx1, rowIndx1) == another.tb.getLong(colIndx2, rowIndx2);
}
return false;
}
@Override
public boolean equals(final Object o) {
if (!(o instanceof IndexedTuple)) {
return false;
}
final IndexedTuple another = (IndexedTuple) o;
if (!(tb.getSchema().equals(another.tb.getSchema()))) {
return false;
}
for (int i = 0; i < tb.getSchema().numColumns(); ++i) {
if (!compareField(another, i, i)) {
return false;
}
}
return true;
}
@Override
public int hashCode() {
return tb.hashCode(index);
}
public int hashCode4Keys(final int[] colIndx) {
return tb.hashCode(index, colIndx);
}
public boolean joinEquals(final Object o, final int[] compareIndx1, final int[] compareIndx2) {
if (!(o instanceof IndexedTuple)) {
return false;
}
if (compareIndx1.length != compareIndx2.length) {
return false;
}
final IndexedTuple another = (IndexedTuple) o;
for (int i = 0; i < compareIndx1.length; ++i) {
if (!compareField(another, compareIndx1[i], compareIndx2[i])) {
return false;
}
}
return true;
}
}
=======
public final class LocalJoin extends Operator {
>>>>>>>
public final class LocalJoin extends Operator {
<<<<<<<
private final Schema outputSchema;
private final int[] compareIndx1;
private final int[] compareIndx2;
private transient HashMap<Integer, List<IndexedTuple>> hashTable1;
private transient HashMap<Integer, List<IndexedTuple>> hashTable2;
private transient TupleBatchBuffer ans;
=======
private final Schema outputSchema;
private final int[] compareIndx1;
private final int[] compareIndx2;
private transient HashMap<Integer, List<Integer>> hashTable1Indices;
private transient HashMap<Integer, List<Integer>> hashTable2Indices;
private transient TupleBatchBuffer hashTable1;
private transient TupleBatchBuffer hashTable2;
private transient TupleBatchBuffer ans;
>>>>>>>
private final Schema outputSchema;
private final int[] compareIndx1;
private final int[] compareIndx2;
private transient HashMap<Integer, List<Integer>> hashTable1Indices;
private transient HashMap<Integer, List<Integer>> hashTable2Indices;
private transient TupleBatchBuffer hashTable1;
private transient TupleBatchBuffer hashTable2;
private transient TupleBatchBuffer ans;
<<<<<<<
=======
ans = new TupleBatchBuffer(outputSchema);
>>>>>>>
<<<<<<<
processChildTB(tb, true);
} else {
if (child1.eoi()) {
child1.setEOI(false);
childrenEOI[0] = true;
}
=======
processChildTB(tb, hashTable1, hashTable2, hashTable1Indices, hashTable2Indices, compareIndx1, compareIndx2,
true);
>>>>>>>
processChildTB(tb, hashTable1, hashTable2, hashTable1Indices, hashTable2Indices, compareIndx1, compareIndx2,
true);
} else {
if (child1.eoi()) {
child1.setEOI(false);
childrenEOI[0] = true;
}
<<<<<<<
processChildTB(tb, false);
} else {
if (child2.eoi()) {
child2.setEOI(false);
childrenEOI[1] = true;
}
=======
processChildTB(tb, hashTable2, hashTable1, hashTable2Indices, hashTable1Indices, compareIndx2, compareIndx1,
false);
>>>>>>>
processChildTB(tb, hashTable2, hashTable1, hashTable2Indices, hashTable1Indices, compareIndx2, compareIndx1,
false);
} else {
if (child2.eoi()) {
child2.setEOI(false);
childrenEOI[1] = true;
}
<<<<<<<
public void init(final ImmutableMap<String, Object> execEnvVars) throws DbException {
hashTable1 = new HashMap<Integer, List<IndexedTuple>>();
hashTable2 = new HashMap<Integer, List<IndexedTuple>>();
ans = new TupleBatchBuffer(outputSchema);
=======
public void init() throws DbException {
hashTable1Indices = new HashMap<Integer, List<Integer>>();
hashTable2Indices = new HashMap<Integer, List<Integer>>();
hashTable1 = new TupleBatchBuffer(child1.getSchema());
hashTable2 = new TupleBatchBuffer(child2.getSchema());
ans = new TupleBatchBuffer(getSchema());
>>>>>>>
public void init(final ImmutableMap<String, Object> execEnvVars) throws DbException {
hashTable1Indices = new HashMap<Integer, List<Integer>>();
hashTable2Indices = new HashMap<Integer, List<Integer>>();
hashTable1 = new TupleBatchBuffer(child1.getSchema());
hashTable2 = new TupleBatchBuffer(child2.getSchema());
ans = new TupleBatchBuffer(outputSchema);
<<<<<<<
=======
>>>>>>> |
<<<<<<<
import java.util.Map;
import java.util.Random;
import java.util.concurrent.LinkedBlockingQueue;
=======
>>>>>>>
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
<<<<<<<
import edu.washington.escience.myriad.operator.DupElim;
=======
import edu.washington.escience.myriad.coordinator.catalog.CatalogException;
>>>>>>>
import edu.washington.escience.myriad.operator.DupElim;
<<<<<<<
import edu.washington.escience.myriad.parallel.ExchangePairID;
=======
import edu.washington.escience.myriad.parallel.Consumer;
import edu.washington.escience.myriad.parallel.EOSController;
import edu.washington.escience.myriad.parallel.Exchange.ExchangePairID;
>>>>>>>
import edu.washington.escience.myriad.parallel.Consumer;
import edu.washington.escience.myriad.parallel.EOSController;
import edu.washington.escience.myriad.parallel.ExchangePairID;
<<<<<<<
private final int MaxID = 100;
private final int numTbl1Worker1 = 10000;
private final int numTbl1Worker2 = 10000;
=======
private final int MaxID = 400;
private final int numTbl1Worker1 = 500;
private final int numTbl1Worker2 = 600;
>>>>>>>
private final int MaxID = 400;
private final int numTbl1Worker1 = 50;
private final int numTbl1Worker2 = 60;
<<<<<<<
final SQLiteQueryScan scan1 = new SQLiteQueryScan("select * from " + testtableKey, tableSchema);
final SQLiteQueryScan scan2 = new SQLiteQueryScan("select * from " + identityKey, tableSchema);
final ExchangePairID consumerID1 = ExchangePairID.newID();
final ExchangePairID consumerID2 = ExchangePairID.newID();
final LocalMultiwayConsumer sendBack_worker1 = new LocalMultiwayConsumer(tableSchema, consumerID1);
final LocalMultiwayConsumer sendBack_worker2 = new LocalMultiwayConsumer(tableSchema, consumerID1);
final LocalMultiwayConsumer send2server_worker1 = new LocalMultiwayConsumer(tableSchema, consumerID2);
final LocalMultiwayConsumer send2server_worker2 = new LocalMultiwayConsumer(tableSchema, consumerID2);
final IDBInput idbinput_worker1 = new IDBInput(scan2, sendBack_worker1);
final IDBInput idbinput_worker2 = new IDBInput(scan2, sendBack_worker2);
=======
final SQLiteQueryScan scan1 =
new SQLiteQueryScan(null, "select * from " + testtableKey.toString("sqlite"), tableSchema);
final SQLiteQueryScan scan2 =
new SQLiteQueryScan(null, "select * from " + testtableKey.toString("sqlite"), tableSchema);
>>>>>>>
final SQLiteQueryScan scan1 = new SQLiteQueryScan("select * from " + testtableKey.toString("sqlite"), tableSchema);
final SQLiteQueryScan scan2 = new SQLiteQueryScan("select * from " + testtableKey.toString("sqlite"), tableSchema);
<<<<<<<
new LocalMultiwayProducer(dupelim_worker1, new ExchangePairID[] { consumerID1, consumerID2 });
=======
new LocalMultiwayProducer(idbinput_worker1, new ExchangePairID[] { consumerID1, consumerID2 }, WORKER_ID[0]);
>>>>>>>
new LocalMultiwayProducer(idbinput_worker1, new ExchangePairID[] { consumerID1, consumerID2 });
<<<<<<<
new LocalMultiwayProducer(dupelim_worker2, new ExchangePairID[] { consumerID1, consumerID2 });
=======
new LocalMultiwayProducer(idbinput_worker2, new ExchangePairID[] { consumerID1, consumerID2 }, WORKER_ID[1]);
final LocalMultiwayConsumer send2join_worker1 = new LocalMultiwayConsumer(tableSchema, consumerID1, WORKER_ID[0]);
final LocalMultiwayConsumer send2join_worker2 = new LocalMultiwayConsumer(tableSchema, consumerID1, WORKER_ID[1]);
final LocalMultiwayConsumer send2server_worker1 = new LocalMultiwayConsumer(tableSchema, consumerID2, WORKER_ID[0]);
final LocalMultiwayConsumer send2server_worker2 = new LocalMultiwayConsumer(tableSchema, consumerID2, WORKER_ID[1]);
final LocalJoin join_worker1 =
new LocalJoin(sc1, send2join_worker1, new int[] { 1 }, new int[] { 0 }, new int[] { 0 }, new int[] { 1 });
final LocalJoin join_worker2 =
new LocalJoin(sc1, send2join_worker2, new int[] { 1 }, new int[] { 0 }, new int[] { 0 }, new int[] { 1 });
sp3_worker1.setChildren(new Operator[] { join_worker1 });
sp3_worker2.setChildren(new Operator[] { join_worker2 });
>>>>>>>
new LocalMultiwayProducer(idbinput_worker2, new ExchangePairID[] { consumerID1, consumerID2 });
final LocalMultiwayConsumer send2join_worker1 = new LocalMultiwayConsumer(tableSchema, consumerID1);
final LocalMultiwayConsumer send2join_worker2 = new LocalMultiwayConsumer(tableSchema, consumerID1);
final LocalMultiwayConsumer send2server_worker1 = new LocalMultiwayConsumer(tableSchema, consumerID2);
final LocalMultiwayConsumer send2server_worker2 = new LocalMultiwayConsumer(tableSchema, consumerID2);
final LocalJoin join_worker1 =
new LocalJoin(sc1, send2join_worker1, new int[] { 1 }, new int[] { 0 }, new int[] { 0 }, new int[] { 1 });
final LocalJoin join_worker2 =
new LocalJoin(sc1, send2join_worker2, new int[] { 1 }, new int[] { 0 }, new int[] { 0 }, new int[] { 1 });
sp3_worker1.setChildren(new Operator[] { join_worker1 });
sp3_worker2.setChildren(new Operator[] { join_worker2 }); |
<<<<<<<
import edu.washington.escience.myriad.operator.RootOperator;
=======
import edu.washington.escience.myriad.operator.Project;
>>>>>>>
import edu.washington.escience.myriad.operator.Project;
import edu.washington.escience.myriad.operator.RootOperator;
<<<<<<<
Map<Integer, RootOperator[]> queryPlan = deserializeJsonQueryPlan(userData.get("query_plan"));
=======
final String expectedResultSize = (String) userData.get("expected_result_size");
Map<Integer, Operator[]> queryPlan = deserializeJsonQueryPlan(userData.get("query_plan"));
>>>>>>>
final String expectedResultSize = (String) userData.get("expected_result_size");
Map<Integer, RootOperator[]> queryPlan = deserializeJsonQueryPlan(userData.get("query_plan"));
<<<<<<<
private static RootOperator[] deserializeJsonLocalPlans(Object jsonLocalPlanList) throws Exception {
=======
private static Operator[] deserializeJsonLocalPlans(final Object jsonLocalPlanList) throws Exception {
>>>>>>>
private static RootOperator[] deserializeJsonLocalPlans(Object jsonLocalPlanList) throws Exception {
<<<<<<<
private static RootOperator deserializeJsonLocalPlan(Object jsonLocalPlan) throws Exception {
=======
private static Operator deserializeJsonLocalPlan(final Object jsonLocalPlan) throws Exception {
>>>>>>>
private static RootOperator deserializeJsonLocalPlan(final Object jsonLocalPlan) throws Exception {
<<<<<<<
return new SQLiteInsert(child, RelationKey.of(userName, programName, relationName), overwrite);
=======
return new SQLiteInsert(child, RelationKey.of(userName, programName, relationName), null, null, overwrite);
}
>>>>>>>
return new SQLiteInsert(child, RelationKey.of(userName, programName, relationName), overwrite);
}
<<<<<<<
return new SQLiteQueryScan("SELECT * from " + relationKey, schema);
=======
return new SQLiteQueryScan(null, "SELECT * from " + relationKey, schema);
}
case "Consumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new Consumer(schema, operatorID, workerIDs);
}
case "ShuffleConsumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new ShuffleConsumer(schema, operatorID, workerIDs);
}
case "CollectConsumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new CollectConsumer(schema, operatorID, workerIDs);
}
case "LocalMultiwayConsumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int workerID = deserializeInt(jsonOperator, "arg_workerID");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new LocalMultiwayConsumer(schema, operatorID, workerID);
}
case "ShuffleProducer": {
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
PartitionFunction<?, ?> pf = deserializePF(jsonOperator, "arg_pf", workerIDs.length);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "ShuffleProducer child Operator " + childName + " not previously defined");
return new ShuffleProducer(child, operatorID, workerIDs, pf);
}
case "CollectProducer": {
int workerID = deserializeInt(jsonOperator, "arg_workerID");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "CollectProducer child Operator " + childName + " not previously defined");
return new CollectProducer(child, operatorID, workerID);
}
case "LocalMultiwayProducer": {
int workerID = deserializeInt(jsonOperator, "arg_workerID");
long[] tmpOpIDs = deserializeLongArray(jsonOperator, "arg_operatorIDs", false);
ExchangePairID[] operatorIDs = new ExchangePairID[tmpOpIDs.length];
for (int i = 0; i < tmpOpIDs.length; ++i) {
operatorIDs[i] = ExchangePairID.fromExisting(tmpOpIDs[i]);
}
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "CollectProducer child Operator " + childName + " not previously defined");
return new LocalMultiwayProducer(child, operatorIDs, workerID);
}
case "IDBInput": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int selfWorkerID = deserializeInt(jsonOperator, "arg_workerID");
int selfIDBID = deserializeInt(jsonOperator, "arg_idbID");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
int controllerWorkerID = deserializeInt(jsonOperator, "arg_controllerWorkerID");
String child1Name = deserializeString(jsonOperator, "arg_child1");
String child2Name = deserializeString(jsonOperator, "arg_child2");
String child3Name = deserializeString(jsonOperator, "arg_child3");
Operator child1 = operators.get(child1Name);
Operator child2 = operators.get(child2Name);
Operator child3 = operators.get(child3Name);
Objects.requireNonNull(child1, "IDBInput child1 Operator " + child1Name + " not previously defined");
Objects.requireNonNull(child2, "IDBInput child2 Operator " + child2Name + " not previously defined");
Objects.requireNonNull(child3, "IDBInput child3 Operator " + child3Name + " not previously defined");
return new IDBInput(schema, selfWorkerID, selfIDBID, operatorID, controllerWorkerID, child1, child2, child3);
}
case "EOSController": {
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "IDBInput child Operator " + childName + " not previously defined");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
long[] tmpOpIDs = deserializeLongArray(jsonOperator, "arg_idbOpIDs", false);
ExchangePairID[] idbOpIDs = new ExchangePairID[tmpOpIDs.length];
for (int i = 0; i < tmpOpIDs.length; ++i) {
idbOpIDs[i] = ExchangePairID.fromExisting(tmpOpIDs[i]);
}
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
return new EOSController(child, operatorID, idbOpIDs, workerIDs);
}
case "DupElim": {
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "DupElim child Operator " + childName + " not previously defined");
return new DupElim(child);
}
case "Merge": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
String child1Name = deserializeString(jsonOperator, "arg_child1");
Operator child1 = operators.get(child1Name);
Objects.requireNonNull(child1, "Merge child1 Operator " + child1Name + " not previously defined");
String child2Name = deserializeString(jsonOperator, "arg_child2");
Operator child2 = operators.get(child2Name);
Objects.requireNonNull(child2, "Merge child2 Operator " + child2Name + " not previously defined");
return new Merge(schema, child1, child2);
}
case "Project": {
int[] fieldList = deserializeIntArray(jsonOperator, "arg_fieldList", false);
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "Merge child Operator " + childName + " not previously defined");
return new Project(fieldList, child);
}
>>>>>>>
return new SQLiteQueryScan("SELECT * from " + relationKey.toString("sqlite"), schema);
}
case "Consumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new Consumer(schema, operatorID, workerIDs);
}
case "ShuffleConsumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new ShuffleConsumer(schema, operatorID, workerIDs);
}
case "CollectConsumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new CollectConsumer(schema, operatorID, workerIDs);
}
case "LocalMultiwayConsumer": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int workerID = deserializeInt(jsonOperator, "arg_workerID");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
return new LocalMultiwayConsumer(schema, operatorID);
}
case "ShuffleProducer": {
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
PartitionFunction<?, ?> pf = deserializePF(jsonOperator, "arg_pf", workerIDs.length);
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "ShuffleProducer child Operator " + childName + " not previously defined");
return new ShuffleProducer(child, operatorID, workerIDs, pf);
}
case "CollectProducer": {
int workerID = deserializeInt(jsonOperator, "arg_workerID");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "CollectProducer child Operator " + childName + " not previously defined");
return new CollectProducer(child, operatorID, workerID);
}
case "LocalMultiwayProducer": {
int workerID = deserializeInt(jsonOperator, "arg_workerID");
long[] tmpOpIDs = deserializeLongArray(jsonOperator, "arg_operatorIDs", false);
ExchangePairID[] operatorIDs = new ExchangePairID[tmpOpIDs.length];
for (int i = 0; i < tmpOpIDs.length; ++i) {
operatorIDs[i] = ExchangePairID.fromExisting(tmpOpIDs[i]);
}
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "CollectProducer child Operator " + childName + " not previously defined");
return new LocalMultiwayProducer(child, operatorIDs);
}
case "IDBInput": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
int selfWorkerID = deserializeInt(jsonOperator, "arg_workerID");
int selfIDBID = deserializeInt(jsonOperator, "arg_idbID");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
int controllerWorkerID = deserializeInt(jsonOperator, "arg_controllerWorkerID");
String child1Name = deserializeString(jsonOperator, "arg_child1");
String child2Name = deserializeString(jsonOperator, "arg_child2");
String child3Name = deserializeString(jsonOperator, "arg_child3");
Operator child1 = operators.get(child1Name);
Operator child2 = operators.get(child2Name);
Consumer child3 = (Consumer) operators.get(child3Name);
Objects.requireNonNull(child1, "IDBInput child1 Operator " + child1Name + " not previously defined");
Objects.requireNonNull(child2, "IDBInput child2 Operator " + child2Name + " not previously defined");
Objects.requireNonNull(child3, "IDBInput child3 Operator " + child3Name + " not previously defined");
return new IDBInput(selfIDBID, operatorID, controllerWorkerID, child1, child2, child3);
}
case "EOSController": {
String childName = deserializeString(jsonOperator, "arg_child");
Consumer child = (Consumer) operators.get(childName);
Objects.requireNonNull(child, "IDBInput child Operator " + childName + " not previously defined");
ExchangePairID operatorID = ExchangePairID.fromExisting(deserializeLong(jsonOperator, "arg_operatorID"));
long[] tmpOpIDs = deserializeLongArray(jsonOperator, "arg_idbOpIDs", false);
ExchangePairID[] idbOpIDs = new ExchangePairID[tmpOpIDs.length];
for (int i = 0; i < tmpOpIDs.length; ++i) {
idbOpIDs[i] = ExchangePairID.fromExisting(tmpOpIDs[i]);
}
int[] workerIDs = deserializeIntArray(jsonOperator, "arg_workerIDs", false);
return new EOSController(new Consumer[] { child }, idbOpIDs, workerIDs);
}
case "DupElim": {
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "DupElim child Operator " + childName + " not previously defined");
return new DupElim(child);
}
case "Merge": {
Schema schema = deserializeSchema(jsonOperator, "arg_schema");
String child1Name = deserializeString(jsonOperator, "arg_child1");
Operator child1 = operators.get(child1Name);
Objects.requireNonNull(child1, "Merge child1 Operator " + child1Name + " not previously defined");
String child2Name = deserializeString(jsonOperator, "arg_child2");
Operator child2 = operators.get(child2Name);
Objects.requireNonNull(child2, "Merge child2 Operator " + child2Name + " not previously defined");
return new Merge(new Operator[] { child1, child2 });
}
case "Project": {
int[] fieldList = deserializeIntArray(jsonOperator, "arg_fieldList", false);
String childName = deserializeString(jsonOperator, "arg_child");
Operator child = operators.get(childName);
Objects.requireNonNull(child, "Merge child Operator " + childName + " not previously defined");
return new Project(fieldList, child);
} |
<<<<<<<
final long exchangePairIDLong = data.getOperatorID();
final ExchangePairID exchangePairID = ExchangePairID.fromExisting(exchangePairIDLong);
ConsumerChannel cc = consumerChannelMap.get(new ExchangeChannelID(exchangePairIDLong, senderID));
final Schema operatorSchema = cc.op.getSchema();
switch (data.getType()) {
case EOS:
LOGGER.info("EOS from: " + senderID + "," + workers.get(senderID));
receiveData(new ExchangeData(exchangePairID, senderID, operatorSchema, MetaMessage.EOS));
cc.ownerTask.notifyNewInput();
break;
case EOI:
receiveData(new ExchangeData(exchangePairID, senderID, operatorSchema, MetaMessage.EOI));
cc.ownerTask.notifyNewInput();
break;
case NORMAL:
final List<ColumnMessage> columnMessages = data.getColumnsList();
final Column<?>[] columnArray = new Column[columnMessages.size()];
int idx = 0;
for (final ColumnMessage cm : columnMessages) {
columnArray[idx++] = ColumnFactory.columnFromColumnMessage(cm, data.getNumTuples());
}
final List<Column<?>> columns = Arrays.asList(columnArray);
=======
final ExchangePairID exchangePairID = ExchangePairID.fromExisting(data.getOperatorID());
final Schema operatorSchema = exchangeSchema.get(exchangePairID);
if (data.getType() == DataMessageType.EOS) {
receiveData(new ExchangeData(exchangePairID, senderID, operatorSchema, 0));
} else if (data.getType() == DataMessageType.EOI) {
receiveData(new ExchangeData(exchangePairID, senderID, operatorSchema, 1));
} else {
final List<ColumnMessage> columnMessages = data.getColumnsList();
final Column<?>[] columnArray = new Column<?>[columnMessages.size()];
int idx = 0;
for (final ColumnMessage cm : columnMessages) {
columnArray[idx++] = ColumnFactory.columnFromColumnMessage(cm);
}
final List<Column<?>> columns = Arrays.asList(columnArray);
>>>>>>>
final long exchangePairIDLong = data.getOperatorID();
final ExchangePairID exchangePairID = ExchangePairID.fromExisting(exchangePairIDLong);
ConsumerChannel cc = consumerChannelMap.get(new ExchangeChannelID(exchangePairIDLong, senderID));
final Schema operatorSchema = cc.op.getSchema();
switch (data.getType()) {
case EOS:
LOGGER.info("EOS from: " + senderID + "," + workers.get(senderID));
receiveData(new ExchangeData(exchangePairID, senderID, operatorSchema, MetaMessage.EOS));
cc.ownerTask.notifyNewInput();
break;
case EOI:
receiveData(new ExchangeData(exchangePairID, senderID, operatorSchema, MetaMessage.EOI));
cc.ownerTask.notifyNewInput();
break;
case NORMAL:
final List<ColumnMessage> columnMessages = data.getColumnsList();
final Column<?>[] columnArray = new Column[columnMessages.size()];
int idx = 0;
for (final ColumnMessage cm : columnMessages) {
columnArray[idx++] = ColumnFactory.columnFromColumnMessage(cm, data.getNumTuples());
}
final List<Column<?>> columns = Arrays.asList(columnArray);
<<<<<<<
static final String usage = "Usage: Server catalogFile [-explain] [-f queryFile]";
=======
/** Time constant. */
private static final int ONE_SEC_IN_MILLIS = 1000;
/** Time constant. */
private static final int ONE_MIN_IN_MILLIS = 60 * ONE_SEC_IN_MILLIS;
/** Time constant. */
private static final int ONE_HR_IN_MILLIS = 60 * ONE_MIN_IN_MILLIS;
/** The usage message for this server. */
static final String USAGE = "Usage: Server catalogFile [-explain] [-f queryFile]";
>>>>>>>
static final String usage = "Usage: Server catalogFile [-explain] [-f queryFile]";
<<<<<<<
private final ConcurrentHashMap<Integer, SocketInfo> workers;
/**
* Queries currently in execution.
* */
private final ConcurrentHashMap<Long, MasterQueryPartition> activeQueries;
=======
private final ConcurrentHashMap<Integer, SocketInfo> workers;
private final ConcurrentHashMap<Long, HashMap<Integer, Integer>> workersAssignedToQuery;
>>>>>>>
private final ConcurrentHashMap<Integer, SocketInfo> workers;
/**
* Queries currently in execution.
* */
private final ConcurrentHashMap<Long, MasterQueryPartition> activeQueries;
<<<<<<<
private void setupExchangeChannels(final MasterQueryPartition query) throws DbException {
RootOperator root = query.root;
QuerySubTreeTask drivingTask =
new QuerySubTreeTask(MyriaConstants.MASTER_ID, query, root, serverQueryExecutor,
Worker.QueryExecutionMode.NON_BLOCKING);
query.setRootTask(drivingTask);
if (root instanceof Producer) {
Producer p = (Producer) root;
ExchangePairID[] oIDs = p.operatorIDs();
int[] destWorkers = p.getDestinationWorkerIDs(MyriaConstants.MASTER_ID);
for (int i = 0; i < destWorkers.length; i++) {
ExchangeChannelID ecID = new ExchangeChannelID(oIDs[i].getLong(), destWorkers[i]);
ProducerChannel pc = new ProducerChannel(drivingTask, p, ecID);
producerChannelMap.put(ecID, pc);
=======
public TupleBatchBuffer startServerQuery(final Long queryId, final CollectConsumer serverPlan,
final String expectedResultSize) throws DbException {
final BitSet workersReceived = workersReceivedQuery.get(queryId);
final HashMap<Integer, Integer> workersAssigned = workersAssignedToQuery.get(queryId);
/* Can't progress until all assigned workers are alive. */
while (!aliveWorkers.containsAll(workersAssigned.keySet())) {
try {
Thread.sleep(SHORT_SLEEP_MILLIS);
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
}
}
/* Can't progress until all assigned workers have received the query. */
while (workersReceived.nextClearBit(0) < workersAssigned.size()) {
try {
Thread.sleep(SHORT_SLEEP_MILLIS);
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
>>>>>>>
private void setupExchangeChannels(final MasterQueryPartition query) throws DbException {
RootOperator root = query.root;
QuerySubTreeTask drivingTask =
new QuerySubTreeTask(MyriaConstants.MASTER_ID, query, root, serverQueryExecutor,
Worker.QueryExecutionMode.NON_BLOCKING);
query.setRootTask(drivingTask);
if (root instanceof Producer) {
Producer p = (Producer) root;
ExchangePairID[] oIDs = p.operatorIDs();
int[] destWorkers = p.getDestinationWorkerIDs(MyriaConstants.MASTER_ID);
for (int i = 0; i < destWorkers.length; i++) {
ExchangeChannelID ecID = new ExchangeChannelID(oIDs[i].getLong(), destWorkers[i]);
// ProducerChannel pc = new ProducerChannel(drivingTask, p, ecID);
ProducerChannel pc = new ProducerChannel(drivingTask, ecID);
producerChannelMap.put(ecID, pc);
<<<<<<<
=======
serverPlan.close();
dataBuffer.remove(serverPlan.getOperatorID());
final Date end = new Date();
LOGGER.info("Number of results: " + cnt);
if (expectedResultSize != null) {
if (Integer.parseInt(expectedResultSize) != cnt) {
LOGGER.info("WRONG SIZE! expected to be: " + expectedResultSize);
} else {
LOGGER.info("Correct size!");
}
}
int elapse = (int) (end.getTime() - start.getTime());
final int hour = elapse / ONE_HR_IN_MILLIS;
elapse -= hour * ONE_HR_IN_MILLIS;
final int minute = elapse / ONE_MIN_IN_MILLIS;
elapse -= minute * ONE_MIN_IN_MILLIS;
final int second = elapse / ONE_SEC_IN_MILLIS;
elapse -= second * ONE_SEC_IN_MILLIS;
LOGGER.info(String.format("Time elapsed: %1$dh%2$dm%3$ds.%4$03d", hour, minute, second, elapse));
return outBufferForTesting;
>>>>>>>
<<<<<<<
throws CatalogException, InterruptedException {
/* The Master plan: scan the data and scatter it to all the workers. */
FileScan fileScan = new FileScan(new ByteArrayInputStream(data), schema);
=======
throws CatalogException {
/* The Server plan: scan the data and scatter it to all the workers. */
FileScan fileScan = new FileScan(schema);
fileScan.setInputStream(new ByteArrayInputStream(data));
>>>>>>>
throws CatalogException, InterruptedException {
/* The Master plan: scan the data and scatter it to all the workers. */
FileScan fileScan = new FileScan(schema);
fileScan.setInputStream(new ByteArrayInputStream(data));
<<<<<<<
submitQuery("ingest " + relationKey, "ingest " + relationKey, scatter, workerPlans).sync();
} catch (CatalogException | DbException e) {
=======
Long queryId =
startQuery("ingest " + relationKey.toString("sqlite"), "ingest " + relationKey.toString("sqlite"),
workerPlans);
/* Do it! */
scatter.open();
while (!scatter.eos()) {
scatter.next();
}
scatter.close();
while (!queryCompleted(queryId)) {
try {
Thread.sleep(SHORT_SLEEP_MILLIS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
} catch (CatalogException | IOException | DbException e) {
>>>>>>>
submitQuery("ingest " + relationKey, "ingest " + relationKey, scatter, workerPlans).sync();
} catch (CatalogException | DbException e) { |
<<<<<<<
@Type(name = "Consumer", value = ConsumerEncoding.class),
@Type(name = "LocalMultiwayConsumer", value = LocalMultiwayConsumerEncoding.class),
=======
@Type(name = "LocalJoin", value = LocalJoinEncoding.class),
@Type(name = "LocalCountingJoin", value = LocalCountingJoinEncoding.class),
@Type(name = "MultiGroupByAggregate", value = MultiGroupByAggregateEncoding.class),
>>>>>>>
@Type(name = "Consumer", value = ConsumerEncoding.class), |
<<<<<<<
transient URI parsedUri;
=======
@JsonProperty private final String uri;
>>>>>>>
private URI parsedUri; |
<<<<<<<
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
=======
import java.io.IOException;
>>>>>>>
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
<<<<<<<
import edu.washington.escience.myria.MyriaSystemConfigKeys;
=======
import edu.washington.escience.myria.MyriaConstants.FTMODE;
>>>>>>>
import edu.washington.escience.myria.MyriaConstants.FTMODE;
import edu.washington.escience.myria.MyriaSystemConfigKeys; |
<<<<<<<
import android.os.Build;
=======
import android.graphics.Typeface;
>>>>>>>
import android.os.Build;
import android.graphics.Typeface;
<<<<<<<
import android.text.StaticLayout;
=======
import android.text.TextUtils;
import android.text.style.StyleSpan;
>>>>>>>
import android.text.StaticLayout;
import android.text.TextUtils;
import android.text.style.StyleSpan;
<<<<<<<
import java.lang.reflect.Field;
=======
import java.util.ArrayList;
import java.util.Arrays;
>>>>>>>
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays; |
<<<<<<<
=======
import com.stratio.connector.streaming.core.engine.query.util.StreamUtil;
>>>>>>>
import com.stratio.connector.streaming.core.engine.query.util.StreamUtil;
<<<<<<<
import com.stratio.connector.streaming.core.engine.query.util.StreamUtil;
import com.stratio.connector.streaming.core.engine.query.queryExecutor.ConnectorQueryExecutor;
import com.stratio.connector.streaming.core.engine.query.queryExecutor.QueryExecutorFactory;
=======
>>>>>>>
import com.stratio.connector.streaming.core.engine.query.util.StreamUtil;
import com.stratio.connector.streaming.core.engine.query.queryExecutor.ConnectorQueryExecutor;
import com.stratio.connector.streaming.core.engine.query.queryExecutor.QueryExecutorFactory;
<<<<<<<
queryExecutor = QueryExecutorFactory.getQueryExecutor(queryData);
queryExecutor.executeQuery(query, connection, queryData,resultHandler);
=======
queryExecutor = new ConnectorQueryExecutor();
queryExecutor.executeQuery(query, connection, queryData, resultHandler);
>>>>>>>
queryExecutor = QueryExecutorFactory.getQueryExecutor(queryData);
queryExecutor.executeQuery(query, connection, queryData,resultHandler); |
<<<<<<<
@Test
void testCompilerDirective() {
String module = "&НаКлиенте\n" +
"Процедура Метод6()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
assertThat(methods).hasSize(1);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод6");
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testCompilerDirectiveAtServerNoContext() {
String module = "&НаСервереБезКонтекста\n" +
"Процедура Метод7()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод7");
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_SERVER_NO_CONTEXT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testSeveralCompilerDirective() {
String module = "&НаКлиенте\n&НаСервере\n" +
"Процедура Метод8()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод8");
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testNonCompilerDirectiveAndNonAnnotation() {
String module = "Процедура Метод9()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод9");
assertThat(methodSymbol.getCompilerDirective().isPresent()).isEqualTo(false);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testAnnotation() {
String module = "&После\n" +
"Процедура Метод10()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
assertThat(methods).hasSize(1);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().isPresent()).isEqualTo(false);
var annotations = methodSymbol.getAnnotations();
assertThat(annotations).hasSize(1);
assertThat(annotations.get(0)).isEqualTo(Annotation.AFTER);
}
@Test
void testCompilerDirectiveAndAnnotation() {
String module = "&НаКлиенте\n&После\n" +
"Процедура Метод11()\n" +
"КонецПроцедуры";
checkCompilerDirective_for_AtClient_AndAnnotation_After(module);
}
@Test
void testCompilerDirectiveAndAnnotationOtherOrder() {
String module = "&После\n&НаКлиенте\n" +
"Процедура Метод12()\n" +
"КонецПроцедуры";
checkCompilerDirective_for_AtClient_AndAnnotation_After(module);
}
@Test
void testCompilerDirectiveAndAnnotationForFunction() {
String module = "&НаКлиенте\n&После\n" +
"Функция Метод13()\n" +
"КонецФункции";
checkCompilerDirective_for_AtClient_AndAnnotation_After(module);
}
private static void checkCompilerDirective_for_AtClient_AndAnnotation_After(String module) {
List<MethodSymbol> methods = getMethodSymbols(module);
assertThat(methods).hasSize(1);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT);
var annotations = methodSymbol.getAnnotations();
assertThat(annotations).hasSize(1);
assertThat(annotations.get(0)).isEqualTo(Annotation.AFTER);
}
@Test
void testSeveralAnnotationsForFunction() {
String module = "&Аннотация1\n" +
"&Аннотация2\n" +
"Процедура Метод14() Экспорт\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().isPresent()).isEqualTo(false);
var annotations = methodSymbol.getAnnotations();
assertThat(annotations).hasSize(2);
assertThat(annotations.get(0)).isEqualTo(Annotation.CUSTOM);
assertThat(annotations.get(1)).isEqualTo(Annotation.CUSTOM);
}
// there are certain preferences when &НаКлиентеНаСервереБезКонтекста is used in a managed form module
// when it is combined with another directive, it is the one that takes effect
// for example, order 1
//&НаКлиентеНаСервереБезКонтекста
//&НаСервереБезКонтекста
//shows Server in the debugger and the server-side object ТаблицаЗначений is available
// or order 2
//&НаСервереБезКонтекста
//&НаКлиентеНаСервереБезКонтекста
//behaves the same
//i.e. the order of these 2 directives does not matter, &НаКлиентеНаСервереБезКонтекста is used either way.
// checked on 8.3.15
@Test
void testSeveralDirectivesWithoutContext() {
String module = "&НаСервереБезКонтекста\n" +
"&НаКлиентеНаСервереБезКонтекста\n" +
"Процедура Метод15()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_SERVER_NO_CONTEXT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testSeveralDirectivesWithoutContextReverse() {
String module = "&НаКлиентеНаСервереБезКонтекста\n" +
"&НаСервереБезКонтекста\n" +
"Процедура Метод16()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_SERVER_NO_CONTEXT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
// there are certain preferences when &НаКлиентеНаСервере is used in a command module
// when it is combined with another directive, it is the one that takes effect
// checked on 8.3.15
//order
//1
//&НаКлиентеНаСервере
//&НаКлиенте
//runs on the client when the method is called from the client
//runs on the server when the method is called from the server
//2
//&НаКлиенте
//&НаКлиентеНаСервере
//runs on the client when the method is called from the client
//runs on the server when the method is called from the server
@Test
void testSeveralDirectivesWithClient() {
String module = "&НаКлиентеНаСервере\n" +
"&НаКлиенте\n" +
"Процедура Метод17()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT_AT_SERVER);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testSeveralDirectivesWithClientReverse() {
String module = "&НаКлиенте\n" +
"&НаКлиентеНаСервере\n" +
"Процедура Метод18()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT_AT_SERVER);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
private static List<MethodSymbol> getMethodSymbols(String module) {
DocumentContext documentContext = TestUtils.getDocumentContext(module);
return documentContext.getSymbolTree().getMethods();
}
private void checkModule(ServerContext serverContext, String path, String mdoRef) throws IOException {
=======
private void checkModule(
ServerContext serverContext,
String path,
String mdoRef,
int methodsCount
) throws IOException {
>>>>>>>
@Test
void testCompilerDirective() {
String module = "&НаКлиенте\n" +
"Процедура Метод6()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
assertThat(methods).hasSize(1);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод6");
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testCompilerDirectiveAtServerNoContext() {
String module = "&НаСервереБезКонтекста\n" +
"Процедура Метод7()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод7");
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_SERVER_NO_CONTEXT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testSeveralCompilerDirective() {
String module = "&НаКлиенте\n&НаСервере\n" +
"Процедура Метод8()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод8");
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testNonCompilerDirectiveAndNonAnnotation() {
String module = "Процедура Метод9()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getName()).isEqualTo("Метод9");
assertThat(methodSymbol.getCompilerDirective().isPresent()).isEqualTo(false);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testAnnotation() {
String module = "&После\n" +
"Процедура Метод10()\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
assertThat(methods).hasSize(1);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().isPresent()).isEqualTo(false);
var annotations = methodSymbol.getAnnotations();
assertThat(annotations).hasSize(1);
assertThat(annotations.get(0)).isEqualTo(Annotation.AFTER);
}
@Test
void testCompilerDirectiveAndAnnotation() {
String module = "&НаКлиенте\n&После\n" +
"Процедура Метод11()\n" +
"КонецПроцедуры";
checkCompilerDirective_for_AtClient_AndAnnotation_After(module);
}
@Test
void testCompilerDirectiveAndAnnotationOtherOrder() {
String module = "&После\n&НаКлиенте\n" +
"Процедура Метод12()\n" +
"КонецПроцедуры";
checkCompilerDirective_for_AtClient_AndAnnotation_After(module);
}
@Test
void testCompilerDirectiveAndAnnotationForFunction() {
String module = "&НаКлиенте\n&После\n" +
"Функция Метод13()\n" +
"КонецФункции";
checkCompilerDirective_for_AtClient_AndAnnotation_After(module);
}
private static void checkCompilerDirective_for_AtClient_AndAnnotation_After(String module) {
List<MethodSymbol> methods = getMethodSymbols(module);
assertThat(methods).hasSize(1);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT);
var annotations = methodSymbol.getAnnotations();
assertThat(annotations).hasSize(1);
assertThat(annotations.get(0)).isEqualTo(Annotation.AFTER);
}
@Test
void testSeveralAnnotationsForFunction() {
String module = "&Аннотация1\n" +
"&Аннотация2\n" +
"Процедура Метод14() Экспорт\n" +
"КонецПроцедуры";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().isPresent()).isEqualTo(false);
var annotations = methodSymbol.getAnnotations();
assertThat(annotations).hasSize(2);
assertThat(annotations.get(0)).isEqualTo(Annotation.CUSTOM);
assertThat(annotations.get(1)).isEqualTo(Annotation.CUSTOM);
}
// there are certain preferences when &НаКлиентеНаСервереБезКонтекста is used in a managed form module
// when it is combined with another directive, it is the one that takes effect
// for example, order 1
//&НаКлиентеНаСервереБезКонтекста
//&НаСервереБезКонтекста
//shows Server in the debugger and the server-side object ТаблицаЗначений is available
// or order 2
//&НаСервереБезКонтекста
//&НаКлиентеНаСервереБезКонтекста
//behaves the same
//i.e. the order of these 2 directives does not matter, &НаКлиентеНаСервереБезКонтекста is used either way.
// checked on 8.3.15
@Test
void testSeveralDirectivesWithoutContext() {
String module = "&НаСервереБезКонтекста\n" +
"&НаКлиентеНаСервереБезКонтекста\n" +
"Процедура Метод15()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_SERVER_NO_CONTEXT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testSeveralDirectivesWithoutContextReverse() {
String module = "&НаКлиентеНаСервереБезКонтекста\n" +
"&НаСервереБезКонтекста\n" +
"Процедура Метод16()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_SERVER_NO_CONTEXT);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
// there are certain preferences when &НаКлиентеНаСервере is used in a command module
// when it is combined with another directive, it is the one that takes effect
// checked on 8.3.15
//order
//1
//&НаКлиентеНаСервере
//&НаКлиенте
//runs on the client when the method is called from the client
//runs on the server when the method is called from the server
//2
//&НаКлиенте
//&НаКлиентеНаСервере
//runs on the client when the method is called from the client
//runs on the server when the method is called from the server
@Test
void testSeveralDirectivesWithClient() {
String module = "&НаКлиентеНаСервере\n" +
"&НаКлиенте\n" +
"Процедура Метод17()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT_AT_SERVER);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
@Test
void testSeveralDirectivesWithClientReverse() {
String module = "&НаКлиенте\n" +
"&НаКлиентеНаСервере\n" +
"Процедура Метод18()\n" +
"КонецПроцедуры\n";
List<MethodSymbol> methods = getMethodSymbols(module);
var methodSymbol = methods.get(0);
assertThat(methodSymbol.getCompilerDirective().orElse(null)).isEqualTo(CompilerDirective.AT_CLIENT_AT_SERVER);
assertThat(methodSymbol.getAnnotations()).hasSize(0);
}
private static List<MethodSymbol> getMethodSymbols(String module) {
DocumentContext documentContext = TestUtils.getDocumentContext(module);
return documentContext.getSymbolTree().getMethods();
}
private void checkModule(
ServerContext serverContext,
String path,
String mdoRef,
int methodsCount
) throws IOException { |
<<<<<<<
import static org.mockito.internal.exceptions.Reporter.tooLittleActualInvocations;
import static org.mockito.internal.invocation.InvocationMarker.markVerified;
import static org.mockito.internal.invocation.InvocationsFinder.findInvocations;
import static org.mockito.internal.invocation.InvocationsFinder.getLastLocation;
=======
import java.util.Iterator;
import java.util.List;
>>>>>>>
import java.util.Iterator;
import static org.mockito.internal.exceptions.Reporter.tooLittleActualInvocations;
import static org.mockito.internal.invocation.InvocationMarker.markVerified;
import static org.mockito.internal.invocation.InvocationsFinder.findInvocations;
import static org.mockito.internal.invocation.InvocationsFinder.getLastLocation; |
<<<<<<<
public void defaultAnswerDoesNotAcceptNullParameter() {
throw new MockitoException("defaultAnswer() does not accept null parameter");
}
=======
public void serializableWontWorkForObjectsThatDontImplementSerializable(Class classToMock) {
throw new MockitoException(join(
"You are using the setting 'withSettings().serializable()' however the type you are trying to mock '" + classToMock.getSimpleName() + "'",
" do not implement Serializable.",
"This combination is requested, otherwise you will get an 'java.io.InvalidClassException' when the mock will be serialized",
"",
"Also note that as requested by the Java serialization specification, the whole hierarchy need to implements Serializable,",
"i.e. the top-most superclass has to implements Serializable.",
""
));
}
>>>>>>>
public void defaultAnswerDoesNotAcceptNullParameter() {
throw new MockitoException("defaultAnswer() does not accept null parameter");
}
public void serializableWontWorkForObjectsThatDontImplementSerializable(Class classToMock) {
throw new MockitoException(join(
"You are using the setting 'withSettings().serializable()' however the type you are trying to mock '" + classToMock.getSimpleName() + "'",
" do not implement Serializable.",
"This combination is requested, otherwise you will get an 'java.io.InvalidClassException' when the mock will be serialized",
"",
"Also note that as requested by the Java serialization specification, the whole hierarchy need to implements Serializable,",
"i.e. the top-most superclass has to implements Serializable.",
""
));
} |
<<<<<<<
public void check(List<Invocation> invocations, InvocationMatcher wanted, int wantedCount) {
=======
private AtLeastXNumberOfInvocationsChecker(){}
public static void checkAtLeastNumberOfInvocations(List<Invocation> invocations, InvocationMatcher wanted, int wantedCount) {
>>>>>>>
public static void checkAtLeastNumberOfInvocations(List<Invocation> invocations, InvocationMatcher wanted, int wantedCount) {
<<<<<<<
private void removeAlreadyVerified(List<Invocation> invocations) {
for (Iterator<Invocation> iterator = invocations.iterator(); iterator.hasNext(); ) {
Invocation i = iterator.next();
if (i.isVerified()) {
iterator.remove();
}
}
}
=======
public static void checkAtLeastNumberOfInvocations(List<Invocation> invocations, InvocationMatcher wanted, int wantedCount,InOrderContext orderingContext) {
List<Invocation> chunk = findAllMatchingUnverifiedChunks(invocations, wanted, orderingContext);
int actualCount = chunk.size();
if (wantedCount > actualCount) {
Location lastLocation = getLastLocation(chunk);
throw tooLittleActualInvocationsInOrder(new AtLeastDiscrepancy(wantedCount, actualCount), wanted, lastLocation);
}
markVerifiedInOrder(chunk, wanted, orderingContext);
}
>>>>>>>
private static void removeAlreadyVerified(List<Invocation> invocations) {
for (Iterator<Invocation> iterator = invocations.iterator(); iterator.hasNext(); ) {
Invocation i = iterator.next();
if (i.isVerified()) {
iterator.remove();
}
}
}
public static void checkAtLeastNumberOfInvocations(List<Invocation> invocations, InvocationMatcher wanted, int wantedCount,InOrderContext orderingContext) {
List<Invocation> chunk = findAllMatchingUnverifiedChunks(invocations, wanted, orderingContext);
int actualCount = chunk.size();
if (wantedCount > actualCount) {
Location lastLocation = getLastLocation(chunk);
throw tooLittleActualInvocationsInOrder(new AtLeastDiscrepancy(wantedCount, actualCount), wanted, lastLocation);
}
markVerifiedInOrder(chunk, wanted, orderingContext);
} |
<<<<<<<
import static org.mockito.internal.exceptions.Reporter.neverWantedButInvoked;
import static org.mockito.internal.exceptions.Reporter.tooLittleActualInvocations;
import static org.mockito.internal.exceptions.Reporter.tooManyActualInvocations;
import static org.mockito.internal.invocation.InvocationMarker.markVerified;
import static org.mockito.internal.invocation.InvocationsFinder.findInvocations;
import static org.mockito.internal.invocation.InvocationsFinder.getLastLocation;
=======
import java.util.Iterator;
>>>>>>>
import static org.mockito.internal.exceptions.Reporter.neverWantedButInvoked;
import static org.mockito.internal.exceptions.Reporter.tooLittleActualInvocations;
import static org.mockito.internal.exceptions.Reporter.tooManyActualInvocations;
import static org.mockito.internal.invocation.InvocationMarker.markVerified;
import static org.mockito.internal.invocation.InvocationsFinder.findInvocations;
import static org.mockito.internal.invocation.InvocationsFinder.getLastLocation;
import java.util.Iterator;
<<<<<<<
markVerified(actualInvocations, wanted);
=======
removeAlreadyVerified(actualInvocations);
invocationMarker.markVerified(actualInvocations, wanted);
>>>>>>>
removeAlreadyVerified(actualInvocations);
markVerified(actualInvocations, wanted); |
<<<<<<<
=======
private Set<String> setL = Set.of(listForCheckLeft.split(" "));
private Set<String> setR = Set.of(listForCheckRight.split(" "));
private Set<String> setLR = Set.of(listForCheckLeftAndRight.split(" "));
private final Set<String> setUnary = Set.of(UNARY.split(" "));
public MissingSpaceDiagnostic(DiagnosticInfo info) {
super(info);
}
>>>>>>>
private Set<String> setL = Set.of(listForCheckLeft.split(" "));
private Set<String> setR = Set.of(listForCheckRight.split(" "));
private Set<String> setLR = Set.of(listForCheckLeftAndRight.split(" "));
private final Set<String> setUnary = Set.of(UNARY.split(" "));
<<<<<<<
DiagnosticHelper.configureDiagnostic(this, configuration,
"checkSpaceToRightOfUnary", "allowMultipleCommas");
String listLParam =
(String) configuration.getOrDefault("listForCheckLeft", DEFAULT_LIST_FOR_CHECK_LEFT);
listForCheckLeft = getRegularString(listLParam);
patternL = compilePattern(listForCheckLeft);
String listRParam =
(String) configuration.getOrDefault("listForCheckRight", DEFAULT_LIST_FOR_CHECK_RIGHT);
listForCheckRight = getRegularString(listRParam);
patternR = compilePattern(listForCheckRight);
String listLRParam =
(String) configuration.getOrDefault("listForCheckLeftAndRight", DEFAULT_LIST_FOR_CHECK_LEFT_AND_RIGHT);
listForCheckLeftAndRight = getRegularString(listLRParam);
patternLr = compilePattern(listForCheckLeftAndRight);
=======
super.configure(configuration);
>>>>>>>
super.configure(configuration); |
<<<<<<<
import static org.mockito.internal.verification.checkers.NonGreedyNumberOfInvocationsInOrderChecker.check;
=======
import static org.mockito.internal.verification.checkers.MissingInvocationChecker.checkMissingInvocation;
>>>>>>>
import static org.mockito.internal.verification.checkers.NonGreedyNumberOfInvocationsInOrderChecker.check;
import static org.mockito.internal.verification.checkers.MissingInvocationChecker.checkMissingInvocation; |
<<<<<<<
@SuppressWarnings("unchecked")
protected static <S extends Type, T>
List<T> convertToNative(List<S> arr) {
List<T> out = new ArrayList<T>();
for (Iterator<S> it = arr.iterator(); it.hasNext(); ) {
out.add((T)it.next().getValue());
}
return out;
}
=======
/**
* Adds a log field to {@link EventValues}.
*/
public static class EventValuesWithLog {
private final EventValues eventValues;
private final Log log;
private EventValuesWithLog(EventValues eventValues, Log log) {
this.eventValues = eventValues;
this.log = log;
}
public List<Type> getIndexedValues() {
return eventValues.getIndexedValues();
}
public List<Type> getNonIndexedValues() {
return eventValues.getNonIndexedValues();
}
public Log getLog() {
return log;
}
}
>>>>>>>
/**
* Adds a log field to {@link EventValues}.
*/
public static class EventValuesWithLog {
private final EventValues eventValues;
private final Log log;
private EventValuesWithLog(EventValues eventValues, Log log) {
this.eventValues = eventValues;
this.log = log;
}
public List<Type> getIndexedValues() {
return eventValues.getIndexedValues();
}
public List<Type> getNonIndexedValues() {
return eventValues.getNonIndexedValues();
}
public Log getLog() {
return log;
}
}
@SuppressWarnings("unchecked")
protected static <S extends Type, T>
List<T> convertToNative(List<S> arr) {
List<T> out = new ArrayList<T>();
for (Iterator<S> it = arr.iterator(); it.hasNext(); ) {
out.add((T)it.next().getValue());
}
return out;
} |
<<<<<<<
MethodSpec.Builder methodBuilder, ParameterizedTypeName tupleType) {
=======
MethodSpec.Builder methodBuilder, ParameterizedTypeName tupleType,
List<TypeName> outputParameterTypes)
throws ClassNotFoundException {
>>>>>>>
MethodSpec.Builder methodBuilder, ParameterizedTypeName tupleType,
List<TypeName> outputParameterTypes) { |
<<<<<<<
methodBuilder.addStatement("$T function = "
+ "new $T($N, \n$T.<$T>asList($L), "
=======
methodBuilder.addStatement("final $T function = "
+ "new $T($S, \n$T.<$T>asList($L), "
>>>>>>>
methodBuilder.addStatement("final $T function = "
+ "new $T($N, \n$T.<$T>asList($L), "
<<<<<<<
methodBuilder.addStatement("$T function = new $T(\n$N, \n$T.<$T>asList($L), \n$T"
=======
methodBuilder.addStatement("final $T function = new $T(\n$S, \n$T.<$T>asList($L), \n$T"
>>>>>>>
methodBuilder.addStatement("final $T function = new $T(\n$N, \n$T.<$T>asList($L), \n$T" |
<<<<<<<
final Event event = new Event("Notify",
Arrays.<TypeReference<?>>asList(),
Arrays.<TypeReference<?>>asList(new TypeReference<Uint256>() {}, new TypeReference<Uint256>() {}));
List<Contract.EventValuesWithLog> valueList = extractEventParametersWithLog(event, transactionReceipt);
=======
List<Contract.EventValuesWithLog> valueList = extractEventParametersWithLog(NOTIFY_EVENT, transactionReceipt);
>>>>>>>
List<Contract.EventValuesWithLog> valueList = extractEventParametersWithLog(NOTIFY_EVENT, transactionReceipt);
<<<<<<<
public Observable<NotifyEventResponse> notifyEventObservable(DefaultBlockParameter startBlock, DefaultBlockParameter endBlock) {
final Event event = new Event("Notify",
Arrays.<TypeReference<?>>asList(),
Arrays.<TypeReference<?>>asList(new TypeReference<Uint256>() {}, new TypeReference<Uint256>() {}));
EthFilter filter = new EthFilter(startBlock, endBlock, getContractAddress());
filter.addSingleTopic(EventEncoder.encode(event));
=======
public Observable<NotifyEventResponse> notifyEventObservable(EthFilter filter) {
>>>>>>>
public Observable<NotifyEventResponse> notifyEventObservable(EthFilter filter) {
<<<<<<<
=======
public Observable<NotifyEventResponse> notifyEventObservable(DefaultBlockParameter startBlock, DefaultBlockParameter endBlock) {
EthFilter filter = new EthFilter(startBlock, endBlock, getContractAddress());
filter.addSingleTopic(EventEncoder.encode(NOTIFY_EVENT));
return notifyEventObservable(filter);
}
public RemoteCall<TransactionReceipt> fibonacciNotify(BigInteger number) {
final Function function = new Function(
"fibonacciNotify",
Arrays.<Type>asList(new org.web3j.abi.datatypes.generated.Uint256(number)),
Collections.<TypeReference<?>>emptyList());
return executeRemoteCallTransaction(function);
}
public RemoteCall<BigInteger> fibonacci(BigInteger number) {
final Function function = new Function("fibonacci",
Arrays.<Type>asList(new org.web3j.abi.datatypes.generated.Uint256(number)),
Arrays.<TypeReference<?>>asList(new TypeReference<Uint256>() {}));
return executeRemoteCallSingleValueReturn(function, BigInteger.class);
}
>>>>>>>
public Observable<NotifyEventResponse> notifyEventObservable(DefaultBlockParameter startBlock, DefaultBlockParameter endBlock) {
EthFilter filter = new EthFilter(startBlock, endBlock, getContractAddress());
filter.addSingleTopic(EventEncoder.encode(NOTIFY_EVENT));
return notifyEventObservable(filter);
} |
<<<<<<<
+ " org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
+ " FUNC_FUNCTIONNAME, \n"
=======
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
+ " \"functionName\", \n"
>>>>>>>
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
+ " FUNC_FUNCTIONNAME, \n"
<<<<<<<
+ " org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
+ " FUNC_FUNCTIONNAME, \n"
=======
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
+ " \"functionName\", \n"
>>>>>>>
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\n"
+ " FUNC_FUNCTIONNAME, \n"
<<<<<<<
+ " org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n"
=======
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\"functionName\", \n"
>>>>>>>
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n"
<<<<<<<
+ " org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n"
=======
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(\"functionName\", \n"
>>>>>>>
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n" |
<<<<<<<
import com.lambdaworks.redis.AbstractRedisClient;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.RedisURI;
import com.lambdaworks.redis.StatefulRedisConnectionImpl;
import com.lambdaworks.redis.api.StatefulRedisConnection;
import com.lambdaworks.redis.cluster.api.StatefulRedisClusterConnection;
=======
import com.lambdaworks.redis.AbstractRedisClient;
import com.lambdaworks.redis.RedisChannelWriter;
import com.lambdaworks.redis.RedisClusterConnection;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.RedisURI;
>>>>>>>
import com.lambdaworks.redis.AbstractRedisClient;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.RedisURI;
import com.lambdaworks.redis.StatefulRedisConnectionImpl;
import com.lambdaworks.redis.api.StatefulRedisConnection;
import com.lambdaworks.redis.cluster.api.StatefulRedisClusterConnection;
import com.lambdaworks.redis.AbstractRedisClient;
import com.lambdaworks.redis.RedisChannelWriter;
import com.lambdaworks.redis.RedisClusterConnection;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.RedisURI;
<<<<<<<
public RedisAdvancedClusterAsyncConnection<String, String> connectClusterAsync() {
return connectClusterImpl(newStringStringCodec(), getSocketAddressSupplier()).async();
=======
public RedisAdvancedClusterConnection<String, String> connectClusterAsync() {
return connectClusterAsyncImpl(codec, getSocketAddressSupplier());
>>>>>>>
public RedisAdvancedClusterAsyncConnection<String, String> connectClusterAsync() {
return connectClusterImpl(newStringStringCodec(), getSocketAddressSupplier()).async();
<<<<<<<
<K, V> StatefulRedisConnection<K, V> connectToNode(RedisCodec<K, V> codec, final SocketAddress socketAddress) {
=======
<K, V> RedisAdvancedClusterConnectionImpl<K, V> connectAsyncImpl(RedisCodec<K, V> codec, final SocketAddress socketAddress) {
>>>>>>>
<K, V> StatefulRedisConnection<K, V> connectToNode(RedisCodec<K, V> codec, final SocketAddress socketAddress) {
<<<<<<<
=======
RedisAdvancedClusterConnectionImpl<K, V> connection = newRedisAsyncConnectionImpl(handler, codec, timeout, unit);
>>>>>>>
<<<<<<<
<K, V> StatefulRedisClusterConnection<K, V> connectClusterImpl(RedisCodec<K, V> codec) {
return connectClusterImpl(codec, getSocketAddressSupplier());
=======
<K, V> RedisAdvancedClusterConnectionImpl<K, V> connectClusterAsyncImpl(RedisCodec<K, V> codec) {
return connectClusterAsyncImpl(codec, getSocketAddressSupplier());
>>>>>>>
<K, V> StatefulRedisClusterConnection<K, V> connectClusterImpl(RedisCodec<K, V> codec) {
return connectClusterImpl(codec, getSocketAddressSupplier());
<<<<<<<
<K, V> StatefulRedisClusterConnectionImpl<K, V> connectClusterImpl(RedisCodec<K, V> codec,
=======
<K, V> RedisAdvancedClusterConnectionImpl<K, V> connectClusterAsyncImpl(RedisCodec<K, V> codec,
>>>>>>>
<K, V> StatefulRedisClusterConnectionImpl<K, V> connectClusterImpl(RedisCodec<K, V> codec,
<<<<<<<
StatefulRedisClusterConnectionImpl<K, V> connection = new StatefulRedisClusterConnectionImpl<>(clusterWriter, codec,
timeout, unit);
=======
RedisAdvancedClusterConnectionImpl<K, V> connection = newRedisAsyncConnectionImpl(clusterWriter, codec, timeout, unit);
if (getPartitions() == null) {
reloadPartitions();
}
connection.setPartitions(partitions);
>>>>>>>
StatefulRedisClusterConnectionImpl<K, V> connection = new StatefulRedisClusterConnectionImpl<>(clusterWriter, codec,
timeout, unit);
<<<<<<<
StatefulRedisConnection<String, String> connection = connectToNode(initialUri.getResolvedAddress());
=======
RedisAdvancedClusterConnectionImpl<String, String> connection = connectAsyncImpl(initialUri
.getResolvedAddress());
>>>>>>>
StatefulRedisConnection<String, String> connection = connectToNode(initialUri.getResolvedAddress());
<<<<<<<
=======
/**
* Construct a new {@link RedisAdvancedClusterConnectionImpl}. Can be overridden in order to construct a subclass of
* {@link RedisAdvancedClusterConnectionImpl}
*
* @param channelWriter the channel writer
* @param codec the codec to use
* @param timeout Timeout value
* @param unit Timeout unit
* @param <K> Key type.
* @param <V> Value type.
* @return RedisAsyncConnectionImpl<K, V> instance
*/
protected <K, V> RedisAdvancedClusterConnectionImpl<K, V> newRedisAsyncConnectionImpl(
RedisChannelWriter<K, V> channelWriter, RedisCodec<K, V> codec, long timeout, TimeUnit unit) {
return new RedisAdvancedClusterConnectionImpl<K, V>(channelWriter, codec, timeout, unit);
}
>>>>>>> |
<<<<<<<
/*
* Copyright (C) 2012-2019 52°North Initiative for Geospatial Open Source
=======
/**
* Copyright (C) 2012-2020 52°North Initiative for Geospatial Open Source
>>>>>>>
/*
* Copyright (C) 2012-2020 52°North Initiative for Geospatial Open Source |
<<<<<<<
}
break;
case LONG_HALF_PACKET:
if(packageType == MySQLPacket.EOF_PACKET){
//do not pass the current half packet through to the front end
SQLEngineCtx.INSTANCE().getDataTransferChannel()
.transferToFront(mySQLBackendConnection, false, false);
}else{
//pass the current half packet through to the front end
SQLEngineCtx.INSTANCE().getDataTransferChannel()
.transferToFront(mySQLBackendConnection, true, false);
}
break;
case SHORT_HALF_PACKET:
//do not pass the current half packet through to the front end
SQLEngineCtx.INSTANCE().getDataTransferChannel()
.transferToFront(mySQLBackendConnection, false, false);
break;
case NONE:
break;
=======
break;
case SHORT_HALF_PACKET:
//do not pass the current half packet through to the front end
SQLEngineCtx.INSTANCE().getDataTransferChannel()
.transferToFront(mySQLBackendConnection, false, false);
break;
case NORMAL:
break;
>>>>>>>
break;
case SHORT_HALF_PACKET:
//do not pass the current half packet through to the front end
SQLEngineCtx.INSTANCE().getDataTransferChannel()
.transferToFront(mySQLBackendConnection, false, false);
break;
case NONE:
break; |
<<<<<<<
=======
@SqlUpdate
void updateEntry(@Bind("recordId") Long id,
@Bind("eventJson") String eventJson,
@Bind("searchKey1") final Long searchKey1,
@Bind("searchKey2") final Long searchKey2,
@Define("tableName") final String tableName);
@SqlQuery
int getCountReadyEntries(@Bind("searchKey1") final Long searchKey1,
@Bind("searchKey2") final Long searchKey2,
@Bind("now") Date now,
@Define("tableName") final String tableName);
>>>>>>>
@SqlUpdate
void updateEntry(@Bind("recordId") Long id,
@Bind("eventJson") String eventJson,
@Bind("searchKey1") final Long searchKey1,
@Bind("searchKey2") final Long searchKey2,
@Define("tableName") final String tableName); |
<<<<<<<
private DrawerLayout mDrawerLayout;
private String username;
=======
>>>>>>>
<<<<<<<
username = new GitskariosSettings(this).getAuthUser(null);
mDrawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);
=======
>>>>>>>
<<<<<<<
public void onUserEventsSelected() {
username = new GitskariosSettings(this).getAuthUser(null);
if (eventsFragment == null && username != null) {
eventsFragment = EventsListFragment.newInstance(username);
}
setFragment(eventsFragment);
}
@Override
public void onBackPressed() {
if ((mDrawerLayout != null && mDrawerLayout.isDrawerOpen(Gravity.START))) {
closeMenu();
} else {
super.onBackPressed();
}
}
@Override
=======
>>>>>>>
public void onUserEventsSelected() {
}
@Override |
<<<<<<<
this(description + " (" + instanceId + ")", instanceId, description, remoteFS, numExecutors, labelString, mode, initScript, tmpDir, Collections.<NodeProperty<?>> emptyList(), remoteAdmin, jvmopts, stopOnTerminate, idleTerminationMinutes, publicDNS, privateDNS, tags, cloudName, false, false, launchTimeout, amiType, "-1");
=======
this(description + " (" + instanceId + ")", instanceId, description, remoteFS, numExecutors, labelString, mode, initScript, tmpDir, Collections.emptyList(), remoteAdmin, jvmopts, stopOnTerminate, idleTerminationMinutes, publicDNS, privateDNS, tags, cloudName, false, false, launchTimeout, amiType);
>>>>>>>
this(description + " (" + instanceId + ")", instanceId, description, remoteFS, numExecutors, labelString, mode, initScript, tmpDir, Collections.emptyList(), remoteAdmin, jvmopts, stopOnTerminate, idleTerminationMinutes, publicDNS, privateDNS, tags, cloudName, false, false, launchTimeout, amiType, "-1");
<<<<<<<
this(description + " (" + instanceId + ")", instanceId, description, remoteFS, numExecutors, labelString, mode, initScript, tmpDir, Collections.<NodeProperty<?>> emptyList(), remoteAdmin, jvmopts, stopOnTerminate, idleTerminationMinutes, publicDNS, privateDNS, tags, cloudName, usePrivateDnsName, useDedicatedTenancy, launchTimeout, amiType, maxTotalUses);
=======
this(description + " (" + instanceId + ")", instanceId, description, remoteFS, numExecutors, labelString, mode, initScript, tmpDir, Collections.emptyList(), remoteAdmin, jvmopts, stopOnTerminate, idleTerminationMinutes, publicDNS, privateDNS, tags, cloudName, usePrivateDnsName, useDedicatedTenancy, launchTimeout, amiType);
>>>>>>>
this(description + " (" + instanceId + ")", instanceId, description, remoteFS, numExecutors, labelString, mode, initScript, tmpDir, Collections.emptyList(), remoteAdmin, jvmopts, stopOnTerminate, idleTerminationMinutes, publicDNS, privateDNS, tags, cloudName, usePrivateDnsName, useDedicatedTenancy, launchTimeout, amiType, maxTotalUses);
<<<<<<<
this(instanceId, instanceId, "debug", "/tmp/hudson", 1, "debug", Mode.NORMAL, "", "/tmp", Collections.<NodeProperty<?>> emptyList(), null, null, false, null, "Fake public", "Fake private", null, null, false, false, 0, new UnixData(null, null), "-1");
=======
this(instanceId, instanceId, "debug", "/tmp/hudson", 1, "debug", Mode.NORMAL, "", "/tmp", Collections.emptyList(), null, null, false, null, "Fake public", "Fake private", null, null, false, false, 0, new UnixData(null, null, null, null));
>>>>>>>
this(instanceId, instanceId, "debug", "/tmp/hudson", 1, "debug", Mode.NORMAL, "", "/tmp", Collections.emptyList(), null, null, false, null, "Fake public", "Fake private", null, null, false, false, 0, new UnixData(null, null, null, null), "-1"); |
<<<<<<<
private final List<EC2Tag> tags;
=======
public final boolean usePrivateDnsName;
>>>>>>>
private final List<EC2Tag> tags;
public final boolean usePrivateDnsName;
<<<<<<<
public SlaveTemplate(String ami, String zone, String securityGroups, String remoteFS, String sshPort, InstanceType type,
String labelString, String description, String initScript, String userData, String numExecutors,
String remoteAdmin, String rootCommandPrefix, String jvmopts, boolean stopOnTerminate,
String subnetId, List<EC2Tag> tags, String idleTerminationMinutes ) {
=======
public SlaveTemplate(String ami, String zone, String securityGroups, String remoteFS, String sshPort, InstanceType type, String labelString, String description, String initScript, String userData, String numExecutors, String remoteAdmin, String rootCommandPrefix, String jvmopts, boolean stopOnTerminate, boolean usePrivateDnsName) {
>>>>>>>
public SlaveTemplate(String ami, String zone, String securityGroups, String remoteFS, String sshPort, InstanceType type,
String labelString, String description, String initScript, String userData, String numExecutors,
String remoteAdmin, String rootCommandPrefix, String jvmopts, boolean stopOnTerminate,
String subnetId, List<EC2Tag> tags, String idleTerminationMinutes, boolean usePrivateDnsName ) {
<<<<<<<
this.subnetId = subnetId;
this.tags = tags;
this.idleTerminationMinutes = idleTerminationMinutes;
=======
this.usePrivateDnsName = usePrivateDnsName;
>>>>>>>
this.subnetId = subnetId;
this.tags = tags;
this.idleTerminationMinutes = idleTerminationMinutes;
this.usePrivateDnsName = usePrivateDnsName;
<<<<<<<
return new EC2Slave(inst.getInstanceId(), description, remoteFS, getSshPort(), getNumExecutors(), labels,
initScript, remoteAdmin, rootCommandPrefix, jvmopts, stopOnTerminate, idleTerminationMinutes,
inst.getPublicDnsName(), inst.getPrivateDnsName(), EC2Tag.fromAmazonTags( inst.getTags() ));
=======
return new EC2Slave(inst.getInstanceId(), description, remoteFS, getSshPort(), getNumExecutors(), labels, initScript, remoteAdmin, rootCommandPrefix, jvmopts, stopOnTerminate, usePrivateDnsName);
>>>>>>>
return new EC2Slave(inst.getInstanceId(), description, remoteFS, getSshPort(), getNumExecutors(), labels,
initScript, remoteAdmin, rootCommandPrefix, jvmopts, stopOnTerminate, idleTerminationMinutes,
inst.getPublicDnsName(), inst.getPrivateDnsName(), EC2Tag.fromAmazonTags( inst.getTags() ),
usePrivateDnsName ); |
<<<<<<<
EC2OndemandSlave slaveNormal = new EC2OndemandSlave("instanceId", "description", "remoteFS", 1, "labelString", Node.Mode.NORMAL, "initScript", "tmpDir", "remoteAdmin", "jvmopts", false, "30", "publicDNS", "privateDNS", null, "cloudName", false, false, 0, new UnixData("a", "b"), "-1");
=======
EC2OndemandSlave slaveNormal = new EC2OndemandSlave("instanceId", "description", "remoteFS", 1, "labelString", Node.Mode.NORMAL, "initScript", "tmpDir", "remoteAdmin", "jvmopts", false, "30", "publicDNS", "privateDNS", null, "cloudName", false, false, 0, new UnixData("a", null, null, "b"));
>>>>>>>
EC2OndemandSlave slaveNormal = new EC2OndemandSlave("instanceId", "description", "remoteFS", 1, "labelString", Node.Mode.NORMAL, "initScript", "tmpDir", "remoteAdmin", "jvmopts", false, "30", "publicDNS", "privateDNS", null, "cloudName", false, false, 0, new UnixData("a", null, null, "b"), "-1");
<<<<<<<
EC2OndemandSlave slaveExclusive = new EC2OndemandSlave("instanceId", "description", "remoteFS", 1, "labelString", Node.Mode.EXCLUSIVE, "initScript", "tmpDir", "remoteAdmin", "jvmopts", false, "30", "publicDNS", "privateDNS", null, "cloudName", false, false, 0, new UnixData("a", "b"), "-1");
=======
EC2OndemandSlave slaveExclusive = new EC2OndemandSlave("instanceId", "description", "remoteFS", 1, "labelString", Node.Mode.EXCLUSIVE, "initScript", "tmpDir", "remoteAdmin", "jvmopts", false, "30", "publicDNS", "privateDNS", null, "cloudName", false, false, 0, new UnixData("a", null, null, "b"));
>>>>>>>
EC2OndemandSlave slaveExclusive = new EC2OndemandSlave("instanceId", "description", "remoteFS", 1, "labelString", Node.Mode.EXCLUSIVE, "initScript", "tmpDir", "remoteAdmin", "jvmopts", false, "30", "publicDNS", "privateDNS", null, "cloudName", false, false, 0, new UnixData("a", null, null, "b"), "-1"); |