// File: rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CuFileWriteHandle.java
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Represents a cuFile file handle for writing.
*/
public final class CuFileWriteHandle extends CuFileHandle {
/**
* Construct a writer using the specified file path.
*
* @param path The file path for writing.
*/
public CuFileWriteHandle(String path) {
super(create(path));
}
/**
* Write the specified cuFile buffer into the file.
*
* @param buffer The cuFile buffer to write from.
* @param length The number of bytes to write.
 * @param fileOffset The starting file offset at which to write.
*/
public void write(CuFileBuffer buffer, long length, long fileOffset) {
writeFromBuffer(getPointer(), fileOffset, buffer.getPointer(), length);
}
/**
* Append the specified cuFile buffer to the file.
*
* @param buffer The cuFile buffer to append from.
* @param length The number of bytes to append.
 * @return The file offset at which the buffer was appended.
*/
public long append(CuFileBuffer buffer, long length) {
return appendFromBuffer(getPointer(), buffer.getPointer(), length);
}
private static native long create(String path);
private static native void writeFromBuffer(long file, long fileOffset, long buffer, long length);
private static native long appendFromBuffer(long file, long buffer, long length);
}
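/*
 * A minimal usage sketch (not part of the original source). It assumes a GPUDirect
 * Storage capable environment and the CuFileBuffer.allocate(length, registerBuffer)
 * factory from these bindings; the handle is AutoCloseable via CuFileHandle.
 *
 *   try (CuFileBuffer buffer = CuFileBuffer.allocate(4096, true);
 *        CuFileWriteHandle handle = new CuFileWriteHandle("/tmp/data.bin")) {
 *     handle.write(buffer, 4096, 0);              // write 4096 bytes at offset 0
 *     long offset = handle.append(buffer, 4096);  // append the same bytes at EOF
 *   }
 */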
// File: rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmAllocationMode.java
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
public class RmmAllocationMode {
/**
* Use cudaMalloc for allocation
*/
public static final int CUDA_DEFAULT = 0x00000000;
/**
* Use pool suballocation strategy
*/
public static final int POOL = 0x00000001;
/**
* Use cudaMallocManaged rather than cudaMalloc
*/
public static final int CUDA_MANAGED_MEMORY = 0x00000002;
/**
* Use arena suballocation strategy
*/
public static final int ARENA = 0x00000004;
/**
* Use CUDA async suballocation strategy
*/
public static final int CUDA_ASYNC = 0x00000008;
}
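/*
 * A minimal usage sketch (not part of the original source). The modes are bit flags,
 * so compatible ones can be OR-ed together; Rmm.initialize(allocationMode, logConf,
 * poolSize) and Rmm.logToStderr() are assumed to match the Rmm bindings in this package.
 *
 *   int mode = RmmAllocationMode.POOL | RmmAllocationMode.CUDA_MANAGED_MEMORY;
 *   Rmm.initialize(mode, Rmm.logToStderr(), 512L * 1024 * 1024); // 512 MiB pool
 */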
// File: rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/StreamedTableReader.java
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* Provides an interface for reading multiple tables from a single input source.
*/
public interface StreamedTableReader extends AutoCloseable {
/**
* Get the next table if available.
* @return the next Table or null if done reading tables.
* @throws CudfException on any error.
*/
Table getNextIfAvailable() throws CudfException;
/**
* Get the next table if available.
 * @param rowTarget the target number of rows to read (best effort only).
* @return the next Table or null if done reading tables.
* @throws CudfException on any error.
*/
Table getNextIfAvailable(int rowTarget) throws CudfException;
@Override
void close() throws CudfException;
}
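/*
 * A minimal read-loop sketch (not part of the original source). How the reader is
 * obtained is left abstract here; Table.readArrowIPCChunked(...) is one assumed source
 * of StreamedTableReader instances in these bindings.
 *
 *   try (StreamedTableReader reader = ...) {
 *     Table t;
 *     while ((t = reader.getNextIfAvailable()) != null) {
 *       try (Table table = t) {
 *         // process the table
 *       }
 *     }
 *   }
 */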
// File: rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CompressionMetadataWriterOptions.java
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.LinkedHashMap;
import java.util.Map;
public class CompressionMetadataWriterOptions extends ColumnWriterOptions.StructColumnWriterOptions {
private final CompressionType compressionType;
private final Map<String, String> metadata;
protected CompressionMetadataWriterOptions(Builder builder) {
super(builder);
this.compressionType = builder.compressionType;
this.metadata = builder.metadata;
}
@Override
boolean[] getFlatIsTimeTypeInt96() {
return super.getFlatBooleans(new boolean[]{}, (opt) -> opt.getFlatIsTimeTypeInt96());
}
@Override
int[] getFlatPrecision() {
return super.getFlatInts(new int[]{}, (opt) -> opt.getFlatPrecision());
}
@Override
boolean[] getFlatHasParquetFieldId() {
return super.getFlatBooleans(new boolean[]{}, (opt) -> opt.getFlatHasParquetFieldId());
}
@Override
int[] getFlatParquetFieldId() {
return super.getFlatInts(new int[]{}, (opt) -> opt.getFlatParquetFieldId());
}
@Override
int[] getFlatNumChildren() {
return super.getFlatInts(new int[]{}, (opt) -> opt.getFlatNumChildren());
}
@Override
boolean[] getFlatIsNullable() {
return super.getFlatBooleans(new boolean[]{}, (opt) -> opt.getFlatIsNullable());
}
@Override
boolean[] getFlatIsMap() {
return super.getFlatBooleans(new boolean[]{}, (opt) -> opt.getFlatIsMap());
}
@Override
boolean[] getFlatIsBinary() {
return super.getFlatBooleans(new boolean[]{}, (opt) -> opt.getFlatIsBinary());
}
@Override
String[] getFlatColumnNames() {
return super.getFlatColumnNames(new String[]{});
}
String[] getMetadataKeys() {
return metadata.keySet().toArray(new String[metadata.size()]);
}
String[] getMetadataValues() {
return metadata.values().toArray(new String[metadata.size()]);
}
public CompressionType getCompressionType() {
return compressionType;
}
public Map<String, String> getMetadata() {
return metadata;
}
public int getTopLevelChildren() {
return childColumnOptions.length;
}
public abstract static class Builder<T extends Builder,
V extends CompressionMetadataWriterOptions> extends AbstractStructBuilder<T, V> {
final Map<String, String> metadata = new LinkedHashMap<>();
CompressionType compressionType = CompressionType.AUTO;
/**
* Add a metadata key and a value
*/
public T withMetadata(String key, String value) {
this.metadata.put(key, value);
return (T) this;
}
/**
* Add a map of metadata keys and values
*/
public T withMetadata(Map<String, String> metadata) {
this.metadata.putAll(metadata);
return (T) this;
}
/**
* Set the compression type to use for writing
*/
public T withCompressionType(CompressionType compression) {
this.compressionType = compression;
return (T) this;
}
}
}
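/*
 * A minimal builder sketch (not part of the original source). Concrete subclasses such
 * as ParquetWriterOptions expose this builder; the column definitions a real writer
 * needs are elided here.
 *
 *   ParquetWriterOptions options = ParquetWriterOptions.builder()
 *       .withMetadata("created_by", "example")
 *       .withCompressionType(CompressionType.SNAPPY)
 *       .build();
 */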
// File: rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/GatherMap.java
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* This class tracks the data associated with a gather map, a buffer of INT32 elements that index
* a source table and can be passed to a table gather operation.
*/
public class GatherMap implements AutoCloseable {
private DeviceMemoryBuffer buffer;
/**
* Construct a gather map instance from a device buffer. The buffer length must be a multiple of
* the {@link DType#INT32} size, as each row of the gather map is an INT32.
* @param buffer device buffer backing the gather map data
*/
public GatherMap(DeviceMemoryBuffer buffer) {
if (buffer.getLength() % DType.INT32.getSizeInBytes() != 0) {
throw new IllegalArgumentException("buffer length not a multiple of 4");
}
this.buffer = buffer;
}
/** Return the number of rows in the gather map */
public long getRowCount() {
ensureOpen();
return buffer.getLength() / 4;
}
/**
* Create a column view that can be used to perform a gather operation. Note that the resulting
* column view MUST NOT outlive the underlying device buffer within this instance!
* @param startRow row offset where the resulting gather map will start
* @param numRows number of rows in the resulting gather map
* @return column view of gather map data
*/
public ColumnView toColumnView(long startRow, int numRows) {
ensureOpen();
return ColumnView.fromDeviceBuffer(buffer, startRow * 4, DType.INT32, numRows);
}
/**
* Release the underlying device buffer instance. After this is called, closing this instance
* will not close the underlying device buffer. It is the responsibility of the caller to close
* the returned device buffer.
* @return device buffer backing gather map data or null if the buffer has already been released
*/
public DeviceMemoryBuffer releaseBuffer() {
DeviceMemoryBuffer result = buffer;
buffer = null;
return result;
}
/** Close the device buffer backing the gather map data. */
@Override
public void close() {
if (buffer != null) {
buffer.close();
buffer = null;
}
}
private void ensureOpen() {
if (buffer == null) {
throw new IllegalStateException("instance is closed");
}
if (buffer.closed) {
throw new IllegalStateException("buffer is closed");
}
}
}
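/*
 * A minimal usage sketch (not part of the original source). indexBuffer is a
 * hypothetical DeviceMemoryBuffer of INT32 row indices, and Table.gather(ColumnView)
 * is assumed to match the gather API in these bindings.
 *
 *   try (GatherMap map = new GatherMap(indexBuffer)) {
 *     try (ColumnView view = map.toColumnView(0, (int) map.getRowCount());
 *          Table gathered = table.gather(view)) {
 *       // use the gathered table
 *     }
 *   }
 */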
// File: rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ColumnVector.java
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Consumer;
/**
 * This class represents an immutable vector of data. It holds
 * references to device (GPU) memory and is reference counted to know when to release it. Call
 * close to decrement the reference count when you are done with the column, and call incRefCount
 * to increment the reference count.
*/
public final class ColumnVector extends ColumnView {
/**
* Interface to handle events for this ColumnVector. Only invoked during
* close, hence `onClosed` is the only event.
*/
public interface EventHandler {
/**
* `onClosed` is invoked with the updated `refCount` during `close`.
* The last invocation of `onClosed` will be with `refCount=0`.
*
* @note the callback is invoked with this `ColumnVector`'s lock held.
*
* @param cv reference to the ColumnVector we are closing
* @param refCount the updated ref count for this ColumnVector at the time
* of invocation
*/
void onClosed(ColumnVector cv, int refCount);
}
private static final Logger log = LoggerFactory.getLogger(ColumnVector.class);
static {
NativeDepsLoader.loadNativeDeps();
}
private Optional<Long> nullCount = Optional.empty();
private int refCount;
private EventHandler eventHandler;
/**
* Wrap an existing on device cudf::column with the corresponding ColumnVector. The new
* ColumnVector takes ownership of the pointer and will free it when the ref count reaches zero.
* @param nativePointer host address of the cudf::column object which will be
* owned by this instance.
*/
public ColumnVector(long nativePointer) {
super(new OffHeapState(nativePointer));
assert nativePointer != 0;
MemoryCleaner.register(this, offHeap);
this.refCount = 0;
incRefCountInternal(true);
}
private static OffHeapState makeOffHeap(DType type, long rows, Optional<Long> nullCount,
DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
DeviceMemoryBuffer offsetBuffer) {
long viewHandle = initViewHandle(
type, (int)rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(),
dataBuffer, validityBuffer, offsetBuffer, null);
return new OffHeapState(dataBuffer, validityBuffer, offsetBuffer, null, viewHandle);
}
/**
* Create a new column vector based off of data already on the device.
* @param type the type of the vector
* @param rows the number of rows in this vector.
* @param nullCount the number of nulls in the dataset.
* @param dataBuffer the data stored on the device. The column vector takes ownership of the
* buffer. Do not use the buffer after calling this.
* @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0. The
* column vector takes ownership of the buffer. Do not use the buffer
* after calling this.
 * @param offsetBuffer a device buffer of offsets, required for STRING columns. The column
* vector takes ownership of the buffer. Do not use the buffer after calling
* this.
*/
public ColumnVector(DType type, long rows, Optional<Long> nullCount,
DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
DeviceMemoryBuffer offsetBuffer) {
super(makeOffHeap(type, rows, nullCount, dataBuffer, validityBuffer, offsetBuffer));
assert !type.equals(DType.LIST) : "This constructor should not be used for list type";
if (!type.equals(DType.STRING)) {
assert offsetBuffer == null : "offsets are only supported for STRING";
}
assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE)
|| !nullCount.isPresent();
MemoryCleaner.register(this, offHeap);
this.nullCount = nullCount;
this.refCount = 0;
incRefCountInternal(true);
}
/**
* This method is internal and exposed purely for testing purposes
*/
static OffHeapState makeOffHeap(DType type, long rows, Optional<Long> nullCount,
DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
DeviceMemoryBuffer offsetBuffer, List<DeviceMemoryBuffer> toClose, long[] childHandles) {
long viewHandle = initViewHandle(type, (int)rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(),
dataBuffer, validityBuffer,
offsetBuffer, childHandles);
return new OffHeapState(dataBuffer, validityBuffer, offsetBuffer, toClose, viewHandle);
}
/**
* Create a new column vector based off of data already on the device with child columns.
* @param type the type of the vector, typically a nested type
* @param rows the number of rows in this vector.
* @param nullCount the number of nulls in the dataset.
* @param dataBuffer the data stored on the device. The column vector takes ownership of the
* buffer. Do not use the buffer after calling this.
* @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0. The
* column vector takes ownership of the buffer. Do not use the buffer
* after calling this.
 * @param offsetBuffer a device buffer of offsets, required for STRING and LIST columns. The column
* vector takes ownership of the buffer. Do not use the buffer after calling
* this.
* @param toClose List of buffers to track and close once done, usually in case of children
* @param childHandles array of longs for child column view handles.
*/
public ColumnVector(DType type, long rows, Optional<Long> nullCount,
DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
DeviceMemoryBuffer offsetBuffer, List<DeviceMemoryBuffer> toClose, long[] childHandles) {
super(makeOffHeap(type, rows, nullCount, dataBuffer, validityBuffer, offsetBuffer, toClose, childHandles));
if (!type.equals(DType.STRING) && !type.equals(DType.LIST)) {
assert offsetBuffer == null : "offsets are only supported for STRING, LISTS";
}
assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE)
|| !nullCount.isPresent();
MemoryCleaner.register(this, offHeap);
this.refCount = 0;
incRefCountInternal(true);
}
/**
* This is a very special constructor that should only ever be called by
* fromViewWithContiguousAllocation. It takes a cudf::column_view * instead of a cudf::column *.
* But to maintain memory ownership properly we need to slice the memory in the view off from
* a separate buffer that actually owns the memory allocation.
* @param viewAddress the address of the cudf::column_view
* @param contiguousBuffer the buffer that this is based off of.
*/
private ColumnVector(long viewAddress, DeviceMemoryBuffer contiguousBuffer) {
super(new OffHeapState(viewAddress, contiguousBuffer));
MemoryCleaner.register(this, offHeap);
// TODO we may want to ask for the null count anyways...
this.nullCount = Optional.empty();
this.refCount = 0;
incRefCountInternal(true);
}
/**
* For a ColumnVector this is really just incrementing the reference count.
* @return this
*/
@Override
public ColumnVector copyToColumnVector() {
return incRefCount();
}
/**
* Retrieves the column_view for a cudf::column and if it fails to do so, the column is deleted
* and the exception is thrown to the caller.
* @param nativePointer the cudf::column handle
* @return the column_view handle
*/
private static long getColumnViewFromColumn(long nativePointer) {
try {
return ColumnVector.getNativeColumnView(nativePointer);
} catch (CudfException ce) {
deleteCudfColumn(nativePointer);
throw ce;
}
}
static long initViewHandle(DType type, int numRows, int nullCount,
BaseDeviceMemoryBuffer dataBuffer,
BaseDeviceMemoryBuffer validityBuffer,
BaseDeviceMemoryBuffer offsetBuffer, long[] childHandles) {
long cd = dataBuffer == null ? 0 : dataBuffer.address;
long cdSize = dataBuffer == null ? 0 : dataBuffer.length;
long od = offsetBuffer == null ? 0 : offsetBuffer.address;
long vd = validityBuffer == null ? 0 : validityBuffer.address;
return makeCudfColumnView(type.typeId.getNativeId(), type.getScale(), cd, cdSize,
od, vd, nullCount, numRows, childHandles);
}
static ColumnVector fromViewWithContiguousAllocation(long columnViewAddress, DeviceMemoryBuffer buffer) {
return new ColumnVector(columnViewAddress, buffer);
}
/**
* Set an event handler for this vector. This method can be invoked with null
* to unset the handler.
*
* @param newHandler - the EventHandler to use from this point forward
* @return the prior event handler, or null if not set.
*/
public synchronized EventHandler setEventHandler(EventHandler newHandler) {
EventHandler prev = this.eventHandler;
this.eventHandler = newHandler;
return prev;
}
/**
* Returns the current event handler for this ColumnVector or null if no handler
* is associated.
*/
public synchronized EventHandler getEventHandler() {
return this.eventHandler;
}
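/*
 * A minimal sketch of registering a handler (not part of the original source).
 * EventHandler has a single method, so a lambda works; refCount reaches 0 on the
 * final close.
 *
 *   cv.setEventHandler((vector, refCount) -> {
 *     if (refCount == 0) {
 *       // the vector is fully closed; release any bookkeeping tied to it
 *     }
 *   });
 */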
/**
 * This is a really ugly API, but it is possible that a column of data may not have
 * a clear lifecycle thanks to Java and the GC. This API informs the leak tracking code
 * that this is expected for this column, and big scary warnings should
 * not be printed when this happens.
*/
public void noWarnLeakExpected() {
offHeap.noWarnLeakExpected();
}
/**
 * Close this vector, freeing the device memory backing it once the reference count reaches zero.
*/
@Override
public synchronized void close() {
refCount--;
offHeap.delRef();
if (eventHandler != null) {
eventHandler.onClosed(this, refCount);
}
if (refCount == 0) {
super.close();
offHeap.clean(false);
} else if (refCount < 0) {
offHeap.logRefCountDebug("double free " + this);
throw new IllegalStateException("Close called too many times " + this);
}
}
@Override
public String toString() {
return "ColumnVector{" +
"rows=" + rows +
", type=" + type +
", nullCount=" + nullCount +
", offHeap=" + offHeap +
'}';
}
/////////////////////////////////////////////////////////////////////////////
// METADATA ACCESS
/////////////////////////////////////////////////////////////////////////////
/**
* Increment the reference count for this column. You need to call close on this
* to decrement the reference count again.
*/
public ColumnVector incRefCount() {
return incRefCountInternal(false);
}
private synchronized ColumnVector incRefCountInternal(boolean isFirstTime) {
offHeap.addRef();
if (refCount <= 0 && !isFirstTime) {
offHeap.logRefCountDebug("INC AFTER CLOSE " + this);
throw new IllegalStateException("Column is already closed");
}
refCount++;
return this;
}
/**
 * Returns the number of nulls in the data. Note that this can be a very
 * expensive operation, because the null count will be calculated if it is
 * not already known.
*/
public long getNullCount() {
if (!nullCount.isPresent()) {
nullCount = Optional.of(offHeap.getNativeNullCount());
}
return nullCount.get();
}
/**
* Returns this column's current refcount
*/
public synchronized int getRefCount() {
return refCount;
}
/**
 * Returns true if the vector has a validity vector allocated.
*/
public boolean hasValidityVector() {
return (offHeap.getValid() != null);
}
/**
 * Returns true if the vector has nulls. Note that this can be a very
 * expensive operation, because the null count will be calculated if it is
 * not already known.
*/
public boolean hasNulls() {
return getNullCount() > 0;
}
/////////////////////////////////////////////////////////////////////////////
// RAW DATA ACCESS
/////////////////////////////////////////////////////////////////////////////
/**
* Get access to the raw device buffer for this column. This is intended to be used with a lot
* of caution. The lifetime of the buffer is tied to the lifetime of the column (Do not close
* the buffer, as the column will take care of it). Do not modify the contents of the buffer or
* it might negatively impact what happens on the column. The data must be on the device for
* this to work. Strings and string categories do not currently work because their underlying
* device layout is currently hidden.
* @param type the type of buffer to get access to.
* @return the underlying buffer or null if no buffer is associated with it for this column.
* Please note that if the column is empty there may be no buffers at all associated with the
* column.
*/
public BaseDeviceMemoryBuffer getDeviceBufferFor(BufferType type) {
BaseDeviceMemoryBuffer srcBuffer;
switch(type) {
case VALIDITY:
srcBuffer = offHeap.getValid();
break;
case DATA:
srcBuffer = offHeap.getData();
break;
case OFFSET:
srcBuffer = offHeap.getOffsets();
break;
default:
throw new IllegalArgumentException(type + " is not a supported buffer type.");
}
return srcBuffer;
}
/**
* Ensures the ByteBuffer passed in is a direct byte buffer.
* If it is not then it creates one and copies the data in
* the byte buffer passed in to the direct byte buffer
* it created and returns it.
*/
private static ByteBuffer bufferAsDirect(ByteBuffer buf) {
ByteBuffer bufferOut = buf;
if (bufferOut != null && !bufferOut.isDirect()) {
bufferOut = ByteBuffer.allocateDirect(buf.remaining());
bufferOut.put(buf);
bufferOut.flip();
}
return bufferOut;
}
/**
* Create a ColumnVector from the Apache Arrow byte buffers passed in.
* Any of the buffers not used for that datatype should be set to null.
* The buffers are expected to be off heap buffers, but if they are not,
* it will handle copying them to direct byte buffers.
* This only supports primitive types. Strings, Decimals and nested types
* such as list and struct are not supported.
* @param type - type of the column
* @param numRows - Number of rows in the arrow column
* @param nullCount - Null count
* @param data - ByteBuffer of the Arrow data buffer
* @param validity - ByteBuffer of the Arrow validity buffer
* @param offsets - ByteBuffer of the Arrow offsets buffer
* @return - new ColumnVector
*/
public static ColumnVector fromArrow(
DType type,
long numRows,
long nullCount,
ByteBuffer data,
ByteBuffer validity,
ByteBuffer offsets) {
long columnHandle = fromArrow(type.typeId.getNativeId(), numRows, nullCount,
bufferAsDirect(data), bufferAsDirect(validity), bufferAsDirect(offsets));
ColumnVector vec = new ColumnVector(columnHandle);
return vec;
}
/**
* Create a new vector of length rows, where each row is filled with the Scalar's
* value
* @param scalar - Scalar to use to fill rows
* @param rows - Number of rows in the new ColumnVector
* @return - new ColumnVector
*/
public static ColumnVector fromScalar(Scalar scalar, int rows) {
long columnHandle = fromScalar(scalar.getScalarHandle(), rows);
return new ColumnVector(columnHandle);
}
/**
* Create a new struct vector made up of existing columns. Note that this will copy
* the contents of the input columns to make a new vector. If you only want to
* do a quick temporary computation you can use ColumnView.makeStructView.
* @param columns the columns to make the struct from.
* @return the new ColumnVector
*/
public static ColumnVector makeStruct(ColumnView... columns) {
try (ColumnView cv = ColumnView.makeStructView(columns)) {
return cv.copyToColumnVector();
}
}
/**
* Create a new struct vector made up of existing columns. Note that this will copy
* the contents of the input columns to make a new vector. If you only want to
* do a quick temporary computation you can use ColumnView.makeStructView.
* @param rows the number of rows in the struct. Used for structs with no children.
* @param columns the columns to make the struct from.
* @return the new ColumnVector
*/
public static ColumnVector makeStruct(long rows, ColumnView... columns) {
try (ColumnView cv = ColumnView.makeStructView(rows, columns)) {
return cv.copyToColumnVector();
}
}
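/*
 * A minimal sketch (not part of the original source): build a two-field struct column
 * by copying the input columns, using factories defined later in this file.
 *
 *   try (ColumnVector a = ColumnVector.fromInts(1, 2);
 *        ColumnVector b = ColumnVector.fromDoubles(1.5, 2.5);
 *        ColumnVector struct = ColumnVector.makeStruct(a, b)) {
 *     // struct holds two rows of {INT32, FLOAT64}
 *   }
 */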
/**
* Create a LIST column from the given columns. Each list in the returned column will have the
* same number of entries in it as columns passed into this method. Be careful about the
* number of rows passed in as there are limits on the maximum output size supported for
* column lists.
* @param columns the columns to make up the list column, in the order they will appear in the
* resulting lists.
* @return the new LIST ColumnVector
*/
public static ColumnVector makeList(ColumnView... columns) {
if (columns.length <= 0) {
throw new IllegalArgumentException("At least one column is needed to get the row count");
}
return makeList(columns[0].getRowCount(), columns[0].getType(), columns);
}
/**
* Create a LIST column from the given columns. Each list in the returned column will have the
* same number of entries in it as columns passed into this method. Be careful about the
* number of rows passed in as there are limits on the maximum output size supported for
* column lists.
* @param rows the number of rows to create, for the special case of an empty list.
* @param type the type of the child column, for the special case of an empty list.
* @param columns the columns to make up the list column, in the order they will appear in the
* resulting lists.
* @return the new LIST ColumnVector
*/
public static ColumnVector makeList(long rows, DType type, ColumnView... columns) {
long[] handles = new long[columns.length];
for (int i = 0; i < columns.length; i++) {
ColumnView cv = columns[i];
if (rows != cv.getRowCount()) {
throw new IllegalArgumentException("All columns must have the same number of rows");
}
if (!type.equals(cv.getType())) {
throw new IllegalArgumentException("All columns must have the same type");
}
handles[i] = cv.getNativeView();
}
if (columns.length == 0 && type.isNestedType()) {
throw new IllegalArgumentException(
"Creating an empty list column of nested types is not currently supported");
}
return new ColumnVector(makeList(handles, type.typeId.nativeId, type.getScale(), rows));
}
/**
* Create a LIST column from the current column and a given offsets column. The output column will
* contain lists having elements that are copied from the current column and their sizes are
* determined by the given offsets.
*
 * Note that the caller is responsible for making sure the given offsets column is of type INT32
 * and contains valid indices for creating a LIST column. This function does not validate the
 * offsets; if they are invalid, bad memory accesses and/or data corruption may occur.
*
* @param rows the number of rows to create.
* @param offsets the offsets pointing to row indices of the current column to create an output
* LIST column.
*/
public ColumnVector makeListFromOffsets(long rows, ColumnView offsets) {
return new ColumnVector(makeListFromOffsets(getNativeView(), offsets.getNativeView(), rows));
}
/**
* Create a new vector of length rows, starting at the initialValue and going by step each time.
* Only numeric types are supported.
* @param initialValue the initial value to start at.
* @param step the step to add to each subsequent row.
* @param rows the total number of rows
* @return the new ColumnVector.
*/
public static ColumnVector sequence(Scalar initialValue, Scalar step, int rows) {
if (!initialValue.isValid() || !step.isValid()) {
throw new IllegalArgumentException("nulls are not supported in sequence");
}
return new ColumnVector(sequence(initialValue.getScalarHandle(), step.getScalarHandle(), rows));
}
/**
* Create a new vector of length rows, starting at the initialValue and going by 1 each time.
* Only numeric types are supported.
* @param initialValue the initial value to start at.
* @param rows the total number of rows
* @return the new ColumnVector.
*/
public static ColumnVector sequence(Scalar initialValue, int rows) {
if (!initialValue.isValid()) {
throw new IllegalArgumentException("nulls are not supported in sequence");
}
return new ColumnVector(sequence(initialValue.getScalarHandle(), 0, rows));
}
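/*
 * A minimal sketch (not part of the original source): a ten-row INT32 ramp starting
 * at zero. Scalar.fromInt is assumed to be the scalar factory in these bindings.
 *
 *   try (Scalar start = Scalar.fromInt(0);
 *        ColumnVector ramp = ColumnVector.sequence(start, 10)) {
 *     // ramp holds [0, 1, ..., 9]
 *   }
 */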
/**
* Create a list column in which each row is a sequence of values starting from a `start` value,
* incrementing by one, and its cardinality is specified by a `size` value. The `start` and `size`
 * values used to generate each list are taken from the corresponding row of the input start and
* size columns.
* @param start first values in the result sequences
* @param size numbers of values in the result sequences
* @return the new ColumnVector.
*/
public static ColumnVector sequence(ColumnView start, ColumnView size) {
assert start.getNullCount() == 0 || size.getNullCount() == 0 : "starts and sizes input " +
"columns must not have nulls.";
return new ColumnVector(sequences(start.getNativeView(), size.getNativeView(), 0));
}
/**
* Create a list column in which each row is a sequence of values starting from a `start` value,
* incrementing by a `step` value, and its cardinality is specified by a `size` value.
 * The values `start`, `step`, and `size` used to generate each list are taken from the
* corresponding row of the input starts, steps, and sizes columns.
* @param start first values in the result sequences
* @param size numbers of values in the result sequences
* @param step increment values for the result sequences.
* @return the new ColumnVector.
*/
public static ColumnVector sequence(ColumnView start, ColumnView size, ColumnView step) {
assert start.getNullCount() == 0 && size.getNullCount() == 0 && step.getNullCount() == 0 :
"start, size and step must not have nulls.";
assert step.getType().equals(start.getType()) : "start and step input columns must" +
" have the same type.";
return new ColumnVector(sequences(start.getNativeView(), size.getNativeView(),
step.getNativeView()));
}
/**
* Create a new vector by concatenating multiple columns together.
* Note that all columns must have the same type.
*/
public static ColumnVector concatenate(ColumnView... columns) {
if (columns.length < 2) {
throw new IllegalArgumentException("Concatenate requires 2 or more columns");
}
long[] columnHandles = new long[columns.length];
for (int i = 0; i < columns.length; ++i) {
columnHandles[i] = columns[i].getNativeView();
}
return new ColumnVector(concatenate(columnHandles));
}
/**
* Concatenate columns of strings together, combining a corresponding row from each column
* into a single string row of a new column with no separator string inserted between each
* combined string and maintaining null values in combined rows.
* @param columns array of columns containing strings, must be non-empty
* @return A new java column vector containing the concatenated strings.
*/
public static ColumnVector stringConcatenate(ColumnView[] columns) {
try (Scalar emptyString = Scalar.fromString("");
Scalar nullString = Scalar.fromString(null)) {
return stringConcatenate(emptyString, nullString, columns);
}
}
/**
* Concatenate columns of strings together, combining a corresponding row from each column into
* a single string row of a new column. This version includes the separator for null rows
* if 'narep' is valid.
* @param separator string scalar inserted between each string being merged.
* @param narep string scalar indicating null behavior. If set to null and any string in the row
* is null the resulting string will be null. If not null, null values in any column
* will be replaced by the specified string.
* @param columns array of columns containing strings, must be non-empty
* @return A new java column vector containing the concatenated strings.
*/
public static ColumnVector stringConcatenate(Scalar separator, Scalar narep, ColumnView[] columns) {
return stringConcatenate(separator, narep, columns, true);
}
/**
* Concatenate columns of strings together, combining a corresponding row from each column into
* a single string row of a new column.
* @param separator string scalar inserted between each string being merged.
* @param narep string scalar indicating null behavior. If set to null and any string in the row
* is null the resulting string will be null. If not null, null values in any column
* will be replaced by the specified string.
* @param columns array of columns containing strings, must be non-empty
* @param separateNulls if true, then the separator is included for null rows if
* `narep` is valid.
* @return A new java column vector containing the concatenated strings.
*/
public static ColumnVector stringConcatenate(Scalar separator, Scalar narep, ColumnView[] columns,
boolean separateNulls) {
assert columns != null : "input columns should not be null";
assert columns.length > 0 : "input columns should not be empty";
assert separator != null : "separator scalar provided may not be null";
assert separator.getType().equals(DType.STRING) : "separator scalar must be a string scalar";
assert narep != null : "narep scalar provided may not be null";
assert narep.getType().equals(DType.STRING) : "narep scalar must be a string scalar";
long[] columnViews = new long[columns.length];
for(int i = 0; i < columns.length; i++) {
assert columns[i] != null : "Column vectors passed may not be null";
columnViews[i] = columns[i].getNativeView();
}
return new ColumnVector(stringConcatenation(columnViews, separator.getScalarHandle(),
narep.getScalarHandle(), separateNulls));
}
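/*
 * A minimal sketch (not part of the original source): join two string columns with a
 * dash, replacing nulls with the empty string. With separateNulls defaulting to true
 * in the three-argument overload, the replaced null still gets a separator.
 *
 *   try (ColumnVector left = ColumnVector.fromStrings("a", "b");
 *        ColumnVector right = ColumnVector.fromStrings("x", null);
 *        Scalar sep = Scalar.fromString("-");
 *        Scalar narep = Scalar.fromString("");
 *        ColumnVector joined = ColumnVector.stringConcatenate(
 *            sep, narep, new ColumnView[]{left, right})) {
 *     // joined holds ["a-x", "b-"]
 *   }
 */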
/**
* Concatenate columns of strings together using a separator specified for each row
* and returns the result as a string column. If the row separator for a given row is null,
* output column for that row is null. Null column values for a given row are skipped.
* @param columns array of columns containing strings
* @param sepCol strings column that provides the separator for a given row
* @return A new java column vector containing the concatenated strings with separator between.
*/
public static ColumnVector stringConcatenate(ColumnView[] columns, ColumnView sepCol) {
try (Scalar nullString = Scalar.fromString(null);
Scalar emptyString = Scalar.fromString("")) {
return stringConcatenate(columns, sepCol, nullString, emptyString, false);
}
}
/**
* Concatenate columns of strings together using a separator specified for each row
* and returns the result as a string column. If the row separator for a given row is null,
* output column for that row is null unless separatorNarep is provided.
* The separator is applied between two output row values if the separateNulls
* is `YES` or only between valid rows if separateNulls is `NO`.
* @param columns array of columns containing strings
* @param sepCol strings column that provides the separator for a given row
* @param separatorNarep string scalar indicating null behavior when a separator is null.
* If set to null and the separator is null the resulting string will
* be null. If not null, this string will be used in place of a null
* separator.
* @param colNarep string that should be used in place of any null strings
* found in any column.
* @param separateNulls if true, then the separator is included for null rows if
* `colNarep` is valid.
* @return A new java column vector containing the concatenated strings with separator between.
*/
public static ColumnVector stringConcatenate(ColumnView[] columns,
ColumnView sepCol, Scalar separatorNarep, Scalar colNarep, boolean separateNulls) {
assert columns.length >= 1 : ".stringConcatenate() operation requires at least 1 column";
assert separatorNarep != null : "separator narep scalar provided may not be null";
assert colNarep != null : "column narep scalar provided may not be null";
assert separatorNarep.getType().equals(DType.STRING) : "separator naprep scalar must be a string scalar";
assert colNarep.getType().equals(DType.STRING) : "column narep scalar must be a string scalar";
long[] columnViews = new long[columns.length];
for(int i = 0; i < columns.length; i++) {
assert columns[i] != null : "Column vectors passed may not be null";
columnViews[i] = columns[i].getNativeView();
}
return new ColumnVector(stringConcatenationSepCol(columnViews, sepCol.getNativeView(),
separatorNarep.getScalarHandle(), colNarep.getScalarHandle(), separateNulls));
}
/**
* Concatenate columns of lists horizontally (row by row), combining a corresponding row
* from each column into a single list row of a new column.
* NOTICE: Any concatenation involving a null list element will result in a null list.
*
* @param columns array of columns containing lists, must be non-empty
* @return A new java column vector containing the concatenated lists.
*/
public static ColumnVector listConcatenateByRow(ColumnView... columns) {
return listConcatenateByRow(false, columns);
}
/**
* Concatenate columns of lists horizontally (row by row), combining a corresponding row
* from each column into a single list row of a new column.
*
 * @param ignoreNull whether to ignore null list elements in the input columns: if true, null
 *                   lists are skipped during concatenation; otherwise, any concatenation
 *                   involving a null list element will result in a null list
* @param columns array of columns containing lists, must be non-empty
* @return A new java column vector containing the concatenated lists.
*/
public static ColumnVector listConcatenateByRow(boolean ignoreNull, ColumnView... columns) {
assert columns != null : "input columns should not be null";
assert columns.length > 0 : "input columns should not be empty";
long[] columnViews = new long[columns.length];
for(int i = 0; i < columns.length; i++) {
columnViews[i] = columns[i].getNativeView();
}
return new ColumnVector(concatListByRow(columnViews, ignoreNull));
}
/**
* Create a new vector containing the MD5 hash of each row in the table.
*
* @param columns array of columns to hash, must have identical number of rows.
* @return the new ColumnVector of 32 character hex strings representing each row's hash value.
*/
public static ColumnVector md5Hash(ColumnView... columns) {
if (columns.length < 1) {
throw new IllegalArgumentException("MD5 hashing requires at least 1 column of input");
}
long[] columnViews = new long[columns.length];
long size = columns[0].getRowCount();
for(int i = 0; i < columns.length; i++) {
assert columns[i] != null : "Column vectors passed may not be null";
assert columns[i].getRowCount() == size : "Row count mismatch, all columns must be the same size";
assert !columns[i].getType().isDurationType() : "Unsupported column type Duration";
assert !columns[i].getType().isTimestampType() : "Unsupported column type Timestamp";
assert !columns[i].getType().isNestedType() || columns[i].getType().equals(DType.LIST) :
"Unsupported nested type column";
columnViews[i] = columns[i].getNativeView();
}
return new ColumnVector(hash(columnViews, HashType.HASH_MD5.getNativeId(), 0));
}
/**
* Create a new vector containing spark's 32-bit murmur3 hash of each row in the table.
* Spark's murmur3 hash uses a different tail processing algorithm.
*
* @param seed integer seed for the murmur3 hash function
* @param columns array of columns to hash, must have identical number of rows.
* @return the new ColumnVector of 32-bit values representing each row's hash value.
*/
public static ColumnVector spark32BitMurmurHash3(int seed, ColumnView[] columns) {
if (columns.length < 1) {
throw new IllegalArgumentException("Murmur3 hashing requires at least 1 column of input");
}
long[] columnViews = new long[columns.length];
long size = columns[0].getRowCount();
for(int i = 0; i < columns.length; i++) {
assert columns[i] != null : "Column vectors passed may not be null";
assert columns[i].getRowCount() == size : "Row count mismatch, all columns must be the same size";
assert !columns[i].getType().isDurationType() : "Unsupported column type Duration";
columnViews[i] = columns[i].getNativeView();
}
return new ColumnVector(hash(columnViews, HashType.HASH_SPARK_MURMUR3.getNativeId(), seed));
}
/**
* Create a new vector containing spark's 32-bit murmur3 hash of each row in the table with the
* seed set to 0. Spark's murmur3 hash uses a different tail processing algorithm.
*
* @param columns array of columns to hash, must have identical number of rows.
* @return the new ColumnVector of 32-bit values representing each row's hash value.
*/
public static ColumnVector spark32BitMurmurHash3(ColumnView[] columns) {
return spark32BitMurmurHash3(0, columns);
}
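/*
 * A minimal sketch (not part of the original source): hash two columns row-wise with
 * a seed of 42, yielding one 32-bit hash per row.
 *
 *   try (ColumnVector a = ColumnVector.fromInts(1, 2, 3);
 *        ColumnVector b = ColumnVector.fromLongs(10L, 20L, 30L);
 *        ColumnVector hashes =
 *            ColumnVector.spark32BitMurmurHash3(42, new ColumnView[]{a, b})) {
 *     // hashes is an INT32 column of row hashes
 *   }
 */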
/**
* Generic method to cast ColumnVector
* When casting from a Date, Timestamp, or Boolean to a numerical type the underlying numerical
* representation of the data will be used for the cast.
*
* For Strings:
 * Casting strings from/to timestamp is not currently supported.
* Please look at {@link ColumnVector#asTimestamp(DType, String)}
* and {@link ColumnVector#asStrings(String)} for casting string to timestamp when the format
* is known
*
 * Float values converted to String may differ from the expected default behavior in Java,
* e.g.
* 12.3 => "12.30000019" instead of "12.3"
* Double.POSITIVE_INFINITY => "Inf" instead of "INFINITY"
* Double.NEGATIVE_INFINITY => "-Inf" instead of "-INFINITY"
*
* @param type type of the resulting ColumnVector
* @return A new vector allocated on the GPU
*/
@Override
public ColumnVector castTo(DType type) {
if (this.type.equals(type)) {
// Optimization
return incRefCount();
}
return super.castTo(type);
}
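/*
 * A minimal sketch (not part of the original source): widen INT32 to INT64. Casting
 * to the same type just bumps the reference count per the optimization above, so the
 * caller still owns a handle either way.
 *
 *   try (ColumnVector ints = ColumnVector.fromInts(1, 2, 3);
 *        ColumnVector longs = ints.castTo(DType.INT64)) {
 *     // longs holds [1L, 2L, 3L]
 *   }
 */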
/////////////////////////////////////////////////////////////////////////////
// NATIVE METHODS
/////////////////////////////////////////////////////////////////////////////
private static native long sequence(long initialValue, long step, int rows);
private static native long sequences(long startHandle, long sizeHandle, long stepHandle)
throws CudfException;
private static native long fromArrow(int type, long col_length,
long null_count, ByteBuffer data, ByteBuffer validity,
ByteBuffer offsets) throws CudfException;
private static native long fromScalar(long scalarHandle, int rowCount) throws CudfException;
private static native long makeList(long[] handles, long typeHandle, int scale, long rows)
throws CudfException;
private static native long makeListFromOffsets(long childHandle, long offsetsHandle, long rows)
throws CudfException;
private static native long concatenate(long[] viewHandles) throws CudfException;
/**
* Native method to concatenate columns of lists horizontally (row by row), combining a row
* from each column into a single list.
*
* @param columnViews array of longs holding the native handles of the column_views to combine.
* @return native handle of the resulting cudf column, used to construct the Java column
* by the listConcatenateByRow method.
*/
private static native long concatListByRow(long[] columnViews, boolean ignoreNull);
/**
* Native method to concatenate columns of strings together, combining a row from
* each column into a single string.
*
* @param columnViews array of longs holding the native handles of the column_views to combine.
* @param separator string scalar inserted between each string being merged, may not be null.
* @param narep string scalar indicating null behavior. If set to null and any string in
* the row is null the resulting string will be null. If not null, null
* values in any column will be replaced by the specified string. The
* underlying value in the string scalar may be null, but the object passed
* in may not.
* @param separate_nulls boolean if true, then the separator is included for null rows if
* `narep` is valid.
* @return native handle of the resulting cudf column, used to construct the Java column
* by the stringConcatenate method.
*/
private static native long stringConcatenation(long[] columnViews, long separator, long narep,
boolean separate_nulls);
/**
* Native method to concatenate columns of strings together using a separator specified for each row
* and returns the result as a string column.
* @param columnViews array of longs holding the native handles of the column_views to combine.
* @param sep_column long holding the native handle of the strings_column_view used as separators.
* @param separator_narep string scalar indicating null behavior when a separator is null.
* If set to null and the separator is null the resulting string will
* be null. If not null, this string will be used in place of a null
* separator.
 * @param col_narep string scalar that should be used in place of any null strings
* found in any column.
* @param separate_nulls boolean if true, then the separator is included for null rows if
* `col_narep` is valid.
* @return native handle of the resulting cudf column, used to construct the Java column.
*/
private static native long stringConcatenationSepCol(long[] columnViews,
long sep_column,
long separator_narep,
long col_narep,
boolean separate_nulls);
/**
* Native method to hash each row of the given table. Hashing function dispatched on the
* native side using the hashId.
*
* @param viewHandles array of native handles to the cudf::column_view columns being operated on.
* @param hashId integer native ID of the hashing function identifier HashType.
* @param seed integer seed for the hash. Only used by serial murmur3 hash.
* @return native handle of the resulting cudf column containing the hex-string hashing results.
*/
private static native long hash(long[] viewHandles, int hashId, int seed) throws CudfException;
/////////////////////////////////////////////////////////////////////////////
// INTERNAL/NATIVE ACCESS
/////////////////////////////////////////////////////////////////////////////
////////
// Native methods specific to cudf::column. These either take or create a cudf::column
// instead of a cudf::column_view so they need to be used with caution. These should
// only be called from the OffHeap inner class.
////////
/**
 * Delete the column. This is not private because there are a few cases where Table
 * may get back an array of column pointers, and we want to make a best effort to clean
 * them up on any failure.
*/
static native void deleteCudfColumn(long cudfColumnHandle) throws CudfException;
private static native int getNativeNullCountColumn(long cudfColumnHandle) throws CudfException;
private static native void setNativeNullCountColumn(long cudfColumnHandle, int nullCount) throws CudfException;
/**
* Create a cudf::column_view from a cudf::column.
* @param cudfColumnHandle the pointer to the cudf::column
* @return a pointer to a cudf::column_view
* @throws CudfException on any error
*/
static native long getNativeColumnView(long cudfColumnHandle) throws CudfException;
static native long makeEmptyCudfColumn(int type, int scale);
/////////////////////////////////////////////////////////////////////////////
// HELPER CLASSES
/////////////////////////////////////////////////////////////////////////////
/**
* Holds the off heap state of the column vector so we can clean it up, even if it is leaked.
*/
protected static final class OffHeapState extends MemoryCleaner.Cleaner {
// This must be kept in sync with the native code
public static final long UNKNOWN_NULL_COUNT = -1;
private long columnHandle;
private long viewHandle = 0;
private List<MemoryBuffer> toClose = new ArrayList<>();
/**
 * Make a column from an existing cudf::column *.
*/
public OffHeapState(long columnHandle) {
this.columnHandle = columnHandle;
this.toClose.add(getData());
this.toClose.add(getValid());
this.toClose.add(getOffsets());
}
/**
* Create from existing cudf::column_view and buffers.
*/
public OffHeapState(DeviceMemoryBuffer data, DeviceMemoryBuffer valid, DeviceMemoryBuffer offsets,
List<DeviceMemoryBuffer> buffers,
long viewHandle) {
assert(viewHandle != 0);
if (data != null) {
this.toClose.add(data);
}
if (valid != null) {
this.toClose.add(valid);
}
if (offsets != null) {
this.toClose.add(offsets);
}
if (buffers != null) {
toClose.addAll(buffers);
}
this.viewHandle = viewHandle;
}
/**
* Create from existing cudf::column_view and contiguous buffer.
*/
public OffHeapState(long viewHandle, DeviceMemoryBuffer contiguousBuffer) {
assert viewHandle != 0;
this.viewHandle = viewHandle;
BaseDeviceMemoryBuffer valid = getValid();
BaseDeviceMemoryBuffer data = getData();
BaseDeviceMemoryBuffer offsets = getOffsets();
toClose.add(data);
toClose.add(valid);
toClose.add(offsets);
contiguousBuffer.incRefCount();
toClose.add(contiguousBuffer);
}
public long getViewHandle() {
if (viewHandle == 0) {
viewHandle = ColumnVector.getNativeColumnView(columnHandle);
}
return viewHandle;
}
public long getNativeNullCount() {
if (viewHandle != 0) {
return ColumnView.getNativeNullCount(getViewHandle());
}
return getNativeNullCountColumn(columnHandle);
}
private void setNativeNullCount(int nullCount) throws CudfException {
assert viewHandle == 0 : "Cannot set the null count if a view has already been created";
assert columnHandle != 0;
setNativeNullCountColumn(columnHandle, nullCount);
}
public BaseDeviceMemoryBuffer getData() {
return getDataBuffer(getViewHandle());
}
public BaseDeviceMemoryBuffer getValid() {
return getValidityBuffer(getViewHandle());
}
public BaseDeviceMemoryBuffer getOffsets() {
return getOffsetsBuffer(getViewHandle());
}
@Override
public void noWarnLeakExpected() {
super.noWarnLeakExpected();
BaseDeviceMemoryBuffer valid = getValid();
BaseDeviceMemoryBuffer data = getData();
BaseDeviceMemoryBuffer offsets = getOffsets();
if (valid != null) {
valid.noWarnLeakExpected();
}
if (data != null) {
data.noWarnLeakExpected();
}
if(offsets != null) {
offsets.noWarnLeakExpected();
}
}
@Override
public String toString() {
return "(ID: " + id + " " + Long.toHexString(columnHandle == 0 ? viewHandle : columnHandle) + ")";
}
@Override
protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
boolean neededCleanup = false;
long address = 0;
// Always mark the resource as freed even if an exception is thrown.
// We cannot know how far it progressed before the exception, and
// therefore it is unsafe to retry.
Throwable toThrow = null;
if (viewHandle != 0) {
address = viewHandle;
try {
ColumnView.deleteColumnView(viewHandle);
} catch (Throwable t) {
toThrow = t;
} finally {
viewHandle = 0;
}
neededCleanup = true;
}
if (columnHandle != 0) {
if (address == 0) {
address = columnHandle;
}
try {
ColumnVector.deleteCudfColumn(columnHandle);
} catch (Throwable t) {
if (toThrow != null) {
toThrow.addSuppressed(t);
} else {
toThrow = t;
}
} finally {
columnHandle = 0;
}
neededCleanup = true;
}
if (!toClose.isEmpty()) {
try {
for (MemoryBuffer toCloseBuff : toClose) {
if (toCloseBuff != null) {
try {
toCloseBuff.close();
} catch (Throwable t) {
if (toThrow != null) {
toThrow.addSuppressed(t);
} else {
toThrow = t;
}
}
}
}
} finally {
toClose.clear();
}
neededCleanup = true;
}
if (toThrow != null) {
throw new RuntimeException(toThrow);
}
if (neededCleanup) {
if (logErrorIfNotClean) {
log.error("A DEVICE COLUMN VECTOR WAS LEAKED (ID: " + id + " " + Long.toHexString(address)+ ")");
logRefCountDebug("Leaked vector");
}
}
return neededCleanup;
}
@Override
public boolean isClean() {
return viewHandle == 0 && columnHandle == 0 && toClose.isEmpty();
}
}
/////////////////////////////////////////////////////////////////////////////
// BUILDER
/////////////////////////////////////////////////////////////////////////////
/**
* Create a new vector.
* @param type the type of vector to build.
* @param rows maximum number of rows that the vector can hold.
* @param init what will initialize the vector.
* @return the created vector.
*/
public static ColumnVector build(DType type, int rows, Consumer<Builder> init) {
try (Builder builder = HostColumnVector.builder(type, rows)) {
init.accept(builder);
return builder.buildAndPutOnDevice();
}
}
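/**
 * Create a new string vector.
 * @param rows maximum number of rows that the vector can hold.
 * @param stringBufferSize size in bytes of the buffer used to hold the string data.
 * @param init what will initialize the vector.
 * @return the created vector.
 */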
public static ColumnVector build(int rows, long stringBufferSize, Consumer<Builder> init) {
try (Builder builder = HostColumnVector.builder(rows, stringBufferSize)) {
init.accept(builder);
return builder.buildAndPutOnDevice();
}
}
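/*
 * A minimal sketch (not part of the original source): the Consumer<Builder> hook
 * appends host-side values before the data is copied to the device.
 *
 *   try (ColumnVector cv =
 *            ColumnVector.build(DType.INT32, 3, (b) -> b.append(1).append(2).append(3))) {
 *     // cv holds [1, 2, 3] on the device
 *   }
 */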
/**
* Create a new vector from the given values.
*/
public static ColumnVector boolFromBytes(byte... values) {
return build(DType.BOOL8, values.length, (b) -> b.appendArray(values));
}
/**
* This method is evolving, unstable and currently test only.
* Please use with caution and expect it to change in the future.
*/
public static<T> ColumnVector fromLists(HostColumnVector.DataType dataType, List<T>... lists) {
try (HostColumnVector host = HostColumnVector.fromLists(dataType, lists)) {
return host.copyToDevice();
}
}
/**
* This method is evolving, unstable and currently test only.
* Please use with caution and expect it to change in the future.
*/
public static ColumnVector fromStructs(HostColumnVector.DataType dataType,
List<HostColumnVector.StructData> lists) {
try (HostColumnVector host = HostColumnVector.fromStructs(dataType, lists)) {
return host.copyToDevice();
}
}
/**
* This method is evolving, unstable and currently test only.
* Please use with caution and expect it to change in the future.
*/
public static ColumnVector fromStructs(HostColumnVector.DataType dataType,
HostColumnVector.StructData... lists) {
try (HostColumnVector host = HostColumnVector.fromStructs(dataType, lists)) {
return host.copyToDevice();
}
}
/**
* This method is evolving, unstable and currently test only.
* Please use with caution and expect it to change in the future.
*/
public static ColumnVector emptyStructs(HostColumnVector.DataType dataType, long numRows) {
try (HostColumnVector host = HostColumnVector.emptyStructs(dataType, numRows)) {
return host.copyToDevice();
}
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector fromBooleans(boolean... values) {
byte[] bytes = new byte[values.length];
for (int i = 0; i < values.length; i++) {
bytes[i] = values[i] ? (byte) 1 : (byte) 0;
}
return build(DType.BOOL8, values.length, (b) -> b.appendArray(bytes));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector fromBytes(byte... values) {
return build(DType.INT8, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
* <p>
* Java does not have an unsigned byte type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromUnsignedBytes(byte... values) {
return build(DType.UINT8, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector fromShorts(short... values) {
return build(DType.INT16, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
* <p>
* Java does not have an unsigned short type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromUnsignedShorts(short... values) {
return build(DType.UINT16, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector fromInts(int... values) {
return build(DType.INT32, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
* <p>
* Java does not have an unsigned int type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromUnsignedInts(int... values) {
return build(DType.UINT32, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector fromLongs(long... values) {
return build(DType.INT64, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
* <p>
* Java does not have an unsigned long type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromUnsignedLongs(long... values) {
return build(DType.UINT64, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector fromFloats(float... values) {
return build(DType.FLOAT32, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector fromDoubles(double... values) {
return build(DType.FLOAT64, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector daysFromInts(int... values) {
return build(DType.TIMESTAMP_DAYS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector durationSecondsFromLongs(long... values) {
return build(DType.DURATION_SECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector timestampSecondsFromLongs(long... values) {
return build(DType.TIMESTAMP_SECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector durationDaysFromInts(int... values) {
return build(DType.DURATION_DAYS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector durationMilliSecondsFromLongs(long... values) {
return build(DType.DURATION_MILLISECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector timestampMilliSecondsFromLongs(long... values) {
return build(DType.TIMESTAMP_MILLISECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector durationMicroSecondsFromLongs(long... values) {
return build(DType.DURATION_MICROSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector timestampMicroSecondsFromLongs(long... values) {
return build(DType.TIMESTAMP_MICROSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector durationNanoSecondsFromLongs(long... values) {
return build(DType.DURATION_NANOSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new vector from the given values.
*/
public static ColumnVector timestampNanoSecondsFromLongs(long... values) {
return build(DType.TIMESTAMP_NANOSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
* Create a new decimal vector from unscaled values (int array) and scale.
* The created vector is of type DType.DECIMAL32, whose max precision is 9.
   * Note: the scale here is the negation of the scale in [[java.math.BigDecimal]].
*/
public static ColumnVector decimalFromInts(int scale, int... values) {
try (HostColumnVector host = HostColumnVector.decimalFromInts(scale, values)) {
return host.copyToDevice();
}
}
/**
* Create a new decimal vector from boxed unscaled values (Integer array) and scale.
* The created vector is of type DType.DECIMAL32, whose max precision is 9.
   * Note: the scale here is the negation of the scale in [[java.math.BigDecimal]].
*/
public static ColumnVector decimalFromBoxedInts(int scale, Integer... values) {
try (HostColumnVector host = HostColumnVector.decimalFromBoxedInts(scale, values)) {
return host.copyToDevice();
}
}
/**
* Create a new decimal vector from unscaled values (long array) and scale.
* The created vector is of type DType.DECIMAL64, whose max precision is 18.
   * Note: the scale here is the negation of the scale in [[java.math.BigDecimal]].
*/
public static ColumnVector decimalFromLongs(int scale, long... values) {
try (HostColumnVector host = HostColumnVector.decimalFromLongs(scale, values)) {
return host.copyToDevice();
}
}
/**
* Create a new decimal vector from boxed unscaled values (Long array) and scale.
* The created vector is of type DType.DECIMAL64, whose max precision is 18.
   * Note: the scale here is the negation of the scale in [[java.math.BigDecimal]].
*/
public static ColumnVector decimalFromBoxedLongs(int scale, Long... values) {
try (HostColumnVector host = HostColumnVector.decimalFromBoxedLongs(scale, values)) {
return host.copyToDevice();
}
}
/**
* Create a new decimal vector from double floats with specific DecimalType and RoundingMode.
* All doubles will be rescaled if necessary, according to scale of input DecimalType and RoundingMode.
* If any overflow occurs in extracting integral part, an IllegalArgumentException will be thrown.
* This API is inefficient because of slow double -> decimal conversion, so it is mainly for testing.
   * Note: the scale here is the negation of the scale in [[java.math.BigDecimal]].
*/
public static ColumnVector decimalFromDoubles(DType type, RoundingMode mode, double... values) {
try (HostColumnVector host = HostColumnVector.decimalFromDoubles(type, mode, values)) {
return host.copyToDevice();
}
}
/**
   * Create a new decimal vector from BigIntegers.
   * Note: the scale here is the negation of the scale in [[java.math.BigDecimal]].
*/
public static ColumnVector decimalFromBigInt(int scale, BigInteger... values) {
    try (HostColumnVector host = HostColumnVector.decimalFromBigIntegers(scale, values)) {
      return host.copyToDevice();
    }
}
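  // Editor's note (not part of the original source): the scale convention used by the
  // decimal factories above is the negation of java.math.BigDecimal's scale. For example,
  // BigDecimal("1.23") has scale 2, while the same value here is unscaled 123 with scale -2:
  //
  //   try (ColumnVector cv = ColumnVector.decimalFromInts(-2, 123)) {
  //     // a single DECIMAL32 row whose value is 1.23
  //   }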
/**
* Create a new string vector from the given values. This API
* supports inline nulls. This is really intended to be used only for testing as
* it is slow and memory intensive to translate between java strings and UTF8 strings.
*/
public static ColumnVector fromStrings(String... values) {
try (HostColumnVector host = HostColumnVector.fromStrings(values)) {
return host.copyToDevice();
}
}
/**
* Create a new string vector from the given values. This API
* supports inline nulls.
*/
public static ColumnVector fromUTF8Strings(byte[]... values) {
try (HostColumnVector host = HostColumnVector.fromUTF8Strings(values)) {
return host.copyToDevice();
}
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than building from primitive array of unscaledValues.
* Notice:
   * 1. All input BigDecimals should share the same scale.
* 2. The scale will be zero if all input values are null.
*/
public static ColumnVector fromDecimals(BigDecimal... values) {
try (HostColumnVector hcv = HostColumnVector.fromDecimals(values)) {
return hcv.copyToDevice();
}
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector fromBoxedBooleans(Boolean... values) {
return build(DType.BOOL8, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector fromBoxedBytes(Byte... values) {
return build(DType.INT8, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
* <p>
* Java does not have an unsigned byte type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromBoxedUnsignedBytes(Byte... values) {
return build(DType.UINT8, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector fromBoxedShorts(Short... values) {
return build(DType.INT16, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
* <p>
* Java does not have an unsigned short type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromBoxedUnsignedShorts(Short... values) {
return build(DType.UINT16, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector fromBoxedInts(Integer... values) {
return build(DType.INT32, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
* <p>
* Java does not have an unsigned int type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromBoxedUnsignedInts(Integer... values) {
return build(DType.UINT32, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector fromBoxedLongs(Long... values) {
return build(DType.INT64, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
* <p>
* Java does not have an unsigned long type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static ColumnVector fromBoxedUnsignedLongs(Long... values) {
return build(DType.UINT64, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector fromBoxedFloats(Float... values) {
return build(DType.FLOAT32, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector fromBoxedDoubles(Double... values) {
return build(DType.FLOAT64, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector timestampDaysFromBoxedInts(Integer... values) {
return build(DType.TIMESTAMP_DAYS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector durationDaysFromBoxedInts(Integer... values) {
return build(DType.DURATION_DAYS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector durationSecondsFromBoxedLongs(Long... values) {
return build(DType.DURATION_SECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector timestampSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_SECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector durationMilliSecondsFromBoxedLongs(Long... values) {
return build(DType.DURATION_MILLISECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector timestampMilliSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_MILLISECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector durationMicroSecondsFromBoxedLongs(Long... values) {
return build(DType.DURATION_MICROSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector timestampMicroSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_MICROSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector durationNanoSecondsFromBoxedLongs(Long... values) {
return build(DType.DURATION_NANOSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static ColumnVector timestampNanoSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_NANOSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Creates an empty column according to the data type.
*
* It will create all the nested columns by iterating all the children in the input
* type object 'colType'.
*
   * The performance is not good, so use it carefully. We may want to move this implementation
   * to the native side once we figure out a way to pass the nested data type to native code.
*
* @param colType the data type of the empty column
   * @return an empty ColumnVector with its children. Each child column contains zero elements.
* Users should close the ColumnVector to avoid memory leak.
*/
public static ColumnVector empty(HostColumnVector.DataType colType) {
if (colType == null || colType.getType() == null) {
throw new IllegalArgumentException("The data type and its 'DType' should NOT be null.");
}
if (colType instanceof HostColumnVector.BasicType) {
// Non nested type
DType dt = colType.getType();
return new ColumnVector(makeEmptyCudfColumn(dt.typeId.getNativeId(), dt.getScale()));
} else if (colType instanceof HostColumnVector.ListType) {
// List type
assert colType.getNumChildren() == 1 : "List type requires one child type";
try (ColumnVector child = empty(colType.getChild(0))) {
return makeList(child);
}
} else if (colType instanceof HostColumnVector.StructType) {
// Struct type
ColumnVector[] children = new ColumnVector[colType.getNumChildren()];
try {
for (int i = 0; i < children.length; i++) {
children[i] = empty(colType.getChild(i));
}
return makeStruct(children);
} finally {
for (ColumnVector cv : children) {
if (cv != null) cv.close();
}
}
} else {
throw new IllegalArgumentException("Unsupported data type: " + colType);
}
}
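  // Editor's usage sketch (not part of the original source): building an empty
  // LIST<INT32> column with empty() above, using the public HostColumnVector
  // nested DataType classes.
  //
  //   HostColumnVector.DataType listOfInts = new HostColumnVector.ListType(
  //       true, new HostColumnVector.BasicType(true, DType.INT32));
  //   try (ColumnVector emptyList = ColumnVector.empty(listOfInts)) {
  //     // zero rows, but the full nested structure (offsets + child) is present
  //   }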
static ColumnVector[] getColumnVectorsFromPointers(long[] nativeHandles) {
ColumnVector[] columns = new ColumnVector[nativeHandles.length];
try {
for (int i = 0; i < nativeHandles.length; i++) {
long nativeHandle = nativeHandles[i];
        // Zero out the entry in the array so the catch block below does not delete the
        // handle a second time; once constructed, the ColumnVector owns and cleans it up.
nativeHandles[i] = 0;
columns[i] = new ColumnVector(nativeHandle);
}
return columns;
} catch (Throwable t) {
for (ColumnVector columnVector : columns) {
if (columnVector != null) {
try {
columnVector.close();
} catch (Throwable s) {
t.addSuppressed(s);
}
}
}
for (long nativeHandle : nativeHandles) {
if (nativeHandle != 0) {
try {
deleteCudfColumn(nativeHandle);
} catch (Throwable s) {
t.addSuppressed(s);
}
}
}
throw t;
}
}
}
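// Editor's usage sketch (not part of the original source): exercising a few of the
// factory methods above. All of them allocate device memory, so the vectors are
// closed via try-with-resources.
class ColumnVectorFactoryExample {
  public static void main(String[] args) {
    try (ColumnVector ints = ColumnVector.fromInts(1, 2, 3);
         ColumnVector withNulls = ColumnVector.fromBoxedInts(1, null, 3);
         ColumnVector strs = ColumnVector.fromStrings("a", null, "c")) {
      System.out.println("rows: " + ints.getRowCount());
    }
  }
}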
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/NativeDepsLoader.java
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
* This class will load the native dependencies.
*/
public class NativeDepsLoader {
private static final Logger log = LoggerFactory.getLogger(NativeDepsLoader.class);
/**
* Set this system property to true to prevent unpacked dependency files from
* being deleted immediately after they are loaded. The files will still be
* scheduled for deletion upon exit.
*/
private static final Boolean preserveDepsAfterLoad = Boolean.getBoolean(
"ai.rapids.cudf.preserve-dependencies");
/**
* Defines the loading order for the dependencies. Dependencies are loaded in
* stages where all the dependencies in a stage are not interdependent and
* therefore can be loaded in parallel. All dependencies within an earlier
* stage are guaranteed to have finished loading before any dependencies in
* subsequent stages are loaded.
*/
private static final String[][] loadOrder = new String[][]{
new String[]{
"nvcomp_bitcomp", "nvcomp_gdeflate"
},
new String[]{
"nvcomp"
},
new String[]{
"cudf"
},
new String[]{
"cudfjni"
}
};
private static final ClassLoader loader = NativeDepsLoader.class.getClassLoader();
private static boolean loaded = false;
/**
* Load the native libraries needed for libcudf, if not loaded already.
*/
public static synchronized void loadNativeDeps() {
if (!loaded) {
try {
loadNativeDeps(loadOrder);
loaded = true;
} catch (Throwable t) {
log.error("Could not load cudf jni library...", t);
}
}
}
/**
* Allows other libraries to reuse the same native deps loading logic. Libraries will be searched
* for under ${os.arch}/${os.name}/ in the class path using the class loader for this class.
* <br/>
   * Because this just loads the libraries, and loading the libraries needs to be a
   * singleton operation, it is recommended that any library using this provide its own
   * wrapper function similar to
* <pre>
* private static boolean loaded = false;
* static synchronized void loadNativeDeps() {
* if (!loaded) {
* try {
   *       // If you also depend on the cudf library being loaded, be sure it is loaded
* // first
* ai.rapids.cudf.NativeDepsLoader.loadNativeDeps();
* ai.rapids.cudf.NativeDepsLoader.loadNativeDeps(new String[]{...});
* loaded = true;
* } catch (Throwable t) {
* log.error("Could not load ...", t);
* }
* }
* }
* </pre>
* This function should be called from the static initialization block of any class that uses
* JNI. For example
* <pre>
* public class UsesJNI {
* static {
* MyNativeDepsLoader.loadNativeDeps();
* }
* }
* </pre>
* @param loadOrder the base name of the libraries. For example libfoo.so would be passed in as
* "foo". The libraries are loaded in the order provided.
* @throws IOException on any error trying to load the libraries.
*/
public static void loadNativeDeps(String[] loadOrder) throws IOException {
String os = System.getProperty("os.name");
String arch = System.getProperty("os.arch");
for (String toLoad : loadOrder) {
loadDep(os, arch, toLoad);
}
}
/**
* Load native dependencies in stages, where the dependency libraries in each stage
* are loaded only after all libraries in earlier stages have completed loading.
* @param loadOrder array of stages with an array of dependency library names in each stage
* @throws IOException on any error trying to load the libraries
*/
private static void loadNativeDeps(String[][] loadOrder) throws IOException {
String os = System.getProperty("os.name");
String arch = System.getProperty("os.arch");
ExecutorService executor = Executors.newCachedThreadPool();
List<List<Future<File>>> allFileFutures = new ArrayList<>();
// Start unpacking and creating the temporary files for each dependency.
// Unpacking a dependency does not depend on stage order.
for (String[] stageDependencies : loadOrder) {
List<Future<File>> stageFileFutures = new ArrayList<>();
allFileFutures.add(stageFileFutures);
for (String name : stageDependencies) {
stageFileFutures.add(executor.submit(() -> createFile(os, arch, name)));
}
}
List<Future<?>> loadCompletionFutures = new ArrayList<>();
    // Proceed stage-by-stage, waiting for each stage's dependency files to be
    // produced, then submit them to the thread pool to be loaded.
for (List<Future<File>> stageFileFutures : allFileFutures) {
// Submit all dependencies in the stage to be loaded in parallel
loadCompletionFutures.clear();
for (Future<File> fileFuture : stageFileFutures) {
loadCompletionFutures.add(executor.submit(() -> loadDep(fileFuture)));
}
// Wait for all dependencies in this stage to have been loaded
for (Future<?> loadCompletionFuture : loadCompletionFutures) {
try {
loadCompletionFuture.get();
} catch (ExecutionException | InterruptedException e) {
throw new IOException("Error loading dependencies", e);
}
}
}
executor.shutdownNow();
}
private static void loadDep(String os, String arch, String baseName) throws IOException {
File path = createFile(os, arch, baseName);
loadDep(path);
}
/** Load a library at the specified path */
private static void loadDep(File path) {
System.load(path.getAbsolutePath());
if (!preserveDepsAfterLoad) {
path.delete();
}
}
/** Load a library, waiting for the specified future to produce the path before loading */
private static void loadDep(Future<File> fileFuture) {
File path;
try {
path = fileFuture.get();
} catch (ExecutionException | InterruptedException e) {
throw new RuntimeException("Error loading dependencies", e);
}
loadDep(path);
}
/** Extract the contents of a library resource into a temporary file */
private static File createFile(String os, String arch, String baseName) throws IOException {
String path = arch + "/" + os + "/" + System.mapLibraryName(baseName);
File loc;
URL resource = loader.getResource(path);
if (resource == null) {
throw new FileNotFoundException("Could not locate native dependency " + path);
}
try (InputStream in = resource.openStream()) {
loc = File.createTempFile(baseName, ".so");
loc.deleteOnExit();
try (OutputStream out = new FileOutputStream(loc)) {
byte[] buffer = new byte[1024 * 16];
int read = 0;
while ((read = in.read(buffer)) >= 0) {
out.write(buffer, 0, read);
}
}
}
return loc;
}
public static boolean libraryLoaded() {
if (!loaded) {
loadNativeDeps();
}
return loaded;
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/WriterOptions.java
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
class WriterOptions {
private final String[] columnNames;
private final boolean[] columnNullability;
<T extends WriterBuilder> WriterOptions(T builder) {
    columnNames = builder.columnNames.toArray(new String[0]);
    columnNullability = new boolean[builder.columnNullability.size()];
    for (int i = 0; i < builder.columnNullability.size(); i++) {
      columnNullability[i] = builder.columnNullability.get(i);
}
}
public String[] getColumnNames() {
return columnNames;
}
public boolean[] getColumnNullability() {
return columnNullability;
}
protected static class WriterBuilder<T extends WriterBuilder> {
final List<String> columnNames = new ArrayList<>();
final List<Boolean> columnNullability = new ArrayList<>();
/**
     * Add column name(s). The columns are assumed to be nullable. For Parquet, column
     * names are not optional.
     * @param columnNames the column names to add
*/
public T withColumnNames(String... columnNames) {
this.columnNames.addAll(Arrays.asList(columnNames));
for (int i = 0; i < columnNames.length; i++) {
this.columnNullability.add(true);
}
return (T) this;
}
/**
     * Add column name(s) that are not nullable.
     * @param columnNames the non-nullable column names to add
*/
public T withNotNullableColumnNames(String... columnNames) {
this.columnNames.addAll(Arrays.asList(columnNames));
for (int i = 0; i < columnNames.length; i++) {
this.columnNullability.add(false);
}
return (T) this;
}
}
}
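// Editor's usage sketch (not part of the original source): how the generic builder is
// meant to be used through a concrete subclass. "SketchWriterOptions" is hypothetical;
// the real ORC/Parquet writer options classes follow the same pattern.
class SketchWriterOptionsExample {
  static class SketchWriterOptions extends WriterOptions {
    SketchWriterOptions(Builder builder) { super(builder); }
    static class Builder extends WriterOptions.WriterBuilder<Builder> {
      SketchWriterOptions build() { return new SketchWriterOptions(this); }
    }
  }
  public static void main(String[] args) {
    SketchWriterOptions opts = new SketchWriterOptions.Builder()
        .withColumnNames("id", "name")       // nullable columns
        .withNotNullableColumnNames("key")   // non-nullable column
        .build();
    System.out.println(java.util.Arrays.toString(opts.getColumnNames()));
  }
}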
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/DataSourceHelper.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* This is here because we need some JNI methods to work with a DataSource, but
* we also want to cache callback methods at startup for performance reasons. If
* we put both in the same class we will get a deadlock because of how we load
* the JNI. We have a static block that blocks loading the class until the JNI
* library is loaded and the JNI library cannot load until the class is loaded
* and cached. This breaks the loop.
*/
class DataSourceHelper {
static {
NativeDepsLoader.loadNativeDeps();
}
static long createWrapperDataSource(DataSource ds) {
return createWrapperDataSource(ds, ds.size(), ds.supportsDeviceRead(),
ds.getDeviceReadCutoff());
}
private static native long createWrapperDataSource(DataSource ds, long size,
boolean deviceReadSupport,
long deviceReadCutoff);
static native void destroyWrapperDataSource(long handle);
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ORCOptions.java
|
/*
*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Options for reading an ORC file.
*/
public class ORCOptions extends ColumnFilterOptions {
public static ORCOptions DEFAULT = new ORCOptions(new Builder());
private final boolean useNumPyTypes;
private final DType unit;
private final String[] decimal128Columns;
private ORCOptions(Builder builder) {
super(builder);
decimal128Columns = builder.decimal128Columns.toArray(new String[0]);
useNumPyTypes = builder.useNumPyTypes;
unit = builder.unit;
}
boolean usingNumPyTypes() {
return useNumPyTypes;
}
DType timeUnit() {
return unit;
}
String[] getDecimal128Columns() {
return decimal128Columns;
}
public static Builder builder() {
return new Builder();
}
public static class Builder extends ColumnFilterOptions.Builder<Builder> {
private boolean useNumPyTypes = true;
private DType unit = DType.EMPTY;
final List<String> decimal128Columns = new ArrayList<>();
/**
* Specify whether the parser should implicitly promote TIMESTAMP_DAYS
* columns to TIMESTAMP_MILLISECONDS for compatibility with NumPy.
*
* @param useNumPyTypes true to request this conversion, false to avoid.
* @return builder for chaining
*/
public Builder withNumPyTypes(boolean useNumPyTypes) {
this.useNumPyTypes = useNumPyTypes;
return this;
}
/**
* Specify the time unit to use when returning timestamps.
* @param unit default unit of time specified by the user
* @return builder for chaining
*/
public ORCOptions.Builder withTimeUnit(DType unit) {
assert unit.isTimestampType();
this.unit = unit;
return this;
}
/**
* Specify decimal columns which shall be read as DECIMAL128. Otherwise, decimal columns
* will be read as DECIMAL64 by default in ORC.
*
     * For child columns of nested types, the names of their parent columns need to be
     * prepended as a prefix to avoid ambiguity. For struct columns, the names of child
     * columns are formatted as `{struct_col_name}.{child_col_name}`. For list columns,
     * the data (child) columns are named `{list_col_name}.1`.
*
     * @param names names of the columns which should be read as DECIMAL128
* @return builder for chaining
*/
public Builder decimal128Column(String... names) {
decimal128Columns.addAll(Arrays.asList(names));
return this;
}
public ORCOptions build() { return new ORCOptions(this); }
}
}
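// Editor's usage sketch (not part of the original source): configuring an ORC read.
// includeColumn(...) is assumed to come from the ColumnFilterOptions.Builder base class.
class ORCOptionsExample {
  public static void main(String[] args) {
    ORCOptions opts = ORCOptions.builder()
        .includeColumn("a", "b")                    // only read these columns
        .withTimeUnit(DType.TIMESTAMP_MICROSECONDS) // force the timestamp unit
        .decimal128Column("b")                      // read column b as DECIMAL128
        .build();
    // opts would then be passed to a reader such as Table.readORC(opts, file)
  }
}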
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CudaMemcpyKind.java
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
enum CudaMemcpyKind {
HOST_TO_HOST(0), /*< Host -> Host */
HOST_TO_DEVICE(1), /*< Host -> Device */
DEVICE_TO_HOST(2), /*< Device -> Host */
DEVICE_TO_DEVICE(3), /*< Device -> Device */
DEFAULT(4); /*< Direction of the transfer is inferred from the pointer values. Requires
unified virtual addressing */
private final int value;
CudaMemcpyKind(int value) {
this.value = value;
}
int getValue() {
return value;
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/GroupByAggregationOnColumn.java
|
/*
*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* A GroupByAggregation for a specific column in a table.
*/
public final class GroupByAggregationOnColumn {
protected final GroupByAggregation wrapped;
protected final int columnIndex;
GroupByAggregationOnColumn(GroupByAggregation wrapped, int columnIndex) {
this.wrapped = wrapped;
this.columnIndex = columnIndex;
}
public int getColumnIndex() {
return columnIndex;
}
GroupByAggregation getWrapped() {
return wrapped;
}
@Override
public int hashCode() {
return 31 * wrapped.hashCode() + columnIndex;
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (other instanceof GroupByAggregationOnColumn) {
GroupByAggregationOnColumn o = (GroupByAggregationOnColumn) other;
return wrapped.equals(o.wrapped) && columnIndex == o.columnIndex;
}
return false;
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RoundMode.java
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* Rounding modes supported in round method.
* HALF_UP : Rounding mode to round towards "nearest neighbor". If both neighbors are
* equidistant, then round up.
* HALF_EVEN : Rounding mode to round towards the "nearest neighbor". If both neighbors are
* equidistant, round towards the even neighbor.
*/
public enum RoundMode {
HALF_UP(0),
HALF_EVEN(1);
final int nativeId;
RoundMode(int nativeId) { this.nativeId = nativeId; }
}
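// Editor's usage sketch (not part of the original source): rounding a column with an
// explicit mode. Assumes the ColumnView.round(int, RoundMode) overload from this API.
// On a tie, HALF_UP rounds 2.5 up to 3 while HALF_EVEN rounds it to the even value 2.
class RoundModeExample {
  public static void main(String[] args) {
    try (ColumnVector values = ColumnVector.fromDoubles(2.345, 7.891);
         ColumnVector rounded = values.round(2, RoundMode.HALF_EVEN)) {
      // rounded holds the inputs rounded to two decimal places
    }
  }
}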
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CuFile.java
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
/**
* JNI wrapper for accessing the cuFile API.
* <p>
* Using this wrapper requires GPUDirect Storage (GDS)/cuFile to be installed in the target
* environment, and the jar to be built with `USE_GDS=ON`. Otherwise it will throw an exception when
* loading.
* <p>
* The Java APIs are experimental and subject to change.
*
* @see <a href="https://docs.nvidia.com/gpudirect-storage/">GDS documentation</a>
*/
public class CuFile {
private static final Logger log = LoggerFactory.getLogger(CuFile.class);
private static boolean initialized = false;
private static CuFileDriver driver;
static {
initialize();
}
/**
* Load the native libraries needed for libcufilejni, if not loaded already; open the cuFile
* driver, and add a shutdown hook to close it.
*/
static synchronized void initialize() {
if (!initialized) {
try {
NativeDepsLoader.loadNativeDeps(new String[]{"cufilejni"});
driver = new CuFileDriver();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
driver.close();
}));
initialized = true;
} catch (Throwable t) {
// Cannot throw an exception here as the CI/CD machine may not have GDS installed.
log.error("Could not load cuFile jni library...", t);
}
}
}
/**
* Check if the libcufilejni library is loaded.
*
* @return true if the libcufilejni library has been successfully loaded.
*/
public static boolean libraryLoaded() {
return initialized;
}
/**
* Write a device buffer to a given file path synchronously.
* <p>
* This method is NOT thread safe if the path points to the same file on disk.
*
* @param path The file path to copy to.
* @param file_offset The file offset from which to write the buffer.
* @param buffer The device buffer to copy from.
*/
public static void writeDeviceBufferToFile(File path, long file_offset,
BaseDeviceMemoryBuffer buffer) {
writeDeviceMemoryToFile(path, file_offset, buffer.getAddress(), buffer.getLength());
}
/**
* Write device memory to a given file path synchronously.
* <p>
* This method is NOT thread safe if the path points to the same file on disk.
*
* @param path The file path to copy to.
* @param file_offset The file offset from which to write the buffer.
* @param address The device memory address to copy from.
* @param length The length to copy.
*/
public static void writeDeviceMemoryToFile(File path, long file_offset, long address,
long length) {
writeToFile(path.getAbsolutePath(), file_offset, address, length);
}
/**
* Append a device buffer to a given file path synchronously.
* <p>
* This method is NOT thread safe if the path points to the same file on disk.
*
* @param path The file path to copy to.
* @param buffer The device buffer to copy from.
* @return The file offset from which the buffer was appended.
*/
public static long appendDeviceBufferToFile(File path, BaseDeviceMemoryBuffer buffer) {
return appendDeviceMemoryToFile(path, buffer.getAddress(), buffer.getLength());
}
/**
* Append device memory to a given file path synchronously.
* <p>
* This method is NOT thread safe if the path points to the same file on disk.
*
* @param path The file path to copy to.
* @param address The device memory address to copy from.
* @param length The length to copy.
* @return The file offset from which the buffer was appended.
*/
public static long appendDeviceMemoryToFile(File path, long address, long length) {
return appendToFile(path.getAbsolutePath(), address, length);
}
/**
* Read a file into a device buffer synchronously.
* <p>
* This method is NOT thread safe if the path points to the same file on disk.
*
* @param buffer The device buffer to copy into.
* @param path The file path to copy from.
* @param fileOffset The file offset from which to copy the content.
*/
public static void readFileToDeviceBuffer(BaseDeviceMemoryBuffer buffer, File path,
long fileOffset) {
readFileToDeviceMemory(buffer.getAddress(), buffer.getLength(), path, fileOffset);
}
/**
* Read a file into device memory synchronously.
* <p>
* This method is NOT thread safe if the path points to the same file on disk.
*
* @param address The device memory address to read into.
* @param length The length to read.
* @param path The file path to copy from.
* @param fileOffset The file offset from which to copy the content.
*/
public static void readFileToDeviceMemory(long address, long length, File path, long fileOffset) {
readFromFile(address, length, path.getAbsolutePath(), fileOffset);
}
private static native void writeToFile(String path, long file_offset, long address, long length);
private static native long appendToFile(String path, long address, long length);
private static native void readFromFile(long address, long length, String path, long fileOffset);
}
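// Editor's usage sketch (not part of the original source): a write/read round trip
// through cuFile, guarded by libraryLoaded() since GDS may be absent. The file path
// is hypothetical.
class CuFileExample {
  public static void main(String[] args) {
    if (!CuFile.libraryLoaded()) {
      System.out.println("cuFile/GDS not available; skipping");
      return;
    }
    java.io.File f = new java.io.File("/tmp/cufile-example.bin");
    try (DeviceMemoryBuffer src = DeviceMemoryBuffer.allocate(1024);
         DeviceMemoryBuffer dst = DeviceMemoryBuffer.allocate(1024)) {
      CuFile.writeDeviceBufferToFile(f, 0, src);   // device -> file at offset 0
      CuFile.readFileToDeviceBuffer(dst, f, 0);    // file -> device from offset 0
    }
  }
}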
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmWrappingDeviceMemoryResource.java
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* A resource that wraps another RmmDeviceMemoryResource
*/
public abstract class RmmWrappingDeviceMemoryResource<C extends RmmDeviceMemoryResource>
implements RmmDeviceMemoryResource {
protected C wrapped = null;
public RmmWrappingDeviceMemoryResource(C wrapped) {
this.wrapped = wrapped;
}
/**
* Get the resource that this is wrapping. Be very careful when using this as the returned value
* should not be added to another resource until it has been released.
* @return the resource that this is wrapping.
*/
public C getWrapped() {
return this.wrapped;
}
/**
* Release the wrapped device memory resource and close this.
* @return the wrapped DeviceMemoryResource.
*/
public C releaseWrapped() {
C ret = this.wrapped;
this.wrapped = null;
close();
return ret;
}
@Override
public void close() {
if (wrapped != null) {
wrapped.close();
wrapped = null;
}
}
}
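// Editor's sketch (not part of the original source): a minimal wrapping resource that
// simply exposes the wrapped resource's native handle. Real adaptors (tracking,
// limiting, logging) would create their own native resource around the wrapped handle.
class PassThroughMemoryResource
    extends RmmWrappingDeviceMemoryResource<RmmDeviceMemoryResource> {
  PassThroughMemoryResource(RmmDeviceMemoryResource wrapped) {
    super(wrapped);
  }
  @Override
  public long getHandle() {
    return getWrapped().getHandle(); // delegate; no native state of our own
  }
}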
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmCudaMemoryResource.java
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* A device memory resource that uses `cudaMalloc` and `cudaFree` for allocation and deallocation.
*/
public class RmmCudaMemoryResource implements RmmDeviceMemoryResource {
private long handle = 0;
public RmmCudaMemoryResource() {
handle = Rmm.newCudaMemoryResource();
}
@Override
public long getHandle() {
return handle;
}
@Override
public void close() {
if (handle != 0) {
Rmm.releaseCudaMemoryResource(handle);
handle = 0;
}
}
@Override
public String toString() {
return Long.toHexString(getHandle()) + "/CUDA()";
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmManagedMemoryResource.java
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * A device memory resource that uses `cudaMallocManaged` for allocation and `cudaFree`
 * for deallocation.
*/
public class RmmManagedMemoryResource implements RmmDeviceMemoryResource {
private long handle = 0;
public RmmManagedMemoryResource() {
handle = Rmm.newManagedMemoryResource();
}
@Override
public long getHandle() {
return handle;
}
@Override
public void close() {
if (handle != 0) {
Rmm.releaseManagedMemoryResource(handle);
handle = 0;
}
}
@Override
public String toString() {
return Long.toHexString(getHandle()) + "/CUDA_MANAGED()";
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmDeviceMemoryResource.java
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* A resource that allocates/deallocates device memory. This is not intended to be something that
* a user will just subclass. This is intended to be a wrapper around a C++ class that RMM will
* use directly.
*/
public interface RmmDeviceMemoryResource extends AutoCloseable {
/**
* Returns a pointer to the underlying C++ class that implements rmm::mr::device_memory_resource
*/
long getHandle();
  // Override close() to drop the checked exception declared by AutoCloseable.
void close();
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/BaseDeviceMemoryBuffer.java
|
/*
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* Base class for all MemoryBuffers that are in device memory.
*/
public abstract class BaseDeviceMemoryBuffer extends MemoryBuffer {
protected BaseDeviceMemoryBuffer(long address, long length, MemoryBuffer parent) {
super(address, length, parent);
}
protected BaseDeviceMemoryBuffer(long address, long length, MemoryBufferCleaner cleaner) {
super(address, length, cleaner);
}
/**
* Copy a subset of src to this buffer starting at destOffset.
* @param destOffset the offset in this to start copying from.
* @param src what to copy from
   * @param srcOffset offset into src to start at
* @param length how many bytes to copy
*/
public final void copyFromHostBuffer(long destOffset, HostMemoryBuffer src, long srcOffset, long length) {
addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
Cuda.memcpy(address + destOffset, src.address + srcOffset, length, CudaMemcpyKind.HOST_TO_DEVICE);
}
/**
* Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
* The copy has completed when this returns, but the memory copy could overlap with
* operations occurring on other streams.
* @param destOffset the offset in this to start copying from.
* @param src what to copy from
   * @param srcOffset offset into src to start at
* @param length how many bytes to copy
* @param stream CUDA stream to use
*/
public final void copyFromHostBuffer(long destOffset, HostMemoryBuffer src,
long srcOffset, long length, Cuda.Stream stream) {
addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
Cuda.memcpy(address + destOffset, src.address + srcOffset, length,
CudaMemcpyKind.HOST_TO_DEVICE, stream);
}
/**
* Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
* The copy is async and may not have completed when this returns.
* @param destOffset the offset in this to start copying from.
* @param src what to copy from
   * @param srcOffset offset into src to start at
* @param length how many bytes to copy
* @param stream CUDA stream to use
*/
public final void copyFromHostBufferAsync(long destOffset, HostMemoryBuffer src,
long srcOffset, long length, Cuda.Stream stream) {
addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
Cuda.asyncMemcpy(address + destOffset, src.address + srcOffset, length,
CudaMemcpyKind.HOST_TO_DEVICE, stream);
}
/**
* Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
* The copy is async and may not have completed when this returns.
* @param destOffset the offset in this to start copying from.
* @param src what to copy from
   * @param srcOffset offset into src to start at
* @param length how many bytes to copy
* @param stream CUDA stream to use
*/
public final void copyFromDeviceBufferAsync(long destOffset, BaseDeviceMemoryBuffer src,
long srcOffset, long length, Cuda.Stream stream) {
addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
Cuda.asyncMemcpy(address + destOffset, src.address + srcOffset, length,
CudaMemcpyKind.DEVICE_TO_DEVICE, stream);
}
/**
* Copy a subset of src to this buffer starting at the beginning of this.
* @param src what to copy from
   * @param srcOffset offset into src to start at
* @param length how many bytes to copy
*/
public final void copyFromHostBuffer(HostMemoryBuffer src, long srcOffset, long length) {
copyFromHostBuffer(0, src, srcOffset, length);
}
/**
* Copy everything from src to this buffer starting at the beginning of this buffer.
* @param src - Buffer to copy data from
*/
public final void copyFromHostBuffer(HostMemoryBuffer src) {
copyFromHostBuffer(0, src, 0, src.length);
}
/**
* Copy entire host buffer starting at the beginning of this buffer using a CUDA stream.
* The copy has completed when this returns, but the memory copy could overlap with
* operations occurring on other streams.
* @param src host buffer to copy from
* @param stream CUDA stream to use
*/
public final void copyFromHostBuffer(HostMemoryBuffer src, Cuda.Stream stream) {
copyFromHostBuffer(0, src, 0, src.length, stream);
}
/**
* Copy entire host buffer starting at the beginning of this buffer using a CUDA stream.
* The copy is async and may not have completed when this returns.
* @param src host buffer to copy from
* @param stream CUDA stream to use
*/
public final void copyFromHostBufferAsync(HostMemoryBuffer src, Cuda.Stream stream) {
copyFromHostBufferAsync(0, src, 0, src.length, stream);
}
/**
* Slice off a part of the device buffer, copying it instead of reference counting it.
* @param offset where to start the slice at.
* @param len how many bytes to slice
* @return a device buffer that will need to be closed independently from this buffer.
*/
public final DeviceMemoryBuffer sliceWithCopy(long offset, long len) {
addressOutOfBoundsCheck(address + offset, len, "slice");
DeviceMemoryBuffer ret = null;
boolean success = false;
try {
ret = DeviceMemoryBuffer.allocate(len);
Cuda.memcpy(ret.getAddress(), getAddress() + offset, len, CudaMemcpyKind.DEVICE_TO_DEVICE);
success = true;
return ret;
} finally {
if (!success && ret != null) {
ret.close();
}
}
}
}
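// Editor's usage sketch (not part of the original source): staging data on the host
// and copying it to the device, first synchronously and then asynchronously on the
// default stream (assuming Cuda.DEFAULT_STREAM from this API).
class DeviceCopyExample {
  public static void main(String[] args) {
    try (HostMemoryBuffer host = HostMemoryBuffer.allocate(64);
         DeviceMemoryBuffer dev = DeviceMemoryBuffer.allocate(64)) {
      host.setLong(0, 42L);                            // stage data on the host
      dev.copyFromHostBuffer(host);                    // blocking host -> device copy
      dev.copyFromHostBufferAsync(host, Cuda.DEFAULT_STREAM);
      Cuda.DEFAULT_STREAM.sync();                      // wait before touching dev again
    }
  }
}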
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/NvtxUniqueRange.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* This class supports start/end NVTX profiling ranges.
*
* Start/end:
*
* The constructor instantiates a new NVTX range and keeps a unique handle that comes back
* from the NVTX api (nvtxRangeId). The handle is used to later close such a range. This type
* of range does not have the same order-of-operation requirements that the push/pop ranges have:
* the `NvtxUniqueRange` instance can be passed to other scopes, and even to other threads
* for the eventual call to close.
*
* It can be used in the same try-with-resources way as push/pop, or interleaved with other
* ranges, like so:
*
* <pre>
* NvtxUniqueRange a = new NvtxUniqueRange("a", NvtxColor.RED);
* NvtxUniqueRange b = new NvtxUniqueRange("b", NvtxColor.BLUE);
* a.close();
* b.close();
* </pre>
*/
public class NvtxUniqueRange implements AutoCloseable {
private static final boolean isEnabled = Boolean.getBoolean("ai.rapids.cudf.nvtx.enabled");
// this is a nvtxRangeId_t in the C++ api side
private final long nvtxRangeId;
// true if this range is already closed
private boolean closed;
static {
if (isEnabled) {
NativeDepsLoader.loadNativeDeps();
}
}
public NvtxUniqueRange(String name, NvtxColor color) {
this(name, color.colorBits);
}
public NvtxUniqueRange(String name, int colorBits) {
if (isEnabled) {
nvtxRangeId = start(name, colorBits);
} else {
// following the implementation in nvtx3, the default value of 0
// is given when NVTX is disabled
nvtxRangeId = 0;
}
}
@Override
public synchronized void close() {
if (closed) {
throw new IllegalStateException(
"Cannot call close on an already closed NvtxUniqueRange!");
}
closed = true;
if (isEnabled) {
end(this.nvtxRangeId);
}
}
private native long start(String name, int colorBits);
private native void end(long nvtxRangeId);
}
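// Editor's usage sketch (not part of the original source): unlike push/pop ranges, a
// unique range may be handed to another thread and closed there. Enable ranges with
// -Dai.rapids.cudf.nvtx.enabled=true.
class NvtxUniqueRangeExample {
  public static void main(String[] args) throws InterruptedException {
    NvtxUniqueRange range = new NvtxUniqueRange("async work", NvtxColor.GREEN);
    Thread worker = new Thread(() -> {
      // ... the traced work happens here ...
      range.close(); // closed on a different thread than the one that opened it
    });
    worker.start();
    worker.join();
  }
}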
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/AggregationOverWindow.java
|
/*
*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* An Aggregation instance that also holds a column number and window metadata so the aggregation
* can be done over a specific window.
*/
public final class AggregationOverWindow {
private final RollingAggregationOnColumn wrapped;
protected final WindowOptions windowOptions;
AggregationOverWindow(RollingAggregationOnColumn wrapped, WindowOptions windowOptions) {
this.wrapped = wrapped;
this.windowOptions = windowOptions;
if (windowOptions == null) {
throw new IllegalArgumentException("WindowOptions cannot be null!");
}
if (windowOptions.getPrecedingCol() != null || windowOptions.getFollowingCol() != null) {
throw new UnsupportedOperationException("Dynamic windows (via columns) are currently unsupported!");
}
}
public WindowOptions getWindowOptions() {
return windowOptions;
}
  @Override
  public int hashCode() {
    return 31 * wrapped.hashCode() + windowOptions.hashCode();
  }
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (other instanceof AggregationOverWindow) {
AggregationOverWindow o = (AggregationOverWindow) other;
return wrapped.equals(o.wrapped) && windowOptions.equals(o.windowOptions);
}
return false;
}
int getColumnIndex() {
return wrapped.getColumnIndex();
}
long createNativeInstance() {
return wrapped.createNativeInstance();
}
long getDefaultOutput() {
return wrapped.getDefaultOutput();
}
}
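// Illustrative sketch (hypothetical, not part of the original source): an
// AggregationOverWindow is normally obtained by pairing a rolling aggregation on a
// column with WindowOptions. RollingAggregation.sum().onColumn(...).overWindow(...)
// is assumed from the wrapped RollingAggregationOnColumn type above; consult that
// class for the exact builder API.
class AggregationOverWindowExample {
  // windowOptions must use fixed (non-column) preceding/following bounds, per the
  // constructor check above
  static AggregationOverWindow sumOver(int columnIndex, WindowOptions windowOptions) {
    return RollingAggregation.sum().onColumn(columnIndex).overWindow(windowOptions);
  }
}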
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/GroupByAggregation.java
|
/*
*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * An aggregation that can be used in a group by operation.
*/
public final class GroupByAggregation {
private final Aggregation wrapped;
private GroupByAggregation(Aggregation wrapped) {
this.wrapped = wrapped;
}
Aggregation getWrapped() {
return wrapped;
}
/**
* Add a column to the Aggregation so it can be used on a specific column of data.
* @param columnIndex the index of the column to operate on.
*/
public GroupByAggregationOnColumn onColumn(int columnIndex) {
return new GroupByAggregationOnColumn(this, columnIndex);
}
@Override
public int hashCode() {
return wrapped.hashCode();
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (other instanceof GroupByAggregation) {
GroupByAggregation o = (GroupByAggregation) other;
return wrapped.equals(o.wrapped);
}
return false;
}
/**
 * Count the number of valid (non-null) elements.
*/
public static GroupByAggregation count() {
return new GroupByAggregation(Aggregation.count());
}
/**
 * Count the number of elements.
* @param nullPolicy INCLUDE if nulls should be counted. EXCLUDE if only non-null values
* should be counted.
*/
public static GroupByAggregation count(NullPolicy nullPolicy) {
return new GroupByAggregation(Aggregation.count(nullPolicy));
}
/**
* Sum Aggregation
*/
public static GroupByAggregation sum() {
return new GroupByAggregation(Aggregation.sum());
}
/**
* Product Aggregation.
*/
public static GroupByAggregation product() {
return new GroupByAggregation(Aggregation.product());
}
/**
 * Index of the max element. Please note that, when using this aggregation, if the
 * data is not already sorted by the grouping keys it may be sorted automatically
 * prior to the aggregation, in which case the returned index refers to the sorted data.
*/
public static GroupByAggregation argMax() {
return new GroupByAggregation(Aggregation.argMax());
}
/**
 * Index of the min element. Please note that, when using this aggregation, if the
 * data is not already sorted by the grouping keys it may be sorted automatically
 * prior to the aggregation, in which case the returned index refers to the sorted data.
*/
public static GroupByAggregation argMin() {
return new GroupByAggregation(Aggregation.argMin());
}
/**
* Min Aggregation
*/
public static GroupByAggregation min() {
return new GroupByAggregation(Aggregation.min());
}
/**
* Max Aggregation
*/
public static GroupByAggregation max() {
return new GroupByAggregation(Aggregation.max());
}
/**
 * Arithmetic mean aggregation.
*/
public static GroupByAggregation mean() {
return new GroupByAggregation(Aggregation.mean());
}
/**
* Sum of square of differences from mean.
*/
public static GroupByAggregation M2() {
return new GroupByAggregation(Aggregation.M2());
}
/**
* Variance aggregation with 1 as the delta degrees of freedom.
*/
public static GroupByAggregation variance() {
return new GroupByAggregation(Aggregation.variance());
}
/**
* Variance aggregation.
* @param ddof delta degrees of freedom. The divisor used in calculation of variance is
* <code>N - ddof</code>, where N is the population size.
*/
public static GroupByAggregation variance(int ddof) {
return new GroupByAggregation(Aggregation.variance(ddof));
}
/**
* Standard deviation aggregation with 1 as the delta degrees of freedom.
*/
public static GroupByAggregation standardDeviation() {
return new GroupByAggregation(Aggregation.standardDeviation());
}
/**
* Standard deviation aggregation.
* @param ddof delta degrees of freedom. The divisor used in calculation of std is
* <code>N - ddof</code>, where N is the population size.
*/
public static GroupByAggregation standardDeviation(int ddof) {
return new GroupByAggregation(Aggregation.standardDeviation(ddof));
}
/**
* Aggregate to compute the specified quantiles. Uses linear interpolation by default.
*/
public static GroupByAggregation quantile(double ... quantiles) {
return new GroupByAggregation(Aggregation.quantile(quantiles));
}
/**
* Aggregate to compute various quantiles.
*/
public static GroupByAggregation quantile(QuantileMethod method, double ... quantiles) {
return new GroupByAggregation(Aggregation.quantile(method, quantiles));
}
/**
 * Median aggregation.
*/
public static GroupByAggregation median() {
return new GroupByAggregation(Aggregation.median());
}
/**
* Number of unique, non-null, elements.
*/
public static GroupByAggregation nunique() {
return new GroupByAggregation(Aggregation.nunique());
}
/**
* Number of unique elements.
* @param nullPolicy INCLUDE if nulls should be counted else EXCLUDE. If nulls are counted they
* compare as equal so multiple null values in a range would all only
* increase the count by 1.
*/
public static GroupByAggregation nunique(NullPolicy nullPolicy) {
return new GroupByAggregation(Aggregation.nunique(nullPolicy));
}
/**
* Get the nth, non-null, element in a group.
* @param offset the offset to look at. Negative numbers go from the end of the group. Any
* value outside of the group range results in a null.
*/
public static GroupByAggregation nth(int offset) {
return new GroupByAggregation(Aggregation.nth(offset));
}
/**
* Get the nth element in a group.
* @param offset the offset to look at. Negative numbers go from the end of the group. Any
* value outside of the group range results in a null.
* @param nullPolicy INCLUDE if nulls should be included in the aggregation or EXCLUDE if they
* should be skipped.
*/
public static GroupByAggregation nth(int offset, NullPolicy nullPolicy) {
return new GroupByAggregation(Aggregation.nth(offset, nullPolicy));
}
/**
* Collect the values into a list. Nulls will be skipped.
*/
public static GroupByAggregation collectList() {
return new GroupByAggregation(Aggregation.collectList());
}
/**
* Collect the values into a list.
*
* @param nullPolicy Indicates whether to include/exclude nulls during collection.
*/
public static GroupByAggregation collectList(NullPolicy nullPolicy) {
return new GroupByAggregation(Aggregation.collectList(nullPolicy));
}
/**
* Collect the values into a set. All null values will be excluded, and all NaN values are regarded as
* unique instances.
*/
public static GroupByAggregation collectSet() {
return new GroupByAggregation(Aggregation.collectSet());
}
/**
* Collect the values into a set.
*
* @param nullPolicy Indicates whether to include/exclude nulls during collection.
* @param nullEquality Flag to specify whether null entries within each list should be considered equal.
* @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
*/
public static GroupByAggregation collectSet(NullPolicy nullPolicy, NullEquality nullEquality, NaNEquality nanEquality) {
return new GroupByAggregation(Aggregation.collectSet(nullPolicy, nullEquality, nanEquality));
}
/**
* Merge the partial lists produced by multiple CollectListAggregations.
 * NOTICE: The partial lists to be merged should NOT include any null lists, though the lists themselves may contain null entries.
*/
public static GroupByAggregation mergeLists() {
return new GroupByAggregation(Aggregation.mergeLists());
}
/**
* Merge the partial sets produced by multiple CollectSetAggregations. Each null/NaN value will be regarded as
* a unique instance.
*/
public static GroupByAggregation mergeSets() {
return new GroupByAggregation(Aggregation.mergeSets());
}
/**
* Merge the partial sets produced by multiple CollectSetAggregations.
*
* @param nullEquality Flag to specify whether null entries within each list should be considered equal.
* @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
*/
public static GroupByAggregation mergeSets(NullEquality nullEquality, NaNEquality nanEquality) {
return new GroupByAggregation(Aggregation.mergeSets(nullEquality, nanEquality));
}
/**
* Merge the partial M2 values produced by multiple instances of M2Aggregation.
*/
public static GroupByAggregation mergeM2() {
return new GroupByAggregation(Aggregation.mergeM2());
}
/**
* Compute a t-digest from on a fixed-width numeric input column.
*
* @param delta Required accuracy (number of buckets).
* @return A list of centroids per grouping, where each centroid has a mean value and a
 * weight. The number of centroids will be &lt;= delta.
*/
public static GroupByAggregation createTDigest(int delta) {
return new GroupByAggregation(Aggregation.createTDigest(delta));
}
/**
* Merge t-digests.
*
* @param delta Required accuracy (number of buckets).
* @return A list of centroids per grouping, where each centroid has a mean value and a
 * weight. The number of centroids will be &lt;= delta.
*/
public static GroupByAggregation mergeTDigest(int delta) {
return new GroupByAggregation(Aggregation.mergeTDigest(delta));
}
/**
* Histogram aggregation, computing the frequencies for each unique row.
*
* A histogram is given as a lists column, in which the first child stores unique rows from
* the input values and the second child stores their corresponding frequencies.
*
 * @return A lists column of structs in which each list contains a histogram corresponding to
* an input key.
*/
public static GroupByAggregation histogram() {
return new GroupByAggregation(Aggregation.histogram());
}
/**
* MergeHistogram aggregation, to merge multiple histograms.
*
 * @return A new histogram in which the frequencies of the unique rows are summed up.
*/
public static GroupByAggregation mergeHistogram() {
return new GroupByAggregation(Aggregation.mergeHistogram());
}
}
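// Illustrative sketch (hypothetical, not part of the original source): GroupByAggregation
// instances are bound to a column index via onColumn() and passed to a group-by operation
// on a Table, e.g. Table.groupBy(...).aggregate(...) from the cudf Java Table API.
class GroupByAggregationExample {
  static Table countAndSum(Table input) {
    // group by column 0, then count the rows and sum column 1 within each group
    return input.groupBy(0).aggregate(
        GroupByAggregation.count().onColumn(0),
        GroupByAggregation.sum().onColumn(1));
  }
}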
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/DataSource.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
/**
 * Base class that can be used to provide data dynamically to CUDF. This follows
 * cudf::io::datasource fairly closely, with a few main differences.
 * <br/>
 * First, this does not expose async device reads; the non-async device read API is called
 * instead. Async support might be added in the future, but there is currently no direct
 * use case for it in Java to warrant the added complexity.
 * <br/>
 * Second, there is no implementation of the device read API that returns a buffer instead
 * of writing into one. CUDF does not use that API yet, so testing an implementation that is
 * never exercised did not seem worthwhile. If it is needed we will add one in the future.
*/
public abstract class DataSource implements AutoCloseable {
private static final Logger log = LoggerFactory.getLogger(DataSource.class);
/**
* This is used to keep track of the HostMemoryBuffers in java land so the C++ layer
* does not have to do it.
*/
private final HashMap<Long, HostMemoryBuffer> cachedBuffers = new HashMap<>();
@Override
public void close() {
if (!cachedBuffers.isEmpty()) {
throw new IllegalStateException("DataSource closed before all returned host buffers were closed");
}
}
/**
* Get the size of the source in bytes.
*/
public abstract long size();
/**
* Read data from the source at the given offset. Return a HostMemoryBuffer for the data
* that was read.
* @param offset where to start reading from.
* @param amount the maximum number of bytes to read.
* @return a buffer that points to the data.
* @throws IOException on any error.
*/
public abstract HostMemoryBuffer hostRead(long offset, long amount) throws IOException;
/**
* Called when the buffer returned from hostRead is done. The default is to close the buffer.
*/
protected void onHostBufferDone(HostMemoryBuffer buffer) {
if (buffer != null) {
buffer.close();
}
}
/**
* Read data from the source at the given offset into dest. Note that dest should not be closed,
* and no reference to it can outlive the call to hostRead. The target amount to read is
* dest's length.
* @param offset the offset to start reading from in the source.
* @param dest where to write the data.
* @return the actual number of bytes written to dest.
*/
public abstract long hostRead(long offset, HostMemoryBuffer dest) throws IOException;
/**
* Return true if this supports reading directly to the device else false. The default is
* no device support. This cannot change dynamically. It is typically read just once.
*/
public boolean supportsDeviceRead() {
return false;
}
/**
* Get the size cutoff between device reads and host reads when device reads are supported.
* Anything larger than the cutoff will be a device read and anything smaller will be a
* host read. By default, the cutoff is 0 so all reads will be device reads if device reads
* are supported.
*/
public long getDeviceReadCutoff() {
return 0;
}
/**
 * Read data from the source at the given offset into dest. Note that dest should not be closed,
 * and no reference to it can outlive the call to deviceRead. The target amount to read is
* dest's length.
* @param offset the offset to start reading from
* @param dest where to write the data.
* @param stream the stream to do the copy on.
* @return the actual number of bytes written to dest.
*/
public long deviceRead(long offset, DeviceMemoryBuffer dest,
Cuda.Stream stream) throws IOException {
throw new IllegalStateException("Device read is not implemented");
}
/////////////////////////////////////////////////
// Internal methods called from JNI
/////////////////////////////////////////////////
private static class NoopCleaner extends MemoryBuffer.MemoryBufferCleaner {
@Override
protected boolean cleanImpl(boolean logErrorIfNotClean) {
return true;
}
@Override
public boolean isClean() {
return true;
}
}
private static final NoopCleaner cleaner = new NoopCleaner();
// Called from JNI
private void onHostBufferDone(long bufferId) {
HostMemoryBuffer hmb = cachedBuffers.remove(bufferId);
if (hmb != null) {
onHostBufferDone(hmb);
} else {
// Called from C++ destructor so avoid throwing...
log.warn("Got a close callback for a buffer we could not find " + bufferId);
}
}
// Called from JNI
private long hostRead(long offset, long amount, long dst) throws IOException {
if (amount < 0) {
throw new IllegalArgumentException("Cannot allocate more than " + Long.MAX_VALUE + " bytes");
}
try (HostMemoryBuffer dstBuffer = new HostMemoryBuffer(dst, amount, cleaner)) {
return hostRead(offset, dstBuffer);
}
}
// Called from JNI
private long[] hostReadBuff(long offset, long amount) throws IOException {
if (amount < 0) {
throw new IllegalArgumentException("Cannot read more than " + Long.MAX_VALUE + " bytes");
}
HostMemoryBuffer buff = hostRead(offset, amount);
long[] ret = new long[3];
if (buff != null) {
long id = buff.id;
if (cachedBuffers.put(id, buff) != null) {
throw new IllegalStateException("Already had a buffer cached for " + buff);
}
ret[0] = buff.address;
ret[1] = buff.length;
ret[2] = id;
} // else they are all 0 because java does that already
return ret;
}
// Called from JNI
private long deviceRead(long offset, long amount, long dst, long stream) throws IOException {
if (amount < 0) {
throw new IllegalArgumentException("Cannot read more than " + Long.MAX_VALUE + " bytes");
}
Cuda.Stream strm = Cuda.Stream.wrap(stream);
try (DeviceMemoryBuffer dstBuffer = new DeviceMemoryBuffer(dst, amount, cleaner)) {
return deviceRead(offset, dstBuffer, strm);
}
}
}
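// Illustrative sketch (hypothetical, not part of the original source): a minimal
// host-only DataSource backed by a byte array. The class itself is made up for
// illustration; HostMemoryBuffer.allocate and setBytes are existing cudf APIs.
class ByteArrayDataSource extends DataSource {
  private final byte[] data;

  ByteArrayDataSource(byte[] data) {
    this.data = data;
  }

  @Override
  public long size() {
    return data.length;
  }

  @Override
  public HostMemoryBuffer hostRead(long offset, long amount) {
    long len = Math.min(amount, Math.max(0, data.length - offset));
    HostMemoryBuffer buffer = HostMemoryBuffer.allocate(len);
    // copy the requested range out of the backing array
    buffer.setBytes(0, data, offset, len);
    return buffer;
  }

  @Override
  public long hostRead(long offset, HostMemoryBuffer dest) {
    long len = Math.min(dest.getLength(), Math.max(0, data.length - offset));
    dest.setBytes(0, data, offset, len);
    return len;
  }
}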
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/nvcomp/NvcompJni.java
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.nvcomp;
import ai.rapids.cudf.NativeDepsLoader;
/** Raw JNI interface to the nvcomp library. */
class NvcompJni {
static {
NativeDepsLoader.loadNativeDeps();
}
/**
* Get the temporary workspace size required to perform compression of entire LZ4 batch.
* @param batchSize number of chunks in the batch
* @param maxChunkSize maximum size of an uncompressed chunk in bytes
* @return The size of required temporary workspace in bytes to compress the batch.
*/
static native long batchedLZ4CompressGetTempSize(long batchSize, long maxChunkSize);
/**
* Get the maximum size any chunk could compress to in a LZ4 batch. This is the minimum amount of
* output memory to allocate per chunk when batch compressing.
* @param maxChunkSize maximum size of an uncompressed chunk size in bytes
* @return maximum compressed output size of a chunk
*/
static native long batchedLZ4CompressGetMaxOutputChunkSize(long maxChunkSize);
/**
* Asynchronously compress a batch of buffers with LZ4. Note that
* compressedSizesOutPtr must point to pinned memory for this operation
* to be asynchronous.
* @param devInPtrs device address of uncompressed buffer addresses vector
* @param devInSizes device address of uncompressed buffer sizes vector
* @param chunkSize maximum size of an uncompressed chunk in bytes
* @param batchSize number of chunks in the batch
* @param tempPtr device address of the temporary workspace buffer
* @param tempSize size of the temporary workspace buffer in bytes
* @param devOutPtrs device address of output buffer addresses vector
* @param compressedSizesOutPtr device address where to write the sizes of the
* compressed data written to the corresponding
* output buffers. Must point to a buffer with
* at least 8 bytes of memory per output buffer
* in the batch.
* @param stream CUDA stream to use
*/
static native void batchedLZ4CompressAsync(
long devInPtrs,
long devInSizes,
long chunkSize,
long batchSize,
long tempPtr,
long tempSize,
long devOutPtrs,
long compressedSizesOutPtr,
long stream);
/**
* Computes the temporary storage size in bytes needed to decompress a LZ4-compressed batch.
* @param numChunks number of chunks in the batch
* @param maxUncompressedChunkBytes maximum uncompressed size of any chunk in bytes
* @return number of temporary storage bytes needed to decompress the batch
*/
static native long batchedLZ4DecompressGetTempSize(
long numChunks,
long maxUncompressedChunkBytes);
/**
* Asynchronously decompress a batch of LZ4-compressed data buffers.
* @param devInPtrs device address of compressed input buffer addresses vector
* @param devInSizes device address of compressed input buffer sizes vector
* @param devOutSizes device address of uncompressed buffer sizes vector
* @param batchSize number of buffers in the batch
* @param tempPtr device address of the temporary decompression space
* @param tempSize size of the temporary decompression space in bytes
* @param devOutPtrs device address of uncompressed output buffer addresses vector
* @param stream CUDA stream to use
*/
static native void batchedLZ4DecompressAsync(
long devInPtrs,
long devInSizes,
long devOutSizes,
long batchSize,
long tempPtr,
long tempSize,
long devOutPtrs,
long stream);
/**
* Asynchronously calculates the decompressed size needed for each chunk.
* @param devInPtrs device address of compressed input buffer addresses vector
* @param devInSizes device address of compressed input buffer sizes vector
* @param devOutSizes device address of calculated decompress sizes vector
* @param batchSize number of buffers in the batch
* @param stream CUDA stream to use
*/
static native void batchedLZ4GetDecompressSizeAsync(
long devInPtrs,
long devInSizes,
long devOutSizes,
long batchSize,
long stream);
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/nvcomp/BatchedLZ4Compressor.java
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.nvcomp;
import ai.rapids.cudf.BaseDeviceMemoryBuffer;
import ai.rapids.cudf.CloseableArray;
import ai.rapids.cudf.Cuda;
import ai.rapids.cudf.DefaultHostMemoryAllocator;
import ai.rapids.cudf.DeviceMemoryBuffer;
import ai.rapids.cudf.HostMemoryAllocator;
import ai.rapids.cudf.HostMemoryBuffer;
import ai.rapids.cudf.MemoryBuffer;
import ai.rapids.cudf.NvtxColor;
import ai.rapids.cudf.NvtxRange;
/** Multi-buffer LZ4 compressor */
public class BatchedLZ4Compressor {
private static final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();
static final long MAX_CHUNK_SIZE = 16777216; // in bytes
// each chunk has a 64-bit integer value as metadata containing the compressed size
static final long METADATA_BYTES_PER_CHUNK = 8;
private final long chunkSize;
private final long targetIntermediateBufferSize;
private final long maxOutputChunkSize;
/**
* Construct a batched LZ4 compressor instance
* @param chunkSize maximum amount of uncompressed data to compress as a single chunk. Inputs
* larger than this will be compressed in multiple chunks.
* @param targetIntermediateBufferSize desired maximum size of intermediate device buffers
* used during compression.
*/
public BatchedLZ4Compressor(long chunkSize, long targetIntermediateBufferSize) {
validateChunkSize(chunkSize);
this.chunkSize = chunkSize;
this.maxOutputChunkSize = NvcompJni.batchedLZ4CompressGetMaxOutputChunkSize(chunkSize);
assert maxOutputChunkSize < Integer.MAX_VALUE;
this.targetIntermediateBufferSize = Math.max(targetIntermediateBufferSize, maxOutputChunkSize);
}
/**
* Compress a batch of buffers with LZ4. The input buffers will be closed.
* @param origInputs buffers to compress
* @param stream CUDA stream to use
* @return compressed buffers corresponding to the input buffers
*/
public DeviceMemoryBuffer[] compress(BaseDeviceMemoryBuffer[] origInputs, Cuda.Stream stream) {
try (CloseableArray<BaseDeviceMemoryBuffer> inputs = CloseableArray.wrap(origInputs)) {
if (chunkSize <= 0) {
throw new IllegalArgumentException("Illegal chunk size: " + chunkSize);
}
final int numInputs = inputs.size();
if (numInputs == 0) {
return new DeviceMemoryBuffer[0];
}
      // Each buffer is broken up into chunks of at most chunkSize bytes for compression.
      // Calculate how many chunks are needed for each input buffer.
int[] chunksPerInput = new int[numInputs];
int numChunks = 0;
for (int i = 0; i < numInputs; i++) {
BaseDeviceMemoryBuffer buffer = inputs.get(i);
int numBufferChunks = getNumChunksInBuffer(buffer);
chunksPerInput[i] = numBufferChunks;
numChunks += numBufferChunks;
}
// Allocate buffers for each chunk and generate parallel lists of chunk source addresses,
// chunk destination addresses, and sizes.
try (CloseableArray<DeviceMemoryBuffer> compressedBuffers =
allocCompressedBuffers(numChunks, stream);
DeviceMemoryBuffer compressedChunkSizes =
DeviceMemoryBuffer.allocate(numChunks * 8L, stream)) {
long[] inputChunkAddrs = new long[numChunks];
long[] inputChunkSizes = new long[numChunks];
long[] outputChunkAddrs = new long[numChunks];
buildAddrsAndSizes(inputs, inputChunkAddrs, inputChunkSizes,
compressedBuffers, outputChunkAddrs);
long[] outputChunkSizes;
final long tempBufferSize = NvcompJni.batchedLZ4CompressGetTempSize(numChunks, chunkSize);
try (DeviceMemoryBuffer addrsAndSizes =
putAddrsAndSizesOnDevice(inputChunkAddrs, inputChunkSizes, outputChunkAddrs, stream);
DeviceMemoryBuffer tempBuffer = DeviceMemoryBuffer.allocate(tempBufferSize, stream)) {
final long devOutputAddrsPtr = addrsAndSizes.getAddress() + numChunks * 8L;
final long devInputSizesPtr = devOutputAddrsPtr + numChunks * 8L;
NvcompJni.batchedLZ4CompressAsync(
addrsAndSizes.getAddress(),
devInputSizesPtr,
chunkSize,
numChunks,
tempBuffer.getAddress(),
tempBufferSize,
devOutputAddrsPtr,
compressedChunkSizes.getAddress(),
stream.getStream());
}
// Synchronously copy the resulting compressed sizes per chunk.
outputChunkSizes = getOutputChunkSizes(compressedChunkSizes, stream);
// inputs are no longer needed at this point, so free them early
inputs.close();
// Combine compressed chunks into output buffers corresponding to each original input
return stitchOutput(chunksPerInput, compressedChunkSizes, outputChunkAddrs,
outputChunkSizes, stream);
}
}
}
static void validateChunkSize(long chunkSize) {
if (chunkSize <= 0 || chunkSize > MAX_CHUNK_SIZE) {
throw new IllegalArgumentException("Invalid chunk size: " + chunkSize + " Max chunk size is: "
+ MAX_CHUNK_SIZE + " bytes");
}
}
private static long ceilingDivide(long x, long y) {
return (x + y - 1) / y;
}
private int getNumChunksInBuffer(MemoryBuffer buffer) {
return (int) ceilingDivide(buffer.getLength(), chunkSize);
}
private CloseableArray<DeviceMemoryBuffer> allocCompressedBuffers(long numChunks,
Cuda.Stream stream) {
final long chunksPerBuffer = targetIntermediateBufferSize / maxOutputChunkSize;
final long numBuffers = ceilingDivide(numChunks, chunksPerBuffer);
if (numBuffers > Integer.MAX_VALUE) {
throw new IllegalStateException("Too many chunks");
}
try (NvtxRange range = new NvtxRange("allocCompressedBuffers", NvtxColor.YELLOW)) {
CloseableArray<DeviceMemoryBuffer> buffers = CloseableArray.wrap(
new DeviceMemoryBuffer[(int) numBuffers]);
try {
// allocate all of the max-chunks intermediate compressed buffers
for (int i = 0; i < buffers.size() - 1; ++i) {
buffers.set(i, DeviceMemoryBuffer.allocate(chunksPerBuffer * maxOutputChunkSize, stream));
}
// allocate the tail intermediate compressed buffer that may be smaller than the others
buffers.set(buffers.size() - 1, DeviceMemoryBuffer.allocate(
(numChunks - chunksPerBuffer * (buffers.size() - 1)) * maxOutputChunkSize, stream));
return buffers;
} catch (Exception e) {
buffers.close(e);
throw e;
}
}
}
// Fill in the inputChunkAddrs, inputChunkSizes, and outputChunkAddrs arrays to point
// into the chunks in the input and output buffers.
private void buildAddrsAndSizes(CloseableArray<BaseDeviceMemoryBuffer> inputs,
long[] inputChunkAddrs,
long[] inputChunkSizes,
CloseableArray<DeviceMemoryBuffer> compressedBuffers,
long[] outputChunkAddrs) {
// setup the input addresses and sizes
int chunkIdx = 0;
for (BaseDeviceMemoryBuffer input : inputs.getArray()) {
final int numChunksInBuffer = getNumChunksInBuffer(input);
for (int i = 0; i < numChunksInBuffer; i++) {
inputChunkAddrs[chunkIdx] = input.getAddress() + i * chunkSize;
inputChunkSizes[chunkIdx] = (i != numChunksInBuffer - 1) ? chunkSize
: (input.getLength() - (long) i * chunkSize);
++chunkIdx;
}
}
assert chunkIdx == inputChunkAddrs.length;
assert chunkIdx == inputChunkSizes.length;
// setup output addresses
chunkIdx = 0;
for (DeviceMemoryBuffer buffer : compressedBuffers.getArray()) {
assert buffer.getLength() % maxOutputChunkSize == 0;
long numChunksInBuffer = buffer.getLength() / maxOutputChunkSize;
long baseAddr = buffer.getAddress();
for (int i = 0; i < numChunksInBuffer; i++) {
outputChunkAddrs[chunkIdx++] = baseAddr + i * maxOutputChunkSize;
}
}
assert chunkIdx == outputChunkAddrs.length;
}
// Write input addresses, output addresses and sizes contiguously into a DeviceMemoryBuffer.
private DeviceMemoryBuffer putAddrsAndSizesOnDevice(long[] inputAddrs,
long[] inputSizes,
long[] outputAddrs,
Cuda.Stream stream) {
final long totalSize = inputAddrs.length * 8L * 3; // space for input, output, and size arrays
final long outputAddrsOffset = inputAddrs.length * 8L;
final long sizesOffset = outputAddrsOffset + inputAddrs.length * 8L;
try (NvtxRange range = new NvtxRange("putAddrsAndSizesOnDevice", NvtxColor.YELLOW)) {
try (HostMemoryBuffer hostbuf = hostMemoryAllocator.allocate(totalSize);
DeviceMemoryBuffer result = DeviceMemoryBuffer.allocate(totalSize)) {
hostbuf.setLongs(0, inputAddrs, 0, inputAddrs.length);
hostbuf.setLongs(outputAddrsOffset, outputAddrs, 0, outputAddrs.length);
for (int i = 0; i < inputSizes.length; i++) {
hostbuf.setLong(sizesOffset + i * 8L, inputSizes[i]);
}
result.copyFromHostBuffer(hostbuf, stream);
result.incRefCount();
return result;
}
}
}
// Synchronously copy the resulting compressed sizes from device memory to host memory.
private long[] getOutputChunkSizes(BaseDeviceMemoryBuffer devChunkSizes, Cuda.Stream stream) {
try (NvtxRange range = new NvtxRange("getOutputChunkSizes", NvtxColor.YELLOW)) {
try (HostMemoryBuffer hostbuf = hostMemoryAllocator.allocate(devChunkSizes.getLength())) {
hostbuf.copyFromDeviceBuffer(devChunkSizes, stream);
int numChunks = (int) (devChunkSizes.getLength() / 8);
long[] result = new long[numChunks];
for (int i = 0; i < numChunks; i++) {
long size = hostbuf.getLong(i * 8L);
assert size < Integer.MAX_VALUE : "output size is too big";
result[i] = size;
}
return result;
}
}
}
// Stitch together the individual chunks into the result buffers.
// Each result buffer has metadata at the beginning, followed by compressed chunks.
// This is done by building up parallel lists of source addr, dest addr and size and
// then calling multiBufferCopyAsync()
private DeviceMemoryBuffer[] stitchOutput(int[] chunksPerInput,
DeviceMemoryBuffer compressedChunkSizes,
long[] outputChunkAddrs,
long[] outputChunkSizes,
Cuda.Stream stream) {
try (NvtxRange range = new NvtxRange("stitchOutput", NvtxColor.YELLOW)) {
final int numOutputs = chunksPerInput.length;
final long chunkSizesAddr = compressedChunkSizes.getAddress();
long[] outputBufferSizes = calcOutputBufferSizes(chunksPerInput, outputChunkSizes);
try (CloseableArray<DeviceMemoryBuffer> outputs =
CloseableArray.wrap(new DeviceMemoryBuffer[numOutputs])) {
// Each chunk needs to be copied, and each output needs a copy of the
// compressed chunk size vector representing the metadata.
final int totalBuffersToCopy = numOutputs + outputChunkAddrs.length;
long[] destAddrs = new long[totalBuffersToCopy];
long[] srcAddrs = new long[totalBuffersToCopy];
long[] sizes = new long[totalBuffersToCopy];
int copyBufferIdx = 0;
int chunkIdx = 0;
for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++) {
DeviceMemoryBuffer outputBuffer = DeviceMemoryBuffer.allocate(outputBufferSizes[outputIdx]);
final long outputBufferAddr = outputBuffer.getAddress();
outputs.set(outputIdx, outputBuffer);
final long numChunks = chunksPerInput[outputIdx];
final long metadataSize = numChunks * METADATA_BYTES_PER_CHUNK;
// setup a copy of the metadata at the front of the output buffer
srcAddrs[copyBufferIdx] = chunkSizesAddr + chunkIdx * 8;
destAddrs[copyBufferIdx] = outputBufferAddr;
sizes[copyBufferIdx] = metadataSize;
++copyBufferIdx;
// setup copies of the compressed chunks for this output buffer
long nextChunkAddr = outputBufferAddr + metadataSize;
for (int i = 0; i < numChunks; ++i) {
srcAddrs[copyBufferIdx] = outputChunkAddrs[chunkIdx];
destAddrs[copyBufferIdx] = nextChunkAddr;
final long chunkSize = outputChunkSizes[chunkIdx];
sizes[copyBufferIdx] = chunkSize;
copyBufferIdx++;
chunkIdx++;
nextChunkAddr += chunkSize;
}
}
assert copyBufferIdx == totalBuffersToCopy;
assert chunkIdx == outputChunkAddrs.length;
assert chunkIdx == outputChunkSizes.length;
Cuda.multiBufferCopyAsync(destAddrs, srcAddrs, sizes, stream);
return outputs.release();
}
}
}
// Calculate the list of sizes for each output buffer (metadata plus size of compressed chunks)
private long[] calcOutputBufferSizes(int[] chunksPerInput,
long[] outputChunkSizes) {
long[] sizes = new long[chunksPerInput.length];
int chunkIdx = 0;
for (int i = 0; i < sizes.length; i++) {
final int chunksInBuffer = chunksPerInput[i];
final int chunkEndIdx = chunkIdx + chunksInBuffer;
// metadata stored in front of compressed data
long bufferSize = METADATA_BYTES_PER_CHUNK * chunksInBuffer;
// add in the compressed chunk sizes to get the total size
while (chunkIdx < chunkEndIdx) {
bufferSize += outputChunkSizes[chunkIdx++];
}
sizes[i] = bufferSize;
}
assert chunkIdx == outputChunkSizes.length;
return sizes;
}
}
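// Illustrative sketch (hypothetical, not part of the original source): a typical
// compression flow on the default stream. Chunk and buffer sizes are arbitrary choices.
class BatchedLZ4CompressorExample {
  static DeviceMemoryBuffer[] compressAll(BaseDeviceMemoryBuffer[] inputs) {
    // 64KiB chunks with ~8MiB intermediate buffers; compress() closes the inputs
    BatchedLZ4Compressor compressor = new BatchedLZ4Compressor(64 * 1024, 8L * 1024 * 1024);
    return compressor.compress(inputs, Cuda.DEFAULT_STREAM);
  }
}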
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/nvcomp/CompressionType.java
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.nvcomp;
/** Enumeration of data types that can be compressed. */
public enum CompressionType {
CHAR(0),
UCHAR(1),
SHORT(2),
USHORT(3),
INT(4),
UINT(5),
LONGLONG(6),
ULONGLONG(7),
BITS(0xff);
private static final CompressionType[] types = CompressionType.values();
final int nativeId;
CompressionType(int nativeId) {
this.nativeId = nativeId;
}
/** Lookup the CompressionType that corresponds to the specified native identifier */
public static CompressionType fromNativeId(int id) {
for (CompressionType type : types) {
if (type.nativeId == id) {
return type;
}
}
throw new IllegalArgumentException("Unknown compression type ID: " + id);
}
/** Get the native code identifier for the type */
public final int toNativeId() {
return nativeId;
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/nvcomp/BatchedLZ4Decompressor.java
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.nvcomp;
import ai.rapids.cudf.CloseableArray;
import ai.rapids.cudf.Cuda;
import ai.rapids.cudf.BaseDeviceMemoryBuffer;
import ai.rapids.cudf.DeviceMemoryBuffer;
import ai.rapids.cudf.HostMemoryBuffer;
import ai.rapids.cudf.NvtxColor;
import ai.rapids.cudf.NvtxRange;
import java.util.Arrays;
/** LZ4 decompressor that operates on multiple input buffers in a batch */
public class BatchedLZ4Decompressor {
/**
* Asynchronously decompress a batch of buffers
* @param chunkSize maximum uncompressed block size, must match value used during compression
* @param origInputs buffers to decompress, will be closed by this operation
 * @param outputs output buffers that will contain the uncompressed results, each must be sized
* to the exact decompressed size of the corresponding input
* @param stream CUDA stream to use
*/
public static void decompressAsync(long chunkSize,
BaseDeviceMemoryBuffer[] origInputs,
BaseDeviceMemoryBuffer[] outputs,
Cuda.Stream stream) {
try (CloseableArray<BaseDeviceMemoryBuffer> inputs =
CloseableArray.wrap(Arrays.copyOf(origInputs, origInputs.length))) {
BatchedLZ4Compressor.validateChunkSize(chunkSize);
if (origInputs.length != outputs.length) {
throw new IllegalArgumentException("number of inputs must match number of outputs");
}
final int numInputs = inputs.size();
if (numInputs == 0) {
return;
}
int[] chunksPerInput = new int[numInputs];
long totalChunks = 0;
for (int i = 0; i < numInputs; i++) {
// use output size to determine number of chunks in the input, as the output buffer
// must be exactly sized to the uncompressed data
BaseDeviceMemoryBuffer buffer = outputs[i];
int numBufferChunks = getNumChunksInBuffer(chunkSize, buffer);
chunksPerInput[i] = numBufferChunks;
totalChunks += numBufferChunks;
}
final long tempBufferSize = NvcompJni.batchedLZ4DecompressGetTempSize(totalChunks, chunkSize);
try (DeviceMemoryBuffer devAddrsSizes =
buildAddrsSizesBuffer(chunkSize, totalChunks, inputs.getArray(), chunksPerInput,
outputs, stream);
DeviceMemoryBuffer devTemp = DeviceMemoryBuffer.allocate(tempBufferSize)) {
// buffer containing addresses and sizes contains four vectors of longs in this order:
// - compressed chunk input addresses
// - chunk output buffer addresses
// - compressed chunk sizes
// - uncompressed chunk sizes
final long inputAddrsPtr = devAddrsSizes.getAddress();
final long outputAddrsPtr = inputAddrsPtr + totalChunks * 8;
final long inputSizesPtr = outputAddrsPtr + totalChunks * 8;
final long outputSizesPtr = inputSizesPtr + totalChunks * 8;
NvcompJni.batchedLZ4DecompressAsync(
inputAddrsPtr,
inputSizesPtr,
outputSizesPtr,
totalChunks,
devTemp.getAddress(),
devTemp.getLength(),
outputAddrsPtr,
stream.getStream());
}
}
}
private static int getNumChunksInBuffer(long chunkSize, BaseDeviceMemoryBuffer buffer) {
return (int) ((buffer.getLength() + chunkSize - 1) / chunkSize);
}
/**
* Build a device memory buffer containing four vectors of longs in the following order:
* <ul>
* <li>compressed chunk input addresses</li>
* <li>uncompressed chunk output addresses</li>
* <li>compressed chunk sizes</li>
* <li>uncompressed chunk sizes</li>
* </ul>
* Each vector contains as many longs as the number of chunks being decompressed
* @param chunkSize maximum uncompressed size of a chunk
* @param totalChunks total number of chunks to be decompressed
* @param inputs device buffers containing the compressed data
* @param chunksPerInput number of compressed chunks per input buffer
* @param outputs device buffers that will hold the uncompressed output
* @param stream CUDA stream to use
* @return device buffer containing address and size vectors
*/
private static DeviceMemoryBuffer buildAddrsSizesBuffer(long chunkSize,
long totalChunks,
BaseDeviceMemoryBuffer[] inputs,
int[] chunksPerInput,
BaseDeviceMemoryBuffer[] outputs,
Cuda.Stream stream) {
final long totalBufferSize = totalChunks * 8L * 4L;
try (NvtxRange range = new NvtxRange("buildAddrSizesBuffer", NvtxColor.YELLOW)) {
try (HostMemoryBuffer metadata = fetchMetadata(totalChunks, inputs, chunksPerInput, stream);
HostMemoryBuffer hostAddrsSizes = HostMemoryBuffer.allocate(totalBufferSize);
DeviceMemoryBuffer devAddrsSizes = DeviceMemoryBuffer.allocate(totalBufferSize)) {
// Build four long vectors in the AddrsSizes buffer:
// - compressed input address (one per chunk)
// - uncompressed output address (one per chunk)
// - compressed input size (one per chunk)
        // - uncompressed output size (one per chunk)
final long srcAddrsOffset = 0;
final long destAddrsOffset = srcAddrsOffset + totalChunks * 8L;
final long srcSizesOffset = destAddrsOffset + totalChunks * 8L;
final long destSizesOffset = srcSizesOffset + totalChunks * 8L;
long chunkIdx = 0;
for (int inputIdx = 0; inputIdx < inputs.length; inputIdx++) {
final BaseDeviceMemoryBuffer input = inputs[inputIdx];
final BaseDeviceMemoryBuffer output = outputs[inputIdx];
final int numChunksInInput = chunksPerInput[inputIdx];
long srcAddr = input.getAddress() +
BatchedLZ4Compressor.METADATA_BYTES_PER_CHUNK * numChunksInInput;
long destAddr = output.getAddress();
final long chunkIdxEnd = chunkIdx + numChunksInInput;
while (chunkIdx < chunkIdxEnd) {
final long srcChunkSize = metadata.getLong(chunkIdx * 8);
final long destChunkSize = (chunkIdx < chunkIdxEnd - 1) ? chunkSize
: output.getAddress() + output.getLength() - destAddr;
hostAddrsSizes.setLong(srcAddrsOffset + chunkIdx * 8, srcAddr);
hostAddrsSizes.setLong(destAddrsOffset + chunkIdx * 8, destAddr);
hostAddrsSizes.setLong(srcSizesOffset + chunkIdx * 8, srcChunkSize);
hostAddrsSizes.setLong(destSizesOffset + chunkIdx * 8, destChunkSize);
srcAddr += srcChunkSize;
destAddr += destChunkSize;
++chunkIdx;
}
}
devAddrsSizes.copyFromHostBuffer(hostAddrsSizes, stream);
devAddrsSizes.incRefCount();
return devAddrsSizes;
}
}
}
/**
* Fetch the metadata at the front of each input in a single, contiguous host buffer.
* @param totalChunks total number of compressed chunks
* @param inputs buffers containing the compressed data
* @param chunksPerInput number of compressed chunks for the corresponding input
* @param stream CUDA stream to use
* @return host buffer containing all of the metadata
*/
private static HostMemoryBuffer fetchMetadata(long totalChunks,
BaseDeviceMemoryBuffer[] inputs,
int[] chunksPerInput,
Cuda.Stream stream) {
try (NvtxRange range = new NvtxRange("fetchMetadata", NvtxColor.PURPLE)) {
// one long per chunk containing the compressed size
final long totalMetadataSize = totalChunks * BatchedLZ4Compressor.METADATA_BYTES_PER_CHUNK;
// Build corresponding vectors of destination addresses, source addresses and sizes.
long[] destAddrs = new long[inputs.length];
long[] srcAddrs = new long[inputs.length];
long[] sizes = new long[inputs.length];
try (HostMemoryBuffer hostMetadata = HostMemoryBuffer.allocate(totalMetadataSize);
DeviceMemoryBuffer devMetadata = DeviceMemoryBuffer.allocate(totalMetadataSize)) {
long destCopyAddr = devMetadata.getAddress();
for (int inputIdx = 0; inputIdx < inputs.length; inputIdx++) {
final BaseDeviceMemoryBuffer input = inputs[inputIdx];
final long copySize = chunksPerInput[inputIdx] * BatchedLZ4Compressor.METADATA_BYTES_PER_CHUNK;
destAddrs[inputIdx] = destCopyAddr;
srcAddrs[inputIdx] = input.getAddress();
sizes[inputIdx] = copySize;
destCopyAddr += copySize;
}
Cuda.multiBufferCopyAsync(destAddrs, srcAddrs, sizes, stream);
hostMetadata.copyFromDeviceBuffer(devMetadata, stream);
hostMetadata.incRefCount();
return hostMetadata;
}
}
}
}
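// Illustrative sketch (hypothetical, not part of the original source): decompressing a
// batch produced by BatchedLZ4Compressor. The caller must already know the exact
// uncompressed size of each buffer and must pass the same chunkSize used for compression.
class BatchedLZ4DecompressorExample {
  static DeviceMemoryBuffer[] decompressAll(BaseDeviceMemoryBuffer[] compressed,
                                            long[] uncompressedSizes, long chunkSize) {
    DeviceMemoryBuffer[] outputs = new DeviceMemoryBuffer[compressed.length];
    try {
      for (int i = 0; i < outputs.length; i++) {
        outputs[i] = DeviceMemoryBuffer.allocate(uncompressedSizes[i]);
      }
      // closes the inputs; outputs are valid once the stream is synchronized
      BatchedLZ4Decompressor.decompressAsync(chunkSize, compressed, outputs, Cuda.DEFAULT_STREAM);
      Cuda.DEFAULT_STREAM.sync();
      return outputs;
    } catch (RuntimeException e) {
      for (DeviceMemoryBuffer out : outputs) {
        if (out != null) {
          out.close();
        }
      }
      throw e;
    }
  }
}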
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/nvcomp/NvcompCudaException.java
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.nvcomp;
/** Exception thrown from nvcomp indicating a CUDA error occurred. */
public class NvcompCudaException extends NvcompException {
NvcompCudaException(String message) {
super(message);
}
NvcompCudaException(String message, Throwable cause) {
super(message, cause);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/nvcomp/NvcompException.java
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.nvcomp;
/** Base class for all nvcomp-specific exceptions */
public class NvcompException extends RuntimeException {
NvcompException(String message) {
super(message);
}
NvcompException(String message, Throwable cause) {
super(message, cause);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/TableReference.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import java.nio.ByteBuffer;
/**
* Enumeration of tables that can be referenced in an AST.
* NOTE: This must be kept in sync with `jni_to_table_reference` code in CompiledExpression.cpp!
*/
public enum TableReference {
LEFT(0),
RIGHT(1);
// OUTPUT is an AST implementation detail and should not appear in user-built expressions.
private final byte nativeId;
TableReference(int nativeId) {
this.nativeId = (byte) nativeId;
assert this.nativeId == nativeId;
}
/** Get the size in bytes to serialize this table reference */
int getSerializedSize() {
return Byte.BYTES;
}
/** Serialize this table reference to the specified buffer */
void serialize(ByteBuffer bb) {
bb.put(nativeId);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/ColumnReference.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import java.nio.ByteBuffer;
/** A reference to a column in an input table. */
public final class ColumnReference extends AstExpression {
private final int columnIndex;
private final TableReference tableSource;
/** Construct a column reference to either the only or leftmost input table */
public ColumnReference(int columnIndex) {
this(columnIndex, TableReference.LEFT);
}
/** Construct a column reference to the specified column index in the specified table */
public ColumnReference(int columnIndex, TableReference tableSource) {
this.columnIndex = columnIndex;
this.tableSource = tableSource;
}
@Override
int getSerializedSize() {
// node type + table ref + column index
return ExpressionType.COLUMN_REFERENCE.getSerializedSize() +
tableSource.getSerializedSize() +
Integer.BYTES;
}
@Override
void serialize(ByteBuffer bb) {
ExpressionType.COLUMN_REFERENCE.serialize(bb);
tableSource.serialize(bb);
bb.putInt(columnIndex);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/BinaryOperation.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import java.nio.ByteBuffer;
/** A binary operation consisting of an operator and two operands. */
public class BinaryOperation extends AstExpression {
private final BinaryOperator op;
private final AstExpression leftInput;
private final AstExpression rightInput;
public BinaryOperation(BinaryOperator op, AstExpression leftInput, AstExpression rightInput) {
this.op = op;
this.leftInput = leftInput;
this.rightInput = rightInput;
}
@Override
int getSerializedSize() {
return ExpressionType.BINARY_EXPRESSION.getSerializedSize() +
op.getSerializedSize() +
leftInput.getSerializedSize() +
rightInput.getSerializedSize();
}
@Override
void serialize(ByteBuffer bb) {
ExpressionType.BINARY_EXPRESSION.serialize(bb);
op.serialize(bb);
leftInput.serialize(bb);
rightInput.serialize(bb);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/AstExpression.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/** Base class of every node in an AST */
public abstract class AstExpression {
/**
* Enumeration for the types of AST nodes that can appear in a serialized AST.
* NOTE: This must be kept in sync with the `jni_serialized_node_type` in CompiledExpression.cpp!
*/
protected enum ExpressionType {
VALID_LITERAL(0),
NULL_LITERAL(1),
COLUMN_REFERENCE(2),
UNARY_EXPRESSION(3),
BINARY_EXPRESSION(4);
private final byte nativeId;
ExpressionType(int nativeId) {
this.nativeId = (byte) nativeId;
assert this.nativeId == nativeId;
}
/** Get the size in bytes to serialize this node type */
int getSerializedSize() {
return Byte.BYTES;
}
/** Serialize this node type to the specified buffer */
void serialize(ByteBuffer bb) {
bb.put(nativeId);
}
}
public CompiledExpression compile() {
int size = getSerializedSize();
ByteBuffer bb = ByteBuffer.allocate(size);
bb.order(ByteOrder.nativeOrder());
serialize(bb);
return new CompiledExpression(bb.array());
}
/** Get the size in bytes of the serialized form of this node and all child nodes */
abstract int getSerializedSize();
/**
* Serialize this node and all child nodes.
* @param bb buffer to receive the serialized data
*/
abstract void serialize(ByteBuffer bb);
}
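// Illustrative sketch (hypothetical, not part of the original source): building and
// compiling a simple comparison between a column of the left table and a column of the
// right table. BinaryOperator.LESS is assumed to exist in this ast package.
class AstExample {
  static CompiledExpression leftLessThanRight() {
    AstExpression expr = new BinaryOperation(BinaryOperator.LESS,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    // the compiled expression wraps native resources and should be closed when done
    return expr.compile();
  }
}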
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/Literal.java
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import ai.rapids.cudf.DType;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
/** A literal value in an AST expression. */
public final class Literal extends AstExpression {
private final DType type;
private final byte[] serializedValue;
/** Construct a null literal of the specified type. */
public static Literal ofNull(DType type) {
return new Literal(type, null);
}
/** Construct a boolean literal with the specified value. */
public static Literal ofBoolean(boolean value) {
return new Literal(DType.BOOL8, new byte[] { value ? (byte) 1 : (byte) 0 });
}
/** Construct a boolean literal with the specified value or null. */
public static Literal ofBoolean(Boolean value) {
if (value == null) {
return ofNull(DType.BOOL8);
}
return ofBoolean(value.booleanValue());
}
/** Construct a byte literal with the specified value. */
public static Literal ofByte(byte value) {
return new Literal(DType.INT8, new byte[] { value });
}
/** Construct a byte literal with the specified value or null. */
public static Literal ofByte(Byte value) {
if (value == null) {
return ofNull(DType.INT8);
}
return ofByte(value.byteValue());
}
/** Construct a short literal with the specified value. */
public static Literal ofShort(short value) {
byte[] serializedValue = new byte[Short.BYTES];
ByteBuffer.wrap(serializedValue).order(ByteOrder.nativeOrder()).putShort(value);
return new Literal(DType.INT16, serializedValue);
}
/** Construct a short literal with the specified value or null. */
public static Literal ofShort(Short value) {
if (value == null) {
return ofNull(DType.INT16);
}
return ofShort(value.shortValue());
}
/** Construct an integer literal with the specified value. */
public static Literal ofInt(int value) {
return ofIntBasedType(DType.INT32, value);
}
/** Construct an integer literal with the specified value or null. */
public static Literal ofInt(Integer value) {
if (value == null) {
return ofNull(DType.INT32);
}
return ofInt(value.intValue());
}
/** Construct a long literal with the specified value. */
public static Literal ofLong(long value) {
return ofLongBasedType(DType.INT64, value);
}
/** Construct a long literal with the specified value or null. */
public static Literal ofLong(Long value) {
if (value == null) {
return ofNull(DType.INT64);
}
return ofLong(value.longValue());
}
/** Construct a float literal with the specified value. */
public static Literal ofFloat(float value) {
byte[] serializedValue = new byte[Float.BYTES];
ByteBuffer.wrap(serializedValue).order(ByteOrder.nativeOrder()).putFloat(value);
return new Literal(DType.FLOAT32, serializedValue);
}
/** Construct a float literal with the specified value or null. */
public static Literal ofFloat(Float value) {
if (value == null) {
return ofNull(DType.FLOAT32);
}
return ofFloat(value.floatValue());
}
/** Construct a double literal with the specified value. */
public static Literal ofDouble(double value) {
byte[] serializedValue = new byte[Double.BYTES];
ByteBuffer.wrap(serializedValue).order(ByteOrder.nativeOrder()).putDouble(value);
return new Literal(DType.FLOAT64, serializedValue);
}
/** Construct a double literal with the specified value or null. */
public static Literal ofDouble(Double value) {
if (value == null) {
return ofNull(DType.FLOAT64);
}
return ofDouble(value.doubleValue());
}
/** Construct a timestamp days literal with the specified value. */
public static Literal ofTimestampDaysFromInt(int value) {
return ofIntBasedType(DType.TIMESTAMP_DAYS, value);
}
/** Construct a timestamp days literal with the specified value or null. */
public static Literal ofTimestampDaysFromInt(Integer value) {
if (value == null) {
return ofNull(DType.TIMESTAMP_DAYS);
}
return ofTimestampDaysFromInt(value.intValue());
}
/** Construct a long-based timestamp literal with the specified value. */
public static Literal ofTimestampFromLong(DType type, long value) {
if (!type.isTimestampType()) {
throw new IllegalArgumentException("type is not a timestamp: " + type);
}
if (type.equals(DType.TIMESTAMP_DAYS)) {
int intValue = (int)value;
if (value != intValue) {
throw new IllegalArgumentException("value too large for type " + type + ": " + value);
}
return ofTimestampDaysFromInt(intValue);
}
return ofLongBasedType(type, value);
}
/** Construct a long-based timestamp literal with the specified value or null. */
public static Literal ofTimestampFromLong(DType type, Long value) {
if (value == null) {
return ofNull(type);
}
return ofTimestampFromLong(type, value.longValue());
}
/** Construct a duration days literal with the specified value. */
public static Literal ofDurationDaysFromInt(int value) {
return ofIntBasedType(DType.DURATION_DAYS, value);
}
/** Construct a duration days literal with the specified value or null. */
public static Literal ofDurationDaysFromInt(Integer value) {
if (value == null) {
return ofNull(DType.DURATION_DAYS);
}
return ofDurationDaysFromInt(value.intValue());
}
/** Construct a long-based duration literal with the specified value. */
public static Literal ofDurationFromLong(DType type, long value) {
if (!type.isDurationType()) {
throw new IllegalArgumentException("type is not a duration: " + type);
}
if (type.equals(DType.DURATION_DAYS)) {
int intValue = (int)value;
if (value != intValue) {
throw new IllegalArgumentException("value too large for type " + type + ": " + value);
}
return ofDurationDaysFromInt(intValue);
}
return ofLongBasedType(type, value);
}
/** Construct a long-based duration literal with the specified value or null. */
public static Literal ofDurationFromLong(DType type, Long value) {
if (value == null) {
return ofNull(type);
}
return ofDurationFromLong(type, value.longValue());
}
/** Construct a string literal with the specified value or null. */
public static Literal ofString(String value) {
if (value == null) {
return ofNull(DType.STRING);
}
return ofUTF8String(value.getBytes(StandardCharsets.UTF_8));
}
/** Construct a string literal directly from UTF-8 bytes to skip transcoding. */
public static Literal ofUTF8String(byte[] stringBytes) {
if (stringBytes == null) {
return ofNull(DType.STRING);
}
byte[] serializedValue = new byte[stringBytes.length + Integer.BYTES];
ByteBuffer.wrap(serializedValue).order(ByteOrder.nativeOrder()).putInt(stringBytes.length);
System.arraycopy(stringBytes, 0, serializedValue, Integer.BYTES, stringBytes.length);
return new Literal(DType.STRING, serializedValue);
}
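// Serialized layout of a valid string literal (a descriptive note derived from the
// code above): a 4-byte native-order length prefix followed by the raw UTF-8 bytes,
// e.g. "hi" -> [0x02 0x00 0x00 0x00 'h' 'i'] on a little-endian host.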
Literal(DType type, byte[] serializedValue) {
this.type = type;
this.serializedValue = serializedValue;
}
@Override
int getSerializedSize() {
ExpressionType nodeType = serializedValue != null
? ExpressionType.VALID_LITERAL : ExpressionType.NULL_LITERAL;
int size = nodeType.getSerializedSize() + getDataTypeSerializedSize();
if (serializedValue != null) {
size += serializedValue.length;
}
return size;
}
@Override
void serialize(ByteBuffer bb) {
ExpressionType nodeType = serializedValue != null
? ExpressionType.VALID_LITERAL : ExpressionType.NULL_LITERAL;
nodeType.serialize(bb);
serializeDataType(bb);
if (serializedValue != null) {
bb.put(serializedValue);
}
}
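// Overall wire format produced by serialize() (a descriptive note, not new behavior):
// one byte for the expression node type (VALID_LITERAL or NULL_LITERAL), one byte for
// the native DType id, an extra scale byte for decimal types, then the raw value bytes
// for valid literals.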
private int getDataTypeSerializedSize() {
int nativeTypeId = type.getTypeId().getNativeId();
assert nativeTypeId == (byte) nativeTypeId : "Type ID does not fit in a byte";
if (type.isDecimalType()) {
assert type.getScale() == (byte) type.getScale() : "Decimal scale does not fit in a byte";
return 2;
}
return 1;
}
private void serializeDataType(ByteBuffer bb) {
byte nativeTypeId = (byte) type.getTypeId().getNativeId();
assert nativeTypeId == type.getTypeId().getNativeId() : "DType ID does not fit in a byte";
bb.put(nativeTypeId);
if (type.isDecimalType()) {
byte scale = (byte) type.getScale();
assert scale == (byte) type.getScale() : "Decimal scale does not fit in a byte";
bb.put(scale);
}
}
private static Literal ofIntBasedType(DType type, int value) {
assert type.getSizeInBytes() == Integer.BYTES;
byte[] serializedValue = new byte[Integer.BYTES];
ByteBuffer.wrap(serializedValue).order(ByteOrder.nativeOrder()).putInt(value);
return new Literal(type, serializedValue);
}
private static Literal ofLongBasedType(DType type, long value) {
assert type.getSizeInBytes() == Long.BYTES;
byte[] serializedValue = new byte[Long.BYTES];
ByteBuffer.wrap(serializedValue).order(ByteOrder.nativeOrder()).putLong(value);
return new Literal(type, serializedValue);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/BinaryOperator.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import java.nio.ByteBuffer;
/**
* Enumeration of AST operators that can appear in a binary operation.
* NOTE: This must be kept in sync with `jni_to_binary_operator` in CompiledExpression.cpp!
*/
public enum BinaryOperator {
ADD(0), // operator +
SUB(1), // operator -
MUL(2), // operator *
DIV(3), // operator / using common type of lhs and rhs
TRUE_DIV(4), // operator / after promoting type to floating point
FLOOR_DIV(5), // operator / after promoting to 64 bit floating point and then flooring the result
MOD(6), // operator %
PYMOD(7), // operator % using Python's sign rules for negatives
POW(8), // lhs ^ rhs
EQUAL(9), // operator ==
NULL_EQUAL(10), // operator == using Spark rules for null inputs
NOT_EQUAL(11), // operator !=
LESS(12), // operator <
GREATER(13), // operator >
LESS_EQUAL(14), // operator <=
GREATER_EQUAL(15), // operator >=
BITWISE_AND(16), // operator &
BITWISE_OR(17), // operator |
BITWISE_XOR(18), // operator ^
LOGICAL_AND(19), // operator &&
NULL_LOGICAL_AND(20), // operator && using Spark rules for null inputs
LOGICAL_OR(21), // operator ||
NULL_LOGICAL_OR(22); // operator || using Spark rules for null inputs
private final byte nativeId;
BinaryOperator(int nativeId) {
this.nativeId = (byte) nativeId;
assert this.nativeId == nativeId;
}
/** Get the size in bytes to serialize this operator */
int getSerializedSize() {
return Byte.BYTES;
}
/** Serialize this operator to the specified buffer */
void serialize(ByteBuffer bb) {
bb.put(nativeId);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/UnaryOperation.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import java.nio.ByteBuffer;
/** A unary operation consisting of an operator and an operand. */
public final class UnaryOperation extends AstExpression {
private final UnaryOperator op;
private final AstExpression input;
public UnaryOperation(UnaryOperator op, AstExpression input) {
this.op = op;
this.input = input;
}
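// Illustrative usage (a minimal sketch; assumes column 0 is a floating-point column):
//   AstExpression expr = new UnaryOperation(UnaryOperator.SIN, new ColumnReference(0));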
@Override
int getSerializedSize() {
return ExpressionType.UNARY_EXPRESSION.getSerializedSize() +
op.getSerializedSize() +
input.getSerializedSize();
}
@Override
void serialize(ByteBuffer bb) {
ExpressionType.UNARY_EXPRESSION.serialize(bb);
op.serialize(bb);
input.serialize(bb);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/CompiledExpression.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import ai.rapids.cudf.ColumnVector;
import ai.rapids.cudf.MemoryCleaner;
import ai.rapids.cudf.NativeDepsLoader;
import ai.rapids.cudf.Table;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** This class wraps a native compiled AST and must be closed to avoid native memory leaks. */
public class CompiledExpression implements AutoCloseable {
static {
NativeDepsLoader.loadNativeDeps();
}
private static final Logger log = LoggerFactory.getLogger(CompiledExpression.class);
private static class CompiledExpressionCleaner extends MemoryCleaner.Cleaner {
private long nativeHandle;
CompiledExpressionCleaner(long nativeHandle) {
this.nativeHandle = nativeHandle;
}
@Override
protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
long origAddress = nativeHandle;
boolean neededCleanup = nativeHandle != 0;
if (neededCleanup) {
try {
destroy(nativeHandle);
} finally {
nativeHandle = 0;
}
if (logErrorIfNotClean) {
log.error("AN AST COMPILED EXPRESSION WAS LEAKED (ID: " +
id + " " + Long.toHexString(origAddress));
}
}
return neededCleanup;
}
@Override
public boolean isClean() {
return nativeHandle == 0;
}
}
private final CompiledExpressionCleaner cleaner;
private boolean isClosed = false;
/** Construct a compiled expression from a serialized AST */
CompiledExpression(byte[] serializedExpression) {
this(compile(serializedExpression));
}
/** Construct a compiled expression from a native compiled AST pointer */
CompiledExpression(long nativeHandle) {
this.cleaner = new CompiledExpressionCleaner(nativeHandle);
MemoryCleaner.register(this, cleaner);
cleaner.addRef();
}
/**
* Compute a new column by applying this AST expression to the specified table. All
* {@link ColumnReference} instances within the expression will use the sole input table,
* even if they try to specify a non-existent table, e.g.: {@link TableReference#RIGHT}.
* @param table input table for this expression
* @return new column computed from this expression applied to the input table
*/
public ColumnVector computeColumn(Table table) {
return new ColumnVector(computeColumn(cleaner.nativeHandle, table.getNativeView()));
}
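// Illustrative usage (a minimal sketch; assumes AstExpression.compile() and an input
// Table "table" with an INT32 column at index 0 -- names here are for illustration):
//   AstExpression expr = new BinaryOperation(BinaryOperator.ADD,
//       new ColumnReference(0), Literal.ofInt(1));
//   try (CompiledExpression compiled = expr.compile();
//        ColumnVector result = compiled.computeColumn(table)) {
//     // use result
//   }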
@Override
public synchronized void close() {
cleaner.delRef();
if (isClosed) {
cleaner.logRefCountDebug("double free " + this);
throw new IllegalStateException("Close called too many times " + this);
}
cleaner.clean(false);
isClosed = true;
}
/** Returns the native address of a compiled expression. Intended for internal cudf use only. */
public long getNativeHandle() {
return cleaner.nativeHandle;
}
private static native long compile(byte[] serializedExpression);
private static native long computeColumn(long astHandle, long tableHandle);
private static native void destroy(long handle);
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ast/UnaryOperator.java
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import java.nio.ByteBuffer;
/**
* Enumeration of AST operators that can appear in a unary operation.
* NOTE: This must be kept in sync with `jni_to_unary_operator` in CompiledExpression.cpp!
*/
public enum UnaryOperator {
IDENTITY(0), // Identity function
IS_NULL(1), // Check if operand is null
SIN(2), // Trigonometric sine
COS(3), // Trigonometric cosine
TAN(4), // Trigonometric tangent
ARCSIN(5), // Trigonometric sine inverse
ARCCOS(6), // Trigonometric cosine inverse
ARCTAN(7), // Trigonometric tangent inverse
SINH(8), // Hyperbolic sine
COSH(9), // Hyperbolic cosine
TANH(10), // Hyperbolic tangent
ARCSINH(11), // Hyperbolic sine inverse
ARCCOSH(12), // Hyperbolic cosine inverse
ARCTANH(13), // Hyperbolic tangent inverse
EXP(14), // Exponential (base e, Euler number)
LOG(15), // Natural Logarithm (base e)
SQRT(16), // Square-root (x^0.5)
CBRT(17), // Cube-root (x^(1.0/3))
CEIL(18), // Smallest integer value not less than arg
FLOOR(19), // Largest integer value not greater than arg
ABS(20), // Absolute value
RINT(21), // Rounds the floating-point argument arg to an integer value
BIT_INVERT(22), // Bitwise Not (~)
NOT(23), // Logical Not (!)
CAST_TO_INT64(24), // Cast value to int64_t
CAST_TO_UINT64(25), // Cast value to uint64_t
CAST_TO_FLOAT64(26); // Cast value to double
private final byte nativeId;
UnaryOperator(int nativeId) {
this.nativeId = (byte) nativeId;
assert this.nativeId == nativeId;
}
/** Get the size in bytes to serialize this operator */
int getSerializedSize() {
return Byte.BYTES;
}
/** Serialize this operator to the specified buffer */
void serialize(ByteBuffer bb) {
bb.put(nativeId);
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main
|
rapidsai_public_repos/cudf/java/src/main/native/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../../../../fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cuda)
include(rapids-find)
include(rapids-cpm)
rapids_cpm_init()
# Use GPU_ARCHS if it is defined
if(DEFINED GPU_ARCHS)
set(CMAKE_CUDA_ARCHITECTURES "${GPU_ARCHS}")
endif()
rapids_cuda_init_architectures(CUDF_JNI)
project(
CUDF_JNI
VERSION 24.02.00
LANGUAGES C CXX CUDA
)
# ##################################################################################################
# * build options ---------------------------------------------------------------------------------
option(USE_NVTX "Build with NVTX support" ON)
option(BUILD_SHARED_LIBS "Build cuDF JNI shared libraries" ON)
option(BUILD_TESTS "Configure CMake to build tests" ON)
option(CUDF_USE_PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" OFF)
option(CUDA_STATIC_RUNTIME "Statically link the CUDA runtime" OFF)
option(USE_GDS "Build with GPUDirect Storage (GDS)/cuFile support" OFF)
option(CUDF_JNI_LIBCUDF_STATIC "Link with libcudf.a" OFF)
option(CUDF_JNI_ENABLE_PROFILING "Build with profiling support" ON)
message(VERBOSE "CUDF_JNI: Build with NVTX support: ${USE_NVTX}")
message(VERBOSE "CUDF_JNI: Build cuDF JNI shared libraries: ${BUILD_SHARED_LIBS}")
message(VERBOSE "CUDF_JNI: Configure CMake to build tests: ${BUILD_TESTS}")
message(VERBOSE
"CUDF_JNI: Build with per-thread default stream: ${CUDF_USE_PER_THREAD_DEFAULT_STREAM}"
)
message(VERBOSE "CUDF_JNI: Statically link the CUDA runtime: ${CUDA_STATIC_RUNTIME}")
message(VERBOSE "CUDF_JNI: Build with GPUDirect Storage support: ${USE_GDS}")
message(VERBOSE "CUDF_JNI: Link with libcudf statically: ${CUDF_JNI_LIBCUDF_STATIC}")
set(CUDF_SOURCE_DIR "${PROJECT_SOURCE_DIR}/../../../../cpp")
if(NOT DEFINED CUDF_CPP_BUILD_DIR OR CUDF_CPP_BUILD_DIR STREQUAL "")
if(DEFINED ENV{CUDF_CPP_BUILD_DIR})
set(CUDF_CPP_BUILD_DIR "$ENV{CUDF_CPP_BUILD_DIR}")
else()
set(CUDF_CPP_BUILD_DIR "${CUDF_SOURCE_DIR}/build")
endif()
endif()
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/"
"${CUDF_SOURCE_DIR}/cmake/Modules/" ${CMAKE_MODULE_PATH}
)
# ##################################################################################################
# * compiler options ------------------------------------------------------------------------------
set(CUDF_CXX_FLAGS "")
set(CUDF_CUDA_FLAGS "")
set(CUDF_CXX_DEFINITIONS "")
set(CUDF_CUDA_DEFINITIONS "")
rapids_find_package(CUDAToolkit REQUIRED)
include(ConfigureCUDA) # set other CUDA compilation flags
if(CUDF_USE_PER_THREAD_DEFAULT_STREAM)
message(STATUS "Using per-thread default stream")
add_compile_definitions(CUDA_API_PER_THREAD_DEFAULT_STREAM CUDF_USE_PER_THREAD_DEFAULT_STREAM)
endif()
# ##################################################################################################
# * build type ------------------------------------------------------------------------------------
# Set a default build type if none was specified
rapids_cmake_build_type("Release")
# ##################################################################################################
# * CUDF ------------------------------------------------------------------------------------------
set(cudf_ROOT "${CUDF_CPP_BUILD_DIR}")
rapids_find_package(cudf REQUIRED)
# ##################################################################################################
# * nvcomp -----------------------------------------------------------------------------------------
if(NOT DEFINED nvcomp_DIR)
set(nvcomp_DIR "${CUDF_CPP_BUILD_DIR}/_deps/nvcomp-build")
endif()
rapids_find_package(nvcomp REQUIRED)
# ##################################################################################################
# * find JNI -------------------------------------------------------------------------------------
find_package(JNI REQUIRED)
if(JNI_FOUND)
message(STATUS "JDK with JNI in ${JNI_INCLUDE_DIRS}")
else()
message(FATAL_ERROR "JDK with JNI not found, please check your settings.")
endif()
# ##################################################################################################
# * GDS/cufile ------------------------------------------------------------------------------------
if(USE_GDS)
message(STATUS "Building with GPUDirect Storage (GDS)/cuFile support")
find_package(cuFile REQUIRED)
endif()
# ##################################################################################################
# * library targets -------------------------------------------------------------------------------
add_library(
cudfjni
src/Aggregation128UtilsJni.cpp
src/AggregationJni.cpp
src/ChunkedPackJni.cpp
src/ChunkedReaderJni.cpp
src/CudfJni.cpp
src/CudaJni.cpp
src/ColumnVectorJni.cpp
src/ColumnViewJni.cpp
src/ColumnViewJni.cu
src/CompiledExpression.cpp
src/ContiguousTableJni.cpp
src/DataSourceHelperJni.cpp
src/HashJoinJni.cpp
src/HostMemoryBufferNativeUtilsJni.cpp
src/NvcompJni.cpp
src/NvtxRangeJni.cpp
src/NvtxUniqueRangeJni.cpp
src/PackedColumnMetadataJni.cpp
src/RmmJni.cpp
src/ScalarJni.cpp
src/TableJni.cpp
src/aggregation128_utils.cu
src/maps_column_view.cu
src/row_conversion.cu
src/check_nvcomp_output_sizes.cu
)
# Disable NVTX if necessary
if(NOT USE_NVTX)
target_compile_definitions(cudfjni PUBLIC NVTX_DISABLE)
endif()
if(CUDF_JNI_ENABLE_PROFILING)
target_compile_definitions(cudfjni PRIVATE CUDF_JNI_ENABLE_PROFILING)
endif()
if(CUDF_JNI_LIBCUDF_STATIC AND BUILD_SHARED_LIBS)
# When linking against libcudf.a, the JNI code and libcudf.a will be combined into a single
# shared library named libcudf.so, for backwards compatibility with software that expects to
# find libcudf.so in the JVM environment after cudf has loaded. A stub library will also be
# created for libcudfjni.so that simply requires libcudf.so, for backwards compatibility with
# software that expects to find libcudfjni.so at runtime.
set_target_properties(cudfjni PROPERTIES OUTPUT_NAME "cudf")
add_library(cudfjnistub SHARED src/emptyfile.cpp)
set_target_properties(cudfjnistub PROPERTIES OUTPUT_NAME "cudfjni")
target_link_libraries(cudfjnistub -Wl,--no-as-needed cudfjni -Wl,--as-needed)
endif()
# ##################################################################################################
# * include paths ---------------------------------------------------------------------------------
target_include_directories(
cudfjni PUBLIC "${CMAKE_BINARY_DIR}/include" "${CMAKE_SOURCE_DIR}/include"
"${CMAKE_SOURCE_DIR}/src" "${JNI_INCLUDE_DIRS}"
)
# ##################################################################################################
# * compile options
# ---------------------------------------------------------------------------------
# Override RPATH for cudfjni
set_target_properties(
cudfjni
PROPERTIES BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
# set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_compile_options(
cudfjni PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${CUDF_CXX_FLAGS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_FLAGS}>"
)
target_compile_definitions(
cudfjni PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:${CUDF_CXX_DEFINITIONS}>"
"$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_DEFINITIONS}>"
)
if(USE_GDS)
add_library(cufilejni src/CuFileJni.cpp)
set_target_properties(
cufilejni
PROPERTIES BUILD_RPATH "\$ORIGIN"
INSTALL_RPATH "\$ORIGIN"
# set target compile options
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON
INTERFACE_POSITION_INDEPENDENT_CODE ON
)
target_include_directories(cufilejni PRIVATE "${cuFile_INCLUDE_DIRS}")
target_link_libraries(cufilejni PRIVATE cudfjni "${cuFile_LIBRARIES}")
endif()
# ##################################################################################################
# * link libraries --------------------------------------------------------------------------------
set(CUDF_LINK PUBLIC cudf::cudf)
if(CUDF_JNI_LIBCUDF_STATIC)
# Whole-link libcudf.a into the shared library but not its dependencies
set(CUDF_LINK PRIVATE -Wl,--whole-archive cudf::cudf -Wl,--no-whole-archive PUBLIC cudf::cudf)
endif()
# When nvcomp is installed we need to use nvcomp::nvcomp but from the cudf build directory it will
# just be nvcomp.
target_link_libraries(
cudfjni ${CUDF_LINK} PRIVATE $<TARGET_NAME_IF_EXISTS:nvcomp>
$<TARGET_NAME_IF_EXISTS:nvcomp::nvcomp>
)
# ##################################################################################################
# * cudart options --------------------------------------------------------------------------------
# cudart can be statically linked or dynamically linked. The python ecosystem wants dynamic
# linking
if(CUDA_STATIC_RUNTIME)
# Tell CMake what CUDA language runtime to use
set_target_properties(cudfjni PROPERTIES CUDA_RUNTIME_LIBRARY Static)
else()
# Tell CMake what CUDA language runtime to use
set_target_properties(cudfjni PROPERTIES CUDA_RUNTIME_LIBRARY Shared)
endif()
# ##################################################################################################
# * install shared libraries ----------------------------------------------------------------------
if(TARGET nvcomp::nvcomp)
add_custom_command(
TARGET cudfjni
PRE_LINK
COMMAND
${CMAKE_COMMAND} -E copy $<TARGET_FILE:nvcomp::nvcomp> $<TARGET_FILE:nvcomp::nvcomp_gdeflate>
$<TARGET_FILE:nvcomp::nvcomp_bitcomp> "${PROJECT_BINARY_DIR}"
COMMENT "Copying nvcomp libraries to ${PROJECT_BINARY_DIR}"
)
endif()
| 0 |
rapidsai_public_repos/cudf/java/src/main
|
rapidsai_public_repos/cudf/java/src/main/native/clang-format.README
|
README
======
To apply code formatting to a file you are working on, you can currently do this manually
using clang-format-7:
This will print the formatted result to stdout without modifying the file:
clang-format [file]
This will edit the file in place; do this if you are sure of what you are doing:
clang-format -i [file]
| 0 |
rapidsai_public_repos/cudf/java/src/main
|
rapidsai_public_repos/cudf/java/src/main/native/.clang-format
|
---
# Reference: https://clang.llvm.org/docs/ClangFormatStyleOptions.html
Language: Cpp
# BasedOnStyle: LLVM
# no indentation (-2 from indent, which is 2)
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
# int aaaa = 12;
# int b = 23;
# int ccc = 23;
# leaving OFF
AlignConsecutiveAssignments: false
# int aaaa = 12;
# float b = 23;
# std::string ccc = 23;
# leaving OFF
AlignConsecutiveDeclarations: false
##define A \
# int aaaa; \
# int b; \
# int dddddddddd;
# leaving ON
AlignEscapedNewlines: Right
# int aaa = bbbbbbbbbbbbbbb +
# ccccccccccccccc;
# leaving ON
AlignOperands: true
# true: false:
# int a; // My comment a vs. int a; // My comment a
# int b = 2; // comment b int b = 2; // comment about b
# leaving ON
AlignTrailingComments: true
# squeezes a long declaration's arguments to the next line:
#true:
#void myFunction(
# int a, int b, int c, int d, int e);
#
#false:
#void myFunction(int a,
# int b,
# int c,
# int d,
# int e);
# leaving ON
AllowAllParametersOfDeclarationOnNextLine: true
# changed to ON, as we use short blocks on same lines
AllowShortBlocksOnASingleLine: true
# set this to ON, we use this in a few places
AllowShortCaseLabelsOnASingleLine: true
# set this to ON
AllowShortFunctionsOnASingleLine: Inline
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
# Deprecated option.
# PenaltyReturnTypeOnItsOwnLine applies, as we set this to None,
# where it tries to break after the return type automatically
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
# if all the arguments for a function don't fit in a single line,
# with a value of "false", it'll split each argument into different lines
BinPackArguments: true
BinPackParameters: true
# if this is set to Custom, the BraceWrapping flags apply
BreakBeforeBraces: Custom
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: false
# will break after operators when a line is too long
BreakBeforeBinaryOperators: None
# not in docs.. so that's nice
BreakBeforeInheritanceComma: false
# This will break inheritance list and align on colon,
# it also places each inherited class in a different line.
# Leaving ON
BreakInheritanceList: BeforeColon
#
#true:
#veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongDescription
# ? firstValue
# : SecondValueVeryVeryVeryVeryLong;
#
#false:
#veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongDescription ?
# firstValue :
# SecondValueVeryVeryVeryVeryLong;
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: true
BreakStringLiterals: true
# So the line lengths in cudf are not following a limit, at the moment.
# Usually it's a long comment that makes the line length inconsistent.
# Command I used to find max line lengths (from cpp directory):
# find include src tests|grep "\." |xargs -I{} bash -c "awk '{print length}' {} | sort -rn | head -1"|sort -n
# I picked 100, as it seemed somewhere around median
ColumnLimit: 100
# TODO: not aware of any of these at this time
CommentPragmas: '^ IWYU pragma:'
# So it doesn't put subsequent namespaces in the same line
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
# TODO: adds spaces around the element list
# in initializer: vector<T> x{ {}, ..., {} }
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
# } // namespace a => useful
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Regroup
IncludeCategories:
- Regex: '<[[:alnum:]]+>'
Priority: 0
- Regex: '<[[:alnum:].]+>'
Priority: 1
- Regex: '<.*>'
Priority: 2
- Regex: '.*/.*'
Priority: 3
- Regex: '.*'
Priority: 4
# if a header matches this in an include group, it will be moved up to the
# top of the group.
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
# Penalties: leaving unchanged for now
# https://stackoverflow.com/questions/26635370/in-clang-format-what-do-the-penalties-do
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
# As currently set, we don't see return types being
# left on their own line, leaving at 60
PenaltyReturnTypeOnItsOwnLine: 60
# char* foo vs char *foo, picking Right aligned
PointerAlignment: Right
ReflowComments: true
# leaving ON, but this could be something to turn OFF
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
...
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/include/jni_utils.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <memory>
#include <vector>
#include <jni.h>
#include <cudf/utilities/error.hpp>
#include <rmm/detail/error.hpp>
namespace cudf {
namespace jni {
constexpr jint MINIMUM_JNI_VERSION = JNI_VERSION_1_6;
constexpr char const *CUDA_ERROR_CLASS = "ai/rapids/cudf/CudaException";
constexpr char const *CUDA_FATAL_ERROR_CLASS = "ai/rapids/cudf/CudaFatalException";
constexpr char const *CUDF_ERROR_CLASS = "ai/rapids/cudf/CudfException";
constexpr char const *CUDF_OVERFLOW_ERROR_CLASS = "ai/rapids/cudf/CudfColumnSizeOverflowException";
constexpr char const *CUDF_DTYPE_ERROR_CLASS = "ai/rapids/cudf/CudfException";
constexpr char const *INDEX_OOB_CLASS = "java/lang/ArrayIndexOutOfBoundsException";
constexpr char const *ILLEGAL_ARG_CLASS = "java/lang/IllegalArgumentException";
constexpr char const *NPE_CLASS = "java/lang/NullPointerException";
constexpr char const *OOM_CLASS = "java/lang/OutOfMemoryError";
/**
* @brief indicates that a JNI error of some kind was thrown and the main
* function should return.
*/
class jni_exception : public std::runtime_error {
public:
jni_exception(char const *const message) : std::runtime_error(message) {}
jni_exception(std::string const &message) : std::runtime_error(message) {}
};
/**
* @brief throw a java exception and a C++ one for flow control.
*/
inline void throw_java_exception(JNIEnv *const env, const char *class_name, const char *message) {
jclass ex_class = env->FindClass(class_name);
if (ex_class != NULL) {
env->ThrowNew(ex_class, message);
}
throw jni_exception(message);
}
/**
* @brief check if a Java exception has been thrown and if so throw a C++
* exception so the flow control stops processing.
*/
inline void check_java_exception(JNIEnv *const env) {
if (env->ExceptionCheck()) {
// Not going to try to get the message out of the Exception, too complex and
// might fail.
throw jni_exception("JNI Exception...");
}
}
/**
* @brief Helper to convert a pointer to a jlong.
*
* This is useful when, for instance, converting a cudf::column pointer
* to a jlong, for use in JNI.
*/
template <typename T> jlong ptr_as_jlong(T *ptr) {
return reinterpret_cast<jlong>(ptr);
}
/**
* @brief Helper to release the data held by a unique_ptr, and return
* the pointer as a jlong.
*/
template <typename T> jlong release_as_jlong(std::unique_ptr<T> &&ptr) {
return ptr_as_jlong(ptr.release());
}
/**
* @brief Helper to release the data held by a unique_ptr, and return
* the pointer as a jlong.
*/
template <typename T> jlong release_as_jlong(std::unique_ptr<T> &ptr) {
return release_as_jlong(std::move(ptr));
}
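// Illustrative use in a JNI function body (a sketch, not code from this file):
//   std::unique_ptr<cudf::column> col = ...;
//   return release_as_jlong(col);  // ownership is transferred to the Java side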
class native_jdoubleArray_accessor {
public:
jdouble *getArrayElements(JNIEnv *const env, jdoubleArray arr) const {
return env->GetDoubleArrayElements(arr, NULL);
}
jdoubleArray newArray(JNIEnv *const env, int len) const { return env->NewDoubleArray(len); }
void setArrayRegion(JNIEnv *const env, jdoubleArray jarr, int start, int len,
jdouble const *arr) const {
env->SetDoubleArrayRegion(jarr, start, len, arr);
}
void releaseArrayElements(JNIEnv *const env, jdoubleArray jarr, jdouble *arr, jint mode) const {
env->ReleaseDoubleArrayElements(jarr, arr, mode);
}
};
class native_jlongArray_accessor {
public:
jlong *getArrayElements(JNIEnv *const env, jlongArray arr) const {
return env->GetLongArrayElements(arr, NULL);
}
jlongArray newArray(JNIEnv *const env, int len) const { return env->NewLongArray(len); }
void setArrayRegion(JNIEnv *const env, jlongArray jarr, int start, int len,
jlong const *arr) const {
env->SetLongArrayRegion(jarr, start, len, arr);
}
void releaseArrayElements(JNIEnv *const env, jlongArray jarr, jlong *arr, jint mode) const {
env->ReleaseLongArrayElements(jarr, arr, mode);
}
};
class native_jintArray_accessor {
public:
jint *getArrayElements(JNIEnv *const env, jintArray arr) const {
return env->GetIntArrayElements(arr, NULL);
}
jintArray newArray(JNIEnv *const env, int len) const { return env->NewIntArray(len); }
void setArrayRegion(JNIEnv *const env, jintArray jarr, int start, int len,
jint const *arr) const {
env->SetIntArrayRegion(jarr, start, len, arr);
}
void releaseArrayElements(JNIEnv *const env, jintArray jarr, jint *arr, jint mode) const {
env->ReleaseIntArrayElements(jarr, arr, mode);
}
};
class native_jbyteArray_accessor {
public:
jbyte *getArrayElements(JNIEnv *const env, jbyteArray arr) const {
return env->GetByteArrayElements(arr, NULL);
}
jbyteArray newArray(JNIEnv *const env, int len) const { return env->NewByteArray(len); }
void setArrayRegion(JNIEnv *const env, jbyteArray jarr, int start, int len,
jbyte const *arr) const {
env->SetByteArrayRegion(jarr, start, len, arr);
}
void releaseArrayElements(JNIEnv *const env, jbyteArray jarr, jbyte *arr, jint mode) const {
env->ReleaseByteArrayElements(jarr, arr, mode);
}
};
class native_jbooleanArray_accessor {
public:
jboolean *getArrayElements(JNIEnv *const env, jbooleanArray arr) const {
return env->GetBooleanArrayElements(arr, NULL);
}
jbooleanArray newArray(JNIEnv *const env, int len) const { return env->NewBooleanArray(len); }
void setArrayRegion(JNIEnv *const env, jbooleanArray jarr, int start, int len,
jboolean const *arr) const {
env->SetBooleanArrayRegion(jarr, start, len, arr);
}
void releaseArrayElements(JNIEnv *const env, jbooleanArray jarr, jboolean *arr, jint mode) const {
env->ReleaseBooleanArrayElements(jarr, arr, mode);
}
};
/**
* @brief RAII wrapper for Java arrays to ensure they are handled correctly.
*
* By default any changes to the array will be committed back when
* the destructor is called unless cancel is called first.
*/
template <typename N_TYPE, typename J_ARRAY_TYPE, typename ACCESSOR> class native_jArray {
private:
ACCESSOR access{};
JNIEnv *const env;
J_ARRAY_TYPE orig;
int len;
mutable N_TYPE *data_ptr;
void init_data_ptr() const {
if (orig != nullptr && data_ptr == nullptr) {
data_ptr = access.getArrayElements(env, orig);
check_java_exception(env);
}
}
public:
native_jArray(native_jArray const &) = delete;
native_jArray &operator=(native_jArray const &) = delete;
native_jArray(JNIEnv *const env, J_ARRAY_TYPE orig)
: env(env), orig(orig), len(0), data_ptr(NULL) {
if (orig != NULL) {
len = env->GetArrayLength(orig);
check_java_exception(env);
}
}
native_jArray(JNIEnv *const env, int len)
: env(env), orig(access.newArray(env, len)), len(len), data_ptr(NULL) {
check_java_exception(env);
}
native_jArray(JNIEnv *const env, N_TYPE const *arr, int len)
: env(env), orig(access.newArray(env, len)), len(len), data_ptr(NULL) {
check_java_exception(env);
access.setArrayRegion(env, orig, 0, len, arr);
check_java_exception(env);
}
native_jArray(JNIEnv *const env, const std::vector<N_TYPE> &arr)
: env(env), orig(access.newArray(env, arr.size())), len(arr.size()), data_ptr(NULL) {
check_java_exception(env);
access.setArrayRegion(env, orig, 0, len, arr.data());
check_java_exception(env);
}
bool is_null() const noexcept { return orig == NULL; }
int size() const noexcept { return len; }
N_TYPE operator[](int index) const {
if (orig == NULL) {
throw_java_exception(env, NPE_CLASS, "pointer is NULL");
}
if (index < 0 || index >= len) {
throw_java_exception(env, INDEX_OOB_CLASS, "NOT IN BOUNDS");
}
return data()[index];
}
N_TYPE &operator[](int index) {
if (orig == NULL) {
throw_java_exception(env, NPE_CLASS, "pointer is NULL");
}
if (index < 0 || index >= len) {
throw_java_exception(env, INDEX_OOB_CLASS, "NOT IN BOUNDS");
}
return data()[index];
}
const N_TYPE *const data() const {
init_data_ptr();
return data_ptr;
}
N_TYPE *data() {
init_data_ptr();
return data_ptr;
}
const N_TYPE *const begin() const { return data(); }
N_TYPE *begin() { return data(); }
const N_TYPE *const end() const { return data() + size(); }
N_TYPE *end() { return data() + size(); }
const J_ARRAY_TYPE get_jArray() const { return orig; }
J_ARRAY_TYPE get_jArray() { return orig; }
/**
* @brief Conversion to std::vector
*
* @tparam target_t Target data type
* @return std::vector<target_t> Vector with the copied contents
*/
template <typename target_t = N_TYPE> std::vector<target_t> to_vector() const {
std::vector<target_t> ret;
ret.reserve(size());
std::copy(begin(), end(), std::back_inserter(ret));
return ret;
}
/**
* @brief if data has been written back into this array, don't commit
* it.
*/
void cancel() {
if (data_ptr != NULL && orig != NULL) {
access.releaseArrayElements(env, orig, data_ptr, JNI_ABORT);
data_ptr = NULL;
}
}
void commit() {
if (data_ptr != NULL && orig != NULL) {
access.releaseArrayElements(env, orig, data_ptr, 0);
data_ptr = NULL;
}
}
~native_jArray() { commit(); }
};
using native_jdoubleArray = native_jArray<jdouble, jdoubleArray, native_jdoubleArray_accessor>;
using native_jlongArray = native_jArray<jlong, jlongArray, native_jlongArray_accessor>;
using native_jintArray = native_jArray<jint, jintArray, native_jintArray_accessor>;
using native_jbyteArray = native_jArray<jbyte, jbyteArray, native_jbyteArray_accessor>;
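// Illustrative usage of the aliases above (assumes a jintArray argument "j_arr"):
//   cudf::jni::native_jintArray n_arr(env, j_arr);
//   if (!n_arr.is_null()) {
//     for (int i = 0; i < n_arr.size(); ++i) { /* read or write n_arr[i] */ }
//   }  // the destructor commits any writes back to the Java array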
/**
* @brief Specialization of native_jArray for jboolean
*
* This class adds special support for conversion to std::vector<X>, where the element
* value is chosen depending on the jboolean value.
*/
struct native_jbooleanArray
: native_jArray<jboolean, jbooleanArray, native_jbooleanArray_accessor> {
native_jbooleanArray(JNIEnv *const env, jbooleanArray orig)
: native_jArray<jboolean, jbooleanArray, native_jbooleanArray_accessor>(env, orig) {}
native_jbooleanArray(native_jbooleanArray const &) = delete;
native_jbooleanArray &operator=(native_jbooleanArray const &) = delete;
template <typename target_t>
std::vector<target_t> transform_if_else(target_t const &if_true, target_t const &if_false) const {
std::vector<target_t> ret;
ret.reserve(size());
std::transform(begin(), end(), std::back_inserter(ret),
[&](jboolean const &b) { return b ? if_true : if_false; });
return ret;
}
};
/**
* @brief wrapper around native_jlongArray to make it take pointers instead.
*
* By default any changes to the array will be committed back when
* the destructor is called unless cancel is called first.
*/
template <typename T> class native_jpointerArray {
private:
native_jlongArray wrapped;
JNIEnv *const env;
public:
native_jpointerArray(native_jpointerArray const &) = delete;
native_jpointerArray &operator=(native_jpointerArray const &) = delete;
native_jpointerArray(JNIEnv *const env, jlongArray orig) : wrapped(env, orig), env(env) {}
native_jpointerArray(JNIEnv *const env, int len) : wrapped(env, len), env(env) {}
native_jpointerArray(JNIEnv *const env, T *arr, int len) : wrapped(env, arr, len), env(env) {}
bool is_null() const noexcept { return wrapped.is_null(); }
int size() const noexcept { return wrapped.size(); }
T *operator[](int index) const {
if (data() == NULL) {
throw_java_exception(env, NPE_CLASS, "pointer is NULL");
}
if (index < 0 || index >= wrapped.size()) {
throw_java_exception(env, INDEX_OOB_CLASS, "NOT IN BOUNDS");
}
return data()[index];
}
T *&operator[](int index) {
if (data() == NULL) {
throw_java_exception(env, NPE_CLASS, "pointer is NULL");
}
if (index < 0 || index >= wrapped.size()) {
throw_java_exception(env, INDEX_OOB_CLASS, "NOT IN BOUNDS");
}
return data()[index];
}
T *const *data() const { return reinterpret_cast<T *const *>(wrapped.data()); }
T **data() { return reinterpret_cast<T **>(wrapped.data()); }
T *const *begin() const { return data(); }
T *const *end() const { return data() + size(); }
const jlongArray get_jArray() const { return wrapped.get_jArray(); }
jlongArray get_jArray() { return wrapped.get_jArray(); }
void assert_no_nulls() const {
if (std::any_of(data(), data() + size(), [](T *const ptr) { return ptr == nullptr; })) {
throw_java_exception(env, NPE_CLASS, "pointer is NULL");
}
}
/**
* @brief Convert from `T*[]` to `vector<T>`.
*/
std::vector<T> get_dereferenced() const {
assert_no_nulls();
auto ret = std::vector<T>{};
ret.reserve(size());
std::transform(data(), data() + size(), std::back_inserter(ret),
[](T *const &p) { return *p; });
return ret;
}
/**
* @brief if data has been written back into this array, don't commit
* it.
*/
void cancel() { wrapped.cancel(); }
void commit() { wrapped.commit(); }
};
/**
* @brief wrapper around native_jlongArray to hold pointers that are deleted
* if not released, like std::unique_ptr.
*
* By default any changes to the array will be committed back when
* released unless cancel is called first.
*/
template <typename T, typename D = std::default_delete<T>> class unique_jpointerArray {
private:
std::unique_ptr<native_jpointerArray<T>> wrapped;
D del;
public:
unique_jpointerArray(unique_jpointerArray const &) = delete;
unique_jpointerArray &operator=(unique_jpointerArray const &) = delete;
unique_jpointerArray(JNIEnv *const env, jlongArray orig)
: wrapped(new native_jpointerArray<T>(env, orig)) {}
unique_jpointerArray(JNIEnv *const env, jlongArray orig, const D &del)
: wrapped(new native_jpointerArray<T>(env, orig)), del(del) {}
unique_jpointerArray(JNIEnv *const env, int len)
: wrapped(new native_jpointerArray<T>(env, len)) {}
unique_jpointerArray(JNIEnv *const env, int len, const D &del)
: wrapped(new native_jpointerArray<T>(env, len)), del(del) {}
unique_jpointerArray(JNIEnv *const env, T *arr, int len)
: wrapped(new native_jpointerArray<T>(env, arr, len)) {}
unique_jpointerArray(JNIEnv *const env, T *arr, int len, const D &del)
: wrapped(new native_jpointerArray<T>(env, arr, len)), del(del) {}
bool is_null() const noexcept { return wrapped == NULL || wrapped->is_null(); }
int size() const noexcept { return wrapped == NULL ? 0 : wrapped->size(); }
void reset(int index, T *new_ptr = NULL) {
if (wrapped == NULL) {
throw std::logic_error("using unique_jpointerArray after release");
}
T *old = (*wrapped)[index];
if (old != new_ptr) {
(*wrapped)[index] = new_ptr;
del(old);
}
}
T *get(int index) {
if (wrapped == NULL) {
throw std::logic_error("using unique_jpointerArray after release");
}
return (*wrapped)[index];
}
T *const *get() {
if (wrapped == NULL) {
throw std::logic_error("using unique_jpointerArray after release");
}
return wrapped->data();
}
jlongArray release() {
if (wrapped == NULL) {
return NULL;
}
wrapped->commit();
jlongArray ret = wrapped->get_jArray();
wrapped.reset(NULL);
return ret;
}
~unique_jpointerArray() {
if (wrapped != NULL) {
for (int i = 0; i < wrapped->size(); i++) {
reset(i, NULL);
}
}
}
};
/**
* @brief RAII wrapper for jstring to ensure it is handled correctly.
*/
class native_jstring {
private:
JNIEnv *env;
jstring orig;
mutable const char *cstr;
mutable size_t cstr_length;
void init_cstr() const {
if (orig != NULL && cstr == NULL) {
cstr_length = env->GetStringUTFLength(orig);
cstr = env->GetStringUTFChars(orig, 0);
check_java_exception(env);
}
}
public:
native_jstring(native_jstring const &) = delete;
native_jstring &operator=(native_jstring const &) = delete;
native_jstring(native_jstring &&other) noexcept
: env(other.env), orig(other.orig), cstr(other.cstr), cstr_length(other.cstr_length) {
other.cstr = NULL;
}
native_jstring(JNIEnv *const env, jstring orig)
: env(env), orig(orig), cstr(NULL), cstr_length(0) {}
native_jstring &operator=(native_jstring const &&other) {
if (orig != NULL && cstr != NULL) {
env->ReleaseStringUTFChars(orig, cstr);
}
this->env = other.env;
this->orig = other.orig;
this->cstr = other.cstr;
this->cstr_length = other.cstr_length;
other.cstr = NULL;
return *this;
}
bool is_null() const noexcept { return orig == NULL; }
const char *get() const {
init_cstr();
return cstr;
}
size_t size_bytes() const {
init_cstr();
return cstr_length;
}
bool is_empty() const {
if (cstr != NULL) {
return cstr_length <= 0;
} else if (orig != NULL) {
jsize len = env->GetStringLength(orig);
check_java_exception(env);
return len <= 0;
}
return true;
}
const jstring get_jstring() const { return orig; }
~native_jstring() {
if (orig != NULL && cstr != NULL) {
env->ReleaseStringUTFChars(orig, cstr);
}
}
};
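// Illustrative usage (a sketch): copy a jstring argument into a std::string.
//   cudf::jni::native_jstring n_name(env, j_name);
//   std::string name(n_name.get(), n_name.size_bytes());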
/**
* @brief jobjectArray wrapper to make accessing it more convenient.
*/
template <typename T> class native_jobjectArray {
private:
JNIEnv *const env;
jobjectArray orig;
int len;
public:
native_jobjectArray(JNIEnv *const env, jobjectArray orig) : env(env), orig(orig), len(0) {
if (orig != NULL) {
len = env->GetArrayLength(orig);
check_java_exception(env);
}
}
bool is_null() const noexcept { return orig == NULL; }
int size() const noexcept { return len; }
T operator[](int index) const { return get(index); }
T get(int index) const {
if (orig == NULL) {
throw_java_exception(env, NPE_CLASS, "jobjectArray pointer is NULL");
}
T ret = static_cast<T>(env->GetObjectArrayElement(orig, index));
check_java_exception(env);
return ret;
}
void set(int index, const T &val) {
if (orig == NULL) {
throw_java_exception(env, NPE_CLASS, "jobjectArray pointer is NULL");
}
env->SetObjectArrayElement(orig, index, val);
check_java_exception(env);
}
jobjectArray wrapped() { return orig; }
};
/**
* @brief jobjectArray wrapper to make accessing strings safe through RAII
* and convenient.
*/
class native_jstringArray {
private:
JNIEnv *const env;
native_jobjectArray<jstring> arr;
mutable std::vector<native_jstring> cache;
mutable std::vector<std::string> cpp_cache;
mutable std::vector<const char *> c_cache;
void init_cache() const {
if (!arr.is_null() && cache.empty()) {
int size = this->size();
cache.reserve(size);
for (int i = 0; i < size; i++) {
cache.push_back(native_jstring(env, arr.get(i)));
}
}
}
void init_c_cache() const {
if (!arr.is_null() && c_cache.empty()) {
init_cache();
int size = this->size();
c_cache.reserve(size);
for (int i = 0; i < size; i++) {
c_cache.push_back(cache[i].get());
}
}
}
void init_cpp_cache() const {
if (!arr.is_null() && cpp_cache.empty()) {
init_cache();
int size = this->size();
cpp_cache.reserve(size);
for (int i = 0; i < size; i++) {
cpp_cache.push_back(cache[i].get());
}
}
}
void update_caches(int index, jstring val) {
if (!cache.empty()) {
cache[index] = native_jstring(env, val);
if (!c_cache.empty()) {
c_cache[index] = cache[index].get();
}
if (!cpp_cache.empty()) {
cpp_cache[index] = cache[index].get();
}
} else if (!c_cache.empty() || !cpp_cache.empty()) {
// Illegal state: a derived cache exists without the backing jstring cache.
throw std::logic_error("CACHING IS MESSED UP");
}
}
public:
native_jstringArray(JNIEnv *const env, jobjectArray orig) : env(env), arr(env, orig) {}
bool is_null() const noexcept { return arr.is_null(); }
int size() const noexcept { return arr.size(); }
native_jstring &operator[](int index) const { return get(index); }
native_jstring &get(int index) const {
if (arr.is_null()) {
throw_java_exception(env, cudf::jni::NPE_CLASS, "jstringArray pointer is NULL");
}
init_cache();
return cache[index];
}
const char **const as_c_array() const {
init_c_cache();
return c_cache.data();
}
const std::vector<std::string> as_cpp_vector() const {
init_cpp_cache();
return cpp_cache;
}
void set(int index, jstring val) {
arr.set(index, val);
update_caches(index, val);
}
void set(int index, const native_jstring &val) {
arr.set(index, val.get_jstring());
update_caches(index, val.get_jstring());
}
void set(int index, const char *val) {
jstring str = env->NewStringUTF(val);
check_java_exception(env);
arr.set(index, str);
update_caches(index, str);
}
};
/**
* @brief create a cuda exception from a given cudaError_t
*/
inline jthrowable cuda_exception(JNIEnv *const env, cudaError_t status, jthrowable cause = NULL) {
const char *ex_class_name;
// Calls cudaGetLastError twice. It is nearly certain that a fatal error occurred if the second
// call doesn't return with cudaSuccess.
cudaGetLastError();
auto const last = cudaGetLastError();
// Call cudaDeviceSynchronize to ensure `last` did not result from an asynchronous error
// between the two calls.
if (status == last && last == cudaDeviceSynchronize()) {
ex_class_name = cudf::jni::CUDA_FATAL_ERROR_CLASS;
} else {
ex_class_name = cudf::jni::CUDA_ERROR_CLASS;
}
jclass ex_class = env->FindClass(ex_class_name);
if (ex_class == NULL) {
return NULL;
}
jmethodID ctor_id =
env->GetMethodID(ex_class, "<init>", "(Ljava/lang/String;ILjava/lang/Throwable;)V");
if (ctor_id == NULL) {
return NULL;
}
jstring msg = env->NewStringUTF(cudaGetErrorString(status));
if (msg == NULL) {
return NULL;
}
jint err_code = static_cast<jint>(status);
jobject ret = env->NewObject(ex_class, ctor_id, msg, err_code, cause);
return (jthrowable)ret;
}
inline void jni_cuda_check(JNIEnv *const env, cudaError_t cuda_status) {
if (cudaSuccess != cuda_status) {
jthrowable jt = cuda_exception(env, cuda_status);
if (jt != NULL) {
env->Throw(jt);
}
throw jni_exception(std::string("CUDA ERROR: code ") +
std::to_string(static_cast<int>(cuda_status)));
}
}
inline auto add_global_ref(JNIEnv *env, jobject jobj) {
auto new_global_ref = env->NewGlobalRef(jobj);
if (new_global_ref == nullptr) {
throw cudf::jni::jni_exception("global ref");
}
return new_global_ref;
}
inline std::nullptr_t del_global_ref(JNIEnv *env, jobject jobj) {
if (jobj != nullptr) {
env->DeleteGlobalRef(jobj);
}
return nullptr;
}
} // namespace jni
} // namespace cudf
#define JNI_EXCEPTION_OCCURRED_CHECK(env, ret_val) \
{ \
if (env->ExceptionOccurred()) { \
return ret_val; \
} \
}
#define JNI_THROW_NEW(env, class_name, message, ret_val) \
{ \
jclass ex_class = env->FindClass(class_name); \
if (ex_class == NULL) { \
return ret_val; \
} \
env->ThrowNew(ex_class, message); \
return ret_val; \
}
// Throw a new exception only if one is not pending, then always return with the specified value
#define JNI_CHECK_THROW_CUDF_EXCEPTION(env, class_name, message, stacktrace, ret_val) \
{ \
JNI_EXCEPTION_OCCURRED_CHECK(env, ret_val); \
auto const ex_class = env->FindClass(class_name); \
if (ex_class == nullptr) { \
return ret_val; \
} \
auto const ctor_id = \
env->GetMethodID(ex_class, "<init>", "(Ljava/lang/String;Ljava/lang/String;)V"); \
if (ctor_id == nullptr) { \
return ret_val; \
} \
auto const empty_str = std::string{""}; \
auto const jmessage = env->NewStringUTF(message == nullptr ? empty_str.c_str() : message); \
if (jmessage == nullptr) { \
return ret_val; \
} \
auto const jstacktrace = \
env->NewStringUTF(stacktrace == nullptr ? empty_str.c_str() : stacktrace); \
if (jstacktrace == nullptr) { \
return ret_val; \
} \
auto const jobj = env->NewObject(ex_class, ctor_id, jmessage, jstacktrace); \
if (jobj == nullptr) { \
return ret_val; \
} \
env->Throw(reinterpret_cast<jthrowable>(jobj)); \
return ret_val; \
}
// Throw a new exception only if one is not pending, then always return with the specified value
#define JNI_CHECK_THROW_CUDA_EXCEPTION(env, class_name, message, stacktrace, error_code, ret_val) \
{ \
JNI_EXCEPTION_OCCURRED_CHECK(env, ret_val); \
auto const ex_class = env->FindClass(class_name); \
if (ex_class == nullptr) { \
return ret_val; \
} \
auto const ctor_id = \
env->GetMethodID(ex_class, "<init>", "(Ljava/lang/String;Ljava/lang/String;I)V"); \
if (ctor_id == nullptr) { \
return ret_val; \
} \
auto const empty_str = std::string{""}; \
auto const jmessage = env->NewStringUTF(message == nullptr ? empty_str.c_str() : message); \
if (jmessage == nullptr) { \
return ret_val; \
} \
auto const jstacktrace = \
env->NewStringUTF(stacktrace == nullptr ? empty_str.c_str() : stacktrace); \
if (jstacktrace == nullptr) { \
return ret_val; \
} \
auto const jerror_code = static_cast<jint>(error_code); \
auto const jobj = env->NewObject(ex_class, ctor_id, jmessage, jstacktrace, jerror_code); \
if (jobj == nullptr) { \
return ret_val; \
} \
env->Throw(reinterpret_cast<jthrowable>(jobj)); \
return ret_val; \
}
#define JNI_NULL_CHECK(env, obj, error_msg, ret_val) \
{ \
if ((obj) == 0) { \
JNI_THROW_NEW(env, cudf::jni::NPE_CLASS, error_msg, ret_val); \
} \
}
#define JNI_ARG_CHECK(env, obj, error_msg, ret_val) \
{ \
if (!(obj)) { \
JNI_THROW_NEW(env, cudf::jni::ILLEGAL_ARG_CLASS, error_msg, ret_val); \
} \
}
#define CATCH_STD_CLASS(env, class_name, ret_val) \
catch (const rmm::out_of_memory &e) { \
JNI_EXCEPTION_OCCURRED_CHECK(env, ret_val); \
auto const what = \
std::string("Could not allocate native memory: ") + (e.what() == nullptr ? "" : e.what()); \
JNI_THROW_NEW(env, cudf::jni::OOM_CLASS, what.c_str(), ret_val); \
} \
catch (const cudf::fatal_cuda_error &e) { \
JNI_CHECK_THROW_CUDA_EXCEPTION(env, cudf::jni::CUDA_FATAL_ERROR_CLASS, e.what(), \
e.stacktrace(), e.error_code(), ret_val); \
} \
catch (const cudf::cuda_error &e) { \
JNI_CHECK_THROW_CUDA_EXCEPTION(env, cudf::jni::CUDA_ERROR_CLASS, e.what(), e.stacktrace(), \
e.error_code(), ret_val); \
} \
catch (const cudf::data_type_error &e) { \
JNI_CHECK_THROW_CUDF_EXCEPTION(env, cudf::jni::CUDF_DTYPE_ERROR_CLASS, e.what(), \
e.stacktrace(), ret_val); \
} \
catch (std::overflow_error const &e) { \
JNI_CHECK_THROW_CUDF_EXCEPTION(env, cudf::jni::CUDF_OVERFLOW_ERROR_CLASS, e.what(), \
"No native stacktrace is available.", ret_val); \
} \
catch (const std::exception &e) { \
char const *stacktrace = "No native stacktrace is available."; \
if (auto const cudf_ex = dynamic_cast<cudf::logic_error const *>(&e); cudf_ex != nullptr) { \
stacktrace = cudf_ex->stacktrace(); \
} \
/* Double check whether the thrown exception is unrecoverable CUDA error or not. */ \
/* Like cudf::detail::throw_cuda_error, it is nearly certain that a fatal error */ \
/* occurred if the second call doesn't return with cudaSuccess. */ \
cudaGetLastError(); \
auto const last = cudaFree(0); \
if (cudaSuccess != last && last == cudaDeviceSynchronize()) { \
/* Throw CudaFatalException since the thrown exception is unrecoverable CUDA error */ \
JNI_CHECK_THROW_CUDA_EXCEPTION(env, cudf::jni::CUDA_FATAL_ERROR_CLASS, e.what(), stacktrace, \
last, ret_val); \
} \
JNI_CHECK_THROW_CUDF_EXCEPTION(env, class_name, e.what(), stacktrace, ret_val); \
}
#define CATCH_STD(env, ret_val) CATCH_STD_CLASS(env, cudf::jni::CUDF_ERROR_CLASS, ret_val)
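// A minimal usage sketch (illustrative only; the JNI method name and `handle`
// are hypothetical): entry points validate arguments up front, wrap the body
// in a try block, and end with CATCH_STD to translate C++ exceptions into
// Java exceptions while returning the supplied fallback value.
//
//   JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Example_doWork(JNIEnv *env, jclass,
//                                                              jlong handle) {
//     JNI_NULL_CHECK(env, handle, "handle is null", 0);
//     try {
//       cudf::jni::auto_set_device(env);
//       auto *col = reinterpret_cast<cudf::column *>(handle);
//       return static_cast<jlong>(col->size());
//     }
//     CATCH_STD(env, 0);
//   }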
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/include/maps_column_view.hpp
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
class scalar;
namespace jni {
/**
* @brief Given a column-view of LIST<STRUCT<K,V>>, an instance of this class
* provides an abstraction of a column of maps.
*
* Each list row is treated as a map of key->value, with possibly repeated keys.
* The list may be looked up by a scalar key, or by a column of keys, to
* retrieve the corresponding value.
*/
class maps_column_view {
public:
maps_column_view(lists_column_view const &lists_of_structs,
rmm::cuda_stream_view stream = cudf::get_default_stream());
// Rule of 5.
maps_column_view(maps_column_view const &maps_view) = default;
maps_column_view(maps_column_view &&maps_view) = default;
maps_column_view &operator=(maps_column_view const &) = default;
maps_column_view &operator=(maps_column_view &&) = default;
~maps_column_view() = default;
/**
* @brief Returns number of map rows in the column.
*/
size_type size() const { return keys_.size(); }
/**
* @brief Getter for keys as a list column.
*
* Note: Keys are not deduped. Repeated keys are returned in order.
*/
lists_column_view const &keys() const { return keys_; }
/**
* @brief Getter for values as a list column.
*
* Note: Values for repeated keys are not dropped.
*/
lists_column_view const &values() const { return values_; }
/**
* @brief Map lookup by a column of keys.
*
* The lookup column must have as many rows as the map column,
* and must match the key-type of the map.
* A column of values is returned, with the same number of rows as the map column.
* If a key is repeated in a map row, the value corresponding to the last matching
* key is returned.
* If a lookup key is null or not found, the corresponding value is null.
*
* @param keys Column of keys to be looked up in each corresponding map row.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
   * @return std::unique_ptr<column> Column of values corresponding to the lookup keys.
*/
std::unique_ptr<column> get_values_for(
column_view const &keys, rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource()) const;
/**
* @brief Map lookup by a scalar key.
*
* The type of the lookup scalar must match the key-type of the map.
* A column of values is returned, with the same number of rows as the map column.
* If a key is repeated in a map row, the value corresponding to the last matching
* key is returned.
* If the lookup key is null or not found, the corresponding value is null.
*
   * @param key Scalar key to be looked up in each map row.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return std::unique_ptr<column>
*/
std::unique_ptr<column> get_values_for(
scalar const &key, rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource()) const;
/**
* @brief Check if each map row contains a specified scalar key.
*
* The type of the lookup scalar must match the key-type of the map.
* A column of values is returned, with the same number of rows as the map column.
*
* Each row in the returned column contains a bool indicating whether the row contains
* the specified key (`true`) or not (`false`).
   * The returned column contains no nulls, i.e. if the search key is null or the
   * map row is null, the result row is `false`.
*
* @param key Scalar key to be looked up in each corresponding map row.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return std::unique_ptr<column>
*/
std::unique_ptr<column>
contains(scalar const &key, rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource()) const;
/**
   * @brief Check if each map row contains the corresponding key from a lookup column.
*
* The type of the lookup column must match the key-type of the map.
* A column of values is returned, with the same number of rows as the map column.
*
* Each row in the returned column contains a bool indicating whether the row contains
* the specified key (`true`) or not (`false`).
   * The returned column contains no nulls, i.e. if the search key is null or the
   * map row is null, the result row is `false`.
*
   * @param key Column of lookup keys, one for each corresponding map row.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return std::unique_ptr<column>
*/
std::unique_ptr<column>
contains(column_view const &key, rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource()) const;
private:
lists_column_view keys_, values_;
};
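// Usage sketch (illustrative; `maps_input` and `lookup_key` are hypothetical
// names, and `maps_input` is assumed to be a LIST<STRUCT<K,V>> column view):
//
//   cudf::lists_column_view lists{maps_input};
//   cudf::jni::maps_column_view maps{lists};
//   auto values = maps.get_values_for(lookup_key);  // lookup_key: scalar of type K
//   auto found  = maps.contains(lookup_key);        // BOOL8 column with no nulls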
} // namespace jni
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/aggregation128_utils.hpp
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf::jni {
/**
* @brief Extract a 32-bit integer column from a column of 128-bit values.
*
 * Given a 128-bit input column, returns a 32-bit integer column containing the
 * 32-bit chunk at the specified index within each 128-bit value.
 * Chunk index 0 corresponds to the least significant chunk, and 3 corresponds
 * to the most significant chunk.
*
* A null input row will result in a corresponding null output row.
*
* @param col Column of 128-bit values
* @param dtype Integer type to use for the output column (e.g.: UINT32 or INT32)
* @param chunk_idx Index of the 32-bit chunk to extract
* @param stream CUDA stream to use
* @return A column containing the extracted 32-bit integer values
*/
std::unique_ptr<cudf::column>
extract_chunk32(cudf::column_view const &col, cudf::data_type dtype, int chunk_idx,
rmm::cuda_stream_view stream = cudf::get_default_stream());
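// Worked example (values shown in hex for clarity): for a 128-bit row holding
// 0x00000004_00000003_00000002_00000001, extract_chunk32 with chunk_idx 0
// yields 0x00000001 and chunk_idx 3 yields 0x00000004.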
/**
* @brief Reassemble a 128-bit column from four 64-bit integer columns with overflow detection.
*
* The 128-bit value is reconstructed by overlapping the 64-bit values by 32-bits. The least
* significant 32-bits of the least significant 64-bit value are used directly as the least
* significant 32-bits of the final 128-bit value, and the remaining 32-bits are added to the next
* most significant 64-bit value. The lower 32-bits of that sum become the next most significant
* 32-bits in the final 128-bit value, and the remaining 32-bits are added to the next most
* significant 64-bit input value, and so on.
*
* A null input row will result in a corresponding null output row.
*
* @param chunks_table Table of four 64-bit integer columns with the columns ordered from least
* significant to most significant. The last column must be an INT64 column.
* @param output_type The type to use for the resulting 128-bit value column
* @param stream CUDA stream to use
* @return Table containing a boolean column and a 128-bit value column of the
* requested type. The boolean value will be true if an overflow was detected
* for that row's value.
*/
std::unique_ptr<cudf::table>
assemble128_from_sum(cudf::table_view const &chunks_table, cudf::data_type output_type,
rmm::cuda_stream_view stream = cudf::get_default_stream());
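// Worked sketch of the 32-bit overlap (intent only, not the exact kernel): with
// 64-bit chunk sums c0..c3 ordered least to most significant,
//   r0 = c0 & 0xFFFFFFFF;  carry = c0 >> 32;
//   t1 = c1 + carry;  r1 = t1 & 0xFFFFFFFF;  carry = t1 >> 32;
// and so on through c3, with the row flagged as overflow when the final value
// cannot be represented in the requested 128-bit type.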
} // namespace cudf::jni
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ColumnViewJni.cu
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/labeling/label_segments.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/lists/list_device_view.cuh>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/functional.h>
#include <thrust/logical.h>
#include <thrust/scan.h>
#include <thrust/tabulate.h>
#include "ColumnViewJni.hpp"
namespace cudf::jni {
std::unique_ptr<cudf::column>
new_column_with_boolean_column_as_validity(cudf::column_view const &exemplar,
cudf::column_view const &validity_column) {
CUDF_EXPECTS(validity_column.type().id() == type_id::BOOL8,
"Validity column must be of type bool");
CUDF_EXPECTS(validity_column.size() == exemplar.size(),
"Exemplar and validity columns must have the same size");
auto validity_device_view = cudf::column_device_view::create(validity_column);
auto validity_begin = cudf::detail::make_optional_iterator<bool>(
*validity_device_view, cudf::nullate::DYNAMIC{validity_column.has_nulls()});
auto validity_end = validity_begin + validity_device_view->size();
auto [null_mask, null_count] = cudf::detail::valid_if(
validity_begin, validity_end,
[] __device__(auto optional_bool) { return optional_bool.value_or(false); },
cudf::get_default_stream(), rmm::mr::get_current_device_resource());
auto const exemplar_without_null_mask = cudf::column_view{
exemplar.type(),
exemplar.size(),
exemplar.head<void>(),
nullptr,
0,
exemplar.offset(),
std::vector<cudf::column_view>{exemplar.child_begin(), exemplar.child_end()}};
auto deep_copy = std::make_unique<cudf::column>(exemplar_without_null_mask);
deep_copy->set_null_mask(std::move(null_mask), null_count);
return deep_copy;
}
std::unique_ptr<cudf::column> generate_list_offsets(cudf::column_view const &list_length,
rmm::cuda_stream_view stream) {
CUDF_EXPECTS(list_length.type().id() == cudf::type_id::INT32,
"Input column does not have type INT32.");
auto const begin_iter = list_length.template begin<cudf::size_type>();
auto const end_iter = list_length.template end<cudf::size_type>();
auto offsets_column = make_numeric_column(data_type{type_id::INT32}, list_length.size() + 1,
mask_state::UNALLOCATED, stream);
auto offsets_view = offsets_column->mutable_view();
auto d_offsets = offsets_view.template begin<int32_t>();
thrust::inclusive_scan(rmm::exec_policy(stream), begin_iter, end_iter, d_offsets + 1);
CUDF_CUDA_TRY(cudaMemsetAsync(d_offsets, 0, sizeof(int32_t), stream));
return offsets_column;
}
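// Worked example: list lengths [2, 0, 3] produce offsets [0, 2, 2, 5]; the
// leading zero is written by the cudaMemsetAsync above, and the remaining
// entries come from the inclusive scan.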
namespace {
/**
* @brief Check if the input list has any null elements.
*
* @param list The input list.
* @return The boolean result indicating if the input list has null elements.
*/
__device__ bool list_has_nulls(list_device_view list) {
return thrust::any_of(thrust::seq, thrust::make_counting_iterator(0),
thrust::make_counting_iterator(list.size()),
[&list](auto const idx) { return list.is_null(idx); });
}
} // namespace
void post_process_list_overlap(cudf::column_view const &lhs, cudf::column_view const &rhs,
std::unique_ptr<cudf::column> const &overlap_result,
rmm::cuda_stream_view stream) {
// If both of the input columns do not have nulls, we don't need to do anything here.
if (!lists_column_view{lhs}.child().has_nulls() && !lists_column_view{rhs}.child().has_nulls()) {
return;
}
auto const overlap_cv = overlap_result->view();
auto const lhs_cdv_ptr = column_device_view::create(lhs, stream);
auto const rhs_cdv_ptr = column_device_view::create(rhs, stream);
auto const overlap_cdv_ptr = column_device_view::create(overlap_cv, stream);
// Create a new bitmask to satisfy Spark's arrays_overlap's special behavior.
auto validity = rmm::device_uvector<bool>(overlap_cv.size(), stream);
thrust::tabulate(rmm::exec_policy(stream), validity.begin(), validity.end(),
[lhs = cudf::detail::lists_column_device_view{*lhs_cdv_ptr},
rhs = cudf::detail::lists_column_device_view{*rhs_cdv_ptr},
overlap_result = *overlap_cdv_ptr] __device__(auto const idx) {
if (overlap_result.is_null(idx) ||
overlap_result.template element<bool>(idx)) {
return true;
}
// `lhs_list` and `rhs_list` should not be null, otherwise
// `overlap_result[idx]` is null and that has been handled above.
auto const lhs_list = list_device_view{lhs, idx};
auto const rhs_list = list_device_view{rhs, idx};
// Only proceed if both lists are non-empty.
if (lhs_list.size() == 0 || rhs_list.size() == 0) {
return true;
}
// Only proceed if at least one list has nulls.
if (!list_has_nulls(lhs_list) && !list_has_nulls(rhs_list)) {
return true;
}
// Here, the input lists satisfy all the conditions below so we output a
// null:
// - Both of the input lists have no non-null common element, and
// - They are both non-empty, and
// - Either of them contains null elements.
return false;
});
// Create a new nullmask from the validity data.
auto [new_null_mask, new_null_count] =
cudf::detail::valid_if(validity.begin(), validity.end(), thrust::identity{},
cudf::get_default_stream(), rmm::mr::get_current_device_resource());
if (new_null_count > 0) {
// If the `overlap_result` column is nullable, perform `bitmask_and` of its nullmask and the
// new nullmask.
if (overlap_cv.nullable()) {
auto [null_mask, null_count] = cudf::detail::bitmask_and(
std::vector<bitmask_type const *>{
overlap_cv.null_mask(), static_cast<bitmask_type const *>(new_null_mask.data())},
std::vector<cudf::size_type>{0, 0}, overlap_cv.size(), stream,
rmm::mr::get_current_device_resource());
overlap_result->set_null_mask(std::move(null_mask), null_count);
} else {
// Just set the output nullmask as the new nullmask.
overlap_result->set_null_mask(std::move(new_null_mask), new_null_count);
}
}
}
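// Illustrative rows for the special behavior above (hypothetical data):
//   lhs = [1, 2],    rhs = [2, null] -> true  (a non-null common element exists)
//   lhs = [1, null], rhs = [2]       -> null  (no common element, nulls present)
//   lhs = [],        rhs = [null]    -> false (empty list rows are left non-null)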
std::unique_ptr<cudf::column> lists_distinct_by_key(cudf::lists_column_view const &input,
rmm::cuda_stream_view stream) {
if (input.is_empty()) {
return empty_like(input.parent());
}
auto const child = input.get_sliced_child(stream);
// Generate labels for the input list elements.
auto labels = rmm::device_uvector<cudf::size_type>(child.size(), stream);
cudf::detail::label_segments(input.offsets_begin(), input.offsets_end(), labels.begin(),
labels.end(), stream);
  // Use `cudf::duplicate_keep_option::KEEP_LAST` so this produces the desired behavior when
  // called from `create_map` in spark-rapids.
  // Nulls and NaNs are compared as equal when identifying duplicate keys.
auto out_columns =
cudf::detail::stable_distinct(
table_view{{column_view{cudf::device_span<cudf::size_type const>{labels}}, child.child(0),
child.child(1)}}, // input table
std::vector<size_type>{0, 1}, // key columns
cudf::duplicate_keep_option::KEEP_LAST, cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL, stream, rmm::mr::get_current_device_resource())
->release();
auto const out_labels = out_columns.front()->view();
// Assemble a structs column of <out_keys, out_vals>.
auto out_structs_members = std::vector<std::unique_ptr<cudf::column>>();
out_structs_members.emplace_back(std::move(out_columns[1]));
out_structs_members.emplace_back(std::move(out_columns[2]));
auto out_structs =
cudf::make_structs_column(out_labels.size(), std::move(out_structs_members), 0, {});
// Assemble a lists column of structs<out_keys, out_vals>.
auto out_offsets = make_numeric_column(data_type{type_to_id<size_type>()}, input.size() + 1,
mask_state::UNALLOCATED, stream);
auto const offsets_begin = out_offsets->mutable_view().template begin<size_type>();
auto const labels_begin = out_labels.template begin<size_type>();
cudf::detail::labels_to_offsets(labels_begin, labels_begin + out_labels.size(), offsets_begin,
offsets_begin + out_offsets->size(), stream);
return cudf::make_lists_column(
input.size(), std::move(out_offsets), std::move(out_structs), input.null_count(),
cudf::detail::copy_bitmask(input.parent(), stream, rmm::mr::get_current_device_resource()),
stream);
}
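// Worked example (keep-last semantics): an input row of map entries
// [{k: 1, v: 10}, {k: 2, v: 20}, {k: 1, v: 30}] is reduced to
// [{k: 2, v: 20}, {k: 1, v: 30}], retaining the last value seen for each key.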
} // namespace cudf::jni
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/emptyfile.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Intentionally empty
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ChunkedPackJni.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudf_jni_apis.hpp"
extern "C" {
JNIEXPORT void JNICALL Java_ai_rapids_cudf_ChunkedPack_chunkedPackDelete(JNIEnv *env, jclass,
jlong chunked_pack) {
try {
cudf::jni::auto_set_device(env);
auto cs = reinterpret_cast<cudf::chunked_pack *>(chunked_pack);
delete cs;
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ChunkedPack_chunkedPackGetTotalContiguousSize(
JNIEnv *env, jclass, jlong chunked_pack) {
try {
cudf::jni::auto_set_device(env);
auto cs = reinterpret_cast<cudf::chunked_pack *>(chunked_pack);
return cs->get_total_contiguous_size();
}
CATCH_STD(env, 0);
}
JNIEXPORT jboolean JNICALL Java_ai_rapids_cudf_ChunkedPack_chunkedPackHasNext(JNIEnv *env, jclass,
jlong chunked_pack) {
try {
cudf::jni::auto_set_device(env);
auto cs = reinterpret_cast<cudf::chunked_pack *>(chunked_pack);
return cs->has_next();
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ChunkedPack_chunkedPackNext(JNIEnv *env, jclass,
jlong chunked_pack,
jlong user_ptr,
jlong user_ptr_size) {
try {
cudf::jni::auto_set_device(env);
auto cs = reinterpret_cast<cudf::chunked_pack *>(chunked_pack);
auto user_buffer_span = cudf::device_span<uint8_t>(reinterpret_cast<uint8_t *>(user_ptr),
static_cast<std::size_t>(user_ptr_size));
return cs->next(user_buffer_span);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL
Java_ai_rapids_cudf_ChunkedPack_chunkedPackBuildMetadata(JNIEnv *env, jclass, jlong chunked_pack) {
try {
cudf::jni::auto_set_device(env);
auto cs = reinterpret_cast<cudf::chunked_pack *>(chunked_pack);
std::unique_ptr<std::vector<uint8_t>> result = cs->build_metadata();
return reinterpret_cast<jlong>(result.release());
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/NvtxUniqueRangeJni.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/nvtx/nvtx3.hpp>
#include "jni_utils.hpp"
#include "nvtx_common.hpp"
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_NvtxUniqueRange_start(JNIEnv *env, jclass clazz,
jstring name, jint color_bits) {
try {
cudf::jni::native_jstring range_name(env, name);
nvtx3::color range_color(static_cast<nvtx3::color::value_type>(color_bits));
nvtx3::event_attributes attr{range_color, range_name.get()};
auto nvtxRangeId =
nvtxDomainRangeStartEx(nvtx3::domain::get<cudf::jni::java_domain>(), attr.get());
return static_cast<jlong>(nvtxRangeId);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_NvtxUniqueRange_end(JNIEnv *env, jclass clazz,
jlong nvtxRangeId) {
try {
nvtxDomainRangeEnd(nvtx3::domain::get<cudf::jni::java_domain>(),
static_cast<nvtxRangeId_t>(nvtxRangeId));
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/cudf_jni_apis.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/contiguous_split.hpp>
#include <cudf/detail/aggregation/aggregation.hpp>
#include "jni_utils.hpp"
namespace cudf {
namespace jni {
/**
 * @brief Detach all columns from the specified table, and return pointers to them as an array.
*
* This function takes a table (presumably returned by some operation), and turns it into an
* array of column* (as jlongs).
* The lifetime of the columns is decoupled from that of the table, and is managed by the caller.
*
* @param env The JNI environment
* @param table_result the table to convert for return
* @param extra_columns columns not in the table that will be appended to the result.
*/
jlongArray
convert_table_for_return(JNIEnv *env, std::unique_ptr<cudf::table> &table_result,
std::vector<std::unique_ptr<cudf::column>> &&extra_columns = {});
/**
* @copydoc convert_table_for_return(JNIEnv*, std::unique_ptr<cudf::table>&,
* std::vector<std::unique_ptr<cudf::column>>&&)
*/
jlongArray
convert_table_for_return(JNIEnv *env, std::unique_ptr<cudf::table> &&table_result,
std::vector<std::unique_ptr<cudf::column>> &&extra_columns = {});
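// Usage sketch (illustrative; `result` is a hypothetical std::unique_ptr<cudf::table>
// produced by some libcudf call): the returned jlongArray holds one cudf::column*
// per column, and ownership of the columns passes to the Java side.
//
//   return cudf::jni::convert_table_for_return(env, std::move(result));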
//
// ContiguousTable APIs
//
bool cache_contiguous_table_jni(JNIEnv *env);
void release_contiguous_table_jni(JNIEnv *env);
jobject contiguous_table_from(JNIEnv *env, cudf::packed_columns &split, long row_count);
native_jobjectArray<jobject> contiguous_table_array(JNIEnv *env, jsize length);
/**
* @brief Cache the JNI jclass and JNI jfield of Java `ContigSplitGroupByResult`
*
* @param env the JNI Env pointer
 * @return true if the class and field IDs were cached successfully
*/
bool cache_contig_split_group_by_result_jni(JNIEnv *env);
/**
* @brief Release the JNI jclass and JNI jfield of Java `ContigSplitGroupByResult`
*
* @param env the JNI Env pointer
*/
void release_contig_split_group_by_result_jni(JNIEnv *env);
/**
* @brief Construct a Java `ContigSplitGroupByResult` from contiguous tables.
*
* @param env the JNI Env pointer
* @param groups the contiguous tables
* @return a Java `ContigSplitGroupByResult`
*/
jobject contig_split_group_by_result_from(JNIEnv *env, jobjectArray &groups);
/**
* @brief Construct a Java `ContigSplitGroupByResult` from contiguous tables.
*
* @param env the JNI Env pointer
* @param groups the contiguous tables
 * @param uniq_key_columns the native handles of the unique key columns
* @return a Java `ContigSplitGroupByResult`
*/
jobject contig_split_group_by_result_from(JNIEnv *env, jobjectArray &groups,
jlongArray &uniq_key_columns);
//
// HostMemoryBuffer APIs
//
/**
* Allocate a HostMemoryBuffer
*/
jobject allocate_host_buffer(JNIEnv *env, jlong amount, jboolean prefer_pinned,
jobject host_memory_allocator);
/**
* Get the address of a HostMemoryBuffer
*/
jlong get_host_buffer_address(JNIEnv *env, jobject buffer);
/**
* Get the length of a HostMemoryBuffer
*/
jlong get_host_buffer_length(JNIEnv *env, jobject buffer);
// Get the JNI environment, attaching the current thread to the JVM if necessary. If the thread
// needs to be attached, the thread will automatically detach when the thread terminates.
JNIEnv *get_jni_env(JavaVM *jvm);
/** Set the device to use for cudf */
void set_cudf_device(int device);
/**
* If the current thread has not set the CUDA device via Cuda.setDevice then this could
* set the device, throw an exception, or do nothing depending on how the application has
* configured it via Cuda.setAutoSetDeviceMode.
*/
void auto_set_device(JNIEnv *env);
/**
* Fills all the bytes in the buffer 'buf' with 'value'.
* The operation has not necessarily completed when this returns, but it could overlap with
* operations occurring on other streams.
*/
void device_memset_async(JNIEnv *env, rmm::device_buffer &buf, char value);
//
// DataSource APIs
//
bool cache_data_source_jni(JNIEnv *env);
void release_data_source_jni(JNIEnv *env);
} // namespace jni
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/CuFileJni.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstring>
#include <cufile.h>
#include <fcntl.h>
#include <unistd.h>
#include <cudf/utilities/error.hpp>
#include <sys/stat.h>
#include <sys/types.h>
#include "cudf_jni_apis.hpp"
#include "jni_utils.hpp"
namespace {
/**
* @brief Get the error description based on the CUDA driver error code.
*
* @param cu_result CUDA driver error code.
* @return Description for the error.
*/
char const *GetCuErrorString(CUresult cu_result) {
char const *description;
if (cuGetErrorName(cu_result, &description) != CUDA_SUCCESS)
description = "unknown cuda error";
return description;
}
/**
* @brief Get the error description based on the integer error code.
*
 * cuFile APIs return both cuFile-specific error codes and POSIX error codes for ease of use.
*
* @param error_code Integer error code.
* @return Description of the error.
*/
std::string cuFileGetErrorString(int error_code) {
return IS_CUFILE_ERR(error_code) ? std::string(CUFILE_ERRSTR(error_code)) :
std::string(std::strerror(error_code));
}
/**
* @brief Get the error description based on the cuFile return status.
*
* @param status cuFile return status.
* @return Description of the error.
*/
std::string cuFileGetErrorString(CUfileError_t status) {
std::string error = cuFileGetErrorString(status.err);
if (IS_CUDA_ERR(status)) {
error.append(".").append(GetCuErrorString(status.cu_err));
}
return error;
}
/**
* @brief RAII wrapper for the cuFile driver.
*/
class cufile_driver {
public:
/** @brief Construct a new driver instance by opening the cuFile driver. */
cufile_driver() {
auto const status = cuFileDriverOpen();
if (status.err != CU_FILE_SUCCESS) {
CUDF_FAIL("Failed to initialize cuFile driver: " + cuFileGetErrorString(status));
}
}
// Disable copy (and move) semantics.
cufile_driver(cufile_driver const &) = delete;
cufile_driver &operator=(cufile_driver const &) = delete;
/** @brief Destroy the driver instance by closing the cuFile driver. */
~cufile_driver() { cuFileDriverClose(); }
};
/** @brief RAII wrapper for a device buffer used by cuFile. */
class cufile_buffer {
public:
/**
* @brief Construct a new cuFile buffer.
*
* @param device_pointer Pointer to the device buffer.
* @param size The size of the allocated device buffer.
* @param register_buffer Whether to register the buffer with cuFile. This should only be set to
* true if this buffer is being reused and is 4KiB aligned.
*/
cufile_buffer(void *device_pointer, std::size_t size, bool register_buffer = false)
: device_pointer_{device_pointer}, size_{size}, register_buffer_{register_buffer} {
if (register_buffer_) {
auto const status = cuFileBufRegister(device_pointer_, size_, 0);
if (status.err != CU_FILE_SUCCESS) {
CUDF_FAIL("Failed to register cuFile buffer: " + cuFileGetErrorString(status));
}
}
}
// Disable copy (and move) semantics.
cufile_buffer(cufile_buffer const &) = delete;
cufile_buffer &operator=(cufile_buffer const &) = delete;
/** @brief Destroy the buffer by de-registering it if necessary. */
~cufile_buffer() {
if (register_buffer_) {
cuFileBufDeregister(device_pointer_);
}
}
/**
* @brief Get the pointer to the underlying device buffer.
*
* @return Pointer to the device buffer.
*/
void *device_pointer() const { return device_pointer_; }
/**
* @brief Get the size of the underlying device buffer.
*
* @return The size of the device buffer.
*/
std::size_t size() const { return size_; }
private:
/// Pointer to the device buffer.
void *device_pointer_;
/// Size of the device buffer.
std::size_t size_;
/// Whether to register the buffer with cuFile.
bool register_buffer_;
};
/** @brief RAII wrapper for a file descriptor and the corresponding cuFile handle. */
class cufile_file {
public:
/**
* @brief Construct a file wrapper.
*
* Should not be called directly; use the following factory methods instead.
*
* @param file_descriptor A valid file descriptor.
*/
explicit cufile_file(int file_descriptor) : file_descriptor_{file_descriptor} {
CUfileDescr_t cufile_descriptor{CU_FILE_HANDLE_TYPE_OPAQUE_FD, file_descriptor_};
auto const status = cuFileHandleRegister(&cufile_handle_, &cufile_descriptor);
if (status.err != CU_FILE_SUCCESS) {
close(file_descriptor_);
CUDF_FAIL("Failed to register cuFile handle: " + cuFileGetErrorString(status));
}
}
/**
* @brief Factory method to create a file wrapper for reading.
*
* @param path Absolute path of the file to read from.
* @return std::unique_ptr<cufile_file> for reading.
*/
static auto make_reader(char const *path) {
auto const file_descriptor = open(path, O_RDONLY | O_DIRECT);
if (file_descriptor < 0) {
CUDF_FAIL("Failed to open file to read: " + cuFileGetErrorString(errno));
}
return std::make_unique<cufile_file>(file_descriptor);
}
/**
* @brief Factory method to create a file wrapper for writing.
*
* @param path Absolute path of the file to write to.
* @return std::unique_ptr<cufile_file> for writing.
*/
static auto make_writer(char const *path) {
auto const file_descriptor = open(path, O_CREAT | O_WRONLY | O_DIRECT, S_IRUSR | S_IWUSR);
if (file_descriptor < 0) {
CUDF_FAIL("Failed to open file to write: " + cuFileGetErrorString(errno));
}
return std::make_unique<cufile_file>(file_descriptor);
}
// Disable copy (and move) semantics.
cufile_file(cufile_file const &) = delete;
cufile_file &operator=(cufile_file const &) = delete;
/** @brief Destroy the file wrapper by de-registering the cuFile handle and closing the file. */
~cufile_file() {
cuFileHandleDeregister(cufile_handle_);
close(file_descriptor_);
}
/**
* @brief Read the file into a device buffer.
*
* @param buffer Device buffer to read the file content into.
* @param file_offset Starting offset from which to read the file.
*/
void read(cufile_buffer const &buffer, std::size_t file_offset) const {
auto const status =
cuFileRead(cufile_handle_, buffer.device_pointer(), buffer.size(), file_offset, 0);
if (status < 0) {
if (IS_CUFILE_ERR(status)) {
CUDF_FAIL("Failed to read file into buffer: " + cuFileGetErrorString(status));
} else {
CUDF_FAIL("Failed to read file into buffer: " + cuFileGetErrorString(errno));
}
}
CUDF_EXPECTS(static_cast<std::size_t>(status) == buffer.size(),
"Size of bytes read is different from buffer size");
}
/**
* @brief Write a device buffer to the file.
*
* @param buffer The device buffer to write.
* @param size The number of bytes to write.
* @param file_offset Starting offset from which to write the buffer.
*/
void write(cufile_buffer const &buffer, std::size_t size, std::size_t file_offset) {
auto const status = cuFileWrite(cufile_handle_, buffer.device_pointer(), size, file_offset, 0);
if (status < 0) {
if (IS_CUFILE_ERR(status)) {
CUDF_FAIL("Failed to write buffer to file: " + cuFileGetErrorString(status));
} else {
CUDF_FAIL("Failed to write buffer to file: " + cuFileGetErrorString(errno));
}
}
CUDF_EXPECTS(static_cast<std::size_t>(status) == size,
"Size of bytes written is different from the specified size");
}
/**
* @brief Append a device buffer to the file.
*
* @param buffer The device buffer to append.
* @param size The number of bytes to append.
* @return The file offset from which the buffer was appended.
*/
std::size_t append(cufile_buffer const &buffer, std::size_t size) {
struct stat stat_buffer;
auto const status = fstat(file_descriptor_, &stat_buffer);
if (status < 0) {
CUDF_FAIL("Failed to get file status for appending: " + cuFileGetErrorString(errno));
}
auto const file_offset = static_cast<std::size_t>(stat_buffer.st_size);
write(buffer, size, file_offset);
return file_offset;
}
private:
/// The underlying file descriptor.
int file_descriptor_;
/// The registered cuFile handle.
CUfileHandle_t cufile_handle_{};
};
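// Usage sketch tying the RAII wrappers together (illustrative; `dev_ptr`, `len`,
// and the path are hypothetical): a device buffer is wrapped without cuFile
// registration, a writer is created, and the buffer is appended at end-of-file.
//
//   cufile_buffer buffer{dev_ptr, len};
//   auto writer = cufile_file::make_writer("/tmp/example.bin");
//   auto offset = writer->append(buffer, len);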
} // anonymous namespace
extern "C" {
/**
* @brief Create a new cuFile driver wrapper.
*
* @param env The JNI environment.
* @return Pointer address to the new driver wrapper instance.
*/
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_CuFileDriver_create(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
return reinterpret_cast<jlong>(new cufile_driver());
}
CATCH_STD(env, 0);
}
/**
* @brief Destroy the given cuFile driver wrapper.
*
* @param env The JNI environment.
* @param pointer Pointer address to the driver wrapper instance.
*/
JNIEXPORT void JNICALL Java_ai_rapids_cudf_CuFileDriver_destroy(JNIEnv *env, jclass,
jlong pointer) {
try {
cudf::jni::auto_set_device(env);
delete reinterpret_cast<cufile_driver *>(pointer);
}
CATCH_STD(env, );
}
/**
* @brief Create a new cuFile buffer wrapper.
*
* @param env The JNI environment.
* @param device_pointer Pointer address to the device buffer.
* @param size The size of the device buffer.
* @param register_buffer If true, register the cuFile buffer.
* @return Pointer address to the new buffer wrapper instance.
*/
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_CuFileBuffer_create(JNIEnv *env, jclass,
jlong device_pointer, jlong size,
jboolean register_buffer) {
try {
cudf::jni::auto_set_device(env);
auto *buffer =
new cufile_buffer(reinterpret_cast<void *>(device_pointer), size, register_buffer);
return reinterpret_cast<jlong>(buffer);
}
CATCH_STD(env, 0);
}
/**
* @brief Destroy the given cuFile buffer wrapper.
*
* @param env The JNI environment.
* @param pointer Pointer address to the buffer wrapper instance.
*/
JNIEXPORT void JNICALL Java_ai_rapids_cudf_CuFileBuffer_destroy(JNIEnv *env, jclass,
jlong pointer) {
try {
cudf::jni::auto_set_device(env);
delete reinterpret_cast<cufile_buffer *>(pointer);
}
CATCH_STD(env, );
}
/**
* @brief Create a new cuFile file handle wrapper for reading.
*
* @param env The JNI environment.
* @param path The file path to read from.
* @return Pointer address to the new file handle wrapper instance.
*/
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_CuFileReadHandle_create(JNIEnv *env, jclass,
jstring path) {
try {
cudf::jni::auto_set_device(env);
auto file = cufile_file::make_reader(env->GetStringUTFChars(path, nullptr));
return reinterpret_cast<jlong>(file.release());
}
CATCH_STD(env, 0);
}
/**
* @brief Read the content into the specified buffer.
*
* @param env The JNI environment.
* @param file Pointer to the cuFile file object.
* @param file_offset The file offset from which to read.
* @param buffer Pointer to the cuFile buffer object.
*/
JNIEXPORT void JNICALL Java_ai_rapids_cudf_CuFileReadHandle_readIntoBuffer(JNIEnv *env, jclass,
jlong file,
jlong file_offset,
jlong buffer) {
try {
cudf::jni::auto_set_device(env);
auto *file_ptr = reinterpret_cast<cufile_file *>(file);
auto *buffer_ptr = reinterpret_cast<cufile_buffer *>(buffer);
file_ptr->read(*buffer_ptr, file_offset);
}
CATCH_STD(env, );
}
/**
* @brief Create a new cuFile file handle wrapper for writing.
*
* @param env The JNI environment.
* @param path The file path to write to.
* @return Pointer address to the new file handle wrapper instance.
*/
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_CuFileWriteHandle_create(JNIEnv *env, jclass,
jstring path) {
try {
cudf::jni::auto_set_device(env);
auto file = cufile_file::make_writer(env->GetStringUTFChars(path, nullptr));
return reinterpret_cast<jlong>(file.release());
}
CATCH_STD(env, 0);
}
/**
* @brief Write the content of the specified buffer into the file.
*
* @param env The JNI environment.
* @param file Pointer to the cuFile file object.
* @param file_offset The file offset from which to write.
* @param buffer Pointer to the cuFile buffer object.
* @param size Number of bytes to write.
*/
JNIEXPORT void JNICALL Java_ai_rapids_cudf_CuFileWriteHandle_writeFromBuffer(
JNIEnv *env, jclass, jlong file, jlong file_offset, jlong buffer, jlong size) {
try {
cudf::jni::auto_set_device(env);
auto *file_ptr = reinterpret_cast<cufile_file *>(file);
auto *buffer_ptr = reinterpret_cast<cufile_buffer *>(buffer);
file_ptr->write(*buffer_ptr, size, file_offset);
}
CATCH_STD(env, );
}
/**
* @brief Append the content of the specified buffer into the file.
*
* @param env The JNI environment.
* @param file Pointer to the cuFile file object.
* @param buffer Pointer to the cuFile buffer object.
* @param size Number of bytes to append
* @return The file offset from which the buffer was appended.
*/
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_CuFileWriteHandle_appendFromBuffer(JNIEnv *env, jclass,
jlong file,
jlong buffer,
jlong size) {
try {
cudf::jni::auto_set_device(env);
auto *file_ptr = reinterpret_cast<cufile_file *>(file);
auto *buffer_ptr = reinterpret_cast<cufile_buffer *>(buffer);
return file_ptr->append(*buffer_ptr, size);
}
CATCH_STD(env, -1);
}
/**
* @brief Destroy the given cuFile file handle wrapper.
*
* @param env The JNI environment.
* @param pointer Pointer address to the file handle wrapper instance.
*/
JNIEXPORT void JNICALL Java_ai_rapids_cudf_CuFileHandle_destroy(JNIEnv *env, jclass,
jlong pointer) {
try {
cudf::jni::auto_set_device(env);
delete reinterpret_cast<cufile_file *>(pointer);
}
CATCH_STD(env, );
}
/**
* @brief Write a device buffer into a given file path.
*
* @param env The JNI environment.
* @param path Absolute path of the file to copy the buffer to.
 * @param file_offset The file offset at which to write the buffer.
* @param device_pointer Pointer address to the device buffer.
* @param size Number of bytes to write.
*/
JNIEXPORT void JNICALL Java_ai_rapids_cudf_CuFile_writeToFile(JNIEnv *env, jclass, jstring path,
jlong file_offset,
jlong device_pointer, jlong size) {
try {
cudf::jni::auto_set_device(env);
cufile_buffer buffer{reinterpret_cast<void *>(device_pointer), static_cast<std::size_t>(size)};
auto writer = cufile_file::make_writer(env->GetStringUTFChars(path, nullptr));
writer->write(buffer, size, file_offset);
}
CATCH_STD(env, );
}
/**
* @brief Append a device buffer into a given file path.
*
* @param env The JNI environment.
* @param path Absolute path of the file to copy the buffer to.
* @param device_pointer Pointer address to the device buffer.
* @param size Number of bytes to append.
* @return The file offset from which the buffer was appended.
*/
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_CuFile_appendToFile(JNIEnv *env, jclass, jstring path,
jlong device_pointer, jlong size) {
try {
cudf::jni::auto_set_device(env);
cufile_buffer buffer{reinterpret_cast<void *>(device_pointer), static_cast<std::size_t>(size)};
auto writer = cufile_file::make_writer(env->GetStringUTFChars(path, nullptr));
return writer->append(buffer, size);
}
CATCH_STD(env, -1);
}
/**
* @brief Read from a given file path into a device buffer.
*
* @param env The JNI environment.
* @param device_pointer Pointer address to the device buffer.
* @param size The size of the device buffer.
* @param path Absolute path of the file to copy from.
* @param file_offset The file offset from which to copy content.
*/
JNIEXPORT void JNICALL Java_ai_rapids_cudf_CuFile_readFromFile(JNIEnv *env, jclass,
jlong device_pointer, jlong size,
jstring path, jlong file_offset) {
try {
cudf::jni::auto_set_device(env);
cufile_buffer buffer{reinterpret_cast<void *>(device_pointer), static_cast<std::size_t>(size)};
auto const reader = cufile_file::make_reader(env->GetStringUTFChars(path, nullptr));
reader->read(buffer, file_offset);
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ColumnVectorJni.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <arrow/api.h>
#include <cudf/column/column_factories.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/interop.hpp>
#include <cudf/filling.hpp>
#include <cudf/hashing.hpp>
#include <cudf/interop.hpp>
#include <cudf/lists/combine.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/lists/filling.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/reshape.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include "cudf_jni_apis.hpp"
#include "dtype_utils.hpp"
#include "jni_utils.hpp"
using cudf::jni::ptr_as_jlong;
using cudf::jni::release_as_jlong;
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_sequence(JNIEnv *env, jclass,
jlong j_initial_val, jlong j_step,
jint row_count) {
JNI_NULL_CHECK(env, j_initial_val, "scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
auto initial_val = reinterpret_cast<cudf::scalar const *>(j_initial_val);
auto step = reinterpret_cast<cudf::scalar const *>(j_step);
return release_as_jlong(step ? cudf::sequence(row_count, *initial_val, *step) :
cudf::sequence(row_count, *initial_val));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_sequences(JNIEnv *env, jclass,
jlong j_start_handle,
jlong j_size_handle,
jlong j_step_handle) {
JNI_NULL_CHECK(env, j_start_handle, "start is null", 0);
JNI_NULL_CHECK(env, j_size_handle, "size is null", 0);
try {
cudf::jni::auto_set_device(env);
auto start = reinterpret_cast<cudf::column_view const *>(j_start_handle);
auto size = reinterpret_cast<cudf::column_view const *>(j_size_handle);
auto step = reinterpret_cast<cudf::column_view const *>(j_step_handle);
auto ret =
step ? cudf::lists::sequences(*start, *step, *size) : cudf::lists::sequences(*start, *size);
return release_as_jlong(ret);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_fromArrow(
JNIEnv *env, jclass, jint j_type, jlong j_col_length, jlong j_null_count, jobject j_data_obj,
jobject j_validity_obj, jobject j_offsets_obj) {
try {
cudf::jni::auto_set_device(env);
cudf::type_id n_type = static_cast<cudf::type_id>(j_type);
// not all the buffers are used for all types
void const *data_address = 0;
int data_length = 0;
if (j_data_obj != 0) {
data_address = env->GetDirectBufferAddress(j_data_obj);
data_length = env->GetDirectBufferCapacity(j_data_obj);
}
void const *validity_address = 0;
int validity_length = 0;
if (j_validity_obj != 0) {
validity_address = env->GetDirectBufferAddress(j_validity_obj);
validity_length = env->GetDirectBufferCapacity(j_validity_obj);
}
void const *offsets_address = 0;
int offsets_length = 0;
if (j_offsets_obj != 0) {
offsets_address = env->GetDirectBufferAddress(j_offsets_obj);
offsets_length = env->GetDirectBufferCapacity(j_offsets_obj);
}
auto data_buffer =
arrow::Buffer::Wrap(static_cast<const char *>(data_address), static_cast<int>(data_length));
auto null_buffer = arrow::Buffer::Wrap(static_cast<const char *>(validity_address),
static_cast<int>(validity_length));
auto offsets_buffer = arrow::Buffer::Wrap(static_cast<const char *>(offsets_address),
static_cast<int>(offsets_length));
std::shared_ptr<arrow::Array> arrow_array;
switch (n_type) {
case cudf::type_id::DECIMAL32:
JNI_THROW_NEW(env, cudf::jni::ILLEGAL_ARG_CLASS, "Don't support converting DECIMAL32 yet",
0);
break;
case cudf::type_id::DECIMAL64:
JNI_THROW_NEW(env, cudf::jni::ILLEGAL_ARG_CLASS, "Don't support converting DECIMAL64 yet",
0);
break;
case cudf::type_id::STRUCT:
JNI_THROW_NEW(env, cudf::jni::ILLEGAL_ARG_CLASS, "Don't support converting STRUCT yet", 0);
break;
case cudf::type_id::LIST:
JNI_THROW_NEW(env, cudf::jni::ILLEGAL_ARG_CLASS, "Don't support converting LIST yet", 0);
break;
case cudf::type_id::DICTIONARY32:
JNI_THROW_NEW(env, cudf::jni::ILLEGAL_ARG_CLASS,
"Don't support converting DICTIONARY32 yet", 0);
break;
case cudf::type_id::STRING:
arrow_array = std::make_shared<arrow::StringArray>(j_col_length, offsets_buffer,
data_buffer, null_buffer, j_null_count);
break;
default:
// this handles the primitive types
arrow_array = cudf::detail::to_arrow_array(n_type, j_col_length, data_buffer, null_buffer,
j_null_count);
}
auto name_and_type = arrow::field("col", arrow_array->type());
std::vector<std::shared_ptr<arrow::Field>> fields = {name_and_type};
std::shared_ptr<arrow::Schema> schema = std::make_shared<arrow::Schema>(fields);
auto arrow_table =
arrow::Table::Make(schema, std::vector<std::shared_ptr<arrow::Array>>{arrow_array});
auto retCols = cudf::from_arrow(*(arrow_table))->release();
if (retCols.size() != 1) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "Must result in one column", 0);
}
return release_as_jlong(retCols[0]);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_stringConcatenation(
JNIEnv *env, jclass, jlongArray column_handles, jlong separator, jlong narep,
jboolean separate_nulls) {
JNI_NULL_CHECK(env, column_handles, "array of column handles is null", 0);
JNI_NULL_CHECK(env, separator, "separator string scalar object is null", 0);
JNI_NULL_CHECK(env, narep, "narep string scalar object is null", 0);
try {
cudf::jni::auto_set_device(env);
const auto &separator_scalar = *reinterpret_cast<cudf::string_scalar *>(separator);
const auto &narep_scalar = *reinterpret_cast<cudf::string_scalar *>(narep);
auto null_policy = separate_nulls ? cudf::strings::separator_on_nulls::YES :
cudf::strings::separator_on_nulls::NO;
cudf::jni::native_jpointerArray<cudf::column_view> n_cudf_columns(env, column_handles);
auto column_views = n_cudf_columns.get_dereferenced();
return release_as_jlong(cudf::strings::concatenate(
cudf::table_view(column_views), separator_scalar, narep_scalar, null_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_stringConcatenationSepCol(
JNIEnv *env, jclass, jlongArray column_handles, jlong sep_handle, jlong separator_narep,
jlong col_narep, jboolean separate_nulls) {
JNI_NULL_CHECK(env, column_handles, "array of column handles is null", 0);
JNI_NULL_CHECK(env, sep_handle, "separator column handle is null", 0);
JNI_NULL_CHECK(env, separator_narep, "separator narep string scalar object is null", 0);
JNI_NULL_CHECK(env, col_narep, "column narep string scalar object is null", 0);
try {
cudf::jni::auto_set_device(env);
const auto &separator_narep_scalar = *reinterpret_cast<cudf::string_scalar *>(separator_narep);
const auto &col_narep_scalar = *reinterpret_cast<cudf::string_scalar *>(col_narep);
auto null_policy = separate_nulls ? cudf::strings::separator_on_nulls::YES :
cudf::strings::separator_on_nulls::NO;
cudf::jni::native_jpointerArray<cudf::column_view> n_cudf_columns(env, column_handles);
auto column_views = n_cudf_columns.get_dereferenced();
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(sep_handle);
cudf::strings_column_view strings_column(*column);
return release_as_jlong(cudf::strings::concatenate(cudf::table_view(column_views),
strings_column, separator_narep_scalar,
col_narep_scalar, null_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_concatListByRow(JNIEnv *env, jclass,
jlongArray column_handles,
jboolean ignore_null) {
JNI_NULL_CHECK(env, column_handles, "array of column handles is null", 0);
try {
cudf::jni::auto_set_device(env);
auto null_policy = ignore_null ? cudf::lists::concatenate_null_policy::IGNORE :
cudf::lists::concatenate_null_policy::NULLIFY_OUTPUT_ROW;
cudf::jni::native_jpointerArray<cudf::column_view> n_cudf_columns(env, column_handles);
auto column_views = n_cudf_columns.get_dereferenced();
return release_as_jlong(
cudf::lists::concatenate_rows(cudf::table_view(column_views), null_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_makeList(JNIEnv *env, jobject j_object,
jlongArray handles, jlong j_type,
jint scale, jlong row_count) {
using ScalarType = cudf::scalar_type_t<cudf::size_type>;
JNI_NULL_CHECK(env, handles, "native view handles are null", 0)
try {
cudf::jni::auto_set_device(env);
auto children = cudf::jni::native_jpointerArray<cudf::column_view>(env, handles);
auto children_vector = children.get_dereferenced();
auto zero = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT32));
zero->set_valid_async(true);
static_cast<ScalarType *>(zero.get())->set_value(0);
if (children.size() == 0) {
// special case because cudf::interleave_columns does not support no columns
auto offsets = cudf::make_column_from_scalar(*zero, row_count + 1);
cudf::data_type n_data_type = cudf::jni::make_data_type(j_type, scale);
auto empty_col = cudf::make_empty_column(n_data_type);
return release_as_jlong(cudf::make_lists_column(
row_count, std::move(offsets), std::move(empty_col), 0, rmm::device_buffer()));
} else {
auto count = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT32));
count->set_valid_async(true);
static_cast<ScalarType *>(count.get())->set_value(children.size());
std::unique_ptr<cudf::column> offsets = cudf::sequence(row_count + 1, *zero, *count);
auto data_col = cudf::interleave_columns(cudf::table_view(children_vector));
return release_as_jlong(cudf::make_lists_column(
row_count, std::move(offsets), std::move(data_col), 0, rmm::device_buffer()));
}
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_makeListFromOffsets(
JNIEnv *env, jobject j_object, jlong child_handle, jlong offsets_handle, jlong row_count) {
JNI_NULL_CHECK(env, child_handle, "child_handle is null", 0)
JNI_NULL_CHECK(env, offsets_handle, "offsets_handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const child_cv = reinterpret_cast<cudf::column_view const *>(child_handle);
auto const offsets_cv = reinterpret_cast<cudf::column_view const *>(offsets_handle);
CUDF_EXPECTS(offsets_cv->type().id() == cudf::type_id::INT32,
"Input offsets does not have type INT32.");
return release_as_jlong(cudf::make_lists_column(
static_cast<cudf::size_type>(row_count), std::make_unique<cudf::column>(*offsets_cv),
std::make_unique<cudf::column>(*child_cv), 0, {}));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_fromScalar(JNIEnv *env, jclass,
jlong j_scalar,
jint row_count) {
JNI_NULL_CHECK(env, j_scalar, "scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
auto scalar_val = reinterpret_cast<cudf::scalar const *>(j_scalar);
if (scalar_val->type().id() == cudf::type_id::STRUCT && row_count == 0) {
// Specialize the creation of empty struct column, since libcudf doesn't support it.
auto struct_scalar = reinterpret_cast<cudf::struct_scalar const *>(j_scalar);
auto children = cudf::empty_like(struct_scalar->view())->release();
auto mask_buffer = cudf::create_null_mask(0, cudf::mask_state::UNALLOCATED);
return release_as_jlong(
cudf::make_structs_column(0, std::move(children), 0, std::move(mask_buffer)));
} else {
return release_as_jlong(cudf::make_column_from_scalar(*scalar_val, row_count));
}
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_concatenate(JNIEnv *env, jclass clazz,
jlongArray column_handles) {
JNI_NULL_CHECK(env, column_handles, "input columns are null", 0);
using cudf::column;
using cudf::column_view;
try {
cudf::jni::auto_set_device(env);
auto columns =
cudf::jni::native_jpointerArray<column_view>{env, column_handles}.get_dereferenced();
auto const is_lists_column = columns[0].type().id() == cudf::type_id::LIST;
return release_as_jlong(
is_lists_column ? cudf::lists::detail::concatenate(columns, cudf::get_default_stream(),
rmm::mr::get_current_device_resource()) :
cudf::concatenate(columns));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_hash(JNIEnv *env, jobject j_object,
jlongArray column_handles,
jint hash_function_id, jint seed) {
JNI_NULL_CHECK(env, column_handles, "array of column handles is null", 0);
try {
auto column_views =
cudf::jni::native_jpointerArray<cudf::column_view>{env, column_handles}.get_dereferenced();
return release_as_jlong(cudf::hash(cudf::table_view{column_views},
static_cast<cudf::hash_id>(hash_function_id), seed));
}
CATCH_STD(env, 0);
}
////////
// Native methods specific to cudf::column. These either take or return a cudf::column
// instead of a cudf::column_view so they need to be used with caution. These should
// only be called from the CudfColumn child class.
////////
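// Illustrative sketch (not compiled): a minimal example of the ownership rule above.
// A cudf::column owns its device memory, while a cudf::column_view is non-owning and
// must not outlive the column it refers to.
#if 0
std::unique_ptr<cudf::column> owner =
    cudf::make_empty_column(cudf::data_type{cudf::type_id::INT32});
cudf::column_view view = owner->view(); // non-owning; valid only while `owner` lives
owner.reset();                          // after this, `view` dangles and must not be used
#endif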
JNIEXPORT void JNICALL Java_ai_rapids_cudf_ColumnVector_deleteCudfColumn(JNIEnv *env,
jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "column handle is null", );
try {
cudf::jni::auto_set_device(env);
delete reinterpret_cast<cudf::column *>(handle);
}
CATCH_STD(env, )
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_ColumnVector_setNativeNullCountColumn(JNIEnv *env,
jobject j_object,
jlong handle,
jint null_count) {
JNI_NULL_CHECK(env, handle, "native handle is null", );
try {
cudf::jni::auto_set_device(env);
cudf::column *column = reinterpret_cast<cudf::column *>(handle);
column->set_null_count(null_count);
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_getNativeColumnView(JNIEnv *env,
jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column *column = reinterpret_cast<cudf::column *>(handle);
return ptr_as_jlong(new cudf::column_view{*column});
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnVector_makeEmptyCudfColumn(JNIEnv *env, jclass,
jint j_type,
jint scale) {
try {
cudf::jni::auto_set_device(env);
cudf::data_type n_data_type = cudf::jni::make_data_type(j_type, scale);
return release_as_jlong(cudf::make_empty_column(n_data_type));
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnVector_getNativeNullCountColumn(JNIEnv *env,
jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column *column = reinterpret_cast<cudf::column *>(handle);
return static_cast<jint>(column->null_count());
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/maps_column_view.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/replace.hpp>
#include <cudf/lists/detail/contains.hpp>
#include <cudf/lists/detail/extract.hpp>
#include <cudf/scalar/scalar.hpp>
#include <maps_column_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf::jni {
namespace {
column_view make_lists(column_view const &lists_child, lists_column_view const &lists_of_structs) {
return column_view{data_type{type_id::LIST},
lists_of_structs.size(),
nullptr,
lists_of_structs.null_mask(),
lists_of_structs.null_count(),
lists_of_structs.offset(),
{lists_of_structs.offsets(), lists_child}};
}
} // namespace
maps_column_view::maps_column_view(lists_column_view const &lists_of_structs,
rmm::cuda_stream_view stream)
: keys_{make_lists(lists_of_structs.child().child(0), lists_of_structs)},
values_{make_lists(lists_of_structs.child().child(1), lists_of_structs)} {
auto const structs = lists_of_structs.child();
CUDF_EXPECTS(structs.type().id() == type_id::STRUCT,
"maps_column_view input must have exactly 1 child (STRUCT) column.");
CUDF_EXPECTS(structs.num_children() == 2,
"maps_column_view key-value struct must have exactly 2 children.");
}
template <typename KeyT>
std::unique_ptr<column> get_values_for_impl(maps_column_view const &maps_view,
KeyT const &lookup_keys, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto const keys_ = maps_view.keys();
auto const values_ = maps_view.values();
CUDF_EXPECTS(lookup_keys.type().id() == keys_.child().type().id(),
"Lookup keys must have the same type as the keys of the map column.");
auto key_indices =
lists::detail::index_of(keys_, lookup_keys, lists::duplicate_find_option::FIND_LAST, stream,
rmm::mr::get_current_device_resource());
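// index_of returns the position of the last matching key per row, or -1 when the key is
// absent. Remap -1 to numeric_limits::min() so extract_list_element yields a null
// (far out-of-bounds index) instead of treating -1 as "last element from the end".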
auto constexpr absent_offset = size_type{-1};
auto constexpr nullity_offset = std::numeric_limits<size_type>::min();
thrust::replace(rmm::exec_policy(stream), key_indices->mutable_view().template begin<size_type>(),
key_indices->mutable_view().template end<size_type>(), absent_offset,
nullity_offset);
return lists::detail::extract_list_element(values_, key_indices->view(), stream, mr);
}
std::unique_ptr<column>
maps_column_view::get_values_for(column_view const &lookup_keys, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) const {
CUDF_EXPECTS(lookup_keys.size() == size(),
"Lookup keys must have the same size as the map column.");
return get_values_for_impl(*this, lookup_keys, stream, mr);
}
std::unique_ptr<column>
maps_column_view::get_values_for(scalar const &lookup_key, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) const {
return get_values_for_impl(*this, lookup_key, stream, mr);
}
template <typename KeyT>
std::unique_ptr<column> contains_impl(maps_column_view const &maps_view, KeyT const &lookup_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto const keys = maps_view.keys();
CUDF_EXPECTS(lookup_keys.type().id() == keys.child().type().id(),
"Lookup keys must have the same type as the keys of the map column.");
auto const contains =
lists::detail::contains(keys, lookup_keys, stream, rmm::mr::get_current_device_resource());
// Replace nulls with BOOL8{false}.
auto const scalar_false = numeric_scalar<bool>{false, true, stream};
return detail::replace_nulls(contains->view(), scalar_false, stream, mr);
}
std::unique_ptr<column> maps_column_view::contains(column_view const &lookup_keys,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) const {
CUDF_EXPECTS(lookup_keys.size() == size(),
"Lookup keys must have the same size as the map column.");
return contains_impl(*this, lookup_keys, stream, mr);
}
std::unique_ptr<column> maps_column_view::contains(scalar const &lookup_key,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) const {
return contains_impl(*this, lookup_key, stream, mr);
}
} // namespace cudf::jni
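// Illustrative sketch (not compiled): how a caller might use maps_column_view. The names
// `maps_col` (a LIST<STRUCT<K, V>> column_view) and `lookup_keys` are hypothetical.
#if 0
auto stream = cudf::get_default_stream();
auto mr = rmm::mr::get_current_device_resource();
cudf::jni::maps_column_view maps{cudf::lists_column_view{maps_col}, stream};
auto values = maps.get_values_for(lookup_keys, stream, mr); // value of last matching key
auto present = maps.contains(lookup_keys, stream, mr);      // BOOL8, false where absent
#endif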
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/HostMemoryBufferNativeUtilsJni.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <jni.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include "jni_utils.hpp"
extern "C" {
JNIEXPORT jobject JNICALL Java_ai_rapids_cudf_HostMemoryBufferNativeUtils_wrapRangeInBuffer(
JNIEnv *env, jclass, jlong addr, jlong len) {
return env->NewDirectByteBuffer(reinterpret_cast<void *>(addr), len);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_HostMemoryBufferNativeUtils_mmap(
JNIEnv *env, jclass, jstring jpath, jint mode, jlong offset, jlong length) {
JNI_NULL_CHECK(env, jpath, "path is null", 0);
JNI_ARG_CHECK(env, (mode == 0 || mode == 1), "bad mode value", 0);
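// mode 0 maps the file read-only; mode 1 maps it read-write (see the open/mmap flags below).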
try {
cudf::jni::native_jstring path(env, jpath);
int fd = open(path.get(), (mode == 0) ? O_RDONLY : O_RDWR);
if (fd == -1) {
cudf::jni::throw_java_exception(env, "java/io/IOException", strerror(errno));
}
void *address = mmap(NULL, length, (mode == 0) ? PROT_READ : PROT_READ | PROT_WRITE, MAP_SHARED,
fd, offset);
if (address == MAP_FAILED) {
char const *error_msg = strerror(errno);
close(fd);
cudf::jni::throw_java_exception(env, "java/io/IOException", error_msg);
}
close(fd);
return reinterpret_cast<jlong>(address);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_HostMemoryBufferNativeUtils_munmap(JNIEnv *env, jclass,
jlong address,
jlong length) {
JNI_NULL_CHECK(env, address, "address is NULL", );
try {
int rc = munmap(reinterpret_cast<void *>(address), length);
if (rc == -1) {
cudf::jni::throw_java_exception(env, "java/io/IOException", strerror(errno));
}
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ChunkedReaderJni.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <vector>
#include <cudf/column/column.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/table/table.hpp>
#include "cudf_jni_apis.hpp"
#include "jni_utils.hpp"
// This function is defined in `TableJni.cpp`.
jlongArray
cudf::jni::convert_table_for_return(JNIEnv *env, std::unique_ptr<cudf::table> &&table_result,
std::vector<std::unique_ptr<cudf::column>> &&extra_columns);
// This file contains code related to the chunked readers (Parquet, ORC, etc.).
extern "C" {
// This function should take all the parameters that `Table.readParquet` takes,
// plus one more parameter `long chunkSizeByteLimit`.
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_create(
JNIEnv *env, jclass, jlong chunk_read_limit, jobjectArray filter_col_names,
jbooleanArray j_col_binary_read, jstring inp_file_path, jlong buffer, jlong buffer_length,
jint unit) {
JNI_NULL_CHECK(env, j_col_binary_read, "Null col_binary_read", 0);
bool read_buffer = true;
if (buffer == 0) {
JNI_NULL_CHECK(env, inp_file_path, "Input file or buffer must be supplied", 0);
read_buffer = false;
} else if (inp_file_path != nullptr) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"Cannot pass in both a buffer and an inp_file_path", 0);
} else if (buffer_length <= 0) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "An empty buffer is not supported", 0);
}
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring filename(env, inp_file_path);
if (!read_buffer && filename.is_empty()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "inp_file_path cannot be empty", 0);
}
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
// TODO: This variable is currently unused, but we do not yet know whether it will be
// needed again. Keep it here for now until we decide to use it or remove it completely.
cudf::jni::native_jbooleanArray n_col_binary_read(env, j_col_binary_read);
(void)n_col_binary_read;
auto const source = read_buffer ?
cudf::io::source_info(reinterpret_cast<char *>(buffer),
static_cast<std::size_t>(buffer_length)) :
cudf::io::source_info(filename.get());
auto opts_builder = cudf::io::parquet_reader_options::builder(source);
if (n_filter_col_names.size() > 0) {
opts_builder = opts_builder.columns(n_filter_col_names.as_cpp_vector());
}
auto const read_opts = opts_builder.convert_strings_to_categories(false)
.timestamp_type(cudf::data_type(static_cast<cudf::type_id>(unit)))
.build();
return reinterpret_cast<jlong>(new cudf::io::chunked_parquet_reader(
static_cast<std::size_t>(chunk_read_limit), read_opts));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_createWithDataSource(
JNIEnv *env, jclass, jlong chunk_read_limit, jobjectArray filter_col_names,
jbooleanArray j_col_binary_read, jint unit, jlong ds_handle) {
JNI_NULL_CHECK(env, j_col_binary_read, "Null col_binary_read", 0);
JNI_NULL_CHECK(env, ds_handle, "Null DataSource", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
// TODO: This variable is currently unused, but we do not yet know whether it will be
// needed again. Keep it here for now until we decide to use it or remove it completely.
cudf::jni::native_jbooleanArray n_col_binary_read(env, j_col_binary_read);
(void)n_col_binary_read;
auto ds = reinterpret_cast<cudf::io::datasource *>(ds_handle);
cudf::io::source_info source{ds};
auto opts_builder = cudf::io::parquet_reader_options::builder(source);
if (n_filter_col_names.size() > 0) {
opts_builder = opts_builder.columns(n_filter_col_names.as_cpp_vector());
}
auto const read_opts = opts_builder.convert_strings_to_categories(false)
.timestamp_type(cudf::data_type(static_cast<cudf::type_id>(unit)))
.build();
return reinterpret_cast<jlong>(new cudf::io::chunked_parquet_reader(
static_cast<std::size_t>(chunk_read_limit), read_opts));
}
CATCH_STD(env, 0);
}
JNIEXPORT jboolean JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_hasNext(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "handle is null", false);
try {
cudf::jni::auto_set_device(env);
auto const reader_ptr = reinterpret_cast<cudf::io::chunked_parquet_reader *const>(handle);
return reader_ptr->has_next();
}
CATCH_STD(env, false);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_readChunk(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const reader_ptr = reinterpret_cast<cudf::io::chunked_parquet_reader *const>(handle);
auto chunk = reader_ptr->read_chunk();
return chunk.tbl ? cudf::jni::convert_table_for_return(env, chunk.tbl) : nullptr;
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_ParquetChunkedReader_close(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "handle is null", );
try {
cudf::jni::auto_set_device(env);
delete reinterpret_cast<cudf::io::chunked_parquet_reader *>(handle);
}
CATCH_STD(env, );
}
} // extern "C"
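// Illustrative sketch (not compiled): the C++ read loop these JNI bindings expose to Java,
// assuming `chunk_read_limit` and `read_opts` are built as in the create functions above.
#if 0
cudf::io::chunked_parquet_reader reader(chunk_read_limit, read_opts);
std::size_t total_rows = 0;
while (reader.has_next()) {
  auto chunk = reader.read_chunk();    // cudf::io::table_with_metadata
  total_rows += chunk.tbl->num_rows(); // each chunk is roughly bounded by the byte limit
}
#endif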
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/AggregationJni.cpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/aggregation.hpp>
#include "cudf_jni_apis.hpp"
extern "C" {
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Aggregation_close(JNIEnv *env, jclass class_object,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto to_del = reinterpret_cast<cudf::aggregation *>(ptr);
delete to_del;
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createNoParamAgg(JNIEnv *env,
jclass class_object,
jint kind) {
try {
cudf::jni::auto_set_device(env);
auto ret = [&] {
// These numbers come from Aggregation.java and must stay in sync
switch (kind) {
case 0: // SUM
return cudf::make_sum_aggregation();
case 1: // PRODUCT
return cudf::make_product_aggregation();
case 2: // MIN
return cudf::make_min_aggregation();
case 3: // MAX
return cudf::make_max_aggregation();
// case 4 COUNT
case 5: // ANY
return cudf::make_any_aggregation();
case 6: // ALL
return cudf::make_all_aggregation();
case 7: // SUM_OF_SQUARES
return cudf::make_sum_of_squares_aggregation();
case 8: // MEAN
return cudf::make_mean_aggregation();
// case 9: VARIANCE
// case 10: STD
case 11: // MEDIAN
return cudf::make_median_aggregation();
// case 12: QUANTILE
case 13: // ARGMAX
return cudf::make_argmax_aggregation();
case 14: // ARGMIN
return cudf::make_argmin_aggregation();
// case 15: NUNIQUE
// case 16: NTH_ELEMENT
case 17: // ROW_NUMBER
return cudf::make_row_number_aggregation();
// case 18: COLLECT_LIST
// case 19: COLLECT_SET
case 20: // MERGE_LISTS
return cudf::make_merge_lists_aggregation();
// case 21: MERGE_SETS
// case 22: LEAD
// case 23: LAG
// case 24: PTX
// case 25: CUDA
case 26: // M2
return cudf::make_m2_aggregation();
case 27: // MERGE_M2
return cudf::make_merge_m2_aggregation();
case 28: // RANK
return cudf::make_rank_aggregation(cudf::rank_method::MIN, {},
cudf::null_policy::INCLUDE);
case 29: // DENSE_RANK
return cudf::make_rank_aggregation(cudf::rank_method::DENSE, {},
cudf::null_policy::INCLUDE);
case 30: // ANSI SQL PERCENT_RANK
return cudf::make_rank_aggregation(cudf::rank_method::MIN, {}, cudf::null_policy::INCLUDE,
{}, cudf::rank_percentage::ONE_NORMALIZED);
case 33: // HISTOGRAM
return cudf::make_histogram_aggregation();
case 34: // MERGE_HISTOGRAM
return cudf::make_merge_histogram_aggregation();
default: throw std::logic_error("Unsupported No Parameter Aggregation Operation");
}
}();
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
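// Hypothetical sketch (not part of the current build): one way to reduce drift with
// Aggregation.java would be a shared enum mirroring its codes instead of bare literals.
#if 0
enum class agg_kind : jint { SUM = 0, PRODUCT = 1, MIN = 2, MAX = 3, COUNT = 4, ANY = 5 /* ... */ };
#endif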
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createNthAgg(JNIEnv *env,
jclass class_object,
jint offset,
jboolean include_nulls) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::aggregation> ret = cudf::make_nth_element_aggregation(
offset, include_nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE);
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createDdofAgg(JNIEnv *env,
jclass class_object,
jint kind, jint ddof) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::aggregation> ret;
// These numbers come from Aggregation.java and must stay in sync
switch (kind) {
case 9: // VARIANCE
ret = cudf::make_variance_aggregation(ddof);
break;
case 10: // STD
ret = cudf::make_std_aggregation(ddof);
break;
default: throw std::logic_error("Unsupported DDOF Aggregation Operation");
}
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createTDigestAgg(JNIEnv *env,
jclass class_object,
jint kind, jint delta) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::aggregation> ret;
// These numbers come from Aggregation.java and must stay in sync
switch (kind) {
case 31: // TDIGEST
ret = cudf::make_tdigest_aggregation<cudf::groupby_aggregation>(delta);
break;
case 32: // MERGE_TDIGEST
ret = cudf::make_merge_tdigest_aggregation<cudf::groupby_aggregation>(delta);
break;
default: throw std::logic_error("Unsupported TDigest Aggregation Operation");
}
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createCountLikeAgg(JNIEnv *env,
jclass class_object,
jint kind,
jboolean include_nulls) {
try {
cudf::jni::auto_set_device(env);
cudf::null_policy policy =
include_nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE;
std::unique_ptr<cudf::aggregation> ret;
// These numbers come from Aggregation.java and must stay in sync
switch (kind) {
case 4: // COUNT
ret = cudf::make_count_aggregation(policy);
break;
case 15: // NUNIQUE
ret = cudf::make_nunique_aggregation(policy);
break;
default: throw std::logic_error("Unsupported Count Like Aggregation Operation");
}
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createQuantAgg(JNIEnv *env,
jclass class_object,
jint j_method,
jdoubleArray j_quantiles) {
JNI_NULL_CHECK(env, j_quantiles, "quantiles are null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::jni::native_jdoubleArray quantiles(env, j_quantiles);
std::vector<double> quants(quantiles.data(), quantiles.data() + quantiles.size());
cudf::interpolation interp = static_cast<cudf::interpolation>(j_method);
std::unique_ptr<cudf::aggregation> ret = cudf::make_quantile_aggregation(quants, interp);
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createLeadLagAgg(JNIEnv *env,
jclass class_object,
jint kind, jint offset) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::aggregation> ret;
// These numbers come from Aggregation.java and must stay in sync
switch (kind) {
case 22: // LEAD
ret = cudf::make_lead_aggregation(offset);
break;
case 23: // LAG
ret = cudf::make_lag_aggregation(offset);
break;
default: throw std::logic_error("Unsupported Lead/Lag Aggregation Operation");
}
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createCollectListAgg(
JNIEnv *env, jclass class_object, jboolean include_nulls) {
try {
cudf::jni::auto_set_device(env);
cudf::null_policy policy =
include_nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE;
std::unique_ptr<cudf::aggregation> ret = cudf::make_collect_list_aggregation(policy);
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createCollectSetAgg(JNIEnv *env,
jclass class_object,
jboolean include_nulls,
jboolean nulls_equal,
jboolean nans_equal) {
try {
cudf::jni::auto_set_device(env);
cudf::null_policy null_policy =
include_nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE;
cudf::null_equality null_equality =
nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
cudf::nan_equality nan_equality =
nans_equal ? cudf::nan_equality::ALL_EQUAL : cudf::nan_equality::UNEQUAL;
std::unique_ptr<cudf::aggregation> ret =
cudf::make_collect_set_aggregation(null_policy, null_equality, nan_equality);
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation_createMergeSetsAgg(JNIEnv *env,
jclass class_object,
jboolean nulls_equal,
jboolean nans_equal) {
try {
cudf::jni::auto_set_device(env);
cudf::null_equality null_equality =
nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
cudf::nan_equality nan_equality =
nans_equal ? cudf::nan_equality::ALL_EQUAL : cudf::nan_equality::UNEQUAL;
std::unique_ptr<cudf::aggregation> ret =
cudf::make_merge_sets_aggregation(null_equality, nan_equality);
return reinterpret_cast<jlong>(ret.release());
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/NvcompJni.cpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvcomp.h>
#include <nvcomp/lz4.h>
#include <rmm/device_uvector.hpp>
#include "check_nvcomp_output_sizes.hpp"
#include "cudf_jni_apis.hpp"
namespace {
constexpr char const *NVCOMP_ERROR_CLASS = "ai/rapids/cudf/nvcomp/NvcompException";
constexpr char const *NVCOMP_CUDA_ERROR_CLASS = "ai/rapids/cudf/nvcomp/NvcompCudaException";
constexpr char const *ILLEGAL_ARG_CLASS = "java/lang/IllegalArgumentException";
constexpr char const *UNSUPPORTED_CLASS = "java/lang/UnsupportedOperationException";
void check_nvcomp_status(JNIEnv *env, nvcompStatus_t status) {
switch (status) {
case nvcompSuccess: break;
case nvcompErrorInvalidValue:
cudf::jni::throw_java_exception(env, ILLEGAL_ARG_CLASS, "nvcomp invalid value");
break;
case nvcompErrorNotSupported:
cudf::jni::throw_java_exception(env, UNSUPPORTED_CLASS, "nvcomp unsupported");
break;
case nvcompErrorCannotDecompress:
cudf::jni::throw_java_exception(env, NVCOMP_ERROR_CLASS, "nvcomp cannot decompress");
break;
case nvcompErrorCudaError:
cudf::jni::throw_java_exception(env, NVCOMP_CUDA_ERROR_CLASS, "nvcomp CUDA error");
break;
case nvcompErrorInternal:
cudf::jni::throw_java_exception(env, NVCOMP_ERROR_CLASS, "nvcomp internal error");
break;
default:
cudf::jni::throw_java_exception(env, NVCOMP_ERROR_CLASS, "nvcomp unknown error");
break;
}
}
} // anonymous namespace
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_nvcomp_NvcompJni_batchedLZ4CompressGetTempSize(
JNIEnv *env, jclass, jlong j_batch_size, jlong j_max_chunk_size) {
try {
cudf::jni::auto_set_device(env);
auto batch_size = static_cast<std::size_t>(j_batch_size);
auto max_chunk_size = static_cast<std::size_t>(j_max_chunk_size);
std::size_t temp_size = 0;
auto status = nvcompBatchedLZ4CompressGetTempSize(batch_size, max_chunk_size,
nvcompBatchedLZ4DefaultOpts, &temp_size);
check_nvcomp_status(env, status);
return static_cast<jlong>(temp_size);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL
Java_ai_rapids_cudf_nvcomp_NvcompJni_batchedLZ4CompressGetMaxOutputChunkSize(
JNIEnv *env, jclass, jlong j_max_chunk_size) {
try {
cudf::jni::auto_set_device(env);
auto max_chunk_size = static_cast<std::size_t>(j_max_chunk_size);
std::size_t max_output_size = 0;
auto status = nvcompBatchedLZ4CompressGetMaxOutputChunkSize(
max_chunk_size, nvcompBatchedLZ4DefaultOpts, &max_output_size);
check_nvcomp_status(env, status);
return static_cast<jlong>(max_output_size);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_nvcomp_NvcompJni_batchedLZ4CompressAsync(
JNIEnv *env, jclass, jlong j_in_ptrs, jlong j_in_sizes, jlong j_chunk_size, jlong j_batch_size,
jlong j_temp_ptr, jlong j_temp_size, jlong j_out_ptrs, jlong j_compressed_sizes_out_ptr,
jlong j_stream) {
try {
cudf::jni::auto_set_device(env);
auto in_ptrs = reinterpret_cast<void const *const *>(j_in_ptrs);
auto in_sizes = reinterpret_cast<std::size_t const *>(j_in_sizes);
auto chunk_size = static_cast<std::size_t>(j_chunk_size);
auto batch_size = static_cast<std::size_t>(j_batch_size);
auto temp_ptr = reinterpret_cast<void *>(j_temp_ptr);
auto temp_size = static_cast<std::size_t>(j_temp_size);
auto out_ptrs = reinterpret_cast<void *const *>(j_out_ptrs);
auto compressed_out_sizes = reinterpret_cast<std::size_t *>(j_compressed_sizes_out_ptr);
auto stream = reinterpret_cast<cudaStream_t>(j_stream);
auto status = nvcompBatchedLZ4CompressAsync(in_ptrs, in_sizes, chunk_size, batch_size, temp_ptr,
temp_size, out_ptrs, compressed_out_sizes,
nvcompBatchedLZ4DefaultOpts, stream);
check_nvcomp_status(env, status);
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_nvcomp_NvcompJni_batchedLZ4DecompressGetTempSize(
JNIEnv *env, jclass, jlong j_batch_size, jlong j_chunk_size) {
try {
cudf::jni::auto_set_device(env);
auto batch_size = static_cast<std::size_t>(j_batch_size);
auto chunk_size = static_cast<std::size_t>(j_chunk_size);
std::size_t temp_size = 0;
auto status = nvcompBatchedLZ4DecompressGetTempSize(batch_size, chunk_size, &temp_size);
check_nvcomp_status(env, status);
return static_cast<jlong>(temp_size);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_nvcomp_NvcompJni_batchedLZ4DecompressAsync(
JNIEnv *env, jclass, jlong j_in_ptrs, jlong j_in_sizes, jlong j_out_sizes, jlong j_batch_size,
jlong j_temp_ptr, jlong j_temp_size, jlong j_out_ptrs, jlong j_stream) {
try {
cudf::jni::auto_set_device(env);
auto compressed_ptrs = reinterpret_cast<void const *const *>(j_in_ptrs);
auto compressed_sizes = reinterpret_cast<std::size_t const *>(j_in_sizes);
auto uncompressed_sizes = reinterpret_cast<std::size_t const *>(j_out_sizes);
auto batch_size = static_cast<std::size_t>(j_batch_size);
auto temp_ptr = reinterpret_cast<void *>(j_temp_ptr);
auto temp_size = static_cast<std::size_t>(j_temp_size);
auto uncompressed_ptrs = reinterpret_cast<void *const *>(j_out_ptrs);
auto stream = reinterpret_cast<cudaStream_t>(j_stream);
auto uncompressed_statuses = rmm::device_uvector<nvcompStatus_t>(batch_size, stream);
auto actual_uncompressed_sizes = rmm::device_uvector<std::size_t>(batch_size, stream);
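// nvcomp reports a per-chunk status and a per-chunk actual uncompressed size through
// these device vectors, in addition to the batch-level status returned below.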
auto status = nvcompBatchedLZ4DecompressAsync(
compressed_ptrs, compressed_sizes, uncompressed_sizes, actual_uncompressed_sizes.data(),
batch_size, temp_ptr, temp_size, uncompressed_ptrs, uncompressed_statuses.data(), stream);
check_nvcomp_status(env, status);
if (!cudf::java::check_nvcomp_output_sizes(uncompressed_sizes, actual_uncompressed_sizes.data(),
batch_size, stream)) {
cudf::jni::throw_java_exception(env, NVCOMP_ERROR_CLASS,
"nvcomp decompress output size mismatch");
}
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_nvcomp_NvcompJni_batchedLZ4GetDecompressSizeAsync(
JNIEnv *env, jclass, jlong j_in_ptrs, jlong j_in_sizes, jlong j_out_sizes, jlong j_batch_size,
jlong j_stream) {
try {
cudf::jni::auto_set_device(env);
auto compressed_ptrs = reinterpret_cast<void const *const *>(j_in_ptrs);
auto compressed_sizes = reinterpret_cast<std::size_t const *>(j_in_sizes);
auto uncompressed_sizes = reinterpret_cast<std::size_t *>(j_out_sizes);
auto batch_size = static_cast<std::size_t>(j_batch_size);
auto stream = reinterpret_cast<cudaStream_t>(j_stream);
auto status = nvcompBatchedLZ4GetDecompressSizeAsync(compressed_ptrs, compressed_sizes,
uncompressed_sizes, batch_size, stream);
check_nvcomp_status(env, status);
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/NvtxRangeJni.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/nvtx/nvtx3.hpp>
#include "jni_utils.hpp"
#include "nvtx_common.hpp"
extern "C" {
JNIEXPORT void JNICALL Java_ai_rapids_cudf_NvtxRange_push(JNIEnv *env, jclass clazz, jstring name,
jint color_bits) {
try {
cudf::jni::native_jstring range_name(env, name);
nvtx3::color range_color(static_cast<nvtx3::color::value_type>(color_bits));
nvtx3::event_attributes attr{range_color, range_name.get()};
nvtxDomainRangePushEx(nvtx3::domain::get<cudf::jni::java_domain>(), attr.get());
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_NvtxRange_pop(JNIEnv *env, jclass clazz) {
try {
nvtxDomainRangePop(nvtx3::domain::get<cudf::jni::java_domain>());
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ScalarJni.cpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/binaryop.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/types.hpp>
#include "cudf_jni_apis.hpp"
#include "dtype_utils.hpp"
using cudf::jni::release_as_jlong;
extern "C" {
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Scalar_closeScalar(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
cudf::scalar *s = reinterpret_cast<cudf::scalar *>(scalar_handle);
delete s;
}
CATCH_STD(env, );
}
JNIEXPORT jboolean JNICALL Java_ai_rapids_cudf_Scalar_isScalarValid(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
cudf::scalar *s = reinterpret_cast<cudf::scalar *>(scalar_handle);
return static_cast<jboolean>(s->is_valid());
}
CATCH_STD(env, 0);
}
JNIEXPORT jbyte JNICALL Java_ai_rapids_cudf_Scalar_getByte(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
using ScalarType = cudf::scalar_type_t<int8_t>;
auto s = reinterpret_cast<ScalarType *>(scalar_handle);
return static_cast<jbyte>(s->value());
}
CATCH_STD(env, 0);
}
JNIEXPORT jshort JNICALL Java_ai_rapids_cudf_Scalar_getShort(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
using ScalarType = cudf::scalar_type_t<int16_t>;
auto s = reinterpret_cast<ScalarType *>(scalar_handle);
return static_cast<jshort>(s->value());
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Scalar_getInt(JNIEnv *env, jclass, jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
using ScalarType = cudf::scalar_type_t<int32_t>;
auto s = reinterpret_cast<ScalarType *>(scalar_handle);
return static_cast<jint>(s->value());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_getLong(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
using ScalarType = cudf::scalar_type_t<int64_t>;
auto s = reinterpret_cast<ScalarType *>(scalar_handle);
return static_cast<jlong>(s->value());
}
CATCH_STD(env, 0);
}
JNIEXPORT jfloat JNICALL Java_ai_rapids_cudf_Scalar_getFloat(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
using ScalarType = cudf::scalar_type_t<float>;
auto s = reinterpret_cast<ScalarType *>(scalar_handle);
return static_cast<jfloat>(s->value());
}
CATCH_STD(env, 0);
}
JNIEXPORT jdouble JNICALL Java_ai_rapids_cudf_Scalar_getDouble(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
using ScalarType = cudf::scalar_type_t<double>;
auto s = reinterpret_cast<ScalarType *>(scalar_handle);
return static_cast<jdouble>(s->value());
}
CATCH_STD(env, 0);
}
JNIEXPORT jbyteArray JNICALL Java_ai_rapids_cudf_Scalar_getBigIntegerBytes(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
using ScalarType = cudf::scalar_type_t<__int128_t>;
auto s = reinterpret_cast<ScalarType *>(scalar_handle);
auto val = s->value();
jbyte const *ptr = reinterpret_cast<jbyte const *>(&val);
cudf::jni::native_jbyteArray jbytes{env, ptr, sizeof(__int128_t)};
return jbytes.get_jArray();
}
CATCH_STD(env, 0);
}
JNIEXPORT jbyteArray JNICALL Java_ai_rapids_cudf_Scalar_getUTF8(JNIEnv *env, jclass,
jlong scalar_handle) {
try {
cudf::jni::auto_set_device(env);
auto s = reinterpret_cast<cudf::string_scalar *>(scalar_handle);
std::string val{s->to_string()};
if (val.size() > 0x7FFFFFFF) {
cudf::jni::throw_java_exception(env, "java/lang/IllegalArgumentException",
"string scalar too large");
}
cudf::jni::native_jbyteArray jbytes{env, reinterpret_cast<jbyte const *>(val.data()),
static_cast<int>(val.size())};
return jbytes.get_jArray();
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_getListAsColumnView(JNIEnv *env, jclass,
jlong scalar_handle) {
JNI_NULL_CHECK(env, scalar_handle, "scalar handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto s = reinterpret_cast<cudf::list_scalar *>(scalar_handle);
// Copy the stack-allocated column view to the heap so the JVM can manage its
// life cycle.
return reinterpret_cast<jlong>(new cudf::column_view(s->view()));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL
Java_ai_rapids_cudf_Scalar_getChildrenFromStructScalar(JNIEnv *env, jclass, jlong scalar_handle) {
JNI_NULL_CHECK(env, scalar_handle, "scalar handle is null", 0);
try {
cudf::jni::auto_set_device(env);
const auto s = reinterpret_cast<cudf::struct_scalar *>(scalar_handle);
const cudf::table_view &table = s->view();
cudf::jni::native_jpointerArray<cudf::column_view> column_handles(env, table.num_columns());
for (int i = 0; i < table.num_columns(); i++) {
column_handles[i] = new cudf::column_view(table.column(i));
}
return column_handles.get_jArray();
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeBool8Scalar(JNIEnv *env, jclass,
jboolean value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::BOOL8));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int8_t>;
int8_t val = value ? 1 : 0;
static_cast<ScalarType *>(s.get())->set_value(val);
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeInt8Scalar(JNIEnv *env, jclass, jbyte value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT8));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int8_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int8_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeUint8Scalar(JNIEnv *env, jclass, jbyte value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::UINT8));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<uint8_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<uint8_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeInt16Scalar(JNIEnv *env, jclass,
jshort value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT16));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int16_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int16_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeUint16Scalar(JNIEnv *env, jclass,
jshort value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::UINT16));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<uint16_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<uint16_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeDurationDaysScalar(JNIEnv *env, jclass,
jint value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_duration_scalar(cudf::data_type(cudf::type_id::DURATION_DAYS));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int32_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int32_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeInt32Scalar(JNIEnv *env, jclass, jint value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT32));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int32_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int32_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeUint32Scalar(JNIEnv *env, jclass, jint value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::UINT32));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<uint32_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<uint32_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeInt64Scalar(JNIEnv *env, jclass, jlong value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::INT64));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int64_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int64_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeUint64Scalar(JNIEnv *env, jclass,
jlong value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::UINT64));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<uint64_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<uint64_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeFloat32Scalar(JNIEnv *env, jclass,
jfloat value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::FLOAT32));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<float>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<float>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeFloat64Scalar(JNIEnv *env, jclass,
jdouble value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::FLOAT64));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<double>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<double>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeStringScalar(JNIEnv *env, jclass,
jbyteArray value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::string strval;
if (is_valid) {
cudf::jni::native_jbyteArray jbytes{env, value};
strval.assign(reinterpret_cast<char *>(jbytes.data()), jbytes.size());
}
auto s = new cudf::string_scalar{strval, static_cast<bool>(is_valid)};
return reinterpret_cast<jlong>(s);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeTimestampDaysScalar(JNIEnv *env, jclass,
jint value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::scalar> s =
cudf::make_timestamp_scalar(cudf::data_type(cudf::type_id::TIMESTAMP_DAYS));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int32_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int32_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeDurationTimeScalar(JNIEnv *env, jclass,
jint jdtype_id,
jlong value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
auto dtype_id = static_cast<cudf::type_id>(jdtype_id);
std::unique_ptr<cudf::scalar> s = cudf::make_duration_scalar(cudf::data_type(dtype_id));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int64_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int64_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeTimestampTimeScalar(JNIEnv *env, jclass,
jint jdtype_id,
jlong value,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
auto dtype_id = static_cast<cudf::type_id>(jdtype_id);
std::unique_ptr<cudf::scalar> s = cudf::make_timestamp_scalar(cudf::data_type(dtype_id));
s->set_valid_async(is_valid);
if (is_valid) {
using ScalarType = cudf::scalar_type_t<int64_t>;
static_cast<ScalarType *>(s.get())->set_value(static_cast<int64_t>(value));
}
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeDecimal32Scalar(JNIEnv *env, jclass,
jint value, jint scale,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
auto const value_ = static_cast<int32_t>(value);
auto const scale_ = numeric::scale_type{static_cast<int32_t>(scale)};
std::unique_ptr<cudf::scalar> s =
cudf::make_fixed_point_scalar<numeric::decimal32>(value_, scale_);
s->set_valid_async(is_valid);
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeDecimal64Scalar(JNIEnv *env, jclass,
jlong value, jint scale,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
auto const value_ = static_cast<int64_t>(value);
auto const scale_ = numeric::scale_type{static_cast<int32_t>(scale)};
std::unique_ptr<cudf::scalar> s =
cudf::make_fixed_point_scalar<numeric::decimal64>(value_, scale_);
s->set_valid_async(is_valid);
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeDecimal128Scalar(JNIEnv *env, jclass,
jbyteArray value,
jint scale,
jboolean is_valid) {
try {
cudf::jni::auto_set_device(env);
auto const scale_ = numeric::scale_type{static_cast<int32_t>(scale)};
cudf::jni::native_jbyteArray jbytes{env, value};
auto const value_ = reinterpret_cast<__int128_t *>(jbytes.data());
std::unique_ptr<cudf::scalar> s =
cudf::make_fixed_point_scalar<numeric::decimal128>(*value_, scale_);
s->set_valid_async(is_valid);
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_binaryOpSV(JNIEnv *env, jclass, jlong lhs_ptr,
jlong rhs_view, jint int_op,
jint out_dtype, jint scale) {
JNI_NULL_CHECK(env, lhs_ptr, "lhs is null", 0);
JNI_NULL_CHECK(env, rhs_view, "rhs is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::scalar *lhs = reinterpret_cast<cudf::scalar *>(lhs_ptr);
auto rhs = reinterpret_cast<cudf::column_view *>(rhs_view);
cudf::data_type n_data_type = cudf::jni::make_data_type(out_dtype, scale);
cudf::binary_operator op = static_cast<cudf::binary_operator>(int_op);
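// Struct comparisons are not routed through cudf::binary_operation here; instead,
// materialize the scalar as a single-row column and use the sorting-based struct
// comparator, building the output null mask by hand.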
if (lhs->type().id() == cudf::type_id::STRUCT) {
auto out = make_fixed_width_column(n_data_type, rhs->size(), cudf::mask_state::UNALLOCATED);
if (op == cudf::binary_operator::NULL_EQUALS) {
out->set_null_mask(rmm::device_buffer{}, 0);
} else {
auto [new_mask, new_null_count] = cudf::binops::scalar_col_valid_mask_and(*rhs, *lhs);
out->set_null_mask(std::move(new_mask), new_null_count);
}
auto lhs_col = cudf::make_column_from_scalar(*lhs, 1);
auto out_view = out->mutable_view();
cudf::binops::compiled::detail::apply_sorting_struct_binary_op(
out_view, lhs_col->view(), *rhs, true, false, op, cudf::get_default_stream());
return release_as_jlong(out);
}
return release_as_jlong(cudf::binary_operation(*lhs, *rhs, op, n_data_type));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeListScalar(JNIEnv *env, jclass,
jlong view_handle,
jboolean is_valid) {
JNI_NULL_CHECK(env, view_handle, "Column view should NOT be null", 0);
try {
cudf::jni::auto_set_device(env);
auto col_view = reinterpret_cast<cudf::column_view *>(view_handle);
// Instead of calling `cudf::empty_like` to create an empty column when `is_valid`
// is false, always pass the input view to the scalar to avoid copying the column
// twice.
// The Java layer is responsible for ensuring the view is empty when `is_valid` is false.
cudf::scalar *s = new cudf::list_scalar(*col_view);
s->set_valid_async(is_valid);
return reinterpret_cast<jlong>(s);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_makeStructScalar(JNIEnv *env, jclass,
jlongArray handles,
jboolean is_valid) {
JNI_NULL_CHECK(env, handles, "native view handles are null", 0)
try {
cudf::jni::auto_set_device(env);
std::unique_ptr<cudf::column_view> ret;
cudf::jni::native_jpointerArray<cudf::column_view> column_pointers(env, handles);
std::vector<cudf::column_view> columns;
columns.reserve(column_pointers.size());
std::transform(column_pointers.data(), column_pointers.data() + column_pointers.size(),
std::back_inserter(columns), [](auto const &col_ptr) { return *col_ptr; });
auto s = std::make_unique<cudf::struct_scalar>(
cudf::host_span<cudf::column_view const>{columns}, is_valid);
return reinterpret_cast<jlong>(s.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_repeatString(JNIEnv *env, jclass, jlong handle,
jint repeat_times) {
JNI_NULL_CHECK(env, handle, "scalar handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const str = *reinterpret_cast<cudf::string_scalar *>(handle);
return reinterpret_cast<jlong>(cudf::strings::repeat_string(str, repeat_times).release());
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/Aggregation128UtilsJni.cpp
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "aggregation128_utils.hpp"
#include "cudf_jni_apis.hpp"
#include "dtype_utils.hpp"
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Aggregation128Utils_extractInt32Chunk(
JNIEnv *env, jclass, jlong j_column_view, jint j_out_dtype, jint j_chunk_idx) {
JNI_NULL_CHECK(env, j_column_view, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto cview = reinterpret_cast<cudf::column_view const *>(j_column_view);
auto dtype = cudf::jni::make_data_type(j_out_dtype, 0);
return cudf::jni::release_as_jlong(cudf::jni::extract_chunk32(*cview, dtype, j_chunk_idx));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Aggregation128Utils_combineInt64SumChunks(
JNIEnv *env, jclass, jlong j_table_view, jint j_dtype, jint j_scale) {
JNI_NULL_CHECK(env, j_table_view, "table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto tview = reinterpret_cast<cudf::table_view const *>(j_table_view);
std::unique_ptr<cudf::table> result =
cudf::jni::assemble128_from_sum(*tview, cudf::jni::make_data_type(j_dtype, j_scale));
return cudf::jni::convert_table_for_return(env, result);
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/dtype_utils.hpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <jni.h>
#include <cudf/types.hpp>
namespace cudf {
namespace jni {
// convert a timestamp type to the corresponding duration type
inline cudf::data_type timestamp_to_duration(cudf::data_type dt) {
cudf::type_id duration_type_id;
switch (dt.id()) {
case cudf::type_id::TIMESTAMP_DAYS: duration_type_id = cudf::type_id::DURATION_DAYS; break;
case cudf::type_id::TIMESTAMP_SECONDS:
duration_type_id = cudf::type_id::DURATION_SECONDS;
break;
case cudf::type_id::TIMESTAMP_MILLISECONDS:
duration_type_id = cudf::type_id::DURATION_MILLISECONDS;
break;
case cudf::type_id::TIMESTAMP_MICROSECONDS:
duration_type_id = cudf::type_id::DURATION_MICROSECONDS;
break;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
duration_type_id = cudf::type_id::DURATION_NANOSECONDS;
break;
default: throw std::logic_error("Unexpected type in timestamp_to_duration");
}
return cudf::data_type(duration_type_id);
}
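// e.g. timestamp_to_duration(data_type{type_id::TIMESTAMP_SECONDS}) yields DURATION_SECONDS.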
inline bool is_decimal_type(cudf::type_id n_type) {
return n_type == cudf::type_id::DECIMAL32 || n_type == cudf::type_id::DECIMAL64 ||
n_type == cudf::type_id::DECIMAL128;
}
// create data_type including scale for decimal type
inline cudf::data_type make_data_type(jint out_dtype, jint scale) {
cudf::type_id n_type = static_cast<cudf::type_id>(out_dtype);
cudf::data_type n_data_type;
if (is_decimal_type(n_type)) {
n_data_type = cudf::data_type(n_type, scale);
} else {
n_data_type = cudf::data_type(n_type);
}
return n_data_type;
}
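// e.g. make_data_type(static_cast<jint>(type_id::DECIMAL64), -2) carries scale -2;
// for non-decimal types the scale argument is ignored.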
} // namespace jni
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/check_nvcomp_output_sizes.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/nvtx/nvtx3.hpp>
#include <cudf/utilities/error.hpp>
#include <thrust/device_ptr.h>
#include <thrust/equal.h>
#include "check_nvcomp_output_sizes.hpp"
namespace {
struct java_domain {
static constexpr char const *name{"Java"};
};
} // anonymous namespace
namespace cudf {
namespace java {
/**
 * Check that the vector of expected uncompressed sizes matches the vector of actual
 * uncompressed sizes. Both vectors are assumed to be in device memory and contain
 * num_chunks elements.
*/
bool check_nvcomp_output_sizes(std::size_t const *dev_uncompressed_sizes,
std::size_t const *dev_actual_uncompressed_sizes,
std::size_t num_chunks, rmm::cuda_stream_view stream) {
NVTX3_FUNC_RANGE_IN(java_domain);
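// thrust::equal runs the comparison on the given stream and synchronizes it in order
// to return the host-side bool.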
return thrust::equal(rmm::exec_policy(stream), dev_uncompressed_sizes,
dev_uncompressed_sizes + num_chunks, dev_actual_uncompressed_sizes);
}
} // namespace java
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/jni_compiled_expr.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <vector>

#include <cudf/ast/expressions.hpp>
#include <cudf/scalar/scalar.hpp>
namespace cudf {
namespace jni {
namespace ast {
/**
* A class to capture all of the resources associated with a compiled AST expression.
* AST nodes do not own their child nodes, so every node in the expression tree
* must be explicitly tracked in order to free the underlying resources for each node.
*
* This should be cleaned up a bit after the libcudf AST refactoring in
* https://github.com/rapidsai/cudf/pull/8815 when a virtual destructor is added to the
* base AST node type. Then we do not have to track every AST node type separately.
*/
class compiled_expr {
/** All expression nodes within the expression tree */
std::vector<std::unique_ptr<cudf::ast::expression>> expressions;
/** GPU scalar instances that correspond to literal nodes */
std::vector<std::unique_ptr<cudf::scalar>> scalars;
public:
cudf::ast::literal &add_literal(std::unique_ptr<cudf::ast::literal> literal_ptr,
std::unique_ptr<cudf::scalar> scalar_ptr) {
expressions.push_back(std::move(literal_ptr));
scalars.push_back(std::move(scalar_ptr));
return static_cast<cudf::ast::literal &>(*expressions.back());
}
cudf::ast::column_reference &
add_column_ref(std::unique_ptr<cudf::ast::column_reference> ref_ptr) {
expressions.push_back(std::move(ref_ptr));
return static_cast<cudf::ast::column_reference &>(*expressions.back());
}
cudf::ast::operation &add_operation(std::unique_ptr<cudf::ast::operation> expr_ptr) {
expressions.push_back(std::move(expr_ptr));
return static_cast<cudf::ast::operation &>(*expressions.back());
}
/** Return the expression node at the top of the tree */
cudf::ast::expression &get_top_expression() const { return *expressions.back(); }
};
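// Minimal usage sketch (illustrative only; names are hypothetical): building the
// expression tree for "col0 + 5" while letting compiled_expr own every node and
// the scalar backing the literal.
//
// @code
// cudf::jni::ast::compiled_expr expr;
// auto &col0 = expr.add_column_ref(
//     std::make_unique<cudf::ast::column_reference>(0));
// auto scalar = std::make_unique<cudf::numeric_scalar<int32_t>>(5);
// auto literal = std::make_unique<cudf::ast::literal>(*scalar);
// auto &lit = expr.add_literal(std::move(literal), std::move(scalar));
// expr.add_operation(std::make_unique<cudf::ast::operation>(
//     cudf::ast::ast_operator::ADD, col0, lit));
// cudf::ast::expression &root = expr.get_top_expression(); // the ADD node
// @endcode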
} // namespace ast
} // namespace jni
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/check_nvcomp_output_sizes.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace java {
/**
 * Check that the vector of expected uncompressed sizes matches the vector of actual
 * uncompressed sizes. Both vectors are assumed to be in device memory and contain
 * num_chunks elements.
*/
bool check_nvcomp_output_sizes(std::size_t const *dev_uncompressed_sizes,
std::size_t const *dev_actual_uncompressed_sizes,
std::size_t num_chunks, rmm::cuda_stream_view stream);
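// Illustrative call site (a sketch; the size buffers are assumed to come from a
// batched nvcomp decompression): returns false if any chunk's actual uncompressed
// size differs from the size the caller expected.
//
// @code
// bool ok = cudf::java::check_nvcomp_output_sizes(d_expected_sizes, d_actual_sizes,
//                                                 num_chunks, stream);
// if (!ok) { CUDF_FAIL("corrupt compressed data: chunk size mismatch"); }
// @endcode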
} // namespace java
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/row_conversion.hpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <memory>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace jni {
std::vector<std::unique_ptr<cudf::column>> convert_to_rows_fixed_width_optimized(
cudf::table_view const &tbl,
// TODO need something for validity
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource());
std::vector<std::unique_ptr<cudf::column>>
convert_to_rows(cudf::table_view const &tbl,
// TODO need something for validity
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource());
std::unique_ptr<cudf::table> convert_from_rows_fixed_width_optimized(
cudf::lists_column_view const &input, std::vector<cudf::data_type> const &schema,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource());
std::unique_ptr<cudf::table>
convert_from_rows(cudf::lists_column_view const &input, std::vector<cudf::data_type> const &schema,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource());
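// Sketch of a round trip through the row format (illustrative; `tbl` and `schema`
// are assumed inputs): each returned column is a LIST<INT8> column of packed rows,
// and converting back requires the original column types.
//
// @code
// auto row_batches = cudf::jni::convert_to_rows(tbl);
// std::vector<cudf::data_type> schema = ...; // one entry per column of tbl
// auto rebuilt = cudf::jni::convert_from_rows(
//     cudf::lists_column_view{row_batches[0]->view()}, schema);
// @endcode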
} // namespace jni
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/jni_writer_data_sink.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstring>
#include <future>

#include <cudf/io/data_sink.hpp>

#include "cudf_jni_apis.hpp"
#include "jni_utils.hpp"
namespace cudf::jni {
constexpr long MINIMUM_WRITE_BUFFER_SIZE = 10 * 1024 * 1024; // 10 MB
class jni_writer_data_sink final : public cudf::io::data_sink {
public:
explicit jni_writer_data_sink(JNIEnv *env, jobject callback, jobject host_memory_allocator) {
if (env->GetJavaVM(&jvm) < 0) {
throw std::runtime_error("GetJavaVM failed");
}
jclass cls = env->GetObjectClass(callback);
if (cls == nullptr) {
throw cudf::jni::jni_exception("class not found");
}
handle_buffer_method =
env->GetMethodID(cls, "handleBuffer", "(Lai/rapids/cudf/HostMemoryBuffer;J)V");
if (handle_buffer_method == nullptr) {
throw cudf::jni::jni_exception("handleBuffer method");
}
this->callback = add_global_ref(env, callback);
this->host_memory_allocator = add_global_ref(env, host_memory_allocator);
}
virtual ~jni_writer_data_sink() {
// This should normally be called by a JVM thread. If the JVM environment is missing then this
// is likely being triggered by the C++ runtime during shutdown. In that case the JVM may
// already be destroyed and this thread should not try to attach to get an environment.
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) {
callback = del_global_ref(env, callback);
current_buffer = del_global_ref(env, current_buffer);
host_memory_allocator = del_global_ref(env, host_memory_allocator);
}
callback = nullptr;
current_buffer = nullptr;
host_memory_allocator = nullptr;
}
void host_write(void const *data, size_t size) override {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
long left_to_copy = static_cast<long>(size);
const char *copy_from = static_cast<const char *>(data);
while (left_to_copy > 0) {
long buffer_amount_available = current_buffer_len - current_buffer_written;
if (buffer_amount_available <= 0) {
// should never be < 0, but just to be safe
rotate_buffer(env);
buffer_amount_available = current_buffer_len - current_buffer_written;
}
long amount_to_copy =
left_to_copy < buffer_amount_available ? left_to_copy : buffer_amount_available;
char *copy_to = current_buffer_data + current_buffer_written;
std::memcpy(copy_to, copy_from, amount_to_copy);
copy_from = copy_from + amount_to_copy;
current_buffer_written += amount_to_copy;
total_written += amount_to_copy;
left_to_copy -= amount_to_copy;
}
}
bool supports_device_write() const override { return true; }
void device_write(void const *gpu_data, size_t size, rmm::cuda_stream_view stream) override {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
long left_to_copy = static_cast<long>(size);
const char *copy_from = static_cast<const char *>(gpu_data);
while (left_to_copy > 0) {
long buffer_amount_available = current_buffer_len - current_buffer_written;
if (buffer_amount_available <= 0) {
// should never be < 0, but just to be safe
stream.synchronize();
rotate_buffer(env);
buffer_amount_available = current_buffer_len - current_buffer_written;
}
long amount_to_copy =
left_to_copy < buffer_amount_available ? left_to_copy : buffer_amount_available;
char *copy_to = current_buffer_data + current_buffer_written;
CUDF_CUDA_TRY(cudaMemcpyAsync(copy_to, copy_from, amount_to_copy, cudaMemcpyDeviceToHost,
stream.value()));
copy_from = copy_from + amount_to_copy;
current_buffer_written += amount_to_copy;
total_written += amount_to_copy;
left_to_copy -= amount_to_copy;
}
stream.synchronize();
}
std::future<void> device_write_async(void const *gpu_data, size_t size,
rmm::cuda_stream_view stream) override {
    // Call the synchronous version for now; a truly asynchronous write path is not yet
    // implemented.
device_write(gpu_data, size, stream);
return std::async(std::launch::deferred, [] {});
}
void flush() override {
if (current_buffer_written > 0) {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
handle_buffer(env, current_buffer, current_buffer_written);
current_buffer = del_global_ref(env, current_buffer);
current_buffer_len = 0;
current_buffer_data = nullptr;
current_buffer_written = 0;
}
}
size_t bytes_written() override { return total_written; }
void set_alloc_size(long size) { this->alloc_size = size; }
private:
void rotate_buffer(JNIEnv *env) {
if (current_buffer != nullptr) {
handle_buffer(env, current_buffer, current_buffer_written);
}
current_buffer = del_global_ref(env, current_buffer);
jobject tmp_buffer = allocate_host_buffer(env, alloc_size, true, host_memory_allocator);
current_buffer = add_global_ref(env, tmp_buffer);
current_buffer_len = get_host_buffer_length(env, current_buffer);
current_buffer_data = reinterpret_cast<char *>(get_host_buffer_address(env, current_buffer));
current_buffer_written = 0;
}
void handle_buffer(JNIEnv *env, jobject buffer, jlong len) {
env->CallVoidMethod(callback, handle_buffer_method, buffer, len);
if (env->ExceptionCheck()) {
throw std::runtime_error("handleBuffer threw an exception");
}
}
JavaVM *jvm;
jobject callback;
jmethodID handle_buffer_method;
jobject current_buffer = nullptr;
char *current_buffer_data = nullptr;
long current_buffer_len = 0;
long current_buffer_written = 0;
size_t total_written = 0;
long alloc_size = MINIMUM_WRITE_BUFFER_SIZE;
jobject host_memory_allocator;
};
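// Usage sketch (illustrative; in the real writer paths the JNI objects come from
// the Java side): the sink is wrapped in a cudf::io::sink_info so that every
// staging buffer filled by the writer is handed back to Java via handleBuffer.
//
// @code
// auto sink = std::make_unique<cudf::jni::jni_writer_data_sink>(
//     env, j_callback, j_host_memory_allocator);
// sink->set_alloc_size(64L * 1024 * 1024); // optional: 64 MB staging buffers
// cudf::io::sink_info info{sink.get()};
// auto opts = cudf::io::parquet_writer_options::builder(info, table).build();
// cudf::io::write_parquet(opts);
// sink->flush(); // push any partially filled buffer to the callback
// @endcode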
} // namespace cudf::jni
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ColumnViewJni.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ColumnViewJni.hpp"
#include <numeric>
#include <jni.h>
#include <cudf/aggregation.hpp>
#include <cudf/binaryop.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/datetime.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/filling.hpp>
#include <cudf/hashing.hpp>
#include <cudf/json/json.hpp>
#include <cudf/lists/combine.hpp>
#include <cudf/lists/contains.hpp>
#include <cudf/lists/count_elements.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/lists/extract.hpp>
#include <cudf/lists/gather.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/lists/reverse.hpp>
#include <cudf/lists/set_operations.hpp>
#include <cudf/lists/sorting.hpp>
#include <cudf/lists/stream_compaction.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/quantiles.hpp>
#include <cudf/reduction.hpp>
#include <cudf/replace.hpp>
#include <cudf/reshape.hpp>
#include <cudf/rolling.hpp>
#include <cudf/round.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/search.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/strings/attributes.hpp>
#include <cudf/strings/capitalize.hpp>
#include <cudf/strings/case.hpp>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/contains.hpp>
#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/strings/convert/convert_fixed_point.hpp>
#include <cudf/strings/convert/convert_floats.hpp>
#include <cudf/strings/convert/convert_integers.hpp>
#include <cudf/strings/convert/convert_urls.hpp>
#include <cudf/strings/extract.hpp>
#include <cudf/strings/find.hpp>
#include <cudf/strings/findall.hpp>
#include <cudf/strings/padding.hpp>
#include <cudf/strings/regex/regex_program.hpp>
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/reverse.hpp>
#include <cudf/strings/slice.hpp>
#include <cudf/strings/split/split.hpp>
#include <cudf/strings/split/split_re.hpp>
#include <cudf/strings/strip.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/tdigest/tdigest_column_view.hpp>
#include <cudf/transform.hpp>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include "cudf_jni_apis.hpp"
#include "dtype_utils.hpp"
#include "jni_utils.hpp"
#include "maps_column_view.hpp"
using cudf::jni::ptr_as_jlong;
using cudf::jni::release_as_jlong;
namespace {
std::size_t pad_size(std::size_t size, bool const should_pad_for_cpu) {
if (should_pad_for_cpu) {
constexpr std::size_t ALIGN = sizeof(std::max_align_t);
return (size + (ALIGN - 1)) & ~(ALIGN - 1);
} else {
return size;
}
}
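// Worked example: with ALIGN == sizeof(std::max_align_t) (16 on typical 64-bit
// targets), (size + 15) & ~15 rounds size up to the next multiple of 16, so
// pad_size(10, true) == 16 and pad_size(32, true) == 32, while
// pad_size(10, false) == 10 because no CPU-layout padding was requested.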
std::size_t calc_device_memory_size(cudf::column_view const &view, bool const pad_for_cpu) {
std::size_t total = 0;
auto row_count = view.size();
if (view.nullable()) {
total += pad_size(cudf::bitmask_allocation_size_bytes(row_count), pad_for_cpu);
}
auto dtype = view.type();
if (cudf::is_fixed_width(dtype)) {
total += pad_size(cudf::size_of(dtype) * view.size(), pad_for_cpu);
}
return std::accumulate(view.child_begin(), view.child_end(), total,
[pad_for_cpu](std::size_t t, cudf::column_view const &v) {
return t + calc_device_memory_size(v, pad_for_cpu);
});
}
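// For example, a nullable LIST column contributes its validity bitmask plus the
// recursively computed sizes of its offsets child and its element child; the LIST
// level itself adds no fixed-width data.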
} // anonymous namespace
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_upperStrings(JNIEnv *env, jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::strings_column_view strings_column(*column);
return release_as_jlong(cudf::strings::to_upper(strings_column));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_lowerStrings(JNIEnv *env, jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::strings_column_view strings_column(*column);
return release_as_jlong(cudf::strings::to_lower(strings_column));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_replaceNullsScalar(JNIEnv *env, jclass,
jlong j_col,
jlong j_scalar) {
JNI_NULL_CHECK(env, j_col, "column is null", 0);
JNI_NULL_CHECK(env, j_scalar, "scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view col = *reinterpret_cast<cudf::column_view *>(j_col);
auto val = reinterpret_cast<cudf::scalar *>(j_scalar);
return release_as_jlong(cudf::replace_nulls(col, *val));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_replaceNullsColumn(JNIEnv *env, jclass,
jlong j_col,
jlong j_replace_col) {
JNI_NULL_CHECK(env, j_col, "column is null", 0);
JNI_NULL_CHECK(env, j_replace_col, "replacement column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto col = reinterpret_cast<cudf::column_view *>(j_col);
auto replacements = reinterpret_cast<cudf::column_view *>(j_replace_col);
return release_as_jlong(cudf::replace_nulls(*col, *replacements));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_replaceNullsPolicy(JNIEnv *env, jclass,
jlong j_col,
jboolean is_preceding) {
JNI_NULL_CHECK(env, j_col, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view col = *reinterpret_cast<cudf::column_view *>(j_col);
return release_as_jlong(cudf::replace_nulls(
col, is_preceding ? cudf::replace_policy::PRECEDING : cudf::replace_policy::FOLLOWING));
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnView_distinctCount(JNIEnv *env, jclass,
jlong j_col,
jboolean nulls_included) {
JNI_NULL_CHECK(env, j_col, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view col = *reinterpret_cast<cudf::column_view *>(j_col);
return cudf::distinct_count(
col, nulls_included ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE,
cudf::nan_policy::NAN_IS_VALID);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_ifElseVV(JNIEnv *env, jclass,
jlong j_pred_vec, jlong j_true_vec,
jlong j_false_vec) {
JNI_NULL_CHECK(env, j_pred_vec, "predicate column is null", 0);
JNI_NULL_CHECK(env, j_true_vec, "true column is null", 0);
JNI_NULL_CHECK(env, j_false_vec, "false column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto pred_vec = reinterpret_cast<cudf::column_view *>(j_pred_vec);
auto true_vec = reinterpret_cast<cudf::column_view *>(j_true_vec);
auto false_vec = reinterpret_cast<cudf::column_view *>(j_false_vec);
return release_as_jlong(cudf::copy_if_else(*true_vec, *false_vec, *pred_vec));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_ifElseVS(JNIEnv *env, jclass,
jlong j_pred_vec, jlong j_true_vec,
jlong j_false_scalar) {
JNI_NULL_CHECK(env, j_pred_vec, "predicate column is null", 0);
JNI_NULL_CHECK(env, j_true_vec, "true column is null", 0);
JNI_NULL_CHECK(env, j_false_scalar, "false scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
auto pred_vec = reinterpret_cast<cudf::column_view *>(j_pred_vec);
auto true_vec = reinterpret_cast<cudf::column_view *>(j_true_vec);
auto false_scalar = reinterpret_cast<cudf::scalar *>(j_false_scalar);
return release_as_jlong(cudf::copy_if_else(*true_vec, *false_scalar, *pred_vec));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_ifElseSV(JNIEnv *env, jclass,
jlong j_pred_vec,
jlong j_true_scalar,
jlong j_false_vec) {
JNI_NULL_CHECK(env, j_pred_vec, "predicate column is null", 0);
JNI_NULL_CHECK(env, j_true_scalar, "true scalar is null", 0);
JNI_NULL_CHECK(env, j_false_vec, "false column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto pred_vec = reinterpret_cast<cudf::column_view *>(j_pred_vec);
auto true_scalar = reinterpret_cast<cudf::scalar *>(j_true_scalar);
auto false_vec = reinterpret_cast<cudf::column_view *>(j_false_vec);
return release_as_jlong(cudf::copy_if_else(*true_scalar, *false_vec, *pred_vec));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_ifElseSS(JNIEnv *env, jclass,
jlong j_pred_vec,
jlong j_true_scalar,
jlong j_false_scalar) {
JNI_NULL_CHECK(env, j_pred_vec, "predicate column is null", 0);
JNI_NULL_CHECK(env, j_true_scalar, "true scalar is null", 0);
JNI_NULL_CHECK(env, j_false_scalar, "false scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
auto pred_vec = reinterpret_cast<cudf::column_view *>(j_pred_vec);
auto true_scalar = reinterpret_cast<cudf::scalar *>(j_true_scalar);
auto false_scalar = reinterpret_cast<cudf::scalar *>(j_false_scalar);
return release_as_jlong(cudf::copy_if_else(*true_scalar, *false_scalar, *pred_vec));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getElement(JNIEnv *env, jclass, jlong from,
jint index) {
JNI_NULL_CHECK(env, from, "from column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto from_vec = reinterpret_cast<cudf::column_view *>(from);
return release_as_jlong(cudf::get_element(*from_vec, index));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_reduce(JNIEnv *env, jclass, jlong j_col_view,
jlong j_agg, jint j_dtype,
jint scale) {
JNI_NULL_CHECK(env, j_col_view, "column view is null", 0);
JNI_NULL_CHECK(env, j_agg, "aggregation is null", 0);
try {
cudf::jni::auto_set_device(env);
auto col = reinterpret_cast<cudf::column_view *>(j_col_view);
auto agg = reinterpret_cast<cudf::aggregation *>(j_agg);
cudf::data_type out_dtype = cudf::jni::make_data_type(j_dtype, scale);
return release_as_jlong(
cudf::reduce(*col, *dynamic_cast<cudf::reduce_aggregation *>(agg), out_dtype));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_segmentedReduce(
JNIEnv *env, jclass, jlong j_data_view, jlong j_offsets_view, jlong j_agg,
jboolean include_nulls, jint j_dtype, jint scale) {
JNI_NULL_CHECK(env, j_data_view, "data column view is null", 0);
JNI_NULL_CHECK(env, j_offsets_view, "offsets column view is null", 0);
JNI_NULL_CHECK(env, j_agg, "aggregation is null", 0);
try {
cudf::jni::auto_set_device(env);
auto data = reinterpret_cast<cudf::column_view *>(j_data_view);
auto offsets = reinterpret_cast<cudf::column_view *>(j_offsets_view);
auto agg = reinterpret_cast<cudf::aggregation *>(j_agg);
auto s_agg = dynamic_cast<cudf::segmented_reduce_aggregation *>(agg);
JNI_ARG_CHECK(env, s_agg != nullptr, "agg is not a cudf::segmented_reduce_aggregation", 0)
auto null_policy = include_nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE;
cudf::data_type out_dtype = cudf::jni::make_data_type(j_dtype, scale);
return release_as_jlong(
cudf::segmented_reduce(*data, *offsets, *s_agg, out_dtype, null_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_segmentedGather(
JNIEnv *env, jclass, jlong source_column, jlong gather_map_list, jboolean nullify_out_bounds) {
JNI_NULL_CHECK(env, source_column, "source column view is null", 0);
JNI_NULL_CHECK(env, gather_map_list, "gather map is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const &src_col =
cudf::lists_column_view(*reinterpret_cast<cudf::column_view *>(source_column));
auto const &gather_map =
cudf::lists_column_view(*reinterpret_cast<cudf::column_view *>(gather_map_list));
auto out_bounds_policy = nullify_out_bounds ? cudf::out_of_bounds_policy::NULLIFY :
cudf::out_of_bounds_policy::DONT_CHECK;
return release_as_jlong(cudf::lists::segmented_gather(src_col, gather_map, out_bounds_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_scan(JNIEnv *env, jclass, jlong j_col_view,
jlong j_agg, jboolean is_inclusive,
jboolean include_nulls) {
JNI_NULL_CHECK(env, j_col_view, "column view is null", 0);
JNI_NULL_CHECK(env, j_agg, "aggregation is null", 0);
try {
cudf::jni::auto_set_device(env);
auto col = reinterpret_cast<cudf::column_view *>(j_col_view);
auto agg = reinterpret_cast<cudf::aggregation *>(j_agg);
auto scan_type = is_inclusive ? cudf::scan_type::INCLUSIVE : cudf::scan_type::EXCLUSIVE;
auto null_policy = include_nulls ? cudf::null_policy::INCLUDE : cudf::null_policy::EXCLUDE;
return release_as_jlong(
cudf::scan(*col, *dynamic_cast<cudf::scan_aggregation *>(agg), scan_type, null_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_approxPercentile(JNIEnv *env, jclass clazz,
jlong input_column,
jlong percentiles_column) {
JNI_NULL_CHECK(env, input_column, "input_column native handle is null", 0);
JNI_NULL_CHECK(env, percentiles_column, "percentiles_column native handle is null", 0);
try {
using namespace cudf;
using tdigest_column_view = cudf::tdigest::tdigest_column_view;
jni::auto_set_device(env);
auto const tdigest_view =
tdigest_column_view{structs_column_view{*reinterpret_cast<column_view *>(input_column)}};
auto const p_percentiles = reinterpret_cast<column_view *>(percentiles_column);
return release_as_jlong(percentile_approx(tdigest_view, *p_percentiles));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_quantile(JNIEnv *env, jclass clazz,
jlong input_column,
jint quantile_method,
jdoubleArray jquantiles) {
JNI_NULL_CHECK(env, input_column, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jdoubleArray native_quantiles(env, jquantiles);
std::vector<double> quantiles(native_quantiles.data(),
native_quantiles.data() + native_quantiles.size());
cudf::column_view *n_input_column = reinterpret_cast<cudf::column_view *>(input_column);
cudf::interpolation n_quantile_method = static_cast<cudf::interpolation>(quantile_method);
return release_as_jlong(cudf::quantile(*n_input_column, quantiles, n_quantile_method));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_rollingWindow(
JNIEnv *env, jclass clazz, jlong input_col, jlong default_output_col, jint min_periods,
jlong agg_ptr, jint preceding, jint following, jlong preceding_col, jlong following_col) {
JNI_NULL_CHECK(env, input_col, "native handle is null", 0);
JNI_NULL_CHECK(env, agg_ptr, "aggregation handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *n_input_col = reinterpret_cast<cudf::column_view *>(input_col);
cudf::column_view *n_default_output_col =
reinterpret_cast<cudf::column_view *>(default_output_col);
cudf::column_view *n_preceding_col = reinterpret_cast<cudf::column_view *>(preceding_col);
cudf::column_view *n_following_col = reinterpret_cast<cudf::column_view *>(following_col);
cudf::rolling_aggregation *agg =
dynamic_cast<cudf::rolling_aggregation *>(reinterpret_cast<cudf::aggregation *>(agg_ptr));
JNI_ARG_CHECK(env, agg != nullptr, "aggregation is not an instance of rolling_aggregation", 0);
std::unique_ptr<cudf::column> ret;
if (n_default_output_col != nullptr) {
if (n_preceding_col != nullptr && n_following_col != nullptr) {
CUDF_FAIL("A default output column is not currently supported with variable length "
"preceding and following");
// ret = cudf::rolling_window(*n_input_col, *n_default_output_col,
// *n_preceding_col, *n_following_col, min_periods, agg);
} else {
ret = cudf::rolling_window(*n_input_col, *n_default_output_col, preceding, following,
min_periods, *agg);
}
} else {
if (n_preceding_col != nullptr && n_following_col != nullptr) {
ret = cudf::rolling_window(*n_input_col, *n_preceding_col, *n_following_col, min_periods,
*agg);
} else {
ret = cudf::rolling_window(*n_input_col, preceding, following, min_periods, *agg);
}
}
return release_as_jlong(ret);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_ColumnView_slice(JNIEnv *env, jclass clazz,
jlong input_column,
jintArray slice_indices) {
JNI_NULL_CHECK(env, input_column, "native handle is null", 0);
JNI_NULL_CHECK(env, slice_indices, "slice indices are null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *n_column = reinterpret_cast<cudf::column_view *>(input_column);
cudf::jni::native_jintArray n_slice_indices(env, slice_indices);
std::vector<cudf::size_type> indices(n_slice_indices.begin(), n_slice_indices.end());
std::vector<cudf::column_view> result = cudf::slice(*n_column, indices);
cudf::jni::native_jlongArray n_result(env, result.size());
std::transform(result.begin(), result.end(), n_result.begin(),
[](cudf::column_view const &result_col) {
return ptr_as_jlong(new cudf::column{result_col});
});
return n_result.get_jArray();
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_extractListElement(JNIEnv *env, jclass,
jlong column_view,
jint index) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::lists_column_view lcv(*cv);
return release_as_jlong(cudf::lists::extract_list_element(lcv, index));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_extractListElementV(JNIEnv *env, jclass,
jlong column_view,
jlong indices_view) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, indices_view, "indices is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *indices = reinterpret_cast<cudf::column_view *>(indices_view);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::lists_column_view lcv(*cv);
return release_as_jlong(cudf::lists::extract_list_element(lcv, *indices));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_dropListDuplicates(JNIEnv *env, jclass,
jlong column_view) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input_cv = reinterpret_cast<cudf::column_view const *>(column_view);
return release_as_jlong(cudf::lists::distinct(cudf::lists_column_view{*input_cv}));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_dropListDuplicatesWithKeysValues(
JNIEnv *env, jclass, jlong keys_vals_handle) {
JNI_NULL_CHECK(env, keys_vals_handle, "keys_vals_handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input_cv = reinterpret_cast<cudf::column_view const *>(keys_vals_handle);
JNI_ARG_CHECK(env, input_cv->type().id() == cudf::type_id::LIST,
"Input column is not a lists column.", 0);
auto const lists_keys_vals = cudf::lists_column_view(*input_cv);
auto const keys_vals = lists_keys_vals.child();
JNI_ARG_CHECK(env, keys_vals.type().id() == cudf::type_id::STRUCT,
"Input column has child that is not a structs column.", 0);
JNI_ARG_CHECK(env, keys_vals.num_children() == 2,
"Input column has child that does not have 2 children.", 0);
return release_as_jlong(
cudf::jni::lists_distinct_by_key(lists_keys_vals, cudf::get_default_stream()));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_flattenLists(JNIEnv *env, jclass,
jlong input_handle,
jboolean ignore_null) {
JNI_NULL_CHECK(env, input_handle, "input_handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const null_policy = ignore_null ? cudf::lists::concatenate_null_policy::IGNORE :
cudf::lists::concatenate_null_policy::NULLIFY_OUTPUT_ROW;
auto const input_cv = reinterpret_cast<cudf::column_view const *>(input_handle);
return release_as_jlong(cudf::lists::concatenate_list_elements(*input_cv, null_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listContains(JNIEnv *env, jclass,
jlong column_view,
jlong lookup_key) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_key, "lookup scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::lists_column_view lcv(*cv);
cudf::scalar *lookup_scalar = reinterpret_cast<cudf::scalar *>(lookup_key);
return release_as_jlong(cudf::lists::contains(lcv, *lookup_scalar));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listContainsNulls(JNIEnv *env, jclass,
jlong column_view) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto cv = reinterpret_cast<cudf::column_view *>(column_view);
auto lcv = cudf::lists_column_view{*cv};
return release_as_jlong(cudf::lists::contains_nulls(lcv));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listContainsColumn(JNIEnv *env, jclass,
jlong column_view,
jlong lookup_key_cv) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_key_cv, "lookup column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::lists_column_view lcv(*cv);
cudf::column_view *lookup_cv = reinterpret_cast<cudf::column_view *>(lookup_key_cv);
return release_as_jlong(cudf::lists::contains(lcv, *lookup_cv));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listIndexOfScalar(JNIEnv *env, jclass,
jlong column_view,
jlong lookup_key,
jboolean is_find_first) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_key, "lookup scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const cv = reinterpret_cast<cudf::column_view const *>(column_view);
auto const lcv = cudf::lists_column_view{*cv};
auto const lookup_key_scalar = reinterpret_cast<cudf::scalar const *>(lookup_key);
auto const find_option = is_find_first ? cudf::lists::duplicate_find_option::FIND_FIRST :
cudf::lists::duplicate_find_option::FIND_LAST;
return release_as_jlong(cudf::lists::index_of(lcv, *lookup_key_scalar, find_option));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listIndexOfColumn(JNIEnv *env, jclass,
jlong column_view,
jlong lookup_keys,
jboolean is_find_first) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_keys, "lookup key column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const cv = reinterpret_cast<cudf::column_view const *>(column_view);
auto const lcv = cudf::lists_column_view{*cv};
auto const lookup_key_column = reinterpret_cast<cudf::column_view const *>(lookup_keys);
auto const find_option = is_find_first ? cudf::lists::duplicate_find_option::FIND_FIRST :
cudf::lists::duplicate_find_option::FIND_LAST;
return release_as_jlong(cudf::lists::index_of(lcv, *lookup_key_column, find_option));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listSortRows(JNIEnv *env, jclass,
jlong column_view,
jboolean is_descending,
jboolean is_null_smallest) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto sort_order = is_descending ? cudf::order::DESCENDING : cudf::order::ASCENDING;
auto null_order = is_null_smallest ? cudf::null_order::BEFORE : cudf::null_order::AFTER;
auto *cv = reinterpret_cast<cudf::column_view *>(column_view);
return release_as_jlong(
cudf::lists::sort_lists(cudf::lists_column_view(*cv), sort_order, null_order));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_generateListOffsets(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const cv = reinterpret_cast<cudf::column_view const *>(handle);
return release_as_jlong(cudf::jni::generate_list_offsets(*cv));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listsHaveOverlap(JNIEnv *env, jclass,
jlong lhs_handle,
jlong rhs_handle) {
JNI_NULL_CHECK(env, lhs_handle, "lhs_handle is null", 0)
JNI_NULL_CHECK(env, rhs_handle, "rhs_handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const lhs = reinterpret_cast<cudf::column_view const *>(lhs_handle);
auto const rhs = reinterpret_cast<cudf::column_view const *>(rhs_handle);
auto overlap_result =
cudf::lists::have_overlap(cudf::lists_column_view{*lhs}, cudf::lists_column_view{*rhs},
cudf::null_equality::UNEQUAL, cudf::nan_equality::ALL_EQUAL);
cudf::jni::post_process_list_overlap(*lhs, *rhs, overlap_result);
return release_as_jlong(overlap_result);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listsIntersectDistinct(JNIEnv *env, jclass,
jlong lhs_handle,
jlong rhs_handle) {
JNI_NULL_CHECK(env, lhs_handle, "lhs_handle is null", 0)
JNI_NULL_CHECK(env, rhs_handle, "rhs_handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const lhs = reinterpret_cast<cudf::column_view const *>(lhs_handle);
auto const rhs = reinterpret_cast<cudf::column_view const *>(rhs_handle);
return release_as_jlong(cudf::lists::intersect_distinct(
cudf::lists_column_view{*lhs}, cudf::lists_column_view{*rhs}, cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listsUnionDistinct(JNIEnv *env, jclass,
jlong lhs_handle,
jlong rhs_handle) {
JNI_NULL_CHECK(env, lhs_handle, "lhs_handle is null", 0)
JNI_NULL_CHECK(env, rhs_handle, "rhs_handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const lhs = reinterpret_cast<cudf::column_view const *>(lhs_handle);
auto const rhs = reinterpret_cast<cudf::column_view const *>(rhs_handle);
return release_as_jlong(
cudf::lists::union_distinct(cudf::lists_column_view{*lhs}, cudf::lists_column_view{*rhs},
cudf::null_equality::EQUAL, cudf::nan_equality::ALL_EQUAL));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_listsDifferenceDistinct(JNIEnv *env, jclass,
jlong lhs_handle,
jlong rhs_handle) {
JNI_NULL_CHECK(env, lhs_handle, "lhs_handle is null", 0)
JNI_NULL_CHECK(env, rhs_handle, "rhs_handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const lhs = reinterpret_cast<cudf::column_view const *>(lhs_handle);
auto const rhs = reinterpret_cast<cudf::column_view const *>(rhs_handle);
return release_as_jlong(cudf::lists::difference_distinct(
cudf::lists_column_view{*lhs}, cudf::lists_column_view{*rhs}, cudf::null_equality::EQUAL,
cudf::nan_equality::ALL_EQUAL));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_reverseStringsOrLists(JNIEnv *env, jclass,
jlong input_handle) {
JNI_NULL_CHECK(env, input_handle, "input_handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::column_view const *>(input_handle);
switch (input->type().id()) {
case cudf::type_id::STRING:
return release_as_jlong(cudf::strings::reverse(cudf::strings_column_view{*input}));
case cudf::type_id::LIST:
return release_as_jlong(cudf::lists::reverse(cudf::lists_column_view{*input}));
default:
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"A column of type string or list is required for reverse()", 0);
}
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_ColumnView_stringSplit(JNIEnv *env, jclass,
jlong input_handle,
jstring delimiter_obj,
jint limit) {
JNI_NULL_CHECK(env, input_handle, "input_handle is null", 0);
if (limit == 0 || limit == 1) {
// Cannot achieve the results of splitting with limit == 0 or limit == 1.
// This is because cudf operates on a different parameter (`max_split`) which is converted from
// limit. When limit == 0 or limit == 1, max_split will be non-positive and will result in an
// unlimited split.
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"limit == 0 and limit == 1 are not supported", 0);
}
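  // Worked example of the limit -> max_split mapping below: limit == 3 becomes
  // max_split == 2, so at most two splits occur and at most three output columns
  // are produced, matching java.lang.String#split semantics. Negative limits pass
  // through unchanged and request an unlimited split.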
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::column_view const *>(input_handle);
auto const strings_column = cudf::strings_column_view{*input};
auto const delimiter_jstr = cudf::jni::native_jstring(env, delimiter_obj);
auto const delimiter = std::string(delimiter_jstr.get(), delimiter_jstr.size_bytes());
auto const max_split = limit > 1 ? limit - 1 : limit;
auto result = cudf::strings::split(strings_column, cudf::string_scalar{delimiter}, max_split);
return cudf::jni::convert_table_for_return(env, std::move(result));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_ColumnView_stringSplitRe(
JNIEnv *env, jclass, jlong input_handle, jstring pattern_obj, jint regex_flags,
jint capture_groups, jint limit) {
JNI_NULL_CHECK(env, input_handle, "input_handle is null", 0);
if (limit == 0 || limit == 1) {
// Cannot achieve the results of splitting with limit == 0 or limit == 1.
// This is because cudf operates on a different parameter (`max_split`) which is converted from
// limit. When limit == 0 or limit == 1, max_split will be non-positive and will result in an
// unlimited split.
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"limit == 0 and limit == 1 are not supported", 0);
}
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::column_view const *>(input_handle);
auto const strings_column = cudf::strings_column_view{*input};
auto const pattern_jstr = cudf::jni::native_jstring(env, pattern_obj);
auto const pattern = std::string(pattern_jstr.get(), pattern_jstr.size_bytes());
auto const max_split = limit > 1 ? limit - 1 : limit;
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const groups = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern, flags, groups);
auto result = cudf::strings::split_re(strings_column, *regex_prog, max_split);
return cudf::jni::convert_table_for_return(env, std::move(result));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringSplitRecord(JNIEnv *env, jclass,
jlong input_handle,
jstring delimiter_obj,
jint limit) {
JNI_NULL_CHECK(env, input_handle, "input_handle is null", 0);
if (limit == 0 || limit == 1) {
// Cannot achieve the results of splitting with limit == 0 or limit == 1.
// This is because cudf operates on a different parameter (`max_split`) which is converted from
// limit. When limit == 0 or limit == 1, max_split will be non-positive and will result in an
// unlimited split.
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"limit == 0 and limit == 1 are not supported", 0);
}
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::column_view const *>(input_handle);
auto const strings_column = cudf::strings_column_view{*input};
auto const delimiter_jstr = cudf::jni::native_jstring(env, delimiter_obj);
auto const delimiter = std::string(delimiter_jstr.get(), delimiter_jstr.size_bytes());
auto const max_split = limit > 1 ? limit - 1 : limit;
auto result =
cudf::strings::split_record(strings_column, cudf::string_scalar{delimiter}, max_split);
return release_as_jlong(result);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringSplitRecordRe(
JNIEnv *env, jclass, jlong input_handle, jstring pattern_obj, jint regex_flags,
jint capture_groups, jint limit) {
JNI_NULL_CHECK(env, input_handle, "input_handle is null", 0);
if (limit == 0 || limit == 1) {
// Cannot achieve the results of splitting with limit == 0 or limit == 1.
// This is because cudf operates on a different parameter (`max_split`) which is converted from
// limit. When limit == 0 or limit == 1, max_split will be non-positive and will result in an
// unlimited split.
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"limit == 0 and limit == 1 are not supported", 0);
}
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::column_view const *>(input_handle);
auto const strings_column = cudf::strings_column_view{*input};
auto const pattern_jstr = cudf::jni::native_jstring(env, pattern_obj);
auto const pattern = std::string(pattern_jstr.get(), pattern_jstr.size_bytes());
auto const max_split = limit > 1 ? limit - 1 : limit;
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const groups = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern, flags, groups);
auto result = cudf::strings::split_record_re(strings_column, *regex_prog, max_split);
return release_as_jlong(result);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_ColumnView_split(JNIEnv *env, jclass clazz,
jlong input_column,
jintArray split_indices) {
JNI_NULL_CHECK(env, input_column, "native handle is null", 0);
JNI_NULL_CHECK(env, split_indices, "split indices are null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *n_column = reinterpret_cast<cudf::column_view *>(input_column);
cudf::jni::native_jintArray n_split_indices(env, split_indices);
std::vector<cudf::size_type> indices(n_split_indices.begin(), n_split_indices.end());
std::vector<cudf::column_view> result = cudf::split(*n_column, indices);
cudf::jni::native_jlongArray n_result(env, result.size());
std::transform(result.begin(), result.end(), n_result.begin(),
[](cudf::column_view const &result_col) {
return ptr_as_jlong(new cudf::column_view{result_col});
});
return n_result.get_jArray();
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_countElements(JNIEnv *env, jclass clazz,
jlong view_handle) {
JNI_NULL_CHECK(env, view_handle, "input column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *n_column = reinterpret_cast<cudf::column_view *>(view_handle);
return release_as_jlong(cudf::lists::count_elements(cudf::lists_column_view(*n_column)));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_charLengths(JNIEnv *env, jclass clazz,
jlong view_handle) {
JNI_NULL_CHECK(env, view_handle, "input column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *n_column = reinterpret_cast<cudf::column_view *>(view_handle);
return release_as_jlong(cudf::strings::count_characters(cudf::strings_column_view(*n_column)));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_byteCount(JNIEnv *env, jclass clazz,
jlong view_handle) {
JNI_NULL_CHECK(env, view_handle, "input column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *n_column = reinterpret_cast<cudf::column_view *>(view_handle);
return release_as_jlong(cudf::strings::count_bytes(cudf::strings_column_view(*n_column)));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_findAndReplaceAll(JNIEnv *env, jclass clazz,
jlong old_values_handle,
jlong new_values_handle,
jlong input_handle) {
JNI_NULL_CHECK(env, old_values_handle, "values column is null", 0);
JNI_NULL_CHECK(env, new_values_handle, "replace column is null", 0);
JNI_NULL_CHECK(env, input_handle, "input column is null", 0);
using cudf::column;
using cudf::column_view;
try {
cudf::jni::auto_set_device(env);
column_view *input_column = reinterpret_cast<column_view *>(input_handle);
column_view *old_values_column = reinterpret_cast<column_view *>(old_values_handle);
column_view *new_values_column = reinterpret_cast<column_view *>(new_values_handle);
return release_as_jlong(
cudf::find_and_replace_all(*input_column, *old_values_column, *new_values_column));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isNullNative(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "input column is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::is_null(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isNotNullNative(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "input column is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::is_valid(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isNanNative(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "input column is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::is_nan(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isNotNanNative(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "input column is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::is_not_nan(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_unaryOperation(JNIEnv *env, jclass,
jlong input_ptr,
jint int_op) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
cudf::unary_operator op = static_cast<cudf::unary_operator>(int_op);
return release_as_jlong(cudf::unary_operation(*input, op));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_round(JNIEnv *env, jclass, jlong input_ptr,
jint decimal_places,
jint rounding_method) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
cudf::rounding_method method = static_cast<cudf::rounding_method>(rounding_method);
return release_as_jlong(cudf::round(*input, decimal_places, method));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_year(JNIEnv *env, jclass, jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_year(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_month(JNIEnv *env, jclass, jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_month(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_day(JNIEnv *env, jclass, jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_day(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_hour(JNIEnv *env, jclass, jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_hour(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_minute(JNIEnv *env, jclass,
jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_minute(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_second(JNIEnv *env, jclass,
jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_second(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_weekDay(JNIEnv *env, jclass,
jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_weekday(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_lastDayOfMonth(JNIEnv *env, jclass,
jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::last_day_of_month(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_dayOfYear(JNIEnv *env, jclass,
jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::day_of_year(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_quarterOfYear(JNIEnv *env, jclass,
jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::extract_quarter(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_addCalendricalMonths(JNIEnv *env, jclass,
jlong ts_ptr,
jlong months_ptr) {
JNI_NULL_CHECK(env, ts_ptr, "ts is null", 0);
JNI_NULL_CHECK(env, months_ptr, "months is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *ts = reinterpret_cast<cudf::column_view *>(ts_ptr);
const cudf::column_view *months = reinterpret_cast<cudf::column_view *>(months_ptr);
return release_as_jlong(cudf::datetime::add_calendrical_months(*ts, *months));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isLeapYear(JNIEnv *env, jclass,
jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::datetime::is_leap_year(*input));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_castTo(JNIEnv *env, jclass, jlong handle,
jint type, jint scale) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::data_type n_data_type = cudf::jni::make_data_type(type, scale);
if (n_data_type == column->type()) {
return ptr_as_jlong(new cudf::column(*column));
}
if (n_data_type.id() == cudf::type_id::STRING) {
switch (column->type().id()) {
case cudf::type_id::BOOL8: {
auto const true_scalar = cudf::string_scalar("true");
auto const false_scalar = cudf::string_scalar("false");
return release_as_jlong(cudf::strings::from_booleans(*column, true_scalar, false_scalar));
}
case cudf::type_id::FLOAT32:
case cudf::type_id::FLOAT64: return release_as_jlong(cudf::strings::from_floats(*column));
case cudf::type_id::INT8:
case cudf::type_id::UINT8:
case cudf::type_id::INT16:
case cudf::type_id::UINT16:
case cudf::type_id::INT32:
case cudf::type_id::UINT32:
case cudf::type_id::INT64:
case cudf::type_id::UINT64: return release_as_jlong(cudf::strings::from_integers(*column));
case cudf::type_id::DECIMAL32:
case cudf::type_id::DECIMAL64:
case cudf::type_id::DECIMAL128:
return release_as_jlong(cudf::strings::from_fixed_point(*column));
default: JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "Invalid data type", 0);
}
} else if (column->type().id() == cudf::type_id::STRING) {
switch (n_data_type.id()) {
case cudf::type_id::BOOL8: {
auto const true_scalar = cudf::string_scalar("true");
return release_as_jlong(cudf::strings::to_booleans(*column, true_scalar));
}
case cudf::type_id::FLOAT32:
case cudf::type_id::FLOAT64:
return release_as_jlong(cudf::strings::to_floats(*column, n_data_type));
case cudf::type_id::INT8:
case cudf::type_id::UINT8:
case cudf::type_id::INT16:
case cudf::type_id::UINT16:
case cudf::type_id::INT32:
case cudf::type_id::UINT32:
case cudf::type_id::INT64:
case cudf::type_id::UINT64:
return release_as_jlong(cudf::strings::to_integers(*column, n_data_type));
case cudf::type_id::DECIMAL32:
case cudf::type_id::DECIMAL64:
case cudf::type_id::DECIMAL128:
return release_as_jlong(cudf::strings::to_fixed_point(*column, n_data_type));
default: JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "Invalid data type", 0);
}
} else if (cudf::is_timestamp(n_data_type) && cudf::is_numeric(column->type())) {
// This is a temporary workaround to allow Java to cast from integral types into a timestamp
// without forcing an intermediate duration column to be manifested. Ultimately this style of
// "reinterpret" casting will be supported via https://github.com/rapidsai/cudf/pull/5358
if (n_data_type.id() == cudf::type_id::TIMESTAMP_DAYS) {
if (column->type().id() != cudf::type_id::INT32) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"Numeric cast to TIMESTAMP_DAYS requires INT32", 0);
}
} else {
if (column->type().id() != cudf::type_id::INT64) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"Numeric cast to non-day timestamp requires INT64", 0);
}
}
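    // Wrap the same buffers in a duration view of matching resolution (no copy),
    // then let cudf::cast convert duration -> timestamp.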
cudf::data_type duration_type = cudf::jni::timestamp_to_duration(n_data_type);
cudf::column_view duration_view = cudf::column_view(
duration_type, column->size(), column->head(), column->null_mask(), column->null_count());
return release_as_jlong(cudf::cast(duration_view, n_data_type));
} else if (cudf::is_timestamp(column->type()) && cudf::is_numeric(n_data_type)) {
// This is a temporary workaround to allow Java to cast from timestamp types to integral types
// without forcing an intermediate duration column to be manifested. Ultimately this style of
// "reinterpret" casting will be supported via https://github.com/rapidsai/cudf/pull/5358
cudf::data_type duration_type = cudf::jni::timestamp_to_duration(column->type());
cudf::column_view duration_view = cudf::column_view(
duration_type, column->size(), column->head(), column->null_mask(), column->null_count());
return release_as_jlong(cudf::cast(duration_view, n_data_type));
} else {
return release_as_jlong(cudf::cast(*column, n_data_type));
}
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_bitCastTo(JNIEnv *env, jclass, jlong handle,
jint type, jint scale) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::data_type n_data_type = cudf::jni::make_data_type(type, scale);
return ptr_as_jlong(new cudf::column_view{cudf::bit_cast(*column, n_data_type)});
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_byteListCast(JNIEnv *env, jobject j_object,
jlong handle,
jboolean endianness_config) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::flip_endianness config(static_cast<cudf::flip_endianness>(endianness_config));
return release_as_jlong(byte_cast(*column, config));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringTimestampToTimestamp(
JNIEnv *env, jobject j_object, jlong handle, jint time_unit, jstring formatObj) {
JNI_NULL_CHECK(env, handle, "column is null", 0);
JNI_NULL_CHECK(env, formatObj, "format is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring format(env, formatObj);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::strings_column_view strings_column(*column);
return release_as_jlong(cudf::strings::to_timestamps(
strings_column, cudf::data_type(static_cast<cudf::type_id>(time_unit)), format.get()));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isTimestamp(JNIEnv *env, jclass,
jlong handle,
jstring formatObj) {
JNI_NULL_CHECK(env, handle, "column is null", 0);
JNI_NULL_CHECK(env, formatObj, "format is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring format(env, formatObj);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::strings_column_view strings_column(*column);
return release_as_jlong(cudf::strings::is_timestamp(strings_column, format.get()));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_timestampToStringTimestamp(
JNIEnv *env, jobject j_object, jlong handle, jstring j_format) {
JNI_NULL_CHECK(env, handle, "column is null", 0);
JNI_NULL_CHECK(env, j_format, "format is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring format(env, j_format);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::strings::from_timestamps(*column, format.get()));
}
CATCH_STD(env, 0);
}
JNIEXPORT jboolean JNICALL Java_ai_rapids_cudf_ColumnView_containsScalar(JNIEnv *env,
jobject j_object,
jlong j_view_handle,
jlong j_scalar_handle) {
JNI_NULL_CHECK(env, j_view_handle, "haystack vector is null", false);
JNI_NULL_CHECK(env, j_scalar_handle, "scalar needle is null", false);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column_view = reinterpret_cast<cudf::column_view *>(j_view_handle);
cudf::scalar *scalar = reinterpret_cast<cudf::scalar *>(j_scalar_handle);
return cudf::contains(*column_view, *scalar);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_containsVector(JNIEnv *env, jobject j_object,
jlong j_values_handle,
jlong j_search_space_handle) {
JNI_NULL_CHECK(env, j_values_handle, "values vector is null", false);
JNI_NULL_CHECK(env, j_search_space_handle, "search_space vector is null", false);
try {
cudf::jni::auto_set_device(env);
auto const search_space_ptr =
reinterpret_cast<cudf::column_view const *>(j_search_space_handle);
auto const values_ptr = reinterpret_cast<cudf::column_view const *>(j_values_handle);
// The C++ API `cudf::contains` requires that the search space is the first parameter.
return release_as_jlong(cudf::contains(*search_space_ptr, *values_ptr));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_transform(JNIEnv *env, jobject j_object,
jlong handle, jstring j_udf,
jboolean j_is_ptx) {
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::jni::native_jstring n_j_udf(env, j_udf);
std::string n_udf(n_j_udf.get());
return release_as_jlong(
cudf::transform(*column, n_udf, cudf::data_type(cudf::type_id::INT32), j_is_ptx));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringStartWith(JNIEnv *env,
jobject j_object,
jlong j_view_handle,
jlong comp_string) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", false);
JNI_NULL_CHECK(env, comp_string, "comparison string scalar is null", false);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column_view = reinterpret_cast<cudf::column_view *>(j_view_handle);
cudf::strings_column_view strings_column(*column_view);
cudf::string_scalar *comp_scalar = reinterpret_cast<cudf::string_scalar *>(comp_string);
return release_as_jlong(cudf::strings::starts_with(strings_column, *comp_scalar));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringEndWith(JNIEnv *env, jobject j_object,
jlong j_view_handle,
jlong comp_string) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", false);
JNI_NULL_CHECK(env, comp_string, "comparison string scalar is null", false);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column_view = reinterpret_cast<cudf::column_view *>(j_view_handle);
cudf::strings_column_view strings_column(*column_view);
cudf::string_scalar *comp_scalar = reinterpret_cast<cudf::string_scalar *>(comp_string);
return release_as_jlong(cudf::strings::ends_with(strings_column, *comp_scalar));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringContains(JNIEnv *env, jobject j_object,
jlong j_view_handle,
jlong comp_string) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", false);
JNI_NULL_CHECK(env, comp_string, "comparison string scalar is null", false);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column_view = reinterpret_cast<cudf::column_view *>(j_view_handle);
cudf::strings_column_view strings_column(*column_view);
cudf::string_scalar *comp_scalar = reinterpret_cast<cudf::string_scalar *>(comp_string);
return release_as_jlong(cudf::strings::contains(strings_column, *comp_scalar));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_matchesRe(JNIEnv *env, jobject j_object,
jlong j_view_handle,
jstring pattern_obj,
jint regex_flags,
jint capture_groups) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", false);
JNI_NULL_CHECK(env, pattern_obj, "pattern is null", false);
try {
cudf::jni::auto_set_device(env);
auto const column_view = reinterpret_cast<cudf::column_view const *>(j_view_handle);
auto const strings_column = cudf::strings_column_view{*column_view};
auto const pattern = cudf::jni::native_jstring(env, pattern_obj);
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const groups = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern.get(), flags, groups);
return release_as_jlong(cudf::strings::matches_re(strings_column, *regex_prog));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_containsRe(JNIEnv *env, jobject j_object,
jlong j_view_handle,
jstring pattern_obj,
jint regex_flags,
jint capture_groups) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", false);
JNI_NULL_CHECK(env, pattern_obj, "pattern is null", false);
try {
cudf::jni::auto_set_device(env);
auto const column_view = reinterpret_cast<cudf::column_view const *>(j_view_handle);
auto const strings_column = cudf::strings_column_view{*column_view};
auto const pattern = cudf::jni::native_jstring(env, pattern_obj);
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const capture = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern.get(), flags, capture);
return release_as_jlong(cudf::strings::contains_re(strings_column, *regex_prog));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_like(JNIEnv *env, jobject j_object,
jlong j_view_handle, jlong pattern,
jlong escapeChar) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", false);
JNI_NULL_CHECK(env, pattern, "pattern is null", false);
JNI_NULL_CHECK(env, escapeChar, "escape character is null", false);
try {
cudf::jni::auto_set_device(env);
auto const column_view = reinterpret_cast<cudf::column_view const *>(j_view_handle);
auto const strings_column = cudf::strings_column_view{*column_view};
auto const pattern_scalar = reinterpret_cast<cudf::string_scalar const *>(pattern);
auto const escape_scalar = reinterpret_cast<cudf::string_scalar const *>(escapeChar);
return release_as_jlong(cudf::strings::like(strings_column, *pattern_scalar, *escape_scalar));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_binaryOpVV(JNIEnv *env, jclass,
jlong lhs_view, jlong rhs_view,
jint int_op, jint out_dtype,
jint scale) {
JNI_NULL_CHECK(env, lhs_view, "lhs is null", 0);
JNI_NULL_CHECK(env, rhs_view, "rhs is null", 0);
try {
cudf::jni::auto_set_device(env);
auto lhs = reinterpret_cast<cudf::column_view *>(lhs_view);
auto rhs = reinterpret_cast<cudf::column_view *>(rhs_view);
cudf::data_type n_data_type = cudf::jni::make_data_type(out_dtype, scale);
cudf::binary_operator op = static_cast<cudf::binary_operator>(int_op);
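    // Struct comparisons take a special path: the output column is allocated up
    // front and its validity is computed here. NULL_EQUALS can never produce
    // nulls, so it gets an empty mask; for other operators a row is valid only
    // when it is valid in both inputs (bitmask AND).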
if (lhs->type().id() == cudf::type_id::STRUCT) {
auto out = make_fixed_width_column(n_data_type, lhs->size(), cudf::mask_state::UNALLOCATED);
if (op == cudf::binary_operator::NULL_EQUALS) {
out->set_null_mask(rmm::device_buffer{}, 0);
} else {
auto [new_mask, null_count] = cudf::bitmask_and(cudf::table_view{{*lhs, *rhs}});
out->set_null_mask(std::move(new_mask), null_count);
}
auto out_view = out->mutable_view();
cudf::binops::compiled::detail::apply_sorting_struct_binary_op(
out_view, *lhs, *rhs, false, false, op, cudf::get_default_stream());
return release_as_jlong(out);
}
return release_as_jlong(cudf::binary_operation(*lhs, *rhs, op, n_data_type));
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnView_fixedPointOutputScale(JNIEnv *env, jclass,
jint int_op,
jint lhs_scale,
jint rhs_scale) {
try {
// we just return the scale as the types will be the same as the lhs input
return cudf::binary_operation_fixed_point_scale(static_cast<cudf::binary_operator>(int_op),
lhs_scale, rhs_scale);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_binaryOpVS(JNIEnv *env, jclass,
jlong lhs_view, jlong rhs_ptr,
jint int_op, jint out_dtype,
jint scale) {
JNI_NULL_CHECK(env, lhs_view, "lhs is null", 0);
JNI_NULL_CHECK(env, rhs_ptr, "rhs is null", 0);
try {
cudf::jni::auto_set_device(env);
auto lhs = reinterpret_cast<cudf::column_view *>(lhs_view);
cudf::scalar *rhs = reinterpret_cast<cudf::scalar *>(rhs_ptr);
cudf::data_type n_data_type = cudf::jni::make_data_type(out_dtype, scale);
cudf::binary_operator op = static_cast<cudf::binary_operator>(int_op);
if (lhs->type().id() == cudf::type_id::STRUCT) {
auto out = make_fixed_width_column(n_data_type, lhs->size(), cudf::mask_state::UNALLOCATED);
if (op == cudf::binary_operator::NULL_EQUALS) {
out->set_null_mask(rmm::device_buffer{}, 0);
} else {
auto [new_mask, new_null_count] = cudf::binops::scalar_col_valid_mask_and(*lhs, *rhs);
out->set_null_mask(std::move(new_mask), new_null_count);
}
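      // Materialize the scalar as a single-row column; the (false, true) flags
      // below mark the rhs as scalar so its one row is broadcast across the lhs.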
auto rhsv = cudf::make_column_from_scalar(*rhs, 1);
auto out_view = out->mutable_view();
cudf::binops::compiled::detail::apply_sorting_struct_binary_op(
out_view, *lhs, rhsv->view(), false, true, op, cudf::get_default_stream());
return release_as_jlong(out);
}
return release_as_jlong(cudf::binary_operation(*lhs, *rhs, op, n_data_type));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_substringS(JNIEnv *env, jclass,
jlong cv_handle, jint start) {
JNI_NULL_CHECK(env, cv_handle, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const cv = reinterpret_cast<cudf::column_view const *>(cv_handle);
auto const scv = cudf::strings_column_view{*cv};
return release_as_jlong(cudf::strings::slice_strings(scv, start));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_substring(JNIEnv *env, jclass,
jlong column_view, jint start,
jint end) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::strings_column_view scv(*cv);
return release_as_jlong(cudf::strings::slice_strings(scv, start, end));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_substringColumn(JNIEnv *env, jclass,
jlong column_view,
jlong start_column,
jlong end_column) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, start_column, "column is null", 0);
JNI_NULL_CHECK(env, end_column, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::strings_column_view scv(*cv);
cudf::column_view *sc = reinterpret_cast<cudf::column_view *>(start_column);
cudf::column_view *ec = reinterpret_cast<cudf::column_view *>(end_column);
return release_as_jlong(cudf::strings::slice_strings(scv, *sc, *ec));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_substringLocate(JNIEnv *env, jclass,
jlong column_view,
jlong substring, jint start,
jint end) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, substring, "target string scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::strings_column_view scv(*cv);
cudf::string_scalar *ss_scalar = reinterpret_cast<cudf::string_scalar *>(substring);
return release_as_jlong(cudf::strings::find(scv, *ss_scalar, start, end));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringReplace(JNIEnv *env, jclass,
jlong column_view,
jlong target, jlong replace) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, target, "target string scalar is null", 0);
JNI_NULL_CHECK(env, replace, "replace string scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::strings_column_view scv(*cv);
cudf::string_scalar *ss_target = reinterpret_cast<cudf::string_scalar *>(target);
cudf::string_scalar *ss_replace = reinterpret_cast<cudf::string_scalar *>(replace);
return release_as_jlong(cudf::strings::replace(scv, *ss_target, *ss_replace));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringReplaceMulti(JNIEnv *env, jclass,
jlong inputs_cv,
jlong targets_cv,
jlong repls_cv) {
JNI_NULL_CHECK(env, inputs_cv, "column is null", 0);
JNI_NULL_CHECK(env, targets_cv, "targets string column view is null", 0);
JNI_NULL_CHECK(env, repls_cv, "repls string column view is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(inputs_cv);
cudf::strings_column_view scv(*cv);
cudf::column_view *cvtargets = reinterpret_cast<cudf::column_view *>(targets_cv);
cudf::strings_column_view scvtargets(*cvtargets);
cudf::column_view *cvrepls = reinterpret_cast<cudf::column_view *>(repls_cv);
cudf::strings_column_view scvrepls(*cvrepls);
return release_as_jlong(cudf::strings::replace(scv, scvtargets, scvrepls));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_mapLookupForKeys(JNIEnv *env, jclass,
jlong map_column_view,
jlong lookup_keys) {
JNI_NULL_CHECK(env, map_column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_keys, "lookup key is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const *cv = reinterpret_cast<cudf::column_view *>(map_column_view);
auto const *column_keys = reinterpret_cast<cudf::column_view *>(lookup_keys);
auto const maps_view = cudf::jni::maps_column_view{*cv};
return release_as_jlong(maps_view.get_values_for(*column_keys));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_mapLookup(JNIEnv *env, jclass,
jlong map_column_view,
jlong lookup_key) {
JNI_NULL_CHECK(env, map_column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_key, "lookup key is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const *cv = reinterpret_cast<cudf::column_view *>(map_column_view);
auto const *scalar_key = reinterpret_cast<cudf::scalar *>(lookup_key);
auto const maps_view = cudf::jni::maps_column_view{*cv};
return release_as_jlong(maps_view.get_values_for(*scalar_key));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_mapContainsKeys(JNIEnv *env, jclass,
jlong map_column_view,
jlong lookup_keys) {
JNI_NULL_CHECK(env, map_column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_keys, "lookup key is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const *cv = reinterpret_cast<cudf::column_view *>(map_column_view);
auto const *column_key = reinterpret_cast<cudf::column_view *>(lookup_keys);
auto const maps_view = cudf::jni::maps_column_view{*cv};
return release_as_jlong(maps_view.contains(*column_key));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_mapContains(JNIEnv *env, jclass,
jlong map_column_view,
jlong lookup_key) {
JNI_NULL_CHECK(env, map_column_view, "column is null", 0);
JNI_NULL_CHECK(env, lookup_key, "lookup key is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const *cv = reinterpret_cast<cudf::column_view *>(map_column_view);
auto const *scalar_key = reinterpret_cast<cudf::scalar *>(lookup_key);
auto const maps_view = cudf::jni::maps_column_view{*cv};
return release_as_jlong(maps_view.contains(*scalar_key));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_replaceRegex(
JNIEnv *env, jclass, jlong j_column_view, jstring j_pattern, jint regex_flags,
jint capture_groups, jlong j_repl, jlong j_maxrepl) {
JNI_NULL_CHECK(env, j_column_view, "column is null", 0);
JNI_NULL_CHECK(env, j_pattern, "pattern string is null", 0);
JNI_NULL_CHECK(env, j_repl, "replace scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const cv = reinterpret_cast<cudf::column_view const *>(j_column_view);
auto const strings_column = cudf::strings_column_view{*cv};
auto const pattern = cudf::jni::native_jstring(env, j_pattern);
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const groups = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern.get(), flags, groups);
auto const repl = reinterpret_cast<cudf::string_scalar const *>(j_repl);
return release_as_jlong(
cudf::strings::replace_re(strings_column, *regex_prog, *repl, j_maxrepl));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_replaceMultiRegex(JNIEnv *env, jclass,
jlong j_column_view,
jobjectArray j_patterns,
jlong j_repls) {
JNI_NULL_CHECK(env, j_column_view, "column is null", 0);
JNI_NULL_CHECK(env, j_patterns, "patterns is null", 0);
JNI_NULL_CHECK(env, j_repls, "repls is null", 0);
try {
cudf::jni::auto_set_device(env);
auto cv = reinterpret_cast<cudf::column_view const *>(j_column_view);
cudf::strings_column_view scv(*cv);
cudf::jni::native_jstringArray patterns(env, j_patterns);
auto repl_cv = reinterpret_cast<cudf::column_view const *>(j_repls);
cudf::strings_column_view repl_scv(*repl_cv);
return release_as_jlong(cudf::strings::replace_re(scv, patterns.as_cpp_vector(), repl_scv));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringReplaceWithBackrefs(
JNIEnv *env, jclass, jlong j_column_view, jstring pattern_obj, jint regex_flags,
jint capture_groups, jstring replace_obj) {
JNI_NULL_CHECK(env, j_column_view, "column is null", 0);
JNI_NULL_CHECK(env, pattern_obj, "pattern string is null", 0);
JNI_NULL_CHECK(env, replace_obj, "replace string is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const cv = reinterpret_cast<cudf::column_view const *>(j_column_view);
auto const strings_column = cudf::strings_column_view{*cv};
auto const pattern = cudf::jni::native_jstring(env, pattern_obj);
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const groups = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern.get(), flags, groups);
cudf::jni::native_jstring ss_replace(env, replace_obj);
return release_as_jlong(
cudf::strings::replace_with_backrefs(strings_column, *regex_prog, ss_replace.get()));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_zfill(JNIEnv *env, jclass, jlong column_view,
jint j_width) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::strings_column_view scv(*cv);
    cudf::size_type width = static_cast<cudf::size_type>(j_width);
return release_as_jlong(cudf::strings::zfill(scv, width));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_pad(JNIEnv *env, jclass, jlong column_view,
jint j_width, jint j_side,
jstring fill_char) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, fill_char, "fill_char is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::strings_column_view scv(*cv);
    cudf::size_type width = static_cast<cudf::size_type>(j_width);
cudf::strings::side_type side = static_cast<cudf::strings::side_type>(j_side);
cudf::jni::native_jstring ss_fill(env, fill_char);
return release_as_jlong(cudf::strings::pad(scv, width, side, ss_fill.get()));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringStrip(JNIEnv *env, jclass,
jlong column_view,
jint strip_type,
jlong to_strip) {
JNI_NULL_CHECK(env, column_view, "column is null", 0);
JNI_NULL_CHECK(env, to_strip, "to_strip scalar is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_view);
cudf::strings_column_view scv(*cv);
cudf::strings::side_type s_striptype = static_cast<cudf::strings::side_type>(strip_type);
cudf::string_scalar *ss_tostrip = reinterpret_cast<cudf::string_scalar *>(to_strip);
return release_as_jlong(cudf::strings::strip(scv, s_striptype, *ss_tostrip));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_ColumnView_extractRe(JNIEnv *env, jclass,
jlong j_view_handle,
jstring pattern_obj,
jint regex_flags,
jint capture_groups) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", nullptr);
JNI_NULL_CHECK(env, pattern_obj, "pattern is null", nullptr);
try {
cudf::jni::auto_set_device(env);
auto const column_view = reinterpret_cast<cudf::column_view const *>(j_view_handle);
auto const strings_column = cudf::strings_column_view{*column_view};
auto const pattern = cudf::jni::native_jstring(env, pattern_obj);
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const groups = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern.get(), flags, groups);
return cudf::jni::convert_table_for_return(env,
cudf::strings::extract(strings_column, *regex_prog));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_extractAllRecord(
JNIEnv *env, jclass, jlong j_view_handle, jstring pattern_obj, jint regex_flags,
jint capture_groups, jint idx) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", 0);
JNI_NULL_CHECK(env, pattern_obj, "pattern is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const column_view = reinterpret_cast<cudf::column_view const *>(j_view_handle);
auto const strings_column = cudf::strings_column_view{*column_view};
auto const pattern = cudf::jni::native_jstring(env, pattern_obj);
auto const flags = static_cast<cudf::strings::regex_flags>(regex_flags);
auto const groups = static_cast<cudf::strings::capture_groups>(capture_groups);
auto const regex_prog = cudf::strings::regex_program::create(pattern.get(), flags, groups);
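    // idx == 0 selects findall (lists of full-pattern matches per row); any other
    // value selects extract_all_record (lists of capture-group matches per row).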
auto result = (idx == 0) ? cudf::strings::findall(strings_column, *regex_prog) :
cudf::strings::extract_all_record(strings_column, *regex_prog);
return release_as_jlong(result);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_urlDecode(JNIEnv *env, jclass,
jlong j_view_handle) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto view_ptr = reinterpret_cast<cudf::column_view *>(j_view_handle);
cudf::strings_column_view strings_view(*view_ptr);
return release_as_jlong(cudf::strings::url_decode(strings_view));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_urlEncode(JNIEnv *env, jclass,
jlong j_view_handle) {
JNI_NULL_CHECK(env, j_view_handle, "column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto view_ptr = reinterpret_cast<cudf::column_view *>(j_view_handle);
cudf::strings_column_view strings_view(*view_ptr);
return release_as_jlong(cudf::strings::url_encode(strings_view));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_normalizeNANsAndZeros(JNIEnv *env,
jclass clazz,
jlong input_column) {
using cudf::column_view;
JNI_NULL_CHECK(env, input_column, "Input column is null", 0);
try {
cudf::jni::auto_set_device(env);
return release_as_jlong(
cudf::normalize_nans_and_zeros(*reinterpret_cast<column_view *>(input_column)));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_bitwiseMergeAndSetValidity(
JNIEnv *env, jobject j_object, jlong base_column, jlongArray column_handles, jint bin_op) {
JNI_NULL_CHECK(env, base_column, "base column native handle is null", 0);
JNI_NULL_CHECK(env, column_handles, "array of column handles is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *original_column = reinterpret_cast<cudf::column_view *>(base_column);
std::unique_ptr<cudf::column> copy(new cudf::column(*original_column));
cudf::jni::native_jpointerArray<cudf::column_view> n_cudf_columns(env, column_handles);
if (n_cudf_columns.size() == 0) {
copy->set_null_mask({}, 0);
return release_as_jlong(copy);
}
cudf::binary_operator op = static_cast<cudf::binary_operator>(bin_op);
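    // Only two merge operations are supported: BITWISE_AND intersects the
    // validity of all given columns with the base column's mask, while BITWISE_OR
    // first unions the extra columns' validity and then intersects the result
    // with the base column's mask.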
switch (op) {
case cudf::binary_operator::BITWISE_AND: {
auto cols = n_cudf_columns.get_dereferenced();
cols.push_back(copy->view());
auto table_view = cudf::table_view{cols};
auto [new_bitmask, null_count] = cudf::bitmask_and(table_view);
copy->set_null_mask(std::move(new_bitmask), null_count);
break;
}
case cudf::binary_operator::BITWISE_OR: {
auto input_table = cudf::table_view{n_cudf_columns.get_dereferenced()};
auto [tmp_new_bitmask, tmp_null_count] = cudf::bitmask_or(input_table);
copy->set_null_mask(std::move(tmp_new_bitmask), tmp_null_count);
      // AND the resulting bitmask with the original column's validity mask
cudf::table_view table_view{std::vector<cudf::column_view>{copy->view(), *original_column}};
auto [new_bitmask, null_count] = cudf::bitmask_and(table_view);
copy->set_null_mask(std::move(new_bitmask), null_count);
break;
}
default: JNI_THROW_NEW(env, cudf::jni::ILLEGAL_ARG_CLASS, "Unsupported merge operation", 0);
}
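    // Rows nulled out above may still carry non-empty list/string payloads; purge
    // them so the result satisfies cudf's invariant that null rows are empty.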
auto const copy_cv = copy->view();
if (cudf::has_nonempty_nulls(copy_cv)) {
copy = cudf::purge_nonempty_nulls(copy_cv);
}
return release_as_jlong(copy);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_copyWithBooleanColumnAsValidity(
JNIEnv *env, jobject j_object, jlong exemplar_handle, jlong validity_column_handle) {
JNI_NULL_CHECK(env, exemplar_handle, "ColumnView handle is null", 0);
JNI_NULL_CHECK(env, validity_column_handle, "Validity column handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const exemplar = *reinterpret_cast<cudf::column_view *>(exemplar_handle);
auto const validity = *reinterpret_cast<cudf::column_view *>(validity_column_handle);
return release_as_jlong(
cudf::jni::new_column_with_boolean_column_as_validity(exemplar, validity));
}
CATCH_STD(env, 0);
}
////////
// Native cudf::column_view life cycle and metadata access methods. Life cycle methods
// should typically only be called from the CudfColumn inner class.
////////
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_makeCudfColumnView(
JNIEnv *env, jclass, jint j_type, jint scale, jlong j_data, jlong j_data_size, jlong j_offset,
jlong j_valid, jint j_null_count, jint size, jlongArray j_children) {
try {
using cudf::column_view;
cudf::jni::auto_set_device(env);
cudf::type_id n_type = static_cast<cudf::type_id>(j_type);
cudf::data_type n_data_type = cudf::jni::make_data_type(j_type, scale);
void *data = reinterpret_cast<void *>(j_data);
cudf::bitmask_type *valid = reinterpret_cast<cudf::bitmask_type *>(j_valid);
if (valid == nullptr) {
j_null_count = 0;
}
if (j_null_count < 0) { // Check for unknown null count.
// Calculate concrete null count.
j_null_count = cudf::null_count(valid, 0, size);
}
if (n_type == cudf::type_id::STRING) {
if (size == 0) {
return ptr_as_jlong(
new cudf::column_view(cudf::data_type{cudf::type_id::STRING}, 0, nullptr, nullptr, 0));
} else {
JNI_NULL_CHECK(env, j_offset, "offset is null", 0);
// This must be kept in sync with how string columns are created
// offsets are always the first child
// data is the second child
cudf::size_type *offsets = reinterpret_cast<cudf::size_type *>(j_offset);
cudf::column_view offsets_column(cudf::data_type{cudf::type_id::INT32}, size + 1, offsets,
nullptr, 0);
cudf::column_view data_column(cudf::data_type{cudf::type_id::INT8}, j_data_size, data,
nullptr, 0);
return ptr_as_jlong(new cudf::column_view(cudf::data_type{cudf::type_id::STRING}, size,
nullptr, valid, j_null_count, 0,
{offsets_column, data_column}));
}
} else if (n_type == cudf::type_id::LIST) {
JNI_NULL_CHECK(env, j_children, "children of a list are null", 0);
cudf::jni::native_jpointerArray<cudf::column_view> children(env, j_children);
JNI_ARG_CHECK(env, (children.size() == 1), "LIST children size is not 1", 0);
cudf::size_type offsets_size = 0;
cudf::size_type *offsets = nullptr;
if (size != 0) {
JNI_NULL_CHECK(env, j_offset, "offset is null", 0);
offsets_size = size + 1;
offsets = reinterpret_cast<cudf::size_type *>(j_offset);
}
cudf::column_view offsets_column(cudf::data_type{cudf::type_id::INT32}, offsets_size, offsets,
nullptr, 0);
return ptr_as_jlong(new cudf::column_view(cudf::data_type{cudf::type_id::LIST}, size, nullptr,
valid, j_null_count, 0,
{offsets_column, *children[0]}));
} else if (n_type == cudf::type_id::STRUCT) {
JNI_NULL_CHECK(env, j_children, "children of a struct are null", 0);
cudf::jni::native_jpointerArray<cudf::column_view> children(env, j_children);
std::vector<column_view> children_vector = children.get_dereferenced();
return ptr_as_jlong(new cudf::column_view(cudf::data_type{cudf::type_id::STRUCT}, size,
nullptr, valid, j_null_count, 0, children_vector));
} else {
return ptr_as_jlong(new cudf::column_view(n_data_type, size, data, valid, j_null_count));
}
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnView_getNativeTypeId(JNIEnv *env, jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
return static_cast<jint>(column->type().id());
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnView_getNativeTypeScale(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
return column->type().scale();
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnView_getNativeRowCount(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
return static_cast<jint>(column->size());
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnView_getNativeNullCount(JNIEnv *env,
jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
return static_cast<jint>(column->null_count());
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_ColumnView_deleteColumnView(JNIEnv *env,
jobject j_object,
jlong handle) {
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(handle);
delete view;
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getNativeDataAddress(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
jlong result = 0;
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
if (column->type().id() == cudf::type_id::STRING) {
if (column->size() > 0) {
cudf::strings_column_view view = cudf::strings_column_view(*column);
cudf::column_view data_view = view.chars();
result = reinterpret_cast<jlong>(data_view.data<char>());
}
} else if (column->type().id() != cudf::type_id::LIST &&
column->type().id() != cudf::type_id::STRUCT) {
result = reinterpret_cast<jlong>(column->data<char>());
}
return result;
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getNativeDataLength(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
jlong result = 0;
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
if (column->type().id() == cudf::type_id::STRING) {
if (column->size() > 0) {
cudf::strings_column_view view = cudf::strings_column_view(*column);
cudf::column_view data_view = view.chars();
result = data_view.size();
}
} else if (column->type().id() != cudf::type_id::LIST &&
column->type().id() != cudf::type_id::STRUCT) {
result = cudf::size_of(column->type()) * column->size();
}
return result;
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_ColumnView_getNativeNumChildren(JNIEnv *env,
jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
    // Strings columns have children (offsets and chars), but no nested children that we count here.
if (column->type().id() == cudf::type_id::STRING) {
return 0;
} else if (column->type().id() == cudf::type_id::LIST) {
      // The first child of a LIST is always the offsets column, which we do not count here.
return static_cast<jint>(column->num_children() - 1);
} else if (column->type().id() == cudf::type_id::STRUCT) {
return static_cast<jint>(column->num_children());
} else {
return 0;
}
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getChildCvPointer(JNIEnv *env,
jobject j_object,
jlong handle,
jint child_index) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
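    // LIST columns store offsets as child 0, so the user-visible child index is
    // shifted by one; STRUCT children map directly.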
auto const is_list = column->type().id() == cudf::type_id::LIST;
auto const child = column->child(child_index + (is_list ? 1 : 0));
return ptr_as_jlong(new cudf::column_view(child));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getListOffsetCvPointer(JNIEnv *env,
jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
cudf::lists_column_view view = cudf::lists_column_view(*column);
cudf::column_view offsets_view = view.offsets();
return ptr_as_jlong(new cudf::column_view(offsets_view));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getNativeOffsetsAddress(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
jlong result = 0;
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
if (column->type().id() == cudf::type_id::STRING) {
if (column->size() > 0) {
cudf::strings_column_view view = cudf::strings_column_view(*column);
cudf::column_view offsets_view = view.offsets();
result = ptr_as_jlong(offsets_view.data<char>());
}
} else if (column->type().id() == cudf::type_id::LIST) {
if (column->size() > 0) {
cudf::lists_column_view view = cudf::lists_column_view(*column);
cudf::column_view offsets_view = view.offsets();
result = ptr_as_jlong(offsets_view.data<char>());
}
}
return result;
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getNativeOffsetsLength(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
jlong result = 0;
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
if (column->type().id() == cudf::type_id::STRING) {
if (column->size() > 0) {
cudf::strings_column_view view = cudf::strings_column_view(*column);
cudf::column_view offsets_view = view.offsets();
result = sizeof(int) * offsets_view.size();
}
} else if (column->type().id() == cudf::type_id::LIST) {
if (column->size() > 0) {
cudf::lists_column_view view = cudf::lists_column_view(*column);
cudf::column_view offsets_view = view.offsets();
result = sizeof(int) * offsets_view.size();
}
}
return result;
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getNativeValidityAddress(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
return ptr_as_jlong(column->null_mask());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getNativeValidityLength(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(handle);
jlong result = 0;
if (column->null_mask() != nullptr) {
result = cudf::bitmask_allocation_size_bytes(column->size());
}
return result;
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getDeviceMemorySize(JNIEnv *env, jclass,
jlong handle,
jboolean pad_for_cpu) {
JNI_NULL_CHECK(env, handle, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto view = reinterpret_cast<cudf::column_view const *>(handle);
return calc_device_memory_size(*view, pad_for_cpu);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_hostPaddingSizeInBytes(JNIEnv *env, jclass) {
return sizeof(std::max_align_t);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_clamper(JNIEnv *env, jobject j_object,
jlong handle, jlong j_lo_scalar,
jlong j_lo_replace_scalar,
jlong j_hi_scalar,
jlong j_hi_replace_scalar) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
JNI_NULL_CHECK(env, j_lo_scalar, "lo scalar is null", 0)
JNI_NULL_CHECK(env, j_lo_replace_scalar, "lo scalar replace value is null", 0)
  JNI_NULL_CHECK(env, j_hi_scalar, "hi scalar is null", 0)
  JNI_NULL_CHECK(env, j_hi_replace_scalar, "hi scalar replace value is null", 0)
using cudf::clamp;
try {
cudf::jni::auto_set_device(env);
cudf::column_view *column_view = reinterpret_cast<cudf::column_view *>(handle);
cudf::scalar *lo_scalar = reinterpret_cast<cudf::scalar *>(j_lo_scalar);
cudf::scalar *lo_replace_scalar = reinterpret_cast<cudf::scalar *>(j_lo_replace_scalar);
cudf::scalar *hi_scalar = reinterpret_cast<cudf::scalar *>(j_hi_scalar);
cudf::scalar *hi_replace_scalar = reinterpret_cast<cudf::scalar *>(j_hi_replace_scalar);
return release_as_jlong(
clamp(*column_view, *lo_scalar, *lo_replace_scalar, *hi_scalar, *hi_replace_scalar));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_title(JNIEnv *env, jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::strings::title(*view));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_capitalize(JNIEnv *env, jobject j_object,
jlong strs_handle,
jlong delimiters_handle) {
JNI_NULL_CHECK(env, strs_handle, "native view handle is null", 0)
JNI_NULL_CHECK(env, delimiters_handle, "delimiters scalar handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(strs_handle);
cudf::string_scalar *deli = reinterpret_cast<cudf::string_scalar *>(delimiters_handle);
return release_as_jlong(cudf::strings::capitalize(*view, *deli));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_joinStrings(JNIEnv *env, jobject j_object,
jlong strs_handle,
jlong separator_handle,
jlong narep_handle) {
JNI_NULL_CHECK(env, strs_handle, "native view handle is null", 0)
JNI_NULL_CHECK(env, separator_handle, "separator scalar handle is null", 0)
JNI_NULL_CHECK(env, narep_handle, "narep scalar handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(strs_handle);
cudf::string_scalar *sep = reinterpret_cast<cudf::string_scalar *>(separator_handle);
cudf::string_scalar *narep = reinterpret_cast<cudf::string_scalar *>(narep_handle);
return release_as_jlong(cudf::strings::join_strings(*view, *sep, *narep));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_makeStructView(JNIEnv *env, jobject j_object,
jlongArray handles,
jlong row_count) {
JNI_NULL_CHECK(env, handles, "native view handles are null", 0)
try {
cudf::jni::auto_set_device(env);
auto children = cudf::jni::native_jpointerArray<cudf::column_view>{env, handles};
auto children_vector = children.get_dereferenced();
return ptr_as_jlong(new cudf::column_view(cudf::data_type{cudf::type_id::STRUCT}, row_count,
nullptr, nullptr, 0, 0, children_vector));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_nansToNulls(JNIEnv *env, jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
try {
cudf::jni::auto_set_device(env);
auto const input = *reinterpret_cast<cudf::column_view *>(handle);
// get a new null mask by setting all the nans to null
auto [new_nullmask, new_null_count] = cudf::nans_to_nulls(input);
// create a column_view which is a no-copy wrapper around the original column without the null
// mask
auto const input_without_nullmask = cudf::column_view(
input.type(), input.size(), input.head<void>(), nullptr, 0, input.offset(),
std::vector<cudf::column_view>{input.child_begin(), input.child_end()});
// create a column by deep copying `input_without_nullmask`.
auto deep_copy = std::make_unique<cudf::column>(input_without_nullmask);
deep_copy->set_null_mask(std::move(*new_nullmask), new_null_count);
return release_as_jlong(deep_copy);
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isFloat(JNIEnv *env, jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::strings::is_float(*view));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isInteger(JNIEnv *env, jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(handle);
return release_as_jlong(cudf::strings::is_integer(*view));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isFixedPoint(JNIEnv *env, jobject,
jlong handle, jint j_dtype,
jint scale) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(handle);
cudf::data_type fp_dtype = cudf::jni::make_data_type(j_dtype, scale);
return release_as_jlong(cudf::strings::is_fixed_point(*view, fp_dtype));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_isIntegerWithType(JNIEnv *env, jobject,
jlong handle, jint j_dtype,
jint scale) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(handle);
cudf::data_type int_dtype = cudf::jni::make_data_type(j_dtype, scale);
return release_as_jlong(cudf::strings::is_integer(*view, int_dtype));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_copyColumnViewToCV(JNIEnv *env,
jobject j_object,
jlong handle) {
JNI_NULL_CHECK(env, handle, "native view handle is null", 0)
try {
cudf::jni::auto_set_device(env);
cudf::column_view *view = reinterpret_cast<cudf::column_view *>(handle);
return ptr_as_jlong(new cudf::column(*view));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_getJSONObject(JNIEnv *env, jclass,
jlong j_view_handle,
jlong j_scalar_handle) {
JNI_NULL_CHECK(env, j_view_handle, "view cannot be null", 0);
JNI_NULL_CHECK(env, j_scalar_handle, "path cannot be null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view *n_column_view = reinterpret_cast<cudf::column_view *>(j_view_handle);
cudf::strings_column_view n_strings_col_view(*n_column_view);
cudf::string_scalar *n_scalar_path = reinterpret_cast<cudf::string_scalar *>(j_scalar_handle);
return release_as_jlong(cudf::get_json_object(n_strings_col_view, *n_scalar_path));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringConcatenationListElementsSepCol(
JNIEnv *env, jclass, jlong column_handle, jlong sep_handle, jlong separator_narep,
jlong col_narep, jboolean separate_nulls, jboolean empty_string_output_if_empty_list) {
JNI_NULL_CHECK(env, column_handle, "column handle is null", 0);
JNI_NULL_CHECK(env, sep_handle, "separator column handle is null", 0);
JNI_NULL_CHECK(env, separator_narep, "separator narep string scalar object is null", 0);
JNI_NULL_CHECK(env, col_narep, "column narep string scalar object is null", 0);
try {
cudf::jni::auto_set_device(env);
const auto &separator_narep_scalar = *reinterpret_cast<cudf::string_scalar *>(separator_narep);
const auto &col_narep_scalar = *reinterpret_cast<cudf::string_scalar *>(col_narep);
auto null_policy = separate_nulls ? cudf::strings::separator_on_nulls::YES :
cudf::strings::separator_on_nulls::NO;
auto empty_list_output = empty_string_output_if_empty_list ?
cudf::strings::output_if_empty_list::EMPTY_STRING :
cudf::strings::output_if_empty_list::NULL_ELEMENT;
cudf::column_view *column = reinterpret_cast<cudf::column_view *>(sep_handle);
cudf::strings_column_view strings_column(*column);
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_handle);
cudf::lists_column_view lcv(*cv);
return release_as_jlong(
cudf::strings::join_list_elements(lcv, strings_column, separator_narep_scalar,
col_narep_scalar, null_policy, empty_list_output));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_stringConcatenationListElements(
JNIEnv *env, jclass, jlong column_handle, jlong separator, jlong narep, jboolean separate_nulls,
jboolean empty_string_output_if_empty_list) {
JNI_NULL_CHECK(env, column_handle, "column handle is null", 0);
JNI_NULL_CHECK(env, separator, "separator string scalar object is null", 0);
  JNI_NULL_CHECK(env, narep, "narep string scalar object is null", 0);
try {
cudf::jni::auto_set_device(env);
const auto &separator_scalar = *reinterpret_cast<cudf::string_scalar *>(separator);
const auto &narep_scalar = *reinterpret_cast<cudf::string_scalar *>(narep);
auto null_policy = separate_nulls ? cudf::strings::separator_on_nulls::YES :
cudf::strings::separator_on_nulls::NO;
auto empty_list_output = empty_string_output_if_empty_list ?
cudf::strings::output_if_empty_list::EMPTY_STRING :
cudf::strings::output_if_empty_list::NULL_ELEMENT;
cudf::column_view *cv = reinterpret_cast<cudf::column_view *>(column_handle);
cudf::lists_column_view lcv(*cv);
return release_as_jlong(cudf::strings::join_list_elements(lcv, separator_scalar, narep_scalar,
null_policy, empty_list_output));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_repeatStrings(JNIEnv *env, jclass,
jlong strings_handle,
jint repeat_times) {
JNI_NULL_CHECK(env, strings_handle, "strings_handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const cv = *reinterpret_cast<cudf::column_view *>(strings_handle);
auto const strs_col = cudf::strings_column_view(cv);
return release_as_jlong(cudf::strings::repeat_strings(strs_col, repeat_times));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_repeatStringsWithColumnRepeatTimes(
JNIEnv *env, jclass, jlong strings_handle, jlong repeat_times_handle) {
JNI_NULL_CHECK(env, strings_handle, "strings_handle is null", 0);
JNI_NULL_CHECK(env, repeat_times_handle, "repeat_times_handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const strings_cv = *reinterpret_cast<cudf::column_view *>(strings_handle);
auto const strs_col = cudf::strings_column_view(strings_cv);
auto const repeat_times_cv = *reinterpret_cast<cudf::column_view *>(repeat_times_handle);
return release_as_jlong(cudf::strings::repeat_strings(strs_col, repeat_times_cv));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_applyBooleanMask(
JNIEnv *env, jclass, jlong list_column_handle, jlong boolean_mask_list_column_handle) {
JNI_NULL_CHECK(env, list_column_handle, "list handle is null", 0);
JNI_NULL_CHECK(env, boolean_mask_list_column_handle, "boolean mask handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::column_view const *list_column =
reinterpret_cast<cudf::column_view const *>(list_column_handle);
cudf::lists_column_view const list_view = cudf::lists_column_view(*list_column);
cudf::column_view const *boolean_mask_list_column =
reinterpret_cast<cudf::column_view const *>(boolean_mask_list_column_handle);
cudf::lists_column_view const boolean_mask_list_view =
cudf::lists_column_view(*boolean_mask_list_column);
return release_as_jlong(cudf::lists::apply_boolean_mask(list_view, boolean_mask_list_view));
}
CATCH_STD(env, 0);
}
JNIEXPORT jboolean JNICALL
Java_ai_rapids_cudf_ColumnView_hasNonEmptyNulls(JNIEnv *env, jclass, jlong column_view_handle) {
JNI_NULL_CHECK(env, column_view_handle, "column_view handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const *cv = reinterpret_cast<cudf::column_view const *>(column_view_handle);
return cudf::has_nonempty_nulls(*cv);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL
Java_ai_rapids_cudf_ColumnView_purgeNonEmptyNulls(JNIEnv *env, jclass, jlong column_view_handle) {
JNI_NULL_CHECK(env, column_view_handle, "column_view handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const *cv = reinterpret_cast<cudf::column_view const *>(column_view_handle);
return release_as_jlong(cudf::purge_nonempty_nulls(*cv));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_toHex(JNIEnv *env, jclass, jlong input_ptr) {
JNI_NULL_CHECK(env, input_ptr, "input is null", 0);
try {
cudf::jni::auto_set_device(env);
const cudf::column_view *input = reinterpret_cast<cudf::column_view *>(input_ptr);
return release_as_jlong(cudf::strings::integers_to_hex(*input));
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/csv_chunked_writer.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <cudf/io/csv.hpp>
#include "jni_writer_data_sink.hpp"
namespace cudf::jni::io {
/**
* @brief Class to write multiple Tables into the jni_writer_data_sink.
*/
class csv_chunked_writer {
cudf::io::csv_writer_options _options;
std::unique_ptr<cudf::jni::jni_writer_data_sink> _sink;
  bool _first_write_completed = false; ///< Whether a write has occurred; only the first write emits the header.
public:
explicit csv_chunked_writer(cudf::io::csv_writer_options options,
std::unique_ptr<cudf::jni::jni_writer_data_sink> &sink)
: _options{options}, _sink{std::move(sink)} {
auto const &sink_info = _options.get_sink();
// Assert invariants.
CUDF_EXPECTS(sink_info.type() != cudf::io::io_type::FILEPATH,
"Currently, chunked CSV writes to files is not supported.");
// Note: csv_writer_options ties the sink(s) to the options, and exposes
// no way to modify the sinks afterwards.
    // Ideally, the options would be kept separate from both the tables being
    // written and the destination sinks.
// Here, we retain a modifiable reference to the sink, and confirm the
// options point to the same sink.
CUDF_EXPECTS(sink_info.num_sinks() == 1, "csv_chunked_writer should have exactly one sink.");
CUDF_EXPECTS(sink_info.user_sinks()[0] == _sink.get(), "Sink mismatch.");
}
void write(cudf::table_view const &table) {
if (_first_write_completed) {
_options.enable_include_header(false); // Don't write header after the first write.
}
_options.set_table(table);
_options.set_rows_per_chunk(table.num_rows());
cudf::io::write_csv(_options);
_first_write_completed = true;
}
void close() {
// Flush pending writes to sink.
_sink->flush();
}
};
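
// A minimal usage sketch (hypothetical caller; the sink construction details are
// assumptions, not part of this header):
//
//   std::unique_ptr<cudf::jni::jni_writer_data_sink> sink = make_sink(...);  // hypothetical helper
//   auto opts = cudf::io::csv_writer_options::builder(
//                   cudf::io::sink_info{sink.get()}, first_chunk).build();
//   cudf::jni::io::csv_chunked_writer writer(opts, sink);
//   writer.write(first_chunk);  // header written once
//   writer.write(next_chunk);   // subsequent chunks skip the header
//   writer.close();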
} // namespace cudf::jni::io
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/RmmJni.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <atomic>
#include <ctime>
#include <fstream>
#include <iostream>
#include <limits>
#include <mutex>
#include <rmm/mr/device/aligned_resource_adaptor.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>
#include <rmm/mr/device/logging_resource_adaptor.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include "cudf_jni_apis.hpp"
using rmm::mr::device_memory_resource;
using rmm::mr::logging_resource_adaptor;
namespace {
constexpr char const *RMM_EXCEPTION_CLASS = "ai/rapids/cudf/RmmException";
/**
* @brief Base class so we can template tracking_resource_adaptor but
* still hold all instances of it without issues.
*/
class base_tracking_resource_adaptor : public device_memory_resource {
public:
virtual std::size_t get_total_allocated() = 0;
virtual std::size_t get_max_total_allocated() = 0;
virtual void reset_scoped_max_total_allocated(std::size_t initial_value) = 0;
virtual std::size_t get_scoped_max_total_allocated() = 0;
};
/**
* @brief An RMM device memory resource that delegates to another resource
* while tracking the amount of memory allocated.
*
* @tparam Upstream Type of memory resource that will be wrapped.
 *
 * All allocation requests are rounded up to the size alignment supplied at
 * construction time, which must be a value >= 1.
*/
template <typename Upstream>
class tracking_resource_adaptor final : public base_tracking_resource_adaptor {
public:
/**
* @brief Constructs a new tracking resource adaptor that delegates to
* `mr` for all allocation operations while tracking the amount of memory
* allocated.
*
* @param mr The resource to use for memory allocation operations.
* @param size_alignment The alignment to which the `mr` resource will
* round up all memory allocation size requests.
*/
tracking_resource_adaptor(Upstream *mr, std::size_t size_alignment)
: resource{mr}, size_align{size_alignment} {}
Upstream *get_wrapped_resource() { return resource; }
std::size_t get_total_allocated() override { return total_allocated.load(); }
std::size_t get_max_total_allocated() override { return max_total_allocated; }
void reset_scoped_max_total_allocated(std::size_t initial_value) override {
std::scoped_lock lock(max_total_allocated_mutex);
scoped_allocated = initial_value;
scoped_max_total_allocated = initial_value;
}
std::size_t get_scoped_max_total_allocated() override {
std::scoped_lock lock(max_total_allocated_mutex);
return scoped_max_total_allocated;
}
bool supports_get_mem_info() const noexcept override { return resource->supports_get_mem_info(); }
bool supports_streams() const noexcept override { return resource->supports_streams(); }
private:
Upstream *const resource;
std::size_t const size_align;
// sum of what is currently allocated
std::atomic_size_t total_allocated{0};
// the maximum total allocated for the lifetime of this class
std::size_t max_total_allocated{0};
// the sum of what is currently outstanding from the last
// `reset_scoped_max_total_allocated` call. This can be negative.
std::atomic_long scoped_allocated{0};
// the maximum total allocated relative to the last
// `reset_scoped_max_total_allocated` call.
long scoped_max_total_allocated{0};
std::mutex max_total_allocated_mutex;
void *do_allocate(std::size_t num_bytes, rmm::cuda_stream_view stream) override {
// adjust size of allocation based on specified size alignment
num_bytes = (num_bytes + size_align - 1) / size_align * size_align;
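    // (illustrative: with size_align == 256, a 1000-byte request is tracked as
    // (1000 + 255) / 256 * 256 == 1024 bytes)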
auto result = resource->allocate(num_bytes, stream);
if (result) {
total_allocated += num_bytes;
scoped_allocated += num_bytes;
std::scoped_lock lock(max_total_allocated_mutex);
max_total_allocated = std::max(total_allocated.load(), max_total_allocated);
scoped_max_total_allocated = std::max(scoped_allocated.load(), scoped_max_total_allocated);
}
return result;
}
void do_deallocate(void *p, std::size_t size, rmm::cuda_stream_view stream) override {
size = (size + size_align - 1) / size_align * size_align;
resource->deallocate(p, size, stream);
if (p) {
total_allocated -= size;
scoped_allocated -= size;
}
}
std::pair<size_t, size_t> do_get_mem_info(rmm::cuda_stream_view stream) const override {
return resource->get_mem_info(stream);
}
};
template <typename Upstream>
tracking_resource_adaptor<Upstream> *make_tracking_adaptor(Upstream *upstream,
std::size_t size_alignment) {
return new tracking_resource_adaptor<Upstream>{upstream, size_alignment};
}
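// Usage sketch (illustrative only; mirrors how the JNI entry points below wire
// things together, with a hypothetical upstream resource):
//
//   rmm::mr::cuda_memory_resource upstream;
//   auto *tracker = make_tracking_adaptor(&upstream, 256 /* size_alignment */);
//   void *p = tracker->allocate(1000, rmm::cuda_stream_default);
//   assert(tracker->get_total_allocated() == 1024); // tracked at aligned size
//   tracker->deallocate(p, 1000, rmm::cuda_stream_default);
//   delete tracker;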
/**
* @brief An RMM device memory resource adaptor that delegates to the wrapped resource
* for most operations but will call Java to handle certain situations (e.g.: allocation failure).
*/
class java_event_handler_memory_resource : public device_memory_resource {
public:
java_event_handler_memory_resource(JNIEnv *env, jobject jhandler, jlongArray jalloc_thresholds,
jlongArray jdealloc_thresholds,
device_memory_resource *resource_to_wrap,
base_tracking_resource_adaptor *tracker)
: resource(resource_to_wrap), tracker(tracker) {
if (env->GetJavaVM(&jvm) < 0) {
throw std::runtime_error("GetJavaVM failed");
}
jclass cls = env->GetObjectClass(jhandler);
if (cls == nullptr) {
throw cudf::jni::jni_exception("class not found");
}
on_alloc_fail_method = env->GetMethodID(cls, "onAllocFailure", "(JI)Z");
if (on_alloc_fail_method == nullptr) {
use_old_alloc_fail_interface = true;
on_alloc_fail_method = env->GetMethodID(cls, "onAllocFailure", "(J)Z");
if (on_alloc_fail_method == nullptr) {
throw cudf::jni::jni_exception("onAllocFailure method");
}
} else {
use_old_alloc_fail_interface = false;
}
on_alloc_threshold_method = env->GetMethodID(cls, "onAllocThreshold", "(J)V");
if (on_alloc_threshold_method == nullptr) {
throw cudf::jni::jni_exception("onAllocThreshold method");
}
on_dealloc_threshold_method = env->GetMethodID(cls, "onDeallocThreshold", "(J)V");
if (on_dealloc_threshold_method == nullptr) {
throw cudf::jni::jni_exception("onDeallocThreshold method");
}
update_thresholds(env, alloc_thresholds, jalloc_thresholds);
update_thresholds(env, dealloc_thresholds, jdealloc_thresholds);
handler_obj = cudf::jni::add_global_ref(env, jhandler);
}
virtual ~java_event_handler_memory_resource() {
// This should normally be called by a JVM thread. If the JVM environment is missing then this
// is likely being triggered by the C++ runtime during shutdown. In that case the JVM may
// already be destroyed and this thread should not try to attach to get an environment.
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) {
handler_obj = cudf::jni::del_global_ref(env, handler_obj);
}
handler_obj = nullptr;
}
device_memory_resource *get_wrapped_resource() { return resource; }
bool supports_get_mem_info() const noexcept override { return resource->supports_get_mem_info(); }
bool supports_streams() const noexcept override { return resource->supports_streams(); }
private:
device_memory_resource *const resource;
base_tracking_resource_adaptor *const tracker;
jmethodID on_alloc_fail_method;
bool use_old_alloc_fail_interface;
jmethodID on_alloc_threshold_method;
jmethodID on_dealloc_threshold_method;
// sorted memory thresholds to trigger callbacks
std::vector<std::size_t> alloc_thresholds{};
std::vector<std::size_t> dealloc_thresholds{};
static void update_thresholds(JNIEnv *env, std::vector<std::size_t> &thresholds,
jlongArray from_java) {
thresholds.clear();
if (from_java != nullptr) {
cudf::jni::native_jlongArray jvalues(env, from_java);
thresholds.insert(thresholds.end(), jvalues.data(), jvalues.data() + jvalues.size());
} else {
// use a single, maximum-threshold value so we don't have to always check for the corner case.
thresholds.push_back(std::numeric_limits<std::size_t>::max());
}
}
bool on_alloc_fail(std::size_t num_bytes, int retry_count) {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
jboolean result = false;
if (!use_old_alloc_fail_interface) {
result =
env->CallBooleanMethod(handler_obj, on_alloc_fail_method, static_cast<jlong>(num_bytes),
static_cast<jint>(retry_count));
} else {
result =
env->CallBooleanMethod(handler_obj, on_alloc_fail_method, static_cast<jlong>(num_bytes));
}
if (env->ExceptionCheck()) {
throw std::runtime_error("onAllocFailure handler threw an exception");
}
return result;
}
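  // Fire `callback_method` if the running total crossed one of the sorted
  // thresholds while moving from `low` to `high`. For example, with thresholds
  // {1 GiB, 2 GiB}, an allocation that moves the total from 0.9 GiB to 1.1 GiB
  // crosses the 1 GiB threshold and fires the callback once, while a later move
  // from 1.1 GiB to 1.5 GiB crosses no threshold and fires nothing.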
void check_for_threshold_callback(std::size_t low, std::size_t high,
std::vector<std::size_t> const &thresholds,
jmethodID callback_method, char const *callback_name,
std::size_t current_total) {
if (high >= thresholds.front() && low < thresholds.back()) {
// could use binary search, but assumption is threshold count is very small
auto it = std::find_if(thresholds.begin(), thresholds.end(),
[=](std::size_t t) -> bool { return low < t && high >= t; });
if (it != thresholds.end()) {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
env->CallVoidMethod(handler_obj, callback_method, current_total);
if (env->ExceptionCheck()) {
throw std::runtime_error("onAllocThreshold handler threw an exception");
}
}
}
}
std::pair<size_t, size_t> do_get_mem_info(rmm::cuda_stream_view stream) const override {
return resource->get_mem_info(stream);
}
protected:
JavaVM *jvm;
jobject handler_obj;
void *do_allocate(std::size_t num_bytes, rmm::cuda_stream_view stream) override {
std::size_t total_before;
void *result;
// a non-zero retry_count signifies that the `on_alloc_fail`
// callback is being invoked while re-attempting an allocation
// that had previously failed.
int retry_count = 0;
while (true) {
try {
total_before = tracker->get_total_allocated();
result = resource->allocate(num_bytes, stream);
break;
} catch (rmm::out_of_memory const &e) {
if (!on_alloc_fail(num_bytes, retry_count++)) {
throw;
}
}
}
auto total_after = tracker->get_total_allocated();
try {
check_for_threshold_callback(total_before, total_after, alloc_thresholds,
on_alloc_threshold_method, "onAllocThreshold", total_after);
} catch (std::exception const &e) {
      // Free the allocation, as the app will assume the exception means the memory was not allocated.
resource->deallocate(result, num_bytes, stream);
throw;
}
return result;
}
void do_deallocate(void *p, std::size_t size, rmm::cuda_stream_view stream) override {
auto total_before = tracker->get_total_allocated();
resource->deallocate(p, size, stream);
auto total_after = tracker->get_total_allocated();
check_for_threshold_callback(total_after, total_before, dealloc_thresholds,
on_dealloc_threshold_method, "onDeallocThreshold", total_after);
}
};
class java_debug_event_handler_memory_resource final : public java_event_handler_memory_resource {
public:
java_debug_event_handler_memory_resource(JNIEnv *env, jobject jhandler,
jlongArray jalloc_thresholds,
jlongArray jdealloc_thresholds,
device_memory_resource *resource_to_wrap,
base_tracking_resource_adaptor *tracker)
: java_event_handler_memory_resource(env, jhandler, jalloc_thresholds, jdealloc_thresholds,
resource_to_wrap, tracker) {
jclass cls = env->GetObjectClass(jhandler);
if (cls == nullptr) {
throw cudf::jni::jni_exception("class not found");
}
on_allocated_method = env->GetMethodID(cls, "onAllocated", "(J)V");
if (on_allocated_method == nullptr) {
throw cudf::jni::jni_exception("onAllocated method");
}
on_deallocated_method = env->GetMethodID(cls, "onDeallocated", "(J)V");
if (on_deallocated_method == nullptr) {
throw cudf::jni::jni_exception("onDeallocated method");
}
}
private:
jmethodID on_allocated_method;
jmethodID on_deallocated_method;
void on_allocated_callback(std::size_t num_bytes, rmm::cuda_stream_view stream) {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
env->CallVoidMethod(handler_obj, on_allocated_method, num_bytes);
if (env->ExceptionCheck()) {
throw std::runtime_error("onAllocated handler threw an exception");
}
}
void on_deallocated_callback(void *p, std::size_t size, rmm::cuda_stream_view stream) {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
env->CallVoidMethod(handler_obj, on_deallocated_method, size);
}
void *do_allocate(std::size_t num_bytes, rmm::cuda_stream_view stream) override {
void *result = java_event_handler_memory_resource::do_allocate(num_bytes, stream);
on_allocated_callback(num_bytes, stream);
return result;
}
void do_deallocate(void *p, std::size_t size, rmm::cuda_stream_view stream) override {
java_event_handler_memory_resource::do_deallocate(p, size, stream);
on_deallocated_callback(p, size, stream);
}
};
} // anonymous namespace
extern "C" {
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_initDefaultCudaDevice(JNIEnv *env, jclass clazz) {
  // make sure the CUDA device is set up in the context
cudaError_t cuda_status = cudaFree(0);
cudf::jni::jni_cuda_check(env, cuda_status);
int device_id;
cuda_status = cudaGetDevice(&device_id);
cudf::jni::jni_cuda_check(env, cuda_status);
// Now that RMM has successfully initialized, setup all threads calling
// cudf to use the same device RMM is using.
cudf::jni::set_cudf_device(device_id);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_cleanupDefaultCudaDevice(JNIEnv *env, jclass clazz) {
cudf::jni::set_cudf_device(cudaInvalidDeviceId);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_allocInternal(JNIEnv *env, jclass clazz, jlong size,
jlong stream) {
try {
cudf::jni::auto_set_device(env);
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource();
auto c_stream = rmm::cuda_stream_view(reinterpret_cast<cudaStream_t>(stream));
void *ret = mr->allocate(size, c_stream);
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_free(JNIEnv *env, jclass clazz, jlong ptr,
jlong size, jlong stream) {
try {
cudf::jni::auto_set_device(env);
rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource();
void *cptr = reinterpret_cast<void *>(ptr);
auto c_stream = rmm::cuda_stream_view(reinterpret_cast<cudaStream_t>(stream));
mr->deallocate(cptr, size, c_stream);
}
CATCH_STD(env, )
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_freeDeviceBuffer(JNIEnv *env, jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
rmm::device_buffer *cptr = reinterpret_cast<rmm::device_buffer *>(ptr);
delete cptr;
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_allocCudaInternal(JNIEnv *env, jclass clazz,
jlong size, jlong stream) {
try {
cudf::jni::auto_set_device(env);
void *ptr{nullptr};
RMM_CUDA_TRY_ALLOC(cudaMalloc(&ptr, size));
return reinterpret_cast<jlong>(ptr);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_freeCuda(JNIEnv *env, jclass clazz, jlong ptr,
jlong size, jlong stream) {
try {
cudf::jni::auto_set_device(env);
void *cptr = reinterpret_cast<void *>(ptr);
RMM_ASSERT_CUDA_SUCCESS(cudaFree(cptr));
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newCudaMemoryResource(JNIEnv *env, jclass clazz) {
try {
cudf::jni::auto_set_device(env);
auto ret = new rmm::mr::cuda_memory_resource();
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseCudaMemoryResource(JNIEnv *env, jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<rmm::mr::cuda_memory_resource *>(ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newManagedMemoryResource(JNIEnv *env,
jclass clazz) {
try {
cudf::jni::auto_set_device(env);
auto ret = new rmm::mr::managed_memory_resource();
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseManagedMemoryResource(JNIEnv *env,
jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<rmm::mr::managed_memory_resource *>(ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newPoolMemoryResource(JNIEnv *env, jclass clazz,
jlong child, jlong init,
jlong max) {
JNI_NULL_CHECK(env, child, "child is null", 0);
try {
cudf::jni::auto_set_device(env);
auto wrapped = reinterpret_cast<rmm::mr::device_memory_resource *>(child);
auto ret =
new rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>(wrapped, init, max);
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releasePoolMemoryResource(JNIEnv *env, jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr =
reinterpret_cast<rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> *>(ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newArenaMemoryResource(JNIEnv *env, jclass clazz,
jlong child, jlong init,
jboolean dump_on_oom) {
JNI_NULL_CHECK(env, child, "child is null", 0);
try {
cudf::jni::auto_set_device(env);
auto wrapped = reinterpret_cast<rmm::mr::device_memory_resource *>(child);
auto ret = new rmm::mr::arena_memory_resource<rmm::mr::device_memory_resource>(wrapped, init,
dump_on_oom);
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseArenaMemoryResource(JNIEnv *env, jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr =
reinterpret_cast<rmm::mr::arena_memory_resource<rmm::mr::device_memory_resource> *>(ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newCudaAsyncMemoryResource(JNIEnv *env,
jclass clazz, jlong init,
jlong release) {
try {
cudf::jni::auto_set_device(env);
auto ret = new rmm::mr::cuda_async_memory_resource(init, release);
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseCudaAsyncMemoryResource(JNIEnv *env,
jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<rmm::mr::cuda_async_memory_resource *>(ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newLimitingResourceAdaptor(JNIEnv *env,
jclass clazz,
jlong child, jlong limit,
jlong align) {
JNI_NULL_CHECK(env, child, "child is null", 0);
try {
cudf::jni::auto_set_device(env);
auto wrapped = reinterpret_cast<rmm::mr::device_memory_resource *>(child);
auto ret = new rmm::mr::limiting_resource_adaptor<rmm::mr::device_memory_resource>(
wrapped, limit, align);
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseLimitingResourceAdaptor(JNIEnv *env,
jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr =
reinterpret_cast<rmm::mr::limiting_resource_adaptor<rmm::mr::device_memory_resource> *>(
ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newLoggingResourceAdaptor(JNIEnv *env, jclass clazz,
jlong child, jint type,
jstring jpath,
jboolean auto_flush) {
JNI_NULL_CHECK(env, child, "child is null", 0);
try {
cudf::jni::auto_set_device(env);
auto wrapped = reinterpret_cast<rmm::mr::device_memory_resource *>(child);
switch (type) {
case 1: // File
{
cudf::jni::native_jstring path(env, jpath);
auto ret = new logging_resource_adaptor<rmm::mr::device_memory_resource>(
wrapped, path.get(), auto_flush);
return reinterpret_cast<jlong>(ret);
}
case 2: // stdout
{
auto ret = new logging_resource_adaptor<rmm::mr::device_memory_resource>(wrapped, std::cout,
auto_flush);
return reinterpret_cast<jlong>(ret);
}
case 3: // stderr
{
auto ret = new logging_resource_adaptor<rmm::mr::device_memory_resource>(wrapped, std::cerr,
auto_flush);
return reinterpret_cast<jlong>(ret);
}
default: throw std::logic_error("unsupported logging location type");
}
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseLoggingResourceAdaptor(JNIEnv *env,
jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr =
reinterpret_cast<rmm::mr::logging_resource_adaptor<rmm::mr::device_memory_resource> *>(ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newTrackingResourceAdaptor(JNIEnv *env,
jclass clazz,
jlong child,
jlong align) {
JNI_NULL_CHECK(env, child, "child is null", 0);
try {
cudf::jni::auto_set_device(env);
auto wrapped = reinterpret_cast<rmm::mr::device_memory_resource *>(child);
auto ret = new tracking_resource_adaptor<rmm::mr::device_memory_resource>(wrapped, align);
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseTrackingResourceAdaptor(JNIEnv *env,
jclass clazz,
jlong ptr) {
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<tracking_resource_adaptor<rmm::mr::device_memory_resource> *>(ptr);
delete mr;
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_nativeGetTotalBytesAllocated(JNIEnv *env,
jclass clazz,
jlong ptr) {
JNI_NULL_CHECK(env, ptr, "adaptor is null", 0);
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<tracking_resource_adaptor<rmm::mr::device_memory_resource> *>(ptr);
return mr->get_total_allocated();
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_nativeGetMaxTotalBytesAllocated(JNIEnv *env,
jclass clazz,
jlong ptr) {
JNI_NULL_CHECK(env, ptr, "adaptor is null", 0);
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<tracking_resource_adaptor<rmm::mr::device_memory_resource> *>(ptr);
return mr->get_max_total_allocated();
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_nativeResetScopedMaxTotalBytesAllocated(JNIEnv *env,
jclass clazz,
jlong ptr,
jlong init) {
JNI_NULL_CHECK(env, ptr, "adaptor is null", );
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<tracking_resource_adaptor<rmm::mr::device_memory_resource> *>(ptr);
mr->reset_scoped_max_total_allocated(init);
}
CATCH_STD(env, )
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_nativeGetScopedMaxTotalBytesAllocated(JNIEnv *env,
jclass clazz,
jlong ptr) {
JNI_NULL_CHECK(env, ptr, "adaptor is null", 0);
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<tracking_resource_adaptor<rmm::mr::device_memory_resource> *>(ptr);
return mr->get_scoped_max_total_allocated();
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Rmm_newEventHandlerResourceAdaptor(
JNIEnv *env, jclass, jlong child, jlong tracker, jobject handler_obj,
jlongArray jalloc_thresholds, jlongArray jdealloc_thresholds, jboolean enable_debug) {
JNI_NULL_CHECK(env, child, "child is null", 0);
JNI_NULL_CHECK(env, tracker, "tracker is null", 0);
try {
auto wrapped = reinterpret_cast<rmm::mr::device_memory_resource *>(child);
auto t =
reinterpret_cast<tracking_resource_adaptor<rmm::mr::device_memory_resource> *>(tracker);
if (enable_debug) {
auto ret = new java_debug_event_handler_memory_resource(env, handler_obj, jalloc_thresholds,
jdealloc_thresholds, wrapped, t);
return reinterpret_cast<jlong>(ret);
} else {
auto ret = new java_event_handler_memory_resource(env, handler_obj, jalloc_thresholds,
jdealloc_thresholds, wrapped, t);
return reinterpret_cast<jlong>(ret);
}
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_releaseEventHandlerResourceAdaptor(
JNIEnv *env, jclass clazz, jlong ptr, jboolean enable_debug) {
try {
cudf::jni::auto_set_device(env);
if (enable_debug) {
auto mr = reinterpret_cast<java_debug_event_handler_memory_resource *>(ptr);
delete mr;
} else {
auto mr = reinterpret_cast<java_event_handler_memory_resource *>(ptr);
delete mr;
}
}
CATCH_STD(env, )
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Rmm_setCurrentDeviceResourceInternal(JNIEnv *env,
jclass clazz,
jlong new_handle) {
try {
cudf::jni::auto_set_device(env);
auto mr = reinterpret_cast<rmm::mr::device_memory_resource *>(new_handle);
rmm::mr::set_current_device_resource(mr);
}
CATCH_STD(env, )
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/row_conversion.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cooperative_groups.h>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/sequence.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <type_traits>
#include "row_conversion.hpp"
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
#define ASYNC_MEMCPY_SUPPORTED
#endif
#if !defined(__CUDA_ARCH__) || defined(ASYNC_MEMCPY_SUPPORTED)
#include <cuda/barrier>
#endif // #if !defined(__CUDA_ARCH__) || defined(ASYNC_MEMCPY_SUPPORTED)
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
namespace {
constexpr auto JCUDF_ROW_ALIGNMENT = 8;
constexpr auto MAX_BATCH_SIZE = std::numeric_limits<cudf::size_type>::max();
// Number of rows each block processes in the two kernels. Tuned via nsight
constexpr auto NUM_STRING_ROWS_PER_BLOCK_TO_ROWS = 1024;
constexpr auto NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS = 64;
constexpr auto MIN_STRING_BLOCKS = 32;
constexpr auto MAX_STRING_BLOCKS = MAX_BATCH_SIZE;
constexpr auto NUM_WARPS_IN_BLOCK = 32;
} // anonymous namespace
// needed to suppress warning about cuda::barrier
#pragma nv_diag_suppress static_var_with_dynamic_init
using namespace cudf;
using detail::make_device_uvector_async;
using rmm::device_uvector;
#ifdef ASYNC_MEMCPY_SUPPORTED
using cuda::aligned_size_t;
#else
template <std::size_t> using aligned_size_t = size_t; // Local stub for cuda::aligned_size_t.
#endif // ASYNC_MEMCPY_SUPPORTED
namespace cudf {
namespace jni {
namespace detail {
/*
* This module converts data from row-major to column-major and from column-major to row-major. It
* is a transpose of the data of sorts, but there are a few complicating factors. They are spelled
* out below:
*
* Row Batches:
* The row data has to fit inside a cuDF column, which limits it to 2 gigs currently. The calling
* code attempts to keep the data size under 2 gigs, but due to padding this isn't always the case,
* so being able to break this up into multiple columns is necessary. Internally, this is referred
* to as the row batch, which is a group of rows that will fit into this 2 gig space requirement.
 * There is typically one of these batches, but there can be two.
*
* Async Memcpy:
 * The CUDA blocks are using memcpy_async, which allows the device to schedule memcpy operations
* and then wait on them to complete at a later time with a barrier. On Ampere or later hardware
* there is dedicated hardware to do this copy and on pre-Ampere it should generate the same code
* that a hand-rolled loop would generate, so performance should be the same or better than a
* hand-rolled kernel.
*
* Tile Info:
* Each CUDA block will work on a single tile info before exiting. This single tile consumes all
* available shared memory. The kernel reads data into shared memory and then back out from shared
* memory to device memory via memcpy_async. This kernel is completely memory bound.
*
* Batch Data:
* This structure contains all the row batches and some book-keeping data necessary for the batches
* such as row numbers for the batches.
*
* Tiles:
* The tile info describes a tile of data to process. In a GPU with 48KB this equates to about 221
* bytes in each direction of a table. The tiles are kept as square as possible to attempt to
 * coalesce memory operations. The taller a tile is, the better the column coalescing, but row
 * coalescing suffers. The wider a tile is, the better the row coalescing, but column coalescing
 * suffers. The code attempts to produce a square tile to balance the coalescing. It starts by
 * figuring out the optimal byte length and then adding columns to the data until the tile is too
 * large. Since rows are of different widths with different alignment requirements, this isn't typically
* exact. Once a width is found the tiles are generated vertically with that width and height and
* then the process repeats. This means all the tiles will be the same height, but will have
* different widths based on what columns they encompass. Tiles in a vertical row will all have the
* same dimensions.
*
* --------------------------------
* | 4 5.0f || True 8 3 1 |
* | 3 6.0f || False 3 1 1 |
* | 2 7.0f || True 7 4 1 |
* | 1 8.0f || False 2 5 1 |
* --------------------------------
* | 0 9.0f || True 6 7 1 |
* ...
*/
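/*
 * Worked example (illustrative; the actual column ordering is decided by the
 * calling code): a row of {int32, float32, int8} with validity lays out as
 * 4 + 4 + 1 = 9 bytes of fixed-width data plus 1 byte holding the three
 * validity bits, for 10 bytes total, which is then padded up to
 * JCUDF_ROW_ALIGNMENT (8) to give a 16-byte row.
 */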
/**
* @brief The CUDA blocks work on one tile_info struct of data.
* This structure defines the workspaces for the blocks.
*
*/
struct tile_info {
int start_col;
int start_row;
int end_col;
int end_row;
int batch_number;
__device__ inline size_type get_shared_row_size(size_type const *const col_offsets,
size_type const *const col_sizes) const {
    // This calculation is invalid if there are holes in the data, such as a variable-width column.
    // It is wrong in a safe way, in that it will report a row size larger than it should be, so we
    // are not losing data; we are just not as efficient with shared memory as we could be. This may
    // be a problem if the tile is computed without regard to variable-width offset/length sizes, in
    // that we could overrun shared memory.
return util::round_up_unsafe(col_offsets[end_col] + col_sizes[end_col] - col_offsets[start_col],
JCUDF_ROW_ALIGNMENT);
}
__device__ inline size_type num_cols() const { return end_col - start_col + 1; }
__device__ inline size_type num_rows() const { return end_row - start_row + 1; }
};
/**
* @brief Returning rows is done in a byte cudf column. This is limited in size by
* `size_type` and so output is broken into batches of rows that fit inside
* this limit.
*
*/
struct row_batch {
size_type num_bytes; // number of bytes in this batch
size_type row_count; // number of rows in the batch
device_uvector<size_type> row_offsets; // offsets column of output cudf column
};
/**
* @brief Holds information about the batches of data to be processed
*
*/
struct batch_data {
device_uvector<size_type> batch_row_offsets; // offsets to each row in incoming data
device_uvector<size_type> d_batch_row_boundaries; // row numbers for the start of each batch
std::vector<size_type>
batch_row_boundaries; // row numbers for the start of each batch: 0, 1500, 2700
std::vector<row_batch> row_batches; // information about each batch such as byte count
};
/**
* @brief builds row size information for tables that contain strings
*
* @param tbl table from which to compute row size information
* @param fixed_width_and_validity_size size of fixed-width and validity data in this table
* @param stream cuda stream on which to operate
* @return pair of device vector of size_types of the row sizes of the table and a device vector of
* offsets into the string column
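 *
 * For example, with fixed_width_and_validity_size == 12, a row whose two string
 * columns hold "hi" (2 bytes) and "world" (5 bytes) gets a row size of
 * round_up_unsafe(12 + 2 + 5, JCUDF_ROW_ALIGNMENT) == 24 bytes.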
*/
std::pair<rmm::device_uvector<size_type>, rmm::device_uvector<strings_column_view::offset_iterator>>
build_string_row_offsets(table_view const &tbl, size_type fixed_width_and_validity_size,
rmm::cuda_stream_view stream) {
auto const num_rows = tbl.num_rows();
rmm::device_uvector<size_type> d_row_sizes(num_rows, stream);
thrust::uninitialized_fill(rmm::exec_policy(stream), d_row_sizes.begin(), d_row_sizes.end(), 0);
auto d_offsets_iterators = [&]() {
std::vector<strings_column_view::offset_iterator> offsets_iterators;
auto offsets_iter = thrust::make_transform_iterator(
tbl.begin(), [](auto const &col) -> strings_column_view::offset_iterator {
if (!is_fixed_width(col.type())) {
CUDF_EXPECTS(col.type().id() == type_id::STRING, "only string columns are supported!");
return strings_column_view(col).offsets_begin();
} else {
return nullptr;
}
});
std::copy_if(offsets_iter, offsets_iter + tbl.num_columns(),
std::back_inserter(offsets_iterators),
[](auto const &offset_ptr) { return offset_ptr != nullptr; });
return make_device_uvector_async(offsets_iterators, stream,
rmm::mr::get_current_device_resource());
}();
auto const num_columns = static_cast<size_type>(d_offsets_iterators.size());
thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_columns * num_rows),
[d_offsets_iterators = d_offsets_iterators.data(), num_columns, num_rows,
d_row_sizes = d_row_sizes.data()] __device__(auto element_idx) {
auto const row = element_idx % num_rows;
auto const col = element_idx / num_rows;
auto const val =
d_offsets_iterators[col][row + 1] - d_offsets_iterators[col][row];
atomicAdd(&d_row_sizes[row], val);
});
// transform the row sizes to include fixed width size and alignment
thrust::transform(rmm::exec_policy(stream), d_row_sizes.begin(), d_row_sizes.end(),
d_row_sizes.begin(), [fixed_width_and_validity_size] __device__(auto row_size) {
return util::round_up_unsafe(fixed_width_and_validity_size + row_size,
JCUDF_ROW_ALIGNMENT);
});
return {std::move(d_row_sizes), std::move(d_offsets_iterators)};
}
/**
* @brief functor to return the offset of a row in a table with string columns
*
*/
struct string_row_offset_functor {
string_row_offset_functor(device_span<size_type const> d_row_offsets)
: d_row_offsets(d_row_offsets){};
__device__ inline size_type operator()(int row_number, int) const {
return d_row_offsets[row_number];
}
device_span<size_type const> d_row_offsets;
};
/**
* @brief functor to return the offset of a row in a table with only fixed-width columns
*
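 *
 * For example, with a 24-byte row size, operator()(10, 8) == (10 - 8) * 24 == 48:
 * row 10 starts 48 bytes into a batch whose first row is row 8. The string
 * variant above instead looks the offset up in a precomputed device array.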
*/
struct fixed_width_row_offset_functor {
fixed_width_row_offset_functor(size_type fixed_width_only_row_size)
: _fixed_width_only_row_size(fixed_width_only_row_size){};
__device__ inline size_type operator()(int row_number, int tile_row_start) const {
return (row_number - tile_row_start) * _fixed_width_only_row_size;
}
size_type _fixed_width_only_row_size;
};
/**
* @brief Copies data from row-based JCUDF format to column-based cudf format.
*
* This optimized version of the conversion is faster for fixed-width tables that do not have more
* than 100 columns.
*
* @param num_rows number of rows in the incoming table
* @param num_columns number of columns in the incoming table
* @param row_size length in bytes of each row
* @param input_offset_in_row offset to each row of data
* @param num_bytes total number of bytes in the incoming data
* @param output_data array of pointers to the output data
* @param output_nm array of pointers to the output null masks
 * @param input_data pointer to the incoming row data
*/
__global__ void
copy_from_rows_fixed_width_optimized(const size_type num_rows, const size_type num_columns,
const size_type row_size, const size_type *input_offset_in_row,
const size_type *num_bytes, int8_t **output_data,
bitmask_type **output_nm, const int8_t *input_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
  // For simplicity we will refer to this as a row_group.
// In practice we have found writing more than 4 columns of data per thread
// results in performance loss. As such we are using a 2 dimensional
// kernel in terms of threads, but not in terms of blocks. Columns are
// controlled by the y dimension (there is no y dimension in blocks). Rows
// are controlled by the x dimension (there are multiple blocks in the x
// dimension).
size_type const rows_per_group = blockDim.x;
size_type const row_group_start = blockIdx.x;
size_type const row_group_stride = gridDim.x;
size_type const row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
extern __shared__ int8_t shared_data[];
// Because we are copying fixed width only data and we stride the rows
// this thread will always start copying from shared data in the same place
int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
int8_t *row_vld_tmp = &row_tmp[input_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
for (auto row_group_index = row_group_start; row_group_index < row_group_end;
row_group_index += row_group_stride) {
// Step 1: Copy the data into shared memory
    // We know row_size is always a multiple of sizeof(int64_t) and aligned for int64_t access.
int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
int64_t const *long_input = reinterpret_cast<int64_t const *>(input_data);
auto const shared_output_index = threadIdx.x + (threadIdx.y * blockDim.x);
auto const shared_output_stride = blockDim.x * blockDim.y;
auto const row_index_end = std::min(num_rows, ((row_group_index + 1) * rows_per_group));
auto const num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
auto const shared_length = row_size * num_rows_in_group;
size_type const shared_output_end = shared_length / sizeof(int64_t);
auto const start_input_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
for (size_type shared_index = shared_output_index; shared_index < shared_output_end;
shared_index += shared_output_stride) {
long_shared[shared_index] = long_input[start_input_index + shared_index];
}
// Wait for all of the data to be in shared memory
__syncthreads();
    // Step 2: Copy the data back out
// Within the row group there should be 1 thread for each row. This is a
// requirement for launching the kernel
auto const row_index = (row_group_index * rows_per_group) + threadIdx.x;
// But we might not use all of the threads if the number of rows does not go
// evenly into the thread count. We don't want those threads to exit yet
// because we may need them to copy data in for the next row group.
uint32_t active_mask = __ballot_sync(0xffff'ffffu, row_index < num_rows);
if (row_index < num_rows) {
auto const col_index_start = threadIdx.y;
auto const col_index_stride = blockDim.y;
for (auto col_index = col_index_start; col_index < num_columns;
col_index += col_index_stride) {
auto const col_size = num_bytes[col_index];
int8_t const *col_tmp = &(row_tmp[input_offset_in_row[col_index]]);
int8_t *col_output = output_data[col_index];
switch (col_size) {
case 1: {
col_output[row_index] = *col_tmp;
break;
}
case 2: {
int16_t *short_col_output = reinterpret_cast<int16_t *>(col_output);
short_col_output[row_index] = *reinterpret_cast<const int16_t *>(col_tmp);
break;
}
case 4: {
int32_t *int_col_output = reinterpret_cast<int32_t *>(col_output);
int_col_output[row_index] = *reinterpret_cast<const int32_t *>(col_tmp);
break;
}
case 8: {
int64_t *long_col_output = reinterpret_cast<int64_t *>(col_output);
long_col_output[row_index] = *reinterpret_cast<const int64_t *>(col_tmp);
break;
}
default: {
auto const output_offset = col_size * row_index;
// TODO this should just not be supported for fixed width columns, but just in case...
for (auto b = 0; b < col_size; b++) {
col_output[b + output_offset] = col_tmp[b];
}
break;
}
}
bitmask_type *nm = output_nm[col_index];
int8_t *valid_byte = &row_vld_tmp[col_index / 8];
size_type byte_bit_offset = col_index % 8;
int predicate = *valid_byte & (1 << byte_bit_offset);
uint32_t bitmask = __ballot_sync(active_mask, predicate);
if (row_index % 32 == 0) {
nm[word_index(row_index)] = bitmask;
}
} // end column loop
} // end row copy
// wait for the row_group to be totally copied before starting on the next row group
__syncthreads();
}
}
__global__ void copy_to_rows_fixed_width_optimized(
const size_type start_row, const size_type num_rows, const size_type num_columns,
const size_type row_size, const size_type *output_offset_in_row, const size_type *num_bytes,
const int8_t **input_data, const bitmask_type **input_nm, int8_t *output_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// We do not support copying a subset of the columns in a row yet, so we don't
// currently support a row that is wider than shared memory.
  // For simplicity we will refer to this as a row_group.
// In practice we have found reading more than 4 columns of data per thread
// results in performance loss. As such we are using a 2 dimensional
// kernel in terms of threads, but not in terms of blocks. Columns are
// controlled by the y dimension (there is no y dimension in blocks). Rows
// are controlled by the x dimension (there are multiple blocks in the x
// dimension).
size_type rows_per_group = blockDim.x;
size_type row_group_start = blockIdx.x;
size_type row_group_stride = gridDim.x;
size_type row_group_end = (num_rows + rows_per_group - 1) / rows_per_group + 1;
extern __shared__ int8_t shared_data[];
// Because we are copying fixed width only data and we stride the rows
// this thread will always start copying to shared data in the same place
int8_t *row_tmp = &shared_data[row_size * threadIdx.x];
int8_t *row_vld_tmp =
&row_tmp[output_offset_in_row[num_columns - 1] + num_bytes[num_columns - 1]];
for (size_type row_group_index = row_group_start; row_group_index < row_group_end;
row_group_index += row_group_stride) {
// Within the row group there should be 1 thread for each row. This is a
// requirement for launching the kernel
size_type row_index = start_row + (row_group_index * rows_per_group) + threadIdx.x;
// But we might not use all of the threads if the number of rows does not go
// evenly into the thread count. We don't want those threads to exit yet
// because we may need them to copy data back out.
if (row_index < (start_row + num_rows)) {
size_type col_index_start = threadIdx.y;
size_type col_index_stride = blockDim.y;
for (size_type col_index = col_index_start; col_index < num_columns;
col_index += col_index_stride) {
size_type col_size = num_bytes[col_index];
int8_t *col_tmp = &(row_tmp[output_offset_in_row[col_index]]);
const int8_t *col_input = input_data[col_index];
switch (col_size) {
case 1: {
*col_tmp = col_input[row_index];
break;
}
case 2: {
const int16_t *short_col_input = reinterpret_cast<const int16_t *>(col_input);
*reinterpret_cast<int16_t *>(col_tmp) = short_col_input[row_index];
break;
}
case 4: {
const int32_t *int_col_input = reinterpret_cast<const int32_t *>(col_input);
*reinterpret_cast<int32_t *>(col_tmp) = int_col_input[row_index];
break;
}
case 8: {
const int64_t *long_col_input = reinterpret_cast<const int64_t *>(col_input);
*reinterpret_cast<int64_t *>(col_tmp) = long_col_input[row_index];
break;
}
default: {
size_type input_offset = col_size * row_index;
// TODO this should just not be supported for fixed width columns, but just in case...
for (size_type b = 0; b < col_size; b++) {
col_tmp[b] = col_input[b + input_offset];
}
break;
}
}
// atomicOr only works on 32 bit or 64 bit aligned values, and not byte aligned
// so we have to rewrite the addresses to make sure that it is 4 byte aligned
int8_t *valid_byte = &row_vld_tmp[col_index / 8];
size_type byte_bit_offset = col_index % 8;
uint64_t fixup_bytes = reinterpret_cast<uint64_t>(valid_byte) % 4;
int32_t *valid_int = reinterpret_cast<int32_t *>(valid_byte - fixup_bytes);
size_type int_bit_offset = byte_bit_offset + (fixup_bytes * 8);
// Now copy validity for the column
if (input_nm[col_index]) {
if (bit_is_set(input_nm[col_index], row_index)) {
atomicOr_block(valid_int, 1 << int_bit_offset);
} else {
atomicAnd_block(valid_int, ~(1 << int_bit_offset));
}
} else {
// It is valid so just set the bit
atomicOr_block(valid_int, 1 << int_bit_offset);
}
} // end column loop
} // end row copy
// wait for the row_group to be totally copied into shared memory
__syncthreads();
// Step 2: Copy the data back out
    // We know row_size is always a multiple of sizeof(int64_t) and aligned for int64_t access.
int64_t *long_shared = reinterpret_cast<int64_t *>(shared_data);
int64_t *long_output = reinterpret_cast<int64_t *>(output_data);
size_type shared_input_index = threadIdx.x + (threadIdx.y * blockDim.x);
size_type shared_input_stride = blockDim.x * blockDim.y;
size_type row_index_end = ((row_group_index + 1) * rows_per_group);
if (row_index_end > num_rows) {
row_index_end = num_rows;
}
size_type num_rows_in_group = row_index_end - (row_group_index * rows_per_group);
size_type shared_length = row_size * num_rows_in_group;
size_type shared_input_end = shared_length / sizeof(int64_t);
size_type start_output_index = (row_size * row_group_index * rows_per_group) / sizeof(int64_t);
for (size_type shared_index = shared_input_index; shared_index < shared_input_end;
shared_index += shared_input_stride) {
long_output[start_output_index + shared_index] = long_shared[shared_index];
}
__syncthreads();
// Go for the next round
}
}
#ifdef ASYNC_MEMCPY_SUPPORTED
#define MEMCPY(dst, src, size, barrier) cuda::memcpy_async(dst, src, size, barrier)
#else
#define MEMCPY(dst, src, size, barrier) memcpy(dst, src, size)
#endif // ASYNC_MEMCPY_SUPPORTED
/**
* @brief copy data from cudf columns into JCUDF format, which is row-based
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile shared memory amount each `tile_info` is using
 * @param tile_infos span of `tile_info` structs that define the work
* @param input_data pointer to raw table data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_to_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile,
device_span<const tile_info> tile_infos, const int8_t **input_data,
const size_type *col_sizes, const size_type *col_offsets,
RowOffsetFunctor row_offsets, size_type const *batch_row_boundaries,
int8_t **output_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited we copy a subset of the rows at a time.
// This has been broken up for us in the tile_info struct, so we don't have
// any calculation to do here, but it is important to note.
auto const group = cooperative_groups::this_thread_block();
auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
extern __shared__ int8_t shared_data[];
#ifdef ASYNC_MEMCPY_SUPPORTED
__shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier;
if (group.thread_rank() == 0) {
init(&tile_barrier, group.size());
}
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
auto const tile = tile_infos[blockIdx.x];
auto const num_tile_cols = tile.num_cols();
auto const num_tile_rows = tile.num_rows();
auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
auto const starting_column_offset = col_offsets[tile.start_col];
  // To do the copy we either need n column copies followed by m element copies, OR m element
  // copies followed by r row copies. When going from column-major to row-major it is much easier
  // to copy by elements first; otherwise we would need a running total of the column sizes for our
  // tile, which isn't readily available. This makes it more appealing to copy element-wise from
  // input data into shared memory matching the end layout, and then do row-based memcpys out.
// read each column across the tile
// each warp takes a column with each thread of a warp taking a row this is done with cooperative
// groups where each column is chosen by the tiled partition and each thread in that partition
// works on a row
for (int relative_col = warp.meta_group_rank(); relative_col < num_tile_cols;
relative_col += warp.meta_group_size()) {
auto const absolute_col = relative_col + tile.start_col;
auto const col_size = col_sizes[absolute_col];
auto const col_offset = col_offsets[absolute_col];
auto const relative_col_offset = col_offset - starting_column_offset;
auto const col_ptr = input_data[absolute_col];
if (col_ptr == nullptr) {
// variable-width data column
continue;
}
for (int relative_row = warp.thread_rank(); relative_row < num_tile_rows;
relative_row += warp.size()) {
auto const absolute_row = relative_row + tile.start_row;
auto const shared_offset = relative_row * tile_row_size + relative_col_offset;
auto const input_src = col_ptr + col_size * absolute_row;
// copy the element from global memory
switch (col_size) {
case 2: {
const int16_t *short_col_input = reinterpret_cast<const int16_t *>(input_src);
*reinterpret_cast<int16_t *>(&shared_data[shared_offset]) = *short_col_input;
break;
}
case 4: {
const int32_t *int_col_input = reinterpret_cast<const int32_t *>(input_src);
*reinterpret_cast<int32_t *>(&shared_data[shared_offset]) = *int_col_input;
break;
}
case 8: {
const int64_t *long_col_input = reinterpret_cast<const int64_t *>(input_src);
*reinterpret_cast<int64_t *>(&shared_data[shared_offset]) = *long_col_input;
break;
}
case 1: shared_data[shared_offset] = *input_src; break;
        default: {
          // fall back to a byte-wise copy for element sizes without a dedicated case
          for (int i = 0; i < col_size; ++i) {
            shared_data[shared_offset + i] = input_src[i];
          }
          break;
        }
}
}
}
auto const tile_output_buffer = output_data[tile.batch_number];
auto const row_batch_start = tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
// no async copies above waiting on the barrier, so we sync the group here to ensure all copies to
// shared memory are completed before copying data out
group.sync();
// each warp takes a row
for (int copy_row = warp.meta_group_rank(); copy_row < tile.num_rows();
copy_row += warp.meta_group_size()) {
auto const src = &shared_data[tile_row_size * copy_row];
auto const dst = tile_output_buffer + row_offsets(copy_row + tile.start_row, row_batch_start) +
starting_column_offset;
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::memcpy_async(warp, dst, src, tile_row_size, tile_barrier);
#else
for (int b = warp.thread_rank(); b < tile_row_size; b += warp.size()) {
dst[b] = src[b];
}
#endif
}
#ifdef ASYNC_MEMCPY_SUPPORTED
// wait on the last copies to complete
tile_barrier.arrive_and_wait();
#else
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor iterator that gives the size of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the output data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointer to output data, partitioned by data size
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
 * @param input_nm pointers to the input null masks, one per column
*
*/
template <typename RowOffsetFunctor>
__global__ void
copy_validity_to_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
size_type const *batch_row_boundaries, int8_t **output_data,
const size_type validity_offset, device_span<const tile_info> tile_infos,
const bitmask_type **input_nm) {
extern __shared__ int8_t shared_data[];
  // Each thread of a warp reads a single int32 of validity, so we read 128 bytes, then ballot_sync
  // the bits and write the result to shared memory. After shared memory is filled, we memcpy it
  // out in a blob.
auto const group = cooperative_groups::this_thread_block();
auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
#ifdef ASYNC_MEMCPY_SUPPORTED
// Initialize cuda barriers for each tile.
__shared__ cuda::barrier<cuda::thread_scope_block> shared_tile_barrier;
if (group.thread_rank() == 0) {
init(&shared_tile_barrier, group.size());
}
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
auto tile = tile_infos[blockIdx.x];
auto const num_tile_cols = tile.num_cols();
auto const num_tile_rows = tile.num_rows();
auto const threads_per_warp = warp.size();
auto const rows_per_read = cudf::detail::size_in_bits<bitmask_type>();
auto const num_sections_x = util::div_rounding_up_unsafe(num_tile_cols, threads_per_warp);
auto const num_sections_y = util::div_rounding_up_unsafe(num_tile_rows, rows_per_read);
auto const validity_data_row_length = util::round_up_unsafe(
util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
auto const total_sections = num_sections_x * num_sections_y;
// the tile is divided into sections. A warp operates on a section at a time.
for (int my_section_idx = warp.meta_group_rank(); my_section_idx < total_sections;
my_section_idx += warp.meta_group_size()) {
// convert to rows and cols
auto const section_x = my_section_idx % num_sections_x;
auto const section_y = my_section_idx / num_sections_x;
auto const relative_col = section_x * threads_per_warp + warp.thread_rank();
auto const relative_row = section_y * rows_per_read;
auto const absolute_col = relative_col + tile.start_col;
auto const absolute_row = relative_row + tile.start_row;
auto const participating = absolute_col < num_columns && absolute_row < num_rows;
auto const participation_mask = __ballot_sync(0xFFFF'FFFFu, participating);
if (participating) {
auto my_data = input_nm[absolute_col] != nullptr ?
input_nm[absolute_col][word_index(absolute_row)] :
std::numeric_limits<uint32_t>::max();
      // every participating thread in the warp has 4 bytes, but it is column-major data and we
      // need it row-major, so we shuffle the bits around with ballot_sync to make the bytes we
      // actually write.
bitmask_type dw_mask = 0x1;
for (int i = 0; i < threads_per_warp && relative_row + i < num_rows; ++i, dw_mask <<= 1) {
auto validity_data = __ballot_sync(participation_mask, my_data & dw_mask);
// lead thread in each warp writes data
auto const validity_write_offset =
validity_data_row_length * (relative_row + i) + (relative_col / CHAR_BIT);
if (warp.thread_rank() == 0) {
*reinterpret_cast<bitmask_type *>(&shared_data[validity_write_offset]) = validity_data;
}
}
}
}
auto const output_data_base =
output_data[tile.batch_number] + validity_offset + tile.start_col / CHAR_BIT;
// each warp copies a row at a time
auto const row_bytes = util::div_rounding_up_unsafe(num_tile_cols, CHAR_BIT);
auto const row_batch_start = tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
// make sure entire tile has finished copy
// Note that this was copied from above just under the for loop due to nsight complaints about
// divergent threads
group.sync();
for (int relative_row = warp.meta_group_rank(); relative_row < num_tile_rows;
relative_row += warp.meta_group_size()) {
auto const src = &shared_data[validity_data_row_length * relative_row];
auto const dst = output_data_base + row_offsets(relative_row + tile.start_row, row_batch_start);
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::memcpy_async(warp, dst, src, row_bytes, shared_tile_barrier);
#else
for (int b = warp.thread_rank(); b < row_bytes; b += warp.size()) {
dst[b] = src[b];
}
#endif
}
#ifdef ASYNC_MEMCPY_SUPPORTED
// wait for tile of data to arrive
shared_tile_barrier.arrive_and_wait();
#else
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief kernel to copy string data to JCUDF row format
*
* @tparam RowOffsetFunctor iterator for row offsets into the destination data
* @param num_rows number of rows in this portion of the table
* @param num_variable_columns number of columns of variable-width data
* @param variable_input_data variable width data column pointers
* @param variable_col_output_offsets output offset information for variable-width columns
* @param variable_col_offsets input offset information for variable-width columns
* @param fixed_width_row_size offset to variable-width data in a row
* @param row_offsets offsets for each row in output data
* @param batch_row_offset row start for this batch
* @param output_data pointer to output data for this batch
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_strings_to_rows(size_type const num_rows, size_type const num_variable_columns,
int8_t const **variable_input_data,
size_type const *variable_col_output_offsets,
size_type const **variable_col_offsets,
size_type fixed_width_row_size, RowOffsetFunctor row_offsets,
size_type const batch_row_offset, int8_t *output_data) {
// Each block will take a group of rows controlled by NUM_STRING_ROWS_PER_BLOCK_TO_ROWS. Each
// warp will copy a row at a time. The lead thread of each warp first walks the column data and
// fills out the offset/length information for the column. Then all threads of the warp
// participate in the memcpy of the string data.
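// Illustrative layout sketch (not part of the algorithm): for a row with two string columns
// holding "abc" (3 bytes) and "hello" (5 bytes), this kernel writes the pairs
// (fixed_width_row_size, 3) and (fixed_width_row_size + 3, 5) into the fixed-width region and
// copies the 8 character bytes back-to-back immediately after the fixed-width data of the row.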
auto const my_block = cooperative_groups::this_thread_block();
auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(my_block);
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::barrier<cuda::thread_scope_block> block_barrier;
#endif
auto const start_row =
blockIdx.x * NUM_STRING_ROWS_PER_BLOCK_TO_ROWS + warp.meta_group_rank() + batch_row_offset;
auto const end_row =
std::min(num_rows, static_cast<size_type>(start_row + NUM_STRING_ROWS_PER_BLOCK_TO_ROWS));
for (int row = start_row; row < end_row; row += warp.meta_group_size()) {
auto offset = fixed_width_row_size; // initial offset to variable-width data
auto const base_row_offset = row_offsets(row, 0);
for (int col = 0; col < num_variable_columns; ++col) {
auto const string_start_offset = variable_col_offsets[col][row];
auto const string_length = variable_col_offsets[col][row + 1] - string_start_offset;
if (warp.thread_rank() == 0) {
// write the offset/length to column
uint32_t *output_dest = reinterpret_cast<uint32_t *>(
&output_data[base_row_offset + variable_col_output_offsets[col]]);
output_dest[0] = offset;
output_dest[1] = string_length;
}
auto string_output_dest = &output_data[base_row_offset + offset];
auto string_output_src = &variable_input_data[col][string_start_offset];
warp.sync();
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::memcpy_async(warp, string_output_dest, string_output_src, string_length, block_barrier);
#else
for (int c = warp.thread_rank(); c < string_length; c += warp.size()) {
string_output_dest[c] = string_output_src[c];
}
#endif
offset += string_length;
}
}
}
/**
* @brief copy data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor functor that gives the offset of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_data pointers to column data
* @param col_sizes array of sizes for each element in a column - one per column
* @param col_offsets offset into input data row for each column's start
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void copy_from_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
size_type const *batch_row_boundaries, int8_t **output_data,
const size_type *col_sizes, const size_type *col_offsets,
device_span<const tile_info> tile_infos, const int8_t *input_data) {
// We are going to copy the data in two passes.
// The first pass copies a chunk of data into shared memory.
// The second pass copies that chunk from shared memory out to the final location.
// Because shared memory is limited, we copy a subset of the rows at a time. This has been broken
// up for us in the tile_info struct, so we don't have any calculation to do here, but it is
// important to note. Only the row data itself is staged through shared memory; col_sizes and
// col_offsets are read directly from global memory for each tile.
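// Illustrative example: for a tile covering columns 2..5 and rows 0..95, pass 1 stages 96
// packed rows, each of tile.get_shared_row_size(...) bytes, in shared memory; pass 2 then
// scatters each column's elements from the staged rows into that column's contiguous output
// buffer.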
auto const group = cooperative_groups::this_thread_block();
auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
extern __shared__ int8_t shared[];
#ifdef ASYNC_MEMCPY_SUPPORTED
// Initialize cuda barriers for each tile.
__shared__ cuda::barrier<cuda::thread_scope_block> tile_barrier;
if (group.thread_rank() == 0) {
init(&tile_barrier, group.size());
}
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
{
auto const fetch_tile = tile_infos[blockIdx.x];
auto const fetch_tile_start_row = fetch_tile.start_row;
auto const starting_col_offset = col_offsets[fetch_tile.start_col];
auto const fetch_tile_row_size = fetch_tile.get_shared_row_size(col_offsets, col_sizes);
auto const row_batch_start =
fetch_tile.batch_number == 0 ? 0 : batch_row_boundaries[fetch_tile.batch_number];
for (int absolute_row = warp.meta_group_rank() + fetch_tile.start_row;
absolute_row <= fetch_tile.end_row; absolute_row += warp.meta_group_size()) {
warp.sync();
auto shared_offset = (absolute_row - fetch_tile_start_row) * fetch_tile_row_size;
auto dst = &shared[shared_offset];
auto src = &input_data[row_offsets(absolute_row, row_batch_start) + starting_col_offset];
// copy the data
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::memcpy_async(warp, dst, src, fetch_tile_row_size, tile_barrier);
#else
for (int b = warp.thread_rank(); b < fetch_tile_row_size; b += warp.size()) {
dst[b] = src[b];
}
#endif
}
}
{
auto const tile = tile_infos[blockIdx.x];
auto const rows_in_tile = tile.num_rows();
auto const cols_in_tile = tile.num_cols();
auto const tile_row_size = tile.get_shared_row_size(col_offsets, col_sizes);
#ifdef ASYNC_MEMCPY_SUPPORTED
// ensure our data is ready
tile_barrier.arrive_and_wait();
#else
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
// Now we copy from shared memory to the final destination. The data is laid out in rows in
// shared memory, so the reads for a column will be "vertical". Because of this and the
// different sizes for each column, this portion is handled on a row/column basis: each thread
// of a warp strides over the rows while the warps of the block stride over the columns, which
// keeps all threads busy even when there are more threads than rows.
for (int relative_row = warp.thread_rank(); relative_row < rows_in_tile;
relative_row += warp.size()) {
auto const absolute_row = relative_row + tile.start_row;
auto const shared_memory_row_offset = tile_row_size * relative_row;
for (int relative_col = warp.meta_group_rank(); relative_col < cols_in_tile;
relative_col += warp.meta_group_size()) {
auto const absolute_col = relative_col + tile.start_col;
auto const shared_memory_offset =
col_offsets[absolute_col] - col_offsets[tile.start_col] + shared_memory_row_offset;
auto const column_size = col_sizes[absolute_col];
int8_t *shmem_src = &shared[shared_memory_offset];
int8_t *dst = &output_data[absolute_col][absolute_row * column_size];
MEMCPY(dst, shmem_src, column_size, tile_barrier);
}
}
}
#ifdef ASYNC_MEMCPY_SUPPORTED
// wait on the last copies to complete
tile_barrier.arrive_and_wait();
#else
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copy validity data from row-based format to cudf columns
*
* @tparam RowOffsetFunctor functor that gives the offset of a specific row of the table.
* @param num_rows total number of rows in the table
* @param num_columns total number of columns in the table
* @param shmem_used_per_tile amount of shared memory that is used by a tile
* @param row_offsets offset to the first column of a specific row in the input data
* @param batch_row_boundaries row numbers for batch starts
* @param output_nm pointers to null masks for columns
* @param validity_offsets offset into input data row for validity data
* @param tile_infos information about the tiles of work
* @param input_data pointer to input data
*
*/
template <typename RowOffsetFunctor>
__global__ void
copy_validity_from_rows(const size_type num_rows, const size_type num_columns,
const size_type shmem_used_per_tile, RowOffsetFunctor row_offsets,
size_type const *batch_row_boundaries, bitmask_type **output_nm,
const size_type validity_offset, device_span<const tile_info> tile_infos,
const int8_t *input_data) {
extern __shared__ int8_t shared[];
using cudf::detail::warp_size;
// Each thread of a warp reads a single byte of validity, so a warp reads 32 bytes. We then
// ballot_sync the bits and write the result to shared memory; once shared memory is filled we
// memcpy it out in a blob. We may eventually need knobs for the number of rows vs columns to
// balance reads and writes.
// C0 C1 C2 C3 C4 C5 C6 C7
// R0 1 0 1 0 0 1 1 0 <-- thread 0 reads byte r0
// R1 1 1 1 1 1 1 1 0 <-- thread 1 reads byte r1
// R2 0 0 1 0 0 1 1 0 <-- thread 2 reads byte r2
// ...
// R31 1 1 1 1 1 1 1 1 <-- thread 31 reads byte r31
// ^
// | 1 bit of each input byte, by column, are swizzled into a single 32 bit word via
// __ballot_sync, representing 32 rows of that column.
auto const group = cooperative_groups::this_thread_block();
auto const warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(group);
#ifdef ASYNC_MEMCPY_SUPPORTED
// Initialize cuda barriers for each tile.
__shared__ cuda::barrier<cuda::thread_scope_block> shared_tile_barrier;
if (group.thread_rank() == 0) {
init(&shared_tile_barrier, group.size());
}
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
auto const tile = tile_infos[blockIdx.x];
auto const tile_start_col = tile.start_col;
auto const tile_start_row = tile.start_row;
auto const num_tile_cols = tile.num_cols();
auto const num_tile_rows = tile.num_rows();
auto const threads_per_warp = warp.size();
auto const cols_per_read = CHAR_BIT;
auto const rows_per_read = static_cast<size_type>(threads_per_warp);
auto const num_sections_x = util::div_rounding_up_safe(num_tile_cols, cols_per_read);
auto const num_sections_y = util::div_rounding_up_safe(num_tile_rows, rows_per_read);
auto const validity_data_col_length = num_sections_y * 4; // words to bytes
auto const total_sections = num_sections_x * num_sections_y;
// the tile is divided into sections. A warp operates on a section at a time.
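// Illustrative example: a tile of 64 columns x 128 rows yields num_sections_x =
// ceil(64 / 8) = 8 and num_sections_y = ceil(128 / 32) = 4, so 32 sections per tile and
// validity_data_col_length = 4 * 4 = 16 staged bytes per column.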
for (int my_section_idx = warp.meta_group_rank(); my_section_idx < total_sections;
my_section_idx += warp.meta_group_size()) {
// convert section to row and col
auto const section_x = my_section_idx % num_sections_x;
auto const section_y = my_section_idx / num_sections_x;
auto const relative_col = section_x * cols_per_read;
auto const relative_row = section_y * rows_per_read + warp.thread_rank();
auto const absolute_col = relative_col + tile_start_col;
auto const absolute_row = relative_row + tile_start_row;
auto const row_batch_start =
tile.batch_number == 0 ? 0 : batch_row_boundaries[tile.batch_number];
auto const participation_mask = __ballot_sync(0xFFFF'FFFFu, absolute_row < num_rows);
if (absolute_row < num_rows) {
auto const my_byte = input_data[row_offsets(absolute_row, row_batch_start) + validity_offset +
(absolute_col / cols_per_read)];
// Every thread participating in the warp has a byte, but it is row-based data and we need it
// column-based. So we shuffle the bits around to build the bytes we actually write.
for (int i = 0, byte_mask = 0x1; (i < cols_per_read) && ((relative_col + i) < num_columns);
++i, byte_mask <<= 1) {
auto const validity_data = __ballot_sync(participation_mask, my_byte & byte_mask);
// lead thread in each warp writes data
if (warp.thread_rank() == 0) {
auto const validity_write_offset =
validity_data_col_length * (relative_col + i) + relative_row / cols_per_read;
*reinterpret_cast<bitmask_type *>(&shared[validity_write_offset]) = validity_data;
}
}
}
}
// now memcpy the shared memory out to the final destination
auto const col_words = util::div_rounding_up_unsafe(num_tile_rows, CHAR_BIT * 4);
// make sure entire tile has finished copy
group.sync();
for (int relative_col = warp.meta_group_rank(); relative_col < num_tile_cols;
relative_col += warp.meta_group_size()) {
auto const absolute_col = relative_col + tile_start_col;
auto dst = output_nm[absolute_col] + word_index(tile_start_row);
auto const src =
reinterpret_cast<bitmask_type *>(&shared[validity_data_col_length * relative_col]);
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::memcpy_async(warp, dst, src, aligned_size_t<4>(validity_data_col_length),
shared_tile_barrier);
#else
for (int b = warp.thread_rank(); b < col_words; b += warp.size()) {
dst[b] = src[b];
}
#endif
}
#ifdef ASYNC_MEMCPY_SUPPORTED
// wait for tile of data to arrive
shared_tile_barrier.arrive_and_wait();
#else
group.sync();
#endif // ASYNC_MEMCPY_SUPPORTED
}
/**
* @brief copies string data from jcudf row format to cudf columns
*
* @tparam RowOffsetFunctor functor for row offsets into the input row data
* @param row_offsets offsets for each row in input data
* @param string_row_offsets offset data into jcudf row data for each string
* @param string_lengths length of each incoming string in each column
* @param string_column_offsets offset column data for cudf column
* @param string_col_data output cudf string column data
* @param row_data jcudf row data
* @param num_rows number of rows in data
* @param num_string_columns number of string columns in the table
*/
template <typename RowOffsetFunctor>
__global__ void copy_strings_from_rows(RowOffsetFunctor row_offsets, int32_t **string_row_offsets,
int32_t **string_lengths, size_type **string_column_offsets,
char **string_col_data, int8_t const *row_data,
size_type const num_rows,
size_type const num_string_columns) {
// Each warp takes a tile, which is a single column and up to ROWS_PER_BLOCK rows. A tile will
// not wrap around the bottom of the table. The warp copies the strings for each row in the
// tile, traversing in row-major order to coalesce the offset and size reads.
auto my_block = cooperative_groups::this_thread_block();
auto warp = cooperative_groups::tiled_partition<cudf::detail::warp_size>(my_block);
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::barrier<cuda::thread_scope_block> block_barrier;
#endif
// workaround for not being able to take a reference to a constexpr host variable
auto const ROWS_PER_BLOCK = NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS;
auto const tiles_per_col = util::div_rounding_up_unsafe(num_rows, ROWS_PER_BLOCK);
auto const starting_tile = blockIdx.x * warp.meta_group_size() + warp.meta_group_rank();
auto const num_tiles = tiles_per_col * num_string_columns;
auto const tile_stride = warp.meta_group_size() * gridDim.x;
// Each warp will copy strings in its tile. This is handled by all the threads of a warp passing
// the same parameters to memcpy_async and all threads in the warp participating in the copy.
for (auto my_tile = starting_tile; my_tile < num_tiles; my_tile += tile_stride) {
auto const starting_row = (my_tile % tiles_per_col) * ROWS_PER_BLOCK;
auto const col = my_tile / tiles_per_col;
auto const str_len = string_lengths[col];
auto const str_row_off = string_row_offsets[col];
auto const str_col_off = string_column_offsets[col];
auto str_col_data = string_col_data[col];
for (int row = starting_row; row < starting_row + ROWS_PER_BLOCK && row < num_rows; ++row) {
auto const src = &row_data[row_offsets(row, 0) + str_row_off[row]];
auto dst = &str_col_data[str_col_off[row]];
#ifdef ASYNC_MEMCPY_SUPPORTED
cuda::memcpy_async(warp, dst, src, str_len[row], block_barrier);
#else
for (int c = warp.thread_rank(); c < str_len[row]; c += warp.size()) {
dst[c] = src[c];
}
#endif
}
}
}
/**
* @brief Calculate the dimensions of the kernel for fixed width only columns.
*
* @param [in] num_columns the number of columns being copied.
* @param [in] num_rows the number of rows being copied.
* @param [in] size_per_row the size each row takes up when padded.
* @param [out] blocks the size of the blocks for the kernel
* @param [out] threads the size of the threads for the kernel
* @return the size in bytes of shared memory needed for each block.
*/
static int calc_fixed_width_kernel_dims(const size_type num_columns, const size_type num_rows,
const size_type size_per_row, dim3 &blocks, dim3 &threads) {
// We have found speed degrades when a thread handles more than 4 columns.
// Each block is 2 dimensional. The y dimension indicates the columns.
// We limit this to 32 threads in the y dimension so we can still
// have at least 32 threads in the x dimension (1 warp) which should
// result in better coalescing of memory operations. We also
// want to guarantee that we are processing a multiple of 32 threads
// in the x dimension because we use atomic operations at the block
// level when writing validity data out to main memory, and that would
// need to change if we split a word of validity data between blocks.
int const y_block_size = min(util::div_rounding_up_safe(num_columns, 4), 32);
int const x_possible_block_size = 1024 / y_block_size;
// 48KB is the default setting for shared memory per block according to the CUDA documentation.
// If someone configures the GPU to only have 16 KB this might not work.
int const max_shared_size = 48 * 1024;
// If we don't have enough shared memory there is no point in having more threads per block;
// they would just sit idle.
auto const max_block_size = std::min(x_possible_block_size, max_shared_size / size_per_row);
// Make sure that the x dimension is a multiple of 32. This not only helps coalesce memory
// access, it also lets us do a ballot_sync for validity to write the data back out at the warp
// level. If x is a multiple of 32 then each thread in the y dimension is associated with one or
// more warps, and those should correspond to the validity words directly.
int const block_size = (max_block_size / 32) * 32;
CUDF_EXPECTS(block_size != 0, "Row size is too large to fit in shared memory");
// The maximum number of blocks supported in the x dimension is 2 ^ 31 - 1, but in practice
// having too many can cause some overhead that is not fully understood. Experimentally, as few
// as 600 blocks appear able to saturate memory on a V100, so this limit is an order of
// magnitude higher to try and future-proof it a bit.
int const num_blocks = std::clamp((num_rows + block_size - 1) / block_size, 1, 10240);
blocks.x = num_blocks;
blocks.y = 1;
blocks.z = 1;
threads.x = block_size;
threads.y = y_block_size;
threads.z = 1;
return size_per_row * block_size;
}
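// Worked example for calc_fixed_width_kernel_dims (illustrative only): with num_columns = 10,
// num_rows = 1'000'000 and size_per_row = 64, y_block_size = min(ceil(10 / 4), 32) = 3,
// x_possible_block_size = 1024 / 3 = 341, max_block_size = min(341, 48 * 1024 / 64) = 341, and
// block_size = (341 / 32) * 32 = 320. That yields blocks = (3125, 1, 1), threads = (320, 3, 1),
// and 64 * 320 = 20480 bytes of shared memory per block.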
/**
 * When converting to rows it is possible that the size of the table is too big to fit
 * in a single column. This creates an output column for a subset of the rows in the table,
 * starting at start_row and containing the next num_rows rows. Most of the parameters passed
 * into this function are common between runs and should be calculated once.
*/
static std::unique_ptr<column> fixed_width_convert_to_rows(
const size_type start_row, const size_type num_rows, const size_type num_columns,
const size_type size_per_row, rmm::device_uvector<size_type> &column_start,
rmm::device_uvector<size_type> &column_size, rmm::device_uvector<const int8_t *> &input_data,
rmm::device_uvector<const bitmask_type *> &input_nm, const scalar &zero,
const scalar &scalar_size_per_row, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
int64_t const total_allocation = size_per_row * num_rows;
// We made a mistake in the split somehow
CUDF_EXPECTS(total_allocation < std::numeric_limits<size_type>::max(),
"Table is too large to fit!");
// Allocate and set the offsets row for the byte array
std::unique_ptr<column> offsets =
cudf::detail::sequence(num_rows + 1, zero, scalar_size_per_row, stream, mr);
std::unique_ptr<column> data =
make_numeric_column(data_type(type_id::INT8), static_cast<size_type>(total_allocation),
mask_state::UNALLOCATED, stream, mr);
dim3 blocks;
dim3 threads;
int shared_size =
detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
copy_to_rows_fixed_width_optimized<<<blocks, threads, shared_size, stream.value()>>>(
start_row, num_rows, num_columns, size_per_row, column_start.data(), column_size.data(),
input_data.data(), input_nm.data(), data->mutable_view().data<int8_t>());
return make_lists_column(num_rows, std::move(offsets), std::move(data), 0,
rmm::device_buffer{0, stream, mr}, stream, mr);
}
static inline bool are_all_fixed_width(std::vector<data_type> const &schema) {
return std::all_of(schema.begin(), schema.end(),
[](const data_type &t) { return is_fixed_width(t); });
}
/**
* @brief Given a set of fixed width columns, calculate how the data will be laid out in memory.
*
* @param [in] schema the types of columns that need to be laid out.
* @param [out] column_start the byte offset where each column starts in the row.
* @param [out] column_size the size in bytes of the data for each column in the row.
* @return the size in bytes each row needs.
*/
static inline int32_t compute_fixed_width_layout(std::vector<data_type> const &schema,
std::vector<size_type> &column_start,
std::vector<size_type> &column_size) {
// We guarantee that the start of each column is 64-bit aligned so anything can go
// there, but to make the code simple we will still do an alignment for it.
int32_t at_offset = 0;
for (auto col = schema.begin(); col < schema.end(); col++) {
size_type s = size_of(*col);
column_size.emplace_back(s);
std::size_t allocation_needed = s;
std::size_t alignment_needed = allocation_needed; // They are the same for fixed width types
at_offset = util::round_up_unsafe(at_offset, static_cast<int32_t>(alignment_needed));
column_start.emplace_back(at_offset);
at_offset += allocation_needed;
}
// Now we need to add in space for validity
// Eventually we can think about nullable vs not nullable, but for now we will just always add
// it in
int32_t const validity_bytes_needed =
util::div_rounding_up_safe<int32_t>(schema.size(), CHAR_BIT);
// validity comes at the end and is byte aligned so we can pack more in.
at_offset += validity_bytes_needed;
// Now we need to pad the end so all rows are 64 bit aligned
return util::round_up_unsafe(at_offset, JCUDF_ROW_ALIGNMENT);
}
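// Worked example for compute_fixed_width_layout (illustrative only): for a schema of
// {INT32, INT64, INT8}, the column starts are 0, 8 and 16 (each column aligned to its own
// size), the data ends at offset 17, one validity byte brings it to 18, and rounding up to
// JCUDF_ROW_ALIGNMENT (8 bytes) gives 24 bytes per row.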
/**
* @brief column sizes and column start offsets for a table
*/
struct column_info_s {
size_type size_per_row;
std::vector<size_type> column_starts;
std::vector<size_type> column_sizes;
std::vector<size_type> variable_width_column_starts;
column_info_s &operator=(column_info_s const &other) = delete;
column_info_s &operator=(column_info_s &&other) = delete;
};
/**
 * @brief Compute information about a table such as bytes per row and offsets.
 *
 * @tparam iterator iterator of column schema data
 * @param begin starting iterator of column schema
 * @param end ending iterator of column schema
 * @return a `column_info_s` holding the size of the fixed-width data portion of a row, the
 * column start offsets, the column sizes, and the start offsets of variable-width columns.
 */
template <typename iterator>
column_info_s compute_column_information(iterator begin, iterator end) {
size_type size_per_row = 0;
std::vector<size_type> column_starts;
std::vector<size_type> column_sizes;
std::vector<size_type> variable_width_column_starts;
column_starts.reserve(std::distance(begin, end) + 1);
column_sizes.reserve(std::distance(begin, end));
for (auto col_type = begin; col_type != end; ++col_type) {
bool const compound_type = is_compound(*col_type);
// a list or string column will write a pair of uint32 values here for its offset and length
auto const col_size = compound_type ? sizeof(uint32_t) + sizeof(uint32_t) : size_of(*col_type);
// align size for this type - alignment matches the element size for fixed-width types and is
// 4 bytes for the variable-width length/offset combo
size_type const alignment_needed = compound_type ? __alignof(uint32_t) : col_size;
size_per_row = util::round_up_unsafe(size_per_row, alignment_needed);
if (compound_type) {
variable_width_column_starts.push_back(size_per_row);
}
column_starts.push_back(size_per_row);
column_sizes.push_back(col_size);
size_per_row += col_size;
}
// add validity offset to the end of fixed_width offsets
auto validity_offset = size_per_row;
column_starts.push_back(validity_offset);
// validity is byte-aligned in the JCUDF format
size_per_row +=
util::div_rounding_up_safe(static_cast<size_type>(std::distance(begin, end)), CHAR_BIT);
return {size_per_row, std::move(column_starts), std::move(column_sizes),
std::move(variable_width_column_starts)};
}
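// Worked example for compute_column_information (illustrative only): for a schema of
// {INT8, STRING, INT32}, the INT8 starts at 0; the STRING is compound, so it stores an 8-byte
// offset/length pair aligned to 4 bytes, starting at offset 4 (also recorded in
// variable_width_column_starts); the INT32 starts at 12; the validity offset is 16 and one
// validity byte makes size_per_row 17 (callers round up to JCUDF_ROW_ALIGNMENT as needed).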
/**
* @brief Build `tile_info` for the validity data to break up the work.
*
* @param num_columns number of columns in the table
* @param num_rows number of rows in the table
* @param shmem_limit_per_tile size of shared memory available to a single gpu tile
* @param row_batches batched row information for multiple output locations
* @return vector of `tile_info` structs for validity data
*/
std::vector<detail::tile_info>
build_validity_tile_infos(size_type const &num_columns, size_type const &num_rows,
size_type const &shmem_limit_per_tile,
std::vector<row_batch> const &row_batches) {
auto const desired_rows_and_columns = static_cast<int>(sqrt(shmem_limit_per_tile));
auto const column_stride = util::round_up_unsafe(
[&]() {
if (desired_rows_and_columns > num_columns) {
// not many columns, build a single tile for table width and ship it off
return num_columns;
} else {
return util::round_down_safe(desired_rows_and_columns, CHAR_BIT);
}
}(),
JCUDF_ROW_ALIGNMENT);
// we fit as much as we can given the column stride. Note that an element in the table takes
// just 1 bit, but a row with a single element still takes 8 bytes!
auto const bytes_per_row = util::round_up_safe(
util::div_rounding_up_unsafe(column_stride, CHAR_BIT), JCUDF_ROW_ALIGNMENT);
auto const row_stride =
std::min(num_rows, util::round_down_safe(shmem_limit_per_tile / bytes_per_row, 64));
std::vector<detail::tile_info> validity_tile_infos;
validity_tile_infos.reserve(num_columns / column_stride * num_rows / row_stride);
for (int col = 0; col < num_columns; col += column_stride) {
int current_tile_row_batch = 0;
int rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
int row = 0;
while (row < num_rows) {
if (rows_left_in_batch == 0) {
current_tile_row_batch++;
rows_left_in_batch = row_batches[current_tile_row_batch].row_count;
}
int const tile_height = std::min(row_stride, rows_left_in_batch);
validity_tile_infos.emplace_back(
detail::tile_info{col, row, std::min(col + column_stride - 1, num_columns - 1),
row + tile_height - 1, current_tile_row_batch});
row += tile_height;
rows_left_in_batch -= tile_height;
}
}
return validity_tile_infos;
}
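// Illustrative sizing for build_validity_tile_infos: with a 48KB shared memory limit,
// desired_rows_and_columns = sqrt(49152) ~= 221. For a wide table the column stride rounds
// down to 216 columns, which pack into round_up(ceil(216 / 8), 8) = 32 bytes per row, so the
// row stride is round_down(49152 / 32, 64) = 1536 rows per tile.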
/**
 * @brief functor that returns the size of a row, or 0 if the row index is greater than the
 * number of rows in the table
*
* @tparam RowSize iterator that returns the size of a specific row
*/
template <typename RowSize> struct row_size_functor {
row_size_functor(size_type row_end, RowSize row_sizes, size_type last_row_end)
: _row_end(row_end), _row_sizes(row_sizes), _last_row_end(last_row_end) {}
__device__ inline uint64_t operator()(int i) const {
return i >= _row_end ? 0 : _row_sizes[i + _last_row_end];
}
size_type _row_end;
RowSize _row_sizes;
size_type _last_row_end;
};
/**
* @brief Builds batches of rows that will fit in the size limit of a column.
*
* @tparam RowSize iterator that gives the size of a specific row of the table.
* @param num_rows Total number of rows in the table
* @param row_sizes iterator that gives the size of a specific row of the table.
* @param all_fixed_width bool indicating all data in this table is fixed width
* @param stream stream to operate on for this work
* @param mr memory resource used to allocate any returned data
 * @returns a `batch_data` struct containing the per-row offsets, the batch row boundaries
 * (host and device copies), and the row batches themselves
*/
template <typename RowSize>
batch_data build_batches(size_type num_rows, RowSize row_sizes, bool all_fixed_width,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
auto const total_size = thrust::reduce(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows);
auto const num_batches = static_cast<int32_t>(
util::div_rounding_up_safe(total_size, static_cast<uint64_t>(MAX_BATCH_SIZE)));
auto const num_offsets = num_batches + 1;
std::vector<row_batch> row_batches;
std::vector<size_type> batch_row_boundaries;
device_uvector<size_type> batch_row_offsets(all_fixed_width ? 0 : num_rows, stream);
// there can be at most (max GPU memory / 2GB) batches, so reserve that many boundaries.
batch_row_boundaries.reserve(num_offsets);
batch_row_boundaries.push_back(0);
size_type last_row_end = 0;
device_uvector<uint64_t> cumulative_row_sizes(num_rows, stream);
thrust::inclusive_scan(rmm::exec_policy(stream), row_sizes, row_sizes + num_rows,
cumulative_row_sizes.begin());
// This needs to be split into 2-gig batches. Care must be taken to avoid a batch larger than
// 2 gigs. Imagine a table with 900-meg rows. The batches should occur every 2 rows, but if a
// lower bound is run at 2 gigs, 4 gigs, and 6 gigs, the batches will be 2 rows, 2 rows, and 3
// rows, which is invalid. The previous batch size must be taken into account when building a
// new batch. One way is to pull the batch size back to the host and add it to MAX_BATCH_SIZE
// for the lower bound search. The other method keeps everything on device, subtracting the
// previous batch from cumulative_row_sizes based on index. That involves no synchronization
// between GPU and CPU, but more work on the GPU. Batches further need to be broken on a 32-row
// boundary to match the fixed_width optimized versions.
while (last_row_end < num_rows) {
auto offset_row_sizes = thrust::make_transform_iterator(
cumulative_row_sizes.begin(),
[last_row_end, cumulative_row_sizes = cumulative_row_sizes.data()] __device__(auto i) {
return i - cumulative_row_sizes[last_row_end];
});
auto search_start = offset_row_sizes + last_row_end;
auto search_end = offset_row_sizes + num_rows;
// find the next MAX_BATCH_SIZE boundary
auto const lb =
thrust::lower_bound(rmm::exec_policy(stream), search_start, search_end, MAX_BATCH_SIZE);
size_type const batch_size = lb - search_start;
size_type const row_end = lb == search_end ?
batch_size + last_row_end :
last_row_end + util::round_down_safe(batch_size, 32);
// build offset list for each row in this batch
auto const num_rows_in_batch = row_end - last_row_end;
auto const num_entries = row_end - last_row_end + 1;
device_uvector<size_type> output_batch_row_offsets(num_entries, stream, mr);
auto row_size_iter_bounded = cudf::detail::make_counting_transform_iterator(
0, row_size_functor(row_end, row_sizes, last_row_end));
thrust::exclusive_scan(rmm::exec_policy(stream), row_size_iter_bounded,
row_size_iter_bounded + num_entries, output_batch_row_offsets.begin());
auto const batch_bytes = output_batch_row_offsets.element(num_rows_in_batch, stream);
// The output_batch_row_offsets vector is used as the offset column of the returned data. This
// needs to be individually allocated, but the kernel needs a contiguous array of offsets or
// more global lookups are necessary.
if (!all_fixed_width) {
cudaMemcpy(batch_row_offsets.data() + last_row_end, output_batch_row_offsets.data(),
num_rows_in_batch * sizeof(size_type), cudaMemcpyDefault);
}
batch_row_boundaries.push_back(row_end);
row_batches.push_back({batch_bytes, num_rows_in_batch, std::move(output_batch_row_offsets)});
last_row_end = row_end;
}
return {std::move(batch_row_offsets),
make_device_uvector_async(batch_row_boundaries, stream,
rmm::mr::get_current_device_resource()),
std::move(batch_row_boundaries), std::move(row_batches)};
}
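// Illustrative example for build_batches: with 10 million rows of 300 bytes each (3GB total)
// and MAX_BATCH_SIZE near 2GB, the lower_bound search closes the first batch at the last
// 32-row-aligned row whose cumulative size stays under the limit, and the remaining rows form
// the second batch with its per-batch offsets rebased to start at 0.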
/**
* @brief Computes the number of tiles necessary given a tile height and batch offsets
*
* @param batch_row_boundaries row boundaries for each batch
* @param desired_tile_height height of each tile in the table
* @param stream stream to use
* @return number of tiles necessary
*/
int compute_tile_counts(device_span<size_type const> const &batch_row_boundaries,
int desired_tile_height, rmm::cuda_stream_view stream) {
size_type const num_batches = batch_row_boundaries.size() - 1;
device_uvector<size_type> num_tiles(num_batches, stream);
auto iter = thrust::make_counting_iterator(0);
thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
[desired_tile_height,
batch_row_boundaries =
batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
batch_row_boundaries[batch_index],
desired_tile_height);
});
return thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
}
/**
* @brief Builds the `tile_info` structs for a given table.
*
* @param tiles span of tiles to populate
* @param batch_row_boundaries boundary to row batches
* @param column_start starting column of the tile
* @param column_end ending column of the tile
* @param desired_tile_height height of the tile
* @param total_number_of_rows total number of rows in the table
* @param stream stream to use
* @return number of tiles created
*/
size_type
build_tiles(device_span<tile_info> tiles,
device_uvector<size_type> const &batch_row_boundaries, // comes from build_batches
int column_start, int column_end, int desired_tile_height, int total_number_of_rows,
rmm::cuda_stream_view stream) {
size_type const num_batches = batch_row_boundaries.size() - 1;
device_uvector<size_type> num_tiles(num_batches, stream);
auto iter = thrust::make_counting_iterator(0);
thrust::transform(rmm::exec_policy(stream), iter, iter + num_batches, num_tiles.begin(),
[desired_tile_height,
batch_row_boundaries =
batch_row_boundaries.data()] __device__(auto batch_index) -> size_type {
return util::div_rounding_up_unsafe(batch_row_boundaries[batch_index + 1] -
batch_row_boundaries[batch_index],
desired_tile_height);
});
size_type const total_tiles =
thrust::reduce(rmm::exec_policy(stream), num_tiles.begin(), num_tiles.end());
device_uvector<size_type> tile_starts(num_batches + 1, stream);
auto tile_iter = cudf::detail::make_counting_transform_iterator(
0, [num_tiles = num_tiles.data(), num_batches] __device__(auto i) {
return (i < num_batches) ? num_tiles[i] : 0;
});
thrust::exclusive_scan(rmm::exec_policy(stream), tile_iter, tile_iter + num_batches + 1,
tile_starts.begin()); // in tiles
thrust::transform(
rmm::exec_policy(stream), iter, iter + total_tiles, tiles.begin(),
[=, tile_starts = tile_starts.data(),
batch_row_boundaries = batch_row_boundaries.data()] __device__(size_type tile_index) {
// what batch this tile falls in
auto const batch_index_iter =
thrust::upper_bound(thrust::seq, tile_starts, tile_starts + num_batches, tile_index);
auto const batch_index = std::distance(tile_starts, batch_index_iter) - 1;
// local index within the tile
int const local_tile_index = tile_index - tile_starts[batch_index];
// the start row for this batch.
int const batch_row_start = batch_row_boundaries[batch_index];
// the start row for this tile
int const tile_row_start = batch_row_start + (local_tile_index * desired_tile_height);
// the end row for this tile
int const max_row =
std::min(total_number_of_rows - 1,
batch_index + 1 > num_batches ?
std::numeric_limits<size_type>::max() :
static_cast<int>(batch_row_boundaries[batch_index + 1]) - 1);
int const tile_row_end =
std::min(batch_row_start + ((local_tile_index + 1) * desired_tile_height) - 1, max_row);
// stuff the tile
return tile_info{column_start, tile_row_start, column_end, tile_row_end,
static_cast<int>(batch_index)};
});
return total_tiles;
}
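// Illustrative example for build_tiles: with batch row boundaries {0, 1000, 1800} and
// desired_tile_height 256, the per-batch tile counts are ceil(1000 / 256) = 4 and
// ceil(800 / 256) = 4, so tile_starts = {0, 4, 8}. Tile index 5 then falls in batch 1 with
// local index 1, covering rows 1256..1511 of columns column_start..column_end.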
/**
* @brief Determines what data should be operated on by each tile for the incoming table.
*
* @tparam TileCallback Callback that receives the start and end columns of tiles
* @param column_sizes vector of the size of each column
* @param column_starts vector of the offset of each column
* @param first_row_batch_size size of the first row batch to limit max tile size since a tile
* is unable to span batches
* @param total_number_of_rows total number of rows in the table
* @param shmem_limit_per_tile shared memory allowed per tile
* @param f callback function called when building a tile
*/
template <typename TileCallback>
void determine_tiles(std::vector<size_type> const &column_sizes,
std::vector<size_type> const &column_starts,
size_type const first_row_batch_size, size_type const total_number_of_rows,
size_type const &shmem_limit_per_tile, TileCallback f) {
// tile infos are organized with the tile going "down" the columns; this provides the most
// coalescing of memory access
int current_tile_width = 0;
int current_tile_start_col = 0;
// The ideal tile height has lots of 8-byte reads and 8-byte writes. The optimal read/write
// would be memory-cache-line-sized access, but since other tiles will read/write the edges
// this may not turn out to be overly important. For now, we attempt to build a tile that is
// square in bytes: x * y = shared_mem_size with x == y, so height and width are
// sqrt(shared_mem_size). The trick is that it's in bytes, not rows or columns.
auto const square_bias = 32; // bias towards columns for performance reasons
auto const optimal_square_len = static_cast<size_type>(sqrt(shmem_limit_per_tile));
auto const desired_tile_height = util::round_up_safe<int>(
std::min(optimal_square_len / square_bias, total_number_of_rows), cudf::detail::warp_size);
auto const tile_height = std::clamp(desired_tile_height, 1, first_row_batch_size);
int row_size = 0;
// march each column and build the tiles of appropriate sizes
for (uint col = 0; col < column_sizes.size(); ++col) {
auto const col_size = column_sizes[col];
// align size for this type
auto const alignment_needed = col_size; // They are the same for fixed width types
auto const row_size_aligned = util::round_up_unsafe(row_size, alignment_needed);
auto const row_size_with_this_col = row_size_aligned + col_size;
auto const row_size_with_end_pad =
util::round_up_unsafe(row_size_with_this_col, JCUDF_ROW_ALIGNMENT);
if (row_size_with_end_pad * tile_height > shmem_limit_per_tile) {
// too large, close this tile, generate vertical tiles and restart
f(current_tile_start_col, col == 0 ? col : col - 1, tile_height);
row_size =
util::round_up_unsafe((column_starts[col] + column_sizes[col]) & 7, alignment_needed);
row_size += col_size; // alignment required for shared memory tile boundary to match alignment
// of output row
current_tile_start_col = col;
current_tile_width = 0;
} else {
row_size = row_size_with_this_col;
current_tile_width++;
}
}
// build last set of tiles
if (current_tile_width > 0) {
f(current_tile_start_col, static_cast<int>(column_sizes.size()) - 1, tile_height);
}
}
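// Illustrative example for determine_tiles: with a 48KB shared memory limit,
// optimal_square_len = sqrt(49152) ~= 221 and square_bias = 32, so the desired tile height
// rounds up to a single warp of 32 rows. Columns are then appended to the current tile while
// the 8-byte-padded row size times 32 rows still fits in shared memory, i.e. up to roughly
// 49152 / 32 = 1536 bytes of fixed-width row data per tile.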
/**
* @brief convert cudf table into JCUDF row format
*
* @tparam offsetFunctor functor type that returns the starting offset of each row
* @param tbl table to convert to JCUDF row format
* @param batch_info information about the batches of data
* @param offset_functor functor that returns the starting offset of each row
* @param column_info information about incoming columns
* @param variable_width_offsets optional vector of offsets for variable-width columns
* @param stream stream used
* @param mr selected memory resource for returned data
* @return vector of list columns containing byte columns of the JCUDF row data
*/
template <typename offsetFunctor>
std::vector<std::unique_ptr<column>> convert_to_rows(
table_view const &tbl, batch_data &batch_info, offsetFunctor offset_functor,
column_info_s const &column_info,
std::optional<rmm::device_uvector<strings_column_view::offset_iterator>> variable_width_offsets,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
int device_id;
CUDF_CUDA_TRY(cudaGetDevice(&device_id));
int total_shmem_in_bytes;
CUDF_CUDA_TRY(
cudaDeviceGetAttribute(&total_shmem_in_bytes, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
#ifndef __CUDA_ARCH__ // __host__ code.
// Need to reduce total shmem available by the size of barriers in the kernel's shared memory
total_shmem_in_bytes -=
util::round_up_unsafe(sizeof(cuda::barrier<cuda::thread_scope_block>), 16ul);
#endif // __CUDA_ARCH__
auto const shmem_limit_per_tile = total_shmem_in_bytes;
auto const num_rows = tbl.num_rows();
auto const fixed_width_only = !variable_width_offsets.has_value();
auto select_columns = [](auto const &tbl, auto column_predicate) {
std::vector<column_view> cols;
std::copy_if(tbl.begin(), tbl.end(), std::back_inserter(cols),
[&](auto c) { return column_predicate(c); });
return table_view(cols);
};
auto dev_col_sizes = make_device_uvector_async(column_info.column_sizes, stream,
rmm::mr::get_current_device_resource());
auto dev_col_starts = make_device_uvector_async(column_info.column_starts, stream,
rmm::mr::get_current_device_resource());
// Get the pointers to the input columnar data ready
auto const data_begin = thrust::make_transform_iterator(tbl.begin(), [](auto const &c) {
return is_compound(c.type()) ? nullptr : c.template data<int8_t>();
});
std::vector<int8_t const *> input_data(data_begin, data_begin + tbl.num_columns());
// validity code handles variable and fixed-width data, so give it everything
auto const nm_begin =
thrust::make_transform_iterator(tbl.begin(), [](auto const &c) { return c.null_mask(); });
std::vector<bitmask_type const *> input_nm(nm_begin, nm_begin + tbl.num_columns());
auto dev_input_data =
make_device_uvector_async(input_data, stream, rmm::mr::get_current_device_resource());
auto dev_input_nm =
make_device_uvector_async(input_nm, stream, rmm::mr::get_current_device_resource());
// the first batch always exists unless we were sent an empty table
auto const first_batch_size = batch_info.row_batches[0].row_count;
std::vector<rmm::device_buffer> output_buffers;
std::vector<int8_t *> output_data;
output_data.reserve(batch_info.row_batches.size());
output_buffers.reserve(batch_info.row_batches.size());
std::transform(batch_info.row_batches.begin(), batch_info.row_batches.end(),
std::back_inserter(output_buffers), [&](auto const &batch) {
return rmm::device_buffer(batch.num_bytes, stream, mr);
});
std::transform(output_buffers.begin(), output_buffers.end(), std::back_inserter(output_data),
[](auto &buf) { return static_cast<int8_t *>(buf.data()); });
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
int info_count = 0;
detail::determine_tiles(
column_info.column_sizes, column_info.column_starts, first_batch_size, num_rows,
shmem_limit_per_tile,
[&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &info_count,
&stream](int const start_col, int const end_col, int const tile_height) {
int i = detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
info_count += i;
});
// allocate space for tiles
device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
int tile_offset = 0;
detail::determine_tiles(
column_info.column_sizes, column_info.column_starts, first_batch_size, num_rows,
shmem_limit_per_tile,
[&gpu_batch_row_boundaries = batch_info.d_batch_row_boundaries, &gpu_tile_infos, num_rows,
&tile_offset, stream](int const start_col, int const end_col, int const tile_height) {
tile_offset += detail::build_tiles(
{gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
});
// build validity tiles for ALL columns, variable and fixed width.
auto validity_tile_infos = detail::build_validity_tile_infos(
tbl.num_columns(), num_rows, shmem_limit_per_tile, batch_info.row_batches);
auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream,
rmm::mr::get_current_device_resource());
auto const validity_offset = column_info.column_starts.back();
// blast through the entire table and convert it
detail::copy_to_rows<<<gpu_tile_infos.size(), NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, tbl.num_columns(), shmem_limit_per_tile, gpu_tile_infos, dev_input_data.data(),
dev_col_sizes.data(), dev_col_starts.data(), offset_functor,
batch_info.d_batch_row_boundaries.data(),
reinterpret_cast<int8_t **>(dev_output_data.data()));
// note that validity gets the entire table and not the fixed-width portion
detail::copy_validity_to_rows<<<validity_tile_infos.size(),
NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, tbl.num_columns(), shmem_limit_per_tile, offset_functor,
batch_info.d_batch_row_boundaries.data(), dev_output_data.data(), validity_offset,
dev_validity_tile_infos, dev_input_nm.data());
if (!fixed_width_only) {
// build table view for variable-width data only
auto const variable_width_table =
select_columns(tbl, [](auto col) { return is_compound(col.type()); });
CUDF_EXPECTS(!variable_width_table.is_empty(), "No variable-width columns when expected!");
CUDF_EXPECTS(variable_width_offsets.has_value(), "No variable width offset data!");
auto const variable_data_begin =
thrust::make_transform_iterator(variable_width_table.begin(), [](auto const &c) {
strings_column_view const scv{c};
return is_compound(c.type()) ? scv.chars().template data<int8_t>() : nullptr;
});
std::vector<int8_t const *> variable_width_input_data(
variable_data_begin, variable_data_begin + variable_width_table.num_columns());
auto dev_variable_input_data = make_device_uvector_async(
variable_width_input_data, stream, rmm::mr::get_current_device_resource());
auto dev_variable_col_output_offsets = make_device_uvector_async(
column_info.variable_width_column_starts, stream, rmm::mr::get_current_device_resource());
for (uint i = 0; i < batch_info.row_batches.size(); i++) {
auto const batch_row_offset = batch_info.batch_row_boundaries[i];
auto const batch_num_rows = batch_info.row_batches[i].row_count;
dim3 const string_blocks(std::min(
MAX_STRING_BLOCKS,
util::div_rounding_up_unsafe(batch_num_rows, NUM_STRING_ROWS_PER_BLOCK_TO_ROWS)));
detail::copy_strings_to_rows<<<string_blocks, NUM_WARPS_IN_BLOCK * cudf::detail::warp_size, 0,
stream.value()>>>(
batch_num_rows, variable_width_table.num_columns(), dev_variable_input_data.data(),
dev_variable_col_output_offsets.data(), variable_width_offsets->data(),
column_info.size_per_row, offset_functor, batch_row_offset,
reinterpret_cast<int8_t *>(output_data[i]));
}
}
// split up the output buffer into multiple buffers based on row batch sizes and create list of
// byte columns
std::vector<std::unique_ptr<column>> ret;
ret.reserve(batch_info.row_batches.size());
auto counting_iter = thrust::make_counting_iterator(0);
std::transform(counting_iter, counting_iter + batch_info.row_batches.size(),
std::back_inserter(ret), [&](auto batch) {
auto const offset_count = batch_info.row_batches[batch].row_offsets.size();
auto offsets =
std::make_unique<column>(data_type{type_id::INT32}, (size_type)offset_count,
batch_info.row_batches[batch].row_offsets.release(),
rmm::device_buffer{}, 0);
auto data = std::make_unique<column>(
data_type{type_id::INT8}, batch_info.row_batches[batch].num_bytes,
std::move(output_buffers[batch]), rmm::device_buffer{}, 0);
return make_lists_column(
batch_info.row_batches[batch].row_count, std::move(offsets), std::move(data),
0, rmm::device_buffer{0, cudf::get_default_stream(), mr}, stream, mr);
});
return ret;
}
} // namespace detail
/**
* @brief convert a cudf table to JCUDF row format
*
* @param tbl incoming table to convert
* @param stream stream to use for operations
* @param mr memory resource used for returned data
* @return vector of list columns containing byte columns of the JCUDF row data
*/
std::vector<std::unique_ptr<column>> convert_to_rows(table_view const &tbl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto const num_columns = tbl.num_columns();
auto const num_rows = tbl.num_rows();
auto const fixed_width_only = std::all_of(
tbl.begin(), tbl.end(), [](column_view const &c) { return is_fixed_width(c.type()); });
// Break up the work into tiles, which are a starting and ending row/col #. The tile size is
// calculated based on the shared memory size available; we want a single tile to fill up the
// entire shared memory space available for the transpose-like conversion.
// There are two different processes going on here: the GPU conversion of the data and the
// writing of the data into the list of byte columns, which are a maximum of 2 gigs each due to
// the offset maximum size. The GPU conversion portion has to understand this limitation because
// each column must own the data inside it, so each must be a distinct allocation. Copying the
// data into these final buffers would be prohibitively expensive, so care is taken to ensure
// the GPU writes to the proper buffer. The tiles are broken at the boundaries of specific rows
// based on the row sizes up to that point. These are row batches and they are decided first,
// before building the tiles, so the tiles can be properly cut around them.
auto schema_column_iter =
thrust::make_transform_iterator(tbl.begin(), [](auto const &i) { return i.type(); });
auto column_info =
detail::compute_column_information(schema_column_iter, schema_column_iter + num_columns);
auto const size_per_row = column_info.size_per_row;
if (fixed_width_only) {
// total encoded row size. This includes fixed-width data and validity only. It does not include
// variable-width data since it isn't copied with the fixed-width and validity kernel.
auto row_size_iter = thrust::make_constant_iterator<uint64_t>(
util::round_up_unsafe(size_per_row, JCUDF_ROW_ALIGNMENT));
auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
detail::fixed_width_row_offset_functor offset_functor(
util::round_up_unsafe(size_per_row, JCUDF_ROW_ALIGNMENT));
return detail::convert_to_rows(tbl, batch_info, offset_functor, std::move(column_info),
std::nullopt, stream, mr);
} else {
auto offset_data = detail::build_string_row_offsets(tbl, size_per_row, stream);
auto &row_sizes = std::get<0>(offset_data);
auto row_size_iter = cudf::detail::make_counting_transform_iterator(
0, detail::row_size_functor(num_rows, row_sizes.data(), 0));
auto batch_info = detail::build_batches(num_rows, row_size_iter, fixed_width_only, stream, mr);
detail::string_row_offset_functor offset_functor(batch_info.batch_row_offsets);
return detail::convert_to_rows(tbl, batch_info, offset_functor, std::move(column_info),
std::make_optional(std::move(std::get<1>(offset_data))), stream,
mr);
}
}
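// A minimal host-side usage sketch (hypothetical, for illustration only):
//
//   cudf::table_view tbl = ...;  // any mix of fixed-width and string columns
//   auto rows = convert_to_rows(tbl, cudf::get_default_stream(),
//                               rmm::mr::get_current_device_resource());
//   // Each entry of `rows` is a LIST<INT8> column whose child holds the packed JCUDF bytes
//   // for one batch of rows; convert_from_rows round-trips the data back to a table given
//   // the original schema.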
std::vector<std::unique_ptr<column>>
convert_to_rows_fixed_width_optimized(table_view const &tbl, rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto const num_columns = tbl.num_columns();
std::vector<data_type> schema;
schema.resize(num_columns);
std::transform(tbl.begin(), tbl.end(), schema.begin(),
[](auto i) -> data_type { return i.type(); });
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
int32_t const size_per_row =
detail::compute_fixed_width_layout(schema, column_start, column_size);
auto dev_column_start = make_device_uvector_async(column_start, stream, mr);
auto dev_column_size = make_device_uvector_async(column_size, stream, mr);
// Make the number of rows per batch a multiple of 32 so we don't have to worry about splitting
// validity at a specific row offset. This might change in the future.
auto const max_rows_per_batch =
util::round_down_safe(std::numeric_limits<size_type>::max() / size_per_row, 32);
auto const num_rows = tbl.num_rows();
// Get the pointers to the input columnar data ready
std::vector<const int8_t *> input_data;
std::vector<bitmask_type const *> input_nm;
for (size_type column_number = 0; column_number < num_columns; column_number++) {
column_view cv = tbl.column(column_number);
input_data.emplace_back(cv.data<int8_t>());
input_nm.emplace_back(cv.null_mask());
}
auto dev_input_data = make_device_uvector_async(input_data, stream, mr);
auto dev_input_nm = make_device_uvector_async(input_nm, stream, mr);
using ScalarType = scalar_type_t<size_type>;
auto zero = make_numeric_scalar(data_type(type_id::INT32), stream.value());
zero->set_valid_async(true, stream);
static_cast<ScalarType *>(zero.get())->set_value(0, stream);
auto step = make_numeric_scalar(data_type(type_id::INT32), stream.value());
step->set_valid_async(true, stream);
static_cast<ScalarType *>(step.get())->set_value(static_cast<size_type>(size_per_row), stream);
std::vector<std::unique_ptr<column>> ret;
for (size_type row_start = 0; row_start < num_rows; row_start += max_rows_per_batch) {
size_type row_count = num_rows - row_start;
row_count = row_count > max_rows_per_batch ? max_rows_per_batch : row_count;
ret.emplace_back(detail::fixed_width_convert_to_rows(
row_start, row_count, num_columns, size_per_row, dev_column_start, dev_column_size,
dev_input_data, dev_input_nm, *zero, *step, stream, mr));
}
return ret;
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
/**
* @brief convert from JCUDF row format to cudf columns
*
* @param input vector of list columns containing byte columns of the JCUDF row data
* @param schema incoming schema of the data
* @param stream stream to use for compute
* @param mr memory resource for returned data
* @return cudf table of the data
*/
std::unique_ptr<table> convert_from_rows(lists_column_view const &input,
std::vector<data_type> const &schema,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
// convert any strings in the schema to two int32 columns
// This allows us to leverage the fixed-width copy code to fill in our offset and string length
// data.
std::vector<data_type> string_schema;
string_schema.reserve(schema.size());
for (auto i : schema) {
if (i.id() == type_id::STRING) {
string_schema.push_back(data_type(type_id::INT32));
string_schema.push_back(data_type(type_id::INT32));
} else {
string_schema.push_back(i);
}
}
auto const num_columns = string_schema.size();
auto const num_rows = input.parent().size();
int device_id;
CUDF_CUDA_TRY(cudaGetDevice(&device_id));
int total_shmem_in_bytes;
CUDF_CUDA_TRY(
cudaDeviceGetAttribute(&total_shmem_in_bytes, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
#ifndef __CUDA_ARCH__ // __host__ code.
// Need to reduce total shmem available by the size of barriers in the kernel's shared memory
total_shmem_in_bytes -=
util::round_up_unsafe(sizeof(cuda::barrier<cuda::thread_scope_block>), 16ul);
#endif // __CUDA_ARCH__
auto const shmem_limit_per_tile = total_shmem_in_bytes;
auto column_info = detail::compute_column_information(string_schema.begin(), string_schema.end());
auto const size_per_row = util::round_up_unsafe(column_info.size_per_row, JCUDF_ROW_ALIGNMENT);
// Ideally we would check that the offsets are all the same, etc. but for now this is probably
// fine
CUDF_EXPECTS(size_per_row * num_rows <= child.size(), "The layout of the data appears to be off");
auto dev_col_starts = make_device_uvector_async(column_info.column_starts, stream,
rmm::mr::get_current_device_resource());
auto dev_col_sizes = make_device_uvector_async(column_info.column_sizes, stream,
rmm::mr::get_current_device_resource());
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<std::unique_ptr<column>> string_row_offset_columns;
std::vector<std::unique_ptr<column>> string_length_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
std::vector<int32_t *> string_row_offsets;
std::vector<int32_t *> string_lengths;
for (auto i : schema) {
auto make_col = [&output_data, &output_nm](data_type type, size_type num_rows, bool include_nm,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr) {
auto column = make_fixed_width_column(
type, num_rows, include_nm ? mask_state::UNINITIALIZED : mask_state::UNALLOCATED, stream,
mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
if (include_nm) {
output_nm.emplace_back(mut.null_mask());
}
return column;
};
if (i.id() == type_id::STRING) {
auto const int32type = data_type(type_id::INT32);
auto offset_col =
make_col(int32type, num_rows, true, stream, rmm::mr::get_current_device_resource());
string_row_offsets.push_back(offset_col->mutable_view().data<int32_t>());
string_row_offset_columns.emplace_back(std::move(offset_col));
auto length_col =
make_col(int32type, num_rows, false, stream, rmm::mr::get_current_device_resource());
string_lengths.push_back(length_col->mutable_view().data<int32_t>());
string_length_columns.emplace_back(std::move(length_col));
// placeholder
output_columns.emplace_back(make_empty_column(type_id::STRING));
} else {
output_columns.emplace_back(make_col(i, num_rows, true, stream, mr));
}
}
auto dev_string_row_offsets =
make_device_uvector_async(string_row_offsets, stream, rmm::mr::get_current_device_resource());
auto dev_string_lengths =
make_device_uvector_async(string_lengths, stream, rmm::mr::get_current_device_resource());
// build the row_batches from the passed in list column
std::vector<detail::row_batch> row_batches;
row_batches.push_back(
{detail::row_batch{child.size(), num_rows, device_uvector<size_type>(0, stream)}});
auto dev_output_data =
make_device_uvector_async(output_data, stream, rmm::mr::get_current_device_resource());
auto dev_output_nm =
make_device_uvector_async(output_nm, stream, rmm::mr::get_current_device_resource());
  // we only ever get a single batch when going from rows, so there are exactly two
  // row boundaries: 0 and num_rows
constexpr auto num_batches = 2;
device_uvector<size_type> gpu_batch_row_boundaries(num_batches, stream);
thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator(0),
thrust::make_counting_iterator(num_batches), gpu_batch_row_boundaries.begin(),
[num_rows] __device__(auto i) { return i == 0 ? 0 : num_rows; });
int info_count = 0;
detail::determine_tiles(
column_info.column_sizes, column_info.column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &info_count, &stream](int const start_col, int const end_col,
int const tile_height) {
info_count += detail::compute_tile_counts(gpu_batch_row_boundaries, tile_height, stream);
});
// allocate space for tiles
device_uvector<detail::tile_info> gpu_tile_infos(info_count, stream);
int tile_offset = 0;
detail::determine_tiles(
column_info.column_sizes, column_info.column_starts, num_rows, num_rows, shmem_limit_per_tile,
[&gpu_batch_row_boundaries, &gpu_tile_infos, num_rows, &tile_offset,
stream](int const start_col, int const end_col, int const tile_height) {
tile_offset += detail::build_tiles(
{gpu_tile_infos.data() + tile_offset, gpu_tile_infos.size() - tile_offset},
gpu_batch_row_boundaries, start_col, end_col, tile_height, num_rows, stream);
});
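  // The two determine_tiles() passes above follow a count-then-fill pattern: the first
  // pass only tallies how many tile_info entries are needed so gpu_tile_infos can be
  // allocated exactly once at the right size, and the second pass writes the entries
  // into that allocation at the running tile_offset.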
dim3 const blocks(gpu_tile_infos.size());
// validity needs to be calculated based on the actual number of final table columns
auto validity_tile_infos =
detail::build_validity_tile_infos(schema.size(), num_rows, shmem_limit_per_tile, row_batches);
auto dev_validity_tile_infos = make_device_uvector_async(validity_tile_infos, stream,
rmm::mr::get_current_device_resource());
dim3 const validity_blocks(validity_tile_infos.size());
if (dev_string_row_offsets.size() == 0) {
detail::fixed_width_row_offset_functor offset_functor(size_per_row);
detail::copy_from_rows<<<gpu_tile_infos.size(), NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_data.data(), dev_col_sizes.data(),
dev_col_starts.data(), gpu_tile_infos, child.data<int8_t>());
detail::copy_validity_from_rows<<<validity_tile_infos.size(),
NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_nm.data(), column_info.column_starts.back(),
dev_validity_tile_infos, child.data<int8_t>());
} else {
detail::string_row_offset_functor offset_functor(device_span<size_type const>{input.offsets()});
detail::copy_from_rows<<<gpu_tile_infos.size(), NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_data.data(), dev_col_sizes.data(),
dev_col_starts.data(), gpu_tile_infos, child.data<int8_t>());
detail::copy_validity_from_rows<<<validity_tile_infos.size(),
NUM_WARPS_IN_BLOCK * cudf::detail::warp_size,
total_shmem_in_bytes, stream.value()>>>(
num_rows, num_columns, shmem_limit_per_tile, offset_functor,
gpu_batch_row_boundaries.data(), dev_output_nm.data(), column_info.column_starts.back(),
dev_validity_tile_infos, child.data<int8_t>());
std::vector<device_uvector<size_type>> string_col_offsets;
std::vector<rmm::device_uvector<char>> string_data_cols;
std::vector<size_type *> string_col_offset_ptrs;
std::vector<char *> string_data_col_ptrs;
for (auto &col_string_lengths : string_lengths) {
device_uvector<size_type> output_string_offsets(num_rows + 1, stream, mr);
      auto string_length_at = [num_rows, col_string_lengths] __device__(auto const &i) {
        return i < num_rows ? col_string_lengths[i] : 0;
      };
      auto bounded_iter = cudf::detail::make_counting_transform_iterator(0, string_length_at);
thrust::exclusive_scan(rmm::exec_policy(stream), bounded_iter, bounded_iter + num_rows + 1,
output_string_offsets.begin());
// allocate destination string column
rmm::device_uvector<char> string_data(output_string_offsets.element(num_rows, stream), stream,
mr);
string_col_offset_ptrs.push_back(output_string_offsets.data());
string_data_col_ptrs.push_back(string_data.data());
string_col_offsets.push_back(std::move(output_string_offsets));
string_data_cols.push_back(std::move(string_data));
}
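    // A worked sketch of the scan above: per-row lengths [3, 0, 5] (plus the trailing 0
    // supplied by the bounded iterator at i == num_rows) exclusive-scan to offsets
    // [0, 3, 3, 8], and element(num_rows) == 8 is exactly the number of chars allocated
    // below for the column's string data.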
auto dev_string_col_offsets = make_device_uvector_async(string_col_offset_ptrs, stream,
rmm::mr::get_current_device_resource());
auto dev_string_data_cols = make_device_uvector_async(string_data_col_ptrs, stream,
rmm::mr::get_current_device_resource());
dim3 const string_blocks(
std::min(std::max(MIN_STRING_BLOCKS, num_rows / NUM_STRING_ROWS_PER_BLOCK_FROM_ROWS),
MAX_STRING_BLOCKS));
detail::copy_strings_from_rows<<<string_blocks, NUM_WARPS_IN_BLOCK * cudf::detail::warp_size, 0,
stream.value()>>>(
offset_functor, dev_string_row_offsets.data(), dev_string_lengths.data(),
dev_string_col_offsets.data(), dev_string_data_cols.data(), child.data<int8_t>(), num_rows,
static_cast<cudf::size_type>(string_col_offsets.size()));
// merge strings back into output_columns
int string_idx = 0;
for (int i = 0; i < static_cast<int>(schema.size()); ++i) {
if (schema[i].id() == type_id::STRING) {
        // replace the placeholder with the real strings column
auto const null_count = string_row_offset_columns[string_idx]->null_count();
auto string_data = string_row_offset_columns[string_idx].release()->release();
output_columns[i] =
make_strings_column(num_rows, std::move(string_col_offsets[string_idx]),
std::move(string_data_cols[string_idx]),
std::move(*string_data.null_mask.release()), null_count);
string_idx++;
}
}
}
for (auto &col : output_columns) {
col->set_null_count(cudf::null_count(col->view().null_mask(), 0, col->size()));
}
return std::make_unique<table>(std::move(output_columns));
}
std::unique_ptr<table> convert_from_rows_fixed_width_optimized(
lists_column_view const &input, std::vector<data_type> const &schema,
rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) {
// verify that the types are what we expect
column_view child = input.child();
auto const list_type = child.type().id();
CUDF_EXPECTS(list_type == type_id::INT8 || list_type == type_id::UINT8,
"Only a list of bytes is supported as input");
auto const num_columns = schema.size();
if (detail::are_all_fixed_width(schema)) {
std::vector<size_type> column_start;
std::vector<size_type> column_size;
auto const num_rows = input.parent().size();
auto const size_per_row = detail::compute_fixed_width_layout(schema, column_start, column_size);
    // Ideally we would also validate that the row offsets are consistent, but the size
    // check below suffices for now.
CUDF_EXPECTS(size_per_row * num_rows == child.size(),
"The layout of the data appears to be off");
auto dev_column_start =
make_device_uvector_async(column_start, stream, rmm::mr::get_current_device_resource());
auto dev_column_size =
make_device_uvector_async(column_size, stream, rmm::mr::get_current_device_resource());
// Allocate the columns we are going to write into
std::vector<std::unique_ptr<column>> output_columns;
std::vector<int8_t *> output_data;
std::vector<bitmask_type *> output_nm;
for (int i = 0; i < static_cast<int>(num_columns); i++) {
auto column =
make_fixed_width_column(schema[i], num_rows, mask_state::UNINITIALIZED, stream, mr);
auto mut = column->mutable_view();
output_data.emplace_back(mut.data<int8_t>());
output_nm.emplace_back(mut.null_mask());
output_columns.emplace_back(std::move(column));
}
auto dev_output_data = make_device_uvector_async(output_data, stream, mr);
auto dev_output_nm = make_device_uvector_async(output_nm, stream, mr);
dim3 blocks;
dim3 threads;
int shared_size =
detail::calc_fixed_width_kernel_dims(num_columns, num_rows, size_per_row, blocks, threads);
detail::copy_from_rows_fixed_width_optimized<<<blocks, threads, shared_size, stream.value()>>>(
num_rows, num_columns, size_per_row, dev_column_start.data(), dev_column_size.data(),
dev_output_data.data(), dev_output_nm.data(), child.data<int8_t>());
for (auto &col : output_columns) {
col->set_null_count(cudf::null_count(col->view().null_mask(), 0, col->size()));
}
return std::make_unique<table>(std::move(output_columns));
} else {
CUDF_FAIL("Only fixed width types are currently supported");
}
}
} // namespace jni
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/HashJoinJni.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/join.hpp>
#include "cudf_jni_apis.hpp"
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_HashJoin_create(JNIEnv *env, jclass, jlong j_table,
jboolean j_nulls_equal) {
JNI_NULL_CHECK(env, j_table, "table handle is null", 0);
try {
cudf::jni::auto_set_device(env);
auto tview = reinterpret_cast<cudf::table_view const *>(j_table);
auto nulleq = j_nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
auto hash_join_ptr = new cudf::hash_join(*tview, nulleq);
return reinterpret_cast<jlong>(hash_join_ptr);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_HashJoin_destroy(JNIEnv *env, jclass, jlong j_handle) {
try {
cudf::jni::auto_set_device(env);
auto hash_join_ptr = reinterpret_cast<cudf::hash_join *>(j_handle);
delete hash_join_ptr;
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/CompiledExpression.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <vector>
#include <cudf/ast/expressions.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/transform.hpp>
#include <cudf/types.hpp>
#include "cudf_jni_apis.hpp"
#include "jni_compiled_expr.hpp"
namespace {
/** Utility class to read data from the serialized AST buffer generated from Java */
class jni_serialized_ast {
jbyte const *data_ptr; // pointer to the current entity to deserialize
jbyte const *const end_ptr; // pointer to the byte immediately after the AST serialized data
/** Throws an error if there is insufficient space left to read the specified number of bytes */
void check_for_eof(std::size_t num_bytes_to_read) {
if (data_ptr + num_bytes_to_read > end_ptr) {
throw std::runtime_error("Unexpected end of serialized data");
}
}
public:
jni_serialized_ast(cudf::jni::native_jbyteArray &jni_data)
: data_ptr(jni_data.begin()), end_ptr(jni_data.end()) {}
/** Returns true if there is no data remaining to be read */
bool at_eof() { return data_ptr == end_ptr; }
/** Read a byte from the serialized AST data buffer */
jbyte read_byte() {
check_for_eof(sizeof(jbyte));
return *data_ptr++;
}
/** Read a multi-byte value from the serialized AST data buffer */
template <typename T> T read() {
if constexpr (std::is_same_v<T, std::string>) {
auto const size = read<cudf::size_type>();
check_for_eof(size);
auto const result = std::string(reinterpret_cast<char const *>(data_ptr), size);
data_ptr += size;
return result;
} else {
check_for_eof(sizeof(T));
// use memcpy since data may be misaligned
T result;
memcpy(reinterpret_cast<jbyte *>(&result), data_ptr, sizeof(T));
data_ptr += sizeof(T);
return result;
}
}
/** Decode a libcudf data type from the serialized AST data buffer */
cudf::data_type read_cudf_type() {
auto const dtype_id = static_cast<cudf::type_id>(read_byte());
switch (dtype_id) {
case cudf::type_id::INT8:
case cudf::type_id::INT16:
case cudf::type_id::INT32:
case cudf::type_id::INT64:
case cudf::type_id::UINT8:
case cudf::type_id::UINT16:
case cudf::type_id::UINT32:
case cudf::type_id::UINT64:
case cudf::type_id::FLOAT32:
case cudf::type_id::FLOAT64:
case cudf::type_id::BOOL8:
case cudf::type_id::TIMESTAMP_DAYS:
case cudf::type_id::TIMESTAMP_SECONDS:
case cudf::type_id::TIMESTAMP_MILLISECONDS:
case cudf::type_id::TIMESTAMP_MICROSECONDS:
case cudf::type_id::TIMESTAMP_NANOSECONDS:
case cudf::type_id::DURATION_DAYS:
case cudf::type_id::DURATION_SECONDS:
case cudf::type_id::DURATION_MILLISECONDS:
case cudf::type_id::DURATION_MICROSECONDS:
case cudf::type_id::DURATION_NANOSECONDS:
case cudf::type_id::STRING: {
return cudf::data_type(dtype_id);
}
case cudf::type_id::DECIMAL32:
case cudf::type_id::DECIMAL64: {
int32_t const scale = read_byte();
return cudf::data_type(dtype_id, scale);
}
      default: throw std::invalid_argument("unrecognized cudf data type");
}
}
};
/**
* Enumeration of the AST expression types that can appear in the serialized data.
* NOTE: This must be kept in sync with the NodeType enumeration in AstNode.java!
*/
enum class jni_serialized_expression_type : int8_t {
VALID_LITERAL = 0,
NULL_LITERAL = 1,
COLUMN_REFERENCE = 2,
UNARY_OPERATION = 3,
BINARY_OPERATION = 4
};
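// A concrete sketch of the byte stream this enumeration implies (illustrative only;
// the layout is owned by the Java serializer and read back by the compile_* functions
// below). An expression like `leftTable.column0 + 1` (INT32) would arrive as a
// pre-order walk of the expression tree:
//   BINARY_OPERATION(4), ADD(0),
//     COLUMN_REFERENCE(2), LEFT(0), int32 column index 0,
//     VALID_LITERAL(0), INT32 type-id byte, int32 value 1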
/**
* Convert a Java AST serialized byte representing an AST unary operator into the
* corresponding libcudf AST operator.
* NOTE: This must be kept in sync with the enumeration in UnaryOperator.java!
*/
cudf::ast::ast_operator jni_to_unary_operator(jbyte jni_op_value) {
switch (jni_op_value) {
case 0: return cudf::ast::ast_operator::IDENTITY;
case 1: return cudf::ast::ast_operator::IS_NULL;
case 2: return cudf::ast::ast_operator::SIN;
case 3: return cudf::ast::ast_operator::COS;
case 4: return cudf::ast::ast_operator::TAN;
case 5: return cudf::ast::ast_operator::ARCSIN;
case 6: return cudf::ast::ast_operator::ARCCOS;
case 7: return cudf::ast::ast_operator::ARCTAN;
case 8: return cudf::ast::ast_operator::SINH;
case 9: return cudf::ast::ast_operator::COSH;
case 10: return cudf::ast::ast_operator::TANH;
case 11: return cudf::ast::ast_operator::ARCSINH;
case 12: return cudf::ast::ast_operator::ARCCOSH;
case 13: return cudf::ast::ast_operator::ARCTANH;
case 14: return cudf::ast::ast_operator::EXP;
case 15: return cudf::ast::ast_operator::LOG;
case 16: return cudf::ast::ast_operator::SQRT;
case 17: return cudf::ast::ast_operator::CBRT;
case 18: return cudf::ast::ast_operator::CEIL;
case 19: return cudf::ast::ast_operator::FLOOR;
case 20: return cudf::ast::ast_operator::ABS;
case 21: return cudf::ast::ast_operator::RINT;
case 22: return cudf::ast::ast_operator::BIT_INVERT;
case 23: return cudf::ast::ast_operator::NOT;
case 24: return cudf::ast::ast_operator::CAST_TO_INT64;
case 25: return cudf::ast::ast_operator::CAST_TO_UINT64;
case 26: return cudf::ast::ast_operator::CAST_TO_FLOAT64;
default: throw std::invalid_argument("unexpected JNI AST unary operator value");
}
}
/**
* Convert a Java AST serialized byte representing an AST binary operator into the
* corresponding libcudf AST operator.
* NOTE: This must be kept in sync with the enumeration in BinaryOperator.java!
*/
cudf::ast::ast_operator jni_to_binary_operator(jbyte jni_op_value) {
switch (jni_op_value) {
case 0: return cudf::ast::ast_operator::ADD;
case 1: return cudf::ast::ast_operator::SUB;
case 2: return cudf::ast::ast_operator::MUL;
case 3: return cudf::ast::ast_operator::DIV;
case 4: return cudf::ast::ast_operator::TRUE_DIV;
case 5: return cudf::ast::ast_operator::FLOOR_DIV;
case 6: return cudf::ast::ast_operator::MOD;
case 7: return cudf::ast::ast_operator::PYMOD;
case 8: return cudf::ast::ast_operator::POW;
case 9: return cudf::ast::ast_operator::EQUAL;
case 10: return cudf::ast::ast_operator::NULL_EQUAL;
case 11: return cudf::ast::ast_operator::NOT_EQUAL;
case 12: return cudf::ast::ast_operator::LESS;
case 13: return cudf::ast::ast_operator::GREATER;
case 14: return cudf::ast::ast_operator::LESS_EQUAL;
case 15: return cudf::ast::ast_operator::GREATER_EQUAL;
case 16: return cudf::ast::ast_operator::BITWISE_AND;
case 17: return cudf::ast::ast_operator::BITWISE_OR;
case 18: return cudf::ast::ast_operator::BITWISE_XOR;
case 19: return cudf::ast::ast_operator::LOGICAL_AND;
case 20: return cudf::ast::ast_operator::NULL_LOGICAL_AND;
case 21: return cudf::ast::ast_operator::LOGICAL_OR;
case 22: return cudf::ast::ast_operator::NULL_LOGICAL_OR;
default: throw std::invalid_argument("unexpected JNI AST binary operator value");
}
}
/**
* Convert a Java AST serialized byte representing an AST table reference into the
* corresponding libcudf AST table reference.
* NOTE: This must be kept in sync with the enumeration in TableReference.java!
*/
cudf::ast::table_reference jni_to_table_reference(jbyte jni_value) {
switch (jni_value) {
case 0: return cudf::ast::table_reference::LEFT;
case 1: return cudf::ast::table_reference::RIGHT;
default: throw std::invalid_argument("unexpected JNI table reference value");
}
}
/** Functor for type-dispatching the creation of an AST literal */
struct make_literal {
/** Construct an AST literal from a numeric value */
template <typename T, std::enable_if_t<cudf::is_numeric<T>()> * = nullptr>
cudf::ast::literal &operator()(cudf::data_type dtype, bool is_valid,
cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
std::unique_ptr<cudf::scalar> scalar_ptr = cudf::make_numeric_scalar(dtype);
scalar_ptr->set_valid_async(is_valid);
if (is_valid) {
T val = jni_ast.read<T>();
using ScalarType = cudf::scalar_type_t<T>;
static_cast<ScalarType *>(scalar_ptr.get())->set_value(val);
}
auto &numeric_scalar = static_cast<cudf::numeric_scalar<T> &>(*scalar_ptr);
return compiled_expr.add_literal(std::make_unique<cudf::ast::literal>(numeric_scalar),
std::move(scalar_ptr));
}
/** Construct an AST literal from a timestamp value */
template <typename T, std::enable_if_t<cudf::is_timestamp<T>()> * = nullptr>
cudf::ast::literal &operator()(cudf::data_type dtype, bool is_valid,
cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
std::unique_ptr<cudf::scalar> scalar_ptr = cudf::make_timestamp_scalar(dtype);
scalar_ptr->set_valid_async(is_valid);
if (is_valid) {
T val = jni_ast.read<T>();
using ScalarType = cudf::scalar_type_t<T>;
static_cast<ScalarType *>(scalar_ptr.get())->set_value(val);
}
auto ×tamp_scalar = static_cast<cudf::timestamp_scalar<T> &>(*scalar_ptr);
return compiled_expr.add_literal(std::make_unique<cudf::ast::literal>(timestamp_scalar),
std::move(scalar_ptr));
}
/** Construct an AST literal from a duration value */
template <typename T, std::enable_if_t<cudf::is_duration<T>()> * = nullptr>
cudf::ast::literal &operator()(cudf::data_type dtype, bool is_valid,
cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
std::unique_ptr<cudf::scalar> scalar_ptr = cudf::make_duration_scalar(dtype);
scalar_ptr->set_valid_async(is_valid);
if (is_valid) {
T val = jni_ast.read<T>();
using ScalarType = cudf::scalar_type_t<T>;
static_cast<ScalarType *>(scalar_ptr.get())->set_value(val);
}
auto &duration_scalar = static_cast<cudf::duration_scalar<T> &>(*scalar_ptr);
return compiled_expr.add_literal(std::make_unique<cudf::ast::literal>(duration_scalar),
std::move(scalar_ptr));
}
/** Construct an AST literal from a string value */
template <typename T, std::enable_if_t<std::is_same_v<T, cudf::string_view>> * = nullptr>
cudf::ast::literal &operator()(cudf::data_type dtype, bool is_valid,
cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
std::unique_ptr<cudf::scalar> scalar_ptr = [&]() {
if (is_valid) {
std::string val = jni_ast.read<std::string>();
return std::make_unique<cudf::string_scalar>(val, is_valid);
} else {
return std::make_unique<cudf::string_scalar>(rmm::device_buffer{}, is_valid);
}
}();
auto &str_scalar = static_cast<cudf::string_scalar &>(*scalar_ptr);
return compiled_expr.add_literal(std::make_unique<cudf::ast::literal>(str_scalar),
std::move(scalar_ptr));
}
/** Default functor implementation to catch type dispatch errors */
template <typename T, std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_timestamp<T>() &&
!cudf::is_duration<T>() &&
!std::is_same_v<T, cudf::string_view>> * = nullptr>
cudf::ast::literal &operator()(cudf::data_type dtype, bool is_valid,
cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
throw std::logic_error("Unsupported AST literal type");
}
};
/** Decode a serialized AST literal */
cudf::ast::literal &compile_literal(bool is_valid, cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
auto const dtype = jni_ast.read_cudf_type();
return cudf::type_dispatcher(dtype, make_literal{}, dtype, is_valid, compiled_expr, jni_ast);
}
/** Decode a serialized AST column reference */
cudf::ast::column_reference &compile_column_reference(cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
auto const table_ref = jni_to_table_reference(jni_ast.read_byte());
cudf::size_type const column_index = jni_ast.read<int>();
return compiled_expr.add_column_ref(
std::make_unique<cudf::ast::column_reference>(column_index, table_ref));
}
// forward declaration
cudf::ast::expression &compile_expression(cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast);
/** Decode a serialized AST unary expression */
cudf::ast::operation &compile_unary_expression(cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
auto const ast_op = jni_to_unary_operator(jni_ast.read_byte());
cudf::ast::expression &child_expression = compile_expression(compiled_expr, jni_ast);
return compiled_expr.add_operation(
std::make_unique<cudf::ast::operation>(ast_op, child_expression));
}
/** Decode a serialized AST binary expression */
cudf::ast::operation &compile_binary_expression(cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
auto const ast_op = jni_to_binary_operator(jni_ast.read_byte());
cudf::ast::expression &left_child = compile_expression(compiled_expr, jni_ast);
cudf::ast::expression &right_child = compile_expression(compiled_expr, jni_ast);
return compiled_expr.add_operation(
std::make_unique<cudf::ast::operation>(ast_op, left_child, right_child));
}
/** Decode a serialized AST expression by reading the expression type and dispatching */
cudf::ast::expression &compile_expression(cudf::jni::ast::compiled_expr &compiled_expr,
jni_serialized_ast &jni_ast) {
auto const expression_type = static_cast<jni_serialized_expression_type>(jni_ast.read_byte());
switch (expression_type) {
case jni_serialized_expression_type::VALID_LITERAL:
return compile_literal(true, compiled_expr, jni_ast);
case jni_serialized_expression_type::NULL_LITERAL:
return compile_literal(false, compiled_expr, jni_ast);
case jni_serialized_expression_type::COLUMN_REFERENCE:
return compile_column_reference(compiled_expr, jni_ast);
case jni_serialized_expression_type::UNARY_OPERATION:
return compile_unary_expression(compiled_expr, jni_ast);
case jni_serialized_expression_type::BINARY_OPERATION:
return compile_binary_expression(compiled_expr, jni_ast);
default: throw std::invalid_argument("data is not a serialized AST expression");
}
}
/** Decode a serialized AST into a native libcudf AST and associated resources */
std::unique_ptr<cudf::jni::ast::compiled_expr> compile_serialized_ast(jni_serialized_ast &jni_ast) {
auto jni_expr_ptr = std::make_unique<cudf::jni::ast::compiled_expr>();
(void)compile_expression(*jni_expr_ptr, jni_ast);
if (!jni_ast.at_eof()) {
throw std::invalid_argument("Extra bytes at end of serialized AST");
}
return jni_expr_ptr;
}
} // anonymous namespace
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ast_CompiledExpression_compile(JNIEnv *env, jclass,
jbyteArray jni_data) {
JNI_NULL_CHECK(env, jni_data, "Serialized AST data is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jbyteArray jbytes(env, jni_data);
jni_serialized_ast jni_ast(jbytes);
auto compiled_expr_ptr = compile_serialized_ast(jni_ast);
jbytes.cancel();
return reinterpret_cast<jlong>(compiled_expr_ptr.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ast_CompiledExpression_computeColumn(JNIEnv *env,
jclass,
jlong j_ast,
jlong j_table) {
JNI_NULL_CHECK(env, j_ast, "Compiled AST pointer is null", 0);
JNI_NULL_CHECK(env, j_table, "Table view pointer is null", 0);
try {
cudf::jni::auto_set_device(env);
auto compiled_expr_ptr = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_ast);
auto tview_ptr = reinterpret_cast<cudf::table_view const *>(j_table);
std::unique_ptr<cudf::column> result =
cudf::compute_column(*tview_ptr, compiled_expr_ptr->get_top_expression());
return reinterpret_cast<jlong>(result.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_ast_CompiledExpression_destroy(JNIEnv *env, jclass,
jlong jni_handle) {
try {
cudf::jni::auto_set_device(env);
auto ptr = reinterpret_cast<cudf::jni::ast::compiled_expr *>(jni_handle);
delete ptr;
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/PackedColumnMetadataJni.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudf_jni_apis.hpp"
extern "C" {
JNIEXPORT jobject JNICALL Java_ai_rapids_cudf_PackedColumnMetadata_createMetadataDirectBuffer(
JNIEnv *env, jclass, jlong j_metadata_ptr) {
JNI_NULL_CHECK(env, j_metadata_ptr, "metadata is null", nullptr);
try {
auto metadata = reinterpret_cast<std::vector<uint8_t> *>(j_metadata_ptr);
return env->NewDirectByteBuffer(const_cast<uint8_t *>(metadata->data()), metadata->size());
}
CATCH_STD(env, nullptr);
}
JNIEXPORT void JNICALL
Java_ai_rapids_cudf_PackedColumnMetadata_closeMetadata(JNIEnv *env, jclass, jlong j_metadata_ptr) {
JNI_NULL_CHECK(env, j_metadata_ptr, "metadata is null", );
try {
auto metadata = reinterpret_cast<std::vector<uint8_t> *>(j_metadata_ptr);
delete metadata;
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/CudfJni.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sstream>
#include <cudf/copying.hpp>
#include <cudf/utilities/default_stream.hpp>
#include "cudf_jni_apis.hpp"
namespace {
// handles detaching a thread from the JVM when the thread terminates
class jvm_detach_on_destruct {
public:
explicit jvm_detach_on_destruct(JavaVM *jvm) : jvm{jvm} {}
~jvm_detach_on_destruct() { jvm->DetachCurrentThread(); }
private:
JavaVM *jvm;
};
} // anonymous namespace
namespace cudf {
namespace jni {
#ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
constexpr bool is_ptds_enabled{true};
#else
constexpr bool is_ptds_enabled{false};
#endif
static jclass Host_memory_buffer_jclass;
static jfieldID Host_buffer_address;
static jfieldID Host_buffer_length;
#define HOST_MEMORY_BUFFER_CLASS "ai/rapids/cudf/HostMemoryBuffer"
#define HOST_MEMORY_BUFFER_SIG(param_sig) "(" param_sig ")L" HOST_MEMORY_BUFFER_CLASS ";"
static bool cache_host_memory_buffer_jni(JNIEnv *env) {
jclass cls = env->FindClass(HOST_MEMORY_BUFFER_CLASS);
if (cls == nullptr) {
return false;
}
Host_buffer_address = env->GetFieldID(cls, "address", "J");
if (Host_buffer_address == nullptr) {
return false;
}
Host_buffer_length = env->GetFieldID(cls, "length", "J");
if (Host_buffer_length == nullptr) {
return false;
}
// Convert local reference to global so it cannot be garbage collected.
Host_memory_buffer_jclass = static_cast<jclass>(env->NewGlobalRef(cls));
if (Host_memory_buffer_jclass == nullptr) {
return false;
}
return true;
}
static void release_host_memory_buffer_jni(JNIEnv *env) {
Host_memory_buffer_jclass = del_global_ref(env, Host_memory_buffer_jclass);
}
jobject allocate_host_buffer(JNIEnv *env, jlong amount, jboolean prefer_pinned,
jobject host_memory_allocator) {
auto const host_memory_allocator_class = env->GetObjectClass(host_memory_allocator);
auto const allocateMethodId =
env->GetMethodID(host_memory_allocator_class, "allocate", HOST_MEMORY_BUFFER_SIG("JZ"));
jobject ret =
env->CallObjectMethod(host_memory_allocator, allocateMethodId, amount, prefer_pinned);
if (env->ExceptionCheck()) {
throw std::runtime_error("allocateHostBuffer threw an exception");
}
return ret;
}
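// Implied Java-side contract (restating the signature string above, not a new API):
// the allocator object must expose
//   ai.rapids.cudf.HostMemoryBuffer allocate(long amount, boolean preferPinned)
// and the address/length fields of the returned buffer are read back through the
// field IDs cached in cache_host_memory_buffer_jni().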
jlong get_host_buffer_address(JNIEnv *env, jobject buffer) {
return env->GetLongField(buffer, Host_buffer_address);
}
jlong get_host_buffer_length(JNIEnv *env, jobject buffer) {
return env->GetLongField(buffer, Host_buffer_length);
}
// Get the JNI environment, attaching the current thread to the JVM if necessary. If the thread
// needs to be attached, the thread will automatically detach when the thread terminates.
JNIEnv *get_jni_env(JavaVM *jvm) {
JNIEnv *env = nullptr;
jint rc = jvm->GetEnv(reinterpret_cast<void **>(&env), MINIMUM_JNI_VERSION);
if (rc == JNI_OK) {
return env;
}
if (rc == JNI_EDETACHED) {
JavaVMAttachArgs attach_args;
attach_args.version = MINIMUM_JNI_VERSION;
attach_args.name = const_cast<char *>("cudf thread");
attach_args.group = NULL;
if (jvm->AttachCurrentThreadAsDaemon(reinterpret_cast<void **>(&env), &attach_args) == JNI_OK) {
// use thread_local object to detach the thread from the JVM when thread terminates.
thread_local jvm_detach_on_destruct detacher(jvm);
} else {
throw std::runtime_error("unable to attach to JVM");
}
return env;
}
throw std::runtime_error("error detecting thread attach state with JVM");
}
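// Minimal usage sketch (hypothetical caller, not part of this file's API): a native
// worker thread that must call back into Java can do
//   JNIEnv *env = cudf::jni::get_jni_env(jvm); // attaches this thread if needed
//   env->CallVoidMethod(callback_obj, done_method);
// and the thread_local detacher above guarantees DetachCurrentThread runs when the
// thread exits.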
} // namespace jni
} // namespace cudf
extern "C" {
JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
JNIEnv *env;
if (vm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) {
return JNI_ERR;
}
// make sure libcudf and the JNI library are built with the same PTDS mode
if (cudf::is_ptds_enabled() != cudf::jni::is_ptds_enabled) {
std::ostringstream ss;
ss << "Libcudf is_ptds_enabled=" << cudf::is_ptds_enabled()
<< ", which does not match cudf jni is_ptds_enabled=" << cudf::jni::is_ptds_enabled
<< ". They need to be built with the same per-thread default stream flag.";
env->ThrowNew(env->FindClass("java/lang/RuntimeException"), ss.str().c_str());
return JNI_ERR;
}
// cache any class objects and method IDs here
if (!cudf::jni::cache_contiguous_table_jni(env)) {
if (!env->ExceptionCheck()) {
env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
"Unable to locate contiguous table methods needed by JNI");
}
return JNI_ERR;
}
if (!cudf::jni::cache_contig_split_group_by_result_jni(env)) {
if (!env->ExceptionCheck()) {
env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
"Unable to locate group by table result methods needed by JNI");
}
return JNI_ERR;
}
if (!cudf::jni::cache_host_memory_buffer_jni(env)) {
if (!env->ExceptionCheck()) {
env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
"Unable to locate host memory buffer methods needed by JNI");
}
return JNI_ERR;
}
if (!cudf::jni::cache_data_source_jni(env)) {
if (!env->ExceptionCheck()) {
env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
"Unable to locate data source helper methods needed by JNI");
}
return JNI_ERR;
}
return cudf::jni::MINIMUM_JNI_VERSION;
}
JNIEXPORT void JNI_OnUnload(JavaVM *vm, void *) {
JNIEnv *env = nullptr;
if (vm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) {
return;
}
// release cached class objects here.
cudf::jni::release_contiguous_table_jni(env);
cudf::jni::release_contig_split_group_by_result_jni(env);
cudf::jni::release_host_memory_buffer_jni(env);
}
JNIEXPORT jboolean JNICALL Java_ai_rapids_cudf_Cuda_isPtdsEnabled(JNIEnv *env, jclass) {
return cudf::jni::is_ptds_enabled;
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/DataSourceHelperJni.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/io/datasource.hpp>
#include "cudf_jni_apis.hpp"
#include "jni_utils.hpp"
namespace {
#define DATA_SOURCE_CLASS "ai/rapids/cudf/DataSource"
jclass DataSource_jclass;
jmethodID hostRead_method;
jmethodID hostReadBuff_method;
jmethodID onHostBufferDone_method;
jmethodID deviceRead_method;
} // anonymous namespace
namespace cudf {
namespace jni {
bool cache_data_source_jni(JNIEnv *env) {
jclass cls = env->FindClass(DATA_SOURCE_CLASS);
if (cls == nullptr) {
return false;
}
hostRead_method = env->GetMethodID(cls, "hostRead", "(JJJ)J");
if (hostRead_method == nullptr) {
return false;
}
hostReadBuff_method = env->GetMethodID(cls, "hostReadBuff", "(JJ)[J");
if (hostReadBuff_method == nullptr) {
return false;
}
onHostBufferDone_method = env->GetMethodID(cls, "onHostBufferDone", "(J)V");
if (onHostBufferDone_method == nullptr) {
return false;
}
deviceRead_method = env->GetMethodID(cls, "deviceRead", "(JJJJ)J");
if (deviceRead_method == nullptr) {
return false;
}
// Convert local reference to global so it cannot be garbage collected.
DataSource_jclass = static_cast<jclass>(env->NewGlobalRef(cls));
if (DataSource_jclass == nullptr) {
return false;
}
return true;
}
void release_data_source_jni(JNIEnv *env) {
DataSource_jclass = cudf::jni::del_global_ref(env, DataSource_jclass);
}
class host_buffer_done_callback {
public:
explicit host_buffer_done_callback(JavaVM *jvm, jobject ds, long id) : jvm(jvm), ds(ds), id(id) {}
host_buffer_done_callback(host_buffer_done_callback const &other) = delete;
host_buffer_done_callback(host_buffer_done_callback &&other)
: jvm(other.jvm), ds(other.ds), id(other.id) {
other.jvm = nullptr;
other.ds = nullptr;
other.id = -1;
}
host_buffer_done_callback &operator=(host_buffer_done_callback &&other) = delete;
host_buffer_done_callback &operator=(host_buffer_done_callback const &other) = delete;
~host_buffer_done_callback() {
    // Destructors must not throw, so any pending Java exception is left in place and
    // will surface when this thread returns to the JVM. That can be surprising, but
    // the exception is not lost.
if (jvm != nullptr) {
// We cannot throw an exception in the destructor, so this is really best effort
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) {
env->CallVoidMethod(this->ds, onHostBufferDone_method, id);
}
}
}
private:
JavaVM *jvm;
jobject ds;
long id;
};
class jni_datasource : public cudf::io::datasource {
public:
explicit jni_datasource(JNIEnv *env, jobject ds, size_t ds_size, bool device_read_supported,
size_t device_read_cutoff)
: ds_size(ds_size), device_read_supported(device_read_supported),
device_read_cutoff(device_read_cutoff) {
if (env->GetJavaVM(&jvm) < 0) {
throw std::runtime_error("GetJavaVM failed");
}
this->ds = add_global_ref(env, ds);
}
virtual ~jni_datasource() {
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) {
ds = del_global_ref(env, ds);
}
ds = nullptr;
}
std::unique_ptr<buffer> host_read(size_t offset, size_t size) override {
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) {
throw cudf::jni::jni_exception("Could not load JNIEnv");
}
jlongArray jbuffer_info =
static_cast<jlongArray>(env->CallObjectMethod(this->ds, hostReadBuff_method, offset, size));
if (env->ExceptionOccurred()) {
throw cudf::jni::jni_exception("Java exception in hostRead");
}
cudf::jni::native_jlongArray buffer_info(env, jbuffer_info);
auto ptr = reinterpret_cast<uint8_t *>(buffer_info[0]);
size_t length = buffer_info[1];
long id = buffer_info[2];
cudf::jni::host_buffer_done_callback cb(this->jvm, this->ds, id);
return std::make_unique<owning_buffer<cudf::jni::host_buffer_done_callback>>(std::move(cb), ptr,
length);
}
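  // A sketch of the contract implied above: Java's hostReadBuff(offset, size) returns a
  // jlong[3] of {buffer address, buffer length, callback id}; wrapping the callback in
  // the owning_buffer means onHostBufferDone(id) fires when libcudf destroys the
  // returned buffer, i.e. when it is done with the host memory.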
size_t host_read(size_t offset, size_t size, uint8_t *dst) override {
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) {
throw cudf::jni::jni_exception("Could not load JNIEnv");
}
jlong amount_read =
env->CallLongMethod(this->ds, hostRead_method, offset, size, reinterpret_cast<jlong>(dst));
if (env->ExceptionOccurred()) {
throw cudf::jni::jni_exception("Java exception in hostRead");
}
return amount_read;
}
size_t size() const override { return ds_size; }
bool supports_device_read() const override { return device_read_supported; }
bool is_device_read_preferred(size_t size) const override {
return device_read_supported && size >= device_read_cutoff;
}
size_t device_read(size_t offset, size_t size, uint8_t *dst,
rmm::cuda_stream_view stream) override {
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) != JNI_OK) {
throw cudf::jni::jni_exception("Could not load JNIEnv");
}
jlong amount_read =
env->CallLongMethod(this->ds, deviceRead_method, offset, size, reinterpret_cast<jlong>(dst),
reinterpret_cast<jlong>(stream.value()));
if (env->ExceptionOccurred()) {
throw cudf::jni::jni_exception("Java exception in deviceRead");
}
return amount_read;
}
std::future<size_t> device_read_async(size_t offset, size_t size, uint8_t *dst,
rmm::cuda_stream_view stream) override {
auto amount_read = device_read(offset, size, dst, stream);
    // The datasource interface requires a future, but the read above has already
    // completed, so satisfy it with an immediately-fulfilled promise.
std::promise<size_t> ret;
ret.set_value(amount_read);
return ret.get_future();
}
private:
size_t ds_size;
bool device_read_supported;
size_t device_read_cutoff;
JavaVM *jvm;
jobject ds;
};
} // namespace jni
} // namespace cudf
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_DataSourceHelper_createWrapperDataSource(
JNIEnv *env, jclass, jobject ds, jlong ds_size, jboolean device_read_supported,
jlong device_read_cutoff) {
JNI_NULL_CHECK(env, ds, "Null data source", 0);
try {
cudf::jni::auto_set_device(env);
auto source =
new cudf::jni::jni_datasource(env, ds, ds_size, device_read_supported, device_read_cutoff);
return reinterpret_cast<jlong>(source);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_DataSourceHelper_destroyWrapperDataSource(JNIEnv *env,
jclass,
jlong handle) {
try {
cudf::jni::auto_set_device(env);
if (handle != 0) {
auto source = reinterpret_cast<cudf::jni::jni_datasource *>(handle);
      delete source;
}
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ContiguousTableJni.cpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudf_jni_apis.hpp"
namespace {
#define CONTIGUOUS_TABLE_CLASS "ai/rapids/cudf/ContiguousTable"
#define CONTIGUOUS_TABLE_FACTORY_SIG(param_sig) "(" param_sig ")L" CONTIGUOUS_TABLE_CLASS ";"
jclass Contiguous_table_jclass;
jmethodID From_packed_table_method;
#define GROUP_BY_RESULT_CLASS "ai/rapids/cudf/ContigSplitGroupByResult"
jclass Contig_split_group_by_result_jclass;
jfieldID Contig_split_group_by_result_groups_field;
jfieldID Contig_split_group_by_result_uniq_key_columns_field;
} // anonymous namespace
namespace cudf {
namespace jni {
bool cache_contiguous_table_jni(JNIEnv *env) {
jclass cls = env->FindClass(CONTIGUOUS_TABLE_CLASS);
if (cls == nullptr) {
return false;
}
From_packed_table_method =
env->GetStaticMethodID(cls, "fromPackedTable", CONTIGUOUS_TABLE_FACTORY_SIG("JJJJJ"));
if (From_packed_table_method == nullptr) {
return false;
}
// Convert local reference to global so it cannot be garbage collected.
Contiguous_table_jclass = static_cast<jclass>(env->NewGlobalRef(cls));
if (Contiguous_table_jclass == nullptr) {
return false;
}
return true;
}
void release_contiguous_table_jni(JNIEnv *env) {
Contiguous_table_jclass = cudf::jni::del_global_ref(env, Contiguous_table_jclass);
}
bool cache_contig_split_group_by_result_jni(JNIEnv *env) {
jclass cls = env->FindClass(GROUP_BY_RESULT_CLASS);
if (cls == nullptr) {
return false;
}
Contig_split_group_by_result_groups_field =
env->GetFieldID(cls, "groups", "[Lai/rapids/cudf/ContiguousTable;");
if (Contig_split_group_by_result_groups_field == nullptr) {
return false;
}
Contig_split_group_by_result_uniq_key_columns_field =
env->GetFieldID(cls, "uniqKeyColumns", "[J");
if (Contig_split_group_by_result_uniq_key_columns_field == nullptr) {
return false;
}
// Convert local reference to global so it cannot be garbage collected.
Contig_split_group_by_result_jclass = static_cast<jclass>(env->NewGlobalRef(cls));
if (Contig_split_group_by_result_jclass == nullptr) {
return false;
}
return true;
}
void release_contig_split_group_by_result_jni(JNIEnv *env) {
Contig_split_group_by_result_jclass = del_global_ref(env, Contig_split_group_by_result_jclass);
}
jobject contig_split_group_by_result_from(JNIEnv *env, jobjectArray &groups) {
jobject gbr = env->AllocObject(Contig_split_group_by_result_jclass);
env->SetObjectField(gbr, Contig_split_group_by_result_groups_field, groups);
return gbr;
}
jobject contig_split_group_by_result_from(JNIEnv *env, jobjectArray &groups,
jlongArray &uniq_key_columns) {
jobject gbr = env->AllocObject(Contig_split_group_by_result_jclass);
env->SetObjectField(gbr, Contig_split_group_by_result_groups_field, groups);
env->SetObjectField(gbr, Contig_split_group_by_result_uniq_key_columns_field, uniq_key_columns);
return gbr;
}
jobject contiguous_table_from(JNIEnv *env, cudf::packed_columns &split, long row_count) {
jlong metadata_address = reinterpret_cast<jlong>(split.metadata.get());
jlong data_address = reinterpret_cast<jlong>(split.gpu_data->data());
jlong data_size = static_cast<jlong>(split.gpu_data->size());
jlong rmm_buffer_address = reinterpret_cast<jlong>(split.gpu_data.get());
jobject contig_table_obj = env->CallStaticObjectMethod(
Contiguous_table_jclass, From_packed_table_method, metadata_address, data_address, data_size,
rmm_buffer_address, row_count);
if (contig_table_obj != nullptr) {
split.metadata.release();
split.gpu_data.release();
}
return contig_table_obj;
}
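// Note the ownership hand-off above: metadata and gpu_data are release()d only after
// the Java ContiguousTable is successfully constructed, so on any JNI failure the
// packed_columns still owns (and will free) the memory.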
native_jobjectArray<jobject> contiguous_table_array(JNIEnv *env, jsize length) {
return native_jobjectArray<jobject>(
env, env->NewObjectArray(length, Contiguous_table_jclass, nullptr));
}
} // namespace jni
} // namespace cudf
extern "C" {
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ContiguousTable_createPackedMetadata(
JNIEnv *env, jclass, jlong j_table, jlong j_buffer_addr, jlong j_buffer_length) {
JNI_NULL_CHECK(env, j_table, "input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto table = reinterpret_cast<cudf::table_view const *>(j_table);
auto data_addr = reinterpret_cast<uint8_t const *>(j_buffer_addr);
auto data_size = static_cast<size_t>(j_buffer_length);
auto metadata_ptr = new std::vector<uint8_t>(cudf::pack_metadata(*table, data_addr, data_size));
return reinterpret_cast<jlong>(metadata_ptr);
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/ColumnViewJni.hpp
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf::jni {
/**
 * @brief Creates a deep copy of the exemplar column, with its validity set from the values
 * of the boolean `bool_column`.
*
* The bool_column must have the same number of rows as the exemplar column.
* The result column will have the same number of rows as the exemplar.
* For all indices `i` where the boolean column is `true`, the result column will have a valid value
* at index i. For all other values (i.e. `false` or `null`), the result column will have nulls.
*
* @param exemplar The column to be deep copied.
 * @param bool_column The boolean column whose values are to be used as the validity.
* @return Deep copy of the exemplar, with the replaced validity.
*/
std::unique_ptr<cudf::column>
new_column_with_boolean_column_as_validity(cudf::column_view const &exemplar,
cudf::column_view const &bool_column);
/**
* @brief Generates list offsets with lengths of each list.
*
* For example,
* Given a list column: [[1,2,3], [4,5], [6], [], [7,8]]
* The list lengths of it: [3, 2, 1, 0, 2]
* The list offsets of it: [0, 3, 5, 6, 6, 8]
*
 * @param list_length The column representing list lengths.
 * @return The column representing list offsets.
*/
std::unique_ptr<cudf::column>
generate_list_offsets(cudf::column_view const &list_length,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Perform a special treatment for the results of `cudf::lists::have_overlap` to produce the
* results that match with Spark's `arrays_overlap`.
*
* The function `arrays_overlap` of Apache Spark has a special behavior that needs to be addressed.
* In particular, the result of checking overlap between two lists will be a null element instead of
* a `false` value (as output by `cudf::lists::have_overlap`) if:
* - Both of the input lists have no non-null common element, and
* - They are both non-empty, and
* - Either of them contains null elements.
*
* This function performs post-processing on the results of `cudf::lists::have_overlap`, adding
* special treatment to produce an output column that matches with the behavior described above.
*
* @param lhs The input lists column for one side.
* @param rhs The input lists column for the other side.
* @param overlap_result The result column generated by checking list overlap in cudf.
*/
void post_process_list_overlap(cudf::column_view const &lhs, cudf::column_view const &rhs,
std::unique_ptr<cudf::column> const &overlap_result,
rmm::cuda_stream_view stream = cudf::get_default_stream());
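// For example (a sketch of the rule above): with an lhs row [1, null] and an rhs row
// [2, 3], cudf::lists::have_overlap yields false, but the two rows share no non-null
// element, are both non-empty, and one of them contains a null, so the
// Spark-compatible result is null instead of false.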
/**
* @brief Generates lists column by copying elements that are distinct by key from each input list
* row to the corresponding output row.
*
 * The input lists column must be given such that each list element is a struct holding a
 * <key, value> pair. With such input, a list containing distinct-by-key elements is defined
 * such that the keys of all elements in the list are distinct (i.e., any two keys always
 * compare unequal).
*
 * There is no validity check on the input. The caller is responsible for making sure that
* the input lists column has the right structure.
*
 * @return A new lists column in which the elements in each list are distinct by key.
*/
std::unique_ptr<cudf::column> lists_distinct_by_key(cudf::lists_column_view const &input,
rmm::cuda_stream_view stream);
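// For example (a sketch, not a contract from this header): an input row
// [{1, "a"}, {1, "b"}, {2, "c"}] maps to an output row whose keys are distinct,
// e.g. [{1, "b"}, {2, "c"}]; which of the duplicate-key elements survives is not
// specified here.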
} // namespace cudf::jni
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/aggregation128_utils.cu
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstddef>
#include <utility>
#include <vector>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include "aggregation128_utils.hpp"
namespace {
// Functor to reassemble a 128-bit value from four 64-bit chunks with overflow detection.
class chunk_assembler : public thrust::unary_function<cudf::size_type, __int128_t> {
public:
chunk_assembler(bool *overflows, uint64_t const *chunks0, uint64_t const *chunks1,
uint64_t const *chunks2, int64_t const *chunks3)
: overflows(overflows), chunks0(chunks0), chunks1(chunks1), chunks2(chunks2),
chunks3(chunks3) {}
__device__ __int128_t operator()(cudf::size_type i) const {
// Starting with the least significant input and moving to the most significant, propagate the
// upper 32-bits of the previous column into the next column, i.e.: propagate the "carry" bits
// of each 64-bit chunk into the next chunk.
uint64_t const c0 = chunks0[i];
uint64_t const c1 = chunks1[i] + (c0 >> 32);
uint64_t const c2 = chunks2[i] + (c1 >> 32);
int64_t const c3 = chunks3[i] + (c2 >> 32);
uint64_t const lower64 = (c1 << 32) | static_cast<uint32_t>(c0);
int64_t const upper64 = (c3 << 32) | static_cast<uint32_t>(c2);
// check for overflow by ensuring the sign bit matches the top carry bits
int32_t const replicated_sign_bit = static_cast<int32_t>(c3) >> 31;
int32_t const top_carry_bits = static_cast<int32_t>(c3 >> 32);
overflows[i] = (replicated_sign_bit != top_carry_bits);
return (static_cast<__int128_t>(upper64) << 64) | lower64;
}
private:
// output column for overflow detected
bool *const overflows;
// input columns for the four 64-bit values
uint64_t const *const chunks0;
uint64_t const *const chunks1;
uint64_t const *const chunks2;
int64_t const *const chunks3;
};
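// A worked example of the carry propagation above (values chosen by hand): if
// chunks0[i] = 0x1'00000000 and chunks1[i] = 0xF, then c1 = 0xF + (c0 >> 32) = 0x10
// and lower64 = (c1 << 32) | static_cast<uint32_t>(c0) = 0x0000001000000000. The
// overflow test then compares the sign bit of the assembled value (replicated via
// static_cast<int32_t>(c3) >> 31) with the carry bits that spilled past bit 127
// (c3 >> 32); any mismatch means the true sum does not fit in 128 bits.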
} // anonymous namespace
namespace cudf::jni {
// Extract a 32-bit chunk from a 128-bit value.
std::unique_ptr<cudf::column> extract_chunk32(cudf::column_view const &in_col, cudf::data_type type,
int chunk_idx, rmm::cuda_stream_view stream) {
CUDF_EXPECTS(in_col.type().id() == cudf::type_id::DECIMAL128, "not a 128-bit type");
CUDF_EXPECTS(chunk_idx >= 0 && chunk_idx < 4, "invalid chunk index");
CUDF_EXPECTS(type.id() == cudf::type_id::INT32 || type.id() == cudf::type_id::UINT32,
"not a 32-bit integer type");
auto const num_rows = in_col.size();
auto out_col =
cudf::make_fixed_width_column(type, num_rows, copy_bitmask(in_col), in_col.null_count());
auto out_view = out_col->mutable_view();
auto const in_begin = in_col.begin<int32_t>();
// Build an iterator for every fourth 32-bit value, i.e.: one "chunk" of a __int128_t value
thrust::transform_iterator transform_iter{thrust::counting_iterator{0},
[] __device__(auto i) { return i * 4; }};
thrust::permutation_iterator stride_iter{in_begin + chunk_idx, transform_iter};
thrust::copy(rmm::exec_policy(stream), stride_iter, stride_iter + num_rows,
out_view.data<int32_t>());
return out_col;
}
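// Sketch of the access pattern above: viewing each DECIMAL128 row as four 32-bit words
// [w0 w1 w2 w3], calling extract_chunk32 with chunk_idx == 1 copies in[1], in[5],
// in[9], ... (i.e. w1 of every row) into the output, so four calls with chunk_idx
// 0..3 shred the column into the per-chunk columns whose 64-bit sums
// assemble128_from_sum() below recombines.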
// Reassemble a column of 128-bit values from four 64-bit integer columns with overflow detection.
std::unique_ptr<cudf::table> assemble128_from_sum(cudf::table_view const &chunks_table,
cudf::data_type output_type,
rmm::cuda_stream_view stream) {
CUDF_EXPECTS(output_type.id() == cudf::type_id::DECIMAL128, "not a 128-bit type");
CUDF_EXPECTS(chunks_table.num_columns() == 4, "must be 4 column table");
auto const num_rows = chunks_table.num_rows();
auto const chunks0 = chunks_table.column(0);
auto const chunks1 = chunks_table.column(1);
auto const chunks2 = chunks_table.column(2);
auto const chunks3 = chunks_table.column(3);
CUDF_EXPECTS(cudf::size_of(chunks0.type()) == 8 && cudf::size_of(chunks1.type()) == 8 &&
cudf::size_of(chunks2.type()) == 8 &&
chunks3.type().id() == cudf::type_id::INT64,
"chunks type mismatch");
std::vector<std::unique_ptr<cudf::column>> columns;
columns.push_back(cudf::make_fixed_width_column(cudf::data_type{cudf::type_id::BOOL8}, num_rows,
copy_bitmask(chunks0), chunks0.null_count()));
columns.push_back(cudf::make_fixed_width_column(output_type, num_rows, copy_bitmask(chunks0),
chunks0.null_count()));
auto overflows_view = columns[0]->mutable_view();
auto assembled_view = columns[1]->mutable_view();
thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(num_rows),
assembled_view.begin<__int128_t>(),
chunk_assembler(overflows_view.begin<bool>(), chunks0.begin<uint64_t>(),
chunks1.begin<uint64_t>(), chunks2.begin<uint64_t>(),
chunks3.begin<int64_t>()));
return std::make_unique<cudf::table>(std::move(columns));
}
} // namespace cudf::jni
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/nvtx_common.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace cudf {
namespace jni {
struct java_domain {
static constexpr char const *name{"Java"};
};
} // namespace jni
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/CudaJni.cpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/error.hpp>
#include <rmm/device_buffer.hpp>
#ifdef CUDF_JNI_ENABLE_PROFILING
#include <cuda_profiler_api.h>
#endif
#include "jni_utils.hpp"
namespace {
/** The CUDA device that should be used by all threads using cudf */
int Cudf_device{cudaInvalidDeviceId};
thread_local int Thread_device = cudaInvalidDeviceId;
} // anonymous namespace
namespace cudf {
namespace jni {
/** Set the device to use for cudf */
void set_cudf_device(int device) {
Cudf_device = device;
}
/**
* If a cudf device has been specified then this ensures the calling thread
* is using the same device.
*/
void auto_set_device(JNIEnv *env) {
if (Cudf_device != cudaInvalidDeviceId) {
if (Thread_device != Cudf_device) {
cudaError_t cuda_status = cudaSetDevice(Cudf_device);
jni_cuda_check(env, cuda_status);
Thread_device = Cudf_device;
}
}
}
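// Typical call pattern, as seen at the top of every JNI entry point below:
//   cudf::jni::auto_set_device(env);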
/** Fills all the bytes in the buffer 'buf' with 'value'. */
void device_memset_async(JNIEnv *env, rmm::device_buffer &buf, char value) {
cudaError_t cuda_status = cudaMemsetAsync((void *)buf.data(), value, buf.size());
jni_cuda_check(env, cuda_status);
}
} // namespace jni
} // namespace cudf
extern "C" {
JNIEXPORT jobject JNICALL Java_ai_rapids_cudf_Cuda_memGetInfo(JNIEnv *env, jclass clazz) {
try {
cudf::jni::auto_set_device(env);
size_t free, total;
CUDF_CUDA_TRY(cudaMemGetInfo(&free, &total));
jclass info_class = env->FindClass("Lai/rapids/cudf/CudaMemInfo;");
if (info_class == NULL) {
return NULL;
}
jmethodID ctor_id = env->GetMethodID(info_class, "<init>", "(JJ)V");
if (ctor_id == NULL) {
return NULL;
}
jobject info_obj = env->NewObject(info_class, ctor_id, (jlong)free, (jlong)total);
// No need to check for exceptions of null return value as we are just handing the object back
// to the JVM which will handle throwing any exceptions that happened in the constructor.
return info_obj;
}
CATCH_STD(env, nullptr);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Cuda_hostAllocPinned(JNIEnv *env, jclass, jlong size) {
try {
cudf::jni::auto_set_device(env);
void *ret = nullptr;
CUDF_CUDA_TRY(cudaMallocHost(&ret, size));
return reinterpret_cast<jlong>(ret);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_freePinned(JNIEnv *env, jclass, jlong ptr) {
try {
cudf::jni::auto_set_device(env);
CUDF_CUDA_TRY(cudaFreeHost(reinterpret_cast<void *>(ptr)));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_memset(JNIEnv *env, jclass, jlong dst, jbyte value,
jlong count, jint kind) {
JNI_NULL_CHECK(env, dst, "dst memory pointer is null", );
try {
cudf::jni::auto_set_device(env);
CUDF_CUDA_TRY(cudaMemsetAsync((void *)dst, value, count));
CUDF_CUDA_TRY(cudaStreamSynchronize(0));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_asyncMemset(JNIEnv *env, jclass, jlong dst,
jbyte value, jlong count, jint kind) {
JNI_NULL_CHECK(env, dst, "dst memory pointer is null", );
try {
cudf::jni::auto_set_device(env);
CUDF_CUDA_TRY(cudaMemsetAsync((void *)dst, value, count));
}
CATCH_STD(env, );
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Cuda_getDevice(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
jint dev;
CUDF_CUDA_TRY(cudaGetDevice(&dev));
return dev;
}
CATCH_STD(env, -2);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Cuda_getDeviceCount(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
jint count;
CUDF_CUDA_TRY(cudaGetDeviceCount(&count));
return count;
}
CATCH_STD(env, -2);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_setDevice(JNIEnv *env, jclass, jint dev) {
try {
if (Cudf_device != cudaInvalidDeviceId && dev != Cudf_device) {
cudf::jni::throw_java_exception(env, cudf::jni::CUDF_ERROR_CLASS,
"Cannot change device after RMM init");
}
CUDF_CUDA_TRY(cudaSetDevice(dev));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_autoSetDevice(JNIEnv *env, jclass, jint dev) {
try {
cudf::jni::auto_set_device(env);
}
CATCH_STD(env, );
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Cuda_getDriverVersion(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
jint driver_version;
CUDF_CUDA_TRY(cudaDriverGetVersion(&driver_version));
return driver_version;
}
CATCH_STD(env, -2);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Cuda_getRuntimeVersion(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
jint runtime_version;
CUDF_CUDA_TRY(cudaRuntimeGetVersion(&runtime_version));
return runtime_version;
}
CATCH_STD(env, -2);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Cuda_getNativeComputeMode(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
int device;
CUDF_CUDA_TRY(cudaGetDevice(&device));
cudaDeviceProp device_prop;
CUDF_CUDA_TRY(cudaGetDeviceProperties(&device_prop, device));
return device_prop.computeMode;
}
CATCH_STD(env, -2);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Cuda_getComputeCapabilityMajor(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
int device;
CUDF_CUDA_TRY(::cudaGetDevice(&device));
int attribute_value;
CUDF_CUDA_TRY(
::cudaDeviceGetAttribute(&attribute_value, ::cudaDevAttrComputeCapabilityMajor, device));
return attribute_value;
}
CATCH_STD(env, -2);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Cuda_getComputeCapabilityMinor(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
int device;
CUDF_CUDA_TRY(::cudaGetDevice(&device));
int attribute_value;
CUDF_CUDA_TRY(
::cudaDeviceGetAttribute(&attribute_value, ::cudaDevAttrComputeCapabilityMinor, device));
return attribute_value;
}
CATCH_STD(env, -2);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_freeZero(JNIEnv *env, jclass) {
try {
cudf::jni::auto_set_device(env);
CUDF_CUDA_TRY(cudaFree(0));
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Cuda_createStream(JNIEnv *env, jclass,
jboolean isNonBlocking) {
try {
cudf::jni::auto_set_device(env);
cudaStream_t stream = nullptr;
auto flags = isNonBlocking ? cudaStreamNonBlocking : cudaStreamDefault;
CUDF_CUDA_TRY(cudaStreamCreateWithFlags(&stream, flags));
return reinterpret_cast<jlong>(stream);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_destroyStream(JNIEnv *env, jclass, jlong jstream) {
try {
cudf::jni::auto_set_device(env);
auto stream = reinterpret_cast<cudaStream_t>(jstream);
CUDF_CUDA_TRY(cudaStreamDestroy(stream));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_streamWaitEvent(JNIEnv *env, jclass, jlong jstream,
jlong jevent) {
try {
cudf::jni::auto_set_device(env);
auto stream = reinterpret_cast<cudaStream_t>(jstream);
auto event = reinterpret_cast<cudaEvent_t>(jevent);
CUDF_CUDA_TRY(cudaStreamWaitEvent(stream, event, 0));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_streamSynchronize(JNIEnv *env, jclass,
jlong jstream) {
try {
cudf::jni::auto_set_device(env);
auto stream = reinterpret_cast<cudaStream_t>(jstream);
CUDF_CUDA_TRY(cudaStreamSynchronize(stream));
}
CATCH_STD(env, );
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Cuda_createEvent(JNIEnv *env, jclass,
jboolean enableTiming,
jboolean blockingSync) {
try {
cudf::jni::auto_set_device(env);
cudaEvent_t event = nullptr;
unsigned int flags = 0;
if (!enableTiming) {
flags = flags | cudaEventDisableTiming;
}
if (blockingSync) {
flags = flags | cudaEventBlockingSync;
}
CUDF_CUDA_TRY(cudaEventCreateWithFlags(&event, flags));
return reinterpret_cast<jlong>(event);
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_destroyEvent(JNIEnv *env, jclass, jlong jevent) {
try {
cudf::jni::auto_set_device(env);
auto event = reinterpret_cast<cudaEvent_t>(jevent);
CUDF_CUDA_TRY(cudaEventDestroy(event));
}
CATCH_STD(env, );
}
JNIEXPORT jboolean JNICALL Java_ai_rapids_cudf_Cuda_eventQuery(JNIEnv *env, jclass, jlong jevent) {
try {
cudf::jni::auto_set_device(env);
auto event = reinterpret_cast<cudaEvent_t>(jevent);
auto result = cudaEventQuery(event);
if (result == cudaSuccess) {
return true;
} else if (result == cudaErrorNotReady) {
return false;
} // else
CUDF_CUDA_TRY(result);
}
CATCH_STD(env, false);
return false;
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_eventRecord(JNIEnv *env, jclass, jlong jevent,
jlong jstream) {
try {
cudf::jni::auto_set_device(env);
auto event = reinterpret_cast<cudaEvent_t>(jevent);
auto stream = reinterpret_cast<cudaStream_t>(jstream);
CUDF_CUDA_TRY(cudaEventRecord(event, stream));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_eventSynchronize(JNIEnv *env, jclass,
jlong jevent) {
try {
cudf::jni::auto_set_device(env);
auto event = reinterpret_cast<cudaEvent_t>(jevent);
CUDF_CUDA_TRY(cudaEventSynchronize(event));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_memcpyOnStream(JNIEnv *env, jclass, jlong jdst,
jlong jsrc, jlong count, jint jkind,
jlong jstream) {
if (count == 0) {
return;
}
JNI_ARG_CHECK(env, jdst != 0, "dst memory pointer is null", );
JNI_ARG_CHECK(env, jsrc != 0, "src memory pointer is null", );
try {
cudf::jni::auto_set_device(env);
auto dst = reinterpret_cast<void *>(jdst);
auto src = reinterpret_cast<void *>(jsrc);
auto kind = static_cast<cudaMemcpyKind>(jkind);
auto stream = reinterpret_cast<cudaStream_t>(jstream);
CUDF_CUDA_TRY(cudaMemcpyAsync(dst, src, count, kind, stream));
CUDF_CUDA_TRY(cudaStreamSynchronize(stream));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_asyncMemcpyOnStream(JNIEnv *env, jclass, jlong jdst,
jlong jsrc, jlong count,
jint jkind, jlong jstream) {
if (count == 0) {
return;
}
JNI_ARG_CHECK(env, jdst != 0, "dst memory pointer is null", );
JNI_ARG_CHECK(env, jsrc != 0, "src memory pointer is null", );
try {
cudf::jni::auto_set_device(env);
auto dst = reinterpret_cast<void *>(jdst);
auto src = reinterpret_cast<void *>(jsrc);
auto kind = static_cast<cudaMemcpyKind>(jkind);
auto stream = reinterpret_cast<cudaStream_t>(jstream);
CUDF_CUDA_TRY(cudaMemcpyAsync(dst, src, count, kind, stream));
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_profilerStart(JNIEnv *env, jclass clazz) {
#ifdef CUDF_JNI_ENABLE_PROFILING
try {
cudaProfilerStart();
}
CATCH_STD(env, );
#else
cudf::jni::throw_java_exception(env, cudf::jni::CUDF_ERROR_CLASS,
"This library was built without CUDA profiler support.");
#endif
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_profilerStop(JNIEnv *env, jclass clazz) {
#ifdef CUDF_JNI_ENABLE_PROFILING
try {
cudaProfilerStop();
}
CATCH_STD(env, );
#else
cudf::jni::throw_java_exception(env, cudf::jni::CUDF_ERROR_CLASS,
"This library was built without CUDA profiler support.");
#endif
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Cuda_deviceSynchronize(JNIEnv *env, jclass clazz) {
try {
cudf::jni::auto_set_device(env);
CUDF_CUDA_TRY(cudaDeviceSynchronize());
}
CATCH_STD(env, );
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/main/native
|
rapidsai_public_repos/cudf/java/src/main/native/src/TableJni.cpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <arrow/io/api.h>
#include <arrow/ipc/api.h>
#include <cudf/aggregation.hpp>
#include <cudf/column/column.hpp>
#include <cudf/concatenate.hpp>
#include <cudf/contiguous_split.hpp>
#include <cudf/copying.hpp>
#include <cudf/filling.hpp>
#include <cudf/groupby.hpp>
#include <cudf/hashing.hpp>
#include <cudf/interop.hpp>
#include <cudf/io/avro.hpp>
#include <cudf/io/csv.hpp>
#include <cudf/io/data_sink.hpp>
#include <cudf/io/json.hpp>
#include <cudf/io/orc.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/join.hpp>
#include <cudf/lists/explode.hpp>
#include <cudf/merge.hpp>
#include <cudf/partitioning.hpp>
#include <cudf/replace.hpp>
#include <cudf/reshape.hpp>
#include <cudf/rolling.hpp>
#include <cudf/search.hpp>
#include <cudf/sorting.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/iterator/counting_iterator.h>
#include "csv_chunked_writer.hpp"
#include "cudf_jni_apis.hpp"
#include "dtype_utils.hpp"
#include "jni_compiled_expr.hpp"
#include "jni_utils.hpp"
#include "jni_writer_data_sink.hpp"
#include "row_conversion.hpp"
namespace cudf {
namespace jni {
/**
* @brief The base class for table writer.
*
* By storing a pointer to this base class instead of pointer to specific writer class, we can
* retrieve common data like `sink` and `stats` for any derived writer class without the need of
* casting or knowing its type.
*/
struct jni_table_writer_handle_base {
explicit jni_table_writer_handle_base(
std::unique_ptr<jni_writer_data_sink> &&sink_,
std::shared_ptr<cudf::io::writer_compression_statistics> &&stats_)
: sink{std::move(sink_)}, stats{std::move(stats_)} {}
std::unique_ptr<jni_writer_data_sink> sink;
std::shared_ptr<cudf::io::writer_compression_statistics> stats;
};
template <typename Writer>
struct jni_table_writer_handle final : public jni_table_writer_handle_base {
explicit jni_table_writer_handle(std::unique_ptr<Writer> &&writer_)
: jni_table_writer_handle_base(nullptr, nullptr), writer{std::move(writer_)} {}
explicit jni_table_writer_handle(
std::unique_ptr<Writer> &&writer_, std::unique_ptr<jni_writer_data_sink> &&sink_,
std::shared_ptr<cudf::io::writer_compression_statistics> &&stats_)
: jni_table_writer_handle_base(std::move(sink_), std::move(stats_)),
writer{std::move(writer_)} {}
std::unique_ptr<Writer> writer;
};
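// A minimal sketch of why the base class is useful: code holding only a
// jni_table_writer_handle_base* can reach the shared sink/stats members
// without knowing the concrete writer type (hypothetical usage):
//   jni_table_writer_handle_base *handle = ...;
//   if (handle->stats) { auto ratio = handle->stats->compression_ratio(); }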
typedef jni_table_writer_handle<cudf::io::parquet_chunked_writer> native_parquet_writer_handle;
typedef jni_table_writer_handle<cudf::io::orc_chunked_writer> native_orc_writer_handle;
class native_arrow_ipc_writer_handle final {
public:
explicit native_arrow_ipc_writer_handle(const std::vector<std::string> &col_names,
const std::string &file_name)
: initialized(false), column_names(col_names), file_name(file_name) {}
explicit native_arrow_ipc_writer_handle(const std::vector<std::string> &col_names,
const std::shared_ptr<arrow::io::OutputStream> &sink)
: initialized(false), column_names(col_names), file_name(""), sink(sink) {}
private:
bool initialized;
std::vector<std::string> column_names;
std::vector<cudf::column_metadata> columns_meta;
std::string file_name;
std::shared_ptr<arrow::io::OutputStream> sink;
std::shared_ptr<arrow::ipc::RecordBatchWriter> writer;
public:
void write(std::shared_ptr<arrow::Table> &arrow_tab, int64_t max_chunk) {
if (!initialized) {
if (!sink) {
auto tmp_sink = arrow::io::FileOutputStream::Open(file_name);
if (!tmp_sink.ok()) {
throw std::runtime_error(tmp_sink.status().message());
}
sink = *tmp_sink;
}
// Arrow also offers a file writer (arrow::ipc::MakeFileWriter) that supports metadata
auto tmp_writer = arrow::ipc::MakeStreamWriter(sink, arrow_tab->schema());
if (!tmp_writer.ok()) {
throw std::runtime_error(tmp_writer.status().message());
}
writer = *tmp_writer;
initialized = true;
}
if (arrow_tab->num_rows() == 0) {
// Arrow C++ IPC writer will not write an empty batch in the case of an
// empty table, so we need to write an empty batch explicitly.
// For more please see https://issues.apache.org/jira/browse/ARROW-17912.
auto empty_batch = arrow::RecordBatch::MakeEmpty(arrow_tab->schema());
auto status = writer->WriteRecordBatch(*(*empty_batch));
if (!status.ok()) {
throw std::runtime_error("writer failed to write batch with the following error: " +
status.ToString());
}
} else {
auto status = writer->WriteTable(*arrow_tab, max_chunk);
if (!status.ok()) {
throw std::runtime_error("writer failed to write table with the following error: " +
status.ToString());
};
}
}
void close() {
if (initialized) {
{
auto status = writer->Close();
if (!status.ok()) {
throw std::runtime_error("Closing writer failed with the following error: " +
status.ToString());
}
}
{
auto status = sink->Close();
if (!status.ok()) {
throw std::runtime_error("Closing sink failed with the following error: " +
status.ToString());
}
}
}
initialized = false;
}
std::vector<cudf::column_metadata> get_column_metadata(const cudf::table_view &tview) {
if (!column_names.empty() && columns_meta.empty()) {
// Rebuild the structure of column meta according to table schema.
// All the tables written by this writer should share the same schema,
// so build column metadata only once.
columns_meta.reserve(tview.num_columns());
size_t idx = 0;
for (auto itr = tview.begin(); itr < tview.end(); ++itr) {
// A name is consumed for every column except the data child of a LIST
// column; list children are unnamed and the offsets child only gets stub
// metadata.
columns_meta.push_back(build_one_column_meta(*itr, idx));
}
if (idx < column_names.size()) {
throw cudf::jni::jni_exception("Too many column names are provided.");
}
}
return columns_meta;
}
private:
cudf::column_metadata build_one_column_meta(const cudf::column_view &cview, size_t &idx,
const bool consume_name = true) {
auto col_meta = cudf::column_metadata{};
if (consume_name) {
col_meta.name = get_column_name(idx++);
}
// Process children
if (cview.type().id() == cudf::type_id::LIST) {
// list type:
// - requires a stub metadata for offset column(index: 0).
// - does not require a name for the child column(index 1).
col_meta.children_meta = {{}, build_one_column_meta(cview.child(1), idx, false)};
} else if (cview.type().id() == cudf::type_id::STRUCT) {
// struct type always consumes the column names.
col_meta.children_meta.reserve(cview.num_children());
for (auto itr = cview.child_begin(); itr < cview.child_end(); ++itr) {
col_meta.children_meta.push_back(build_one_column_meta(*itr, idx));
}
} else if (cview.type().id() == cudf::type_id::DICTIONARY32) {
// dictionary columns are not yet supported by the JNI layer
throw cudf::jni::jni_exception("Unsupported type 'DICTIONARY32'");
}
return col_meta;
}
std::string &get_column_name(const size_t idx) {
if (idx >= column_names.size()) {
throw cudf::jni::jni_exception("Missing names for columns or nested struct columns");
}
return column_names[idx];
}
};
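// A worked example of the name-consumption rules implemented by
// build_one_column_meta above, assuming a table with columns
// STRUCT<a: INT32, b: INT32> and LIST<INT32>:
//   column_names = {"s", "a", "b", "l"}
// "s" is consumed by the struct, "a" and "b" by its children, and "l" by the
// list column; the list's offsets child receives stub metadata and its data
// child consumes no name.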
class jni_arrow_output_stream final : public arrow::io::OutputStream {
public:
explicit jni_arrow_output_stream(JNIEnv *env, jobject callback, jobject host_memory_allocator) {
if (env->GetJavaVM(&jvm) < 0) {
throw std::runtime_error("GetJavaVM failed");
}
jclass cls = env->GetObjectClass(callback);
if (cls == nullptr) {
throw cudf::jni::jni_exception("class not found");
}
handle_buffer_method =
env->GetMethodID(cls, "handleBuffer", "(Lai/rapids/cudf/HostMemoryBuffer;J)V");
if (handle_buffer_method == nullptr) {
throw cudf::jni::jni_exception("handleBuffer method");
}
this->callback = add_global_ref(env, callback);
this->host_memory_allocator = add_global_ref(env, host_memory_allocator);
}
virtual ~jni_arrow_output_stream() {
// This should normally be called by a JVM thread. If the JVM environment is missing then this
// is likely being triggered by the C++ runtime during shutdown. In that case the JVM may
// already be destroyed and this thread should not try to attach to get an environment.
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) {
callback = del_global_ref(env, callback);
current_buffer = del_global_ref(env, current_buffer);
host_memory_allocator = del_global_ref(env, host_memory_allocator);
}
callback = nullptr;
current_buffer = nullptr;
host_memory_allocator = nullptr;
}
arrow::Status Write(const std::shared_ptr<arrow::Buffer> &data) override {
return Write(data->data(), data->size());
}
arrow::Status Write(const void *data, int64_t nbytes) override {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
int64_t left_to_copy = nbytes;
const char *copy_from = static_cast<const char *>(data);
while (left_to_copy > 0) {
long buffer_amount_available = current_buffer_len - current_buffer_written;
if (buffer_amount_available <= 0) {
// should never be < 0, but just to be safe
rotate_buffer(env);
buffer_amount_available = current_buffer_len - current_buffer_written;
}
long amount_to_copy =
left_to_copy < buffer_amount_available ? left_to_copy : buffer_amount_available;
char *copy_to = current_buffer_data + current_buffer_written;
std::memcpy(copy_to, copy_from, amount_to_copy);
copy_from = copy_from + amount_to_copy;
current_buffer_written += amount_to_copy;
total_written += amount_to_copy;
left_to_copy -= amount_to_copy;
}
return arrow::Status::OK();
}
arrow::Status Flush() override {
if (current_buffer_written > 0) {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
handle_buffer(env, current_buffer, current_buffer_written);
current_buffer = del_global_ref(env, current_buffer);
current_buffer_len = 0;
current_buffer_data = nullptr;
current_buffer_written = 0;
}
return arrow::Status::OK();
}
arrow::Status Close() override {
auto ret = Flush();
is_closed = true;
return ret;
}
arrow::Status Abort() override {
is_closed = true;
return arrow::Status::OK();
}
arrow::Result<int64_t> Tell() const override { return total_written; }
bool closed() const override { return is_closed; }
private:
void rotate_buffer(JNIEnv *env) {
if (current_buffer != nullptr) {
handle_buffer(env, current_buffer, current_buffer_written);
}
current_buffer = del_global_ref(env, current_buffer);
jobject tmp_buffer = allocate_host_buffer(env, alloc_size, true, host_memory_allocator);
current_buffer = add_global_ref(env, tmp_buffer);
current_buffer_len = get_host_buffer_length(env, current_buffer);
current_buffer_data = reinterpret_cast<char *>(get_host_buffer_address(env, current_buffer));
current_buffer_written = 0;
}
void handle_buffer(JNIEnv *env, jobject buffer, jlong len) {
env->CallVoidMethod(callback, handle_buffer_method, buffer, len);
if (env->ExceptionCheck()) {
throw std::runtime_error("handleBuffer threw an exception");
}
}
JavaVM *jvm;
jobject callback;
jmethodID handle_buffer_method;
jobject current_buffer = nullptr;
char *current_buffer_data = nullptr;
long current_buffer_len = 0;
long current_buffer_written = 0;
int64_t total_written = 0;
long alloc_size = MINIMUM_WRITE_BUFFER_SIZE;
bool is_closed = false;
jobject host_memory_allocator;
};
class jni_arrow_input_stream final : public arrow::io::InputStream {
public:
explicit jni_arrow_input_stream(JNIEnv *env, jobject callback)
: mm(arrow::default_cpu_memory_manager()) {
if (env->GetJavaVM(&jvm) < 0) {
throw std::runtime_error("GetJavaVM failed");
}
jclass cls = env->GetObjectClass(callback);
if (cls == nullptr) {
throw cudf::jni::jni_exception("class not found");
}
read_into_method = env->GetMethodID(cls, "readInto", "(JJ)J");
if (read_into_method == nullptr) {
throw cudf::jni::jni_exception("readInto method");
}
this->callback = add_global_ref(env, callback);
}
virtual ~jni_arrow_input_stream() {
// This should normally be called by a JVM thread. If the JVM environment is missing then this
// is likely being triggered by the C++ runtime during shutdown. In that case the JVM may
// already be destroyed and this thread should not try to attach to get an environment.
JNIEnv *env = nullptr;
if (jvm->GetEnv(reinterpret_cast<void **>(&env), cudf::jni::MINIMUM_JNI_VERSION) == JNI_OK) {
callback = del_global_ref(env, callback);
}
callback = nullptr;
}
arrow::Result<int64_t> Read(int64_t nbytes, void *out) override {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
jlong ret = read_into(env, ptr_as_jlong(out), nbytes);
total_read += ret;
return ret;
}
arrow::Result<std::shared_ptr<arrow::Buffer>> Read(int64_t nbytes) override {
JNIEnv *env = cudf::jni::get_jni_env(jvm);
arrow::Result<std::shared_ptr<arrow::ResizableBuffer>> tmp_buffer =
arrow::AllocateResizableBuffer(nbytes);
if (!tmp_buffer.ok()) {
return tmp_buffer;
}
jlong amount_read = read_into(env, ptr_as_jlong((*tmp_buffer)->data()), nbytes);
arrow::Status stat = (*tmp_buffer)->Resize(amount_read);
if (!stat.ok()) {
return stat;
}
return tmp_buffer;
}
arrow::Status Close() override {
is_closed = true;
return arrow::Status::OK();
}
arrow::Status Abort() override {
is_closed = true;
return arrow::Status::OK();
}
arrow::Result<int64_t> Tell() const override { return total_read; }
bool closed() const override { return is_closed; }
private:
jlong read_into(JNIEnv *env, jlong addr, jlong len) {
jlong ret = env->CallLongMethod(callback, read_into_method, addr, len);
if (env->ExceptionCheck()) {
throw std::runtime_error("readInto threw an exception");
}
return ret;
}
JavaVM *jvm;
jobject callback;
jmethodID read_into_method;
int64_t total_read = 0;
bool is_closed = false;
std::vector<uint8_t> tmp_buffer;
std::shared_ptr<arrow::MemoryManager> mm;
};
class native_arrow_ipc_reader_handle final {
public:
explicit native_arrow_ipc_reader_handle(const std::string &file_name) {
auto tmp_source = arrow::io::ReadableFile::Open(file_name);
if (!tmp_source.ok()) {
throw std::runtime_error(tmp_source.status().message());
}
source = *tmp_source;
auto tmp_reader = arrow::ipc::RecordBatchStreamReader::Open(source);
if (!tmp_reader.ok()) {
throw std::runtime_error(tmp_reader.status().message());
}
reader = *tmp_reader;
}
explicit native_arrow_ipc_reader_handle(std::shared_ptr<arrow::io::InputStream> source)
: source(source) {
auto tmp_reader = arrow::ipc::RecordBatchStreamReader::Open(source);
if (!tmp_reader.ok()) {
throw std::runtime_error(tmp_reader.status().message());
}
reader = *tmp_reader;
}
std::shared_ptr<arrow::Table> next(int32_t row_target) {
int64_t total_rows = 0;
bool done = false;
std::vector<std::shared_ptr<arrow::RecordBatch>> batches;
while (!done) {
arrow::Result<std::shared_ptr<arrow::RecordBatch>> batch = reader->Next();
if (!batch.ok()) {
throw std::runtime_error(batch.status().message());
}
if (!*batch) {
done = true;
} else {
batches.push_back(*batch);
total_rows += (*batch)->num_rows();
done = (total_rows >= row_target);
}
}
if (batches.empty()) {
// EOF
return std::unique_ptr<arrow::Table>();
}
arrow::Result<std::shared_ptr<arrow::Table>> tmp =
arrow::Table::FromRecordBatches(reader->schema(), batches);
if (!tmp.ok()) {
throw std::runtime_error(tmp.status().message());
}
return *tmp;
}
std::shared_ptr<arrow::io::InputStream> source;
std::shared_ptr<arrow::ipc::RecordBatchReader> reader;
void close() {
auto status = source->Close();
if (!status.ok()) {
throw std::runtime_error("Closing source failed with the following error: " +
status.ToString());
}
}
};
jlongArray convert_table_for_return(JNIEnv *env, std::unique_ptr<cudf::table> &&table_result,
std::vector<std::unique_ptr<cudf::column>> &&extra_columns) {
std::vector<std::unique_ptr<cudf::column>> ret = table_result->release();
int table_cols = ret.size();
int num_columns = table_cols + extra_columns.size();
cudf::jni::native_jlongArray outcol_handles(env, num_columns);
std::transform(ret.begin(), ret.end(), outcol_handles.begin(),
[](auto &col) { return release_as_jlong(col); });
std::transform(extra_columns.begin(), extra_columns.end(), outcol_handles.begin() + table_cols,
[](auto &col) { return release_as_jlong(col); });
return outcol_handles.get_jArray();
}
jlongArray convert_table_for_return(JNIEnv *env, std::unique_ptr<cudf::table> &table_result,
std::vector<std::unique_ptr<cudf::column>> &&extra_columns) {
return convert_table_for_return(env, std::move(table_result), std::move(extra_columns));
}
jlongArray convert_table_for_return(JNIEnv *env, std::unique_ptr<cudf::table> &first_table,
std::unique_ptr<cudf::table> &second_table) {
return convert_table_for_return(env, first_table, second_table->release());
}
// Convert the JNI boolean array of key column sort order to a vector of cudf::order
// for groupby.
std::vector<cudf::order> resolve_column_order(JNIEnv *env, jbooleanArray jkeys_sort_desc,
int key_size) {
cudf::jni::native_jbooleanArray keys_sort_desc(env, jkeys_sort_desc);
auto keys_sort_num = keys_sort_desc.size();
// The number of column orders must be either 0 or equal to the number of keys.
if (keys_sort_num != 0 && keys_sort_num != key_size) {
throw cudf::jni::jni_exception("key-column and key-sort-order size mismatch.");
}
std::vector<cudf::order> column_order(keys_sort_num);
if (keys_sort_num > 0) {
std::transform(keys_sort_desc.data(), keys_sort_desc.data() + keys_sort_num,
column_order.begin(), [](jboolean is_desc) {
return is_desc ? cudf::order::DESCENDING : cudf::order::ASCENDING;
});
}
return column_order;
}
// Convert the JNI boolean array of key column null order to a vector of cudf::null_order
// for groupby.
std::vector<cudf::null_order> resolve_null_precedence(JNIEnv *env, jbooleanArray jkeys_null_first,
int key_size) {
cudf::jni::native_jbooleanArray keys_null_first(env, jkeys_null_first);
auto null_order_num = keys_null_first.size();
// The number of null orders must be either 0 or equal to the number of keys.
if (null_order_num != 0 && null_order_num != key_size) {
throw cudf::jni::jni_exception("key-column and key-null-order size mismatch.");
}
std::vector<cudf::null_order> null_precedence(null_order_num);
if (null_order_num > 0) {
std::transform(keys_null_first.data(), keys_null_first.data() + null_order_num,
null_precedence.begin(), [](jboolean null_before) {
return null_before ? cudf::null_order::BEFORE : cudf::null_order::AFTER;
});
}
return null_precedence;
}
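// A minimal example of the two helpers above: for three keys with
//   jkeys_sort_desc  = {true, false, true}
//   jkeys_null_first = {false, true, false}
// the resulting vectors are {DESCENDING, ASCENDING, DESCENDING} and
// {AFTER, BEFORE, AFTER}; empty Java arrays yield empty vectors, letting
// libcudf fall back to its defaults.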
namespace {
int set_column_metadata(
cudf::io::column_in_metadata &column_metadata, std::vector<std::string> &col_names,
cudf::jni::native_jbooleanArray &nullability, cudf::jni::native_jbooleanArray &is_int96,
cudf::jni::native_jintArray &precisions, cudf::jni::native_jbooleanArray &is_map,
cudf::jni::native_jbooleanArray &hasParquetFieldIds,
cudf::jni::native_jintArray &parquetFieldIds, cudf::jni::native_jintArray &children,
int num_children, int read_index, cudf::jni::native_jbooleanArray &is_binary) {
int write_index = 0;
for (int i = 0; i < num_children; i++, write_index++) {
cudf::io::column_in_metadata child;
child.set_name(col_names[read_index]).set_nullability(nullability[read_index]);
if (precisions[read_index] > -1) {
child.set_decimal_precision(precisions[read_index]);
}
if (!is_int96.is_null()) {
child.set_int96_timestamps(is_int96[read_index]);
}
if (!is_binary.is_null()) {
child.set_output_as_binary(is_binary[read_index]);
}
if (is_map[read_index]) {
child.set_list_column_as_map();
}
if (!parquetFieldIds.is_null() && hasParquetFieldIds[read_index]) {
child.set_parquet_field_id(parquetFieldIds[read_index]);
}
column_metadata.add_child(child);
int childs_children = children[read_index++];
if (childs_children > 0) {
read_index = set_column_metadata(
column_metadata.child(write_index), col_names, nullability, is_int96, precisions, is_map,
hasParquetFieldIds, parquetFieldIds, children, childs_children, read_index, is_binary);
}
}
return read_index;
}
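// A sketch of the flattened layout walked by set_column_metadata: the name,
// nullability, precision, etc. arrays are parallel and ordered depth-first,
// and children[read_index] holds the child count of the entry just consumed.
// Assuming a single top-level column STRUCT<x, y> named "s", the arrays carry
// entries for "s", "x", "y" in that order with children = {2, 0, 0}.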
void createTableMetaData(JNIEnv *env, jint num_children, jobjectArray &j_col_names,
jintArray &j_children, jbooleanArray &j_col_nullability,
jbooleanArray &j_is_int96, jintArray &j_precisions,
jbooleanArray &j_is_map, cudf::io::table_input_metadata &metadata,
jbooleanArray &j_hasParquetFieldIds, jintArray &j_parquetFieldIds,
jbooleanArray &j_is_binary) {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray col_names(env, j_col_names);
cudf::jni::native_jbooleanArray col_nullability(env, j_col_nullability);
cudf::jni::native_jbooleanArray is_int96(env, j_is_int96);
cudf::jni::native_jintArray precisions(env, j_precisions);
cudf::jni::native_jbooleanArray hasParquetFieldIds(env, j_hasParquetFieldIds);
cudf::jni::native_jintArray parquetFieldIds(env, j_parquetFieldIds);
cudf::jni::native_jintArray children(env, j_children);
cudf::jni::native_jbooleanArray is_map(env, j_is_map);
cudf::jni::native_jbooleanArray is_binary(env, j_is_binary);
auto cpp_names = col_names.as_cpp_vector();
int top_level_children = num_children;
metadata.column_metadata.resize(top_level_children);
int read_index = 0; // index used to walk the flattened metadata arrays
for (int i = read_index, write_index = 0; i < top_level_children; i++, write_index++) {
metadata.column_metadata[write_index]
.set_name(cpp_names[read_index])
.set_nullability(col_nullability[read_index]);
if (precisions[read_index] > -1) {
metadata.column_metadata[write_index].set_decimal_precision(precisions[read_index]);
}
if (!is_int96.is_null()) {
metadata.column_metadata[write_index].set_int96_timestamps(is_int96[read_index]);
}
if (!is_binary.is_null()) {
metadata.column_metadata[write_index].set_output_as_binary(is_binary[read_index]);
}
if (is_map[read_index]) {
metadata.column_metadata[write_index].set_list_column_as_map();
}
if (!parquetFieldIds.is_null() && hasParquetFieldIds[read_index]) {
metadata.column_metadata[write_index].set_parquet_field_id(parquetFieldIds[read_index]);
}
int childs_children = children[read_index++];
if (childs_children > 0) {
read_index =
set_column_metadata(metadata.column_metadata[write_index], cpp_names, col_nullability,
is_int96, precisions, is_map, hasParquetFieldIds, parquetFieldIds,
children, childs_children, read_index, is_binary);
}
}
}
// Check that window parameters are valid.
bool valid_window_parameters(native_jintArray const &values,
native_jpointerArray<cudf::aggregation> const &ops,
native_jintArray const &min_periods, native_jintArray const &preceding,
native_jintArray const &following) {
return values.size() == ops.size() && values.size() == min_periods.size() &&
values.size() == preceding.size() && values.size() == following.size();
}
// Check that window parameters are valid.
bool valid_window_parameters(native_jintArray const &values,
native_jpointerArray<cudf::aggregation> const &ops,
native_jintArray const &min_periods,
native_jpointerArray<cudf::scalar> const &preceding,
native_jpointerArray<cudf::scalar> const &following) {
return values.size() == ops.size() && values.size() == min_periods.size() &&
values.size() == preceding.size() && values.size() == following.size();
}
// Convert a cudf gather map pair into the form that Java expects
// The resulting Java long array contains the following at each index:
// 0: Size of each gather map in bytes
// 1: Device address of the gather map for the left table
// 2: Host address of the rmm::device_buffer instance that owns the left gather map data
// 3: Device address of the gather map for the right table
// 4: Host address of the rmm::device_buffer instance that owns the right gather map data
jlongArray gather_maps_to_java(JNIEnv *env,
std::pair<std::unique_ptr<rmm::device_uvector<cudf::size_type>>,
std::unique_ptr<rmm::device_uvector<cudf::size_type>>>
maps) {
// release the underlying device buffer to Java
auto left_map_buffer = std::make_unique<rmm::device_buffer>(maps.first->release());
auto right_map_buffer = std::make_unique<rmm::device_buffer>(maps.second->release());
cudf::jni::native_jlongArray result(env, 5);
result[0] = static_cast<jlong>(left_map_buffer->size());
result[1] = ptr_as_jlong(left_map_buffer->data());
result[2] = release_as_jlong(left_map_buffer);
result[3] = ptr_as_jlong(right_map_buffer->data());
result[4] = release_as_jlong(right_map_buffer);
return result.get_jArray();
}
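// A minimal sketch of how the Java side can interpret index 0: the gather
// maps store cudf::size_type (int32) entries, so the row count of each map is
//   long rowCount = sizeInBytes / 4;  // hypothetical Java-side computation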
// Convert a cudf gather map into the form that Java expects
// The resulting Java long array contains the following at each index:
// 0: Size of the gather map in bytes
// 1: Device address of the gather map
// 2: Host address of the rmm::device_buffer instance that owns the gather map data
jlongArray gather_map_to_java(JNIEnv *env,
std::unique_ptr<rmm::device_uvector<cudf::size_type>> map) {
// release the underlying device buffer to Java
auto gather_map_buffer = std::make_unique<rmm::device_buffer>(map->release());
cudf::jni::native_jlongArray result(env, 3);
result[0] = static_cast<jlong>(gather_map_buffer->size());
result[1] = ptr_as_jlong(gather_map_buffer->data());
result[2] = release_as_jlong(gather_map_buffer);
return result.get_jArray();
}
// Generate gather maps needed to manifest the result of an equi-join between two tables.
template <typename T>
jlongArray join_gather_maps(JNIEnv *env, jlong j_left_keys, jlong j_right_keys,
jboolean compare_nulls_equal, T join_func) {
JNI_NULL_CHECK(env, j_left_keys, "left_table is null", NULL);
JNI_NULL_CHECK(env, j_right_keys, "right_table is null", NULL);
try {
cudf::jni::auto_set_device(env);
auto left_keys = reinterpret_cast<cudf::table_view const *>(j_left_keys);
auto right_keys = reinterpret_cast<cudf::table_view const *>(j_right_keys);
auto nulleq = compare_nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
return gather_maps_to_java(env, join_func(*left_keys, *right_keys, nulleq));
}
CATCH_STD(env, NULL);
}
// Generate gather maps needed to manifest the result of an equi-join between a left table and
// a hash table built from the join's right table.
template <typename T>
jlongArray hash_join_gather_maps(JNIEnv *env, jlong j_left_keys, jlong j_right_hash_join,
T join_func) {
JNI_NULL_CHECK(env, j_left_keys, "left table is null", NULL);
JNI_NULL_CHECK(env, j_right_hash_join, "hash join is null", NULL);
try {
cudf::jni::auto_set_device(env);
auto left_keys = reinterpret_cast<cudf::table_view const *>(j_left_keys);
auto hash_join = reinterpret_cast<cudf::hash_join const *>(j_right_hash_join);
return gather_maps_to_java(env, join_func(*left_keys, *hash_join));
}
CATCH_STD(env, NULL);
}
// Generate gather maps needed to manifest the result of a conditional join between two tables.
template <typename T>
jlongArray cond_join_gather_maps(JNIEnv *env, jlong j_left_table, jlong j_right_table,
jlong j_condition, T join_func) {
JNI_NULL_CHECK(env, j_left_table, "left_table is null", NULL);
JNI_NULL_CHECK(env, j_right_table, "right_table is null", NULL);
JNI_NULL_CHECK(env, j_condition, "condition is null", NULL);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto right_table = reinterpret_cast<cudf::table_view const *>(j_right_table);
auto condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
return gather_maps_to_java(
env, join_func(*left_table, *right_table, condition->get_top_expression()));
}
CATCH_STD(env, NULL);
}
// Generate a gather map needed to manifest the result of a semi/anti join between two tables.
template <typename T>
jlongArray join_gather_single_map(JNIEnv *env, jlong j_left_keys, jlong j_right_keys,
jboolean compare_nulls_equal, T join_func) {
JNI_NULL_CHECK(env, j_left_keys, "left_table is null", NULL);
JNI_NULL_CHECK(env, j_right_keys, "right_table is null", NULL);
try {
cudf::jni::auto_set_device(env);
auto left_keys = reinterpret_cast<cudf::table_view const *>(j_left_keys);
auto right_keys = reinterpret_cast<cudf::table_view const *>(j_right_keys);
auto nulleq = compare_nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
return gather_map_to_java(env, join_func(*left_keys, *right_keys, nulleq));
}
CATCH_STD(env, NULL);
}
// Generate a gather map needed to manifest the result of a conditional semi/anti join
// between two tables.
template <typename T>
jlongArray cond_join_gather_single_map(JNIEnv *env, jlong j_left_table, jlong j_right_table,
jlong j_condition, T join_func) {
JNI_NULL_CHECK(env, j_left_table, "left_table is null", NULL);
JNI_NULL_CHECK(env, j_right_table, "right_table is null", NULL);
JNI_NULL_CHECK(env, j_condition, "condition is null", NULL);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto right_table = reinterpret_cast<cudf::table_view const *>(j_right_table);
auto condition = reinterpret_cast<cudf::jni::ast::compiled_expr *>(j_condition);
return gather_map_to_java(
env, join_func(*left_table, *right_table, condition->get_top_expression()));
}
CATCH_STD(env, NULL);
}
template <typename T>
jlongArray mixed_join_size(JNIEnv *env, jlong j_left_keys, jlong j_right_keys,
jlong j_left_condition, jlong j_right_condition, jlong j_condition,
jboolean j_nulls_equal, T join_size_func) {
JNI_NULL_CHECK(env, j_left_keys, "left keys table is null", 0);
JNI_NULL_CHECK(env, j_right_keys, "right keys table is null", 0);
JNI_NULL_CHECK(env, j_left_condition, "left condition table is null", 0);
JNI_NULL_CHECK(env, j_right_condition, "right condition table is null", 0);
JNI_NULL_CHECK(env, j_condition, "condition is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const left_keys = reinterpret_cast<cudf::table_view const *>(j_left_keys);
auto const right_keys = reinterpret_cast<cudf::table_view const *>(j_right_keys);
auto const left_condition = reinterpret_cast<cudf::table_view const *>(j_left_condition);
auto const right_condition = reinterpret_cast<cudf::table_view const *>(j_right_condition);
auto const condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
auto const nulls_equal =
j_nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
auto [join_size, matches_per_row] =
join_size_func(*left_keys, *right_keys, *left_condition, *right_condition,
condition->get_top_expression(), nulls_equal);
if (matches_per_row->size() > std::numeric_limits<cudf::size_type>::max()) {
throw std::runtime_error("Too many values in device buffer to convert into a column");
}
auto col_size = static_cast<size_type>(matches_per_row->size());
auto col_data = matches_per_row->release();
cudf::jni::native_jlongArray result(env, 2);
result[0] = static_cast<jlong>(join_size);
result[1] = ptr_as_jlong(new cudf::column{cudf::data_type{cudf::type_id::INT32}, col_size,
std::move(col_data), rmm::device_buffer{}, 0});
return result.get_jArray();
}
CATCH_STD(env, NULL);
}
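// A sketch of how this layout is consumed: index 0 carries the total output
// row count and index 1 a handle to an INT32 column of per-row match counts;
// the Java side later hands both back (see get_mixed_size_info below) so the
// subsequent mixed-join call can skip recomputing the join size.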
template <typename T>
jlongArray mixed_join_gather_maps(JNIEnv *env, jlong j_left_keys, jlong j_right_keys,
jlong j_left_condition, jlong j_right_condition,
jlong j_condition, jboolean j_nulls_equal, T join_func) {
JNI_NULL_CHECK(env, j_left_keys, "left keys table is null", 0);
JNI_NULL_CHECK(env, j_right_keys, "right keys table is null", 0);
JNI_NULL_CHECK(env, j_left_condition, "left condition table is null", 0);
JNI_NULL_CHECK(env, j_right_condition, "right condition table is null", 0);
JNI_NULL_CHECK(env, j_condition, "condition is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const left_keys = reinterpret_cast<cudf::table_view const *>(j_left_keys);
auto const right_keys = reinterpret_cast<cudf::table_view const *>(j_right_keys);
auto const left_condition = reinterpret_cast<cudf::table_view const *>(j_left_condition);
auto const right_condition = reinterpret_cast<cudf::table_view const *>(j_right_condition);
auto const condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
auto const nulls_equal =
j_nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
return gather_maps_to_java(env,
join_func(*left_keys, *right_keys, *left_condition, *right_condition,
condition->get_top_expression(), nulls_equal));
}
CATCH_STD(env, NULL);
}
template <typename T>
jlongArray mixed_join_gather_single_map(JNIEnv *env, jlong j_left_keys, jlong j_right_keys,
jlong j_left_condition, jlong j_right_condition,
jlong j_condition, jboolean j_nulls_equal, T join_func) {
JNI_NULL_CHECK(env, j_left_keys, "left keys table is null", 0);
JNI_NULL_CHECK(env, j_right_keys, "right keys table is null", 0);
JNI_NULL_CHECK(env, j_left_condition, "left condition table is null", 0);
JNI_NULL_CHECK(env, j_right_condition, "right condition table is null", 0);
JNI_NULL_CHECK(env, j_condition, "condition is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const left_keys = reinterpret_cast<cudf::table_view const *>(j_left_keys);
auto const right_keys = reinterpret_cast<cudf::table_view const *>(j_right_keys);
auto const left_condition = reinterpret_cast<cudf::table_view const *>(j_left_condition);
auto const right_condition = reinterpret_cast<cudf::table_view const *>(j_right_condition);
auto const condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
auto const nulls_equal =
j_nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL;
return gather_map_to_java(env,
join_func(*left_keys, *right_keys, *left_condition, *right_condition,
condition->get_top_expression(), nulls_equal));
}
CATCH_STD(env, NULL);
}
std::pair<std::size_t, cudf::device_span<cudf::size_type const>>
get_mixed_size_info(JNIEnv *env, jlong j_output_row_count, jlong j_matches_view) {
auto const row_count = static_cast<std::size_t>(j_output_row_count);
auto const matches = reinterpret_cast<cudf::column_view const *>(j_matches_view);
return std::make_pair(row_count, cudf::device_span<cudf::size_type const>(
matches->template data<cudf::size_type>(), matches->size()));
}
cudf::column_view remove_validity_from_col(cudf::column_view column_view) {
if (!cudf::is_compound(column_view.type())) {
if (column_view.nullable() && column_view.null_count() == 0) {
// The null mask is allocated but no nulls are present, so create a new
// column_view without the null mask to avoid spurious nullability when the
// data is round-tripped through a Parquet file
return cudf::column_view(column_view.type(), column_view.size(), column_view.head(), nullptr,
0, column_view.offset());
} else {
return cudf::column_view(column_view);
}
} else {
std::vector<cudf::column_view> children;
children.reserve(column_view.num_children());
for (auto it = column_view.child_begin(); it != column_view.child_end(); it++) {
children.push_back(remove_validity_from_col(*it));
}
if (!column_view.nullable() || column_view.null_count() != 0) {
return cudf::column_view(column_view.type(), column_view.size(), nullptr,
column_view.null_mask(), column_view.null_count(),
column_view.offset(), children);
} else {
return cudf::column_view(column_view.type(), column_view.size(), nullptr, nullptr, 0,
column_view.offset(), children);
}
}
}
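// A minimal example of the transformation above: an INT32 column view with an
// allocated null mask but null_count() == 0 becomes a plain non-nullable view
// over the same data, while a column that actually contains nulls is passed
// through unchanged.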
cudf::table_view remove_validity_if_needed(cudf::table_view *input_table_view) {
std::vector<cudf::column_view> views;
views.reserve(input_table_view->num_columns());
for (auto it = input_table_view->begin(); it != input_table_view->end(); it++) {
views.push_back(remove_validity_from_col(*it));
}
return cudf::table_view(views);
}
} // namespace
} // namespace jni
} // namespace cudf
using cudf::jni::convert_table_for_return;
using cudf::jni::ptr_as_jlong;
using cudf::jni::release_as_jlong;
extern "C" {
// This is a method purely added for testing remove_validity_if_needed method
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_removeNullMasksIfNeeded(JNIEnv *env, jclass,
jlong j_table_view) {
JNI_NULL_CHECK(env, j_table_view, "table view handle is null", 0);
try {
cudf::table_view *tview = reinterpret_cast<cudf::table_view *>(j_table_view);
cudf::table_view result = cudf::jni::remove_validity_if_needed(tview);
cudf::table m_tbl(result);
std::vector<std::unique_ptr<cudf::column>> cols = m_tbl.release();
auto results = cudf::jni::native_jlongArray(env, cols.size());
std::transform(cols.begin(), cols.end(), results.begin(),
[](auto &col) { return release_as_jlong(col); });
return results.get_jArray();
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_createCudfTableView(JNIEnv *env, jclass,
jlongArray j_cudf_columns) {
JNI_NULL_CHECK(env, j_cudf_columns, "columns are null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jpointerArray<cudf::column_view> n_cudf_columns(env, j_cudf_columns);
std::vector<cudf::column_view> column_views = n_cudf_columns.get_dereferenced();
return ptr_as_jlong(new cudf::table_view(column_views));
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_deleteCudfTable(JNIEnv *env, jclass,
jlong j_cudf_table_view) {
JNI_NULL_CHECK(env, j_cudf_table_view, "table view handle is null", );
try {
cudf::jni::auto_set_device(env);
delete reinterpret_cast<cudf::table_view *>(j_cudf_table_view);
}
CATCH_STD(env, );
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_columnViewsFromPacked(JNIEnv *env, jclass,
jobject buffer_obj,
jlong j_data_address) {
// The GPU data address can be null when the table is empty, so it is not null-checked here.
JNI_NULL_CHECK(env, buffer_obj, "metadata is null", nullptr);
try {
cudf::jni::auto_set_device(env);
void const *metadata_address = env->GetDirectBufferAddress(buffer_obj);
JNI_NULL_CHECK(env, metadata_address, "metadata buffer address is null", nullptr);
cudf::table_view table = cudf::unpack(static_cast<uint8_t const *>(metadata_address),
reinterpret_cast<uint8_t const *>(j_data_address));
cudf::jni::native_jlongArray views(env, table.num_columns());
for (int i = 0; i < table.num_columns(); i++) {
// TODO Exception handling is not ideal, if no exceptions are thrown ownership of the new cv
// is passed to Java. If an exception is thrown we need to free it, but this needs to be
// coordinated with the Java side because one column may have changed ownership while
// another may not have. We don't want to double free the view so for now we just let it
// leak because it should be a small amount of host memory.
//
// In the ideal case we would keep the view where it is and pass in a pointer to it.
// That pointer would then be copied when Java takes ownership of it, but that adds an
// extra JNI call that I would like to avoid for performance reasons.
views[i] = ptr_as_jlong(new cudf::column_view(table.column(i)));
}
views.commit();
return views.get_jArray();
}
CATCH_STD(env, nullptr);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_sortOrder(JNIEnv *env, jclass,
jlong j_input_table,
jlongArray j_sort_keys_columns,
jbooleanArray j_is_descending,
jbooleanArray j_are_nulls_smallest) {
// input validations & verifications
JNI_NULL_CHECK(env, j_input_table, "input table is null", 0);
JNI_NULL_CHECK(env, j_sort_keys_columns, "sort keys columns is null", 0);
JNI_NULL_CHECK(env, j_is_descending, "sort order array is null", 0);
JNI_NULL_CHECK(env, j_are_nulls_smallest, "null order array is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jpointerArray<cudf::column_view> n_sort_keys_columns(env,
j_sort_keys_columns);
jsize num_columns = n_sort_keys_columns.size();
const cudf::jni::native_jbooleanArray n_is_descending(env, j_is_descending);
jsize num_columns_is_desc = n_is_descending.size();
JNI_ARG_CHECK(env, num_columns_is_desc == num_columns,
"columns and is_descending lengths don't match", 0);
const cudf::jni::native_jbooleanArray n_are_nulls_smallest(env, j_are_nulls_smallest);
jsize num_columns_null_smallest = n_are_nulls_smallest.size();
JNI_ARG_CHECK(env, num_columns_null_smallest == num_columns,
"columns and is_descending lengths don't match", 0);
std::vector<cudf::order> order =
n_is_descending.transform_if_else(cudf::order::DESCENDING, cudf::order::ASCENDING);
std::vector<cudf::null_order> null_order =
n_are_nulls_smallest.transform_if_else(cudf::null_order::BEFORE, cudf::null_order::AFTER);
std::vector<cudf::column_view> sort_keys = n_sort_keys_columns.get_dereferenced();
return release_as_jlong(cudf::sorted_order(cudf::table_view{sort_keys}, order, null_order));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_orderBy(JNIEnv *env, jclass,
jlong j_input_table,
jlongArray j_sort_keys_columns,
jbooleanArray j_is_descending,
jbooleanArray j_are_nulls_smallest) {
// input validations & verifications
JNI_NULL_CHECK(env, j_input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, j_sort_keys_columns, "sort keys columns is null", NULL);
JNI_NULL_CHECK(env, j_is_descending, "sort order array is null", NULL);
JNI_NULL_CHECK(env, j_are_nulls_smallest, "null order array is null", NULL);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jpointerArray<cudf::column_view> n_sort_keys_columns(env,
j_sort_keys_columns);
jsize num_columns = n_sort_keys_columns.size();
const cudf::jni::native_jbooleanArray n_is_descending(env, j_is_descending);
jsize num_columns_is_desc = n_is_descending.size();
JNI_ARG_CHECK(env, num_columns_is_desc == num_columns,
"columns and is_descending lengths don't match", 0);
const cudf::jni::native_jbooleanArray n_are_nulls_smallest(env, j_are_nulls_smallest);
jsize num_columns_null_smallest = n_are_nulls_smallest.size();
JNI_ARG_CHECK(env, num_columns_null_smallest == num_columns,
"columns and areNullsSmallest lengths don't match", 0);
std::vector<cudf::order> order =
n_is_descending.transform_if_else(cudf::order::DESCENDING, cudf::order::ASCENDING);
std::vector<cudf::null_order> null_order =
n_are_nulls_smallest.transform_if_else(cudf::null_order::BEFORE, cudf::null_order::AFTER);
std::vector<cudf::column_view> sort_keys = n_sort_keys_columns.get_dereferenced();
auto sorted_col = cudf::sorted_order(cudf::table_view{sort_keys}, order, null_order);
auto const input_table = reinterpret_cast<cudf::table_view const *>(j_input_table);
return convert_table_for_return(env, cudf::gather(*input_table, sorted_col->view()));
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_merge(JNIEnv *env, jclass,
jlongArray j_table_handles,
jintArray j_sort_key_indexes,
jbooleanArray j_is_descending,
jbooleanArray j_are_nulls_smallest) {
// input validations & verifications
JNI_NULL_CHECK(env, j_table_handles, "input tables are null", NULL);
JNI_NULL_CHECK(env, j_sort_key_indexes, "key indexes is null", NULL);
JNI_NULL_CHECK(env, j_is_descending, "sort order array is null", NULL);
JNI_NULL_CHECK(env, j_are_nulls_smallest, "null order array is null", NULL);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jpointerArray<cudf::table_view> n_table_handles(env, j_table_handles);
const cudf::jni::native_jintArray n_sort_key_indexes(env, j_sort_key_indexes);
jsize num_columns = n_sort_key_indexes.size();
const cudf::jni::native_jbooleanArray n_is_descending(env, j_is_descending);
jsize num_columns_is_desc = n_is_descending.size();
JNI_ARG_CHECK(env, num_columns_is_desc == num_columns,
"columns and is_descending lengths don't match", NULL);
const cudf::jni::native_jbooleanArray n_are_nulls_smallest(env, j_are_nulls_smallest);
jsize num_columns_null_smallest = n_are_nulls_smallest.size();
JNI_ARG_CHECK(env, num_columns_null_smallest == num_columns,
"columns and areNullsSmallest lengths don't match", NULL);
std::vector<int> indexes = n_sort_key_indexes.to_vector<int>();
std::vector<cudf::order> order =
n_is_descending.transform_if_else(cudf::order::DESCENDING, cudf::order::ASCENDING);
std::vector<cudf::null_order> null_order =
n_are_nulls_smallest.transform_if_else(cudf::null_order::BEFORE, cudf::null_order::AFTER);
std::vector<cudf::table_view> tables = n_table_handles.get_dereferenced();
return convert_table_for_return(env, cudf::merge(tables, indexes, order, null_order));
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readCSVFromDataSource(
JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales,
jobjectArray filter_col_names, jint header_row, jbyte delim, jint j_quote_style, jbyte quote,
jbyte comment, jobjectArray null_values, jobjectArray true_values, jobjectArray false_values,
jlong ds_handle) {
JNI_NULL_CHECK(env, null_values, "null_values must be supplied, even if it is empty", NULL);
JNI_NULL_CHECK(env, ds_handle, "no data source handle given", NULL);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_col_names(env, col_names);
cudf::jni::native_jintArray n_types(env, j_types);
cudf::jni::native_jintArray n_scales(env, j_scales);
if (n_types.is_null() != n_scales.is_null()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"types and scales must either both be provided or both be null",
NULL);
}
std::vector<cudf::data_type> data_types;
if (!n_types.is_null()) {
if (n_types.size() != n_scales.size()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "types and scales must be the same size",
NULL);
}
data_types.reserve(n_types.size());
std::transform(n_types.begin(), n_types.end(), n_scales.begin(),
std::back_inserter(data_types), [](auto type, auto scale) {
return cudf::data_type{static_cast<cudf::type_id>(type), scale};
});
}
cudf::jni::native_jstringArray n_null_values(env, null_values);
cudf::jni::native_jstringArray n_true_values(env, true_values);
cudf::jni::native_jstringArray n_false_values(env, false_values);
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
auto ds = reinterpret_cast<cudf::io::datasource *>(ds_handle);
cudf::io::source_info source{ds};
auto const quote_style = static_cast<cudf::io::quote_style>(j_quote_style);
cudf::io::csv_reader_options opts = cudf::io::csv_reader_options::builder(source)
.delimiter(delim)
.header(header_row)
.names(n_col_names.as_cpp_vector())
.dtypes(data_types)
.use_cols_names(n_filter_col_names.as_cpp_vector())
.true_values(n_true_values.as_cpp_vector())
.false_values(n_false_values.as_cpp_vector())
.na_values(n_null_values.as_cpp_vector())
.keep_default_na(false)
.na_filter(n_null_values.size() > 0)
.quoting(quote_style)
.quotechar(quote)
.comment(comment)
.build();
return convert_table_for_return(env, cudf::io::read_csv(opts).tbl);
}
CATCH_STD(env, NULL);
}
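
// Reads CSV from either a host memory buffer or a file path. Exactly one of
// `buffer` and `inputfilepath` may be supplied; the checks below reject
// passing both or neither before any device work happens.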
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readCSV(
JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales,
jobjectArray filter_col_names, jstring inputfilepath, jlong buffer, jlong buffer_length,
jint header_row, jbyte delim, jint j_quote_style, jbyte quote, jbyte comment,
jobjectArray null_values, jobjectArray true_values, jobjectArray false_values) {
JNI_NULL_CHECK(env, null_values, "null_values must be supplied, even if it is empty", NULL);
bool read_buffer = true;
if (buffer == 0) {
JNI_NULL_CHECK(env, inputfilepath, "input file or buffer must be supplied", NULL);
read_buffer = false;
} else if (inputfilepath != NULL) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"cannot pass in both a buffer and an inputfilepath", NULL);
} else if (buffer_length <= 0) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "An empty buffer is not supported",
NULL);
}
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_col_names(env, col_names);
cudf::jni::native_jintArray n_types(env, j_types);
cudf::jni::native_jintArray n_scales(env, j_scales);
if (n_types.is_null() != n_scales.is_null()) {
      JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                    "types and scales must both be provided or both be null", NULL);
}
std::vector<cudf::data_type> data_types;
if (!n_types.is_null()) {
if (n_types.size() != n_scales.size()) {
        JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                      "types and scales must have the same length", NULL);
}
data_types.reserve(n_types.size());
std::transform(n_types.begin(), n_types.end(), n_scales.begin(),
std::back_inserter(data_types), [](auto type, auto scale) {
return cudf::data_type{static_cast<cudf::type_id>(type), scale};
});
}
cudf::jni::native_jstring filename(env, inputfilepath);
if (!read_buffer && filename.is_empty()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "inputfilepath can't be empty",
NULL);
}
cudf::jni::native_jstringArray n_null_values(env, null_values);
cudf::jni::native_jstringArray n_true_values(env, true_values);
cudf::jni::native_jstringArray n_false_values(env, false_values);
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
auto source = read_buffer ? cudf::io::source_info{reinterpret_cast<char *>(buffer),
static_cast<std::size_t>(buffer_length)} :
cudf::io::source_info{filename.get()};
auto const quote_style = static_cast<cudf::io::quote_style>(j_quote_style);
cudf::io::csv_reader_options opts = cudf::io::csv_reader_options::builder(source)
.delimiter(delim)
.header(header_row)
.names(n_col_names.as_cpp_vector())
.dtypes(data_types)
.use_cols_names(n_filter_col_names.as_cpp_vector())
.true_values(n_true_values.as_cpp_vector())
.false_values(n_false_values.as_cpp_vector())
.na_values(n_null_values.as_cpp_vector())
.keep_default_na(false)
.na_filter(n_null_values.size() > 0)
.quoting(quote_style)
.quotechar(quote)
.comment(comment)
.build();
return convert_table_for_return(env, cudf::io::read_csv(opts).tbl);
}
CATCH_STD(env, NULL);
}
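
// Writes a whole table to a CSV file in a single call. All of the string
// parameters (row delimiter, null/true/false representations) are required,
// so they are null-checked up front before the writer options are built.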
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeCSVToFile(
JNIEnv *env, jclass, jlong j_table_handle, jobjectArray j_column_names, jboolean include_header,
jstring j_row_delimiter, jbyte j_field_delimiter, jstring j_null_value, jstring j_true_value,
jstring j_false_value, jint j_quote_style, jstring j_output_path) {
JNI_NULL_CHECK(env, j_table_handle, "table handle cannot be null.", );
JNI_NULL_CHECK(env, j_column_names, "column name array cannot be null", );
JNI_NULL_CHECK(env, j_row_delimiter, "row delimiter cannot be null", );
JNI_NULL_CHECK(env, j_field_delimiter, "field delimiter cannot be null", );
JNI_NULL_CHECK(env, j_null_value, "null representation string cannot be itself null", );
JNI_NULL_CHECK(env, j_true_value, "representation string for `true` cannot be null", );
JNI_NULL_CHECK(env, j_false_value, "representation string for `false` cannot be null", );
JNI_NULL_CHECK(env, j_output_path, "output path cannot be null", );
try {
cudf::jni::auto_set_device(env);
auto const native_output_path = cudf::jni::native_jstring{env, j_output_path};
auto const output_path = native_output_path.get();
auto const table = reinterpret_cast<cudf::table_view *>(j_table_handle);
auto const n_column_names = cudf::jni::native_jstringArray{env, j_column_names};
auto const column_names = n_column_names.as_cpp_vector();
auto const line_terminator = cudf::jni::native_jstring{env, j_row_delimiter};
auto const na_rep = cudf::jni::native_jstring{env, j_null_value};
auto const true_value = cudf::jni::native_jstring{env, j_true_value};
auto const false_value = cudf::jni::native_jstring{env, j_false_value};
auto const quote_style = static_cast<cudf::io::quote_style>(j_quote_style);
auto options = cudf::io::csv_writer_options::builder(cudf::io::sink_info{output_path}, *table)
.names(column_names)
.include_header(static_cast<bool>(include_header))
.line_terminator(line_terminator.get())
.inter_column_delimiter(j_field_delimiter)
.na_rep(na_rep.get())
.true_value(true_value.get())
.false_value(false_value.get())
.quoting(quote_style);
cudf::io::write_csv(options.build());
}
CATCH_STD(env, );
}
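
// Chunked CSV write to a Java-side buffer consumer. The three functions form
// a lifecycle: startWriteCSVToBuffer returns a native csv_chunked_writer
// handle (writing through a jni_writer_data_sink), writeCSVChunkToBuffer
// appends one table per call, and endWriteCSVToBuffer closes the writer and
// reclaims the handle via unique_ptr.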
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_startWriteCSVToBuffer(
JNIEnv *env, jclass, jobjectArray j_column_names, jboolean include_header,
jstring j_row_delimiter, jbyte j_field_delimiter, jstring j_null_value, jstring j_true_value,
jstring j_false_value, jint j_quote_style, jobject j_buffer, jobject host_memory_allocator) {
JNI_NULL_CHECK(env, j_column_names, "column name array cannot be null", 0);
JNI_NULL_CHECK(env, j_row_delimiter, "row delimiter cannot be null", 0);
JNI_NULL_CHECK(env, j_field_delimiter, "field delimiter cannot be null", 0);
JNI_NULL_CHECK(env, j_null_value, "null representation string cannot be itself null", 0);
JNI_NULL_CHECK(env, j_buffer, "output buffer cannot be null", 0);
try {
cudf::jni::auto_set_device(env);
auto data_sink =
std::make_unique<cudf::jni::jni_writer_data_sink>(env, j_buffer, host_memory_allocator);
auto const n_column_names = cudf::jni::native_jstringArray{env, j_column_names};
auto const column_names = n_column_names.as_cpp_vector();
auto const line_terminator = cudf::jni::native_jstring{env, j_row_delimiter};
auto const na_rep = cudf::jni::native_jstring{env, j_null_value};
auto const true_value = cudf::jni::native_jstring{env, j_true_value};
auto const false_value = cudf::jni::native_jstring{env, j_false_value};
auto const quote_style = static_cast<cudf::io::quote_style>(j_quote_style);
auto options = cudf::io::csv_writer_options::builder(cudf::io::sink_info{data_sink.get()},
cudf::table_view{})
.names(column_names)
.include_header(static_cast<bool>(include_header))
.line_terminator(line_terminator.get())
.inter_column_delimiter(j_field_delimiter)
.na_rep(na_rep.get())
.true_value(true_value.get())
.false_value(false_value.get())
.quoting(quote_style)
.build();
return ptr_as_jlong(new cudf::jni::io::csv_chunked_writer{options, data_sink});
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeCSVChunkToBuffer(JNIEnv *env, jclass,
jlong j_writer_handle,
jlong j_table_handle) {
JNI_NULL_CHECK(env, j_writer_handle, "writer handle cannot be null.", );
JNI_NULL_CHECK(env, j_table_handle, "table handle cannot be null.", );
auto const table = reinterpret_cast<cudf::table_view *>(j_table_handle);
auto writer = reinterpret_cast<cudf::jni::io::csv_chunked_writer *>(j_writer_handle);
try {
cudf::jni::auto_set_device(env);
writer->write(*table);
}
CATCH_STD(env, );
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_endWriteCSVToBuffer(JNIEnv *env, jclass,
jlong j_writer_handle) {
JNI_NULL_CHECK(env, j_writer_handle, "writer handle cannot be null.", );
using cudf::jni::io::csv_chunked_writer;
auto writer =
std::unique_ptr<csv_chunked_writer>{reinterpret_cast<csv_chunked_writer *>(j_writer_handle)};
try {
cudf::jni::auto_set_device(env);
writer->close();
}
CATCH_STD(env, );
}
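
// Reads JSON from a host buffer with the schema fully inferred. The returned
// jlong is a heap-allocated cudf::io::table_with_metadata; the TableWithMeta
// functions below query and eventually free it.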
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readAndInferJSON(
JNIEnv *env, jclass, jlong buffer, jlong buffer_length, jboolean day_first, jboolean lines,
jboolean recover_with_null) {
JNI_NULL_CHECK(env, buffer, "buffer cannot be null", 0);
if (buffer_length <= 0) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "An empty buffer is not supported", 0);
}
try {
cudf::jni::auto_set_device(env);
auto source = cudf::io::source_info{reinterpret_cast<char *>(buffer),
static_cast<std::size_t>(buffer_length)};
auto const recovery_mode = recover_with_null ?
cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL :
cudf::io::json_recovery_mode_t::FAIL;
cudf::io::json_reader_options_builder opts = cudf::io::json_reader_options::builder(source)
.dayfirst(static_cast<bool>(day_first))
.lines(static_cast<bool>(lines))
.recovery_mode(recovery_mode);
auto result =
std::make_unique<cudf::io::table_with_metadata>(cudf::io::read_json(opts.build()));
    return ptr_as_jlong(result.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_TableWithMeta_close(JNIEnv *env, jclass, jlong handle) {
JNI_NULL_CHECK(env, handle, "handle is null", );
try {
cudf::jni::auto_set_device(env);
delete reinterpret_cast<cudf::io::table_with_metadata *>(handle);
}
CATCH_STD(env, );
}
JNIEXPORT jobjectArray JNICALL Java_ai_rapids_cudf_TableWithMeta_getColumnNames(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "handle is null", nullptr);
try {
cudf::jni::auto_set_device(env);
auto ptr = reinterpret_cast<cudf::io::table_with_metadata *>(handle);
    auto length = static_cast<jsize>(ptr->metadata.schema_info.size());
    jobjectArray ret = env->NewObjectArray(length, env->FindClass("java/lang/String"), nullptr);
    for (jsize i = 0; i < length; i++) {
      env->SetObjectArrayElement(ret, i,
                                 env->NewStringUTF(ptr->metadata.schema_info[i].name.c_str()));
    }
return ret;
}
CATCH_STD(env, nullptr);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_TableWithMeta_releaseTable(JNIEnv *env, jclass,
jlong handle) {
JNI_NULL_CHECK(env, handle, "handle is null", nullptr);
try {
cudf::jni::auto_set_device(env);
auto ptr = reinterpret_cast<cudf::io::table_with_metadata *>(handle);
if (ptr->tbl) {
return convert_table_for_return(env, ptr->tbl);
} else {
return nullptr;
}
}
CATCH_STD(env, nullptr);
}
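
// Reads JSON through a user-supplied datasource. Dtype handling: when both
// column names and types are present they are zipped into a name -> type
// map, types alone are applied positionally, and with neither the reader
// infers the schema.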
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readJSONFromDataSource(
JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales,
jboolean day_first, jboolean lines, jboolean recover_with_null, jlong ds_handle) {
JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_col_names(env, col_names);
cudf::jni::native_jintArray n_types(env, j_types);
cudf::jni::native_jintArray n_scales(env, j_scales);
if (n_types.is_null() != n_scales.is_null()) {
      JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                    "types and scales must both be provided or both be null", 0);
}
std::vector<cudf::data_type> data_types;
if (!n_types.is_null()) {
if (n_types.size() != n_scales.size()) {
        JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                      "types and scales must have the same length", 0);
}
data_types.reserve(n_types.size());
std::transform(n_types.begin(), n_types.end(), n_scales.begin(),
std::back_inserter(data_types), [](auto const &type, auto const &scale) {
return cudf::data_type{static_cast<cudf::type_id>(type), scale};
});
}
auto ds = reinterpret_cast<cudf::io::datasource *>(ds_handle);
cudf::io::source_info source{ds};
cudf::io::json_recovery_mode_t recovery_mode =
recover_with_null ? cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL :
cudf::io::json_recovery_mode_t::FAIL;
cudf::io::json_reader_options_builder opts = cudf::io::json_reader_options::builder(source)
.dayfirst(static_cast<bool>(day_first))
.lines(static_cast<bool>(lines))
.recovery_mode(recovery_mode);
if (!n_col_names.is_null() && data_types.size() > 0) {
if (n_col_names.size() != n_types.size()) {
        JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                      "column names and types must have the same length", 0);
}
std::map<std::string, cudf::data_type> map;
auto col_names_vec = n_col_names.as_cpp_vector();
std::transform(col_names_vec.begin(), col_names_vec.end(), data_types.begin(),
std::inserter(map, map.end()),
                     [](std::string const &a, cudf::data_type b) { return std::make_pair(a, b); });
opts.dtypes(map);
} else if (data_types.size() > 0) {
opts.dtypes(data_types);
    } else {
      // No types were provided; leave dtypes unset so the reader infers them.
    }
auto result =
std::make_unique<cudf::io::table_with_metadata>(cudf::io::read_json(opts.build()));
    return ptr_as_jlong(result.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readJSON(
JNIEnv *env, jclass, jobjectArray col_names, jintArray j_types, jintArray j_scales,
jstring inputfilepath, jlong buffer, jlong buffer_length, jboolean day_first, jboolean lines,
jboolean recover_with_null) {
bool read_buffer = true;
if (buffer == 0) {
JNI_NULL_CHECK(env, inputfilepath, "input file or buffer must be supplied", 0);
read_buffer = false;
} else if (inputfilepath != NULL) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"cannot pass in both a buffer and an inputfilepath", 0);
} else if (buffer_length <= 0) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "An empty buffer is not supported", 0);
}
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_col_names(env, col_names);
cudf::jni::native_jintArray n_types(env, j_types);
cudf::jni::native_jintArray n_scales(env, j_scales);
if (n_types.is_null() != n_scales.is_null()) {
      JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                    "types and scales must both be provided or both be null", 0);
}
std::vector<cudf::data_type> data_types;
if (!n_types.is_null()) {
if (n_types.size() != n_scales.size()) {
        JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                      "types and scales must have the same length", 0);
}
data_types.reserve(n_types.size());
std::transform(n_types.begin(), n_types.end(), n_scales.begin(),
std::back_inserter(data_types), [](auto const &type, auto const &scale) {
return cudf::data_type{static_cast<cudf::type_id>(type), scale};
});
}
cudf::jni::native_jstring filename(env, inputfilepath);
if (!read_buffer && filename.is_empty()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "inputfilepath can't be empty", 0);
}
auto source = read_buffer ? cudf::io::source_info{reinterpret_cast<char *>(buffer),
static_cast<std::size_t>(buffer_length)} :
cudf::io::source_info{filename.get()};
cudf::io::json_recovery_mode_t recovery_mode =
recover_with_null ? cudf::io::json_recovery_mode_t::RECOVER_WITH_NULL :
cudf::io::json_recovery_mode_t::FAIL;
cudf::io::json_reader_options_builder opts = cudf::io::json_reader_options::builder(source)
.dayfirst(static_cast<bool>(day_first))
.lines(static_cast<bool>(lines))
.recovery_mode(recovery_mode);
if (!n_col_names.is_null() && data_types.size() > 0) {
if (n_col_names.size() != n_types.size()) {
        JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
                      "column names and types must have the same length", 0);
}
std::map<std::string, cudf::data_type> map;
auto col_names_vec = n_col_names.as_cpp_vector();
std::transform(col_names_vec.begin(), col_names_vec.end(), data_types.begin(),
std::inserter(map, map.end()),
                     [](std::string const &a, cudf::data_type b) { return std::make_pair(a, b); });
opts.dtypes(map);
} else if (data_types.size() > 0) {
opts.dtypes(data_types);
    } else {
      // No types were provided; leave dtypes unset so the reader infers them.
    }
auto result =
std::make_unique<cudf::io::table_with_metadata>(cudf::io::read_json(opts.build()));
    return ptr_as_jlong(result.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readParquetFromDataSource(
JNIEnv *env, jclass, jobjectArray filter_col_names, jbooleanArray j_col_binary_read, jint unit,
jlong ds_handle) {
JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0);
JNI_NULL_CHECK(env, j_col_binary_read, "null col_binary_read", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
cudf::jni::native_jbooleanArray n_col_binary_read(env, j_col_binary_read);
auto ds = reinterpret_cast<cudf::io::datasource *>(ds_handle);
cudf::io::source_info source{ds};
auto builder = cudf::io::parquet_reader_options::builder(source);
if (n_filter_col_names.size() > 0) {
builder = builder.columns(n_filter_col_names.as_cpp_vector());
}
cudf::io::parquet_reader_options opts =
builder.convert_strings_to_categories(false)
.timestamp_type(cudf::data_type(static_cast<cudf::type_id>(unit)))
.build();
return convert_table_for_return(env, cudf::io::read_parquet(opts).tbl);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readParquet(
JNIEnv *env, jclass, jobjectArray filter_col_names, jbooleanArray j_col_binary_read,
jstring inputfilepath, jlong buffer, jlong buffer_length, jint unit) {
JNI_NULL_CHECK(env, j_col_binary_read, "null col_binary_read", 0);
bool read_buffer = true;
if (buffer == 0) {
JNI_NULL_CHECK(env, inputfilepath, "input file or buffer must be supplied", NULL);
read_buffer = false;
} else if (inputfilepath != NULL) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"cannot pass in both a buffer and an inputfilepath", NULL);
} else if (buffer_length <= 0) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "An empty buffer is not supported",
NULL);
}
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring filename(env, inputfilepath);
if (!read_buffer && filename.is_empty()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "inputfilepath can't be empty",
NULL);
}
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
cudf::jni::native_jbooleanArray n_col_binary_read(env, j_col_binary_read);
auto source = read_buffer ? cudf::io::source_info(reinterpret_cast<char *>(buffer),
static_cast<std::size_t>(buffer_length)) :
cudf::io::source_info(filename.get());
auto builder = cudf::io::parquet_reader_options::builder(source);
if (n_filter_col_names.size() > 0) {
builder = builder.columns(n_filter_col_names.as_cpp_vector());
}
cudf::io::parquet_reader_options opts =
builder.convert_strings_to_categories(false)
.timestamp_type(cudf::data_type(static_cast<cudf::type_id>(unit)))
.build();
return convert_table_for_return(env, cudf::io::read_parquet(opts).tbl);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readAvroFromDataSource(
JNIEnv *env, jclass, jobjectArray filter_col_names, jlong ds_handle) {
JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
auto ds = reinterpret_cast<cudf::io::datasource *>(ds_handle);
cudf::io::source_info source{ds};
cudf::io::avro_reader_options opts = cudf::io::avro_reader_options::builder(source)
.columns(n_filter_col_names.as_cpp_vector())
.build();
return convert_table_for_return(env, cudf::io::read_avro(opts).tbl);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readAvro(JNIEnv *env, jclass,
jobjectArray filter_col_names,
jstring inputfilepath, jlong buffer,
jlong buffer_length) {
const bool read_buffer = (buffer != 0);
if (!read_buffer) {
JNI_NULL_CHECK(env, inputfilepath, "input file or buffer must be supplied", NULL);
} else if (inputfilepath != NULL) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"cannot pass in both a buffer and an inputfilepath", NULL);
} else if (buffer_length <= 0) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "An empty buffer is not supported",
NULL);
}
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring filename(env, inputfilepath);
if (!read_buffer && filename.is_empty()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "inputfilepath can't be empty",
NULL);
}
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
auto source = read_buffer ? cudf::io::source_info(reinterpret_cast<char *>(buffer),
static_cast<std::size_t>(buffer_length)) :
cudf::io::source_info(filename.get());
cudf::io::avro_reader_options opts = cudf::io::avro_reader_options::builder(source)
.columns(n_filter_col_names.as_cpp_vector())
.build();
return convert_table_for_return(env, cudf::io::read_avro(opts).tbl);
}
CATCH_STD(env, NULL);
}
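
// Chunked Parquet write. The *Begin functions build a
// native_parquet_writer_handle owning the chunked writer, the (optional)
// Java-backed data sink, and shared compression statistics;
// writeParquetChunk appends tables and writeParquetEnd closes and deletes
// the handle.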
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_writeParquetBufferBegin(
JNIEnv *env, jclass, jobjectArray j_col_names, jint j_num_children, jintArray j_children,
jbooleanArray j_col_nullability, jobjectArray j_metadata_keys, jobjectArray j_metadata_values,
jint j_compression, jint j_stats_freq, jbooleanArray j_isInt96, jintArray j_precisions,
jbooleanArray j_is_map, jbooleanArray j_is_binary, jbooleanArray j_hasParquetFieldIds,
jintArray j_parquetFieldIds, jobject consumer, jobject host_memory_allocator) {
JNI_NULL_CHECK(env, j_col_names, "null columns", 0);
JNI_NULL_CHECK(env, j_col_nullability, "null nullability", 0);
JNI_NULL_CHECK(env, j_metadata_keys, "null metadata keys", 0);
JNI_NULL_CHECK(env, j_metadata_values, "null metadata values", 0);
JNI_NULL_CHECK(env, consumer, "null consumer", 0);
try {
std::unique_ptr<cudf::jni::jni_writer_data_sink> data_sink(
new cudf::jni::jni_writer_data_sink(env, consumer, host_memory_allocator));
using namespace cudf::io;
using namespace cudf::jni;
sink_info sink{data_sink.get()};
table_input_metadata metadata;
createTableMetaData(env, j_num_children, j_col_names, j_children, j_col_nullability, j_isInt96,
j_precisions, j_is_map, metadata, j_hasParquetFieldIds, j_parquetFieldIds,
j_is_binary);
auto meta_keys = cudf::jni::native_jstringArray{env, j_metadata_keys}.as_cpp_vector();
auto meta_values = cudf::jni::native_jstringArray{env, j_metadata_values}.as_cpp_vector();
std::map<std::string, std::string> kv_metadata;
std::transform(meta_keys.begin(), meta_keys.end(), meta_values.begin(),
std::inserter(kv_metadata, kv_metadata.end()),
[](auto const &key, auto const &value) {
                     // The metadata value will be ignored if it is empty.
                     // Replace it with a single space character to work around this issue.
return std::make_pair(key, value.empty() ? std::string(" ") : value);
});
auto stats = std::make_shared<cudf::io::writer_compression_statistics>();
chunked_parquet_writer_options opts =
chunked_parquet_writer_options::builder(sink)
.metadata(std::move(metadata))
.compression(static_cast<compression_type>(j_compression))
.stats_level(static_cast<statistics_freq>(j_stats_freq))
.key_value_metadata({kv_metadata})
.compression_statistics(stats)
.build();
auto writer_ptr = std::make_unique<cudf::io::parquet_chunked_writer>(opts);
cudf::jni::native_parquet_writer_handle *ret = new cudf::jni::native_parquet_writer_handle(
std::move(writer_ptr), std::move(data_sink), std::move(stats));
return ptr_as_jlong(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_writeParquetFileBegin(
JNIEnv *env, jclass, jobjectArray j_col_names, jint j_num_children, jintArray j_children,
jbooleanArray j_col_nullability, jobjectArray j_metadata_keys, jobjectArray j_metadata_values,
jint j_compression, jint j_stats_freq, jbooleanArray j_isInt96, jintArray j_precisions,
jbooleanArray j_is_map, jbooleanArray j_is_binary, jbooleanArray j_hasParquetFieldIds,
jintArray j_parquetFieldIds, jstring j_output_path) {
JNI_NULL_CHECK(env, j_col_names, "null columns", 0);
JNI_NULL_CHECK(env, j_col_nullability, "null nullability", 0);
JNI_NULL_CHECK(env, j_metadata_keys, "null metadata keys", 0);
JNI_NULL_CHECK(env, j_metadata_values, "null metadata values", 0);
JNI_NULL_CHECK(env, j_output_path, "null output path", 0);
try {
cudf::jni::native_jstring output_path(env, j_output_path);
using namespace cudf::io;
using namespace cudf::jni;
table_input_metadata metadata;
createTableMetaData(env, j_num_children, j_col_names, j_children, j_col_nullability, j_isInt96,
j_precisions, j_is_map, metadata, j_hasParquetFieldIds, j_parquetFieldIds,
j_is_binary);
auto meta_keys = cudf::jni::native_jstringArray{env, j_metadata_keys}.as_cpp_vector();
auto meta_values = cudf::jni::native_jstringArray{env, j_metadata_values}.as_cpp_vector();
std::map<std::string, std::string> kv_metadata;
std::transform(meta_keys.begin(), meta_keys.end(), meta_values.begin(),
std::inserter(kv_metadata, kv_metadata.end()),
[](auto const &key, auto const &value) {
                     // The metadata value will be ignored if it is empty.
                     // Replace it with a single space character to work around this issue.
return std::make_pair(key, value.empty() ? std::string(" ") : value);
});
sink_info sink{output_path.get()};
auto stats = std::make_shared<cudf::io::writer_compression_statistics>();
chunked_parquet_writer_options opts =
chunked_parquet_writer_options::builder(sink)
.metadata(std::move(metadata))
.compression(static_cast<compression_type>(j_compression))
.stats_level(static_cast<statistics_freq>(j_stats_freq))
.key_value_metadata({kv_metadata})
.compression_statistics(stats)
.build();
auto writer_ptr = std::make_unique<cudf::io::parquet_chunked_writer>(opts);
cudf::jni::native_parquet_writer_handle *ret = new cudf::jni::native_parquet_writer_handle(
std::move(writer_ptr), nullptr, std::move(stats));
return ptr_as_jlong(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeParquetChunk(JNIEnv *env, jclass,
jlong j_state, jlong j_table,
jlong mem_size) {
JNI_NULL_CHECK(env, j_table, "null table", );
JNI_NULL_CHECK(env, j_state, "null state", );
using namespace cudf::io;
cudf::table_view *tview_with_empty_nullmask = reinterpret_cast<cudf::table_view *>(j_table);
cudf::table_view tview = cudf::jni::remove_validity_if_needed(tview_with_empty_nullmask);
cudf::jni::native_parquet_writer_handle *state =
reinterpret_cast<cudf::jni::native_parquet_writer_handle *>(j_state);
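  // When writing to a Java-side buffer, size the sink's host allocations from
  // the table's device size: half of `mem_size`, floored at
  // MINIMUM_WRITE_BUFFER_SIZE. A heuristic, presumably trading copy
  // granularity against host memory use.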
if (state->sink) {
long alloc_size = std::max(cudf::jni::MINIMUM_WRITE_BUFFER_SIZE, mem_size / 2);
state->sink->set_alloc_size(alloc_size);
}
try {
cudf::jni::auto_set_device(env);
state->writer->write(tview);
}
CATCH_STD(env, )
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeParquetEnd(JNIEnv *env, jclass,
jlong j_state) {
JNI_NULL_CHECK(env, j_state, "null state", );
using namespace cudf::io;
cudf::jni::native_parquet_writer_handle *state =
reinterpret_cast<cudf::jni::native_parquet_writer_handle *>(j_state);
std::unique_ptr<cudf::jni::native_parquet_writer_handle> make_sure_we_delete(state);
try {
cudf::jni::auto_set_device(env);
state->writer->close();
}
CATCH_STD(env, )
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readORCFromDataSource(
JNIEnv *env, jclass, jobjectArray filter_col_names, jboolean usingNumPyTypes, jint unit,
jobjectArray dec128_col_names, jlong ds_handle) {
JNI_NULL_CHECK(env, ds_handle, "no data source handle given", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
cudf::jni::native_jstringArray n_dec128_col_names(env, dec128_col_names);
auto ds = reinterpret_cast<cudf::io::datasource *>(ds_handle);
cudf::io::source_info source{ds};
auto builder = cudf::io::orc_reader_options::builder(source);
if (n_filter_col_names.size() > 0) {
builder = builder.columns(n_filter_col_names.as_cpp_vector());
}
cudf::io::orc_reader_options opts =
builder.use_index(false)
.use_np_dtypes(static_cast<bool>(usingNumPyTypes))
.timestamp_type(cudf::data_type(static_cast<cudf::type_id>(unit)))
.decimal128_columns(n_dec128_col_names.as_cpp_vector())
.build();
return convert_table_for_return(env, cudf::io::read_orc(opts).tbl);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_readORC(
JNIEnv *env, jclass, jobjectArray filter_col_names, jstring inputfilepath, jlong buffer,
jlong buffer_length, jboolean usingNumPyTypes, jint unit, jobjectArray dec128_col_names) {
bool read_buffer = true;
if (buffer == 0) {
JNI_NULL_CHECK(env, inputfilepath, "input file or buffer must be supplied", NULL);
read_buffer = false;
} else if (inputfilepath != NULL) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"cannot pass in both a buffer and an inputfilepath", NULL);
} else if (buffer_length <= 0) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "An empty buffer is not supported",
NULL);
}
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring filename(env, inputfilepath);
if (!read_buffer && filename.is_empty()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "inputfilepath can't be empty",
NULL);
}
cudf::jni::native_jstringArray n_filter_col_names(env, filter_col_names);
cudf::jni::native_jstringArray n_dec128_col_names(env, dec128_col_names);
    auto source = read_buffer ?
                      cudf::io::source_info(reinterpret_cast<char *>(buffer),
                                            static_cast<std::size_t>(buffer_length)) :
                      cudf::io::source_info(filename.get());
auto builder = cudf::io::orc_reader_options::builder(source);
if (n_filter_col_names.size() > 0) {
builder = builder.columns(n_filter_col_names.as_cpp_vector());
}
cudf::io::orc_reader_options opts =
builder.use_index(false)
.use_np_dtypes(static_cast<bool>(usingNumPyTypes))
.timestamp_type(cudf::data_type(static_cast<cudf::type_id>(unit)))
.decimal128_columns(n_dec128_col_names.as_cpp_vector())
.build();
return convert_table_for_return(env, cudf::io::read_orc(opts).tbl);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_writeORCBufferBegin(
JNIEnv *env, jclass, jobjectArray j_col_names, jint j_num_children, jintArray j_children,
jbooleanArray j_col_nullability, jobjectArray j_metadata_keys, jobjectArray j_metadata_values,
jint j_compression, jintArray j_precisions, jbooleanArray j_is_map, jobject consumer,
jobject host_memory_allocator) {
JNI_NULL_CHECK(env, j_col_names, "null columns", 0);
JNI_NULL_CHECK(env, j_col_nullability, "null nullability", 0);
JNI_NULL_CHECK(env, j_metadata_keys, "null metadata keys", 0);
JNI_NULL_CHECK(env, j_metadata_values, "null metadata values", 0);
JNI_NULL_CHECK(env, consumer, "null consumer", 0);
try {
cudf::jni::auto_set_device(env);
using namespace cudf::io;
using namespace cudf::jni;
table_input_metadata metadata;
    // ORC has no `j_is_int96`, but `createTableMetaData` needs an lvalue.
    jbooleanArray j_is_int96 = NULL;
    // ORC has no `j_parquetFieldIds` either, but `createTableMetaData` needs lvalues.
    jbooleanArray j_hasParquetFieldIds = NULL;
    jintArray j_parquetFieldIds = NULL;
    // Likewise for the Parquet-only binary-columns flag; pass NULL as a placeholder.
    jbooleanArray j_is_binary = NULL;
createTableMetaData(env, j_num_children, j_col_names, j_children, j_col_nullability, j_is_int96,
j_precisions, j_is_map, metadata, j_hasParquetFieldIds, j_parquetFieldIds,
j_is_binary);
auto meta_keys = cudf::jni::native_jstringArray{env, j_metadata_keys}.as_cpp_vector();
auto meta_values = cudf::jni::native_jstringArray{env, j_metadata_values}.as_cpp_vector();
std::map<std::string, std::string> kv_metadata;
std::transform(meta_keys.begin(), meta_keys.end(), meta_values.begin(),
std::inserter(kv_metadata, kv_metadata.end()),
[](const std::string &k, const std::string &v) { return std::make_pair(k, v); });
std::unique_ptr<cudf::jni::jni_writer_data_sink> data_sink(
new cudf::jni::jni_writer_data_sink(env, consumer, host_memory_allocator));
sink_info sink{data_sink.get()};
auto stats = std::make_shared<cudf::io::writer_compression_statistics>();
chunked_orc_writer_options opts = chunked_orc_writer_options::builder(sink)
.metadata(std::move(metadata))
.compression(static_cast<compression_type>(j_compression))
.enable_statistics(ORC_STATISTICS_ROW_GROUP)
.key_value_metadata(kv_metadata)
.compression_statistics(stats)
.build();
auto writer_ptr = std::make_unique<cudf::io::orc_chunked_writer>(opts);
cudf::jni::native_orc_writer_handle *ret = new cudf::jni::native_orc_writer_handle(
std::move(writer_ptr), std::move(data_sink), std::move(stats));
return ptr_as_jlong(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_writeORCFileBegin(
JNIEnv *env, jclass, jobjectArray j_col_names, jint j_num_children, jintArray j_children,
jbooleanArray j_col_nullability, jobjectArray j_metadata_keys, jobjectArray j_metadata_values,
jint j_compression, jintArray j_precisions, jbooleanArray j_is_map, jstring j_output_path) {
JNI_NULL_CHECK(env, j_col_names, "null columns", 0);
JNI_NULL_CHECK(env, j_col_nullability, "null nullability", 0);
JNI_NULL_CHECK(env, j_metadata_keys, "null metadata keys", 0);
JNI_NULL_CHECK(env, j_metadata_values, "null metadata values", 0);
JNI_NULL_CHECK(env, j_output_path, "null output path", 0);
try {
cudf::jni::auto_set_device(env);
using namespace cudf::io;
using namespace cudf::jni;
cudf::jni::native_jstring output_path(env, j_output_path);
table_input_metadata metadata;
    // ORC has no `j_is_int96`, but `createTableMetaData` needs an lvalue.
    jbooleanArray j_is_int96 = NULL;
    // ORC has no `j_parquetFieldIds` either, but `createTableMetaData` needs lvalues.
    jbooleanArray j_hasParquetFieldIds = NULL;
    jintArray j_parquetFieldIds = NULL;
    // Likewise for the Parquet-only binary-columns flag; pass NULL as a placeholder.
    jbooleanArray j_is_binary = NULL;
createTableMetaData(env, j_num_children, j_col_names, j_children, j_col_nullability, j_is_int96,
j_precisions, j_is_map, metadata, j_hasParquetFieldIds, j_parquetFieldIds,
j_is_binary);
auto meta_keys = cudf::jni::native_jstringArray{env, j_metadata_keys}.as_cpp_vector();
auto meta_values = cudf::jni::native_jstringArray{env, j_metadata_values}.as_cpp_vector();
std::map<std::string, std::string> kv_metadata;
std::transform(meta_keys.begin(), meta_keys.end(), meta_values.begin(),
std::inserter(kv_metadata, kv_metadata.end()),
[](const std::string &k, const std::string &v) { return std::make_pair(k, v); });
sink_info sink{output_path.get()};
auto stats = std::make_shared<cudf::io::writer_compression_statistics>();
chunked_orc_writer_options opts = chunked_orc_writer_options::builder(sink)
.metadata(std::move(metadata))
.compression(static_cast<compression_type>(j_compression))
.enable_statistics(ORC_STATISTICS_ROW_GROUP)
.key_value_metadata(kv_metadata)
.compression_statistics(stats)
.build();
auto writer_ptr = std::make_unique<cudf::io::orc_chunked_writer>(opts);
cudf::jni::native_orc_writer_handle *ret =
new cudf::jni::native_orc_writer_handle(std::move(writer_ptr), nullptr, std::move(stats));
return ptr_as_jlong(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeORCChunk(JNIEnv *env, jclass, jlong j_state,
jlong j_table, jlong mem_size) {
JNI_NULL_CHECK(env, j_table, "null table", );
JNI_NULL_CHECK(env, j_state, "null state", );
using namespace cudf::io;
cudf::table_view *tview_orig = reinterpret_cast<cudf::table_view *>(j_table);
cudf::table_view tview = cudf::jni::remove_validity_if_needed(tview_orig);
cudf::jni::native_orc_writer_handle *state =
reinterpret_cast<cudf::jni::native_orc_writer_handle *>(j_state);
if (state->sink) {
long alloc_size = std::max(cudf::jni::MINIMUM_WRITE_BUFFER_SIZE, mem_size / 2);
state->sink->set_alloc_size(alloc_size);
}
try {
cudf::jni::auto_set_device(env);
state->writer->write(tview);
}
CATCH_STD(env, )
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeORCEnd(JNIEnv *env, jclass, jlong j_state) {
JNI_NULL_CHECK(env, j_state, "null state", );
using namespace cudf::io;
cudf::jni::native_orc_writer_handle *state =
reinterpret_cast<cudf::jni::native_orc_writer_handle *>(j_state);
std::unique_ptr<cudf::jni::native_orc_writer_handle> make_sure_we_delete(state);
try {
cudf::jni::auto_set_device(env);
state->writer->close();
}
CATCH_STD(env, )
}
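
// Packs the writer's accumulated compression statistics into a fixed-order
// double array: [compressed bytes, failed bytes, skipped bytes, compression
// ratio]. The Java caller unpacks by position; nullptr means the writer
// collected no statistics.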
JNIEXPORT jdoubleArray JNICALL Java_ai_rapids_cudf_TableWriter_getWriteStatistics(JNIEnv *env,
jclass,
jlong j_state) {
JNI_NULL_CHECK(env, j_state, "null state", nullptr);
using namespace cudf::io;
auto const state = reinterpret_cast<cudf::jni::jni_table_writer_handle_base const *>(j_state);
try {
cudf::jni::auto_set_device(env);
if (!state->stats) {
return nullptr;
}
auto const &stats = *state->stats;
auto output = cudf::jni::native_jdoubleArray(env, 4);
output[0] = static_cast<jdouble>(stats.num_compressed_bytes());
output[1] = static_cast<jdouble>(stats.num_failed_bytes());
output[2] = static_cast<jdouble>(stats.num_skipped_bytes());
output[3] = static_cast<jdouble>(stats.compression_ratio());
return output.get_jArray();
}
CATCH_STD(env, nullptr)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_writeArrowIPCBufferBegin(
JNIEnv *env, jclass, jobjectArray j_col_names, jobject consumer,
jobject host_memory_allocator) {
JNI_NULL_CHECK(env, j_col_names, "null columns", 0);
JNI_NULL_CHECK(env, consumer, "null consumer", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray col_names(env, j_col_names);
std::shared_ptr<cudf::jni::jni_arrow_output_stream> data_sink(
new cudf::jni::jni_arrow_output_stream(env, consumer, host_memory_allocator));
cudf::jni::native_arrow_ipc_writer_handle *ret =
new cudf::jni::native_arrow_ipc_writer_handle(col_names.as_cpp_vector(), data_sink);
return ptr_as_jlong(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_writeArrowIPCFileBegin(JNIEnv *env, jclass,
                                                                         jobjectArray j_col_names,
                                                                         jstring j_output_path) {
JNI_NULL_CHECK(env, j_col_names, "null columns", 0);
JNI_NULL_CHECK(env, j_output_path, "null output path", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstringArray col_names(env, j_col_names);
cudf::jni::native_jstring output_path(env, j_output_path);
cudf::jni::native_arrow_ipc_writer_handle *ret =
new cudf::jni::native_arrow_ipc_writer_handle(col_names.as_cpp_vector(), output_path.get());
return ptr_as_jlong(ret);
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_convertCudfToArrowTable(JNIEnv *env, jclass,
jlong j_state,
jlong j_table) {
JNI_NULL_CHECK(env, j_table, "null table", 0);
JNI_NULL_CHECK(env, j_state, "null state", 0);
cudf::table_view *tview = reinterpret_cast<cudf::table_view *>(j_table);
cudf::jni::native_arrow_ipc_writer_handle *state =
reinterpret_cast<cudf::jni::native_arrow_ipc_writer_handle *>(j_state);
try {
cudf::jni::auto_set_device(env);
// The semantics of this function are confusing:
// The return value is a pointer to a heap-allocated shared_ptr<arrow::Table>.
// i.e. the shared_ptr<> is on the heap.
// The pointer to the shared_ptr<> is returned as a jlong.
using result_t = std::shared_ptr<arrow::Table>;
auto result = cudf::to_arrow(*tview, state->get_column_metadata(*tview));
return ptr_as_jlong(new result_t{result});
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeArrowIPCArrowChunk(JNIEnv *env, jclass,
jlong j_state,
jlong arrow_table_handle,
jlong max_chunk) {
JNI_NULL_CHECK(env, arrow_table_handle, "null arrow table", );
JNI_NULL_CHECK(env, j_state, "null state", );
std::shared_ptr<arrow::Table> *handle =
reinterpret_cast<std::shared_ptr<arrow::Table> *>(arrow_table_handle);
cudf::jni::native_arrow_ipc_writer_handle *state =
reinterpret_cast<cudf::jni::native_arrow_ipc_writer_handle *>(j_state);
try {
cudf::jni::auto_set_device(env);
state->write(*handle, max_chunk);
}
CATCH_STD(env, )
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_writeArrowIPCEnd(JNIEnv *env, jclass,
jlong j_state) {
JNI_NULL_CHECK(env, j_state, "null state", );
cudf::jni::native_arrow_ipc_writer_handle *state =
reinterpret_cast<cudf::jni::native_arrow_ipc_writer_handle *>(j_state);
std::unique_ptr<cudf::jni::native_arrow_ipc_writer_handle> make_sure_we_delete(state);
try {
cudf::jni::auto_set_device(env);
state->close();
}
CATCH_STD(env, )
}
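
// Arrow IPC read lifecycle: the *Begin functions create a native reader
// handle from a file path or a Java-side stream provider,
// readArrowIPCChunkToArrowTable returns one heap-allocated
// std::shared_ptr<arrow::Table> per chunk (freed by closeArrowTable), and
// readArrowIPCEnd closes and deletes the reader.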
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readArrowIPCFileBegin(JNIEnv *env, jclass,
                                                                        jstring j_input_path) {
JNI_NULL_CHECK(env, j_input_path, "null input path", 0);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jstring input_path(env, j_input_path);
return ptr_as_jlong(new cudf::jni::native_arrow_ipc_reader_handle(input_path.get()));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readArrowIPCBufferBegin(JNIEnv *env, jclass,
                                                                          jobject provider) {
JNI_NULL_CHECK(env, provider, "null provider", 0);
try {
cudf::jni::auto_set_device(env);
std::shared_ptr<cudf::jni::jni_arrow_input_stream> data_source(
new cudf::jni::jni_arrow_input_stream(env, provider));
return ptr_as_jlong(new cudf::jni::native_arrow_ipc_reader_handle(data_source));
}
CATCH_STD(env, 0)
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_readArrowIPCChunkToArrowTable(JNIEnv *env, jclass,
jlong j_state,
jint row_target) {
JNI_NULL_CHECK(env, j_state, "null state", 0);
cudf::jni::native_arrow_ipc_reader_handle *state =
reinterpret_cast<cudf::jni::native_arrow_ipc_reader_handle *>(j_state);
try {
cudf::jni::auto_set_device(env);
// This is a little odd because we have to return a pointer
// and arrow wants to deal with shared pointers for everything.
auto result = state->next(row_target);
return result ? ptr_as_jlong(new std::shared_ptr<arrow::Table>{result}) : 0;
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_closeArrowTable(JNIEnv *env, jclass,
jlong arrow_table_handle) {
std::shared_ptr<arrow::Table> *handle =
reinterpret_cast<std::shared_ptr<arrow::Table> *>(arrow_table_handle);
try {
cudf::jni::auto_set_device(env);
delete handle;
}
CATCH_STD(env, )
}
JNIEXPORT jlongArray JNICALL
Java_ai_rapids_cudf_Table_convertArrowTableToCudf(JNIEnv *env, jclass, jlong arrow_table_handle) {
JNI_NULL_CHECK(env, arrow_table_handle, "null arrow handle", 0);
std::shared_ptr<arrow::Table> *handle =
reinterpret_cast<std::shared_ptr<arrow::Table> *>(arrow_table_handle);
try {
cudf::jni::auto_set_device(env);
return convert_table_for_return(env, cudf::from_arrow(*(handle->get())));
}
CATCH_STD(env, 0)
}
JNIEXPORT void JNICALL Java_ai_rapids_cudf_Table_readArrowIPCEnd(JNIEnv *env, jclass,
jlong j_state) {
JNI_NULL_CHECK(env, j_state, "null state", );
cudf::jni::native_arrow_ipc_reader_handle *state =
reinterpret_cast<cudf::jni::native_arrow_ipc_reader_handle *>(j_state);
std::unique_ptr<cudf::jni::native_arrow_ipc_reader_handle> make_sure_we_delete(state);
try {
cudf::jni::auto_set_device(env);
state->close();
}
CATCH_STD(env, )
}
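
// The join entry points below return gather maps (row indices into the left
// and right tables) rather than materialized results, which the Java side
// can apply with a gather. Each join family comes in the same flavors: a
// plain equality join, a hash-join variant reusing a pre-built
// cudf::hash_join, a conditional variant driven by a compiled AST
// expression, and a mixed variant combining equality keys with a condition.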
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_leftJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jboolean compare_nulls_equal) {
return cudf::jni::join_gather_maps(
env, j_left_keys, j_right_keys, compare_nulls_equal,
[](cudf::table_view const &left, cudf::table_view const &right, cudf::null_equality nulleq) {
return cudf::left_join(left, right, nulleq);
});
}
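
// Computes the left-join output row count from a pre-built hash join. The
// *WithCount variants below accept this count back so the gather maps can be
// sized up front instead of computing the join size a second time.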
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_leftJoinRowCount(JNIEnv *env, jclass,
jlong j_left_table,
jlong j_right_hash_join) {
JNI_NULL_CHECK(env, j_left_table, "left table is null", 0);
JNI_NULL_CHECK(env, j_right_hash_join, "right hash join is null", 0);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto hash_join = reinterpret_cast<cudf::hash_join const *>(j_right_hash_join);
auto row_count = hash_join->left_join_size(*left_table);
return static_cast<jlong>(row_count);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_leftHashJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_hash_join) {
return cudf::jni::hash_join_gather_maps(
env, j_left_table, j_right_hash_join,
[](cudf::table_view const &left, cudf::hash_join const &hash) {
return hash.left_join(left);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_leftHashJoinGatherMapsWithCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_hash_join, jlong j_output_row_count) {
auto output_row_count = static_cast<std::size_t>(j_output_row_count);
return cudf::jni::hash_join_gather_maps(
env, j_left_table, j_right_hash_join,
[output_row_count](cudf::table_view const &left, cudf::hash_join const &hash) {
return hash.left_join(left, output_row_count);
});
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_conditionalLeftJoinRowCount(JNIEnv *env, jclass,
jlong j_left_table,
jlong j_right_table,
jlong j_condition) {
JNI_NULL_CHECK(env, j_left_table, "left_table is null", 0);
JNI_NULL_CHECK(env, j_right_table, "right_table is null", 0);
JNI_NULL_CHECK(env, j_condition, "condition is null", 0);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto right_table = reinterpret_cast<cudf::table_view const *>(j_right_table);
auto condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
auto row_count = cudf::conditional_left_join_size(*left_table, *right_table,
condition->get_top_expression());
return static_cast<jlong>(row_count);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalLeftJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition) {
return cudf::jni::cond_join_gather_maps(
env, j_left_table, j_right_table, j_condition,
[](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_left_join(left, right, cond_expr);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalLeftJoinGatherMapsWithCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition,
jlong j_row_count) {
auto row_count = static_cast<std::size_t>(j_row_count);
return cudf::jni::cond_join_gather_maps(
env, j_left_table, j_right_table, j_condition,
[row_count](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_left_join(left, right, cond_expr, row_count);
});
}
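
// Mixed joins take four tables: equality keys for both sides plus the
// columns referenced by the AST condition. The mixed*JoinSize functions
// return a row count together with a per-row matches buffer, and the
// *WithSize variants feed that pair back in (rebuilt by get_mixed_size_info)
// to avoid recomputing it.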
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftJoinSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_size(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_join_size(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_gather_maps(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftJoinGatherMapsWithSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal, jlong j_output_row_count,
jlong j_matches_view) {
auto size_info = cudf::jni::get_mixed_size_info(env, j_output_row_count, j_matches_view);
return cudf::jni::mixed_join_gather_maps(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[&size_info](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal, size_info);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_innerJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jboolean compare_nulls_equal) {
return cudf::jni::join_gather_maps(
env, j_left_keys, j_right_keys, compare_nulls_equal,
[](cudf::table_view const &left, cudf::table_view const &right, cudf::null_equality nulleq) {
return cudf::inner_join(left, right, nulleq);
});
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_innerJoinRowCount(JNIEnv *env, jclass,
jlong j_left_table,
jlong j_right_hash_join) {
JNI_NULL_CHECK(env, j_left_table, "left table is null", 0);
JNI_NULL_CHECK(env, j_right_hash_join, "right hash join is null", 0);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto hash_join = reinterpret_cast<cudf::hash_join const *>(j_right_hash_join);
auto row_count = hash_join->inner_join_size(*left_table);
return static_cast<jlong>(row_count);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_innerHashJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_hash_join) {
return cudf::jni::hash_join_gather_maps(
env, j_left_table, j_right_hash_join,
[](cudf::table_view const &left, cudf::hash_join const &hash) {
return hash.inner_join(left);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_innerHashJoinGatherMapsWithCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_hash_join, jlong j_output_row_count) {
auto output_row_count = static_cast<std::size_t>(j_output_row_count);
return cudf::jni::hash_join_gather_maps(
env, j_left_table, j_right_hash_join,
[output_row_count](cudf::table_view const &left, cudf::hash_join const &hash) {
return hash.inner_join(left, output_row_count);
});
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_conditionalInnerJoinRowCount(JNIEnv *env, jclass,
jlong j_left_table,
jlong j_right_table,
jlong j_condition) {
JNI_NULL_CHECK(env, j_left_table, "left_table is null", 0);
JNI_NULL_CHECK(env, j_right_table, "right_table is null", 0);
JNI_NULL_CHECK(env, j_condition, "condition is null", 0);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto right_table = reinterpret_cast<cudf::table_view const *>(j_right_table);
auto condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
auto row_count = cudf::conditional_inner_join_size(*left_table, *right_table,
condition->get_top_expression());
return static_cast<jlong>(row_count);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalInnerJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition) {
return cudf::jni::cond_join_gather_maps(
env, j_left_table, j_right_table, j_condition,
[](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_inner_join(left, right, cond_expr);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalInnerJoinGatherMapsWithCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition,
jlong j_row_count) {
auto row_count = static_cast<std::size_t>(j_row_count);
return cudf::jni::cond_join_gather_maps(
env, j_left_table, j_right_table, j_condition,
[row_count](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_inner_join(left, right, cond_expr, row_count);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedInnerJoinSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_size(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_inner_join_size(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedInnerJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_gather_maps(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_inner_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedInnerJoinGatherMapsWithSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal, jlong j_output_row_count,
jlong j_matches_view) {
auto size_info = cudf::jni::get_mixed_size_info(env, j_output_row_count, j_matches_view);
return cudf::jni::mixed_join_gather_maps(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[&size_info](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_inner_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal, size_info);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_fullJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jboolean compare_nulls_equal) {
return cudf::jni::join_gather_maps(
env, j_left_keys, j_right_keys, compare_nulls_equal,
[](cudf::table_view const &left, cudf::table_view const &right, cudf::null_equality nulleq) {
return cudf::full_join(left, right, nulleq);
});
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_fullJoinRowCount(JNIEnv *env, jclass,
jlong j_left_table,
jlong j_right_hash_join) {
JNI_NULL_CHECK(env, j_left_table, "left table is null", 0);
JNI_NULL_CHECK(env, j_right_hash_join, "right hash join is null", 0);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto hash_join = reinterpret_cast<cudf::hash_join const *>(j_right_hash_join);
auto row_count = hash_join->full_join_size(*left_table);
return static_cast<jlong>(row_count);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_fullHashJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_hash_join) {
return cudf::jni::hash_join_gather_maps(
env, j_left_table, j_right_hash_join,
[](cudf::table_view const &left, cudf::hash_join const &hash) {
return hash.full_join(left);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_fullHashJoinGatherMapsWithCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_hash_join, jlong j_output_row_count) {
auto output_row_count = static_cast<std::size_t>(j_output_row_count);
return cudf::jni::hash_join_gather_maps(
env, j_left_table, j_right_hash_join,
[output_row_count](cudf::table_view const &left, cudf::hash_join const &hash) {
return hash.full_join(left, output_row_count);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalFullJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition) {
return cudf::jni::cond_join_gather_maps(
env, j_left_table, j_right_table, j_condition,
[](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_full_join(left, right, cond_expr);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedFullJoinGatherMaps(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_gather_maps(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_full_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_leftSemiJoinGatherMap(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jboolean compare_nulls_equal) {
return cudf::jni::join_gather_single_map(
env, j_left_keys, j_right_keys, compare_nulls_equal,
[](cudf::table_view const &left, cudf::table_view const &right, cudf::null_equality nulleq) {
return cudf::left_semi_join(left, right, nulleq);
});
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_conditionalLeftSemiJoinRowCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition) {
JNI_NULL_CHECK(env, j_left_table, "left_table is null", 0);
JNI_NULL_CHECK(env, j_right_table, "right_table is null", 0);
JNI_NULL_CHECK(env, j_condition, "condition is null", 0);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto right_table = reinterpret_cast<cudf::table_view const *>(j_right_table);
auto condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
auto row_count = cudf::conditional_left_semi_join_size(*left_table, *right_table,
condition->get_top_expression());
return static_cast<jlong>(row_count);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalLeftSemiJoinGatherMap(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition) {
return cudf::jni::cond_join_gather_single_map(
env, j_left_table, j_right_table, j_condition,
[](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_left_semi_join(left, right, cond_expr);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalLeftSemiJoinGatherMapWithCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition,
jlong j_row_count) {
auto row_count = static_cast<std::size_t>(j_row_count);
return cudf::jni::cond_join_gather_single_map(
env, j_left_table, j_right_table, j_condition,
[row_count](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_left_semi_join(left, right, cond_expr, row_count);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftSemiJoinSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_size(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_semi_join_size(left_keys, right_keys, left_condition,
right_condition, condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftSemiJoinGatherMap(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_gather_single_map(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_semi_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftSemiJoinGatherMapWithSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal, jlong j_output_row_count,
jlong j_matches_view) {
auto size_info = cudf::jni::get_mixed_size_info(env, j_output_row_count, j_matches_view);
return cudf::jni::mixed_join_gather_single_map(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[&size_info](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_semi_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal, size_info);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_leftAntiJoinGatherMap(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jboolean compare_nulls_equal) {
return cudf::jni::join_gather_single_map(
env, j_left_keys, j_right_keys, compare_nulls_equal,
[](cudf::table_view const &left, cudf::table_view const &right, cudf::null_equality nulleq) {
return cudf::left_anti_join(left, right, nulleq);
});
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_conditionalLeftAntiJoinRowCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition) {
JNI_NULL_CHECK(env, j_left_table, "left_table is null", 0);
JNI_NULL_CHECK(env, j_right_table, "right_table is null", 0);
JNI_NULL_CHECK(env, j_condition, "condition is null", 0);
try {
cudf::jni::auto_set_device(env);
auto left_table = reinterpret_cast<cudf::table_view const *>(j_left_table);
auto right_table = reinterpret_cast<cudf::table_view const *>(j_right_table);
auto condition = reinterpret_cast<cudf::jni::ast::compiled_expr const *>(j_condition);
auto row_count = cudf::conditional_left_anti_join_size(*left_table, *right_table,
condition->get_top_expression());
return static_cast<jlong>(row_count);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalLeftAntiJoinGatherMap(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition) {
return cudf::jni::cond_join_gather_single_map(
env, j_left_table, j_right_table, j_condition,
[](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_left_anti_join(left, right, cond_expr);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_conditionalLeftAntiJoinGatherMapWithCount(
JNIEnv *env, jclass, jlong j_left_table, jlong j_right_table, jlong j_condition,
jlong j_row_count) {
auto row_count = static_cast<std::size_t>(j_row_count);
return cudf::jni::cond_join_gather_single_map(
env, j_left_table, j_right_table, j_condition,
[row_count](cudf::table_view const &left, cudf::table_view const &right,
cudf::ast::expression const &cond_expr) {
return cudf::conditional_left_anti_join(left, right, cond_expr, row_count);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftAntiJoinSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_size(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_anti_join_size(left_keys, right_keys, left_condition,
right_condition, condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftAntiJoinGatherMap(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal) {
return cudf::jni::mixed_join_gather_single_map(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_anti_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_mixedLeftAntiJoinGatherMapWithSize(
JNIEnv *env, jclass, jlong j_left_keys, jlong j_right_keys, jlong j_left_condition,
jlong j_right_condition, jlong j_condition, jboolean j_nulls_equal, jlong j_output_row_count,
jlong j_matches_view) {
auto size_info = cudf::jni::get_mixed_size_info(env, j_output_row_count, j_matches_view);
return cudf::jni::mixed_join_gather_single_map(
env, j_left_keys, j_right_keys, j_left_condition, j_right_condition, j_condition,
j_nulls_equal,
[&size_info](cudf::table_view const &left_keys, cudf::table_view const &right_keys,
cudf::table_view const &left_condition, cudf::table_view const &right_condition,
cudf::ast::expression const &condition, cudf::null_equality nulls_equal) {
return cudf::mixed_left_anti_join(left_keys, right_keys, left_condition, right_condition,
condition, nulls_equal, size_info);
});
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_crossJoin(JNIEnv *env, jclass,
jlong left_table,
jlong right_table) {
JNI_NULL_CHECK(env, left_table, "left_table is null", NULL);
JNI_NULL_CHECK(env, right_table, "right_table is null", NULL);
try {
cudf::jni::auto_set_device(env);
auto const left = reinterpret_cast<cudf::table_view const *>(left_table);
auto const right = reinterpret_cast<cudf::table_view const *>(right_table);
return convert_table_for_return(env, cudf::cross_join(*left, *right));
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_interleaveColumns(JNIEnv *env, jclass,
jlongArray j_cudf_table_view) {
JNI_NULL_CHECK(env, j_cudf_table_view, "table is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::table_view *table_view = reinterpret_cast<cudf::table_view *>(j_cudf_table_view);
return release_as_jlong(cudf::interleave_columns(*table_view));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_concatenate(JNIEnv *env, jclass,
jlongArray table_handles) {
JNI_NULL_CHECK(env, table_handles, "input tables are null", NULL);
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jpointerArray<cudf::table_view> tables(env, table_handles);
std::vector<cudf::table_view> const to_concat = tables.get_dereferenced();
return convert_table_for_return(env, cudf::concatenate(to_concat));
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_partition(JNIEnv *env, jclass,
jlong input_table,
jlong partition_column,
jint number_of_partitions,
jintArray output_offsets) {
JNI_NULL_CHECK(env, input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, partition_column, "partition_column is null", NULL);
JNI_NULL_CHECK(env, output_offsets, "output_offsets is null", NULL);
  JNI_ARG_CHECK(env, number_of_partitions > 0, "number_of_partitions must be positive", NULL);
try {
cudf::jni::auto_set_device(env);
auto const n_input_table = reinterpret_cast<cudf::table_view const *>(input_table);
auto const n_part_column = reinterpret_cast<cudf::column_view const *>(partition_column);
auto [partitioned_table, partition_offsets] =
cudf::partition(*n_input_table, *n_part_column, number_of_partitions);
    // For whatever reason partition returns the length of the result at the
    // end while hash partition/round robin do not, so skip the last entry for
    // consistency.
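    // e.g. partitioning 10 rows into 3 partitions may yield offsets
    // [0, 4, 7, 10] here, whereas hash/round-robin would report [0, 4, 7].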
cudf::jni::native_jintArray n_output_offsets(env, output_offsets);
std::copy(partition_offsets.begin(), partition_offsets.end() - 1, n_output_offsets.begin());
return convert_table_for_return(env, partitioned_table);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_hashPartition(
JNIEnv *env, jclass, jlong input_table, jintArray columns_to_hash, jint hash_function,
jint number_of_partitions, jint seed, jintArray output_offsets) {
JNI_NULL_CHECK(env, input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, columns_to_hash, "columns_to_hash is null", NULL);
JNI_NULL_CHECK(env, output_offsets, "output_offsets is null", NULL);
  JNI_ARG_CHECK(env, number_of_partitions > 0, "number_of_partitions must be positive", NULL);
try {
cudf::jni::auto_set_device(env);
auto const hash_func = static_cast<cudf::hash_id>(hash_function);
auto const hash_seed = static_cast<uint32_t>(seed);
auto const n_input_table = reinterpret_cast<cudf::table_view const *>(input_table);
cudf::jni::native_jintArray n_columns_to_hash(env, columns_to_hash);
    JNI_ARG_CHECK(env, n_columns_to_hash.size() > 0, "columns_to_hash is empty", NULL);
std::vector<cudf::size_type> columns_to_hash_vec(n_columns_to_hash.begin(),
n_columns_to_hash.end());
auto [partitioned_table, partition_offsets] = cudf::hash_partition(
*n_input_table, columns_to_hash_vec, number_of_partitions, hash_func, hash_seed);
cudf::jni::native_jintArray n_output_offsets(env, output_offsets);
std::copy(partition_offsets.begin(), partition_offsets.end(), n_output_offsets.begin());
return convert_table_for_return(env, partitioned_table);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_roundRobinPartition(
JNIEnv *env, jclass, jlong input_table, jint num_partitions, jint start_partition,
jintArray output_offsets) {
JNI_NULL_CHECK(env, input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, output_offsets, "output_offsets is null", NULL);
JNI_ARG_CHECK(env, num_partitions > 0, "num_partitions <= 0", NULL);
JNI_ARG_CHECK(env, start_partition >= 0, "start_partition is negative", NULL);
try {
cudf::jni::auto_set_device(env);
auto n_input_table = reinterpret_cast<cudf::table_view *>(input_table);
auto [partitioned_table, partition_offsets] =
cudf::round_robin_partition(*n_input_table, num_partitions, start_partition);
cudf::jni::native_jintArray n_output_offsets(env, output_offsets);
std::copy(partition_offsets.begin(), partition_offsets.end(), n_output_offsets.begin());
return convert_table_for_return(env, partitioned_table);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_groupByAggregate(
JNIEnv *env, jclass, jlong input_table, jintArray keys, jintArray aggregate_column_indices,
jlongArray agg_instances, jboolean ignore_null_keys, jboolean jkey_sorted,
jbooleanArray jkeys_sort_desc, jbooleanArray jkeys_null_first) {
JNI_NULL_CHECK(env, input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, keys, "input keys are null", NULL);
JNI_NULL_CHECK(env, aggregate_column_indices, "input aggregate_column_indices are null", NULL);
JNI_NULL_CHECK(env, agg_instances, "agg_instances are null", NULL);
try {
cudf::jni::auto_set_device(env);
cudf::table_view *n_input_table = reinterpret_cast<cudf::table_view *>(input_table);
cudf::jni::native_jintArray n_keys(env, keys);
cudf::jni::native_jintArray n_values(env, aggregate_column_indices);
cudf::jni::native_jpointerArray<cudf::aggregation> n_agg_instances(env, agg_instances);
std::vector<cudf::column_view> n_keys_cols;
n_keys_cols.reserve(n_keys.size());
for (int i = 0; i < n_keys.size(); i++) {
n_keys_cols.push_back(n_input_table->column(n_keys[i]));
}
cudf::table_view n_keys_table(n_keys_cols);
auto column_order = cudf::jni::resolve_column_order(env, jkeys_sort_desc, n_keys.size());
auto null_precedence = cudf::jni::resolve_null_precedence(env, jkeys_null_first, n_keys.size());
cudf::groupby::groupby grouper(
n_keys_table, ignore_null_keys ? cudf::null_policy::EXCLUDE : cudf::null_policy::INCLUDE,
jkey_sorted ? cudf::sorted::YES : cudf::sorted::NO, column_order, null_precedence);
    // Aggregates are passed in already grouped by column, so we just need to
    // fill in the requests as we go.
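    // e.g. aggregate column indices [3, 3, 5] with ops [SUM, MIN, MAX] become
    // two requests: {column 3: [SUM, MIN]} and {column 5: [MAX]}. Note that
    // only adjacent entries for the same column are merged.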
std::vector<cudf::groupby::aggregation_request> requests;
int previous_index = -1;
for (int i = 0; i < n_values.size(); i++) {
cudf::groupby::aggregation_request req;
int col_index = n_values[i];
cudf::groupby_aggregation *agg =
dynamic_cast<cudf::groupby_aggregation *>(n_agg_instances[i]);
JNI_ARG_CHECK(env, agg != nullptr, "aggregation is not an instance of groupby_aggregation",
nullptr);
std::unique_ptr<cudf::groupby_aggregation> cloned(
dynamic_cast<cudf::groupby_aggregation *>(agg->clone().release()));
if (col_index == previous_index) {
requests.back().aggregations.push_back(std::move(cloned));
} else {
req.values = n_input_table->column(col_index);
req.aggregations.push_back(std::move(cloned));
requests.push_back(std::move(req));
}
previous_index = col_index;
}
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::groupby::aggregation_result>> result =
grouper.aggregate(requests);
std::vector<std::unique_ptr<cudf::column>> result_columns;
int agg_result_size = result.second.size();
for (int agg_result_index = 0; agg_result_index < agg_result_size; agg_result_index++) {
int col_agg_size = result.second[agg_result_index].results.size();
for (int col_agg_index = 0; col_agg_index < col_agg_size; col_agg_index++) {
result_columns.push_back(std::move(result.second[agg_result_index].results[col_agg_index]));
}
}
return convert_table_for_return(env, result.first, std::move(result_columns));
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_groupByScan(
JNIEnv *env, jclass, jlong input_table, jintArray keys, jintArray aggregate_column_indices,
jlongArray agg_instances, jboolean ignore_null_keys, jboolean jkey_sorted,
jbooleanArray jkeys_sort_desc, jbooleanArray jkeys_null_first) {
JNI_NULL_CHECK(env, input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, keys, "input keys are null", NULL);
JNI_NULL_CHECK(env, aggregate_column_indices, "input aggregate_column_indices are null", NULL);
JNI_NULL_CHECK(env, agg_instances, "agg_instances are null", NULL);
try {
cudf::jni::auto_set_device(env);
cudf::table_view *n_input_table = reinterpret_cast<cudf::table_view *>(input_table);
cudf::jni::native_jintArray n_keys(env, keys);
cudf::jni::native_jintArray n_values(env, aggregate_column_indices);
cudf::jni::native_jpointerArray<cudf::aggregation> n_agg_instances(env, agg_instances);
std::vector<cudf::column_view> n_keys_cols;
n_keys_cols.reserve(n_keys.size());
for (int i = 0; i < n_keys.size(); i++) {
n_keys_cols.push_back(n_input_table->column(n_keys[i]));
}
cudf::table_view n_keys_table(n_keys_cols);
auto column_order = cudf::jni::resolve_column_order(env, jkeys_sort_desc, n_keys.size());
auto null_precedence = cudf::jni::resolve_null_precedence(env, jkeys_null_first, n_keys.size());
cudf::groupby::groupby grouper(
n_keys_table, ignore_null_keys ? cudf::null_policy::EXCLUDE : cudf::null_policy::INCLUDE,
jkey_sorted ? cudf::sorted::YES : cudf::sorted::NO, column_order, null_precedence);
    // Aggregates are passed in already grouped by column, so we just need to
    // fill in the requests as we go.
std::vector<cudf::groupby::scan_request> requests;
int previous_index = -1;
for (int i = 0; i < n_values.size(); i++) {
cudf::groupby::scan_request req;
int col_index = n_values[i];
cudf::groupby_scan_aggregation *agg =
dynamic_cast<cudf::groupby_scan_aggregation *>(n_agg_instances[i]);
JNI_ARG_CHECK(env, agg != nullptr,
"aggregation is not an instance of groupby_scan_aggregation", nullptr);
std::unique_ptr<cudf::groupby_scan_aggregation> cloned(
dynamic_cast<cudf::groupby_scan_aggregation *>(agg->clone().release()));
if (col_index == previous_index) {
requests.back().aggregations.push_back(std::move(cloned));
} else {
req.values = n_input_table->column(col_index);
req.aggregations.push_back(std::move(cloned));
requests.push_back(std::move(req));
}
previous_index = col_index;
}
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::groupby::aggregation_result>> result =
grouper.scan(requests);
std::vector<std::unique_ptr<cudf::column>> result_columns;
int agg_result_size = result.second.size();
for (int agg_result_index = 0; agg_result_index < agg_result_size; agg_result_index++) {
int col_agg_size = result.second[agg_result_index].results.size();
for (int col_agg_index = 0; col_agg_index < col_agg_size; col_agg_index++) {
result_columns.push_back(std::move(result.second[agg_result_index].results[col_agg_index]));
}
}
return convert_table_for_return(env, result.first, std::move(result_columns));
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_groupByReplaceNulls(
JNIEnv *env, jclass, jlong input_table, jintArray keys, jintArray replace_column_indices,
jbooleanArray is_preceding, jboolean ignore_null_keys, jboolean jkey_sorted,
jbooleanArray jkeys_sort_desc, jbooleanArray jkeys_null_first) {
JNI_NULL_CHECK(env, input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, keys, "input keys are null", NULL);
JNI_NULL_CHECK(env, replace_column_indices, "input replace_column_indices are null", NULL);
JNI_NULL_CHECK(env, is_preceding, "is_preceding are null", NULL);
try {
cudf::jni::auto_set_device(env);
cudf::table_view *n_input_table = reinterpret_cast<cudf::table_view *>(input_table);
cudf::jni::native_jintArray n_keys(env, keys);
cudf::jni::native_jintArray n_values(env, replace_column_indices);
cudf::jni::native_jbooleanArray n_is_preceding(env, is_preceding);
std::vector<cudf::column_view> n_keys_cols;
n_keys_cols.reserve(n_keys.size());
for (int i = 0; i < n_keys.size(); i++) {
n_keys_cols.push_back(n_input_table->column(n_keys[i]));
}
cudf::table_view n_keys_table(n_keys_cols);
auto column_order = cudf::jni::resolve_column_order(env, jkeys_sort_desc, n_keys.size());
auto null_precedence = cudf::jni::resolve_null_precedence(env, jkeys_null_first, n_keys.size());
cudf::groupby::groupby grouper(
n_keys_table, ignore_null_keys ? cudf::null_policy::EXCLUDE : cudf::null_policy::INCLUDE,
jkey_sorted ? cudf::sorted::YES : cudf::sorted::NO, column_order, null_precedence);
    // Gather the replacement target columns, identified by index, into a
    // single table view.
std::vector<cudf::column_view> n_replace_cols;
n_replace_cols.reserve(n_values.size());
for (int i = 0; i < n_values.size(); i++) {
n_replace_cols.push_back(n_input_table->column(n_values[i]));
}
cudf::table_view n_replace_table(n_replace_cols);
std::vector<cudf::replace_policy> policies = n_is_preceding.transform_if_else(
cudf::replace_policy::PRECEDING, cudf::replace_policy::FOLLOWING);
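    // PRECEDING fills each null from the last non-null value above it within
    // the group; FOLLOWING fills from the next non-null value below it.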
auto [keys, results] = grouper.replace_nulls(n_replace_table, policies);
return convert_table_for_return(env, keys, results);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_filter(JNIEnv *env, jclass,
jlong input_jtable, jlong mask_jcol) {
JNI_NULL_CHECK(env, input_jtable, "input table is null", 0);
JNI_NULL_CHECK(env, mask_jcol, "mask column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(input_jtable);
auto const mask = reinterpret_cast<cudf::column_view const *>(mask_jcol);
return convert_table_for_return(env, cudf::apply_boolean_mask(*input, *mask));
}
CATCH_STD(env, 0);
}
JNIEXPORT jint JNICALL Java_ai_rapids_cudf_Table_distinctCount(JNIEnv *env, jclass,
jlong input_jtable,
jboolean nulls_equal) {
JNI_NULL_CHECK(env, input_jtable, "input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(input_jtable);
return cudf::distinct_count(*input, nulls_equal ? cudf::null_equality::EQUAL :
cudf::null_equality::UNEQUAL);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_dropDuplicates(JNIEnv *env, jclass,
jlong input_jtable,
jintArray key_columns,
jint keep,
jboolean nulls_equal) {
JNI_NULL_CHECK(env, input_jtable, "input table is null", 0);
JNI_NULL_CHECK(env, key_columns, "input key_columns is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(input_jtable);
static_assert(sizeof(jint) == sizeof(cudf::size_type), "Integer types mismatched.");
auto const native_keys_indices = cudf::jni::native_jintArray(env, key_columns);
auto const keys_indices =
std::vector<cudf::size_type>(native_keys_indices.begin(), native_keys_indices.end());
auto const keep_option = [&] {
switch (keep) {
case 0: return cudf::duplicate_keep_option::KEEP_ANY;
case 1: return cudf::duplicate_keep_option::KEEP_FIRST;
case 2: return cudf::duplicate_keep_option::KEEP_LAST;
case 3: return cudf::duplicate_keep_option::KEEP_NONE;
default:
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "Invalid `keep` option",
cudf::duplicate_keep_option::KEEP_ANY);
}
}();
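    // e.g. keep == 1 maps to KEEP_FIRST, which retains the first occurrence of
    // each duplicate key set; KEEP_ANY keeps an arbitrary one.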
auto result =
cudf::distinct(*input, keys_indices, keep_option,
nulls_equal ? cudf::null_equality::EQUAL : cudf::null_equality::UNEQUAL,
cudf::nan_equality::ALL_EQUAL, rmm::mr::get_current_device_resource());
return convert_table_for_return(env, result);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_gather(JNIEnv *env, jclass, jlong j_input,
jlong j_map, jboolean check_bounds) {
JNI_NULL_CHECK(env, j_input, "input table is null", 0);
JNI_NULL_CHECK(env, j_map, "map column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(j_input);
auto const map = reinterpret_cast<cudf::column_view const *>(j_map);
auto bounds_policy =
check_bounds ? cudf::out_of_bounds_policy::NULLIFY : cudf::out_of_bounds_policy::DONT_CHECK;
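    // NULLIFY turns out-of-range gather indices into null output rows;
    // DONT_CHECK is faster, but invalid indices are undefined behavior.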
return convert_table_for_return(env, cudf::gather(*input, *map, bounds_policy));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL
Java_ai_rapids_cudf_Table_convertToRowsFixedWidthOptimized(JNIEnv *env, jclass, jlong input_table) {
JNI_NULL_CHECK(env, input_table, "input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const n_input_table = reinterpret_cast<cudf::table_view const *>(input_table);
std::vector<std::unique_ptr<cudf::column>> cols =
cudf::jni::convert_to_rows_fixed_width_optimized(*n_input_table);
int num_columns = cols.size();
cudf::jni::native_jlongArray outcol_handles(env, num_columns);
std::transform(cols.begin(), cols.end(), outcol_handles.begin(),
[](auto &col) { return release_as_jlong(col); });
return outcol_handles.get_jArray();
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_scatterTable(JNIEnv *env, jclass,
jlong j_input, jlong j_map,
jlong j_target) {
JNI_NULL_CHECK(env, j_input, "input table is null", 0);
JNI_NULL_CHECK(env, j_map, "map column is null", 0);
JNI_NULL_CHECK(env, j_target, "target table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(j_input);
auto const map = reinterpret_cast<cudf::column_view const *>(j_map);
auto const target = reinterpret_cast<cudf::table_view const *>(j_target);
return convert_table_for_return(env, cudf::scatter(*input, *map, *target));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_scatterScalars(JNIEnv *env, jclass,
jlongArray j_input,
jlong j_map, jlong j_target) {
JNI_NULL_CHECK(env, j_input, "input scalars array is null", 0);
JNI_NULL_CHECK(env, j_map, "map column is null", 0);
JNI_NULL_CHECK(env, j_target, "target table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const scalars_array = cudf::jni::native_jpointerArray<cudf::scalar>(env, j_input);
std::vector<std::reference_wrapper<cudf::scalar const>> input;
std::transform(scalars_array.begin(), scalars_array.end(), std::back_inserter(input),
[](auto &scalar) { return std::ref(*scalar); });
auto const map = reinterpret_cast<cudf::column_view const *>(j_map);
auto const target = reinterpret_cast<cudf::table_view const *>(j_target);
return convert_table_for_return(env, cudf::scatter(input, *map, *target));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_convertToRows(JNIEnv *env, jclass,
jlong input_table) {
JNI_NULL_CHECK(env, input_table, "input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const n_input_table = reinterpret_cast<cudf::table_view const *>(input_table);
std::vector<std::unique_ptr<cudf::column>> cols = cudf::jni::convert_to_rows(*n_input_table);
int num_columns = cols.size();
cudf::jni::native_jlongArray outcol_handles(env, num_columns);
std::transform(cols.begin(), cols.end(), outcol_handles.begin(),
[](auto &col) { return release_as_jlong(col); });
return outcol_handles.get_jArray();
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_convertFromRowsFixedWidthOptimized(
JNIEnv *env, jclass, jlong input_column, jintArray types, jintArray scale) {
JNI_NULL_CHECK(env, input_column, "input column is null", 0);
JNI_NULL_CHECK(env, types, "types is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::lists_column_view const list_input{*reinterpret_cast<cudf::column_view *>(input_column)};
cudf::jni::native_jintArray n_types(env, types);
cudf::jni::native_jintArray n_scale(env, scale);
if (n_types.size() != n_scale.size()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "types and scales must match size",
NULL);
}
std::vector<cudf::data_type> types_vec;
std::transform(n_types.begin(), n_types.end(), n_scale.begin(), std::back_inserter(types_vec),
[](jint type, jint scale) { return cudf::jni::make_data_type(type, scale); });
std::unique_ptr<cudf::table> result =
cudf::jni::convert_from_rows_fixed_width_optimized(list_input, types_vec);
return convert_table_for_return(env, result);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_convertFromRows(JNIEnv *env, jclass,
jlong input_column,
jintArray types,
jintArray scale) {
JNI_NULL_CHECK(env, input_column, "input column is null", 0);
JNI_NULL_CHECK(env, types, "types is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::lists_column_view const list_input{*reinterpret_cast<cudf::column_view *>(input_column)};
cudf::jni::native_jintArray n_types(env, types);
cudf::jni::native_jintArray n_scale(env, scale);
if (n_types.size() != n_scale.size()) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException", "types and scales must match size",
NULL);
}
std::vector<cudf::data_type> types_vec;
std::transform(n_types.begin(), n_types.end(), n_scale.begin(), std::back_inserter(types_vec),
[](jint type, jint scale) { return cudf::jni::make_data_type(type, scale); });
std::unique_ptr<cudf::table> result = cudf::jni::convert_from_rows(list_input, types_vec);
return convert_table_for_return(env, result);
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_repeatStaticCount(JNIEnv *env, jclass,
jlong input_jtable,
jint count) {
JNI_NULL_CHECK(env, input_jtable, "input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(input_jtable);
return convert_table_for_return(env, cudf::repeat(*input, count));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_repeatColumnCount(JNIEnv *env, jclass,
jlong input_jtable,
jlong count_jcol) {
JNI_NULL_CHECK(env, input_jtable, "input table is null", 0);
JNI_NULL_CHECK(env, count_jcol, "count column is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(input_jtable);
auto const count = reinterpret_cast<cudf::column_view const *>(count_jcol);
return convert_table_for_return(env, cudf::repeat(*input, *count));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_bound(JNIEnv *env, jclass, jlong input_jtable,
jlong values_jtable,
jbooleanArray desc_flags,
jbooleanArray are_nulls_smallest,
jboolean is_upper_bound) {
JNI_NULL_CHECK(env, input_jtable, "input table is null", 0);
JNI_NULL_CHECK(env, values_jtable, "values table is null", 0);
using cudf::column;
using cudf::table_view;
try {
cudf::jni::auto_set_device(env);
table_view *input = reinterpret_cast<table_view *>(input_jtable);
table_view *values = reinterpret_cast<table_view *>(values_jtable);
cudf::jni::native_jbooleanArray const n_desc_flags(env, desc_flags);
cudf::jni::native_jbooleanArray const n_are_nulls_smallest(env, are_nulls_smallest);
std::vector<cudf::order> column_desc_flags{
n_desc_flags.transform_if_else(cudf::order::DESCENDING, cudf::order::ASCENDING)};
std::vector<cudf::null_order> column_null_orders{
n_are_nulls_smallest.transform_if_else(cudf::null_order::BEFORE, cudf::null_order::AFTER)};
JNI_ARG_CHECK(env, (column_desc_flags.size() == column_null_orders.size()),
"null-order and sort-order size mismatch", 0);
return release_as_jlong(
is_upper_bound ? cudf::upper_bound(*input, *values, column_desc_flags, column_null_orders) :
cudf::lower_bound(*input, *values, column_desc_flags, column_null_orders));
}
CATCH_STD(env, 0);
}
JNIEXPORT jobjectArray JNICALL Java_ai_rapids_cudf_Table_contiguousSplit(JNIEnv *env, jclass,
jlong input_table,
jintArray split_indices) {
JNI_NULL_CHECK(env, input_table, "native handle is null", 0);
JNI_NULL_CHECK(env, split_indices, "split indices are null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::table_view *n_table = reinterpret_cast<cudf::table_view *>(input_table);
cudf::jni::native_jintArray n_split_indices(env, split_indices);
std::vector<cudf::size_type> indices(n_split_indices.data(),
n_split_indices.data() + n_split_indices.size());
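    // e.g. split indices [2, 5] on an 8-row table produce three packed tables
    // covering row ranges [0, 2), [2, 5) and [5, 8).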
std::vector<cudf::packed_table> result = cudf::contiguous_split(*n_table, indices);
cudf::jni::native_jobjectArray<jobject> n_result =
cudf::jni::contiguous_table_array(env, result.size());
for (size_t i = 0; i < result.size(); i++) {
n_result.set(
i, cudf::jni::contiguous_table_from(env, result[i].data, result[i].table.num_rows()));
}
return n_result.wrapped();
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_makeChunkedPack(JNIEnv *env, jclass,
jlong input_table,
jlong bounce_buffer_size,
jlong memoryResourceHandle) {
JNI_NULL_CHECK(env, input_table, "native handle is null", 0);
try {
cudf::jni::auto_set_device(env);
cudf::table_view *n_table = reinterpret_cast<cudf::table_view *>(input_table);
// `temp_mr` is the memory resource that `cudf::chunked_pack` will use to create temporary
// and scratch memory only.
auto temp_mr = memoryResourceHandle != 0 ?
reinterpret_cast<rmm::mr::device_memory_resource *>(memoryResourceHandle) :
rmm::mr::get_current_device_resource();
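    // `bounce_buffer_size` is the capacity of the caller-provided staging
    // buffer; each chunk emitted by the resulting chunked_pack is sized to fit
    // within it (per the cudf::chunked_pack contract, to our understanding).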
auto chunked_pack = cudf::chunked_pack::create(*n_table, bounce_buffer_size, temp_mr);
return reinterpret_cast<jlong>(chunked_pack.release());
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_rollingWindowAggregate(
JNIEnv *env, jclass, jlong j_input_table, jintArray j_keys, jlongArray j_default_output,
jintArray j_aggregate_column_indices, jlongArray j_agg_instances, jintArray j_min_periods,
jintArray j_preceding, jintArray j_following, jbooleanArray j_unbounded_preceding,
jbooleanArray j_unbounded_following, jboolean ignore_null_keys) {
JNI_NULL_CHECK(env, j_input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, j_keys, "input keys are null", NULL);
JNI_NULL_CHECK(env, j_aggregate_column_indices, "input aggregate_column_indices are null", NULL);
JNI_NULL_CHECK(env, j_agg_instances, "agg_instances are null", NULL);
JNI_NULL_CHECK(env, j_default_output, "default_outputs are null", NULL);
try {
cudf::jni::auto_set_device(env);
using cudf::jni::valid_window_parameters;
// Convert from j-types to native.
cudf::table_view *input_table{reinterpret_cast<cudf::table_view *>(j_input_table)};
cudf::jni::native_jintArray keys{env, j_keys};
cudf::jni::native_jintArray values{env, j_aggregate_column_indices};
cudf::jni::native_jpointerArray<cudf::aggregation> agg_instances(env, j_agg_instances);
cudf::jni::native_jpointerArray<cudf::column_view> default_output(env, j_default_output);
cudf::jni::native_jintArray min_periods{env, j_min_periods};
cudf::jni::native_jintArray preceding{env, j_preceding};
cudf::jni::native_jintArray following{env, j_following};
cudf::jni::native_jbooleanArray unbounded_preceding{env, j_unbounded_preceding};
cudf::jni::native_jbooleanArray unbounded_following{env, j_unbounded_following};
if (not valid_window_parameters(values, agg_instances, min_periods, preceding, following)) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"Number of aggregation columns must match number of agg ops, and window-specs",
nullptr);
}
// Extract table-view.
cudf::table_view groupby_keys{
input_table->select(std::vector<cudf::size_type>(keys.data(), keys.data() + keys.size()))};
std::vector<std::unique_ptr<cudf::column>> result_columns;
for (int i(0); i < values.size(); ++i) {
cudf::rolling_aggregation *agg = dynamic_cast<cudf::rolling_aggregation *>(agg_instances[i]);
JNI_ARG_CHECK(env, agg != nullptr, "aggregation is not an instance of rolling_aggregation",
nullptr);
int agg_column_index = values[i];
auto const preceding_window_bounds = unbounded_preceding[i] ?
cudf::window_bounds::unbounded() :
cudf::window_bounds::get(preceding[i]);
auto const following_window_bounds = unbounded_following[i] ?
cudf::window_bounds::unbounded() :
cudf::window_bounds::get(following[i]);
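      // Note: in cudf a preceding bound of N conventionally covers the current
      // row plus the N-1 rows before it, while following counts rows after it.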
if (default_output[i] != nullptr) {
result_columns.emplace_back(cudf::grouped_rolling_window(
groupby_keys, input_table->column(agg_column_index), *default_output[i],
preceding_window_bounds, following_window_bounds, min_periods[i], *agg));
} else {
result_columns.emplace_back(cudf::grouped_rolling_window(
groupby_keys, input_table->column(agg_column_index), preceding_window_bounds,
following_window_bounds, min_periods[i], *agg));
}
}
auto result_table = std::make_unique<cudf::table>(std::move(result_columns));
return convert_table_for_return(env, result_table);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_rangeRollingWindowAggregate(
JNIEnv *env, jclass, jlong j_input_table, jintArray j_keys, jintArray j_orderby_column_indices,
jbooleanArray j_is_orderby_ascending, jintArray j_aggregate_column_indices,
jlongArray j_agg_instances, jintArray j_min_periods, jlongArray j_preceding,
jlongArray j_following, jintArray j_preceding_extent, jintArray j_following_extent,
jboolean ignore_null_keys) {
JNI_NULL_CHECK(env, j_input_table, "input table is null", NULL);
JNI_NULL_CHECK(env, j_keys, "input keys are null", NULL);
JNI_NULL_CHECK(env, j_orderby_column_indices, "input orderby_column_indices are null", NULL);
JNI_NULL_CHECK(env, j_is_orderby_ascending, "input orderby_ascending is null", NULL);
JNI_NULL_CHECK(env, j_aggregate_column_indices, "input aggregate_column_indices are null", NULL);
JNI_NULL_CHECK(env, j_agg_instances, "agg_instances are null", NULL);
JNI_NULL_CHECK(env, j_preceding, "preceding are null", NULL);
JNI_NULL_CHECK(env, j_following, "following are null", NULL);
try {
cudf::jni::auto_set_device(env);
using cudf::jni::valid_window_parameters;
// Convert from j-types to native.
cudf::table_view *input_table{reinterpret_cast<cudf::table_view *>(j_input_table)};
cudf::jni::native_jintArray keys{env, j_keys};
cudf::jni::native_jintArray orderbys{env, j_orderby_column_indices};
cudf::jni::native_jbooleanArray orderbys_ascending{env, j_is_orderby_ascending};
cudf::jni::native_jintArray values{env, j_aggregate_column_indices};
cudf::jni::native_jpointerArray<cudf::aggregation> agg_instances(env, j_agg_instances);
cudf::jni::native_jintArray min_periods{env, j_min_periods};
cudf::jni::native_jintArray preceding_extent{env, j_preceding_extent};
cudf::jni::native_jintArray following_extent{env, j_following_extent};
cudf::jni::native_jpointerArray<cudf::scalar> preceding(env, j_preceding);
cudf::jni::native_jpointerArray<cudf::scalar> following(env, j_following);
if (not valid_window_parameters(values, agg_instances, min_periods, preceding, following)) {
JNI_THROW_NEW(env, "java/lang/IllegalArgumentException",
"Number of aggregation columns must match number of agg ops, and window-specs",
nullptr);
}
// Extract table-view.
cudf::table_view groupby_keys{
input_table->select(std::vector<cudf::size_type>(keys.data(), keys.data() + keys.size()))};
std::vector<std::unique_ptr<cudf::column>> result_columns;
for (int i(0); i < values.size(); ++i) {
int agg_column_index = values[i];
cudf::column_view const &order_by_column = input_table->column(orderbys[i]);
cudf::data_type order_by_type = order_by_column.type();
cudf::data_type duration_type = order_by_type;
// Range extents are defined as:
// a) 0 == CURRENT ROW
// b) 1 == BOUNDED
// c) 2 == UNBOUNDED
      // The duration type is only needed for the CURRENT ROW and UNBOUNDED cases.
auto constexpr CURRENT_ROW = 0;
auto constexpr BOUNDED = 1;
auto constexpr UNBOUNDED = 2;
if (preceding_extent[i] != BOUNDED || following_extent[i] != BOUNDED) {
switch (order_by_type.id()) {
case cudf::type_id::TIMESTAMP_DAYS:
duration_type = cudf::data_type{cudf::type_id::DURATION_DAYS};
break;
case cudf::type_id::TIMESTAMP_SECONDS:
duration_type = cudf::data_type{cudf::type_id::DURATION_SECONDS};
break;
case cudf::type_id::TIMESTAMP_MILLISECONDS:
duration_type = cudf::data_type{cudf::type_id::DURATION_MILLISECONDS};
break;
case cudf::type_id::TIMESTAMP_MICROSECONDS:
duration_type = cudf::data_type{cudf::type_id::DURATION_MICROSECONDS};
break;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
duration_type = cudf::data_type{cudf::type_id::DURATION_NANOSECONDS};
break;
default: break;
}
}
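      // e.g. a TIMESTAMP_DAYS order-by column pairs with DURATION_DAYS bounds
      // for CURRENT ROW / UNBOUNDED extents, while a BOUNDED extent takes its
      // type from the user-provided scalar in make_window_bounds below.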
cudf::rolling_aggregation *agg = dynamic_cast<cudf::rolling_aggregation *>(agg_instances[i]);
JNI_ARG_CHECK(env, agg != nullptr, "aggregation is not an instance of rolling_aggregation",
nullptr);
auto const make_window_bounds = [&](auto const &range_extent, auto const *p_scalar) {
if (range_extent == CURRENT_ROW) {
return cudf::range_window_bounds::current_row(duration_type);
} else if (range_extent == UNBOUNDED) {
return cudf::range_window_bounds::unbounded(duration_type);
} else {
return cudf::range_window_bounds::get(*p_scalar);
}
};
result_columns.emplace_back(cudf::grouped_range_rolling_window(
groupby_keys, order_by_column,
orderbys_ascending[i] ? cudf::order::ASCENDING : cudf::order::DESCENDING,
input_table->column(agg_column_index),
make_window_bounds(preceding_extent[i], preceding[i]),
make_window_bounds(following_extent[i], following[i]), min_periods[i], *agg));
}
auto result_table = std::make_unique<cudf::table>(std::move(result_columns));
return convert_table_for_return(env, result_table);
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_explode(JNIEnv *env, jclass,
jlong input_jtable,
jint column_index) {
JNI_NULL_CHECK(env, input_jtable, "explode: input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input_table = reinterpret_cast<cudf::table_view const *>(input_jtable);
auto const col_index = static_cast<cudf::size_type>(column_index);
return convert_table_for_return(env, cudf::explode(*input_table, col_index));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_explodePosition(JNIEnv *env, jclass,
jlong input_jtable,
jint column_index) {
JNI_NULL_CHECK(env, input_jtable, "explode: input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input_table = reinterpret_cast<cudf::table_view const *>(input_jtable);
auto const col_index = static_cast<cudf::size_type>(column_index);
return convert_table_for_return(env, cudf::explode_position(*input_table, col_index));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_explodeOuter(JNIEnv *env, jclass,
jlong input_jtable,
jint column_index) {
JNI_NULL_CHECK(env, input_jtable, "explode: input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input_table = reinterpret_cast<cudf::table_view const *>(input_jtable);
auto const col_index = static_cast<cudf::size_type>(column_index);
return convert_table_for_return(env, cudf::explode_outer(*input_table, col_index));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_explodeOuterPosition(JNIEnv *env, jclass,
jlong input_jtable,
jint column_index) {
JNI_NULL_CHECK(env, input_jtable, "explode: input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input_table = reinterpret_cast<cudf::table_view const *>(input_jtable);
auto const col_index = static_cast<cudf::size_type>(column_index);
return convert_table_for_return(env, cudf::explode_outer_position(*input_table, col_index));
}
CATCH_STD(env, 0);
}
JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Table_rowBitCount(JNIEnv *env, jclass, jlong j_table) {
JNI_NULL_CHECK(env, j_table, "table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input_table = reinterpret_cast<cudf::table_view const *>(j_table);
return release_as_jlong(cudf::row_bit_count(*input_table));
}
CATCH_STD(env, 0);
}
JNIEXPORT jobject JNICALL Java_ai_rapids_cudf_Table_contiguousSplitGroups(
JNIEnv *env, jclass, jlong jinput_table, jintArray jkey_indices, jboolean jignore_null_keys,
jboolean jkey_sorted, jbooleanArray jkeys_sort_desc, jbooleanArray jkeys_null_first,
jboolean genUniqKeys) {
JNI_NULL_CHECK(env, jinput_table, "table native handle is null", 0);
JNI_NULL_CHECK(env, jkey_indices, "key indices are null", 0);
// Two main steps to split the groups in the input table.
// 1) Calls `cudf::groupby::groupby::get_groups` to get the group offsets and
// the grouped table.
// 2) Calls `cudf::contiguous_split` to execute the split over the grouped table
// according to the group offsets.
try {
cudf::jni::auto_set_device(env);
cudf::jni::native_jintArray n_key_indices(env, jkey_indices);
auto const input_table = reinterpret_cast<cudf::table_view const *>(jinput_table);
// Prepares arguments for the groupby:
// (keys, null_handling, keys_are_sorted, column_order, null_precedence)
std::vector<cudf::size_type> key_indices(n_key_indices.data(),
n_key_indices.data() + n_key_indices.size());
auto keys = input_table->select(key_indices);
auto null_handling =
jignore_null_keys ? cudf::null_policy::EXCLUDE : cudf::null_policy::INCLUDE;
auto keys_are_sorted = jkey_sorted ? cudf::sorted::YES : cudf::sorted::NO;
auto column_order = cudf::jni::resolve_column_order(env, jkeys_sort_desc, key_indices.size());
auto null_precedence =
cudf::jni::resolve_null_precedence(env, jkeys_null_first, key_indices.size());
// Constructs a groupby
cudf::groupby::groupby grouper(keys, null_handling, keys_are_sorted, column_order,
null_precedence);
    // 1) Gets the groups (keys, offsets, values) from the groupby.
//
    // Uses only the non-key columns as the input values instead of the whole
    // table, to avoid duplicated key columns in the output of `get_groups`.
    // This makes the code a little more complicated, but it reduces the peak
    // memory usage.
auto num_value_cols = input_table->num_columns() - key_indices.size();
std::vector<cudf::size_type> value_indices;
value_indices.reserve(num_value_cols);
    // Column indices start at 0.
cudf::size_type index = 0;
while (value_indices.size() < num_value_cols) {
if (std::find(key_indices.begin(), key_indices.end(), index) == key_indices.end()) {
        // Not a key column, so add it as a value column.
value_indices.emplace_back(index);
}
index++;
}
cudf::table_view values_view = input_table->select(value_indices);
// execute grouping
cudf::groupby::groupby::groups groups = grouper.get_groups(values_view);
    // When building the table view from the keys and values of 'groups',
    // restore the original column order (the same order as in the input table).
std::vector<cudf::column_view> grouped_cols(key_indices.size() + num_value_cols);
// key columns
auto key_view = groups.keys->view();
auto key_view_it = key_view.begin();
for (auto key_id : key_indices) {
grouped_cols.at(key_id) = std::move(*key_view_it);
key_view_it++;
}
// value columns
auto value_view = groups.values->view();
auto value_view_it = value_view.begin();
for (auto value_id : value_indices) {
grouped_cols.at(value_id) = std::move(*value_view_it);
value_view_it++;
}
cudf::table_view grouped_table(grouped_cols);
    // When there are no key columns, use the input table instead, because the
    // output of 'get_groups' is empty.
auto &grouped_view = key_indices.empty() ? *input_table : grouped_table;
    // Resolve the split indices directly from the offsets vector to avoid
    // copying, since the offsets vector may be very large when there are many
    // small groups.
std::vector<cudf::size_type> &split_indices = groups.offsets;
    // The offsets layout is [0, split indices..., num_rows], or [0] for empty
    // keys, so the first and last elements need to be removed. Remove the last
    // one first.
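    // e.g. groups.offsets == [0, 3, 7, 10] for a 10-row grouped table becomes
    // split_indices == [3, 7] after trimming both ends, which splits the
    // grouped table into three contiguous group tables.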
split_indices.pop_back();
    // Generate unique keys via the `gather` method, which removes the
    // duplicated keys.
std::unique_ptr<cudf::table> group_by_result_table;
if (genUniqKeys) {
// generate gather map column from `split_indices`
auto begin = std::cbegin(split_indices);
auto end = std::cend(split_indices);
auto const size = cudf::distance(begin, end);
auto const vec = thrust::host_vector<cudf::size_type>(begin, end);
auto buf = rmm::device_buffer{vec.data(), size * sizeof(cudf::size_type),
cudf::get_default_stream()};
auto gather_map_col = std::make_unique<cudf::column>(
cudf::data_type{cudf::type_id::INT32}, size, std::move(buf), rmm::device_buffer{}, 0);
// gather the first key in each group to remove duplicated ones.
group_by_result_table = cudf::gather(groups.keys->view(), gather_map_col->view());
}
// remove the first 0 if it exists
if (!split_indices.empty()) {
split_indices.erase(split_indices.begin());
}
// 2) Splits the groups.
std::vector<cudf::packed_table> result = cudf::contiguous_split(grouped_view, split_indices);
    // Release the grouped table right away once the split is done.
groups.keys.reset(nullptr);
groups.values.reset(nullptr);
// Returns the split result.
cudf::jni::native_jobjectArray<jobject> n_result =
cudf::jni::contiguous_table_array(env, result.size());
for (size_t i = 0; i < result.size(); i++) {
n_result.set(
i, cudf::jni::contiguous_table_from(env, result[i].data, result[i].table.num_rows()));
}
jobjectArray groups_array = n_result.wrapped();
if (genUniqKeys) {
jlongArray keys_array = convert_table_for_return(env, group_by_result_table);
return cudf::jni::contig_split_group_by_result_from(env, groups_array, keys_array);
} else {
return cudf::jni::contig_split_group_by_result_from(env, groups_array);
}
}
CATCH_STD(env, NULL);
}
JNIEXPORT jlongArray JNICALL Java_ai_rapids_cudf_Table_sample(JNIEnv *env, jclass, jlong j_input,
jlong n, jboolean replacement,
jlong seed) {
JNI_NULL_CHECK(env, j_input, "input table is null", 0);
try {
cudf::jni::auto_set_device(env);
auto const input = reinterpret_cast<cudf::table_view const *>(j_input);
auto sample_with_replacement =
replacement ? cudf::sample_with_replacement::TRUE : cudf::sample_with_replacement::FALSE;
return convert_table_for_return(env, cudf::sample(*input, n, sample_with_replacement, seed));
}
CATCH_STD(env, 0);
}
} // extern "C"
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ColumnVectorTest.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.ColumnView.FindOptions;
import ai.rapids.cudf.HostColumnVector.*;
import com.google.common.collect.Lists;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static ai.rapids.cudf.AssertUtils.assertStructColumnsAreEqual;
import static ai.rapids.cudf.AssertUtils.assertTablesAreEqual;
import static ai.rapids.cudf.QuantileMethod.*;
import static org.junit.jupiter.api.Assertions.*;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
public class ColumnVectorTest extends CudfTestBase {
public static final double PERCENTAGE = 0.0001;
// IEEE 754 NaN values
static final float POSITIVE_FLOAT_NAN_LOWER_RANGE = Float.intBitsToFloat(0x7f800001);
static final float POSITIVE_FLOAT_NAN_UPPER_RANGE = Float.intBitsToFloat(0x7fffffff);
static final float NEGATIVE_FLOAT_NAN_LOWER_RANGE = Float.intBitsToFloat(0xff800001);
static final float NEGATIVE_FLOAT_NAN_UPPER_RANGE = Float.intBitsToFloat(0xffffffff);
static final double POSITIVE_DOUBLE_NAN_LOWER_RANGE = Double.longBitsToDouble(0x7ff0000000000001L);
static final double POSITIVE_DOUBLE_NAN_UPPER_RANGE = Double.longBitsToDouble(0x7fffffffffffffffL);
static final double NEGATIVE_DOUBLE_NAN_LOWER_RANGE = Double.longBitsToDouble(0xfff0000000000001L);
static final double NEGATIVE_DOUBLE_NAN_UPPER_RANGE = Double.longBitsToDouble(0xffffffffffffffffL);
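  // These bit patterns bracket the full encoding range of IEEE 754 NaNs for
  // each sign bit, covering both quiet and signaling NaN payloads.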
// c = a * a - a
static String ptx = "***(" +
" .func _Z1fPii(" +
" .param .b64 _Z1fPii_param_0," +
" .param .b32 _Z1fPii_param_1" +
" )" +
" {" +
" .reg .b32 %r<4>;" +
" .reg .b64 %rd<3>;" +
" ld.param.u64 %rd1, [_Z1fPii_param_0];" +
" ld.param.u32 %r1, [_Z1fPii_param_1];" +
" cvta.to.global.u64 %rd2, %rd1;" +
" mul.lo.s32 %r2, %r1, %r1;" +
" sub.s32 %r3, %r2, %r1;" +
" st.global.u32 [%rd2], %r3;" +
" ret;" +
" }" +
")***";
static String cuda = "__device__ inline void f(" +
"int* output," +
"int input" +
"){" +
"*output = input*input - input;" +
"}";
@Test
void testTransformVector() {
try (ColumnVector cv = ColumnVector.fromBoxedInts(2,3,null,4);
ColumnVector cv1 = cv.transform(ptx, true);
ColumnVector cv2 = cv.transform(cuda, false);
ColumnVector expected = ColumnVector.fromBoxedInts(2*2-2, 3*3-3, null, 4*4-4)) {
assertColumnsAreEqual(expected, cv1);
assertColumnsAreEqual(expected, cv2);
}
}
@Test
void testDistinctCount() {
try (ColumnVector cv = ColumnVector.fromBoxedLongs(5L, 3L, null, null, 5L)) {
assertEquals(3, cv.distinctCount());
assertEquals(2, cv.distinctCount(NullPolicy.EXCLUDE));
}
}
@Test
void testClampDouble() {
try (ColumnVector cv = ColumnVector.fromDoubles(2.33d, 32.12d, -121.32d, 0.0d, 0.00001d,
Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NaN);
Scalar num = Scalar.fromDouble(0);
Scalar loReplace = Scalar.fromDouble(-1);
Scalar hiReplace = Scalar.fromDouble(1);
ColumnVector result = cv.clamp(num, loReplace, num, hiReplace);
ColumnVector expected = ColumnVector.fromDoubles(1.0d, 1.0d, -1.0d, 0.0d, 1.0d, -1.0d,
1.0d, Double.NaN)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testClampFloat() {
try (ColumnVector cv = ColumnVector.fromBoxedFloats(2.33f, 32.12f, null, -121.32f, 0.0f, 0.00001f, Float.NEGATIVE_INFINITY,
Float.POSITIVE_INFINITY, Float.NaN);
Scalar num = Scalar.fromFloat(0);
Scalar loReplace = Scalar.fromFloat(-1);
Scalar hiReplace = Scalar.fromFloat(1);
ColumnVector result = cv.clamp(num, loReplace, num, hiReplace);
ColumnVector expected = ColumnVector.fromBoxedFloats(1.0f, 1.0f, null, -1.0f, 0.0f, 1.0f, -1.0f, 1.0f, Float.NaN)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testClampLong() {
    try (ColumnVector cv = ColumnVector.fromBoxedLongs(1L, 3L, 6L, -2L, 23L, -0L, -90L, null);
Scalar num = Scalar.fromLong(0);
Scalar loReplace = Scalar.fromLong(-1);
Scalar hiReplace = Scalar.fromLong(1);
ColumnVector result = cv.clamp(num, loReplace, num, hiReplace);
         ColumnVector expected = ColumnVector.fromBoxedLongs(1L, 1L, 1L, -1L, 1L, 0L, -1L, null)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testClampShort() {
try (ColumnVector cv = ColumnVector.fromShorts(new short[]{1, 3, 6, -2, 23, -0, -90});
Scalar lo = Scalar.fromShort((short)1);
Scalar hi = Scalar.fromShort((short)2);
ColumnVector result = cv.clamp(lo, hi);
ColumnVector expected = ColumnVector.fromShorts(new short[]{1, 2, 2, 1, 2, 1, 1})) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testClampInt() {
try (ColumnVector cv = ColumnVector.fromInts(1, 3, 6, -2, 23, -0, -90);
Scalar num = Scalar.fromInt(0);
Scalar hiReplace = Scalar.fromInt(1);
Scalar loReplace = Scalar.fromInt(-1);
ColumnVector result = cv.clamp(num, loReplace, num, hiReplace);
ColumnVector expected = ColumnVector.fromInts(1, 1, 1, -1, 1, 0, -1)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testGetElementInt() {
try (ColumnVector cv = ColumnVector.fromBoxedInts(3, 2, 1, null);
Scalar s0 = cv.getScalarElement(0);
Scalar s1 = cv.getScalarElement(1);
Scalar s2 = cv.getScalarElement(2);
Scalar s3 = cv.getScalarElement(3)) {
assertEquals(3, s0.getInt());
assertEquals(2, s1.getInt());
assertEquals(1, s2.getInt());
assertFalse(s3.isValid());
}
}
@Test
void testGetElementByte() {
try (ColumnVector cv = ColumnVector.fromBoxedBytes((byte)3, (byte)2, (byte)1, null);
Scalar s0 = cv.getScalarElement(0);
Scalar s1 = cv.getScalarElement(1);
Scalar s2 = cv.getScalarElement(2);
Scalar s3 = cv.getScalarElement(3)) {
assertEquals(3, s0.getByte());
assertEquals(2, s1.getByte());
assertEquals(1, s2.getByte());
assertFalse(s3.isValid());
}
}
@Test
void testGetElementFloat() {
try (ColumnVector cv = ColumnVector.fromBoxedFloats(3f, 2f, 1f, null);
Scalar s0 = cv.getScalarElement(0);
Scalar s1 = cv.getScalarElement(1);
Scalar s2 = cv.getScalarElement(2);
Scalar s3 = cv.getScalarElement(3)) {
assertEquals(3f, s0.getFloat());
assertEquals(2f, s1.getFloat());
assertEquals(1f, s2.getFloat());
assertFalse(s3.isValid());
}
}
@Test
void testGetElementString() {
try (ColumnVector cv = ColumnVector.fromStrings("3a", "2b", "1c", null);
Scalar s0 = cv.getScalarElement(0);
Scalar s1 = cv.getScalarElement(1);
Scalar s2 = cv.getScalarElement(2);
Scalar s3 = cv.getScalarElement(3)) {
assertEquals("3a", s0.getJavaString());
assertEquals("2b", s1.getJavaString());
assertEquals("1c", s2.getJavaString());
assertFalse(s3.isValid());
}
}
@Test
void testGetElementDecimal() {
try (ColumnVector cv = ColumnVector.decimalFromLongs(1, 3, 2, 1, -1);
Scalar s0 = cv.getScalarElement(0);
Scalar s1 = cv.getScalarElement(1);
Scalar s2 = cv.getScalarElement(2);
Scalar s3 = cv.getScalarElement(3)) {
assertEquals(1, s0.getType().getScale());
assertEquals(new BigDecimal("3E+1"), s0.getBigDecimal());
assertEquals(new BigDecimal("2E+1"), s1.getBigDecimal());
assertEquals(new BigDecimal("1E+1"), s2.getBigDecimal());
assertEquals(new BigDecimal("-1E+1"), s3.getBigDecimal());
}
}
@Test
void testGetElementList() {
HostColumnVector.DataType dt = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
try (ColumnVector cv = ColumnVector.fromLists(dt, Arrays.asList(3, 2),
Arrays.asList(1), Arrays.asList(), null);
Scalar s0 = cv.getScalarElement(0);
ColumnView s0Cv = s0.getListAsColumnView();
ColumnVector expected0 = ColumnVector.fromInts(3, 2);
Scalar s1 = cv.getScalarElement(1);
ColumnView s1Cv = s1.getListAsColumnView();
ColumnVector expected1 = ColumnVector.fromInts(1);
Scalar s2 = cv.getScalarElement(2);
ColumnView s2Cv = s2.getListAsColumnView();
ColumnVector expected2 = ColumnVector.fromInts();
Scalar s3 = cv.getScalarElement(3)) {
assertColumnsAreEqual(expected0, s0Cv);
assertColumnsAreEqual(expected1, s1Cv);
assertColumnsAreEqual(expected2, s2Cv);
assertFalse(s3.isValid());
}
}
@Test
void testStringCreation() {
try (ColumnVector cv = ColumnVector.fromStrings("d", "sd", "sde", null, "END");
HostColumnVector host = cv.copyToHost();
ColumnVector backAgain = host.copyToDevice()) {
assertColumnsAreEqual(cv, backAgain);
}
}
@Test
void testUTF8StringCreation() {
try (ColumnVector cv = ColumnVector.fromUTF8Strings(
"d".getBytes(StandardCharsets.UTF_8),
"sd".getBytes(StandardCharsets.UTF_8),
"sde".getBytes(StandardCharsets.UTF_8),
null,
"END".getBytes(StandardCharsets.UTF_8));
ColumnVector expected = ColumnVector.fromStrings("d", "sd", "sde", null, "END")) {
assertColumnsAreEqual(expected, cv);
}
}
@Test
void testRefCountLeak() throws InterruptedException {
assumeTrue(Boolean.getBoolean("ai.rapids.cudf.flaky-tests-enabled"));
long expectedLeakCount = MemoryCleaner.leakCount.get() + 1;
ColumnVector.fromInts(1, 2, 3);
long maxTime = System.currentTimeMillis() + 10_000;
long leakNow;
do {
System.gc();
Thread.sleep(50);
leakNow = MemoryCleaner.leakCount.get();
} while (leakNow != expectedLeakCount && System.currentTimeMillis() < maxTime);
assertEquals(expectedLeakCount, MemoryCleaner.leakCount.get());
}
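// joinStrings concatenates every row into a single-row string column, inserting
// the separator between rows and the narep scalar in place of nulls.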
@Test
void testJoinStrings() {
try (ColumnVector in = ColumnVector.fromStrings("A", "B", "C", "D", null, "E");
ColumnVector expected = ColumnVector.fromStrings("A-B-C-D-null-E");
Scalar sep = Scalar.fromString("-");
Scalar narep = Scalar.fromString("null");
ColumnVector found = in.joinStrings(sep, narep)) {
assertColumnsAreEqual(expected, found);
}
}
@Test
void testConcatTypeError() {
try (ColumnVector v0 = ColumnVector.fromInts(1, 2, 3, 4);
ColumnVector v1 = ColumnVector.fromFloats(5.0f, 6.0f)) {
assertThrows(CudfException.class, () -> ColumnVector.concatenate(v0, v1));
}
}
@Test
void testConcatNoNulls() {
try (ColumnVector v0 = ColumnVector.fromInts(1, 2, 3, 4);
ColumnVector v1 = ColumnVector.fromInts(5, 6, 7);
ColumnVector v2 = ColumnVector.fromInts(8, 9);
ColumnVector v = ColumnVector.concatenate(v0, v1, v2);
ColumnVector expected = ColumnVector.fromInts(1, 2, 3, 4, 5, 6, 7, 8, 9)) {
assertColumnsAreEqual(expected, v);
}
}
@Test
void testConcatWithNulls() {
try (ColumnVector v0 = ColumnVector.fromDoubles(1, 2, 3, 4);
ColumnVector v1 = ColumnVector.fromDoubles(5, 6, 7);
ColumnVector v2 = ColumnVector.fromBoxedDoubles(null, 9.0);
ColumnVector v = ColumnVector.concatenate(v0, v1, v2);
ColumnVector expected = ColumnVector.fromBoxedDoubles(1., 2., 3., 4., 5., 6., 7., null, 9.)) {
assertColumnsAreEqual(expected, v);
}
}
@Test
void testConcatStrings() {
try (ColumnVector v0 = ColumnVector.fromStrings("0","1","2",null);
ColumnVector v1 = ColumnVector.fromStrings(null, "5", "6","7");
ColumnVector expected = ColumnVector.fromStrings(
"0","1","2",null,
null,"5","6","7");
ColumnVector v = ColumnVector.concatenate(v0, v1)) {
assertColumnsAreEqual(expected, v);
}
}
@Test
void testConcatTimestamps() {
try (ColumnVector v0 = ColumnVector.timestampMicroSecondsFromBoxedLongs(0L, 1L, 2L, null);
ColumnVector v1 = ColumnVector.timestampMicroSecondsFromBoxedLongs(null, 5L, 6L, 7L);
ColumnVector expected = ColumnVector.timestampMicroSecondsFromBoxedLongs(
0L, 1L, 2L, null,
null, 5L, 6L, 7L);
ColumnVector v = ColumnVector.concatenate(v0, v1)) {
assertColumnsAreEqual(expected, v);
}
}
@Test
void testNormalizeNANsAndZeros() {
// Must check boundaries of NaN representation, as described in javadoc for Double#longBitsToDouble.
// @see java.lang.Double#longBitsToDouble
// <quote>
// If the argument is any value in the range 0x7ff0000000000001L through 0x7fffffffffffffffL,
// or in the range 0xfff0000000000001L through 0xffffffffffffffffL, the result is a NaN.
// </quote>
final double MIN_PLUS_NaN = Double.longBitsToDouble(0x7ff0000000000001L);
final double MAX_PLUS_NaN = Double.longBitsToDouble(0x7fffffffffffffffL);
final double MAX_MINUS_NaN = Double.longBitsToDouble(0xfff0000000000001L);
final double MIN_MINUS_NaN = Double.longBitsToDouble(0xffffffffffffffffL);
Double[] ins = new Double[] {0.0, -0.0, Double.NaN, MIN_PLUS_NaN, MAX_PLUS_NaN, MIN_MINUS_NaN, MAX_MINUS_NaN, null};
Double[] outs = new Double[] {0.0, 0.0, Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN, null};
try (ColumnVector input = ColumnVector.fromBoxedDoubles(ins);
ColumnVector expectedColumn = ColumnVector.fromBoxedDoubles(outs);
ColumnVector normalizedColumn = input.normalizeNANsAndZeros()) {
try (HostColumnVector expected = expectedColumn.copyToHost();
HostColumnVector normalized = normalizedColumn.copyToHost()) {
for (int i = 0; i<input.getRowCount(); ++i) {
if (expected.isNull(i)) {
assertTrue(normalized.isNull(i));
}
else {
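// Compare raw bit patterns: NaN != NaN under floating-point equality, and
// normalization must produce the canonical positive NaN and positive zero bits.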
assertEquals(
Double.doubleToRawLongBits(expected.getDouble(i)),
Double.doubleToRawLongBits(normalized.getDouble(i))
);
}
}
}
}
}
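// Null inputs contribute no bytes to the MD5 hash, so a row whose inputs are all
// null hashes to the digest of the empty message, d41d8cd98f00b204e9800998ecf8427e.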
@Test
void testMD5HashStrings() {
try (ColumnVector v0 = ColumnVector.fromStrings(
"a", "B\n", "dE\"\u0100\t\u0101 \ud720\ud721",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.",
null, null);
ColumnVector v1 = ColumnVector.fromStrings(
null, "c", "\\Fg2\'",
"A 60 character string to test MD5's message padding algorithm",
"hiJ\ud720\ud721\ud720\ud721", null);
ColumnVector v2 = ColumnVector.fromStrings(
"a", "B\nc", "dE\"\u0100\t\u0101 \ud720\ud721\\Fg2\'",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.A 60 character string to " +
"test MD5's message padding algorithm",
"hiJ\ud720\ud721\ud720\ud721", null);
ColumnVector result01 = ColumnVector.md5Hash(v0, v1);
ColumnVector result2 = ColumnVector.md5Hash(v2);
ColumnVector expected = ColumnVector.fromStrings(
"0cc175b9c0f1b6a831c399e269772661", "f5112705c2f6dc7d3fc6bd496df6c2e8",
"d49db62680847e0e7107e0937d29668e", "8fa29148f63c1fe9248fdc4644e3a193",
"1bc221b25e6c4825929e884092f4044f", "d41d8cd98f00b204e9800998ecf8427e")) {
assertColumnsAreEqual(expected, result01);
assertColumnsAreEqual(expected, result2);
}
}
@Test
void testMD5HashInts() {
try (ColumnVector v0 = ColumnVector.fromBoxedInts(0, 100, null, null, Integer.MIN_VALUE, null);
ColumnVector v1 = ColumnVector.fromBoxedInts(0, null, -100, null, null, Integer.MAX_VALUE);
ColumnVector result = ColumnVector.md5Hash(v0, v1);
ColumnVector expected = ColumnVector.fromStrings(
"7dea362b3fac8e00956a4952a3d4f474", "cdc824bf721df654130ed7447fb878ac",
"7fb89558e395330c6a10ab98915fcafb", "d41d8cd98f00b204e9800998ecf8427e",
"152afd7bf4dbe26f85032eee0269201a", "ed0622e1179e101cf7edc0b952cc5262")) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testMD5HashDoubles() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(
0.0, null, 100.0, -100.0, Double.MIN_NORMAL, Double.MAX_VALUE,
POSITIVE_DOUBLE_NAN_UPPER_RANGE, POSITIVE_DOUBLE_NAN_LOWER_RANGE,
NEGATIVE_DOUBLE_NAN_UPPER_RANGE, NEGATIVE_DOUBLE_NAN_LOWER_RANGE,
Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
ColumnVector result = ColumnVector.md5Hash(v);
ColumnVector expected = ColumnVector.fromStrings(
"7dea362b3fac8e00956a4952a3d4f474", "d41d8cd98f00b204e9800998ecf8427e",
"6f5b4a57fd3aeb25cd33aa6c56512fd4", "b36ce1b64164e8f12c52ee5f1131ec01",
"f7fbcdce3cf1bea8defd4ca29dabeb75", "d466cb643c6da6c31c88f4d482bccfd3",
"bf26d90b64827fdbc58da0aa195156fe", "bf26d90b64827fdbc58da0aa195156fe",
"bf26d90b64827fdbc58da0aa195156fe", "bf26d90b64827fdbc58da0aa195156fe",
"73c82437c94e197f7e35e14f0140497a", "740660a5f71e7a264fca45330c34da31")) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testMD5HashFloats() {
try (ColumnVector v = ColumnVector.fromBoxedFloats(
0f, 100f, -100f, Float.MIN_NORMAL, Float.MAX_VALUE, null,
POSITIVE_FLOAT_NAN_LOWER_RANGE, POSITIVE_FLOAT_NAN_UPPER_RANGE,
NEGATIVE_FLOAT_NAN_LOWER_RANGE, NEGATIVE_FLOAT_NAN_UPPER_RANGE,
Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY);
ColumnVector result = ColumnVector.md5Hash(v);
ColumnVector expected = ColumnVector.fromStrings(
"f1d3ff8443297732862df21dc4e57262", "a5d1e9463fae706307f90b05e9e6db9a",
"556915a037c2ce1adfbedd7ca24794ea", "59331a73da50b419339c0d67a9ec1a97",
"0ac9ada9698891bfc3f74bcee7e3f675", "d41d8cd98f00b204e9800998ecf8427e",
"d6fd2bac25776d9a7269ca0e24b21b36", "d6fd2bac25776d9a7269ca0e24b21b36",
"d6fd2bac25776d9a7269ca0e24b21b36", "d6fd2bac25776d9a7269ca0e24b21b36",
"55e3a4db046ad9065bd7d64243de408f", "33b552ad34a825b275f5f2b59778b5c3")){
assertColumnsAreEqual(expected, result);
}
}
@Test
void testMD5HashBools() {
try (ColumnVector v0 = ColumnVector.fromBoxedBooleans(null, true, false, true, null, false);
ColumnVector v1 = ColumnVector.fromBoxedBooleans(null, true, false, null, false, true);
ColumnVector result = ColumnVector.md5Hash(v0, v1);
ColumnVector expected = ColumnVector.fromStrings(
"d41d8cd98f00b204e9800998ecf8427e", "249ba6277758050695e8f5909bacd6d3",
"c4103f122d27677c9db144cae1394a66", "55a54008ad1ba589aa210d2629c1df41",
"93b885adfe0da089cdf634904fd59f71", "441077cc9e57554dd476bdfb8b8b8102")) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testMD5HashMixed() {
try (ColumnVector strings = ColumnVector.fromStrings(
"a", "B\n", "dE\"\u0100\t\u0101 \ud720\ud721",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.",
null, null);
ColumnVector integers = ColumnVector.fromBoxedInts(0, 100, -100, Integer.MIN_VALUE, Integer.MAX_VALUE, null);
ColumnVector doubles = ColumnVector.fromBoxedDoubles(
0.0, 100.0, -100.0, POSITIVE_DOUBLE_NAN_LOWER_RANGE, POSITIVE_DOUBLE_NAN_UPPER_RANGE, null);
ColumnVector floats = ColumnVector.fromBoxedFloats(
0f, 100f, -100f, NEGATIVE_FLOAT_NAN_LOWER_RANGE, NEGATIVE_FLOAT_NAN_UPPER_RANGE, null);
ColumnVector bools = ColumnVector.fromBoxedBooleans(true, false, null, false, true, null);
ColumnVector result = ColumnVector.md5Hash(strings, integers, doubles, floats, bools);
ColumnVector expected = ColumnVector.fromStrings(
"c12c8638819fdd8377bbf537a4ebf0b4", "abad86357c1ae28eeb89f4b59700946a",
"7e376255c6354716cd63418208dc7b90", "2f64d6a1d5b730fd97115924cf9aa486",
"9f9d26bb5d25d56453a91f0558370fa4", "d41d8cd98f00b204e9800998ecf8427e")) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testMD5HashLists() {
List<String> list1 = Arrays.asList("dE\"\u0100\t\u0101 \u0500\u0501", "\\Fg2\'");
List<String> list2 = Arrays.asList("A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.", "", null, "A 60 character string to test MD5's message padding algorithm");
List<String> list3 = Arrays.asList("hiJ\ud720\ud721\ud720\ud721");
List<String> list4 = null;
try (ColumnVector v = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)), list1, list2, list3, list4);
ColumnVector result = ColumnVector.md5Hash(v);
ColumnVector expected = ColumnVector.fromStrings(
"675c30ce6d1b27dcb5009b01be42e9bd", "8fa29148f63c1fe9248fdc4644e3a193",
"1bc221b25e6c4825929e884092f4044f", "d41d8cd98f00b204e9800998ecf8427e")) {
assertColumnsAreEqual(expected, result);
}
}
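// spark32BitMurmurHash3 also skips nulls, so a row whose inputs are all null
// hashes to the seed itself (42 in most of the tests below).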
@Test
void testSpark32BitMurmur3HashStrings() {
try (ColumnVector v0 = ColumnVector.fromStrings(
"a", "B\nc", "dE\"\u0100\t\u0101 \ud720\ud721\\Fg2\'",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.A 60 character string to " +
"test MD5's message padding algorithm",
"hiJ\ud720\ud721\ud720\ud721", null);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(42, new ColumnVector[]{v0});
ColumnVector expected = ColumnVector.fromBoxedInts(1485273170, 1709559900, 1423943036, 176121990, 1199621434, 42)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashInts() {
try (ColumnVector v0 = ColumnVector.fromBoxedInts(0, 100, null, null, Integer.MIN_VALUE, null);
ColumnVector v1 = ColumnVector.fromBoxedInts(0, null, -100, null, null, Integer.MAX_VALUE);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(42, new ColumnVector[]{v0, v1});
ColumnVector expected = ColumnVector.fromBoxedInts(59727262, 751823303, -1080202046, 42, 723455942, 133916647)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashDoubles() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(
0.0, null, 100.0, -100.0, Double.MIN_NORMAL, Double.MAX_VALUE,
POSITIVE_DOUBLE_NAN_UPPER_RANGE, POSITIVE_DOUBLE_NAN_LOWER_RANGE,
NEGATIVE_DOUBLE_NAN_UPPER_RANGE, NEGATIVE_DOUBLE_NAN_LOWER_RANGE,
Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(new ColumnVector[]{v});
ColumnVector expected = ColumnVector.fromBoxedInts(1669671676, 0, -544903190, -1831674681, 150502665, 474144502, 1428788237, 1428788237, 1428788237, 1428788237, 420913893, 1915664072)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashTimestamps() {
// The hash values were derived from Apache Spark in a manner similar to the one documented at
// https://github.com/rapidsai/cudf/blob/aa7ca46dcd9e/cpp/tests/hashing/hash_test.cpp#L281-L307
try (ColumnVector v = ColumnVector.timestampMicroSecondsFromBoxedLongs(
0L, null, 100L, -100L, 0x123456789abcdefL, null, -0x123456789abcdefL);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(42, new ColumnVector[]{v});
ColumnVector expected = ColumnVector.fromBoxedInts(-1670924195, 42, 1114849490, 904948192, 657182333, 42, -57193045)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashDecimal64() {
// The hash values were derived from Apache Spark in a manner similar to the one documented at
// https://github.com/rapidsai/cudf/blob/aa7ca46dcd9e/cpp/tests/hashing/hash_test.cpp#L281-L307
try (ColumnVector v = ColumnVector.decimalFromLongs(-7,
0L, 100L, -100L, 0x123456789abcdefL, -0x123456789abcdefL);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(42, new ColumnVector[]{v});
ColumnVector expected = ColumnVector.fromBoxedInts(-1670924195, 1114849490, 904948192, 657182333, -57193045)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashDecimal32() {
// The hash values were derived from Apache Spark in a manner similar to the one documented at
// https://github.com/rapidsai/cudf/blob/aa7ca46dcd9e/cpp/tests/hashing/hash_test.cpp#L281-L307
try (ColumnVector v = ColumnVector.decimalFromInts(-3,
0, 100, -100, 0x12345678, -0x12345678);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(42, new ColumnVector[]{v});
ColumnVector expected = ColumnVector.fromBoxedInts(-1670924195, 1114849490, 904948192, -958054811, -1447702630)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashDates() {
// The hash values were derived from Apache Spark in a manner similar to the one documented at
// https://github.com/rapidsai/cudf/blob/aa7ca46dcd9e/cpp/tests/hashing/hash_test.cpp#L281-L307
try (ColumnVector v = ColumnVector.timestampDaysFromBoxedInts(
0, null, 100, -100, 0x12345678, null, -0x12345678);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(42, new ColumnVector[]{v});
ColumnVector expected = ColumnVector.fromBoxedInts(933211791, 42, 751823303, -1080202046, -1721170160, 42, 1852996993)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashFloats() {
try (ColumnVector v = ColumnVector.fromBoxedFloats(
0f, 100f, -100f, Float.MIN_NORMAL, Float.MAX_VALUE, null,
POSITIVE_FLOAT_NAN_LOWER_RANGE, POSITIVE_FLOAT_NAN_UPPER_RANGE,
NEGATIVE_FLOAT_NAN_LOWER_RANGE, NEGATIVE_FLOAT_NAN_UPPER_RANGE,
Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(411, new ColumnVector[]{v});
ColumnVector expected = ColumnVector.fromBoxedInts(-235179434, 1812056886, 2028471189, 1775092689, -1531511762, 411, -1053523253, -1053523253, -1053523253, -1053523253, -1526256646, 930080402)){
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashBools() {
try (ColumnVector v0 = ColumnVector.fromBoxedBooleans(null, true, false, true, null, false);
ColumnVector v1 = ColumnVector.fromBoxedBooleans(null, true, false, null, false, true);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(0, new ColumnVector[]{v0, v1});
ColumnVector expected = ColumnVector.fromBoxedInts(0, -1589400010, -239939054, -68075478, 593689054, -1194558265)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashMixed() {
try (ColumnVector strings = ColumnVector.fromStrings(
"a", "B\n", "dE\"\u0100\t\u0101 \ud720\ud721",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.",
null, null);
ColumnVector integers = ColumnVector.fromBoxedInts(0, 100, -100, Integer.MIN_VALUE, Integer.MAX_VALUE, null);
ColumnVector doubles = ColumnVector.fromBoxedDoubles(
0.0, 100.0, -100.0, POSITIVE_DOUBLE_NAN_LOWER_RANGE, POSITIVE_DOUBLE_NAN_UPPER_RANGE, null);
ColumnVector floats = ColumnVector.fromBoxedFloats(
0f, 100f, -100f, NEGATIVE_FLOAT_NAN_LOWER_RANGE, NEGATIVE_FLOAT_NAN_UPPER_RANGE, null);
ColumnVector bools = ColumnVector.fromBoxedBooleans(true, false, null, false, true, null);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(1868, new ColumnVector[]{strings, integers, doubles, floats, bools});
ColumnVector expected = ColumnVector.fromBoxedInts(1936985022, 720652989, 339312041, 1400354989, 769988643, 1868)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashStruct() {
try (ColumnVector strings = ColumnVector.fromStrings(
"a", "B\n", "dE\"\u0100\t\u0101 \ud720\ud721",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.",
null, null);
ColumnVector integers = ColumnVector.fromBoxedInts(0, 100, -100, Integer.MIN_VALUE, Integer.MAX_VALUE, null);
ColumnVector doubles = ColumnVector.fromBoxedDoubles(
0.0, 100.0, -100.0, POSITIVE_DOUBLE_NAN_LOWER_RANGE, POSITIVE_DOUBLE_NAN_UPPER_RANGE, null);
ColumnVector floats = ColumnVector.fromBoxedFloats(
0f, 100f, -100f, NEGATIVE_FLOAT_NAN_LOWER_RANGE, NEGATIVE_FLOAT_NAN_UPPER_RANGE, null);
ColumnVector bools = ColumnVector.fromBoxedBooleans(true, false, null, false, true, null);
ColumnView structs = ColumnView.makeStructView(strings, integers, doubles, floats, bools);
ColumnVector result = ColumnVector.spark32BitMurmurHash3(1868, new ColumnView[]{structs});
ColumnVector expected = ColumnVector.spark32BitMurmurHash3(1868, new ColumnVector[]{strings, integers, doubles, floats, bools})) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashNestedStruct() {
try (ColumnVector strings = ColumnVector.fromStrings(
"a", "B\n", "dE\"\u0100\t\u0101 \ud720\ud721",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the MD5 hash function. This string needed to be longer.",
null, null);
ColumnVector integers = ColumnVector.fromBoxedInts(0, 100, -100, Integer.MIN_VALUE, Integer.MAX_VALUE, null);
ColumnVector doubles = ColumnVector.fromBoxedDoubles(
0.0, 100.0, -100.0, POSITIVE_DOUBLE_NAN_LOWER_RANGE, POSITIVE_DOUBLE_NAN_UPPER_RANGE, null);
ColumnVector floats = ColumnVector.fromBoxedFloats(
0f, 100f, -100f, NEGATIVE_FLOAT_NAN_LOWER_RANGE, NEGATIVE_FLOAT_NAN_UPPER_RANGE, null);
ColumnVector bools = ColumnVector.fromBoxedBooleans(true, false, null, false, true, null);
ColumnView structs1 = ColumnView.makeStructView(strings, integers);
ColumnView structs2 = ColumnView.makeStructView(structs1, doubles);
ColumnView structs3 = ColumnView.makeStructView(bools);
ColumnView structs = ColumnView.makeStructView(structs2, floats, structs3);
ColumnVector expected = ColumnVector.spark32BitMurmurHash3(1868, new ColumnVector[]{strings, integers, doubles, floats, bools});
ColumnVector result = ColumnVector.spark32BitMurmurHash3(1868, new ColumnView[]{structs})) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testSpark32BitMurmur3HashListsAndNestedLists() {
try (ColumnVector stringListCV = ColumnVector.fromLists(
new ListType(true, new BasicType(true, DType.STRING)),
Arrays.asList(null, "a"),
Arrays.asList("B\n", ""),
Arrays.asList("dE\"\u0100\t\u0101", " \ud720\ud721"),
Collections.singletonList("A very long (greater than 128 bytes/char string) to test a multi" +
" hash-step data point in the Murmur3 hash function. This string needed to be longer."),
Collections.singletonList(""),
null);
ColumnVector strings1 = ColumnVector.fromStrings(
"a", "B\n", "dE\"\u0100\t\u0101",
"A very long (greater than 128 bytes/char string) to test a multi hash-step data point " +
"in the Murmur3 hash function. This string needed to be longer.", null, null);
ColumnVector strings2 = ColumnVector.fromStrings(
null, "", " \ud720\ud721", null, "", null);
ColumnView stringStruct = ColumnView.makeStructView(strings1, strings2);
ColumnVector stringExpected = ColumnVector.spark32BitMurmurHash3(1868, new ColumnView[]{stringStruct});
ColumnVector stringResult = ColumnVector.spark32BitMurmurHash3(1868, new ColumnView[]{stringListCV});
ColumnVector intListCV = ColumnVector.fromLists(
new ListType(true, new BasicType(true, DType.INT32)),
null,
Arrays.asList(0, -2, 3),
Collections.singletonList(Integer.MAX_VALUE),
Arrays.asList(5, -6, null),
Collections.singletonList(Integer.MIN_VALUE),
null);
ColumnVector integers1 = ColumnVector.fromBoxedInts(null, 0, null, 5, Integer.MIN_VALUE, null);
ColumnVector integers2 = ColumnVector.fromBoxedInts(null, -2, Integer.MAX_VALUE, null, null, null);
ColumnVector integers3 = ColumnVector.fromBoxedInts(null, 3, null, -6, null, null);
ColumnVector intExpected =
ColumnVector.spark32BitMurmurHash3(1868, new ColumnVector[]{integers1, integers2, integers3});
ColumnVector intResult = ColumnVector.spark32BitMurmurHash3(1868, new ColumnVector[]{intListCV});
ColumnVector doubles = ColumnVector.fromBoxedDoubles(
0.0, 100.0, -100.0, POSITIVE_DOUBLE_NAN_LOWER_RANGE, POSITIVE_DOUBLE_NAN_UPPER_RANGE, null);
ColumnVector floats = ColumnVector.fromBoxedFloats(
0f, 100f, -100f, NEGATIVE_FLOAT_NAN_LOWER_RANGE, NEGATIVE_FLOAT_NAN_UPPER_RANGE, null);
ColumnView structCV = ColumnView.makeStructView(intListCV, stringListCV, doubles, floats);
ColumnVector nestedExpected =
ColumnVector.spark32BitMurmurHash3(1868, new ColumnView[]{intListCV, strings1, strings2, doubles, floats});
ColumnVector nestedResult =
ColumnVector.spark32BitMurmurHash3(1868, new ColumnView[]{structCV})) {
assertColumnsAreEqual(stringExpected, stringResult);
assertColumnsAreEqual(intExpected, intResult);
assertColumnsAreEqual(nestedExpected, nestedResult);
}
}
@Test
void isNotNullTestEmptyColumn() {
try (ColumnVector v = ColumnVector.fromBoxedInts();
ColumnVector expected = ColumnVector.fromBoxedBooleans();
ColumnVector result = v.isNotNull()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void isNotNullTest() {
try (ColumnVector v = ColumnVector.fromBoxedInts(1, 2, null, 4, null, 6);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, false, true, false, true);
ColumnVector result = v.isNotNull()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void isNotNullTestAllNulls() {
try (ColumnVector v = ColumnVector.fromBoxedInts(null, null, null, null, null, null);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false, false, false);
ColumnVector result = v.isNotNull()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void isNotNullTestAllNotNulls() {
try (ColumnVector v = ColumnVector.fromBoxedInts(1,2,3,4,5,6);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, true, true, true);
ColumnVector result = v.isNotNull()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void isNullTest() {
try (ColumnVector v = ColumnVector.fromBoxedInts(1, 2, null, 4, null, 6);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, true, false, true, false);
ColumnVector result = v.isNull()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void isNullTestEmptyColumn() {
try (ColumnVector v = ColumnVector.fromBoxedInts();
ColumnVector expected = ColumnVector.fromBoxedBooleans();
ColumnVector result = v.isNull()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void isNanTestWithNulls() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(null, null, Double.NaN, null, Double.NaN, null);
ColumnVector vF = ColumnVector.fromBoxedFloats(null, null, Float.NaN, null, Float.NaN, null);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, true, false, true, false);
ColumnVector result = v.isNan();
ColumnVector resultF = vF.isNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNanForTypeMismatch() {
assertThrows(CudfException.class, () -> {
try (ColumnVector v = ColumnVector.fromStrings("foo", "bar", "baz");
ColumnVector result = v.isNan()) {}
});
}
@Test
void isNanTest() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(1.0, 2.0, Double.NaN, 4.0, Double.NaN, 6.0);
ColumnVector vF = ColumnVector.fromBoxedFloats(1.1f, 2.2f, Float.NaN, 4.4f, Float.NaN, 6.6f);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, true, false, true, false);
ColumnVector result = v.isNan();
ColumnVector resultF = vF.isNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNanTestEmptyColumn() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles();
ColumnVector vF = ColumnVector.fromBoxedFloats();
ColumnVector expected = ColumnVector.fromBoxedBooleans();
ColumnVector result = v.isNan();
ColumnVector resultF = vF.isNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNanTestAllNotNans() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
ColumnVector vF = ColumnVector.fromBoxedFloats(1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false, false, false);
ColumnVector result = v.isNan();
ColumnVector resultF = vF.isNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNanTestAllNans() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN);
ColumnVector vF = ColumnVector.fromBoxedFloats(Float.NaN, Float.NaN, Float.NaN, Float.NaN, Float.NaN, Float.NaN);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, true, true, true);
ColumnVector result = v.isNan();
ColumnVector resultF = vF.isNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNotNanTestWithNulls() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(null, null, Double.NaN, null, Double.NaN, null);
ColumnVector vF = ColumnVector.fromBoxedFloats(null, null, Float.NaN, null, Float.NaN, null);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, false, true, false, true);
ColumnVector result = v.isNotNan();
ColumnVector resultF = vF.isNotNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNotNanForTypeMismatch() {
assertThrows(CudfException.class, () -> {
try (ColumnVector v = ColumnVector.fromStrings("foo", "bar", "baz");
ColumnVector result = v.isNotNan()) {}
});
}
@Test
void isNotNanTest() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(1.0, 2.0, Double.NaN, 4.0, Double.NaN, 6.0);
ColumnVector vF = ColumnVector.fromBoxedFloats(1.1f, 2.2f, Float.NaN, 4.4f, Float.NaN, 6.6f);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, false, true, false, true);
ColumnVector result = v.isNotNan();
ColumnVector resultF = vF.isNotNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNotNanTestEmptyColumn() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles();
ColumnVector vF = ColumnVector.fromBoxedFloats();
ColumnVector expected = ColumnVector.fromBoxedBooleans();
ColumnVector result = v.isNotNan();
ColumnVector resultF = vF.isNotNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNotNanTestAllNotNans() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
ColumnVector vF = ColumnVector.fromBoxedFloats(1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, true, true, true);
ColumnVector result = v.isNotNan();
ColumnVector resultF = vF.isNotNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
@Test
void isNotNanTestAllNans() {
try (ColumnVector v = ColumnVector.fromBoxedDoubles(Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN);
ColumnVector vF = ColumnVector.fromBoxedFloats(Float.NaN, Float.NaN, Float.NaN, Float.NaN, Float.NaN, Float.NaN);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false, false, false);
ColumnVector result = v.isNotNan();
ColumnVector resultF = vF.isNotNan()) {
assertColumnsAreEqual(expected, result);
assertColumnsAreEqual(expected, resultF);
}
}
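// For round(decimalPlaces, mode), a positive decimalPlaces rounds to fractional
// digits and a negative one to powers of ten; HALF_EVEN breaks ties toward the
// even neighbor while HALF_UP rounds ties away from zero.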
@Test
void roundFloatsHalfUp() {
try (ColumnVector v = ColumnVector.fromBoxedFloats(1.234f, 25.66f, null, 154.9f, 2346f);
ColumnVector result1 = v.round();
ColumnVector result2 = v.round(1, RoundMode.HALF_UP);
ColumnVector result3 = v.round(-1, RoundMode.HALF_UP);
ColumnVector expected1 = ColumnVector.fromBoxedFloats(1f, 26f, null, 155f, 2346f);
ColumnVector expected2 = ColumnVector.fromBoxedFloats(1.2f, 25.7f, null, 154.9f, 2346f);
ColumnVector expected3 = ColumnVector.fromBoxedFloats(0f, 30f, null, 150f, 2350f)) {
assertColumnsAreEqual(expected1, result1);
assertColumnsAreEqual(expected2, result2);
assertColumnsAreEqual(expected3, result3);
}
}
@Test
void roundFloatsHalfEven() {
try (ColumnVector v = ColumnVector.fromBoxedFloats(1.5f, 2.5f, 1.35f, null, 1.25f, 15f, 25f);
ColumnVector result1 = v.round(RoundMode.HALF_EVEN);
ColumnVector result2 = v.round(1, RoundMode.HALF_EVEN);
ColumnVector result3 = v.round(-1, RoundMode.HALF_EVEN);
ColumnVector expected1 = ColumnVector.fromBoxedFloats(2f, 2f, 1f, null, 1f, 15f, 25f);
ColumnVector expected2 = ColumnVector.fromBoxedFloats(1.5f, 2.5f, 1.4f, null, 1.2f, 15f, 25f);
ColumnVector expected3 = ColumnVector.fromBoxedFloats(0f, 0f, 0f, null, 0f, 20f, 20f)) {
assertColumnsAreEqual(expected1, result1);
assertColumnsAreEqual(expected2, result2);
assertColumnsAreEqual(expected3, result3);
}
}
@Test
void roundIntsHalfUp() {
try (ColumnVector v = ColumnVector.fromBoxedInts(12, 135, 160, -1454, null, -1500, -140, -150);
ColumnVector result1 = v.round(2, RoundMode.HALF_UP);
ColumnVector result2 = v.round(-2, RoundMode.HALF_UP);
ColumnVector expected1 = ColumnVector.fromBoxedInts(12, 135, 160, -1454, null, -1500, -140, -150);
ColumnVector expected2 = ColumnVector.fromBoxedInts(0, 100, 200, -1500, null, -1500, -100, -200)) {
assertColumnsAreEqual(expected1, result1);
assertColumnsAreEqual(expected2, result2);
}
}
@Test
void roundIntsHalfEven() {
try (ColumnVector v = ColumnVector.fromBoxedInts(12, 24, 135, 160, null, 1450, 1550, -1650);
ColumnVector result1 = v.round(2, RoundMode.HALF_EVEN);
ColumnVector result2 = v.round(-2, RoundMode.HALF_EVEN);
ColumnVector expected1 = ColumnVector.fromBoxedInts(12, 24, 135, 160, null, 1450, 1550, -1650);
ColumnVector expected2 = ColumnVector.fromBoxedInts(0, 0, 100, 200, null, 1400, 1600, -1600)) {
assertColumnsAreEqual(expected1, result1);
assertColumnsAreEqual(expected2, result2);
}
}
@Test
void roundDecimal() {
final int dec32Scale1 = -2;
final int resultScale1 = -3;
final int[] DECIMAL32_1 = new int[]{14, 15, 16, 24, 25, 26};
final int[] expectedHalfUp = new int[]{1, 2, 2, 2, 3, 3};
final int[] expectedHalfEven = new int[]{1, 2, 2, 2, 2, 3};
try (ColumnVector v = ColumnVector.decimalFromInts(-dec32Scale1, DECIMAL32_1);
ColumnVector roundHalfUp = v.round(-3, RoundMode.HALF_UP);
ColumnVector roundHalfEven = v.round(-3, RoundMode.HALF_EVEN);
ColumnVector answerHalfUp = ColumnVector.decimalFromInts(-resultScale1, expectedHalfUp);
ColumnVector answerHalfEven = ColumnVector.decimalFromInts(-resultScale1, expectedHalfEven)) {
assertColumnsAreEqual(answerHalfUp, roundHalfUp);
assertColumnsAreEqual(answerHalfEven, roundHalfEven);
}
}
@Test
void decimal128Cv() {
final int dec32Scale1 = -2;
BigInteger bigInteger1 = new BigInteger("-831457");
BigInteger bigInteger2 = new BigInteger("14");
BigInteger bigInteger3 = new BigInteger("152345742357340573405745");
final BigInteger[] bigInts = new BigInteger[] {bigInteger1, bigInteger2, bigInteger3};
try (ColumnVector v = ColumnVector.decimalFromBigInt(-dec32Scale1, bigInts);
HostColumnVector hostColumnVector = v.copyToHost()) {
assertEquals(bigInteger1, hostColumnVector.getBigDecimal(0).unscaledValue());
assertEquals(bigInteger2, hostColumnVector.getBigDecimal(1).unscaledValue());
assertEquals(bigInteger3, hostColumnVector.getBigDecimal(2).unscaledValue());
}
}
static final long HOST_ALIGN_BYTES = ColumnView.hostPaddingSizeInBytes();
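/**
 * Checks that the reported device size matches the expected value exactly, and that
 * the host size estimate is at least as large and padded to a multiple of
 * hostPaddingSizeInBytes().
 */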
static void assertHostAligned(long expectedDeviceSize, ColumnView cv) {
long deviceSize = cv.getDeviceMemorySize();
assertEquals(expectedDeviceSize, deviceSize);
long hostSize = cv.getHostBytesRequired();
assert(hostSize >= deviceSize);
long roundedHostSize = (hostSize / HOST_ALIGN_BYTES) * HOST_ALIGN_BYTES;
assertEquals(hostSize, roundedHostSize, "The host size should be a multiple of " +
HOST_ALIGN_BYTES);
}
@Test
void testGetDeviceMemorySizeNonStrings() {
try (ColumnVector v0 = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5, 6);
ColumnVector v1 = ColumnVector.fromBoxedInts(1, 2, 3, null, null, 4, 5, 6)) {
assertHostAligned(24, v0); // (6*4B)
assertHostAligned(96, v1); // (8*4B) + 64B(for validity vector)
}
}
@Test
void testGetDeviceMemorySizeStrings() {
if (ColumnView.hostPaddingSizeInBytes() != 8) {
System.err.println("HOST PADDING SIZE: " + ColumnView.hostPaddingSizeInBytes());
}
try (ColumnVector v0 = ColumnVector.fromStrings("onetwothree", "four", "five");
ColumnVector v1 = ColumnVector.fromStrings("onetwothree", "four", null, "five")) {
assertHostAligned(35, v0); //19B data + 4*4B offsets = 35
assertHostAligned(103, v1); //19B data + 5*4B + 64B validity vector = 103B
}
}
@SuppressWarnings("unchecked")
@Test
void testGetDeviceMemorySizeLists() {
DataType svType = new ListType(true, new BasicType(true, DType.STRING));
DataType ivType = new ListType(false, new BasicType(false, DType.INT32));
try (ColumnVector sv = ColumnVector.fromLists(svType,
Arrays.asList("first", "second", "third"),
Arrays.asList("fourth", null),
null);
ColumnVector iv = ColumnVector.fromLists(ivType,
Arrays.asList(1, 2, 3),
Collections.singletonList(4),
Arrays.asList(5, 6),
Collections.singletonList(7))) {
// 64 bytes for validity of list column
// 16 bytes for offsets of list column
// 64 bytes for validity of string column
// 24 bytes for offsets of string column
// 22 bytes of string character size
assertHostAligned(64+16+64+24+22, sv);
// 20 bytes for offsets of list column
// 28 bytes for data of INT32 column
assertHostAligned(20+28, iv);
}
}
@Test
void testGetDeviceMemorySizeStructs() {
DataType structType = new StructType(true,
new ListType(true, new BasicType(true, DType.STRING)),
new BasicType(true, DType.INT64));
try (ColumnVector v = ColumnVector.fromStructs(structType,
new StructData(
Arrays.asList("first", "second", "third"),
10L),
new StructData(
Arrays.asList("fourth", null),
20L),
new StructData(
null,
null),
null)) {
// 64 bytes for validity of the struct column
// 64 bytes for validity of list column
// 20 bytes for offsets of list column
// 64 bytes for validity of string column
// 28 bytes for offsets of string column
// 22 bytes of string character size
// 64 bytes for validity of int64 column
// 28 bytes for data of the int64 column
assertHostAligned(64+64+20+64+28+22+64+28, v);
}
}
@Test
void testSequenceInt() {
try (Scalar zero = Scalar.fromInt(0);
Scalar one = Scalar.fromInt(1);
Scalar negOne = Scalar.fromInt(-1);
Scalar nulls = Scalar.fromNull(DType.INT32)) {
try (
ColumnVector cv = ColumnVector.sequence(zero, 5);
ColumnVector expected = ColumnVector.fromInts(0, 1, 2, 3, 4)) {
assertColumnsAreEqual(expected, cv);
}
try (ColumnVector cv = ColumnVector.sequence(one, negOne, 6);
ColumnVector expected = ColumnVector.fromInts(1, 0, -1, -2, -3, -4)) {
assertColumnsAreEqual(expected, cv);
}
try (ColumnVector cv = ColumnVector.sequence(zero, 0);
ColumnVector expected = ColumnVector.fromInts()) {
assertColumnsAreEqual(expected, cv);
}
assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector cv = ColumnVector.sequence(nulls, 5)) { }
});
assertThrows(CudfException.class, () -> {
try (ColumnVector cv = ColumnVector.sequence(zero, -3)) { }
});
}
}
@Test
void testSequenceDouble() {
try (Scalar zero = Scalar.fromDouble(0.0);
Scalar one = Scalar.fromDouble(1.0);
Scalar negOneDotOne = Scalar.fromDouble(-1.1)) {
try (
ColumnVector cv = ColumnVector.sequence(zero, 5);
ColumnVector expected = ColumnVector.fromDoubles(0, 1, 2, 3, 4)) {
assertColumnsAreEqual(expected, cv);
}
try (ColumnVector cv = ColumnVector.sequence(one, negOneDotOne, 6);
ColumnVector expected = ColumnVector.fromDoubles(1, -0.1, -1.2, -2.3, -3.4, -4.5)) {
assertColumnsAreEqual(expected, cv);
}
try (ColumnVector cv = ColumnVector.sequence(zero, 0);
ColumnVector expected = ColumnVector.fromDoubles()) {
assertColumnsAreEqual(expected, cv);
}
}
}
@Test
void testSequenceOtherTypes() {
assertThrows(CudfException.class, () -> {
try (Scalar s = Scalar.fromString("0");
ColumnVector cv = ColumnVector.sequence(s, s, 5)) {}
});
assertThrows(CudfException.class, () -> {
try (Scalar s = Scalar.fromBool(false);
ColumnVector cv = ColumnVector.sequence(s, s, 5)) {}
});
assertThrows(CudfException.class, () -> {
try (Scalar s = Scalar.timestampDaysFromInt(100);
ColumnVector cv = ColumnVector.sequence(s, s, 5)) {}
});
}
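// The columnar sequence(start, size, step) overload builds a LIST column: row i
// holds size[i] values beginning at start[i]; when step is omitted it defaults to 1.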
@Test
void testSequencesInt() {
try (ColumnVector start = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5);
ColumnVector size = ColumnVector.fromBoxedInts(2, 3, 2, 0, 1);
ColumnVector step = ColumnVector.fromBoxedInts(2, -1, 1, 1, 0);
ColumnVector cv = ColumnVector.sequence(start, size, step);
ColumnVector cv1 = ColumnVector.sequence(start, size);
ColumnVector expectCv = ColumnVector.fromLists(
new ListType(true, new BasicType(false, DType.INT32)),
Arrays.asList(1, 3),
Arrays.asList(2, 1, 0),
Arrays.asList(3, 4),
Arrays.asList(),
Arrays.asList(5));
ColumnVector expectCv1 = ColumnVector.fromLists(
new ListType(true, new BasicType(false, DType.INT32)),
Arrays.asList(1, 2),
Arrays.asList(2, 3, 4),
Arrays.asList(3, 4),
Arrays.asList(),
Arrays.asList(5))) {
assertColumnsAreEqual(expectCv, cv);
assertColumnsAreEqual(expectCv1, cv1);
}
}
@Test
void testSequencesDouble() {
try (ColumnVector start = ColumnVector.fromBoxedDoubles(1.2, 2.2, 3.2, 4.2, 5.2);
ColumnVector size = ColumnVector.fromBoxedInts(2, 3, 2, 0, 1);
ColumnVector step = ColumnVector.fromBoxedDoubles(2.1, -1.1, 1.1, 1.1, 0.1);
ColumnVector cv = ColumnVector.sequence(start, size, step);
ColumnVector cv1 = ColumnVector.sequence(start, size);
ColumnVector expectCv = ColumnVector.fromLists(
new ListType(true, new BasicType(false, DType.FLOAT64)),
Arrays.asList(1.2, 3.3),
Arrays.asList(2.2, 1.1, 0.0),
Arrays.asList(3.2, 4.3),
Arrays.asList(),
Arrays.asList(5.2));
ColumnVector expectCv1 = ColumnVector.fromLists(
new ListType(true, new BasicType(false, DType.FLOAT64)),
Arrays.asList(1.2, 2.2),
Arrays.asList(2.2, 3.2, 4.2),
Arrays.asList(3.2, 4.2),
Arrays.asList(),
Arrays.asList(5.2))) {
assertColumnsAreEqual(expectCv, cv);
assertColumnsAreEqual(expectCv1, cv1);
}
}
@Test
void testFromScalarZeroRows() {
// magic number to invoke factory method specialized for decimal types
int mockScale = -8;
for (DType.DTypeEnum type : DType.DTypeEnum.values()) {
Scalar s = null;
try {
switch (type) {
case BOOL8:
s = Scalar.fromBool(true);
break;
case INT8:
s = Scalar.fromByte((byte) 5);
break;
case UINT8:
s = Scalar.fromUnsignedByte((byte) 254);
break;
case INT16:
s = Scalar.fromShort((short) 12345);
break;
case UINT16:
s = Scalar.fromUnsignedShort((short) 65432);
break;
case INT32:
s = Scalar.fromInt(123456789);
break;
case UINT32:
s = Scalar.fromUnsignedInt(0xfedcba98);
break;
case INT64:
s = Scalar.fromLong(1234567890123456789L);
break;
case UINT64:
s = Scalar.fromUnsignedLong(0xfedcba9876543210L);
break;
case FLOAT32:
s = Scalar.fromFloat(1.2345f);
break;
case FLOAT64:
s = Scalar.fromDouble(1.23456789);
break;
case DECIMAL32:
s = Scalar.fromDecimal(mockScale, 123456789);
break;
case DECIMAL64:
s = Scalar.fromDecimal(mockScale, 1234567890123456789L);
break;
case DECIMAL128:
s = Scalar.fromDecimal(mockScale, new BigInteger("1234567890123456789"));
break;
case TIMESTAMP_DAYS:
s = Scalar.timestampDaysFromInt(12345);
break;
case TIMESTAMP_SECONDS:
case TIMESTAMP_MILLISECONDS:
case TIMESTAMP_MICROSECONDS:
case TIMESTAMP_NANOSECONDS:
s = Scalar.timestampFromLong(DType.create(type), 1234567890123456789L);
break;
case STRING:
s = Scalar.fromString("hello, world!");
break;
case DURATION_DAYS:
s = Scalar.durationDaysFromInt(3);
break;
case DURATION_SECONDS:
case DURATION_MILLISECONDS:
case DURATION_MICROSECONDS:
case DURATION_NANOSECONDS:
s = Scalar.durationFromLong(DType.create(type), 21313);
break;
case EMPTY:
continue;
case STRUCT:
try (ColumnVector col1 = ColumnVector.fromInts(1);
ColumnVector col2 = ColumnVector.fromStrings("A");
ColumnVector col3 = ColumnVector.fromDoubles(1.23)) {
s = Scalar.structFromColumnViews(col1, col2, col3);
}
break;
case LIST:
try (ColumnVector list = ColumnVector.fromInts(1, 2, 3)) {
s = Scalar.listFromColumnView(list);
}
break;
default:
throw new IllegalArgumentException("Unexpected type: " + type);
}
try (ColumnVector c = ColumnVector.fromScalar(s, 0)) {
if (type.isDecimalType()) {
assertEquals(DType.create(type, mockScale), c.getType());
} else {
assertEquals(DType.create(type), c.getType());
}
assertEquals(0, c.getRowCount());
assertEquals(0, c.getNullCount());
}
} finally {
if (s != null) {
s.close();
}
}
}
}
@Test
void testGetNativeView() {
try (ColumnVector cv = ColumnVector.fromInts(1, 3, 4, 5)) {
// Not a real test that what's returned is a view, but verifying a non-zero native handle is the best we can do.
assertNotEquals(0L, cv.getNativeView());
}
}
@Test
void testFromScalar() {
final int rowCount = 4;
for (DType.DTypeEnum type : DType.DTypeEnum.values()) {
if(type.isDecimalType()) {
continue;
}
Scalar s = null;
ColumnVector expected = null;
ColumnVector result = null;
try {
switch (type) {
case BOOL8:
s = Scalar.fromBool(true);
expected = ColumnVector.fromBoxedBooleans(true, true, true, true);
break;
case INT8: {
byte v = (byte) 5;
s = Scalar.fromByte(v);
expected = ColumnVector.fromBoxedBytes(v, v, v, v);
break;
}
case UINT8: {
byte v = (byte) 254;
s = Scalar.fromUnsignedByte(v);
expected = ColumnVector.fromBoxedUnsignedBytes(v, v, v, v);
break;
}
case INT16: {
short v = (short) 12345;
s = Scalar.fromShort(v);
expected = ColumnVector.fromBoxedShorts(v, v, v, v);
break;
}
case UINT16: {
short v = (short) 0x89ab;
s = Scalar.fromUnsignedShort(v);
expected = ColumnVector.fromBoxedUnsignedShorts(v, v, v, v);
break;
}
case INT32: {
int v = 123456789;
s = Scalar.fromInt(v);
expected = ColumnVector.fromBoxedInts(v, v, v, v);
break;
}
case UINT32: {
int v = 0x89abcdef;
s = Scalar.fromUnsignedInt(v);
expected = ColumnVector.fromBoxedUnsignedInts(v, v, v, v);
break;
}
case INT64: {
long v = 1234567890123456789L;
s = Scalar.fromLong(v);
expected = ColumnVector.fromBoxedLongs(v, v, v, v);
break;
}
case UINT64: {
long v = 0xfedcba9876543210L;
s = Scalar.fromUnsignedLong(v);
expected = ColumnVector.fromBoxedUnsignedLongs(v, v, v, v);
break;
}
case FLOAT32: {
float v = 1.2345f;
s = Scalar.fromFloat(v);
expected = ColumnVector.fromBoxedFloats(v, v, v, v);
break;
}
case FLOAT64: {
double v = 1.23456789;
s = Scalar.fromDouble(v);
expected = ColumnVector.fromBoxedDoubles(v, v, v, v);
break;
}
case TIMESTAMP_DAYS: {
int v = 12345;
s = Scalar.timestampDaysFromInt(v);
expected = ColumnVector.daysFromInts(v, v, v, v);
break;
}
case TIMESTAMP_SECONDS: {
long v = 1234567890123456789L;
s = Scalar.timestampFromLong(DType.TIMESTAMP_SECONDS, v);
expected = ColumnVector.timestampSecondsFromLongs(v, v, v, v);
break;
}
case TIMESTAMP_MILLISECONDS: {
long v = 1234567890123456789L;
s = Scalar.timestampFromLong(DType.TIMESTAMP_MILLISECONDS, v);
expected = ColumnVector.timestampMilliSecondsFromLongs(v, v, v, v);
break;
}
case TIMESTAMP_MICROSECONDS: {
long v = 1234567890123456789L;
s = Scalar.timestampFromLong(DType.TIMESTAMP_MICROSECONDS, v);
expected = ColumnVector.timestampMicroSecondsFromLongs(v, v, v, v);
break;
}
case TIMESTAMP_NANOSECONDS: {
long v = 1234567890123456789L;
s = Scalar.timestampFromLong(DType.TIMESTAMP_NANOSECONDS, v);
expected = ColumnVector.timestampNanoSecondsFromLongs(v, v, v, v);
break;
}
case STRING: {
String v = "hello, world!";
s = Scalar.fromString(v);
expected = ColumnVector.fromStrings(v, v, v, v);
break;
}
case DURATION_DAYS: {
int v = 13;
s = Scalar.durationDaysFromInt(v);
expected = ColumnVector.durationDaysFromInts(v, v, v, v);
break;
}
case DURATION_MICROSECONDS: {
long v = 1123123123L;
s = Scalar.durationFromLong(DType.DURATION_MICROSECONDS, v);
expected = ColumnVector.durationMicroSecondsFromLongs(v, v, v, v);
break;
}
case DURATION_MILLISECONDS: {
long v = 11212432423L;
s = Scalar.durationFromLong(DType.DURATION_MILLISECONDS, v);
expected = ColumnVector.durationMilliSecondsFromLongs(v, v, v, v);
break;
}
case DURATION_NANOSECONDS: {
long v = 12353245343L;
s = Scalar.durationFromLong(DType.DURATION_NANOSECONDS, v);
expected = ColumnVector.durationNanoSecondsFromLongs(v, v, v, v);
break;
}
case DURATION_SECONDS: {
long v = 132342321123L;
s = Scalar.durationFromLong(DType.DURATION_SECONDS, v);
expected = ColumnVector.durationSecondsFromLongs(v, v, v, v);
break;
}
case EMPTY:
continue;
case STRUCT:
try (ColumnVector col0 = ColumnVector.fromInts(1);
ColumnVector col1 = ColumnVector.fromBoxedDoubles((Double) null);
ColumnVector col2 = ColumnVector.fromStrings("a");
ColumnVector col3 = ColumnVector.fromDecimals(BigDecimal.TEN);
ColumnVector col4 = ColumnVector.daysFromInts(10)) {
s = Scalar.structFromColumnViews(col0, col1, col2, col3, col4);
StructData structData = new StructData(1, null, "a", BigDecimal.TEN, 10);
expected = ColumnVector.fromStructs(new HostColumnVector.StructType(true,
new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.FLOAT64),
new HostColumnVector.BasicType(true, DType.STRING),
new HostColumnVector.BasicType(true, DType.create(DType.DTypeEnum.DECIMAL32, 0)),
new HostColumnVector.BasicType(true, DType.TIMESTAMP_DAYS)),
structData, structData, structData, structData);
}
break;
case LIST:
try (ColumnVector list = ColumnVector.fromInts(1, 2, 3)) {
s = Scalar.listFromColumnView(list);
expected = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(1, 2, 3),
Arrays.asList(1, 2, 3),
Arrays.asList(1, 2, 3),
Arrays.asList(1, 2, 3));
}
break;
default:
throw new IllegalArgumentException("Unexpected type: " + type);
}
result = ColumnVector.fromScalar(s, rowCount);
assertColumnsAreEqual(expected, result);
} finally {
if (s != null) {
s.close();
}
if (expected != null) {
expected.close();
}
if (result != null) {
result.close();
}
}
}
}
@Test
void testFromScalarNull() {
final int rowCount = 4;
for (DType.DTypeEnum typeEnum : DType.DTypeEnum.values()) {
if (typeEnum == DType.DTypeEnum.EMPTY || typeEnum == DType.DTypeEnum.LIST || typeEnum == DType.DTypeEnum.STRUCT) {
continue;
}
DType dType;
if (typeEnum.isDecimalType()) {
// magic number to invoke factory method specialized for decimal types
dType = DType.create(typeEnum, -8);
} else {
dType = DType.create(typeEnum);
}
try (Scalar s = Scalar.fromNull(dType);
ColumnVector c = ColumnVector.fromScalar(s, rowCount);
HostColumnVector hc = c.copyToHost()) {
assertEquals(typeEnum, c.getType().typeId);
assertEquals(rowCount, c.getRowCount());
assertEquals(rowCount, c.getNullCount());
for (int i = 0; i < rowCount; ++i) {
assertTrue(hc.isNull(i));
}
}
}
}
@Test
void testFromScalarNullByte() {
int numNulls = 3000;
try (Scalar s = Scalar.fromNull(DType.INT8);
ColumnVector tmp = ColumnVector.fromScalar(s, numNulls);
HostColumnVector input = tmp.copyToHost()) {
assertEquals(numNulls, input.getRowCount());
assertEquals(numNulls, input.getNullCount());
for (int i = 0; i < numNulls; i++){
assertTrue(input.isNull(i));
}
}
}
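// A column built from a null list scalar has all-null rows, but still carries an
// empty child column of the requested type, which the assertions below verify.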
@Test
void testFromScalarNullList() {
final int rowCount = 4;
for (DType.DTypeEnum typeEnum : DType.DTypeEnum.values()) {
DType dType = typeEnum.isDecimalType() ? DType.create(typeEnum, -8) : DType.create(typeEnum);
DataType hDataType;
if (DType.EMPTY.equals(dType)) {
continue;
} else if (DType.LIST.equals(dType)) {
// list of list of int32
hDataType = new ListType(true, new BasicType(true, DType.INT32));
} else if (DType.STRUCT.equals(dType)) {
// list of struct of int32
hDataType = new StructType(true, new BasicType(true, DType.INT32));
} else {
// list of non nested type
hDataType = new BasicType(true, dType);
}
try (Scalar s = Scalar.listFromNull(hDataType);
ColumnVector c = ColumnVector.fromScalar(s, rowCount);
HostColumnVector hc = c.copyToHost()) {
assertEquals(DType.LIST, c.getType());
assertEquals(rowCount, c.getRowCount());
assertEquals(rowCount, c.getNullCount());
for (int i = 0; i < rowCount; ++i) {
assertTrue(hc.isNull(i));
}
try (ColumnView child = c.getChildColumnView(0)) {
assertEquals(dType, child.getType());
assertEquals(0L, child.getRowCount());
assertEquals(0L, child.getNullCount());
if (child.getType().isNestedType()) {
try (ColumnView grandson = child.getChildColumnView(0)) {
assertEquals(DType.INT32, grandson.getType());
assertEquals(0L, grandson.getRowCount());
assertEquals(0L, grandson.getNullCount());
}
}
}
}
}
}
@Test
void testFromScalarListOfList() {
HostColumnVector.DataType childType = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
HostColumnVector.DataType resultType = new HostColumnVector.ListType(true, childType);
try (ColumnVector list = ColumnVector.fromLists(childType,
Arrays.asList(1, 2, 3),
Arrays.asList(4, 5, 6));
Scalar s = Scalar.listFromColumnView(list)) {
try (ColumnVector ret = ColumnVector.fromScalar(s, 2);
ColumnVector expected = ColumnVector.fromLists(resultType,
Arrays.asList(Arrays.asList(1, 2, 3),Arrays.asList(4, 5, 6)),
Arrays.asList(Arrays.asList(1, 2, 3),Arrays.asList(4, 5, 6)))) {
assertColumnsAreEqual(expected, ret);
}
// empty row
try (ColumnVector ret = ColumnVector.fromScalar(s, 0)) {
assertEquals(0, ret.getRowCount());
assertEquals(0, ret.getNullCount());
}
}
}
@Test
void testFromScalarListOfStruct() {
HostColumnVector.DataType childType = new HostColumnVector.StructType(true,
new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.STRING));
HostColumnVector.DataType resultType = new HostColumnVector.ListType(true, childType);
try (ColumnVector list = ColumnVector.fromStructs(childType,
new HostColumnVector.StructData(1, "s1"),
new HostColumnVector.StructData(2, "s2"));
Scalar s = Scalar.listFromColumnView(list)) {
try (ColumnVector ret = ColumnVector.fromScalar(s, 2);
ColumnVector expected = ColumnVector.fromLists(resultType,
Arrays.asList(new HostColumnVector.StructData(1, "s1"),
new HostColumnVector.StructData(2, "s2")),
Arrays.asList(new HostColumnVector.StructData(1, "s1"),
new HostColumnVector.StructData(2, "s2")))) {
assertColumnsAreEqual(expected, ret);
}
// empty row
try (ColumnVector ret = ColumnVector.fromScalar(s, 0)) {
assertEquals(0, ret.getRowCount());
assertEquals(0, ret.getNullCount());
}
}
}
@Test
void testFromScalarNullStruct() {
final int rowCount = 4;
for (DType.DTypeEnum typeEnum : DType.DTypeEnum.values()) {
DType dType = typeEnum.isDecimalType() ? DType.create(typeEnum, -8) : DType.create(typeEnum);
DataType hDataType;
if (DType.EMPTY.equals(dType)) {
continue;
} else if (DType.LIST.equals(dType)) {
// list of list of int32
hDataType = new ListType(true, new BasicType(true, DType.INT32));
} else if (DType.STRUCT.equals(dType)) {
// list of struct of int32
hDataType = new StructType(true, new BasicType(true, DType.INT32));
} else {
// list of non nested type
hDataType = new BasicType(true, dType);
}
try (Scalar s = Scalar.structFromNull(hDataType, hDataType, hDataType);
ColumnVector c = ColumnVector.fromScalar(s, rowCount);
HostColumnVector hc = c.copyToHost()) {
assertEquals(DType.STRUCT, c.getType());
assertEquals(rowCount, c.getRowCount());
assertEquals(rowCount, c.getNullCount());
for (int i = 0; i < rowCount; ++i) {
assertTrue(hc.isNull(i));
}
assertEquals(3, c.getNumChildren());
ColumnView[] children = new ColumnView[]{c.getChildColumnView(0),
c.getChildColumnView(1), c.getChildColumnView(2)};
try {
for (ColumnView child : children) {
assertEquals(dType, child.getType());
assertEquals(rowCount, child.getRowCount());
assertEquals(rowCount, child.getNullCount());
if (child.getType() == DType.LIST) {
try (ColumnView childOfChild = child.getChildColumnView(0)) {
assertEquals(DType.INT32, childOfChild.getType());
assertEquals(0L, childOfChild.getRowCount());
assertEquals(0L, childOfChild.getNullCount());
}
} else if (child.getType() == DType.STRUCT) {
assertEquals(1, child.getNumChildren());
try (ColumnView childOfChild = child.getChildColumnView(0)) {
assertEquals(DType.INT32, childOfChild.getType());
assertEquals(rowCount, childOfChild.getRowCount());
assertEquals(rowCount, childOfChild.getNullCount());
}
}
}
} finally {
for (ColumnView cv : children) cv.close();
}
}
}
}
@Test
void testReplaceNullsScalarEmptyColumn() {
try (ColumnVector input = ColumnVector.fromBoxedBooleans();
ColumnVector expected = ColumnVector.fromBoxedBooleans();
Scalar s = Scalar.fromBool(false);
ColumnVector result = input.replaceNulls(s)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsScalarBoolsWithAllNulls() {
try (ColumnVector input = ColumnVector.fromBoxedBooleans(null, null, null, null);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false);
Scalar s = Scalar.fromBool(false);
ColumnVector result = input.replaceNulls(s)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsScalarSomeNullBools() {
try (ColumnVector input = ColumnVector.fromBoxedBooleans(false, null, null, false);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, false);
Scalar s = Scalar.fromBool(true);
ColumnVector result = input.replaceNulls(s)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsScalarIntegersWithAllNulls() {
try (ColumnVector input = ColumnVector.fromBoxedInts(null, null, null, null);
ColumnVector expected = ColumnVector.fromBoxedInts(0, 0, 0, 0);
Scalar s = Scalar.fromInt(0);
ColumnVector result = input.replaceNulls(s)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsScalarSomeNullIntegers() {
try (ColumnVector input = ColumnVector.fromBoxedInts(1, 2, null, 4, null);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, 999, 4, 999);
Scalar s = Scalar.fromInt(999);
ColumnVector result = input.replaceNulls(s)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsScalarFailsOnTypeMismatch() {
try (ColumnVector input = ColumnVector.fromBoxedInts(1, 2, null, 4, null);
Scalar s = Scalar.fromBool(true)) {
assertThrows(CudfException.class, () -> input.replaceNulls(s).close());
}
}
@Test
void testReplaceNullsWithNullScalar() {
try (ColumnVector input = ColumnVector.fromBoxedInts(1, 2, null, 4, null);
Scalar s = Scalar.fromNull(input.getType());
ColumnVector result = input.replaceNulls(s)) {
assertColumnsAreEqual(input, result);
}
}
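  // ReplacePolicy.PRECEDING carries the most recent non-null value forward, while
  // FOLLOWING pulls the next non-null value backward; nulls at the edges remain null.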
@Test
void testReplaceNullsPolicy() {
try (ColumnVector input = ColumnVector.fromBoxedInts(null, 1, 2, null, 4, null);
ColumnVector preceding = input.replaceNulls(ReplacePolicy.PRECEDING);
ColumnVector expectedPre = ColumnVector.fromBoxedInts(null, 1, 2, 2, 4, 4);
ColumnVector following = input.replaceNulls(ReplacePolicy.FOLLOWING);
ColumnVector expectedFol = ColumnVector.fromBoxedInts(1, 1, 2, 4, 4, null)) {
assertColumnsAreEqual(expectedPre, preceding);
assertColumnsAreEqual(expectedFol, following);
}
}
@Test
void testReplaceNullsColumnEmptyColumn() {
try (ColumnVector input = ColumnVector.fromBoxedBooleans();
ColumnVector r = ColumnVector.fromBoxedBooleans();
ColumnVector expected = ColumnVector.fromBoxedBooleans();
ColumnVector result = input.replaceNulls(r)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsColumnBools() {
try (ColumnVector input = ColumnVector.fromBoxedBooleans(null, true, null, false);
ColumnVector r = ColumnVector.fromBoxedBooleans(false, null, true, true);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, false);
ColumnVector result = input.replaceNulls(r)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsColumnIntegers() {
try (ColumnVector input = ColumnVector.fromBoxedInts(1, 2, null, 4, null);
ColumnVector r = ColumnVector.fromBoxedInts(996, 997, 998, 909, null);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, 998, 4, null);
ColumnVector result = input.replaceNulls(r)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReplaceNullsColumnFailsOnTypeMismatch() {
try (ColumnVector input = ColumnVector.fromBoxedInts(1, 2, null, 4, null);
ColumnVector r = ColumnVector.fromBoxedBooleans(true)) {
assertThrows(CudfException.class, () -> input.replaceNulls(r).close());
}
}
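  // Interpolation strategies and probe points shared by the quantile tests below.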
static QuantileMethod[] methods = {LINEAR, LOWER, HIGHER, MIDPOINT, NEAREST};
static double[] quantiles = {0.0, 0.25, 0.33, 0.5, 1.0};
@Test
void testQuantilesOnIntegerInput() {
double[][] exactExpected = {
{-1.0, 1.0, 1.0, 2.5, 9.0}, // LINEAR
{ -1, 1, 1, 2, 9}, // LOWER
{ -1, 1, 1, 3, 9}, // HIGHER
{-1.0, 1.0, 1.0, 2.5, 9.0}, // MIDPOINT
{ -1, 1, 1, 2, 9}}; // NEAREST
try (ColumnVector cv = ColumnVector.fromBoxedInts(-1, 0, 1, 1, 2, 3, 4, 6, 7, 9)) {
for (int i = 0 ; i < methods.length ; i++) {
try (ColumnVector result = cv.quantile(methods[i], quantiles);
HostColumnVector hostResult = result.copyToHost()) {
double[] expected = exactExpected[i];
assertEquals(expected.length, hostResult.getRowCount());
for (int j = 0; j < expected.length; j++) {
assertEqualsWithinPercentage(expected[j], hostResult.getDouble(j), PERCENTAGE, methods[i] + " " + quantiles[j]);
}
}
}
}
}
@Test
void testQuantilesOnDoubleInput() {
double[][] exactExpected = {
{-1.01, 0.8, 0.9984, 2.13, 6.8}, // LINEAR
{-1.01, 0.8, 0.8, 2.13, 6.8}, // LOWER
{-1.01, 0.8, 1.11, 2.13, 6.8}, // HIGHER
{-1.01, 0.8, 0.955, 2.13, 6.8}, // MIDPOINT
{-1.01, 0.8, 1.11, 2.13, 6.8}}; // NEAREST
try (ColumnVector cv = ColumnVector.fromBoxedDoubles(-1.01, 0.15, 0.8, 1.11, 2.13, 3.4, 4.17, 5.7, 6.8)) {
for (int i = 0 ; i < methods.length ; i++) {
try (ColumnVector result = cv.quantile(methods[i], quantiles);
HostColumnVector hostResult = result.copyToHost()) {
double[] expected = exactExpected[i];
assertEquals(expected.length, hostResult.getRowCount());
for (int j = 0; j < expected.length; j++) {
assertEqualsWithinPercentage(expected[j], hostResult.getDouble(j), PERCENTAGE, methods[i] + " " + quantiles[j]);
}
}
}
}
}
@Test
void testSubvector() {
try (ColumnVector vec = ColumnVector.fromBoxedInts(1, 2, 3, null, 5);
ColumnVector expected = ColumnVector.fromBoxedInts(2, 3, null, 5);
ColumnVector found = vec.subVector(1, 5)) {
assertColumnsAreEqual(expected, found);
}
try (ColumnVector vec = ColumnVector.fromStrings("1", "2", "3", null, "5");
ColumnVector expected = ColumnVector.fromStrings("2", "3", null, "5");
ColumnVector found = vec.subVector(1, 5)) {
assertColumnsAreEqual(expected, found);
}
}
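  // slice takes pairs of [start, end) bounds and returns one column per pair;
  // the caller owns each returned column and must close it.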
@Test
void testSlice() {
    try (ColumnVector cv = ColumnVector.fromBoxedInts(10, 12, null, null, 18, 20, 22, 24, 26, 28)) {
Integer[][] expectedSlice = {
{12, null},
{20, 22, 24, 26},
{null, null},
{}};
ColumnVector[] slices = cv.slice(1, 3, 5, 9, 2, 4, 8, 8);
try {
for (int i = 0; i < slices.length; i++) {
final int sliceIndex = i;
try (HostColumnVector slice = slices[sliceIndex].copyToHost()) {
assertEquals(expectedSlice[sliceIndex].length, slices[sliceIndex].getRowCount());
IntStream.range(0, expectedSlice[sliceIndex].length).forEach(rowCount -> {
if (expectedSlice[sliceIndex][rowCount] == null) {
assertTrue(slice.isNull(rowCount));
} else {
assertEquals(expectedSlice[sliceIndex][rowCount],
slice.getInt(rowCount));
}
});
}
}
assertEquals(4, slices.length);
} finally {
for (int i = 0 ; i < slices.length ; i++) {
if (slices[i] != null) {
slices[i].close();
}
}
}
}
}
@Test
void testStringSlice() {
try(ColumnVector cv = ColumnVector.fromStrings("foo", "bar", null, null, "baz", "hello", "world", "cuda", "is", "great")) {
String[][] expectedSlice = {
{"foo", "bar"},
{null, null, "baz"},
{null, "baz", "hello"}};
ColumnVector[] slices = cv.slice(0, 2, 2, 5, 3, 6);
try {
for (int i = 0; i < slices.length; i++) {
final int sliceIndex = i;
try (HostColumnVector slice = slices[sliceIndex].copyToHost()) {
assertEquals(expectedSlice[sliceIndex].length, slices[sliceIndex].getRowCount());
IntStream.range(0, expectedSlice[sliceIndex].length).forEach(rowCount -> {
if (expectedSlice[sliceIndex][rowCount] == null) {
assertTrue(slice.isNull(rowCount));
} else {
assertEquals(expectedSlice[sliceIndex][rowCount],
slice.getJavaString(rowCount));
}
});
}
}
assertEquals(3, slices.length);
} finally {
for (int i = 0 ; i < slices.length ; i++) {
if (slices[i] != null) {
slices[i].close();
}
}
}
}
}
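  // split takes N cut points and returns N + 1 contiguous pieces that together
  // cover the whole column.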
@Test
void testSplitWithArray() {
assumeTrue(Cuda.isEnvCompatibleForTesting());
    try (ColumnVector cv = ColumnVector.fromBoxedInts(10, 12, null, null, 18, 20, 22, 24, 26, 28)) {
Integer[][] expectedData = {
{10},
{12, null},
{null, 18},
{20, 22, 24, 26},
{28}};
ColumnVector[] splits = cv.split(1, 3, 5, 9);
try {
assertEquals(expectedData.length, splits.length);
for (int splitIndex = 0; splitIndex < splits.length; splitIndex++) {
try (HostColumnVector subVec = splits[splitIndex].copyToHost()) {
assertEquals(expectedData[splitIndex].length, subVec.getRowCount());
for (int subIndex = 0; subIndex < expectedData[splitIndex].length; subIndex++) {
Integer expected = expectedData[splitIndex][subIndex];
if (expected == null) {
assertTrue(subVec.isNull(subIndex));
} else {
assertEquals(expected, subVec.getInt(subIndex));
}
}
}
}
} finally {
for (int i = 0 ; i < splits.length ; i++) {
if (splits[i] != null) {
splits[i].close();
}
}
}
}
}
@Test
void testWithOddSlices() {
try (ColumnVector cv = ColumnVector.fromBoxedInts(10, 12, null, null, 18, 20, 22, 24, 26, 28)) {
assertThrows(CudfException.class, () -> cv.slice(1, 3, 5, 9, 2, 4, 8));
}
}
@Test
void testTrimStringsWhiteSpace() {
try (ColumnVector cv = ColumnVector.fromStrings(" 123", "123 ", null, " 123 ", "\t\t123\n\n");
ColumnVector trimmed = cv.strip();
ColumnVector expected = ColumnVector.fromStrings("123", "123", null, "123", "123")) {
assertColumnsAreEqual(expected, trimmed);
}
}
@Test
void testTrimStrings() {
try (ColumnVector cv = ColumnVector.fromStrings("123", "123 ", null, "1231", "\t\t123\n\n");
Scalar one = Scalar.fromString(" 1");
ColumnVector trimmed = cv.strip(one);
ColumnVector expected = ColumnVector.fromStrings("23", "23", null, "23", "\t\t123\n\n")) {
assertColumnsAreEqual(expected, trimmed);
}
}
@Test
void testLeftTrimStringsWhiteSpace() {
try (ColumnVector cv = ColumnVector.fromStrings(" 123", "123 ", null, " 123 ", "\t\t123\n\n");
ColumnVector trimmed = cv.lstrip();
ColumnVector expected = ColumnVector.fromStrings("123", "123 ", null, "123 ", "123\n\n")) {
assertColumnsAreEqual(expected, trimmed);
}
}
@Test
void testLeftTrimStrings() {
try (ColumnVector cv = ColumnVector.fromStrings("123", " 123 ", null, "1231", "\t\t123\n\n");
Scalar one = Scalar.fromString(" 1");
ColumnVector trimmed = cv.lstrip(one);
ColumnVector expected = ColumnVector.fromStrings("23", "23 ", null, "231", "\t\t123\n\n")) {
assertColumnsAreEqual(expected, trimmed);
}
}
@Test
void testRightTrimStringsWhiteSpace() {
try (ColumnVector cv = ColumnVector.fromStrings(" 123", "123 ", null, " 123 ", "\t\t123\n\n");
ColumnVector trimmed = cv.rstrip();
ColumnVector expected = ColumnVector.fromStrings(" 123", "123", null, " 123", "\t\t123")) {
assertColumnsAreEqual(expected, trimmed);
}
}
@Test
void testRightTrimStrings() {
try (ColumnVector cv = ColumnVector.fromStrings("123", "123 ", null, "1231 ", "\t\t123\n\n");
Scalar one = Scalar.fromString(" 1");
ColumnVector trimmed = cv.rstrip(one);
ColumnVector expected = ColumnVector.fromStrings("123", "123", null, "123", "\t\t123\n\n")) {
assertColumnsAreEqual(expected, trimmed);
}
}
@Test
void testTrimStringsThrowsException() {
assertThrows(CudfException.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("123", "123 ", null, "1231", "\t\t123\n\n");
Scalar nullStr = Scalar.fromString(null);
ColumnVector trimmed = cv.strip(nullStr)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("123", "123 ", null, "1231", "\t\t123\n\n");
Scalar one = Scalar.fromInt(1);
ColumnVector trimmed = cv.strip(one)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("123", "123 ", null, "1231", "\t\t123\n\n");
ColumnVector result = cv.strip(null)) {}
});
}
@Test
void testTrimEmptyStringsWithNulls() {
try (ColumnVector cv = ColumnVector.fromStrings("", null);
ColumnVector trimmed = cv.strip();
ColumnVector expected = ColumnVector.fromStrings("", null)) {
assertColumnsAreEqual(expected, trimmed);
}
}
@Test
void testAppendStrings() {
try (HostColumnVector cv = HostColumnVector.build(10, 0, (b) -> {
b.append("123456789");
b.append("1011121314151617181920");
b.append("");
b.appendNull();
})) {
assertEquals(4, cv.getRowCount());
assertEquals("123456789", cv.getJavaString(0));
assertEquals("1011121314151617181920", cv.getJavaString(1));
assertEquals("", cv.getJavaString(2));
assertTrue(cv.isNull(3));
}
}
@Test
void testCountElements() {
DataType dt = new ListType(true, new BasicType(true, DType.INT32));
try (ColumnVector cv = ColumnVector.fromLists(dt, Arrays.asList(1),
Arrays.asList(1, 2), null, Arrays.asList(null, null),
Arrays.asList(1, 2, 3), Arrays.asList(1, 2, 3, 4));
ColumnVector lengths = cv.countElements();
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, null, 2, 3, 4)) {
assertColumnsAreEqual(expected, lengths);
}
}
@Test
void testStringLengths() {
try (ColumnVector cv = ColumnVector.fromStrings("1", "12", null, "123", "1234");
ColumnVector lengths = cv.getCharLengths();
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, null, 3, 4)) {
assertColumnsAreEqual(expected, lengths);
}
}
@Test
void testGetByteCount() {
try (ColumnVector cv = ColumnVector.fromStrings("1", "12", "123", null, "1234");
ColumnVector byteLengthVector = cv.getByteCount();
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, 3, null, 4)) {
assertColumnsAreEqual(expected, byteLengthVector);
}
}
@Test
void testEmptyStringColumnOpts() {
try (ColumnVector cv = ColumnVector.fromStrings()) {
try (ColumnVector len = cv.getCharLengths()) {
assertEquals(0, len.getRowCount());
}
try (ColumnVector mask = ColumnVector.fromBoxedBooleans();
Table input = new Table(cv);
Table filtered = input.filter(mask)) {
assertEquals(0, filtered.getColumn(0).getRowCount());
}
try (ColumnVector len = cv.getByteCount()) {
assertEquals(0, len.getRowCount());
}
try (ColumnVector lower = cv.lower();
ColumnVector upper = cv.upper()) {
assertColumnsAreEqual(cv, lower);
assertColumnsAreEqual(cv, upper);
}
}
}
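  // Special characters used below: capital and small Cyrillic koppa, Latin A with
  // macron, and Cyrillic Komi De; \ud720 and \ud721 have no case mappings.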
@Test
void testStringManipulation() {
try (ColumnVector v = ColumnVector.fromStrings("a", "B", "cd", "\u0480\u0481", "E\tf",
"g\nH", "IJ\"\u0100\u0101\u0500\u0501",
"kl m", "Nop1", "\\qRs2", "3tuV\'",
"wX4Yz", "\ud720\ud721");
ColumnVector e_lower = ColumnVector.fromStrings("a", "b", "cd", "\u0481\u0481", "e\tf",
"g\nh", "ij\"\u0101\u0101\u0501\u0501",
"kl m", "nop1", "\\qrs2", "3tuv\'",
"wx4yz", "\ud720\ud721");
ColumnVector e_upper = ColumnVector.fromStrings("A", "B", "CD", "\u0480\u0480", "E\tF",
"G\nH", "IJ\"\u0100\u0100\u0500\u0500",
"KL M", "NOP1", "\\QRS2", "3TUV\'",
"WX4YZ", "\ud720\ud721");
ColumnVector lower = v.lower();
ColumnVector upper = v.upper()) {
      assertColumnsAreEqual(e_lower, lower);
      assertColumnsAreEqual(e_upper, upper);
}
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromInts(1, 2, 3, 4);
ColumnVector lower = cv.lower()) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromInts(1, 2, 3, 4);
ColumnVector upper = cv.upper()) {}
});
}
@Test
void testStringManipulationWithNulls() {
    // Special characters in order of usage: capital and small Cyrillic koppa,
    // Latin A with macron, and Cyrillic Komi De.
    // \ud720 and \ud721 are characters with no corresponding upper or lower case forms.
try (ColumnVector v = ColumnVector.fromStrings("a", "B", "cd", "\u0480\u0481", "E\tf",
"g\nH", "IJ\"\u0100\u0101\u0500\u0501",
"kl m", "Nop1", "\\qRs2", null,
"3tuV\'", "wX4Yz", "\ud720\ud721");
ColumnVector e_lower = ColumnVector.fromStrings("a", "b", "cd", "\u0481\u0481", "e\tf",
"g\nh", "ij\"\u0101\u0101\u0501\u0501",
"kl m", "nop1", "\\qrs2", null,
"3tuv\'", "wx4yz", "\ud720\ud721");
ColumnVector e_upper = ColumnVector.fromStrings("A", "B", "CD", "\u0480\u0480", "E\tF",
"G\nH", "IJ\"\u0100\u0100\u0500\u0500",
"KL M", "NOP1", "\\QRS2", null,
"3TUV\'", "WX4YZ", "\ud720\ud721");
ColumnVector lower = v.lower();
         ColumnVector upper = v.upper()) {
      assertColumnsAreEqual(e_lower, lower);
      assertColumnsAreEqual(e_upper, upper);
}
}
@Test
void testStringConcat() {
try (ColumnVector v = ColumnVector.fromStrings("a", "B", "cd", "\u0480\u0481", "E\tf",
"g\nH", "IJ\"\u0100\u0101\u0500\u0501",
"kl m", "Nop1", "\\qRs2", "3tuV\'",
"wX4Yz", "\ud720\ud721");
ColumnVector e_concat = ColumnVector.fromStrings("aa", "BB", "cdcd",
"\u0480\u0481\u0480\u0481", "E\tfE\tf", "g\nHg\nH",
"IJ\"\u0100\u0101\u0500\u0501IJ\"\u0100\u0101\u0500\u0501",
"kl mkl m", "Nop1Nop1", "\\qRs2\\qRs2", "3tuV\'3tuV\'",
"wX4YzwX4Yz", "\ud720\ud721\ud720\ud721");
Scalar emptyString = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, emptyString, new ColumnView[]{v, v})) {
      assertColumnsAreEqual(e_concat, concat);
}
assertThrows(CudfException.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("B", "cd", "\u0480\u0481", "E\tf");
ColumnVector cv = ColumnVector.fromInts(1, 2, 3, 4);
Scalar emptyString = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, emptyString, new ColumnView[]{sv, cv})) {
}
});
assertThrows(CudfException.class, () -> {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "B", "cd");
ColumnVector sv2 = ColumnVector.fromStrings("a", "B");
Scalar emptyString = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, emptyString,
new ColumnVector[]{sv1, sv2})) {
}
});
assertThrows(AssertionError.class, () -> {
try (Scalar emptyString = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, emptyString, new ColumnView[]{})) {
}
});
assertThrows(CudfException.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar emptyString = Scalar.fromString("");
Scalar nullString = Scalar.fromString(null);
ColumnVector concat = ColumnVector.stringConcatenate(nullString, emptyString, new ColumnView[]{sv, sv})) {
}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar emptyString = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(null, emptyString, new ColumnView[]{sv, sv})) {
}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar emptyString = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, null, new ColumnView[]{sv, sv})) {
}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar emptyString = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, emptyString, new ColumnView[]{sv, null})) {
}
});
}
@Test
void testStringConcatWithNulls() {
try (ColumnVector v = ColumnVector.fromStrings("a", "B", "cd", "\u0480\u0481", "E\tf",
"g\nH", "IJ\"\u0100\u0101\u0500\u0501",
"kl m", "Nop1", "\\qRs2", null,
"3tuV\'", "wX4Yz", "\ud720\ud721");
ColumnVector e_concat = ColumnVector.fromStrings("aa", "BB", "cdcd",
"\u0480\u0481\u0480\u0481", "E\tfE\tf", "g\nHg\nH",
"IJ\"\u0100\u0101\u0500\u0501IJ\"\u0100\u0101\u0500\u0501",
"kl mkl m", "Nop1Nop1", "\\qRs2\\qRs2", "NULLNULL",
"3tuV\'3tuV\'", "wX4YzwX4Yz", "\ud720\ud721\ud720\ud721");
Scalar emptyString = Scalar.fromString("");
Scalar nullSubstitute = Scalar.fromString("NULL");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, nullSubstitute, new ColumnView[]{v, v})) {
      assertColumnsAreEqual(e_concat, concat);
}
assertThrows(CudfException.class, () -> {
try (ColumnVector v = ColumnVector.fromStrings("a", "B", "cd", "\u0480\u0481", "E\tf",
"g\nH", "IJ\"\u0100\u0101\u0500\u0501",
"kl m", "Nop1", "\\qRs2", null,
"3tuV\'", "wX4Yz", "\ud720\ud721");
Scalar emptyString = Scalar.fromString("");
Scalar nullSubstitute = Scalar.fromString("NULL");
ColumnVector concat = ColumnVector.stringConcatenate(emptyString, nullSubstitute, new ColumnView[]{v})) {
}
});
}
@Test
void testStringConcatSeparators() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "B", "cd", "\u0480\u0481", "E\tf", null, null, "\\G\u0100");
ColumnVector sv2 = ColumnVector.fromStrings("b", "C", "\u0500\u0501", "x\nYz", null, null, "", null);
ColumnVector e_concat = ColumnVector.fromStrings("aA1\t\ud721b", "BA1\t\ud721C", "cdA1\t\ud721\u0500\u0501",
"\u0480\u0481A1\t\ud721x\nYz", null, null, null, null);
Scalar separatorString = Scalar.fromString("A1\t\ud721");
Scalar nullString = Scalar.fromString(null);
ColumnVector concat = ColumnVector.stringConcatenate(separatorString, nullString, new ColumnView[]{sv1, sv2})) {
      assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatSeparatorsEmptyStringForNull() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "B", "cd", "\u0480\u0481", "E\tf", null, null, "\\G\u0100");
ColumnVector sv2 = ColumnVector.fromStrings("b", "C", "\u0500\u0501", "x\nYz", null, null, "", null);
ColumnVector e_concat = ColumnVector.fromStrings("aA1\t\ud721b", "BA1\t\ud721C", "cdA1\t\ud721\u0500\u0501",
"\u0480\u0481A1\t\ud721x\nYz", "E\tf", "", "", "\\G\u0100");
Scalar separatorString = Scalar.fromString("A1\t\ud721");
Scalar narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(separatorString, narep, new ColumnView[]{sv1, sv2}, false)) {
      assertColumnsAreEqual(e_concat, concat);
}
}
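  // The concat-ws tests below use a per-row separator column plus scalar fallbacks:
  // the separator scalar replaces a null separator, col_narep replaces null entries,
  // and the trailing boolean controls whether separators are still emitted for nulls.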
@Test
void testConcatWsTypeError() {
try (ColumnVector v0 = ColumnVector.fromInts(1, 2, 3, 4);
ColumnVector v1 = ColumnVector.fromFloats(5.0f, 6.0f);
ColumnVector sep_col = ColumnVector.fromStrings("-*");
Scalar separatorString = Scalar.fromString(null);
Scalar nullString = Scalar.fromString(null)) {
assertThrows(CudfException.class, () -> ColumnVector.stringConcatenate(
new ColumnView[]{v0, v1}, sep_col, separatorString, nullString, false));
}
}
@Test
void testConcatWsNoColumn() {
try (ColumnVector sep_col = ColumnVector.fromStrings("-*");
Scalar separatorString = Scalar.fromString(null);
Scalar nullString = Scalar.fromString(null)) {
assertThrows(AssertionError.class, () -> ColumnVector.stringConcatenate(
new ColumnView[]{}, sep_col, separatorString, nullString, false));
}
}
@Test
void testStringConcatWsSimple() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a");
ColumnVector sv2 = ColumnVector.fromStrings("B");
ColumnVector sv3 = ColumnVector.fromStrings("cd");
ColumnVector sv4 = ColumnVector.fromStrings("\u0480\u0481");
ColumnVector sv5 = ColumnVector.fromStrings("E\tf");
ColumnVector sv6 = ColumnVector.fromStrings("M");
ColumnVector sv7 = ColumnVector.fromStrings("\\G\u0100");
ColumnVector sep_col = ColumnVector.fromStrings("-*");
ColumnVector e_concat = ColumnVector.fromStrings("a-*B-*cd-*\u0480\u0481-*E\tf-*M-*\\G\u0100");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(
new ColumnView[]{sv1, sv2, sv3, sv4, sv5, sv6, sv7}, sep_col, separatorString,
col_narep, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSimpleOtherApi() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a");
ColumnVector sv2 = ColumnVector.fromStrings("B");
ColumnVector sv3 = ColumnVector.fromStrings("cd");
ColumnVector sv4 = ColumnVector.fromStrings("\u0480\u0481");
ColumnVector sv5 = ColumnVector.fromStrings("E\tf");
ColumnVector sv6 = ColumnVector.fromStrings("M");
ColumnVector sv7 = ColumnVector.fromStrings("\\G\u0100");
ColumnVector sep_col = ColumnVector.fromStrings("-*");
ColumnVector e_concat = ColumnVector.fromStrings("a-*B-*cd-*\u0480\u0481-*E\tf-*M-*\\G\u0100");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(
new ColumnView[]{sv1, sv2, sv3, sv4, sv5, sv6, sv7}, sep_col)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsOneCol() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a");
ColumnVector sep_col = ColumnVector.fromStrings("-*");
ColumnVector e_concat = ColumnVector.fromStrings("a");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(
new ColumnView[]{sv1}, sep_col, separatorString,
col_narep, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsNullSep() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "c");
ColumnVector sv2 = ColumnVector.fromStrings("b", "d");
Scalar nullString = Scalar.fromString(null);
ColumnVector sep_col = ColumnVector.fromScalar(nullString, 2);
ColumnVector e_concat = ColumnVector.fromScalar(nullString, 2);
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(new ColumnView[]{sv1, sv2},
sep_col, separatorString, col_narep, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsNullValueInCol() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "c", null);
ColumnVector sv2 = ColumnVector.fromStrings("b", "", "e");
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("a-b", "c-", "e");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(new ColumnView[]{sv1, sv2},
sep_col, separatorString, col_narep, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsNullValueInColKeepNull() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "c", null);
ColumnVector sv2 = ColumnVector.fromStrings("b", "", "e");
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("a-b", "c-", null);
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString(null);
ColumnVector concat = ColumnVector.stringConcatenate(new ColumnView[]{sv1, sv2},
sep_col, separatorString, col_narep, true)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsNullValueInColSepTrue() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "c", null);
ColumnVector sv2 = ColumnVector.fromStrings("b", "", "e");
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "-");
         // With separate-on-null == true, a separator is still emitted next to an entry
         // replaced by the empty-string narep, hence "-e" in the last row.
         ColumnVector e_concat = ColumnVector.fromStrings("a-b", "c-", "-e");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(new ColumnView[]{sv1, sv2},
sep_col, separatorString, col_narep, true)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleCol() {
try (ColumnVector sv1 = ColumnVector.fromStrings("a", "c", "e");
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("a", "c", "e");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(new ColumnView[]{sv1},
sep_col, separatorString, col_narep, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsNullAllCol() {
try (Scalar nullString = Scalar.fromString(null);
ColumnVector sv1 = ColumnVector.fromScalar(nullString, 3);
ColumnVector sv2 = ColumnVector.fromScalar(nullString, 3);
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("", "", "");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(new ColumnView[]{sv1, sv2},
sep_col, separatorString, col_narep, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsNullAllColSepTrue() {
try (Scalar nullString = Scalar.fromString(null);
ColumnVector sv1 = ColumnVector.fromScalar(nullString, 3);
ColumnVector sv2 = ColumnVector.fromScalar(nullString, 3);
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("-", "-", "-");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = ColumnVector.stringConcatenate(new ColumnView[]{sv1, sv2},
sep_col, separatorString, col_narep, true)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListCol() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa"), Arrays.asList("b", "c", "d"),
Arrays.asList("\u0480\u0481", null, "asdfbe", null));
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "*");
ColumnVector e_concat = ColumnVector.fromStrings("aaa", "b-c-d", "\u0480\u0481*asdfbe");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = cv1.stringConcatenateListElements(sep_col, separatorString,
col_narep, false, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColDefaultApi() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa"), Arrays.asList("b", "c", "d"),
Arrays.asList("\u0480\u0481", null, "asdfbe", null));
ColumnVector sep_col = ColumnVector.fromStrings("-", "-", "*");
ColumnVector e_concat = ColumnVector.fromStrings("aaa", "b-c-d", "\u0480\u0481*asdfbe");
ColumnVector concat = cv1.stringConcatenateListElements(sep_col)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColScalarSep() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa"), Arrays.asList("b", "c", "d"),
Arrays.asList("\u0480\u0481", null, "asdfbe", null));
Scalar separatorString = Scalar.fromString("-");
ColumnVector e_concat = ColumnVector.fromStrings("aaa", "b-c-d", "\u0480\u0481-asdfbe");
Scalar narep = Scalar.fromString("");
ColumnVector concat = cv1.stringConcatenateListElements(separatorString, narep, false,
false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColAllNulls() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa"), Arrays.asList(null, null, null));
ColumnVector sep_col = ColumnVector.fromStrings("-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("aaa", null);
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = cv1.stringConcatenateListElements(sep_col, separatorString,
col_narep, false, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColAllNullsScalarSep() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa"), Arrays.asList(null, null, null));
ColumnVector e_concat = ColumnVector.fromStrings("aaa", null);
Scalar separatorString = Scalar.fromString("-");
Scalar narep = Scalar.fromString("");
ColumnVector concat = cv1.stringConcatenateListElements(separatorString, narep,
false, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColAllNullsSepTrue() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa"), Arrays.asList(null, null, null));
ColumnVector sep_col = ColumnVector.fromStrings("-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("aaa", null);
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
ColumnVector concat = cv1.stringConcatenateListElements(sep_col, separatorString,
col_narep, true, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColAllNullsKeepNulls() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa"), Arrays.asList(null, null, null));
ColumnVector sep_col = ColumnVector.fromStrings("-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("aaa", null);
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString(null);
ColumnVector concat = cv1.stringConcatenateListElements(sep_col, separatorString,
col_narep, true, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColEmptyArray() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa", "bbbb"), Arrays.asList());
ColumnVector sep_col = ColumnVector.fromStrings("-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("aaa-bbbb", null);
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
// set the parameter to return null on empty array
ColumnVector concat = cv1.stringConcatenateListElements(sep_col, separatorString,
col_narep, false, false)) {
assertColumnsAreEqual(e_concat, concat);
}
}
@Test
void testStringConcatWsSingleListColEmptyArrayReturnEmpty() {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("aaa", "bbbb"), Arrays.asList());
ColumnVector sep_col = ColumnVector.fromStrings("-", "-");
ColumnVector e_concat = ColumnVector.fromStrings("aaa-bbbb", "");
Scalar separatorString = Scalar.fromString(null);
Scalar col_narep = Scalar.fromString("");
// set the parameter to return empty string on empty array
ColumnVector concat = cv1.stringConcatenateListElements(sep_col, separatorString,
col_narep, false, true)) {
assertColumnsAreEqual(e_concat, concat);
}
}
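  // repeatStrings repeats each row's string the requested number of times; zero or
  // negative repeat counts produce empty strings, and null rows stay null.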
@Test
void testRepeatStringsWithScalarRepeatTimes() {
// Empty strings column.
try (ColumnVector input = ColumnVector.fromStrings("", "", "");
ColumnVector results = input.repeatStrings(1)) {
assertColumnsAreEqual(input, results);
}
// Zero repeatTimes.
try (ColumnVector input = ColumnVector.fromStrings("abc", "xyz", "123");
ColumnVector results = input.repeatStrings(0);
ColumnVector expected = ColumnVector.fromStrings("", "", "")) {
assertColumnsAreEqual(expected, results);
}
// Negative repeatTimes.
try (ColumnVector input = ColumnVector.fromStrings("abc", "xyz", "123");
ColumnVector results = input.repeatStrings(-1);
ColumnVector expected = ColumnVector.fromStrings("", "", "")) {
assertColumnsAreEqual(expected, results);
}
// Strings column containing both null and empty, output is copied exactly from input.
try (ColumnVector input = ColumnVector.fromStrings("abc", "", null, "123", null);
ColumnVector results = input.repeatStrings(1)) {
assertColumnsAreEqual(input, results);
}
// Strings column containing both null and empty.
try (ColumnVector input = ColumnVector.fromStrings("abc", "", null, "123", null);
         ColumnVector results = input.repeatStrings(2);
ColumnVector expected = ColumnVector.fromStrings("abcabc", "", null, "123123", null)) {
assertColumnsAreEqual(expected, results);
}
}
@Test
void testRepeatStringsWithColumnRepeatTimes() {
// Empty strings column.
try (ColumnVector input = ColumnVector.fromStrings("", "", "");
ColumnVector repeatTimes = ColumnVector.fromInts(-1, 0, 1);
ColumnVector results = input.repeatStrings(repeatTimes)) {
assertColumnsAreEqual(input, results);
}
// Zero and negative repeatTimes.
try (ColumnVector input = ColumnVector.fromStrings("abc", "xyz", "123", "456", "789", "a1");
ColumnVector repeatTimes = ColumnVector.fromInts(-200, -100, 0, 0, 1, 2);
ColumnVector results = input.repeatStrings(repeatTimes);
ColumnVector expected = ColumnVector.fromStrings("", "", "", "", "789", "a1a1")) {
assertColumnsAreEqual(expected, results);
}
// Strings column contains both null and empty, output is copied exactly from input.
try (ColumnVector input = ColumnVector.fromStrings("abc", "", null, "123", null);
ColumnVector repeatTimes = ColumnVector.fromInts(1, 1, 1, 1, 1);
ColumnVector results = input.repeatStrings(repeatTimes)) {
assertColumnsAreEqual(input, results);
}
// Strings column contains both null and empty.
try (ColumnVector input = ColumnVector.fromStrings("abc", "", null, "123", null);
ColumnVector repeatTimes = ColumnVector.fromInts(2, 3, 1, 3, 2);
ColumnVector results = input.repeatStrings(repeatTimes);
ColumnVector expected = ColumnVector.fromStrings("abcabc", "", null, "123123123", null)) {
assertColumnsAreEqual(expected, results);
}
}
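  // listConcatenateByRow concatenates corresponding list rows across the inputs;
  // by default a null list row makes the whole output row null.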
@Test
void testListConcatByRow() {
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(0), Arrays.asList(1, 2, 3), null, Arrays.asList(), Arrays.asList());
ColumnVector result = ColumnVector.listConcatenateByRow(cv)) {
assertColumnsAreEqual(cv, result);
}
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(0), Arrays.asList(1, 2, 3), null, Arrays.asList(), Arrays.asList());
ColumnVector cv2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(1, 2, 3), Arrays.asList((Integer) null), Arrays.asList(10, 12), Arrays.asList(100, 200, 300),
Arrays.asList());
ColumnVector result = ColumnVector.listConcatenateByRow(cv1, cv2);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(0, 1, 2, 3), Arrays.asList(1, 2, 3, null), null, Arrays.asList(100, 200, 300),
Arrays.asList())) {
assertColumnsAreEqual(expect, result);
}
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("AAA", "BBB"), Arrays.asList("aaa"), Arrays.asList("111"), Arrays.asList("X"),
Arrays.asList());
ColumnVector cv2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList(), Arrays.asList("bbb", "ccc"), null, Arrays.asList((String) null),
Arrays.asList());
ColumnVector cv3 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("CCC"), Arrays.asList(), Arrays.asList("222", "333"), Arrays.asList("Z"),
Arrays.asList());
ColumnVector result = ColumnVector.listConcatenateByRow(cv1, cv2, cv3);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("AAA", "BBB", "CCC"), Arrays.asList("aaa", "bbb", "ccc"), null,
Arrays.asList("X", null, "Z"), Arrays.asList())) {
assertColumnsAreEqual(expect, result);
}
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.FLOAT64)),
Arrays.asList(1.23, 0.0, Double.NaN), Arrays.asList(), null, Arrays.asList(-1.23e10, null));
ColumnVector result = ColumnVector.listConcatenateByRow(cv, cv, cv);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.FLOAT64)),
Arrays.asList(1.23, 0.0, Double.NaN, 1.23, 0.0, Double.NaN, 1.23, 0.0, Double.NaN),
Arrays.asList(), null, Arrays.asList(-1.23e10, null, -1.23e10, null, -1.23e10, null))) {
assertColumnsAreEqual(expect, result);
}
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32))), Arrays.asList(Arrays.asList(1)));
ColumnVector result = ColumnVector.listConcatenateByRow(cv, cv);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32))), Arrays.asList(Arrays.asList(1), Arrays.asList(1)))){
assertColumnsAreEqual(expect, result);
}
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32))), Arrays.asList(Arrays.asList(1, null, 2)));
ColumnVector cv2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32))), Arrays.asList(Arrays.asList(null, null, 5, 6, null)));
ColumnVector result = ColumnVector.listConcatenateByRow(cv1, cv2);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32))), Arrays.asList(Arrays.asList(1, null, 2), Arrays.asList(null, null, 5, 6, null)))){
assertColumnsAreEqual(expect, result);
}
assertThrows(CudfException.class, () -> {
try (ColumnVector cv = ColumnVector.fromInts(1, 2, 3);
ColumnVector result = ColumnVector.listConcatenateByRow(cv, cv)) {
}
});
assertThrows(CudfException.class, () -> {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), Arrays.asList(1, 2, 3));
ColumnVector cv2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), Arrays.asList(1, 2), Arrays.asList(3));
ColumnVector result = ColumnVector.listConcatenateByRow(cv1, cv2)) {
}
});
assertThrows(CudfException.class, () -> {
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), Arrays.asList(1, 2, 3));
ColumnVector cv2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT64)), Arrays.asList(1L));
ColumnVector result = ColumnVector.listConcatenateByRow(cv1, cv2)) {
}
});
}
@Test
void testListConcatByRowIgnoreNull() {
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(0), Arrays.asList(1, 2, 3), null, Arrays.asList(), Arrays.asList());
ColumnVector result = ColumnVector.listConcatenateByRow(true, cv)) {
assertColumnsAreEqual(cv, result);
}
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList((Integer) null), Arrays.asList(1, 2, 3), null, Arrays.asList(), null);
ColumnVector cv2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(1, 2, 3), null, Arrays.asList(10, 12), Arrays.asList(100, 200, 300), null);
ColumnVector result = ColumnVector.listConcatenateByRow(true, cv1, cv2);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(null, 1, 2, 3), Arrays.asList(1, 2, 3), Arrays.asList(10, 12),
Arrays.asList(100, 200, 300), null)) {
assertColumnsAreEqual(expect, result);
}
try (ColumnVector cv1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("AAA", "BBB"), Arrays.asList("aaa"), Arrays.asList("111"), null, null);
ColumnVector cv2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
null, Arrays.asList("bbb", "ccc"), null, Arrays.asList("Y", null), null);
ColumnVector cv3 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("CCC"), Arrays.asList(), Arrays.asList("222", "333"), null, null);
ColumnVector result = ColumnVector.listConcatenateByRow(true, cv1, cv2, cv3);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("AAA", "BBB", "CCC"), Arrays.asList("aaa", "bbb", "ccc"),
Arrays.asList("111", "222", "333"), Arrays.asList("Y", null), null)) {
assertColumnsAreEqual(expect, result);
}
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.FLOAT64)),
Arrays.asList(1.23, 0.0, Double.NaN), Arrays.asList(), null, Arrays.asList(-1.23e10, null));
ColumnVector result = ColumnVector.listConcatenateByRow(true, cv, cv, cv);
ColumnVector expect = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.FLOAT64)),
Arrays.asList(1.23, 0.0, Double.NaN, 1.23, 0.0, Double.NaN, 1.23, 0.0, Double.NaN),
Arrays.asList(), null, Arrays.asList(-1.23e10, null, -1.23e10, null, -1.23e10, null))) {
assertColumnsAreEqual(expect, result);
}
}
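  // flattenLists removes one level of list nesting; the boolean overload controls
  // whether a null inner list nullifies the output row (false) or is skipped (true).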
@Test
void testFlattenLists() {
HostColumnVector.ListType listType = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
HostColumnVector.ListType listOfListsType = new HostColumnVector.ListType(true, listType);
// Input does not have nulls.
try (ColumnVector input = ColumnVector.fromLists(listOfListsType,
Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3), Arrays.asList(4, 5, 6)),
Arrays.asList(Arrays.asList(7, 8, 9), Arrays.asList(10, 11, 12, 13, 14, 15)));
ColumnVector result = input.flattenLists();
ColumnVector expected = ColumnVector.fromLists(listType,
Arrays.asList(1, 2, 3, 4, 5, 6),
Arrays.asList(7, 8, 9, 10, 11, 12, 13, 14, 15))) {
assertColumnsAreEqual(expected, result);
}
// Input has nulls.
try (ColumnVector input = ColumnVector.fromLists(listOfListsType,
Arrays.asList(null, Arrays.asList(3), Arrays.asList(4, 5, 6)),
Arrays.asList(Arrays.asList(null, 8, 9), Arrays.asList(10, 11, 12, 13, 14, null)))) {
try (ColumnVector result = input.flattenLists(false);
ColumnVector expected = ColumnVector.fromLists(listType,
null,
Arrays.asList(null, 8, 9, 10, 11, 12, 13, 14, null))) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = input.flattenLists(true);
ColumnVector expected = ColumnVector.fromLists(listType,
Arrays.asList(3, 4, 5, 6),
Arrays.asList(null, 8, 9, 10, 11, 12, 13, 14, null))) {
assertColumnsAreEqual(expected, result);
}
}
}
@Test
void testPrefixSum() {
try (ColumnVector v1 = ColumnVector.fromLongs(1, 2, 3, 5, 8, 10);
ColumnVector summed = v1.prefixSum();
ColumnVector expected = ColumnVector.fromLongs(1, 3, 6, 11, 19, 29)) {
assertColumnsAreEqual(expected, summed);
}
}
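  // For scans, INCLUSIVE includes the current row in the running value and EXCLUSIVE
  // shifts the result by one row, seeding it with the aggregation identity. With
  // NullPolicy.INCLUDE the first null poisons all later rows; with EXCLUDE nulls
  // pass through unchanged.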
@Test
void testScanSum() {
try (ColumnVector v1 = ColumnVector.fromBoxedInts(1, 2, null, 3, 5, 8, 10)) {
try (ColumnVector result = v1.scan(ScanAggregation.sum(), ScanType.INCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 3, null, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.sum(), ScanType.INCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 3, null, 6, 11, 19, 29)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.sum(), ScanType.EXCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(0, 1, 3, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.sum(), ScanType.EXCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(0, 1, null, 3, 6, 11, 19)) {
assertColumnsAreEqual(expected, result);
}
}
}
@Test
void testScanMax() {
try (ColumnVector v1 = ColumnVector.fromBoxedInts(1, 2, null, 3, 5, 8, 10)) {
try (ColumnVector result = v1.scan(ScanAggregation.max(), ScanType.INCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, null, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.max(), ScanType.INCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, null, 3, 5, 8, 10)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.max(), ScanType.EXCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(Integer.MIN_VALUE, 1, 2, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.max(), ScanType.EXCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(Integer.MIN_VALUE, 1, null, 2, 3, 5, 8)) {
assertColumnsAreEqual(expected, result);
}
}
}
@Test
void testScanMin() {
try (ColumnVector v1 = ColumnVector.fromBoxedInts(1, 2, null, 3, 5, 8, 10)) {
try (ColumnVector result = v1.scan(ScanAggregation.min(), ScanType.INCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 1, null, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.min(), ScanType.INCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 1, null, 1, 1, 1, 1)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.min(), ScanType.EXCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(Integer.MAX_VALUE, 1, 1, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.min(), ScanType.EXCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(Integer.MAX_VALUE, 1, null, 1, 1, 1, 1)) {
assertColumnsAreEqual(expected, result);
}
}
}
@Test
void testScanProduct() {
try (ColumnVector v1 = ColumnVector.fromBoxedInts(1, 2, null, 3, 5, 8, 10)) {
try (ColumnVector result = v1.scan(ScanAggregation.product(), ScanType.INCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, null, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.product(), ScanType.INCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, null, 6, 30, 240, 2400)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.product(), ScanType.EXCLUSIVE, NullPolicy.INCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 1, 2, null, null, null, null)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector result = v1.scan(ScanAggregation.product(), ScanType.EXCLUSIVE, NullPolicy.EXCLUDE);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 1, null, 2, 6, 30, 240)) {
assertColumnsAreEqual(expected, result);
}
}
}
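  // Rank-style scans order by a struct column and support only ScanType.INCLUSIVE.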
@Test
void testScanRank() {
try (ColumnVector col1 = ColumnVector.fromBoxedInts(-97, -97, -97, null, -16, 5, null, null, 6, 6, 34, null);
ColumnVector col2 = ColumnVector.fromBoxedInts(3, 3, 4, 7, 7, 7, 7, 7, 8, 8, 8, 9);
ColumnVector struct_order = ColumnVector.makeStruct(col1, col2);
ColumnVector expected = ColumnVector.fromBoxedInts(
1, 1, 3, 4, 5, 6, 7, 7, 9, 9, 11, 12)) {
try (ColumnVector result = struct_order.scan(ScanAggregation.rank(),
ScanType.INCLUSIVE, NullPolicy.INCLUDE)) {
assertColumnsAreEqual(expected, result);
}
// Exclude should have identical results
try (ColumnVector result = struct_order.scan(ScanAggregation.rank(),
ScanType.INCLUSIVE, NullPolicy.EXCLUDE)
) {
assertColumnsAreEqual(expected, result);
}
// Rank aggregations do not support ScanType.EXCLUSIVE
}
}
@Test
void testScanDenseRank() {
try (ColumnVector col1 = ColumnVector.fromBoxedInts(-97, -97, -97, null, -16, 5, null, null, 6, 6, 34, null);
ColumnVector col2 = ColumnVector.fromBoxedInts(3, 3, 4, 7, 7, 7, 7, 7, 8, 8, 8, 9);
ColumnVector struct_order = ColumnVector.makeStruct(col1, col2);
ColumnVector expected = ColumnVector.fromBoxedInts(
1, 1, 2, 3, 4, 5, 6, 6, 7, 7, 8, 9)) {
try (ColumnVector result = struct_order.scan(ScanAggregation.denseRank(),
ScanType.INCLUSIVE, NullPolicy.INCLUDE)) {
assertColumnsAreEqual(expected, result);
}
// Exclude should have identical results
try (ColumnVector result = struct_order.scan(ScanAggregation.denseRank(),
ScanType.INCLUSIVE, NullPolicy.EXCLUDE)) {
assertColumnsAreEqual(expected, result);
}
// Dense rank aggregations do not support ScanType.EXCLUSIVE
}
}
@Test
void testScanPercentRank() {
try (ColumnVector col1 = ColumnVector.fromBoxedInts(-97, -97, -97, null, -16, 5, null, null, 6, 6, 34, null);
ColumnVector col2 = ColumnVector.fromBoxedInts( 3, 3, 4, 7, 7, 7, 7, 7, 8, 8, 8, 9);
ColumnVector struct_order = ColumnVector.makeStruct(col1, col2);
ColumnVector expected = ColumnVector.fromBoxedDoubles(
0.0, 0.0, 2.0/11, 3.0/11, 4.0/11, 5.0/11, 6.0/11, 6.0/11, 8.0/11, 8.0/11, 10.0/11, 1.0)) {
try (ColumnVector result = struct_order.scan(ScanAggregation.percentRank(),
ScanType.INCLUSIVE, NullPolicy.INCLUDE)) {
assertColumnsAreEqual(expected, result);
}
// Exclude should have identical results
try (ColumnVector result = struct_order.scan(ScanAggregation.percentRank(),
ScanType.INCLUSIVE, NullPolicy.EXCLUDE)) {
assertColumnsAreEqual(expected, result);
}
// Percent rank aggregations do not support ScanType.EXCLUSIVE
}
}
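  // Rolling-window tests: window(two, one) covers two preceding rows (including the
  // current one) and one following row; rows whose window holds fewer than
  // minPeriods observations produce null.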
@Test
void testWindowStatic() {
try (Scalar one = Scalar.fromInt(1);
Scalar two = Scalar.fromInt(2);
WindowOptions options = WindowOptions.builder()
.window(two, one)
.minPeriods(2).build()) {
try (ColumnVector v1 = ColumnVector.fromInts(5, 4, 7, 6, 8)) {
try (ColumnVector expected = ColumnVector.fromLongs(9, 16, 17, 21, 14);
ColumnVector result = v1.rollingWindow(RollingAggregation.sum(), options)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector expected = ColumnVector.fromInts(4, 4, 4, 6, 6);
ColumnVector result = v1.rollingWindow(RollingAggregation.min(), options)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector expected = ColumnVector.fromInts(5, 7, 7, 8, 8);
ColumnVector result = v1.rollingWindow(RollingAggregation.max(), options)) {
assertColumnsAreEqual(expected, result);
}
        // Mean produces a FLOAT64 result even though the input column is INT32
        try (ColumnVector expected = ColumnVector.fromDoubles(4.5, 16.0 / 3, 17.0 / 3, 7, 7);
ColumnVector result = v1.rollingWindow(RollingAggregation.mean(), options)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector expected = ColumnVector.fromBoxedInts(4, 7, 6, 8, null);
ColumnVector result = v1.rollingWindow(RollingAggregation.lead(1), options)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector expected = ColumnVector.fromBoxedInts(null, 5, 4, 7, 6);
ColumnVector result = v1.rollingWindow(RollingAggregation.lag(1), options)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector defaultOutput = ColumnVector.fromInts(-1, -2, -3, -4, -5);
ColumnVector expected = ColumnVector.fromBoxedInts(-1, 5, 4, 7, 6);
ColumnVector result = v1.rollingWindow(RollingAggregation.lag(1, defaultOutput), options)) {
assertColumnsAreEqual(expected, result);
}
        try (ColumnVector expected = ColumnVector.fromBoxedDoubles(0.7071d, 1.5275d, 1.5275d, 1d, 1.4142d);
ColumnVector result = v1.rollingWindow(RollingAggregation.standardDeviation(), options)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector expected =
ColumnVector.fromBoxedDoubles(Double.POSITIVE_INFINITY, 2.1602d, 2.1602d, 1.4142d, Double.POSITIVE_INFINITY);
ColumnVector result = v1.rollingWindow(RollingAggregation.standardDeviation(2), options)) {
assertColumnsAreEqual(expected, result);
}
}
}
}
@Test
void testWindowStaticCounts() {
try (Scalar one = Scalar.fromInt(1);
Scalar two = Scalar.fromInt(2);
WindowOptions options = WindowOptions.builder()
.window(two, one)
.minPeriods(2).build()) {
try (ColumnVector v1 = ColumnVector.fromBoxedInts(5, 4, null, 6, 8)) {
try (ColumnVector expected = ColumnVector.fromInts(2, 2, 2, 2, 2);
ColumnVector result = v1.rollingWindow(RollingAggregation.count(NullPolicy.EXCLUDE), options)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector expected = ColumnVector.fromInts(2, 3, 3, 3, 2);
ColumnVector result = v1.rollingWindow(RollingAggregation.count(NullPolicy.INCLUDE), options)) {
assertColumnsAreEqual(expected, result);
}
}
}
}
@Test
void testWindowDynamicNegative() {
try (ColumnVector precedingCol = ColumnVector.fromInts(3, 3, 3, 4, 4);
ColumnVector followingCol = ColumnVector.fromInts(-1, -1, -1, -1, 0)) {
try (WindowOptions window = WindowOptions.builder()
.minPeriods(2).window(precedingCol, followingCol).build()) {
try (ColumnVector v1 = ColumnVector.fromInts(5, 4, 7, 6, 8);
ColumnVector expected = ColumnVector.fromBoxedLongs(null, null, 9L, 16L, 25L);
ColumnVector result = v1.rollingWindow(RollingAggregation.sum(), window)) {
assertColumnsAreEqual(expected, result);
}
}
}
}
@Test
void testWindowLag() {
try (Scalar negOne = Scalar.fromInt(-1);
Scalar two = Scalar.fromInt(2);
WindowOptions window = WindowOptions.builder()
.minPeriods(1)
.window(two, negOne).build()) {
try (ColumnVector v1 = ColumnVector.fromInts(5, 4, 7, 6, 8);
ColumnVector expected = ColumnVector.fromBoxedInts(null, 5, 4, 7, 6);
ColumnVector result = v1.rollingWindow(RollingAggregation.max(), window)) {
assertColumnsAreEqual(expected, result);
}
}
}
@Test
void testWindowDynamic() {
try (ColumnVector precedingCol = ColumnVector.fromInts(1, 2, 3, 1, 2);
ColumnVector followingCol = ColumnVector.fromInts(2, 2, 2, 2, 2)) {
try (WindowOptions window = WindowOptions.builder().minPeriods(2)
.window(precedingCol, followingCol).build()) {
try (ColumnVector v1 = ColumnVector.fromInts(5, 4, 7, 6, 8);
ColumnVector expected = ColumnVector.fromLongs(16, 22, 30, 14, 14);
ColumnVector result = v1.rollingWindow(RollingAggregation.sum(), window)) {
assertColumnsAreEqual(expected, result);
}
}
}
}
@Test
void testWindowThrowsException() {
try (Scalar one = Scalar.fromInt(1);
Scalar two = Scalar.fromInt(2);
Scalar three = Scalar.fromInt(3);
ColumnVector arraywindowCol = ColumnVector.fromBoxedInts(1, 2, 3 ,1, 1)) {
assertThrows(IllegalStateException.class, () -> {
try (WindowOptions options = WindowOptions.builder()
.window(three, two).minPeriods(3)
.window(arraywindowCol, arraywindowCol).build()) {
}
});
assertThrows(IllegalArgumentException.class, () -> {
try (WindowOptions options = WindowOptions.builder()
.window(two, one)
.minPeriods(1)
.orderByColumnIndex(0)
.build()) {
arraywindowCol.rollingWindow(RollingAggregation.sum(), options);
}
});
}
}
@Test
void testFindAndReplaceAll() {
try(ColumnVector vector = ColumnVector.fromInts(1, 4, 1, 5, 3, 3, 1, 2, 9, 8);
ColumnVector oldValues = ColumnVector.fromInts(1, 4, 7); // 7 doesn't exist, nothing to replace
ColumnVector replacedValues = ColumnVector.fromInts(7, 6, 1);
ColumnVector expectedVector = ColumnVector.fromInts(7, 6, 7, 5, 3, 3, 7, 2, 9, 8);
ColumnVector newVector = vector.findAndReplaceAll(oldValues, replacedValues)) {
assertColumnsAreEqual(expectedVector, newVector);
}
}
@Test
void testFindAndReplaceAllFloat() {
try(ColumnVector vector = ColumnVector.fromFloats(1.0f, 4.2f, 1.3f, 5.7f, 3f, 3f, 1.0f, 2.6f, 0.9f, 8.3f);
ColumnVector oldValues = ColumnVector.fromFloats(1.0f, 4.2f, 7); // 7 doesn't exist, nothing to replace
ColumnVector replacedValues = ColumnVector.fromFloats(7.3f, 6.7f, 1.0f);
ColumnVector expectedVector = ColumnVector.fromFloats(7.3f, 6.7f, 1.3f, 5.7f, 3f, 3f, 7.3f, 2.6f, 0.9f, 8.3f);
ColumnVector newVector = vector.findAndReplaceAll(oldValues, replacedValues)) {
assertColumnsAreEqual(expectedVector, newVector);
}
}
@Test
void testFindAndReplaceAllTimeUnits() {
try(ColumnVector vector = ColumnVector.timestampMicroSecondsFromLongs(1L, 1L, 2L, 8L);
ColumnVector oldValues = ColumnVector.timestampMicroSecondsFromLongs(1L, 2L, 7L); // 7 doesn't exist, nothing to replace
ColumnVector replacedValues = ColumnVector.timestampMicroSecondsFromLongs(9L, 4L, 0L);
ColumnVector expectedVector = ColumnVector.timestampMicroSecondsFromLongs(9L, 9L, 4L, 8L);
ColumnVector newVector = vector.findAndReplaceAll(oldValues, replacedValues)) {
assertColumnsAreEqual(expectedVector, newVector);
}
}
@Test
void testFindAndReplaceAllMixingTypes() {
try(ColumnVector vector = ColumnVector.fromInts(1, 4, 1, 5, 3, 3, 1, 2, 9, 8);
ColumnVector oldValues = ColumnVector.fromInts(1, 4, 7); // 7 doesn't exist, nothing to replace
ColumnVector replacedValues = ColumnVector.fromFloats(7.0f, 6, 1)) {
assertThrows(CudfException.class, () -> vector.findAndReplaceAll(oldValues, replacedValues));
}
}
@Test
void testFindAndReplaceAllStrings() {
try(ColumnVector vector = ColumnVector.fromStrings("spark", "scala", "spark", "hello", "code");
ColumnVector oldValues = ColumnVector.fromStrings("spark","code","hello");
ColumnVector replacedValues = ColumnVector.fromStrings("sparked", "codec", "hi");
ColumnVector expectedValues = ColumnVector.fromStrings("sparked", "scala", "sparked", "hi", "codec");
ColumnVector cv = vector.findAndReplaceAll(oldValues, replacedValues)) {
assertColumnsAreEqual(expectedValues, cv);
}
}
@Test
void testFindAndReplaceAllWithNull() {
try(ColumnVector vector = ColumnVector.fromBoxedInts(1, 4, 1, 5, 3, 3, 1, null, 9, 8);
ColumnVector oldValues = ColumnVector.fromBoxedInts(1, 4, 8);
ColumnVector replacedValues = ColumnVector.fromBoxedInts(7, 6, null);
ColumnVector expectedVector = ColumnVector.fromBoxedInts(7, 6, 7, 5, 3, 3, 7, null, 9, null);
ColumnVector newVector = vector.findAndReplaceAll(oldValues, replacedValues)) {
assertColumnsAreEqual(expectedVector, newVector);
}
}
@Test
void testFindAndReplaceAllNullWithValue() {
// null values cannot be replaced using findAndReplaceAll()
try(ColumnVector vector = ColumnVector.fromBoxedInts(1, 4, 1, 5, 3, 3, 1, null, 9, 8);
ColumnVector oldValues = ColumnVector.fromBoxedInts(1, 4, null);
ColumnVector replacedValues = ColumnVector.fromBoxedInts(7, 6, 8)) {
assertThrows(CudfException.class, () -> vector.findAndReplaceAll(oldValues, replacedValues));
}
}
@Test
void testFindAndReplaceAllFloatNan() {
// Float.NaN != Float.NaN, therefore NaN values cannot be replaced
try(ColumnVector vector = ColumnVector.fromFloats(1.0f, 4.2f, 1.3f, 5.7f, 3f, 3f, 1.0f, 2.6f, Float.NaN, 8.3f);
ColumnVector oldValues = ColumnVector.fromFloats(1.0f, 4.2f, Float.NaN);
ColumnVector replacedValues = ColumnVector.fromFloats(7.3f, 6.7f, 0);
ColumnVector expectedVector = ColumnVector.fromFloats(7.3f, 6.7f, 1.3f, 5.7f, 3f, 3f, 7.3f, 2.6f, Float.NaN, 8.3f);
ColumnVector newVector = vector.findAndReplaceAll(oldValues, replacedValues)) {
assertColumnsAreEqual(expectedVector, newVector);
}
}
@Test
void testFindAndReplaceAllWithFloatNan() {
try(ColumnVector vector = ColumnVector.fromFloats(1.0f, 4.2f, 1.3f, 5.7f, 3f, 3f, 1.0f, 2.6f, Float.NaN, 8.3f);
ColumnVector oldValues = ColumnVector.fromFloats(1.0f, 4.2f, 8.3f);
ColumnVector replacedValues = ColumnVector.fromFloats(7.3f, Float.NaN, 0);
ColumnVector expectedVector = ColumnVector.fromFloats(7.3f, Float.NaN, 1.3f, 5.7f, 3f, 3f, 7.3f, 2.6f, Float.NaN, 0);
ColumnVector newVector = vector.findAndReplaceAll(oldValues, replacedValues)) {
assertColumnsAreEqual(expectedVector, newVector);
}
}
@Test
void emptyStringColumnFindReplaceAll() {
try (ColumnVector cv = ColumnVector.fromStrings(null, "A", "B", "C", "");
ColumnVector expected = ColumnVector.fromStrings(null, "A", "B", "C", null);
ColumnVector from = ColumnVector.fromStrings("");
ColumnVector to = ColumnVector.fromStrings((String)null);
ColumnVector replaced = cv.findAndReplaceAll(from, to)) {
assertColumnsAreEqual(expected, replaced);
}
}
@Test
void testBitCast() {
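// bitCastTo reinterprets the underlying fixed-width data without converting
// values: a DECIMAL64 column with scale -2 viewed as INT64 exposes its raw
// unscaled values, as the expected column below shows.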
try (ColumnVector cv = ColumnVector.decimalFromLongs(-2, 1L, 2L, 100L, 552L);
ColumnVector expected = ColumnVector.fromLongs(1L, 2L, 100L, 552L);
ColumnView casted = cv.bitCastTo(DType.INT64)) {
assertColumnsAreEqual(expected, casted);
}
}
@Test
void testFixedWidthCast() {
int[] values = new int[]{1,3,4,5,2};
long[] longValues = Arrays.stream(values).asLongStream().toArray();
double[] doubleValues = Arrays.stream(values).asDoubleStream().toArray();
byte[] byteValues = new byte[values.length];
float[] floatValues = new float[values.length];
short[] shortValues = new short[values.length];
IntStream.range(0, values.length).forEach(i -> {
byteValues[i] = (byte)values[i];
floatValues[i] = (float)values[i];
shortValues[i] = (short)values[i];
});
try (ColumnVector cv = ColumnVector.fromInts(values);
ColumnVector expectedUnsignedInts = ColumnVector.fromUnsignedInts(values);
ColumnVector unsignedInts = cv.asUnsignedInts();
ColumnVector expectedBytes = ColumnVector.fromBytes(byteValues);
ColumnVector bytes = cv.asBytes();
ColumnVector expectedUnsignedBytes = ColumnVector.fromUnsignedBytes(byteValues);
ColumnVector unsignedBytes = cv.asUnsignedBytes();
ColumnVector expectedFloats = ColumnVector.fromFloats(floatValues);
ColumnVector floats = cv.asFloats();
ColumnVector expectedDoubles = ColumnVector.fromDoubles(doubleValues);
ColumnVector doubles = cv.asDoubles();
ColumnVector expectedLongs = ColumnVector.fromLongs(longValues);
ColumnVector longs = cv.asLongs();
ColumnVector expectedUnsignedLongs = ColumnVector.fromUnsignedLongs(longValues);
ColumnVector unsignedLongs = cv.asUnsignedLongs();
ColumnVector expectedShorts = ColumnVector.fromShorts(shortValues);
ColumnVector shorts = cv.asShorts();
ColumnVector expectedUnsignedShorts = ColumnVector.fromUnsignedShorts(shortValues);
ColumnVector unsignedShorts = cv.asUnsignedShorts();
ColumnVector expectedDays = ColumnVector.daysFromInts(values);
ColumnVector days = cv.asTimestampDays();
ColumnVector expectedUs = ColumnVector.timestampMicroSecondsFromLongs(longValues);
ColumnVector us = longs.asTimestampMicroseconds();
ColumnVector expectedNs = ColumnVector.timestampNanoSecondsFromLongs(longValues);
ColumnVector ns = longs.asTimestampNanoseconds();
ColumnVector expectedMs = ColumnVector.timestampMilliSecondsFromLongs(longValues);
ColumnVector ms = longs.asTimestampMilliseconds();
ColumnVector expectedS = ColumnVector.timestampSecondsFromLongs(longValues);
ColumnVector s = longs.asTimestampSeconds()) {
assertColumnsAreEqual(expectedUnsignedInts, unsignedInts);
assertColumnsAreEqual(expectedBytes, bytes);
assertColumnsAreEqual(expectedUnsignedBytes, unsignedBytes);
assertColumnsAreEqual(expectedShorts, shorts);
assertColumnsAreEqual(expectedUnsignedShorts, unsignedShorts);
assertColumnsAreEqual(expectedLongs, longs);
assertColumnsAreEqual(expectedUnsignedLongs, unsignedLongs);
assertColumnsAreEqual(expectedDoubles, doubles);
assertColumnsAreEqual(expectedFloats, floats);
assertColumnsAreEqual(expectedDays, days);
assertColumnsAreEqual(expectedUs, us);
assertColumnsAreEqual(expectedMs, ms);
assertColumnsAreEqual(expectedNs, ns);
assertColumnsAreEqual(expectedS, s);
}
}
@Test
void testCastBigDecimalToString() {
BigDecimal[] bigValues = {new BigDecimal("923121331938210123.321"),
new BigDecimal("9223372036854775808.191"),
new BigDecimal("-9.223"),
new BigDecimal("0.000"),
new BigDecimal("9328323982309091029831.002")
};
try (ColumnVector cv = ColumnVector.fromDecimals(bigValues);
ColumnVector values = cv.castTo(DType.STRING);
ColumnVector expected = ColumnVector.fromStrings("923121331938210123.321",
"9223372036854775808.191",
"-9.223",
"0.000",
"9328323982309091029831.002")) {
assertColumnsAreEqual(expected, values);
}
BigDecimal[] bigValues0 = {new BigDecimal("992983283728193827182918744829283742232")};
assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector cv = ColumnVector.fromDecimals(bigValues0)) {
}
}, "Precision check should've thrown an IllegalArgumentException");
}
@Test
void testCastStringToBigDecimal() {
String[] bigValues = {"923121331938210123.321",
"9223372036854775808.191",
"9328323982309091029831.002"
};
try (ColumnVector cv = ColumnVector.fromStrings(bigValues);
ColumnVector values = cv.castTo(DType.create(DType.DTypeEnum.DECIMAL128, -3));
ColumnVector expected = ColumnVector.fromDecimals(new BigDecimal("923121331938210123.321"),
new BigDecimal("9223372036854775808.191"),
new BigDecimal("9328323982309091029831.002"))) {
assertColumnsAreEqual(expected, values);
}
}
@Test
void testCastByteToString() {
Byte[] byteValues = {1, 3, 45, -0, null, Byte.MIN_VALUE, Byte.MAX_VALUE};
String[] stringByteValues = getStringArray(byteValues);
testCastFixedWidthToStringsAndBack(DType.INT8, () -> ColumnVector.fromBoxedBytes(byteValues), () -> ColumnVector.fromStrings(stringByteValues));
}
@Test
void testCastShortToString() {
Short[] shortValues = {1, 3, 45, -0, null, Short.MIN_VALUE, Short.MAX_VALUE};
String[] stringShortValues = getStringArray(shortValues);
testCastFixedWidthToStringsAndBack(DType.INT16, () -> ColumnVector.fromBoxedShorts(shortValues), () -> ColumnVector.fromStrings(stringShortValues));
}
@Test
void testCastIntToString() {
Integer[] integerArray = {1, -2, 3, null, 8, Integer.MIN_VALUE, Integer.MAX_VALUE};
String[] stringIntValues = getStringArray(integerArray);
testCastFixedWidthToStringsAndBack(DType.INT32, () -> ColumnVector.fromBoxedInts(integerArray), () -> ColumnVector.fromStrings(stringIntValues));
}
@Test
void testCastLongToString() {
Long[] longValues = {null, 3L, 2L, -43L, null, Long.MIN_VALUE, Long.MAX_VALUE};
String[] stringLongValues = getStringArray(longValues);
testCastFixedWidthToStringsAndBack(DType.INT64, () -> ColumnVector.fromBoxedLongs(longValues), () -> ColumnVector.fromStrings(stringLongValues));
}
@Test
void testCastFloatToString() {
Float[] floatValues = {Float.NaN, null, 03f, -004f, 12f};
String[] stringFloatValues = getStringArray(floatValues);
testCastFixedWidthToStringsAndBack(DType.FLOAT32, () -> ColumnVector.fromBoxedFloats(floatValues), () -> ColumnVector.fromStrings(stringFloatValues));
}
@Test
void testCastDoubleToString() {
Double[] doubleValues = {Double.NaN, Double.NEGATIVE_INFINITY, 4d, 98d, null, Double.POSITIVE_INFINITY};
// Creating the string array manually because cudf converts POSITIVE_INFINITY to "Inf" instead of Java's "Infinity"
String[] stringDoubleValues = {"NaN","-Inf", "4.0", "98.0", null, "Inf"};
testCastFixedWidthToStringsAndBack(DType.FLOAT64, () -> ColumnVector.fromBoxedDoubles(doubleValues), () -> ColumnVector.fromStrings(stringDoubleValues));
}
@Test
void testCastBoolToString() {
Boolean[] booleans = {true, false, false};
String[] stringBools = getStringArray(booleans);
testCastFixedWidthToStringsAndBack(DType.BOOL8, () -> ColumnVector.fromBoxedBooleans(booleans), () -> ColumnVector.fromStrings(stringBools));
}
@Test
void testCastDecimal32ToString() {
Integer[] unScaledValues = {0, null, 3, 2, -43, null, 5234, -73451, 348093, -234810};
String[] strDecimalValues = new String[unScaledValues.length];
for (int scale : new int[]{-2, -1, 0, 1, 2}) {
for (int i = 0; i < strDecimalValues.length; i++) {
Long value = unScaledValues[i] == null ? null : Long.valueOf(unScaledValues[i]);
strDecimalValues[i] = dumpDecimal(value, scale);
}
testCastFixedWidthToStringsAndBack(DType.create(DType.DTypeEnum.DECIMAL32, scale),
() -> ColumnVector.decimalFromBoxedInts(scale, unScaledValues),
() -> ColumnVector.fromStrings(strDecimalValues));
}
}
@Test
void testCastDecimal64ToString() {
Long[] unScaledValues = {0L, null, 3L, 2L, -43L, null, 234802L, -94582L, 1234208124L, -2342348023812L};
String[] strDecimalValues = new String[unScaledValues.length];
for (int scale : new int[]{-5, -2, -1, 0, 1, 2, 5}) {
for (int i = 0; i < strDecimalValues.length; i++) {
strDecimalValues[i] = dumpDecimal(unScaledValues[i], scale);
}
testCastFixedWidthToStringsAndBack(DType.create(DType.DTypeEnum.DECIMAL64, scale),
() -> ColumnVector.decimalFromBoxedLongs(scale, unScaledValues),
() -> ColumnVector.fromStrings(strDecimalValues));
}
}
/**
* Helper function to create decimal strings that can be processed by the castStringToDecimal functor.
* We cannot simply create decimal strings via `String.valueOf` because castStringToDecimal does not
* support scientific notation yet.
*
* Issue for scientific notation: https://github.com/rapidsai/cudf/issues/7665
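*
* A few illustrative cases: dumpDecimal(123L, 1) returns "1230",
* dumpDecimal(123L, -2) returns "1.23", and dumpDecimal(-5L, -3) returns "-0.005".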
*/
private static String dumpDecimal(Long unscaledValue, int scale) {
if (unscaledValue == null) return null;
StringBuilder builder = new StringBuilder();
if (unscaledValue < 0) builder.append('-');
String absValue = String.valueOf(Math.abs(unscaledValue));
if (scale >= 0) {
builder.append(absValue);
for (int i = 0; i < scale; i++) builder.append('0');
return builder.toString();
}
if (absValue.length() <= -scale) {
builder.append('0').append('.');
for (int i = 0; i < -scale - absValue.length(); i++) builder.append('0');
builder.append(absValue);
} else {
int split = absValue.length() + scale;
builder.append(absValue.substring(0, split))
.append('.')
.append(absValue.substring(split));
}
return builder.toString();
}
private static <T> String[] getStringArray(T[] input) {
String[] result = new String[input.length];
for (int i = 0 ; i < input.length ; i++) {
if (input[i] == null) {
result[i] = null;
} else {
result[i] = String.valueOf(input[i]);
}
}
return result;
}
private static void testCastFixedWidthToStringsAndBack(DType type, Supplier<ColumnVector> fixedWidthSupplier,
Supplier<ColumnVector> stringColumnSupplier) {
try (ColumnVector fixedWidthColumn = fixedWidthSupplier.get();
ColumnVector stringColumn = stringColumnSupplier.get();
ColumnVector fixedWidthCastedToString = fixedWidthColumn.castTo(DType.STRING);
ColumnVector stringCastedToFixedWidth = stringColumn.castTo(type)) {
assertColumnsAreEqual(stringColumn, fixedWidthCastedToString);
assertColumnsAreEqual(fixedWidthColumn, stringCastedToFixedWidth);
}
}
@Test
void testCastIntToDecimal() {
testCastNumericToDecimalsAndBack(DType.INT32, true, 0,
() -> ColumnVector.fromBoxedInts(1, -21, 345, null, 8008, Integer.MIN_VALUE, Integer.MAX_VALUE),
() -> ColumnVector.fromBoxedInts(1, -21, 345, null, 8008, Integer.MIN_VALUE, Integer.MAX_VALUE),
new Long[]{1L, -21L, 345L, null, 8008L, (long) Integer.MIN_VALUE, (long) Integer.MAX_VALUE}
);
testCastNumericToDecimalsAndBack(DType.INT32, false, -2,
() -> ColumnVector.fromBoxedInts(1, -21, 345, null, 8008, 0, 123456),
() -> ColumnVector.fromBoxedInts(1, -21, 345, null, 8008, 0, 123456),
new Long[]{100L, -2100L, 34500L, null, 800800L, 0L, 12345600L}
);
testCastNumericToDecimalsAndBack(DType.INT32, false, 2,
() -> ColumnVector.fromBoxedInts(1, -21, 345, null, 8008, 0, 123456),
() -> ColumnVector.fromBoxedInts(0, 0, 300, null, 8000, 0, 123400),
new Long[]{0L, 0L, 3L, null, 80L, 0L, 1234L}
);
}
@Test
void testCastLongToDecimal() {
testCastNumericToDecimalsAndBack(DType.INT64, false, 0,
() -> ColumnVector.fromBoxedLongs(1L, -21L, 345L, null, 8008L, Long.MIN_VALUE, Long.MAX_VALUE),
() -> ColumnVector.fromBoxedLongs(1L, -21L, 345L, null, 8008L, Long.MIN_VALUE, Long.MAX_VALUE),
new Long[]{1L, -21L, 345L, null, 8008L, Long.MIN_VALUE, Long.MAX_VALUE}
);
testCastNumericToDecimalsAndBack(DType.INT64, false, -1,
() -> ColumnVector.fromBoxedLongs(1L, -21L, 345L, null, 8008L, 0L, 123456L),
() -> ColumnVector.fromBoxedLongs(1L, -21L, 345L, null, 8008L, 0L, 123456L),
new Long[]{10L, -210L, 3450L, null, 80080L, 0L, 1234560L}
);
testCastNumericToDecimalsAndBack(DType.INT64, false, 1,
() -> ColumnVector.fromBoxedLongs(1L, -21L, 345L, null, 8018L, 0L, 123456L),
() -> ColumnVector.fromBoxedLongs(0L, -20L, 340L, null, 8010L, 0L, 123450L),
new Long[]{0L, -2L, 34L, null, 801L, 0L, 12345L}
);
}
@Test
void testCastDecimal64ToDecimal128() {
testCastDecimal128(DType.DTypeEnum.DECIMAL64, DType.DTypeEnum.DECIMAL128, 0,
() -> ColumnVector.fromBoxedLongs(1L, -21L, 345L, null, 8008L, Long.MIN_VALUE, Long.MAX_VALUE),
() -> ColumnVector.fromDecimals(new BigDecimal(1), new BigDecimal(-21), new BigDecimal(345),
null, new BigDecimal(8008), new BigDecimal(Long.MIN_VALUE), new BigDecimal(Long.MAX_VALUE)),
new BigInteger[]{new BigInteger("1"), new BigInteger("-21"),
new BigInteger("345"), null, new BigInteger("8008"),
new BigInteger(String.valueOf(Long.MIN_VALUE)),
new BigInteger(String.valueOf(Long.MAX_VALUE))}
);
testCastDecimal128(DType.DTypeEnum.DECIMAL32, DType.DTypeEnum.DECIMAL128, 0,
() -> ColumnVector.fromBoxedInts(1, 21, 345, null, 8008, Integer.MIN_VALUE, Integer.MAX_VALUE),
() -> ColumnVector.decimalFromBigInt(0, new BigInteger("1"), new BigInteger("21"),
new BigInteger("345"), null, new BigInteger("8008"),
new BigInteger(String.valueOf(Integer.MIN_VALUE)),
new BigInteger(String.valueOf(Integer.MAX_VALUE))),
new BigInteger[]{new BigInteger("1"), new BigInteger("21"),
new BigInteger("345"), null, new BigInteger("8008"),
new BigInteger(String.valueOf(Integer.MIN_VALUE)),
new BigInteger(String.valueOf(Integer.MAX_VALUE))}
);
}
@Test
void testCastFloatToDecimal() {
testCastNumericToDecimalsAndBack(DType.FLOAT32, true, 0,
() -> ColumnVector.fromBoxedFloats(1.0f, 2.1f, -3.23f, null, 2.41281f, 1378952.001f),
() -> ColumnVector.fromBoxedFloats(1f, 2f, -3f, null, 2f, 1378952f),
new Long[]{1L, 2L, -3L, null, 2L, 1378952L}
);
testCastNumericToDecimalsAndBack(DType.FLOAT32, true, -1,
() -> ColumnVector.fromBoxedFloats(1.0f, 2.1f, -3.23f, null, 2.41281f, 1378952.001f),
() -> ColumnVector.fromBoxedFloats(1f, 2.1f, -3.2f, null, 2.4f, 1378952f),
new Long[]{10L, 21L, -32L, null, 24L, 13789520L}
);
testCastNumericToDecimalsAndBack(DType.FLOAT32, true, 1,
() -> ColumnVector.fromBoxedFloats(1.0f, 21.1f, -300.23f, null, 24128.1f, 1378952.001f),
() -> ColumnVector.fromBoxedFloats(0f, 20f, -300f, null, 24120f, 1378950f),
new Long[]{0L, 2L, -30L, null, 2412L, 137895L}
);
}
@Test
void testCastDoubleToDecimal() {
testCastNumericToDecimalsAndBack(DType.FLOAT64, false, 0,
() -> ColumnVector.fromBoxedDoubles(1.0, 2.1, -3.23, null, 2.41281, (double) Long.MAX_VALUE),
() -> ColumnVector.fromBoxedDoubles(1.0, 2.0, -3.0, null, 2.0, (double) Long.MAX_VALUE),
new Long[]{1L, 2L, -3L, null, 2L, Long.MAX_VALUE}
);
testCastNumericToDecimalsAndBack(DType.FLOAT64, false, -2,
() -> ColumnVector.fromBoxedDoubles(1.0, 2.1, -3.23, null, 2.41281, -55.01999),
() -> ColumnVector.fromBoxedDoubles(1.0, 2.1, -3.23, null, 2.41, -55.01),
new Long[]{100L, 210L, -323L, null, 241L, -5501L}
);
testCastNumericToDecimalsAndBack(DType.FLOAT64, false, 1,
() -> ColumnVector.fromBoxedDoubles(1.0, 23.1, -3089.23, null, 200.41281, -199.01999),
() -> ColumnVector.fromBoxedDoubles(0.0, 20.0, -3080.0, null, 200.0, -190.0),
new Long[]{0L, 2L, -308L, null, 20L, -19L}
);
}
@Test
void testCastDecimalToDecimal() {
// DECIMAL32(scale: 0) -> DECIMAL32(scale: 0)
testCastNumericToDecimalsAndBack(DType.create(DType.DTypeEnum.DECIMAL32, 0), true, -0,
() -> ColumnVector.decimalFromInts(0, 1, 12, -234, 5678, Integer.MIN_VALUE / 100),
() -> ColumnVector.decimalFromInts(0, 1, 12, -234, 5678, Integer.MIN_VALUE / 100),
new Long[]{1L, 12L, -234L, 5678L, (long) Integer.MIN_VALUE / 100}
);
// DECIMAL32(scale: 0) -> DECIMAL64(scale: -2)
testCastNumericToDecimalsAndBack(DType.create(DType.DTypeEnum.DECIMAL32, 0), false, -2,
() -> ColumnVector.decimalFromInts(0, 1, 12, -234, 5678, Integer.MIN_VALUE / 100),
() -> ColumnVector.decimalFromInts(0, 1, 12, -234, 5678, Integer.MIN_VALUE / 100),
new Long[]{100L, 1200L, -23400L, 567800L, (long) Integer.MIN_VALUE / 100 * 100}
);
// DECIMAL64(scale: -3) -> DECIMAL64(scale: -1)
DType dt = DType.create(DType.DTypeEnum.DECIMAL64, -3);
testCastNumericToDecimalsAndBack(dt, false, -1,
() -> ColumnVector.decimalFromDoubles(dt, RoundingMode.UNNECESSARY, -1000.1, 1.222, 0.03, -4.678, 16789431.0),
() -> ColumnVector.decimalFromDoubles(dt, RoundingMode.UNNECESSARY, -1000.1, 1.2, 0, -4.6, 16789431.0),
new Long[]{-10001L, 12L, 0L, -46L, 167894310L}
);
// DECIMAL64(scale: -3) -> DECIMAL64(scale: 2)
DType dt2 = DType.create(DType.DTypeEnum.DECIMAL64, -3);
testCastNumericToDecimalsAndBack(dt2, false, 2,
() -> ColumnVector.decimalFromDoubles(dt2, RoundingMode.UNNECESSARY, -1013.1, 14.222, 780.03, -4.678, 16789431.0),
() -> ColumnVector.decimalFromDoubles(dt2, RoundingMode.UNNECESSARY, -1000, 0, 700, 0, 16789400),
new Long[]{-10L, 0L, 7L, 0L, 167894L}
);
// DECIMAL64(scale: -3) -> DECIMAL32(scale: -3)
testCastNumericToDecimalsAndBack(dt2, true, -3,
() -> ColumnVector.decimalFromDoubles(dt2, RoundingMode.UNNECESSARY, -1013.1, 14.222, 780.03, -4.678, 16789.0),
() -> ColumnVector.decimalFromDoubles(dt2, RoundingMode.UNNECESSARY, -1013.1, 14.222, 780.03, -4.678, 16789.0),
new Long[]{-1013100L, 14222L, 780030L, -4678L, 16789000L}
);
}
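/**
 * Round-trip helper for the cast tests above: casts {@code sourceData} to
 * DECIMAL32 or DECIMAL64 (per {@code isDec32}) at the given scale, checks the
 * unscaled backing values against {@code unscaledDecimal}, then casts back to
 * {@code sourceType} and compares the result with {@code returnData}.
 */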
private static void testCastNumericToDecimalsAndBack(DType sourceType, boolean isDec32, int scale,
Supplier<ColumnVector> sourceData,
Supplier<ColumnVector> returnData,
Long[] unscaledDecimal) {
DType decimalType = DType.create(isDec32 ? DType.DTypeEnum.DECIMAL32 : DType.DTypeEnum.DECIMAL64, scale);
try (ColumnVector sourceColumn = sourceData.get();
ColumnVector expectedColumn = returnData.get();
ColumnVector decimalColumn = sourceColumn.castTo(decimalType);
HostColumnVector hostDecimalColumn = decimalColumn.copyToHost();
ColumnVector returnColumn = decimalColumn.castTo(sourceType)) {
for (int i = 0; i < sourceColumn.rows; i++) {
Long actual = hostDecimalColumn.isNull(i) ? null :
(isDec32 ? hostDecimalColumn.getInt(i) : hostDecimalColumn.getLong(i));
assertEquals(unscaledDecimal[i], actual);
}
assertColumnsAreEqual(expectedColumn, returnColumn);
}
}
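/**
 * Decimal128 cast helper: casts {@code sourceData} to the target decimal type
 * at the given scale, checks the unscaled values against
 * {@code unscaledDecimal}, and compares a cast back to that same decimal type
 * with {@code returnData}.
 */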
private static void testCastDecimal128(DType.DTypeEnum sourceType, DType.DTypeEnum targetType, int scale,
Supplier<ColumnVector> sourceData,
Supplier<ColumnVector> returnData,
Object[] unscaledDecimal) {
DType decimalType = DType.create(targetType, scale);
try (ColumnVector sourceColumn = sourceData.get();
ColumnVector expectedColumn = returnData.get();
ColumnVector decimalColumn = sourceColumn.castTo(decimalType);
HostColumnVector hostDecimalColumn = decimalColumn.copyToHost();
ColumnVector returnColumn = decimalColumn.castTo(DType.create(decimalType.typeId, scale))) {
for (int i = 0; i < sourceColumn.rows; i++) {
Object actual = hostDecimalColumn.isNull(i) ? null :
(decimalType.typeId == DType.DTypeEnum.DECIMAL128 ? hostDecimalColumn.getBigDecimal(i).unscaledValue() :
((decimalType.typeId == DType.DTypeEnum.DECIMAL64) ? hostDecimalColumn.getLong(i) : hostDecimalColumn.getInt(i)));
assertEquals(unscaledDecimal[i], actual);
}
assertColumnsAreEqual(expectedColumn, returnColumn);
}
}
@Test
void testIsTimestamp() {
final String[] TIMESTAMP_STRINGS = {
"2018-07-04 12:00:00",
"",
null,
"2023-01-25",
"2023-01-25 07:32:12",
"2018-07-04 12:00:00"
};
try (ColumnVector timestampStrings = ColumnVector.fromStrings(TIMESTAMP_STRINGS);
ColumnVector isTimestamp = timestampStrings.isTimestamp("%Y-%m-%d %H:%M:%S");
ColumnVector expected = ColumnVector.fromBoxedBooleans(
true, false, null, false, true, true)) {
assertColumnsAreEqual(expected, isTimestamp);
}
try (ColumnVector timestampStrings = ColumnVector.fromStrings(TIMESTAMP_STRINGS);
ColumnVector isTimestamp = timestampStrings.isTimestamp("%Y-%m-%d");
ColumnVector expected = ColumnVector.fromBoxedBooleans(
true, false, null, true, true, true)) {
assertColumnsAreEqual(expected, isTimestamp);
}
}
@Test
void testCastTimestampAsString() {
final String[] TIMES_S_STRING = {
"2018-07-04 12:00:00",
"2023-01-25 07:32:12",
"2018-07-04 12:00:00"};
final long[] TIMES_S = {
1530705600L, //'2018-07-04 12:00:00'
1674631932L, //'2023-01-25 07:32:12'
1530705600L}; //'2018-07-04 12:00:00'
final long[] TIMES_NS = {
1530705600115254330L, //'2018-07-04 12:00:00.115254330'
1674631932929861604L, //'2023-01-25 07:32:12.929861604'
1530705600115254330L}; //'2018-07-04 12:00:00.115254330'
final String[] TIMES_NS_STRING = {
"2018-07-04 12:00:00.115254330",
"2023-01-25 07:32:12.929861604",
"2018-07-04 12:00:00.115254330"};
// all format specifiers supported by cudf
final String[] TIMES_NS_STRING_ALL = {
"04::07::18::2018::12::00::00::115254330",
"25::01::23::2023::07::32::12::929861604",
"04::07::18::2018::12::00::00::115254330"};
// Seconds
try (ColumnVector s_string_times = ColumnVector.fromStrings(TIMES_S_STRING);
ColumnVector s_timestamps = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector timestampsAsStrings = s_timestamps.asStrings("%Y-%m-%d %H:%M:%S");
ColumnVector timestampsAsStringsUsingDefaultFormat = s_timestamps.asStrings()) {
assertColumnsAreEqual(s_string_times, timestampsAsStrings);
assertColumnsAreEqual(timestampsAsStringsUsingDefaultFormat, timestampsAsStrings);
}
// Nanoseconds
try (ColumnVector ns_string_times = ColumnVector.fromStrings(TIMES_NS_STRING);
ColumnVector ns_timestamps = ColumnVector.timestampNanoSecondsFromLongs(TIMES_NS);
ColumnVector ns_string_times_all = ColumnVector.fromStrings(TIMES_NS_STRING_ALL);
ColumnVector allSupportedFormatsTimestampAsStrings = ns_timestamps.asStrings("%d::%m::%y::%Y::%H::%M::%S::%9f");
ColumnVector timestampsAsStrings = ns_timestamps.asStrings("%Y-%m-%d %H:%M:%S.%9f")) {
assertColumnsAreEqual(ns_string_times, timestampsAsStrings);
assertColumnsAreEqual(allSupportedFormatsTimestampAsStrings, ns_string_times_all);
}
}
@Test
@Disabled("Negative timestamp values are not currently supported. " +
"See github issue https://github.com/rapidsai/cudf/issues/3116 for details")
void testCastNegativeTimestampAsString() {
final String[] NEG_TIME_S_STRING = {"1965-10-26 14:01:12",
"1960-02-06 19:22:11"};
final long[] NEG_TIME_S = {-131968728L, //'1965-10-26 14:01:12'
-312439069L}; //'1960-02-06 19:22:11'
final long[] NEG_TIME_NS = {-131968727761702469L}; //'1965-10-26 14:01:12.238297531'
final String[] NEG_TIME_NS_STRING = {"1965-10-26 14:01:12.238297531"};
// Seconds
try (ColumnVector unsupported_s_string_times = ColumnVector.fromStrings(NEG_TIME_S_STRING);
ColumnVector unsupported_s_timestamps = ColumnVector.timestampSecondsFromLongs(NEG_TIME_S)) {
assertColumnsAreEqual(unsupported_s_string_times, unsupported_s_timestamps);
}
// Nanoseconds
try (ColumnVector unsupported_ns_string_times = ColumnVector.fromStrings(NEG_TIME_NS_STRING);
ColumnVector unsupported_ns_timestamps = ColumnVector.timestampSecondsFromLongs(NEG_TIME_NS)) {
assertColumnsAreEqual(unsupported_ns_string_times, unsupported_ns_timestamps);
}
}
@Test
void testCastStringToByteList() {
List<Byte> list1 = Arrays.asList((byte)0x54, (byte)0x68, (byte)0xc3, (byte)0xa9, (byte)0x73,
(byte)0xc3, (byte)0xa9);
List<Byte> list2 = null;
List<Byte> list3 = Arrays.asList((byte)0x0d, (byte)0xed, (byte)0x9c, (byte)0xa0, (byte)0xc3,
(byte)0xa9, (byte)0xed, (byte)0x9c, (byte)0xa1);
List<Byte> list4 = Arrays.asList((byte)0x41, (byte)0x52, (byte)0xc3, (byte)0xa9);
List<Byte> list5 = Arrays.asList((byte)0x5c, (byte)0x54, (byte)0x48, (byte)0x45, (byte)0x09,
(byte)0x38, (byte)0xed, (byte)0x9c, (byte)0xa0);
List<Byte> list6 = Arrays.asList((byte)0x74, (byte)0xc3, (byte)0xa9, (byte)0x73, (byte)0x74,
(byte)0x20, (byte)0x73, (byte)0x74, (byte)0x72, (byte)0x69, (byte)0x6e, (byte)0x67, (byte)0x73);
List<Byte> list7 = Arrays.asList();
List<Byte> list8 = Arrays.asList((byte)0xc3, (byte)0xa9, (byte)0xc3, (byte)0xa9);
try(ColumnVector cv = ColumnVector.fromStrings("Thésé", null, "\r\ud720é\ud721", "ARé",
"\\THE\t8\ud720", "tést strings", "", "éé");
ColumnVector res = cv.asByteList(true);
ColumnVector expected = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.UINT8)), list1, list2, list3, list4, list5,
list6, list7, list8)) {
assertColumnsAreEqual(expected, res);
}
}
@Test
void testCastIntegerToByteList() {
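// asByteList(true) flips the native byte order, so on a little-endian host the
// expected lists below are the big-endian encodings of each int (e.g. 100 ->
// 0x00 0x00 0x00 0x64); testCastFloatToByteList below passes false and keeps
// the native little-endian layout.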
List<Byte> list1 = Arrays.asList((byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00);
List<Byte> list2 = Arrays.asList((byte)0x00, (byte)0x00, (byte)0x00, (byte)0x64);
List<Byte> list3 = Arrays.asList((byte)0xff, (byte)0xff, (byte)0xff, (byte)0x9c);
List<Byte> list4 = Arrays.asList((byte)0x80, (byte)0x00, (byte)0x00, (byte)0x00);
List<Byte> list5 = Arrays.asList((byte)0x7f, (byte)0xff, (byte)0xff, (byte)0xff);
try(ColumnVector cv = ColumnVector.fromBoxedInts(0, 100, -100, Integer.MIN_VALUE, Integer.MAX_VALUE);
ColumnVector res = cv.asByteList(true);
ColumnVector expected = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.UINT8)), list1, list2, list3, list4, list5)) {
assertColumnsAreEqual(expected, res);
}
}
@Test
void testCastFloatToByteList() {
List<Byte> list1 = Arrays.asList((byte)0x00, (byte)0x00, (byte)0x00, (byte)0x00);
List<Byte> list2 = Arrays.asList((byte)0x00, (byte)0x00, (byte)0xc8, (byte)0x42);
List<Byte> list3 = Arrays.asList((byte)0x00, (byte)0x00, (byte)0xc8, (byte)0xc2);
List<Byte> list4 = Arrays.asList((byte)0x00, (byte)0x00, (byte)0xc0, (byte)0x7f);
List<Byte> list5 = Arrays.asList((byte)0xff, (byte)0xff, (byte)0x7f, (byte)0x7f);
List<Byte> list6 = Arrays.asList((byte)0x00, (byte)0x00, (byte)0x80, (byte)0xff);
try(ColumnVector cv = ColumnVector.fromBoxedFloats((float)0.0, (float)100.0, (float)-100.0,
-Float.NaN, Float.MAX_VALUE, Float.NEGATIVE_INFINITY);
ColumnVector res = cv.asByteList(false);
ColumnVector expected = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.UINT8)), list1, list2, list3, list4, list5, list6)) {
assertColumnsAreEqual(expected, res);
}
}
@Test
void testGetBytesFromList() {
List<Byte> list = Arrays.asList((byte)0x41, (byte)0x52, (byte)0xc3, (byte)0xa9);
try(ColumnVector cv = ColumnVector.fromStrings("ARé");
ColumnVector bytes = cv.asByteList(false);
HostColumnVector hostRes = bytes.copyToHost()) {
byte[] result = hostRes.getBytesFromList(0);
for(int i = 0; i < result.length; i++) {
assertEquals(list.get(i).byteValue(), result[i]);
}
}
}
@Test
void testContainsScalar() {
try (ColumnVector columnVector = ColumnVector.fromInts(1, 43, 42, 11, 2);
Scalar s0 = Scalar.fromInt(3);
Scalar s1 = Scalar.fromInt(43)) {
assertFalse(columnVector.contains(s0));
assertTrue(columnVector.contains(s1));
}
}
@Test
void testContainsVector() {
try (ColumnVector columnVector = ColumnVector.fromBoxedInts(1, null, 43, 42, 11, 2);
ColumnVector cv0 = ColumnVector.fromBoxedInts(1, 3, null, 11);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, null, false, false, true, false);
ColumnVector result = columnVector.contains(cv0)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector columnVector = ColumnVector.fromStrings("1", "43", "42", "11", "2");
ColumnVector cv0 = ColumnVector.fromStrings("1", "3", "11");
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, true, false);
ColumnVector result = columnVector.contains(cv0)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testStringOpsEmpty() {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd", null, "");
Scalar emptyString = Scalar.fromString("");
ColumnVector found = sv.stringContains(emptyString);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, null, true)) {
assertColumnsAreEqual(found, expected);
}
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd", null, "");
Scalar emptyString = Scalar.fromString("");
ColumnVector found = sv.startsWith(emptyString);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, null, true)) {
assertColumnsAreEqual(found, expected);
}
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd", null, "");
Scalar emptyString = Scalar.fromString("");
ColumnVector found = sv.endsWith(emptyString);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, null, true)) {
assertColumnsAreEqual(found, expected);
}
try (ColumnVector sv = ColumnVector.fromStrings("Héllo", "thésé", null, "ARé", "tést strings");
Scalar emptyString = Scalar.fromString("");
ColumnVector found = sv.stringLocate(emptyString, 0, -1);
ColumnVector expected = ColumnVector.fromBoxedInts(0, 0, null, 0, 0)) {
assertColumnsAreEqual(found, expected);
}
}
@Test
void testStringFindOperations() {
try (ColumnVector testStrings = ColumnVector.fromStrings("", null, "abCD", "1a\"\u0100B1", "a\"\u0100B1", "1a\"\u0100B",
"1a\"\u0100B1\n\t\'", "1a\"\u0100B1\u0453\u1322\u5112", "1a\"\u0100B1Fg26",
"1a\"\u0100B1\\\"\r1a\"\u0100B1", "1a\"\u0100B1\u0498\u1321\u51091a\"\u0100B1",
"1a\"\u0100B1H2O11a\"\u0100B1", "1a\"\u0100B1\\\"\r1a\"\u0100B1",
"\n\t\'1a\"\u0100B1", "\u0453\u1322\u51121a\"\u0100B1", "Fg261a\"\u0100B1");
ColumnVector emptyStrings = ColumnVector.fromStrings();
Scalar patternString = Scalar.fromString("1a\"\u0100B1");
ColumnVector startsResult = testStrings.startsWith(patternString);
ColumnVector endsResult = testStrings.endsWith(patternString);
ColumnVector containsResult = testStrings.stringContains(patternString);
ColumnVector expectedStarts = ColumnVector.fromBoxedBooleans(false, null, false, true, false,
false, true, true, true, true, true,
true, true, false, false, false);
ColumnVector expectedEnds = ColumnVector.fromBoxedBooleans(false, null, false, true, false,
false, false, false, false, true, true,
true, true, true, true, true);
ColumnVector expectedContains = ColumnVector.fromBoxedBooleans(false, null, false, true, false, false,
true, true, true, true, true,
true, true, true, true, true);
ColumnVector startsEmpty = emptyStrings.startsWith(patternString);
ColumnVector endsEmpty = emptyStrings.endsWith(patternString);
ColumnVector containsEmpty = emptyStrings.stringContains(patternString);
ColumnVector expectedEmpty = ColumnVector.fromBoxedBooleans()) {
assertColumnsAreEqual(startsResult, expectedStarts);
assertColumnsAreEqual(endsResult, expectedEnds);
assertColumnsAreEqual(expectedContains, containsResult);
assertColumnsAreEqual(startsEmpty, expectedEmpty);
assertColumnsAreEqual(endsEmpty, expectedEmpty);
assertColumnsAreEqual(expectedEmpty, containsEmpty);
}
}
@Test
void testExtractRe() {
try (ColumnVector input = ColumnVector.fromStrings("a1", "b2", "c3", null);
Table expected = new Table.TestBuilder()
.column("a", "b", null, null)
.column("1", "2", null, null)
.build()) {
try (Table found = input.extractRe("([ab])(\\d)")) {
assertTablesAreEqual(expected, found);
}
try (Table found = input.extractRe(new RegexProgram("([ab])(\\d)"))) {
assertTablesAreEqual(expected, found);
}
}
}
@Test
void testExtractAllRecord() {
String pattern = "([ab])(\\d)";
RegexProgram regexProg = new RegexProgram(pattern);
try (ColumnVector v = ColumnVector.fromStrings("a1", "b2", "c3", null, "a1b1c3a2");
ColumnVector expectedIdx0 = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("a1"),
Arrays.asList("b2"),
Arrays.asList(),
null,
Arrays.asList("a1", "b1", "a2"));
ColumnVector expectedIdx12 = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("a", "1"),
Arrays.asList("b", "2"),
null,
null,
Arrays.asList("a", "1", "b", "1", "a", "2"))) {
try (ColumnVector resultIdx0 = v.extractAllRecord(pattern, 0);
ColumnVector resultIdx1 = v.extractAllRecord(pattern, 1);
ColumnVector resultIdx2 = v.extractAllRecord(pattern, 2)) {
assertColumnsAreEqual(expectedIdx0, resultIdx0);
assertColumnsAreEqual(expectedIdx12, resultIdx1);
assertColumnsAreEqual(expectedIdx12, resultIdx2);
}
try (ColumnVector resultIdx0 = v.extractAllRecord(regexProg, 0);
ColumnVector resultIdx1 = v.extractAllRecord(regexProg, 1);
ColumnVector resultIdx2 = v.extractAllRecord(regexProg, 2)) {
assertColumnsAreEqual(expectedIdx0, resultIdx0);
assertColumnsAreEqual(expectedIdx12, resultIdx1);
assertColumnsAreEqual(expectedIdx12, resultIdx2);
}
}
}
@Test
void testMatchesRe() {
String patternString1 = "\\d+";
String patternString2 = "[A-Za-z]+\\s@[A-Za-z]+";
String patternString3 = ".*";
String patternString4 = "";
RegexProgram regexProg1 = new RegexProgram(patternString1, CaptureGroups.NON_CAPTURE);
RegexProgram regexProg2 = new RegexProgram(patternString2, CaptureGroups.NON_CAPTURE);
RegexProgram regexProg3 = new RegexProgram(patternString3, CaptureGroups.NON_CAPTURE);
RegexProgram regexProg4 = new RegexProgram(patternString4, CaptureGroups.NON_CAPTURE);
try (ColumnVector testStrings = ColumnVector.fromStrings("", null, "abCD", "ovér the",
"lazy @dog", "1234", "00:0:00");
ColumnVector expected1 = ColumnVector.fromBoxedBooleans(false, null, false, false, false,
true, true);
ColumnVector expected2 = ColumnVector.fromBoxedBooleans(false, null, false, false, true,
false, false);
ColumnVector expected3 = ColumnVector.fromBoxedBooleans(true, null, true, true, true,
true, true)) {
try (ColumnVector res1 = testStrings.matchesRe(patternString1);
ColumnVector res2 = testStrings.matchesRe(patternString2);
ColumnVector res3 = testStrings.matchesRe(patternString3)) {
assertColumnsAreEqual(expected1, res1);
assertColumnsAreEqual(expected2, res2);
assertColumnsAreEqual(expected3, res3);
}
try (ColumnVector res1 = testStrings.matchesRe(regexProg1);
ColumnVector res2 = testStrings.matchesRe(regexProg2);
ColumnVector res3 = testStrings.matchesRe(regexProg3)) {
assertColumnsAreEqual(expected1, res1);
assertColumnsAreEqual(expected2, res2);
assertColumnsAreEqual(expected3, res3);
}
assertThrows(AssertionError.class, () -> {
try (ColumnVector res = testStrings.matchesRe(patternString4)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector res = testStrings.matchesRe(regexProg4)) {}
});
}
}
@Test
void testContainsRe() {
String patternString1 = "\\d+";
String patternString2 = "[A-Za-z]+\\s@[A-Za-z]+";
String patternString3 = ".*";
String patternString4 = "";
RegexProgram regexProg1 = new RegexProgram(patternString1, CaptureGroups.NON_CAPTURE);
RegexProgram regexProg2 = new RegexProgram(patternString2, CaptureGroups.NON_CAPTURE);
RegexProgram regexProg3 = new RegexProgram(patternString3, CaptureGroups.NON_CAPTURE);
RegexProgram regexProg4 = new RegexProgram(patternString4, CaptureGroups.NON_CAPTURE);
try (ColumnVector testStrings = ColumnVector.fromStrings(null, "abCD", "ovér the",
"lazy @dog", "1234", "00:0:00", "abc1234abc", "there @are 2 lazy @dogs");
ColumnVector expected1 = ColumnVector.fromBoxedBooleans(null, false, false, false,
true, true, true, true);
ColumnVector expected2 = ColumnVector.fromBoxedBooleans(null, false, false, true,
false, false, false, true);
ColumnVector expected3 = ColumnVector.fromBoxedBooleans(null, true, true, true,
true, true, true, true)) {
try (ColumnVector res1 = testStrings.containsRe(patternString1);
ColumnVector res2 = testStrings.containsRe(patternString2);
ColumnVector res3 = testStrings.containsRe(patternString3)) {
assertColumnsAreEqual(expected1, res1);
assertColumnsAreEqual(expected2, res2);
assertColumnsAreEqual(expected3, res3);
}
try (ColumnVector res1 = testStrings.containsRe(regexProg1);
ColumnVector res2 = testStrings.containsRe(regexProg2);
ColumnVector res3 = testStrings.containsRe(regexProg3)) {
assertColumnsAreEqual(expected1, res1);
assertColumnsAreEqual(expected2, res2);
assertColumnsAreEqual(expected3, res3);
}
}
try (ColumnVector testStrings = ColumnVector.fromStrings("", null, "abCD", "ovér the",
"lazy @dog", "1234", "00:0:00", "abc1234abc", "there @are 2 lazy @dogs")) {
assertThrows(AssertionError.class, () -> {
try (ColumnVector res = testStrings.containsRe(patternString4)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector res = testStrings.containsRe(regexProg4)) {}
});
}
}
@Test
void testContainsReEmptyInput() {
String patternString1 = ".*";
RegexProgram regexProg1 = new RegexProgram(patternString1, CaptureGroups.NON_CAPTURE);
try (ColumnVector testStrings = ColumnVector.fromStrings("");
ColumnVector res1 = testStrings.containsRe(patternString1);
ColumnVector resReProg1 = testStrings.containsRe(regexProg1);
ColumnVector expected1 = ColumnVector.fromBoxedBooleans(true)) {
assertColumnsAreEqual(expected1, res1);
assertColumnsAreEqual(expected1, resReProg1);
}
}
@Test
void testLike() {
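// like() follows SQL LIKE semantics: '%' matches any sequence of characters
// (including none), '_' matches exactly one character, and the escape scalar
// makes the following '%' or '_' match literally, as the cases below show.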
// Default escape character
try (ColumnVector testStrings = ColumnVector.fromStrings(
"a", "aa", "aaa", "aba", "b", "bb", "bba", "", "áéêú", "a1b2c3");
Scalar patternString1 = Scalar.fromString("a1b2c3");
Scalar patternString2 = Scalar.fromString("__a%");
Scalar defaultEscape = Scalar.fromString("\\");
ColumnVector res1 = testStrings.like(patternString1, defaultEscape);
ColumnVector res2 = testStrings.like(patternString2, defaultEscape);
ColumnVector expected1 = ColumnVector.fromBoxedBooleans(
false, false, false, false, false, false, false, false, false, true);
ColumnVector expected2 = ColumnVector.fromBoxedBooleans(
false, false, true, true, false, false, true, false, false, false)) {
assertColumnsAreEqual(expected1, res1);
assertColumnsAreEqual(expected2, res2);
}
// Non-default escape character
try (ColumnVector testStrings = ColumnVector.fromStrings(
"10%-20%", "10-20", "10%%-20%", "a_b", "b_a", "___", "", "aéb", "_%_", "_%a");
Scalar patternString1 = Scalar.fromString("10%%%%-20%%");
Scalar patternString2 = Scalar.fromString("___%%");
Scalar escapeChar1 = Scalar.fromString("%");
Scalar escapeChar2 = Scalar.fromString("_");
ColumnVector res1 = testStrings.like(patternString1, escapeChar1);
ColumnVector res2 = testStrings.like(patternString2, escapeChar2);
ColumnVector expected1 = ColumnVector.fromBoxedBooleans(
false, false, true, false, false, false, false, false, false, false);
ColumnVector expected2 = ColumnVector.fromBoxedBooleans(
false, false, false, false, false, false, false, false, true, true)) {
assertColumnsAreEqual(expected1, res1);
assertColumnsAreEqual(expected2, res2);
}
assertThrows(AssertionError.class, () -> {
try (ColumnVector testStrings = ColumnVector.fromStrings("a", "B", "cd", null, "");
Scalar defaultEscape = Scalar.fromString("\\");
ColumnVector res = testStrings.like(null, defaultEscape)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector testStrings = ColumnVector.fromStrings("a", "B", "cd", null, "");
Scalar patternString = Scalar.fromString("");
ColumnVector res = testStrings.like(patternString, null)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector testStrings = ColumnVector.fromStrings("a", "B", "cd", null, "");
Scalar patternString = Scalar.fromString("");
Scalar intScalar = Scalar.fromInt(1);
ColumnVector res = testStrings.like(patternString, intScalar)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector testStrings = ColumnVector.fromStrings("a", "B", "cd", null, "");
Scalar intScalar = Scalar.fromInt(1);
Scalar defaultEscape = Scalar.fromString("\\");
ColumnVector res = testStrings.like(intScalar, defaultEscape)) {}
});
}
@Test
void testUrlDecode() {
String[] inputs = new String[] {
"foobar.site%2Fq%3Fx%3D%C3%A9%25",
"a%2Bb%2Dc%2Ad%2Fe",
"1%092%0A3",
"abc%401%2523",
"abc123",
" %09%0D%0A%0C",
"",
null
};
String[] expectedOutputs = new String[] {
"foobar.site/q?x=é%",
"a+b-c*d/e",
"1\t2\n3",
"abc@1%23",
"abc123",
" \t\r\n\f",
"",
null
};
try (ColumnVector v = ColumnVector.fromStrings(inputs);
ColumnVector expected = ColumnVector.fromStrings(expectedOutputs);
ColumnVector actual = v.urlDecode()) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testUrlEncode() {
String[] inputs = new String[] {
"foobar.site/q?x=é%",
"a+b-c*d/e",
"1\t2\n3",
"abc@1%23",
"abc123",
" \t\r\n\f",
"",
null
};
String[] expectedOutputs = new String[] {
"foobar.site%2Fq%3Fx%3D%C3%A9%25",
"a%2Bb-c%2Ad%2Fe",
"1%092%0A3",
"abc%401%2523",
"abc123",
"%20%09%0D%0A%0C",
"",
null
};
try (ColumnVector v = ColumnVector.fromStrings(inputs);
ColumnVector expected = ColumnVector.fromStrings(expectedOutputs);
ColumnVector actual = v.urlEncode()) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testStringFindOperationsThrowsException() {
assertThrows(CudfException.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar emptyString = Scalar.fromString(null);
ColumnVector concat = sv.startsWith(emptyString)) {}
});
assertThrows(CudfException.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar emptyString = Scalar.fromString(null);
ColumnVector concat = sv.endsWith(emptyString)) {}
});
assertThrows(CudfException.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar emptyString = Scalar.fromString(null);
ColumnVector concat = sv.stringContains(emptyString)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
ColumnVector concat = sv.startsWith(null)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
ColumnVector concat = sv.endsWith(null)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar intScalar = Scalar.fromInt(1);
ColumnVector concat = sv.startsWith(intScalar)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar intScalar = Scalar.fromInt(1);
ColumnVector concat = sv.endsWith(intScalar)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector sv = ColumnVector.fromStrings("a", "B", "cd");
Scalar intScalar = Scalar.fromInt(1);
ColumnVector concat = sv.stringContains(intScalar)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector v = ColumnVector.fromInts(1, 43, 42, 11, 2);
Scalar patternString = Scalar.fromString("a");
ColumnVector concat = v.startsWith(patternString)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector v = ColumnVector.fromInts(1, 43, 42, 11, 2);
Scalar patternString = Scalar.fromString("a");
ColumnVector concat = v.endsWith(patternString)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector v = ColumnVector.fromInts(1, 43, 42, 11, 2);
Scalar patternString = Scalar.fromString("a");
ColumnVector concat = v.stringContains(patternString)) {}
});
}
@Test
void testStringLocate() {
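// stringLocate returns the character (not byte) index of the first match in
// [start, end) (-1 for the end means the end of the string), -1 when the
// target is absent, and null for null rows; e.g. "Héllo" finds "é" at index 1.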
try(ColumnVector v = ColumnVector.fromStrings("Héllo", "thésé", null, "\r\ud720é\ud721", "ARé",
"\\THE\t8\ud720", "tést strings", "", "éé");
ColumnVector e_locate1 = ColumnVector.fromBoxedInts(1, 2, null, 2, 2, -1, 1, -1, 0);
ColumnVector e_locate2 = ColumnVector.fromBoxedInts(-1, 2, null, -1, -1, -1, 1, -1, -1);
ColumnVector e_locate3 = ColumnVector.fromBoxedInts(-1, -1, null, 1, -1, 6, -1, -1, -1);
Scalar pattern1 = Scalar.fromString("é");
Scalar pattern2 = Scalar.fromString("és");
Scalar pattern3 = Scalar.fromString("\ud720");
ColumnVector locate1 = v.stringLocate(pattern1, 0, -1);
ColumnVector locate2 = v.stringLocate(pattern2, 0, -1);
ColumnVector locate3 = v.stringLocate(pattern3, 0, -1)) {
assertColumnsAreEqual(locate1, e_locate1);
assertColumnsAreEqual(locate2, e_locate2);
assertColumnsAreEqual(locate3, e_locate3);
}
}
@Test
void testStringLocateOffsets() {
try(ColumnVector v = ColumnVector.fromStrings("Héllo", "thésé", null, "\r\ud720é\ud721", "ARé",
"\\THE\t8\ud720", "tést strings", "", "éé");
Scalar pattern = Scalar.fromString("é");
ColumnVector e_empty = ColumnVector.fromBoxedInts(-1, -1, null, -1, -1, -1, -1, -1, -1);
ColumnVector e_start = ColumnVector.fromBoxedInts(-1, 2, null, 2, 2, -1, -1, -1, -1);
ColumnVector e_end = ColumnVector.fromBoxedInts(1, -1, null, -1, -1, -1, 1, -1, 0);
ColumnVector locate_empty = v.stringLocate(pattern, 13, -1);
ColumnVector locate_start = v.stringLocate(pattern, 2, -1);
ColumnVector locate_end = v.stringLocate(pattern, 0, 2)) {
assertColumnsAreEqual(locate_empty, e_empty);
assertColumnsAreEqual(locate_start, e_start);
assertColumnsAreEqual(locate_end, e_end);
}
}
@Test
void testStringLocateThrowsException() {
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("Héllo", "thésé", null, "ARé", "tést strings");
ColumnVector locate = cv.stringLocate(null, 0, -1)) {}
});
assertThrows(CudfException.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("Héllo", "thésé", null, "ARé", "tést strings");
Scalar pattern = Scalar.fromString(null);
ColumnVector locate = cv.stringLocate(pattern, 0, -1)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("Héllo", "thésé", null, "ARé", "tést strings");
Scalar intScalar = Scalar.fromInt(1);
ColumnVector locate = cv.stringLocate(intScalar, 0, -1)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("Héllo", "thésé", null, "ARé", "tést strings");
Scalar pattern = Scalar.fromString("é");
ColumnVector locate = cv.stringLocate(pattern, -2, -1)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromStrings("Héllo", "thésé", null, "ARé", "tést strings");
Scalar pattern = Scalar.fromString("é");
ColumnVector locate = cv.stringLocate(pattern, 2, 1)) {}
});
assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromInts(1, 43, 42, 11, 2);
Scalar pattern = Scalar.fromString("é");
ColumnVector concat = cv.stringLocate(pattern, 0, -1)) {}
});
}
@Test
void testSubstring() {
try (ColumnVector v = ColumnVector.fromStrings("Héllo", "thésé", null,"", "ARé", "strings");
ColumnVector e_allParameters = ColumnVector.fromStrings("llo", "ésé", null, "", "é", "rin");
ColumnVector e_withoutStop = ColumnVector.fromStrings("llo", "ésé", null, "", "é", "rings");
ColumnVector substring_allParam = v.substring(2, 5);
ColumnVector substring_NoEnd = v.substring(2)) {
assertColumnsAreEqual(e_allParameters, substring_allParam);
assertColumnsAreEqual(e_withoutStop, substring_NoEnd);
}
}
@Test
void testExtractListElements() {
try (ColumnVector v = ColumnVector.fromStrings("Héllo there", "thésé", null, "", "ARé some", "test strings");
ColumnVector expected = ColumnVector.fromStrings("Héllo", "thésé", null, "", "ARé", "test");
ColumnVector list = v.stringSplitRecord(" ");
ColumnVector result = list.extractListElement(0)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testExtractListElementsV() {
try (ColumnVector v = ColumnVector.fromStrings("Héllo there", "thésé", null, "", "ARé some", "test strings");
ColumnVector indices = ColumnVector.fromInts(0, 2, 0, 0, 1, -1);
ColumnVector expected = ColumnVector.fromStrings("Héllo", null, null, "", "some", "strings");
ColumnVector list = v.stringSplitRecord(" ");
ColumnVector result = list.extractListElement(indices)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testDropListDuplicates() {
List<Integer> list1 = Arrays.asList(1, 2);
List<Integer> list2 = Arrays.asList(3, 4, 5);
List<Integer> list3 = Arrays.asList(null, 0, 6, 6, 0);
List<Integer> dedupeList3 = Arrays.asList(0, 6, null);
List<Integer> list4 = Arrays.asList(null, 6, 7, null, 7);
List<Integer> dedupeList4 = Arrays.asList(6, 7, null);
List<Integer> list5 = null;
HostColumnVector.DataType listType = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType, list1, list2, dedupeList3, dedupeList4, list5);
ColumnVector tmp = v.dropListDuplicates();
// Note: dropping duplicates does not guarantee any ordering, so sort the
// result to make the comparison deterministic
ColumnVector result = tmp.listSortRows(false, false)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testDropListDuplicatesWithKeysValues() {
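// Builds a LIST<STRUCT<key, value>> column from two INT32 child columns plus
// offsets, then checks that dropListDuplicatesWithKeysValues collapses
// duplicate keys, keeping the value of the last occurrence (e.g. in list3 key 6
// keeps 90 and key 0 keeps 100); listSortRows normalizes the output order.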
try(ColumnVector inputChildKeys = ColumnVector.fromBoxedInts(
1, 2, // list1
3, 4, 5, // list2
null, 0, 6, 6, 0, // list3
null, 6, 7, null, 7 // list4
// list5 (empty)
);
ColumnVector inputChildVals = ColumnVector.fromBoxedInts(
10, 20, // list1
30, 40, 50, // list2
60, 70, 80, 90, 100, // list3
110, 120, 130, 140, 150 // list4
// list5 (empty)
);
ColumnVector inputStructsKeysVals = ColumnVector.makeStruct(inputChildKeys, inputChildVals);
ColumnVector inputOffsets = ColumnVector.fromInts(0, 2, 5, 10, 15, 15);
ColumnVector inputListsKeysVals = inputStructsKeysVals.makeListFromOffsets(5, inputOffsets)
) {
// Test full input:
try(ColumnVector expectedChildKeys = ColumnVector.fromBoxedInts(
1, 2, // list1
3, 4, 5, // list2
0, 6, null, // list3
6, 7, null // list4
// list5 (empty)
);
ColumnVector expectedChildVals = ColumnVector.fromBoxedInts(
10, 20, // list1
30, 40, 50, // list2
100, 90, 60, // list3
120, 150, 140 // list4
// list5 (empty)
);
ColumnVector expectedStructsKeysVals = ColumnVector.makeStruct(expectedChildKeys, expectedChildVals);
ColumnVector expectedOffsets = ColumnVector.fromInts(0, 2, 5, 8, 11, 11);
ColumnVector expectedListsKeysVals = expectedStructsKeysVals.makeListFromOffsets(5, expectedOffsets);
ColumnVector output = inputListsKeysVals.dropListDuplicatesWithKeysValues();
ColumnVector sortedOutput = output.listSortRows(false, false)
) {
assertColumnsAreEqual(expectedListsKeysVals, sortedOutput);
}
// Test sliced input:
try(ColumnVector expectedChildKeys = ColumnVector.fromBoxedInts(
3, 4, 5, // list1
0, 6, null // list2
);
ColumnVector expectedChildVals = ColumnVector.fromBoxedInts(
30, 40, 50, // list1
100, 90, 60 // list2
);
ColumnVector expectedStructsKeysVals = ColumnVector.makeStruct(expectedChildKeys, expectedChildVals);
ColumnVector expectedOffsets = ColumnVector.fromInts(0, 3, 6);
ColumnVector expectedListsKeysVals = expectedStructsKeysVals.makeListFromOffsets(2, expectedOffsets);
ColumnVector inputSliced = inputListsKeysVals.subVector(1, 3);
ColumnVector output = inputSliced.dropListDuplicatesWithKeysValues();
ColumnVector sortedOutput = output.listSortRows(false, false)
) {
assertColumnsAreEqual(expectedListsKeysVals, sortedOutput);
}
}
}
@Test
void testDropListDuplicatesWithKeysValuesNullable() {
try(ColumnVector inputChildKeys = ColumnVector.fromBoxedInts(
1, 2, // list1
// list2 (null)
3, 4, 5, // list3
null, 0, 6, 6, 0, // list4
null, 6, 7, null, 7 // list5
// list6 (null)
);
ColumnVector inputChildVals = ColumnVector.fromBoxedInts(
10, 20, // list1
// list2 (null)
30, 40, 50, // list3
60, 70, 80, 90, 100, // list4
110, 120, 130, 140, 150 // list5
// list6 (null)
);
ColumnVector inputStructsKeysVals = ColumnVector.makeStruct(inputChildKeys, inputChildVals);
ColumnVector inputOffsets = ColumnVector.fromInts(0, 2, 2, 5, 10, 15, 15);
ColumnVector tmpInputListsKeysVals = inputStructsKeysVals.makeListFromOffsets(6, inputOffsets);
ColumnVector templateBitmask = ColumnVector.fromBoxedInts(1, null, 1, 1, 1, null);
ColumnVector inputListsKeysVals = tmpInputListsKeysVals.mergeAndSetValidity(BinaryOp.BITWISE_AND, templateBitmask)
) {
// Test full input:
try(ColumnVector expectedChildKeys = ColumnVector.fromBoxedInts(
1, 2, // list1
// list2 (null)
3, 4, 5, // list3
0, 6, null, // list4
6, 7, null // list5
// list6 (null)
);
ColumnVector expectedChildVals = ColumnVector.fromBoxedInts(
10, 20, // list1
// list2 (null)
30, 40, 50, // list3
100, 90, 60, // list4
120, 150, 140 // list5
// list6 (null)
);
ColumnVector expectedStructsKeysVals = ColumnVector.makeStruct(expectedChildKeys, expectedChildVals);
ColumnVector expectedOffsets = ColumnVector.fromInts(0, 2, 2, 5, 8, 11, 11);
ColumnVector tmpExpectedListsKeysVals = expectedStructsKeysVals.makeListFromOffsets(6, expectedOffsets);
ColumnVector expectedListsKeysVals = tmpExpectedListsKeysVals.mergeAndSetValidity(BinaryOp.BITWISE_AND, templateBitmask);
ColumnVector output = inputListsKeysVals.dropListDuplicatesWithKeysValues();
ColumnVector sortedOutput = output.listSortRows(false, false)
) {
assertColumnsAreEqual(expectedListsKeysVals, sortedOutput);
}
// Test sliced input:
try(ColumnVector expectedChildKeys = ColumnVector.fromBoxedInts(
// list1 (null)
3, 4, 5, // list2
0, 6, null // list3
);
ColumnVector expectedChildVals = ColumnVector.fromBoxedInts(
// list1 (null)
30, 40, 50, // list2
100, 90, 60 // list3
);
ColumnVector expectedStructsKeysVals = ColumnVector.makeStruct(expectedChildKeys, expectedChildVals);
ColumnVector expectedOffsets = ColumnVector.fromInts(0, 0, 3, 6);
ColumnVector tmpExpectedListsKeysVals = expectedStructsKeysVals.makeListFromOffsets(3, expectedOffsets);
ColumnVector slicedTemplateBitmask = ColumnVector.fromBoxedInts(null, 1, 1);
ColumnVector expectedListsKeysVals = tmpExpectedListsKeysVals.mergeAndSetValidity(BinaryOp.BITWISE_AND, slicedTemplateBitmask);
ColumnVector inputSliced = inputListsKeysVals.subVector(1, 4);
ColumnVector output = inputSliced.dropListDuplicatesWithKeysValues();
ColumnVector sortedOutput = output.listSortRows(false, false)
) {
assertColumnsAreEqual(expectedListsKeysVals, sortedOutput);
}
}
}
@SafeVarargs
public static <T> ColumnVector makeListsColumn(DType childDType, List<T>... rows) {
HostColumnVector.DataType childType = new HostColumnVector.BasicType(true, childDType);
HostColumnVector.DataType listType = new HostColumnVector.ListType(true, childType);
return ColumnVector.fromLists(listType, rows);
}
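  // The tests below exercise listContains: per list row, does the row contain
  // the given scalar key? A null list row yields a null output row rather than
  // false. A minimal call-shape sketch (values illustrative only):
  //
  //   try (ColumnVector lists = makeListsColumn(DType.INT32, Arrays.asList(1, 2));
  //        Scalar key = Scalar.fromInt(2);
  //        ColumnVector found = lists.listContains(key)) {
  //     // found holds a single BOOL8 row: [true]
  //   }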
@Test
void testListContainsString() {
List<String> list0 = Arrays.asList("Héllo there", "thésé");
List<String> list1 = Arrays.asList("", "ARé some", "test strings");
List<String> list2 = Arrays.asList(null, "", "ARé some", "test strings", "thésé");
List<String> list3 = Arrays.asList(null, "", "ARé some", "test strings");
List<String> list4 = null;
try (ColumnVector input = makeListsColumn(DType.STRING, list0, list1, list2, list3, list4);
Scalar searchKey = Scalar.fromString("thésé");
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, true, false, null);
ColumnVector result = input.listContains(searchKey)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testListContainsInt() {
List<Integer> list0 = Arrays.asList(1, 2, 3);
List<Integer> list1 = Arrays.asList(4, 5, 6);
List<Integer> list2 = Arrays.asList(7, 8, 9);
List<Integer> list3 = null;
try (ColumnVector input = makeListsColumn(DType.INT32, list0, list1, list2, list3);
Scalar searchKey = Scalar.fromInt(7);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, true, null);
ColumnVector result = input.listContains(searchKey)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testListContainsStringCol() {
List<String> list0 = Arrays.asList("Héllo there", "thésé");
List<String> list1 = Arrays.asList("", "ARé some", "test strings");
List<String> list2 = Arrays.asList("FOO", "", "ARé some", "test");
List<String> list3 = Arrays.asList(null, "FOO", "", "ARé some", "test");
List<String> list4 = Arrays.asList(null, "FOO", "", "ARé some", "test");
List<String> list5 = null;
try (ColumnVector input = makeListsColumn(DType.STRING, list0, list1, list2, list3, list4, list5);
ColumnVector searchKeys = ColumnVector.fromStrings("thésé", "", "test", "test", "iotA", null);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, true, false, null);
ColumnVector result = input.listContainsColumn(searchKeys)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testListContainsIntCol() {
List<Integer> list0 = Arrays.asList(1, 2, 3);
List<Integer> list1 = Arrays.asList(4, 5, 6);
List<Integer> list2 = Arrays.asList(null, 8, 9);
List<Integer> list3 = Arrays.asList(null, 8, 9);
List<Integer> list4 = null;
try (ColumnVector input = makeListsColumn(DType.INT32, list0, list1, list2, list3, list4);
ColumnVector searchKeys = ColumnVector.fromBoxedInts(3, 3, 8, 3, null);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, true, false, null);
ColumnVector result = input.listContainsColumn(searchKeys)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testListContainsNulls() {
List<String> list0 = Arrays.asList("Héllo there", "thésé");
List<String> list1 = Arrays.asList("", "ARé some", "test strings");
List<String> list2 = Arrays.asList("FOO", "", "ARé some", "test");
List<String> list3 = Arrays.asList(null, "FOO", "", "ARé some", "test");
List<String> list4 = Arrays.asList(null, "FOO", "", "ARé some", "test");
List<String> list5 = null;
try (ColumnVector input = makeListsColumn(DType.STRING, list0, list1, list2, list3, list4, list5);
ColumnVector result = input.listContainsNulls();
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, true, true, null)) {
assertColumnsAreEqual(expected, result);
}
}
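  // listIndexOf returns, per list row, the INT32 position of the search key
  // within that row: -1 when the key is absent, null when the row itself is
  // null. FindOptions picks which occurrence wins when the key repeats, which
  // the duplicated keys in the rows below are chosen to exercise:
  //
  //   try (ColumnVector lists = makeListsColumn(DType.INT32, Arrays.asList(7, 8, 7));
  //        Scalar key = Scalar.fromInt(7);
  //        ColumnVector first = lists.listIndexOf(key, FindOptions.FIND_FIRST);  // [0]
  //        ColumnVector last = lists.listIndexOf(key, FindOptions.FIND_LAST)) {  // [2]
  //   }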
@Test
void testListIndexOfString() {
List<String> list0 = Arrays.asList("Héllo there", "thésé");
List<String> list1 = Arrays.asList("", "ARé some", "test strings");
List<String> list2 = Arrays.asList(null, "", "ARé some", "thésé", "test strings", "thésé");
List<String> list3 = Arrays.asList(null, "", "ARé some", "test strings");
List<String> list4 = null;
try (ColumnVector input = makeListsColumn(DType.STRING, list0, list1, list2, list3, list4);
Scalar searchKey = Scalar.fromString("thésé");
ColumnVector expectedFirst = ColumnVector.fromBoxedInts(1, -1, 3, -1, null);
ColumnVector resultFirst = input.listIndexOf(searchKey, FindOptions.FIND_FIRST);
ColumnVector expectedLast = ColumnVector.fromBoxedInts(1, -1, 5, -1, null);
ColumnVector resultLast = input.listIndexOf(searchKey, FindOptions.FIND_LAST)) {
assertColumnsAreEqual(expectedFirst, resultFirst);
assertColumnsAreEqual(expectedLast, resultLast);
}
}
@Test
void testListIndexOfInt() {
List<Integer> list0 = Arrays.asList(1, 2, 3);
List<Integer> list1 = Arrays.asList(4, 5, 6);
List<Integer> list2 = Arrays.asList(7, 8, 9, 7);
List<Integer> list3 = null;
try (ColumnVector input = makeListsColumn(DType.INT32, list0, list1, list2, list3);
Scalar searchKey = Scalar.fromInt(7);
ColumnVector expectedFirst = ColumnVector.fromBoxedInts(-1, -1, 0, null);
ColumnVector resultFirst = input.listIndexOf(searchKey, FindOptions.FIND_FIRST);
ColumnVector expectedLast = ColumnVector.fromBoxedInts(-1, -1, 3, null);
ColumnVector resultLast = input.listIndexOf(searchKey, FindOptions.FIND_LAST)) {
assertColumnsAreEqual(expectedFirst, resultFirst);
assertColumnsAreEqual(expectedLast, resultLast);
}
}
@Test
void testListIndexOfStringCol() {
List<String> list0 = Arrays.asList("Héllo there", "thésé");
List<String> list1 = Arrays.asList("", "ARé some", "test strings");
List<String> list2 = Arrays.asList("FOO", "", "ARé some", "test");
List<String> list3 = Arrays.asList(null, "FOO", "", "test", "ARé some", "test");
List<String> list4 = Arrays.asList(null, "FOO", "", "ARé some", "test");
List<String> list5 = null;
try (ColumnVector input = makeListsColumn(DType.STRING, list0, list1, list2, list3, list4, list5);
ColumnVector searchKeys = ColumnVector.fromStrings("thésé", "", "test", "test", "iotA", null);
ColumnVector expectedFirst = ColumnVector.fromBoxedInts(1, 0, 3, 3, -1, null);
ColumnVector resultFirst = input.listIndexOf(searchKeys, FindOptions.FIND_FIRST);
ColumnVector expectedLast = ColumnVector.fromBoxedInts(1, 0, 3, 5, -1, null);
ColumnVector resultLast = input.listIndexOf(searchKeys, FindOptions.FIND_LAST)) {
assertColumnsAreEqual(expectedFirst, resultFirst);
assertColumnsAreEqual(expectedLast, resultLast);
}
}
@Test
void testListIndexOfIntCol() {
List<Integer> list0 = Arrays.asList(1, 2, 3);
List<Integer> list1 = Arrays.asList(4, 5, 6);
List<Integer> list2 = Arrays.asList(null, 8, 9, 8);
List<Integer> list3 = Arrays.asList(null, 8, 9);
List<Integer> list4 = null;
try (ColumnVector input = makeListsColumn(DType.INT32, list0, list1, list2, list3, list4);
ColumnVector searchKeys = ColumnVector.fromBoxedInts(3, 3, 8, 3, null);
ColumnVector expectedFirst = ColumnVector.fromBoxedInts(2, -1, 1, -1, null);
ColumnVector resultFirst = input.listIndexOf(searchKeys, FindOptions.FIND_FIRST);
ColumnVector expectedLast = ColumnVector.fromBoxedInts(2, -1, 3, -1, null);
ColumnVector resultLast = input.listIndexOf(searchKeys, FindOptions.FIND_LAST)) {
assertColumnsAreEqual(expectedFirst, resultFirst);
assertColumnsAreEqual(expectedLast, resultLast);
}
}
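  // listSortRows sorts the elements inside each list row while leaving the
  // row order (and null list rows) untouched. Going by how the expectations
  // below pair up with the arguments, the first flag requests descending
  // order and the second treats nulls as the smallest elements.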
@Test
void testListSortRowsWithIntChild() {
List<Integer> list1 = Arrays.asList(1, 3, 0, 2);
List<Integer> ascSortedList1 = Arrays.asList(0, 1, 2, 3);
List<Integer> decSortedList1 = Arrays.asList(3, 2, 1, 0);
List<Integer> list2 = Arrays.asList(7, 5, 6, 4);
List<Integer> ascSortedList2 = Arrays.asList(4, 5, 6, 7);
List<Integer> decSortedList2 = Arrays.asList(7, 6, 5, 4);
List<Integer> list3 = Arrays.asList(-8, null, -9, -10);
List<Integer> ascSortedList3 = Arrays.asList(-10, -9, -8, null);
List<Integer> ascSortedNullMinList3 = Arrays.asList(null, -10, -9, -8);
List<Integer> decSortedList3 = Arrays.asList(null, -8, -9, -10);
List<Integer> decSortedNullMinList3 = Arrays.asList(-8, -9, -10, null);
List<Integer> list4 = Arrays.asList(null, -12, null, 11);
List<Integer> ascSortedList4 = Arrays.asList(-12, 11, null, null);
List<Integer> ascSortedNullMinList4 = Arrays.asList(null, null, -12, 11);
List<Integer> decSortedList4 = Arrays.asList(null, null, 11, -12);
List<Integer> decSortedNullMinList4 = Arrays.asList(11, -12, null, null);
List<Integer> list5 = null;
HostColumnVector.ListType listType = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
// Ascending + NullLargest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
ascSortedList1, ascSortedList2, ascSortedList3, ascSortedList4, list5);
ColumnVector result = v.listSortRows(false, false)) {
assertColumnsAreEqual(expected, result);
}
// Descending + NullLargest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
decSortedList1, decSortedList2, decSortedList3, decSortedList4, list5);
ColumnVector result = v.listSortRows(true, false)) {
assertColumnsAreEqual(expected, result);
}
// Ascending + NullSmallest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
ascSortedList1, ascSortedList2, ascSortedNullMinList3, ascSortedNullMinList4, list5);
ColumnVector result = v.listSortRows(false, true)) {
assertColumnsAreEqual(expected, result);
}
// Descending + NullSmallest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
decSortedList1, decSortedList2, decSortedNullMinList3, decSortedNullMinList4, list5);
ColumnVector result = v.listSortRows(true, true)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testListSortRowsWithStringChild() {
List<String> list1 = Arrays.asList("b", "d", "a", "c");
List<String> ascSortedList1 = Arrays.asList("a", "b", "c", "d");
List<String> decSortedList1 = Arrays.asList("d", "c", "b", "a");
List<String> list2 = Arrays.asList("h", "f", "g", "e");
List<String> ascSortedList2 = Arrays.asList("e", "f", "g", "h");
List<String> decSortedList2 = Arrays.asList("h", "g", "f", "e");
List<String> list3 = Arrays.asList("C", null, "B", "A");
List<String> ascSortedList3 = Arrays.asList("A", "B", "C", null);
List<String> ascSortedNullMinList3 = Arrays.asList(null, "A", "B", "C");
List<String> decSortedList3 = Arrays.asList(null, "C", "B", "A");
List<String> decSortedNullMinList3 = Arrays.asList("C", "B", "A", null);
List<String> list4 = Arrays.asList(null, "D", null, "d");
List<String> ascSortedList4 = Arrays.asList("D", "d", null, null);
List<String> ascSortedNullMinList4 = Arrays.asList(null, null, "D", "d");
List<String> decSortedList4 = Arrays.asList(null, null, "d", "D");
List<String> decSortedNullMinList4 = Arrays.asList("d", "D", null, null);
List<String> list5 = null;
HostColumnVector.ListType listType = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING));
// Ascending + NullLargest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
ascSortedList1, ascSortedList2, ascSortedList3, ascSortedList4, list5);
ColumnVector result = v.listSortRows(false, false)) {
assertColumnsAreEqual(expected, result);
}
// Descending + NullLargest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
decSortedList1, decSortedList2, decSortedList3, decSortedList4, list5);
ColumnVector result = v.listSortRows(true, false)) {
assertColumnsAreEqual(expected, result);
}
// Ascending + NullSmallest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
ascSortedList1, ascSortedList2, ascSortedNullMinList3, ascSortedNullMinList4, list5);
ColumnVector result = v.listSortRows(false, true)) {
assertColumnsAreEqual(expected, result);
}
// Descending + NullSmallest
try (ColumnVector v = ColumnVector.fromLists(listType, list1, list2, list3, list4, list5);
ColumnVector expected = ColumnVector.fromLists(listType,
decSortedList1, decSortedList2, decSortedNullMinList3, decSortedNullMinList4, list5);
ColumnVector result = v.listSortRows(true, true)) {
assertColumnsAreEqual(expected, result);
}
}
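  // The set operations below (listsHaveOverlap, listsIntersectDistinct,
  // listsUnionDistinct, listsDifferenceDistinct) work row-wise across two
  // list columns of equal row count. As the expectations show, nulls and NaNs
  // compare equal to themselves, results are distinct sets, and a null row on
  // either side produces a null output row. Element order in the results is
  // not guaranteed, hence the listSortRows before each comparison.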
@Test
void testSetOperations() {
List<Double> lhsList1 = Arrays.asList(Double.NaN, 5.0, 0.0, 0.0, 0.0, 0.0, null, 0.0);
List<Double> lhsList2 = Arrays.asList(Double.NaN, 5.0, 0.0, 0.0, 0.0, 0.0, null, 1.0);
List<Double> lhsList3 = null;
List<Double> lhsList4 = Arrays.asList(Double.NaN, 5.0, 0.0, 0.0, 0.0, 0.0, null, 1.0);
List<Double> rhsList1 = Arrays.asList(1.0, 0.5, null, 0.0, 0.0, null, Double.NaN);
List<Double> rhsList2 = Arrays.asList(2.0, 1.0, null, 0.0, 0.0, null);
List<Double> rhsList3 = Arrays.asList(2.0, 1.0, null, 0.0, 0.0, null);
List<Double> rhsList4 = null;
// Set intersection result:
List<Double> expectedIntersectionList1 = Arrays.asList(null, 0.0, Double.NaN);
List<Double> expectedIntersectionList2 = Arrays.asList(null, 0.0, 1.0);
// Set union result:
List<Double> expectedUnionList1 = Arrays.asList(null, 0.0, 0.5, 1.0, 5.0, Double.NaN);
List<Double> expectedUnionList2 = Arrays.asList(null, 0.0, 1.0, 2.0, 5.0, Double.NaN);
// Set difference result:
List<Double> expectedDifferenceList1 = Arrays.asList(5.0);
List<Double> expectedDifferenceList2 = Arrays.asList(5.0, Double.NaN);
try(ColumnVector lhs = makeListsColumn(DType.FLOAT64, lhsList1, lhsList2, lhsList3, lhsList4);
ColumnVector rhs = makeListsColumn(DType.FLOAT64, rhsList1, rhsList2, rhsList3, rhsList4)) {
// Test listsHaveOverlap:
try(ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, null, null);
ColumnVector result = ColumnVector.listsHaveOverlap(lhs, rhs)) {
assertColumnsAreEqual(expected, result);
}
// Test listsIntersectDistinct:
try(ColumnVector expected = makeListsColumn(DType.FLOAT64, expectedIntersectionList1,
expectedIntersectionList2, null, null);
ColumnVector result = ColumnVector.listsIntersectDistinct(lhs, rhs);
ColumnVector resultSorted = result.listSortRows(false, true)) {
assertColumnsAreEqual(expected, resultSorted);
}
// Test listsUnionDistinct:
try(ColumnVector expected = makeListsColumn(DType.FLOAT64, expectedUnionList1,
expectedUnionList2, null, null);
ColumnVector result = ColumnVector.listsUnionDistinct(lhs, rhs);
ColumnVector resultSorted = result.listSortRows(false, true)) {
assertColumnsAreEqual(expected, resultSorted);
}
// Test listsDifferenceDistinct:
try(ColumnVector expected = makeListsColumn(DType.FLOAT64, expectedDifferenceList1,
expectedDifferenceList2, null, null);
ColumnVector result = ColumnVector.listsDifferenceDistinct(lhs, rhs);
ColumnVector resultSorted = result.listSortRows(false, true)) {
assertColumnsAreEqual(expected, resultSorted);
}
}
}
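  // reverseStringsOrLists reverses the characters of each string row or the
  // elements of each list row; null rows and empty rows pass through as-is.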
@Test
void testReverseString() {
try (ColumnVector input = ColumnVector.fromStrings("abcdef", "12345", "", "", "aébé",
"A é Z", "X", "é");
ColumnVector expected = ColumnVector.fromStrings("fedcba", "54321", "", "", "ébéa",
"Z é A", "X", "é");
ColumnVector result = input.reverseStringsOrLists()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testReverseList() {
List<Integer> list0 = Arrays.asList(1, 2, 3);
List<Integer> list1 = Arrays.asList(4, 5, null);
List<Integer> emptyList = Collections.emptyList();
List<Integer> reversedList0 = Arrays.asList(3, 2, 1);
List<Integer> reversedList1 = Arrays.asList(null, 5, 4);
try (ColumnVector input = makeListsColumn(DType.INT32,
emptyList, list0, emptyList, null, list1);
ColumnVector expected = makeListsColumn(DType.INT32,
emptyList, reversedList0, emptyList, null, reversedList1);
ColumnVector result = input.reverseStringsOrLists()) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testStringSplit() {
String pattern = " ";
try (ColumnVector v = ColumnVector.fromStrings("Héllo there all", "thésé", null, "",
"ARé some things", "test strings here");
Table expectedSplitLimit2 = new Table.TestBuilder()
.column("Héllo", "thésé", null, "", "ARé", "test")
.column("there all", null, null, null, "some things", "strings here")
.build();
Table expectedSplitAll = new Table.TestBuilder()
.column("Héllo", "thésé", null, "", "ARé", "test")
.column("there", null, null, null, "some", "strings")
.column("all", null, null, null, "things", "here")
.build();
Table resultSplitLimit2 = v.stringSplit(pattern, 2);
Table resultSplitAll = v.stringSplit(pattern)) {
assertTablesAreEqual(expectedSplitLimit2, resultSplitLimit2);
assertTablesAreEqual(expectedSplitAll, resultSplitAll);
}
}
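  // stringSplit returns a Table with one string column per split token, up to
  // the optional limit; rows with fewer tokens are padded with nulls. Regex
  // splitting can be requested either with a trailing boolean flag on the
  // pattern overload or with a precompiled RegexProgram, as both variants
  // below demonstrate.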
@Test
void testStringSplitByRegularExpression() {
String pattern = "[_ ]";
RegexProgram regexProg = new RegexProgram(pattern, CaptureGroups.NON_CAPTURE);
try (ColumnVector v = ColumnVector.fromStrings("Héllo_there all", "thésé", null, "",
"ARé some_things", "test_strings_here");
Table expectedSplitLimit2 = new Table.TestBuilder()
.column("Héllo", "thésé", null, "", "ARé", "test")
.column("there all", null, null, null, "some_things", "strings_here")
.build();
Table expectedSplitAll = new Table.TestBuilder()
.column("Héllo", "thésé", null, "", "ARé", "test")
.column("there", null, null, null, "some", "strings")
.column("all", null, null, null, "things", "here")
.build()) {
try (Table resultSplitLimit2 = v.stringSplit(pattern, 2, true);
Table resultSplitAll = v.stringSplit(pattern, true)) {
assertTablesAreEqual(expectedSplitLimit2, resultSplitLimit2);
assertTablesAreEqual(expectedSplitAll, resultSplitAll);
}
try (Table resultSplitLimit2 = v.stringSplit(regexProg, 2);
Table resultSplitAll = v.stringSplit(regexProg)) {
assertTablesAreEqual(expectedSplitLimit2, resultSplitLimit2);
assertTablesAreEqual(expectedSplitAll, resultSplitAll);
}
}
}
@Test
void testStringSplitRecord() {
String pattern = " ";
try (ColumnVector v = ColumnVector.fromStrings("Héllo there all", "thésé", null, "",
"ARé some things", "test strings here");
ColumnVector expectedSplitLimit2 = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("Héllo", "there all"),
Arrays.asList("thésé"),
null,
Arrays.asList(""),
Arrays.asList("ARé", "some things"),
Arrays.asList("test", "strings here"));
ColumnVector expectedSplitAll = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("Héllo", "there", "all"),
Arrays.asList("thésé"),
null,
Arrays.asList(""),
Arrays.asList("ARé", "some", "things"),
Arrays.asList("test", "strings", "here"));
ColumnVector resultSplitLimit2 = v.stringSplitRecord(pattern, 2);
ColumnVector resultSplitAll = v.stringSplitRecord(pattern)) {
assertColumnsAreEqual(expectedSplitLimit2, resultSplitLimit2);
assertColumnsAreEqual(expectedSplitAll, resultSplitAll);
}
}
@Test
void testStringSplitRecordByRegularExpression() {
String pattern = "[_ ]";
RegexProgram regexProg = new RegexProgram(pattern, CaptureGroups.NON_CAPTURE);
try (ColumnVector v = ColumnVector.fromStrings("Héllo_there all", "thésé", null, "",
"ARé some_things", "test_strings_here");
ColumnVector expectedSplitLimit2 = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("Héllo", "there all"),
Arrays.asList("thésé"),
null,
Arrays.asList(""),
Arrays.asList("ARé", "some_things"),
Arrays.asList("test", "strings_here"));
ColumnVector expectedSplitAll = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
Arrays.asList("Héllo", "there", "all"),
Arrays.asList("thésé"),
null,
Arrays.asList(""),
Arrays.asList("ARé", "some", "things"),
Arrays.asList("test", "strings", "here"))) {
try (ColumnVector resultSplitLimit2 = v.stringSplitRecord(pattern, 2, true);
ColumnVector resultSplitAll = v.stringSplitRecord(pattern, true)) {
assertColumnsAreEqual(expectedSplitLimit2, resultSplitLimit2);
assertColumnsAreEqual(expectedSplitAll, resultSplitAll);
}
try (ColumnVector resultSplitLimit2 = v.stringSplitRecord(regexProg, 2);
ColumnVector resultSplitAll = v.stringSplitRecord(regexProg)) {
assertColumnsAreEqual(expectedSplitLimit2, resultSplitLimit2);
assertColumnsAreEqual(expectedSplitAll, resultSplitAll);
}
}
}
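  // substring(start, end) with per-row index columns slices each string row
  // by its own bounds; going by the expectations below, an end index of -1
  // selects through the end of the string, and the index columns must match
  // the input's row count (the mismatch case is expected to throw).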
@Test
  void testSubstringColumn() {
try (ColumnVector v = ColumnVector.fromStrings("Héllo", "thésé", null, "", "ARé", "strings");
ColumnVector start = ColumnVector.fromInts(2, 1, 1, 1, 0, 1);
ColumnVector end = ColumnVector.fromInts(5, 3, 1, 1, -1, -1);
ColumnVector expected = ColumnVector.fromStrings("llo", "hé", null, "", "ARé", "trings");
ColumnVector result = v.substring(start, end)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
  void testSubstringThrowsException() {
assertThrows(AssertionError.class, () -> {
try (ColumnVector v = ColumnVector.fromStrings("Héllo", "thésé", null, "", "ARé", "strings");
ColumnVector start = ColumnVector.fromInts(2, 1, 1, 1, 0, 1);
ColumnVector end = ColumnVector.fromInts(5, 3, 1, 1, -1);
ColumnVector substring = v.substring(start, end)) {
}
});
}
@Test
  void testStringReplace() {
try (ColumnVector v = ColumnVector.fromStrings("Héllo", "thésssé", null, "", "ARé", "sssstrings");
ColumnVector e_allParameters = ColumnVector.fromStrings("Héllo", "théSsé", null, "", "ARé", "SStrings");
Scalar target = Scalar.fromString("ss");
Scalar replace = Scalar.fromString("S");
ColumnVector replace_allParameters = v.stringReplace(target, replace)) {
assertColumnsAreEqual(e_allParameters, replace_allParameters);
}
}
@Test
  void testStringReplaceThrowsException() {
assertThrows(AssertionError.class, () -> {
try (ColumnVector testStrings = ColumnVector.fromStrings("Héllo", "thésé", null, "", "ARé", "strings");
           Scalar target = Scalar.fromString("");
           Scalar replace = Scalar.fromString("a");
           ColumnVector result = testStrings.stringReplace(target, replace)) {}
});
}
@Test
  void testStringReplaceMulti() {
try (ColumnVector v = ColumnVector.fromStrings("Héllo", "thésssé", null, "", "ARé", "sssstrings");
ColumnVector e_allParameters = ColumnVector.fromStrings("Hello", "theSse", null, "", "ARe", "SStrings");
ColumnVector targets = ColumnVector.fromStrings("ss", "é");
ColumnVector repls = ColumnVector.fromStrings("S", "e");
ColumnVector replace_allParameters = v.stringReplace(targets, repls)) {
assertColumnsAreEqual(e_allParameters, replace_allParameters);
}
}
@Test
  void testStringReplaceMultiThrowsException() {
assertThrows(AssertionError.class, () -> {
try (ColumnVector testStrings = ColumnVector.fromStrings("Héllo", "thésé", null, "", "ARé", "strings");
ColumnVector targets = ColumnVector.fromInts(0, 1);
ColumnVector repls = null;
           ColumnVector result = testStrings.stringReplace(targets, repls)) {}
});
}
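  // replaceRegex takes an optional maximum replacement count per row: going
  // by the cases below, 0 leaves the input unchanged, 1 replaces only the
  // first match in each row, and omitting the count replaces every match.
  // Both the raw-pattern and RegexProgram overloads are exercised.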
@Test
void testReplaceRegex() {
try (ColumnVector v = ColumnVector.fromStrings("title and Title with title", "nothing", null, "Title");
Scalar repl = Scalar.fromString("Repl")) {
String pattern = "[tT]itle";
RegexProgram regexProg = new RegexProgram(pattern, CaptureGroups.NON_CAPTURE);
try (ColumnVector actual = v.replaceRegex(pattern, repl);
ColumnVector expected =
ColumnVector.fromStrings("Repl and Repl with Repl", "nothing", null, "Repl")) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector actual = v.replaceRegex(pattern, repl, 0)) {
assertColumnsAreEqual(v, actual);
}
try (ColumnVector actual = v.replaceRegex(pattern, repl, 1);
ColumnVector expected =
ColumnVector.fromStrings("Repl and Title with title", "nothing", null, "Repl")) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector actual = v.replaceRegex(regexProg, repl);
ColumnVector expected =
ColumnVector.fromStrings("Repl and Repl with Repl", "nothing", null, "Repl")) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector actual = v.replaceRegex(regexProg, repl, 0)) {
assertColumnsAreEqual(v, actual);
}
try (ColumnVector actual = v.replaceRegex(regexProg, repl, 1);
ColumnVector expected =
ColumnVector.fromStrings("Repl and Title with title", "nothing", null, "Repl")) {
assertColumnsAreEqual(expected, actual);
}
}
}
@Test
void testReplaceMultiRegex() {
try (ColumnVector v =
ColumnVector.fromStrings("title and Title with title", "nothing", null, "Title");
ColumnVector repls = ColumnVector.fromStrings("Repl", "**");
ColumnVector actual = v.replaceMultiRegex(new String[] { "[tT]itle", "and|th" }, repls);
ColumnVector expected =
ColumnVector.fromStrings("Repl ** Repl wi** Repl", "no**ing", null, "Repl")) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testStringReplaceWithBackrefs() {
try (ColumnVector v = ColumnVector.fromStrings("<h1>title</h1>", "<h1>another title</h1>", null);
ColumnVector expected = ColumnVector.fromStrings("<h2>title</h2>",
"<h2>another title</h2>", null);
ColumnVector actual = v.stringReplaceWithBackrefs("<h1>(.*)</h1>", "<h2>\\1</h2>");
ColumnVector actualRe =
v.stringReplaceWithBackrefs(new RegexProgram("<h1>(.*)</h1>"), "<h2>\\1</h2>")) {
assertColumnsAreEqual(expected, actual);
assertColumnsAreEqual(expected, actualRe);
}
try (ColumnVector v = ColumnVector.fromStrings("2020-1-01", "2020-2-02", null);
ColumnVector expected = ColumnVector.fromStrings("2020-01-01", "2020-02-02", null);
ColumnVector actual = v.stringReplaceWithBackrefs("-([0-9])-", "-0\\1-");
ColumnVector actualRe =
v.stringReplaceWithBackrefs(new RegexProgram("-([0-9])-"), "-0\\1-")) {
assertColumnsAreEqual(expected, actual);
assertColumnsAreEqual(expected, actualRe);
}
try (ColumnVector v = ColumnVector.fromStrings("2020-01-1", "2020-02-2", "2020-03-3invalid", null);
ColumnVector expected = ColumnVector.fromStrings("2020-01-01", "2020-02-02",
"2020-03-3invalid", null);
ColumnVector actual = v.stringReplaceWithBackrefs("-([0-9])$", "-0\\1");
ColumnVector actualRe =
v.stringReplaceWithBackrefs(new RegexProgram("-([0-9])$"), "-0\\1")) {
assertColumnsAreEqual(expected, actual);
assertColumnsAreEqual(expected, actualRe);
}
try (ColumnVector v = ColumnVector.fromStrings("2020-01-1 random_text", "2020-02-2T12:34:56",
"2020-03-3invalid", null);
ColumnVector expected = ColumnVector.fromStrings("2020-01-01 random_text",
"2020-02-02T12:34:56", "2020-03-3invalid", null);
ColumnVector actual = v.stringReplaceWithBackrefs("-([0-9])([ T])", "-0\\1\\2");
ColumnVector actualRe =
v.stringReplaceWithBackrefs(new RegexProgram("-([0-9])([ T])"), "-0\\1\\2")) {
assertColumnsAreEqual(expected, actual);
assertColumnsAreEqual(expected, actualRe);
}
// test zero as group index
try (ColumnVector v = ColumnVector.fromStrings("aa-11 b2b-345", "aa-11a 1c-2b2 b2-c3", "11-aa", null);
ColumnVector expected = ColumnVector.fromStrings("aa-11:aa:11; b2b-345:b:345;",
"aa-11:aa:11;a 1c-2:c:2;b2 b2-c3", "11-aa", null);
ColumnVector actual = v.stringReplaceWithBackrefs("([a-z]+)-([0-9]+)", "${0}:${1}:${2};");
ColumnVector actualRe =
v.stringReplaceWithBackrefs(new RegexProgram("([a-z]+)-([0-9]+)"), "${0}:${1}:${2};")) {
assertColumnsAreEqual(expected, actual);
assertColumnsAreEqual(expected, actualRe);
}
// group index exceeds group count
assertThrows(CudfException.class, () -> {
try (ColumnVector v = ColumnVector.fromStrings("ABC123defgh");
ColumnVector r = v.stringReplaceWithBackrefs("([A-Z]+)([0-9]+)([a-z]+)", "\\4")) {
}
});
// group index exceeds group count
assertThrows(CudfException.class, () -> {
try (ColumnVector v = ColumnVector.fromStrings("ABC123defgh");
ColumnVector r =
v.stringReplaceWithBackrefs(new RegexProgram("([A-Z]+)([0-9]+)([a-z]+)"), "\\4")) {
}
});
}
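  // pad(width, side, fillString) pads rows shorter than width on the chosen
  // side(s) and leaves longer rows untouched; zfill(width) is the
  // numeric-style variant that left-fills with '0'. For example:
  //
  //   try (ColumnVector v = ColumnVector.fromStrings("1");
  //        ColumnVector padded = v.pad(4, PadSide.LEFT, "_");   // ["___1"]
  //        ColumnVector zeroed = v.zfill(4)) {                  // ["0001"]
  //   }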
@Test
void testLPad() {
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("A1", "23", "45678", null);
ColumnVector actual = v.pad(2, PadSide.LEFT, "A")) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("___1", "__23", "45678", null);
ColumnVector actual = v.pad(4, PadSide.LEFT, "_")) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testRPad() {
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("1A", "23", "45678", null);
ColumnVector actual = v.pad(2, PadSide.RIGHT, "A")) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("1___", "23__", "45678", null);
ColumnVector actual = v.pad(4, PadSide.RIGHT, "_")) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testPad() {
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("1A", "23", "45678", null);
ColumnVector actual = v.pad(2, PadSide.BOTH, "A")) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("_1__", "_23_", "45678", null);
ColumnVector actual = v.pad(4, PadSide.BOTH, "_")) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testZfill() {
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("01", "23", "45678", null);
ColumnVector actual = v.zfill(2)) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector v = ColumnVector.fromStrings("1", "23", "45678", null);
ColumnVector expected = ColumnVector.fromStrings("0001", "0023", "45678", null);
ColumnVector actual = v.zfill(4)) {
assertColumnsAreEqual(expected, actual);
}
}
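  // toTitle and capitalize differ in how word boundaries are chosen: toTitle
  // works on whitespace-delimited words, lowercasing everything but the first
  // letter of each, while capitalize(delimiters) uppercases the first
  // character and any character following one of the delimiter characters,
  // as the three delimiter variants below demonstrate.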
@Test
void testStringTitlize() {
try (ColumnVector cv = ColumnVector.fromStrings("sPark", "sqL", "lowercase", null, "", "UPPERCASE");
ColumnVector result = cv.toTitle();
ColumnVector expected = ColumnVector.fromStrings("Spark", "Sql", "Lowercase", null, "", "Uppercase")) {
assertColumnsAreEqual(expected, result);
}
}
@Test
void testStringCapitalize() {
try (ColumnVector cv = ColumnVector.fromStrings("s Park", "S\nqL", "lower \tcase",
null, "", "UPPER\rCASE")) {
try (Scalar deli = Scalar.fromString("");
ColumnVector result = cv.capitalize(deli);
ColumnVector expected = ColumnVector.fromStrings("S park", "S\nql", "Lower \tcase",
null, "", "Upper\rcase")) {
assertColumnsAreEqual(expected, result);
}
try (Scalar deli = Scalar.fromString(" ");
ColumnVector result = cv.capitalize(deli);
ColumnVector expected = ColumnVector.fromStrings("S Park", "S\nql", "Lower \tcase",
null, "", "Upper\rcase")) {
assertColumnsAreEqual(expected, result);
}
try (Scalar deli = Scalar.fromString(" \t\n");
ColumnVector result = cv.capitalize(deli);
ColumnVector expected = ColumnVector.fromStrings("S Park", "S\nQl", "Lower \tCase",
null, "", "Upper\rcase")) {
assertColumnsAreEqual(expected, result);
}
}
}
@Test
void testNansToNulls() {
Float[] floats = new Float[]{1.2f, Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY, null,
Float.NaN, Float.MAX_VALUE, Float.MIN_VALUE, 435243.2323f, POSITIVE_FLOAT_NAN_LOWER_RANGE,
POSITIVE_FLOAT_NAN_UPPER_RANGE, NEGATIVE_FLOAT_NAN_LOWER_RANGE,
NEGATIVE_FLOAT_NAN_UPPER_RANGE};
Float[] expectedFloats = new Float[]{1.2f, Float.POSITIVE_INFINITY,
Float.NEGATIVE_INFINITY, null, null, Float.MAX_VALUE, Float.MIN_VALUE, 435243.2323f,
null, null, null, null};
Double[] doubles = new Double[]{1.2d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, null,
Double.NaN, Double.MAX_VALUE, Double.MIN_VALUE, 435243.2323d, POSITIVE_DOUBLE_NAN_LOWER_RANGE,
POSITIVE_DOUBLE_NAN_UPPER_RANGE, NEGATIVE_DOUBLE_NAN_LOWER_RANGE,
NEGATIVE_DOUBLE_NAN_UPPER_RANGE};
Double[] expectedDoubles = new Double[]{1.2d, Double.POSITIVE_INFINITY,
Double.NEGATIVE_INFINITY, null, null, Double.MAX_VALUE, Double.MIN_VALUE,
435243.2323d, null, null, null, null};
try (ColumnVector cvFloat = ColumnVector.fromBoxedFloats(floats);
ColumnVector cvDouble = ColumnVector.fromBoxedDoubles(doubles);
ColumnVector resultFloat = cvFloat.nansToNulls();
ColumnVector resultDouble = cvDouble.nansToNulls();
ColumnVector expectedFloat = ColumnVector.fromBoxedFloats(expectedFloats);
ColumnVector expectedDouble = ColumnVector.fromBoxedDoubles(expectedDoubles)) {
assertColumnsAreEqual(expectedFloat, resultFloat);
assertColumnsAreEqual(expectedDouble, resultDouble);
}
}
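  // isInteger(DType) validates, per string row, that the text parses as an
  // integer that also fits the requested width; the boundary strings below
  // probe MIN/MAX and one step beyond for each integer type.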
@Test
void testIsIntegerWithBounds() {
String[] intStrings = {"A", "nan", "Inf", "-Inf", "3.5",
String.valueOf(Byte.MIN_VALUE),
String.valueOf(Byte.MIN_VALUE + 1L),
String.valueOf(Byte.MIN_VALUE - 1L),
String.valueOf(Byte.MAX_VALUE),
String.valueOf(Byte.MAX_VALUE + 1L),
String.valueOf(Byte.MAX_VALUE - 1L),
String.valueOf(Short.MIN_VALUE),
String.valueOf(Short.MIN_VALUE + 1L),
String.valueOf(Short.MIN_VALUE - 1L),
String.valueOf(Short.MAX_VALUE),
String.valueOf(Short.MAX_VALUE + 1L),
String.valueOf(Short.MAX_VALUE - 1L),
String.valueOf(Integer.MIN_VALUE),
String.valueOf(Integer.MIN_VALUE + 1L),
String.valueOf(Integer.MIN_VALUE - 1L),
String.valueOf(Integer.MAX_VALUE),
String.valueOf(Integer.MAX_VALUE + 1L),
String.valueOf(Integer.MAX_VALUE - 1L),
String.valueOf(Long.MIN_VALUE),
String.valueOf(Long.MIN_VALUE + 1L),
"-9223372036854775809",
String.valueOf(Long.MAX_VALUE),
"9223372036854775808",
String.valueOf(Long.MAX_VALUE - 1L)};
try (ColumnVector intStringCV = ColumnVector.fromStrings(intStrings);
ColumnVector isByte = intStringCV.isInteger(DType.INT8);
ColumnVector expectedByte = ColumnVector.fromBoxedBooleans(
false, false, false, false, false,
true, true, false, true, false, true,
false, false, false, false, false, false,
false, false, false, false, false, false,
false, false, false, false, false, false);
ColumnVector isShort = intStringCV.isInteger(DType.INT16);
ColumnVector expectedShort = ColumnVector.fromBoxedBooleans(
false, false, false, false, false,
true, true, true, true, true, true,
true, true, false, true, false, true,
false, false, false, false, false, false,
false, false, false, false, false, false);
ColumnVector isInt = intStringCV.isInteger(DType.INT32);
ColumnVector expectedInt = ColumnVector.fromBoxedBooleans(
false, false, false, false, false,
true, true, true, true, true, true,
true, true, true, true, true, true,
true, true, false, true, false, true,
false, false, false, false, false, false);
ColumnVector isLong = intStringCV.isInteger(DType.INT64);
ColumnVector expectedLong = ColumnVector.fromBoxedBooleans(
false, false, false, false, false,
true, true, true, true, true, true,
true, true, true, true, true, true,
true, true, true, true, true, true,
true, true, false, true, false, true)) {
assertColumnsAreEqual(expectedByte, isByte);
assertColumnsAreEqual(expectedShort, isShort);
assertColumnsAreEqual(expectedInt, isInt);
assertColumnsAreEqual(expectedLong, isLong);
}
}
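  // The unbounded isInteger() only validates integer syntax, so out-of-range
  // text such as "2147483648" still reports true; the asInts()/asLongs()
  // expectations document that such casts currently wrap around rather than
  // saturate or null out.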
@Test
void testIsInteger() {
String[] intStrings = {"A", "nan", "Inf", "-Inf", "Infinity", "infinity", "2147483647",
"2147483648", "-2147483648", "-2147483649", "NULL", "null", null, "1.2", "1.2e-4", "0.00012"};
String[] longStrings = {"A", "nan", "Inf", "-Inf", "Infinity", "infinity",
"9223372036854775807", "9223372036854775808", "-9223372036854775808",
"-9223372036854775809", "NULL", "null", null, "1.2", "1.2e-4", "0.00012"};
try (ColumnVector intStringCV = ColumnVector.fromStrings(intStrings);
ColumnVector longStringCV = ColumnVector.fromStrings(longStrings);
ColumnVector isInt = intStringCV.isInteger();
ColumnVector isLong = longStringCV.isInteger();
ColumnVector ints = intStringCV.asInts();
ColumnVector longs = longStringCV.asLongs();
ColumnVector expectedInts = ColumnVector.fromBoxedInts(0, 0, 0, 0, 0, 0, Integer.MAX_VALUE,
Integer.MIN_VALUE, Integer.MIN_VALUE, Integer.MAX_VALUE, 0, 0, null, 1, 1, 0);
ColumnVector expectedLongs = ColumnVector.fromBoxedLongs(0l, 0l, 0l, 0l, 0l, 0l, Long.MAX_VALUE,
Long.MIN_VALUE, Long.MIN_VALUE, Long.MAX_VALUE, 0l, 0l, null, 1l, 1l, 0l);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false, false,
false, true, true, true, true, false, false, null, false, false, false)) {
assertColumnsAreEqual(expected, isInt);
assertColumnsAreEqual(expected, isLong);
assertColumnsAreEqual(expectedInts, ints);
assertColumnsAreEqual(expectedLongs, longs);
}
}
@Test
void testIsFixedPoint() {
String[] decimalStrings = {"A", "nan", "Inf", "-Inf", "Infinity", "infinity",
"2.1474", "112.383", "-2.14748", "NULL", "null", null, "1.2", "1.2e-4", "0.00012"};
DType dt = DType.create(DType.DTypeEnum.DECIMAL32, -3);
try (ColumnVector decStringCV = ColumnVector.fromStrings(decimalStrings);
ColumnVector isFixedPoint = decStringCV.isFixedPoint(dt);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false, false
, false, true, true, true, false, false, null, true, true, true)) {
assertColumnsAreEqual(expected, isFixedPoint);
}
}
@Test
void testIsFloat() {
String[] floatStrings = {"A", "nan", "Inf", "-Inf", "Infinity", "infinity", "-0.0", "0.0",
"3.4028235E38", "3.4028236E38", "-3.4028235E38", "-3.4028236E38", "1.2e-24", "NULL", "null",
null, "423"};
try (ColumnVector floatStringCV = ColumnVector.fromStrings(floatStrings);
ColumnVector isFloat = floatStringCV.isFloat();
ColumnVector floats = floatStringCV.asFloats();
ColumnVector expectedFloats = ColumnVector.fromBoxedFloats(0f, Float.NaN, Float.POSITIVE_INFINITY,
Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, Float.POSITIVE_INFINITY, -0f, 0f,
Float.MAX_VALUE, Float.POSITIVE_INFINITY, -Float.MAX_VALUE, Float.NEGATIVE_INFINITY,
1.2e-24f, 0f, 0f, null, 423f);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, true, true,
true, true, true, true, true, true, true, true, false, false, null, true)) {
assertColumnsAreEqual(expected, isFloat);
assertColumnsAreEqual(expectedFloats, floats);
}
}
@Test
void testIsDouble() {
String[] doubleStrings = {"A", "nan", "Inf", "-Inf", "Infinity", "infinity", "-0.0", "0.0",
"1.7976931348623157E308",
// Current CUDF Code does not detect overflow for this. "1.7976931348623158E308",
// So we make it a little larger for this test
"1.7976931348623159E308",
"-1.7976931348623157E308",
// Current CUDF Code does not detect overflow for this. "-1.7976931348623158E308",
// So we make it a little larger for this test
"-1.7976931348623159E308",
"1.2e-234", "NULL", "null", null, "423"};
try (ColumnVector doubleStringCV = ColumnVector.fromStrings(doubleStrings);
ColumnVector isDouble = doubleStringCV.isFloat();
ColumnVector doubles = doubleStringCV.asDoubles();
ColumnVector expectedDoubles = ColumnVector.fromBoxedDoubles(0d, Double.NaN,
Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY,
-0d, 0d, Double.MAX_VALUE, Double.POSITIVE_INFINITY, -Double.MAX_VALUE, Double.NEGATIVE_INFINITY,
1.2e-234d, 0d, 0d, null, 423d);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, true, true,
true, true, true, true, true, true, true, true, false, false, null, true)) {
assertColumnsAreEqual(expected, isDouble);
assertColumnsAreEqual(expectedDoubles, doubles);
}
}
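  // The duration factories below (durationDaysFromBoxedInts,
  // durationSecondsFromBoxedLongs, and friends) store the boxed counts
  // unchanged in the corresponding DURATION_* typed column, preserving nulls;
  // each test round-trips through copyToHost to verify type, null mask, and
  // raw values.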
@Test
void testCreateDurationDays() {
Integer[] days = {100, 10, 23, 1, -1, 0, Integer.MAX_VALUE, null, Integer.MIN_VALUE};
try (ColumnVector durationDays = ColumnVector.durationDaysFromBoxedInts(days);
HostColumnVector hc = durationDays.copyToHost()) {
assertTrue(hc.hasNulls());
assertEquals(DType.DURATION_DAYS, hc.getType());
for (int i = 0; i < days.length; i++) {
assertEquals(days[i] == null, hc.isNull(i));
if (!hc.isNull(i)) {
assertEquals(days[i], hc.getInt(i));
}
}
}
}
@Test
void testCreateDurationSeconds() {
Long[] secs = {10230L, 10L, 203L, 1L, -1L, 0L, Long.MAX_VALUE, null, Long.MIN_VALUE};
try (ColumnVector durationSeconds = ColumnVector.durationSecondsFromBoxedLongs(secs);
HostColumnVector hc = durationSeconds.copyToHost()) {
assertTrue(hc.hasNulls());
assertEquals(DType.DURATION_SECONDS, hc.getType());
for (int i = 0 ; i < secs.length ; i++) {
assertEquals(secs[i] == null, hc.isNull(i));
if (!hc.isNull(i)) {
assertEquals(secs[i], hc.getLong(i));
}
}
}
}
@Test
void testCreateDurationMilliseconds() {
Long[] ms = {12342340230L, 12112340L, 2230233L, 1L, -1L, 0L, Long.MAX_VALUE, null,
Long.MIN_VALUE};
try (ColumnVector durationMs = ColumnVector.durationMilliSecondsFromBoxedLongs(ms);
HostColumnVector hc = durationMs.copyToHost()) {
assertTrue(hc.hasNulls());
assertEquals(DType.DURATION_MILLISECONDS, hc.getType());
for (int i = 0 ; i < ms.length ; i++) {
assertEquals(ms[i] == null, hc.isNull(i));
if (!hc.isNull(i)) {
assertEquals(ms[i], hc.getLong(i));
}
}
}
}
@Test
void testCreateDurationMicroseconds() {
Long[] us = {1234234230L, 132350L, 289877803L, 1L, -1L, 0L, Long.MAX_VALUE, null,
Long.MIN_VALUE};
try (ColumnVector durationUs = ColumnVector.durationMicroSecondsFromBoxedLongs(us);
HostColumnVector hc = durationUs.copyToHost()) {
assertTrue(hc.hasNulls());
assertEquals(DType.DURATION_MICROSECONDS, hc.getType());
for (int i = 0 ; i < us.length ; i++) {
assertEquals(us[i] == null, hc.isNull(i));
if (!hc.isNull(i)) {
assertEquals(us[i], hc.getLong(i));
}
}
}
}
@Test
void testCreateDurationNanoseconds() {
Long[] ns = {1234234230L, 198832350L, 289877803L, 1L, -1L, 0L, Long.MAX_VALUE, null,
Long.MIN_VALUE};
try (ColumnVector durationNs = ColumnVector.durationNanoSecondsFromBoxedLongs(ns);
HostColumnVector hc = durationNs.copyToHost()) {
assertTrue(hc.hasNulls());
assertEquals(DType.DURATION_NANOSECONDS, hc.getType());
for (int i = 0 ; i < ns.length ; i++) {
assertEquals(ns[i] == null, hc.isNull(i));
if (!hc.isNull(i)) {
assertEquals(ns[i], hc.getLong(i));
}
}
}
}
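  // ColumnVector.fromLists builds a LIST column from a nested
  // HostColumnVector.DataType schema plus one java.util.List per row; after
  // copyToHost, getList(row) reproduces the original rows (null rows
  // included). A minimal sketch:
  //
  //   HostColumnVector.ListType schema = new HostColumnVector.ListType(true,
  //       new HostColumnVector.BasicType(true, DType.INT32));
  //   try (ColumnVector lists = ColumnVector.fromLists(schema, Arrays.asList(1, 2));
  //        HostColumnVector host = lists.copyToHost()) {
  //     // host.getList(0) is [1, 2]
  //   }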
@Test
void testListCv() {
List<Integer> list1 = Arrays.asList(0, 1, 2, 3);
List<Integer> list2 = Arrays.asList(6, 2, 4, 5);
List<Integer> list3 = Arrays.asList(0, 7, 3, 4, 2);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<Integer> ret1 = hcv.getList(0);
List<Integer> ret2 = hcv.getList(1);
List<Integer> ret3 = hcv.getList(2);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
assertEquals(list3, ret3, "Lists don't match");
}
}
@Test
void testListCvEmpty() {
List<Integer> list1 = Arrays.asList(0, 1, 2, 3);
List<Integer> list2 = Arrays.asList(6, 2, 4, 5);
List<Integer> list3 = new ArrayList<>();
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<Integer> ret1 = hcv.getList(0);
List<Integer> ret2 = hcv.getList(1);
List<Integer> ret3 = hcv.getList(2);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
assertEquals(list3, ret3, "Lists don't match");
}
}
@Test
void testListCvStrings() {
List<String> list1 = Arrays.asList("0", "1", "2", "3");
List<String> list2 = Arrays.asList("4", null, "6", null);
List<String> list3 = null;
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<String> ret1 = hcv.getList(0);
List<String> ret2 = hcv.getList(1);
List<String> ret3 = hcv.getList(2);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
assertEquals(list3, ret3, "Lists don't match");
}
}
@Test
void testListCvDoubles() {
List<Double> list1 = Arrays.asList(0.1, 1.2, 2.3, 3.4);
List<Double> list2 = Arrays.asList(6.7, 7.8, 8.9, 5.6);
List<Double> list3 = Arrays.asList(0.1, 7.8, 3.4, 4.5, 2.3);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.FLOAT64)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<Double> ret1 = hcv.getList(0);
List<Double> ret2 = hcv.getList(1);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
}
}
@Test
void testListCvBytes() {
List<Byte> list1 = Arrays.asList((byte)1, (byte)3, (byte)5, (byte)7);
List<Byte> list2 = Arrays.asList((byte)0, (byte)2, (byte)4, (byte)6);
List<Byte> list3 = Arrays.asList((byte)1, (byte)4, (byte)9, (byte)0);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT8)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<Byte> ret1 = hcv.getList(0);
List<Byte> ret2 = hcv.getList(1);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
}
}
@Test
void testListCvShorts() {
List<Short> list1 = Arrays.asList((short)1, (short)3, (short)5, (short)7);
List<Short> list2 = Arrays.asList((short)0, (short)2, (short)4, (short)6);
List<Short> list3 = Arrays.asList((short)1, (short)4, (short)9, (short)0);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT16)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<Short> ret1 = hcv.getList(0);
List<Short> ret2 = hcv.getList(1);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
}
}
@Test
void testListCvFloats() {
List<Float> list1 = Arrays.asList(0.1F, 1.2F, 2.3F, 3.4F);
List<Float> list2 = Arrays.asList(6.7F, 7.8F, 8.9F, 5.6F);
List<Float> list3 = Arrays.asList(0.1F, 7.8F, 3.4F, 4.5F, 2.3F);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.FLOAT32)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
      List<Float> ret1 = hcv.getList(0);
      List<Float> ret2 = hcv.getList(1);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
}
}
@Test
void testListCvLongs() {
List<Long> list1 = Arrays.asList(10L, 20L, 30L, 40L);
List<Long> list2 = Arrays.asList(6L, 7L, 8L, 9L);
List<Long> list3 = Arrays.asList(1L, 100L, 200L, 300L, 400L);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT64)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<Long> ret1 = hcv.getList(0);
List<Long> ret2 = hcv.getList(1);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
}
}
@Test
void testListCvBools() {
List<Boolean> list1 = Arrays.asList(true, false, false, true);
List<Boolean> list2 = Arrays.asList(false, true, false, false);
List<Boolean> list3 = Arrays.asList(true, true, true, true);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.BOOL8)), list1, list2, list3);
HostColumnVector hcv = res.copyToHost()) {
List<Boolean> ret1 = hcv.getList(0);
List<Boolean> ret2 = hcv.getList(1);
assertEquals(list1, ret1, "Lists don't match");
assertEquals(list2, ret2, "Lists don't match");
}
}
@Test
void testListOfListsCv() {
List<Integer> list1 = Arrays.asList(1, 2, 3);
List<Integer> list2 = Arrays.asList(4, 5, 6);
List<Integer> list3 = Arrays.asList(10, 20, 30);
List<Integer> list4 = Arrays.asList(40, 50, 60);
List<List<Integer>> mainList1 = new ArrayList<>();
mainList1.add(list1);
mainList1.add(list2);
List<List<Integer>> mainList2 = new ArrayList<>();
mainList2.add(list3);
mainList2.add(list4);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32))),
mainList1, mainList2);
HostColumnVector hcv = res.copyToHost()) {
List<List<Integer>> ret1 = hcv.getList(0);
List<List<Integer>> ret2 = hcv.getList(1);
assertEquals(mainList1, ret1, "Lists don't match");
assertEquals(mainList2, ret2, "Lists don't match");
}
}
@Test
void testListOfListsCvStrings() {
List<String> list1 = Arrays.asList("1", "23", "10");
List<String> list2 = Arrays.asList("13", "14", "17");
List<String> list3 = Arrays.asList("24", "25", "27");
List<String> list4 = Arrays.asList("29", "88", "19");
List<List<String>> mainList1 = new ArrayList<>();
mainList1.add(list1);
mainList1.add(list2);
List<List<String>> mainList2 = new ArrayList<>();
mainList2.add(list3);
mainList2.add(list4);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING))), mainList1, mainList2);
HostColumnVector hcv = res.copyToHost()) {
List<List<String>> ret1 = hcv.getList(0);
List<List<String>> ret2 = hcv.getList(1);
assertEquals(mainList1, ret1, "Lists don't match");
assertEquals(mainList2, ret2, "Lists don't match");
}
}
@Test
void testListOfListsCvDoubles() {
List<Double> list1 = Arrays.asList(1.1, 2.2, 3.3);
List<Double> list2 = Arrays.asList(4.4, 5.5, 6.6);
List<Double> list3 = Arrays.asList(10.1, 20.2, 30.3);
List<List<Double>> mainList1 = new ArrayList<>();
mainList1.add(list1);
mainList1.add(list2);
List<List<Double>> mainList2 = new ArrayList<>();
mainList2.add(list3);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.FLOAT64))), mainList1, mainList2);
HostColumnVector hcv = res.copyToHost()) {
List<List<Double>> ret1 = hcv.getList(0);
List<List<Double>> ret2 = hcv.getList(1);
assertEquals(mainList1, ret1, "Lists don't match");
assertEquals(mainList2, ret2, "Lists don't match");
}
}
@Test
void testListOfListsCvDecimals() {
List<BigDecimal> list1 = Arrays.asList(BigDecimal.valueOf(1.1), BigDecimal.valueOf(2.2), BigDecimal.valueOf(3.3));
List<BigDecimal> list2 = Arrays.asList(BigDecimal.valueOf(4.4), BigDecimal.valueOf(5.5), BigDecimal.valueOf(6.6));
List<BigDecimal> list3 = Arrays.asList(BigDecimal.valueOf(10.1), BigDecimal.valueOf(20.2), BigDecimal.valueOf(30.3));
List<List<BigDecimal>> mainList1 = new ArrayList<>();
mainList1.add(list1);
mainList1.add(list2);
List<List<BigDecimal>> mainList2 = new ArrayList<>();
mainList2.add(list3);
HostColumnVector.BasicType basicType = new HostColumnVector.BasicType(true, DType.create(DType.DTypeEnum.DECIMAL32, -1));
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true, basicType)), mainList1, mainList2);
HostColumnVector hcv = res.copyToHost()) {
List<List<BigDecimal>> ret1 = hcv.getList(0);
List<List<BigDecimal>> ret2 = hcv.getList(1);
assertEquals(mainList1, ret1, "Lists don't match");
assertEquals(mainList2, ret2, "Lists don't match");
}
}
@Test
void testConcatLists() {
List<Integer> list1 = Arrays.asList(0, 1, 2, 3);
List<Integer> list2 = Arrays.asList(6, 2, 4, 5);
List<Integer> list3 = Arrays.asList(0, 7, 3, 4, 2);
List<Integer> list4 = Arrays.asList(10, 11, 12, 13);
List<Integer> list5 = Arrays.asList(16, 12, 14, 15);
List<Integer> list6 = Arrays.asList(1, 10, 20, 30, 40);
try(ColumnVector res1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), list1, list2, list3);
ColumnVector res2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), list4, list5, list6);
ColumnVector v = ColumnVector.concatenate(res1, res2);
ColumnVector expected = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), list1, list2, list3, list4, list5, list6)) {
      assertEquals(6L, expected.getRowCount(), "Expected column row count is incorrect");
assertColumnsAreEqual(expected, v);
}
}
@Test
void testConcatListsStrings() {
List<String> list = Arrays.asList("0", "1", "2", "3");
List<String> list2 = Arrays.asList("4", null, "6", null);
List<String> list3 = null;
try (ColumnVector res1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)), list, list3);
ColumnVector res2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)), list2);
ColumnVector v = ColumnVector.concatenate(res1, res2);
ColumnVector expected = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)) , list, list3, list2)) {
      assertEquals(1, res1.getNullCount(), "Null count is incorrect on input column");
      assertEquals(0, res2.getNullCount(), "Null count is incorrect on input column");
      try (ColumnView cView1 = res1.getChildColumnView(0);
           ColumnView cView2 = res2.getChildColumnView(0)) {
        assertEquals(0, cView1.getNullCount(), "Null count is incorrect on child column");
        assertEquals(2, cView2.getNullCount(), "Null count is incorrect on child column");
}
assertColumnsAreEqual(expected, v);
}
}
@Test
void testNullsInLists() {
List<String> val1 = Arrays.asList("Hello", "there");
List<String> val2 = Arrays.asList("these");
List<String> val3 = null;
List<String> val4 = Arrays.asList();
List<String> val5 = Arrays.asList("ARe", "some");
List<String> val6 = Arrays.asList("test", "strings");
try(ColumnVector expected = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.STRING)),
val1, val2, val3, val4, val5, val6);
HostColumnVector hostColumnVector = expected.copyToHost()) {
List<String> ret1 = hostColumnVector.getList(0);
List<String> ret2 = hostColumnVector.getList(1);
List<String> ret3 = hostColumnVector.getList(2);
List<String> ret4 = hostColumnVector.getList(3);
List<String> ret5 = hostColumnVector.getList(4);
List<String> ret6 = hostColumnVector.getList(5);
assertEquals(val1, ret1, "Lists don't match");
assertEquals(val2, ret2, "Lists don't match");
assertEquals(val3, ret3, "Lists don't match");
      // An empty input list should round-trip as an empty (non-null) list,
      // distinct from the null list in row 2.
      assertEquals(val4, ret4, "Lists should be empty");
assertEquals(val5, ret5, "Lists don't match");
assertEquals(val6, ret6, "Lists don't match");
}
}
@Test
void testHcvOfInts() {
List<Integer> val1 = Arrays.asList(1, 22);
List<Integer> val2 = Arrays.asList(333);
List<Integer> val3 = null;
List<Integer> val4 = Arrays.asList();
List<Integer> val5 = Arrays.asList(4444, 55555);
List<Integer> val6 = Arrays.asList(666666, 7777777);
try(ColumnVector expected = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)),
val1, val2, val3, val4, val5, val6);
HostColumnVector hostColumnVector = expected.copyToHost()) {
      List<Integer> ret1 = hostColumnVector.getList(0);
      List<Integer> ret2 = hostColumnVector.getList(1);
      List<Integer> ret3 = hostColumnVector.getList(2);
      List<Integer> ret4 = hostColumnVector.getList(3);
      List<Integer> ret5 = hostColumnVector.getList(4);
      List<Integer> ret6 = hostColumnVector.getList(5);
assertEquals(val1, ret1, "Lists don't match");
assertEquals(val2, ret2, "Lists don't match");
assertEquals(val3, ret3, "Lists don't match");
assertEquals(val4, ret4, "Lists don't match");
assertEquals(val5, ret5, "Lists don't match");
assertEquals(val6, ret6, "Lists don't match");
}
}
@Test
void testHcvOfDecimals() {
List<BigDecimal>[] data = new List[6];
data[0] = Arrays.asList(BigDecimal.ONE, BigDecimal.TEN);
data[1] = Arrays.asList(BigDecimal.ZERO);
data[2] = null;
data[3] = Arrays.asList();
data[4] = Arrays.asList(BigDecimal.valueOf(123), BigDecimal.valueOf(1, -2));
data[5] = Arrays.asList(BigDecimal.valueOf(100, -3), BigDecimal.valueOf(2, -4));
try(ColumnVector expected = ColumnVector.fromLists(
new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.create(DType.DTypeEnum.DECIMAL32, 0))), data);
HostColumnVector hcv = expected.copyToHost()) {
for (int i = 0; i < data.length; i++) {
if (data[i] == null) {
assertNull(hcv.getList(i));
continue;
}
List<BigDecimal> exp = data[i].stream()
.map((dec -> (dec == null) ? null : dec.setScale(0, RoundingMode.UNNECESSARY)))
.collect(Collectors.toList());
assertEquals(exp, hcv.getList(i));
}
}
}
@Test
void testConcatListsOfLists() {
List<Integer> list1 = Arrays.asList(1, 2, 3);
List<Integer> list2 = Arrays.asList(4, 5, 6);
List<Integer> list3 = Arrays.asList(10, 20, 30);
List<Integer> list4 = Arrays.asList(40, 50, 60);
List<List<Integer>> mainList = new ArrayList<>();
mainList.add(list1);
mainList.add(list2);
List<List<Integer>> mainList2 = new ArrayList<>();
mainList2.add(list3);
mainList2.add(list4);
try (ColumnVector res1 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32))), mainList);
ColumnVector res2 = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32))), mainList2);
ColumnVector v = ColumnVector.concatenate(res1, res2);
ColumnVector expected = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32))), mainList, mainList2)) {
assertColumnsAreEqual(expected, v);
}
}
@Test
void testContiguousSplitConstructor() {
try (Table tmp = new Table.TestBuilder().column(1, 2).column(3, 4).build();
ContiguousTable ct = tmp.contiguousSplit()[0]) {
// table should not be referencing the device buffer yet
assertEquals(1, ct.getBuffer().getRefCount());
// get the table to force it to be instantiated
Table ignored = ct.getTable();
// one reference for the device buffer itself, two more for the columns using it
assertEquals(3, ct.getBuffer().getRefCount());
}
}
@Test
void testHcvForStruct() {
List<HostColumnVector.DataType> children =
Arrays.asList(new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.INT64));
HostColumnVector.StructType type = new HostColumnVector.StructType(true, children);
List data1 = Arrays.asList(10, 20L);
List data2 = Arrays.asList(50, 60L);
List data3 = Arrays.asList(null, 80L);
List data4 = null;
HostColumnVector.StructData structData1 = new HostColumnVector.StructData(data1);
HostColumnVector.StructData structData2 = new HostColumnVector.StructData(data2);
HostColumnVector.StructData structData3 = new HostColumnVector.StructData(data3);
HostColumnVector.StructData structData4 = new HostColumnVector.StructData(data4);
try (HostColumnVector hcv = HostColumnVector.fromStructs(type, Arrays.asList(structData1, structData2, structData3, structData4));
ColumnVector columnVector = hcv.copyToDevice();
HostColumnVector hcv1 = columnVector.copyToHost();
ColumnVector expected = hcv1.copyToDevice()) {
assertEquals(4L, expected.getRowCount(), "Expected column row count is incorrect");
HostColumnVector.StructData retData1 = hcv1.getStruct(0);
HostColumnVector.StructData retData2 = hcv1.getStruct(1);
HostColumnVector.StructData retData3 = hcv1.getStruct(2);
HostColumnVector.StructData retData4 = hcv1.getStruct(3);
assertEquals(data1, retData1.dataRecord);
assertEquals(data2, retData2.dataRecord);
assertEquals(data3, retData3.dataRecord);
assertEquals(data4, retData4);
assertStructColumnsAreEqual(expected, columnVector);
}
}
@Test
void testStructChildValidity() {
List<HostColumnVector.DataType> children =
Arrays.asList(new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.INT64));
HostColumnVector.StructType type = new HostColumnVector.StructType(true, children);
List data1 = Arrays.asList(1, 2L);
List data2 = Arrays.asList(4, 5L);
List data3 = null;
List data4 = Arrays.asList(8, null);
HostColumnVector.StructData structData1 = new HostColumnVector.StructData(data1);
HostColumnVector.StructData structData2 = new HostColumnVector.StructData(data2);
HostColumnVector.StructData structData3 = new HostColumnVector.StructData(data3);
HostColumnVector.StructData structData4 = new HostColumnVector.StructData(data4);
try (HostColumnVector hcv = HostColumnVector.fromStructs(type, Arrays.asList(structData1, structData2, structData3, structData4));
ColumnVector columnVector = hcv.copyToDevice();
HostColumnVector hcv1 = columnVector.copyToHost();
ColumnVector expected = hcv1.copyToDevice()) {
assertFalse(hcv.isNull(0));
assertFalse(hcv.isNull(1));
assertTrue(hcv.isNull(2));
assertFalse(hcv.isNull(3));
HostColumnVectorCore intChildCol = hcv.children.get(0);
HostColumnVectorCore longChildCol = hcv.children.get(1);
assertFalse(intChildCol.isNull(0));
assertFalse(intChildCol.isNull(1));
assertTrue(intChildCol.isNull(2));
assertFalse(intChildCol.isNull(3));
assertFalse(longChildCol.isNull(0));
assertFalse(longChildCol.isNull(1));
assertTrue(longChildCol.isNull(2));
assertTrue(longChildCol.isNull(3));
intChildCol = hcv1.children.get(0);
longChildCol = hcv1.children.get(1);
assertFalse(intChildCol.isNull(0));
assertFalse(intChildCol.isNull(1));
assertTrue(intChildCol.isNull(2));
assertFalse(intChildCol.isNull(3));
assertFalse(longChildCol.isNull(0));
assertFalse(longChildCol.isNull(1));
assertTrue(longChildCol.isNull(2));
assertTrue(longChildCol.isNull(3));
assertStructColumnsAreEqual(expected, columnVector);
}
}
@Test
void testGetMapValueForKeys() {
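// Each row below is a single-entry "map" encoded as a list of key/value structs.
// getMapValue performs a per-row lookup: row i of the result is the value paired with
// lookupKey[i], or null when that key is absent from the row's map.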
List<HostColumnVector.StructData> list1 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList(1, 2)));
List<HostColumnVector.StructData> list2 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList(2, 3)));
List<HostColumnVector.StructData> list3 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList(5, 4)));
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.INT32)));
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3);
ColumnVector lookupKey = ColumnVector.fromInts(1, 6, 5);
ColumnVector res = cv.getMapValue(lookupKey);
ColumnVector expected = ColumnVector.fromBoxedInts(2, null, 4)) {
assertColumnsAreEqual(expected, res);
}
}
@Test
void testGetMapValueForInteger() {
List<HostColumnVector.StructData> list1 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList(1, 2)));
List<HostColumnVector.StructData> list2 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList(1, 3)));
List<HostColumnVector.StructData> list3 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList(5, 4)));
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.INT32)));
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3);
Scalar lookupKey = Scalar.fromInt(1);
ColumnVector res = cv.getMapValue(lookupKey);
ColumnVector expected = ColumnVector.fromBoxedInts(2, 3, null)) {
assertColumnsAreEqual(expected, res);
}
}
@Test
void testGetMapValueForStrings() {
List<HostColumnVector.StructData> list1 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "b")));
List<HostColumnVector.StructData> list2 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "c")));
List<HostColumnVector.StructData> list3 = Arrays.asList(new HostColumnVector.StructData(Arrays.asList("e", "d")));
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.BasicType(true, DType.STRING),
new HostColumnVector.BasicType(true, DType.STRING)));
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3);
Scalar lookupKey = Scalar.fromString("a");
ColumnVector res = cv.getMapValue(lookupKey);
ColumnVector expected = ColumnVector.fromStrings("b", "c", null)) {
assertColumnsAreEqual(expected, res);
}
}
@Test
void testGetMapValueEmptyInput() {
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.BasicType(true, DType.STRING),
new HostColumnVector.BasicType(true, DType.STRING)));
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType));
Scalar lookupKey = Scalar.fromString("a");
ColumnVector res = cv.getMapValue(lookupKey);
ColumnVector expected = ColumnVector.fromStrings()) {
assertColumnsAreEqual(expected, res);
}
}
@Test
void testGetMapKeyExistenceForKeys() {
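// getMapKeyExistence returns one boolean per row: true when the row's map contains the
// corresponding lookup key. Rows with a null key entry or an empty map yield false, as
// does looking up a null key; a null Scalar reference itself is rejected with an assertion.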
List<HostColumnVector.StructData> list1 = Arrays.asList(new HostColumnVector.StructData(1, 2));
List<HostColumnVector.StructData> list2 = Arrays.asList(new HostColumnVector.StructData(1, 3));
List<HostColumnVector.StructData> list3 = Arrays.asList(new HostColumnVector.StructData(5, 4));
List<HostColumnVector.StructData> list4 = Arrays.asList(new HostColumnVector.StructData(1, 7));
List<HostColumnVector.StructData> list5 = Arrays.asList(new HostColumnVector.StructData(1, null));
List<HostColumnVector.StructData> list6 = Arrays.asList(new HostColumnVector.StructData(null, null));
List<HostColumnVector.StructData> list7 = Arrays.asList(new HostColumnVector.StructData());
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.INT32)));
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3, list4, list5, list6, list7);
ColumnVector lookup = ColumnVector.fromInts(1, 5, 5, 5, 5, 5, 6);
ColumnVector resValidKey = cv.getMapKeyExistence(lookup);
ColumnVector expectedValid = ColumnVector.fromBooleans(true, false, true, false, false, false, false)) {
assertColumnsAreEqual(expectedValid, resValidKey);
}
}
@Test
void testGetMapKeyExistenceForInteger() {
List<HostColumnVector.StructData> list1 = Arrays.asList(new HostColumnVector.StructData(1, 2));
List<HostColumnVector.StructData> list2 = Arrays.asList(new HostColumnVector.StructData(1, 3));
List<HostColumnVector.StructData> list3 = Arrays.asList(new HostColumnVector.StructData(5, 4));
List<HostColumnVector.StructData> list4 = Arrays.asList(new HostColumnVector.StructData(1, 7));
List<HostColumnVector.StructData> list5 = Arrays.asList(new HostColumnVector.StructData(1, null));
List<HostColumnVector.StructData> list6 = Arrays.asList(new HostColumnVector.StructData(null, null));
List<HostColumnVector.StructData> list7 = Arrays.asList(new HostColumnVector.StructData());
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.BasicType(true, DType.INT32),
new HostColumnVector.BasicType(true, DType.INT32)));
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3, list4, list5, list6, list7);
Scalar lookup1 = Scalar.fromInt(1);
ColumnVector resValidKey = cv.getMapKeyExistence(lookup1);
ColumnVector expectedValid = ColumnVector.fromBoxedBooleans(true, true, false, true, true, false, false);
ColumnVector expectedNull = ColumnVector.fromBoxedBooleans(false, false, false, false, false, false, false);
Scalar lookupNull = Scalar.fromNull(DType.INT32);
ColumnVector resNullKey = cv.getMapKeyExistence(lookupNull)) {
assertColumnsAreEqual(expectedValid, resValidKey);
assertColumnsAreEqual(expectedNull, resNullKey);
}
AssertionError e = assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3, list4, list5, list6, list7);
Scalar key = null;
ColumnVector resNullKey = cv.getMapKeyExistence(key)) {
}
});
assertTrue(e.getMessage().contains("Lookup key may not be null"));
}
@Test
void testGetMapKeyExistenceForStrings() {
List<HostColumnVector.StructData> list1 = Arrays.asList(new HostColumnVector.StructData("a", "b"));
List<HostColumnVector.StructData> list2 = Arrays.asList(new HostColumnVector.StructData("a", "c"));
List<HostColumnVector.StructData> list3 = Arrays.asList(new HostColumnVector.StructData("e", "d"));
List<HostColumnVector.StructData> list4 = Arrays.asList(new HostColumnVector.StructData("a", "g"));
List<HostColumnVector.StructData> list5 = Arrays.asList(new HostColumnVector.StructData("a", null));
List<HostColumnVector.StructData> list6 = Arrays.asList(new HostColumnVector.StructData(null, null));
List<HostColumnVector.StructData> list7 = Arrays.asList(new HostColumnVector.StructData());
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.BasicType(true, DType.STRING),
new HostColumnVector.BasicType(true, DType.STRING)));
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3, list4, list5, list6, list7);
Scalar lookupA = Scalar.fromString("a");
ColumnVector resValidKey = cv.getMapKeyExistence(lookupA);
ColumnVector expectedValid = ColumnVector.fromBoxedBooleans(true, true, false, true, true, false, false);
ColumnVector expectedNull = ColumnVector.fromBoxedBooleans(false, false, false, false, false, false, false);
Scalar lookupNull = Scalar.fromNull(DType.STRING);
ColumnVector resNullKey = cv.getMapKeyExistence(lookupNull)) {
assertColumnsAreEqual(expectedValid, resValidKey);
assertColumnsAreEqual(expectedNull, resNullKey);
}
AssertionError e = assertThrows(AssertionError.class, () -> {
try (ColumnVector cv = ColumnVector.fromLists(new HostColumnVector.ListType(true, structType), list1, list2, list3, list4, list5, list6, list7);
Scalar key = null;
ColumnVector resNullKey = cv.getMapKeyExistence(key)) {
}
});
assertTrue(e.getMessage().contains("Lookup key may not be null"));
}
@Test
void testListOfStructsOfStructs() {
List<HostColumnVector.StructData> list1 = Arrays.asList(
new HostColumnVector.StructData(Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a")))));
List<HostColumnVector.StructData> list2 = Arrays.asList(
new HostColumnVector.StructData(Arrays.asList(new HostColumnVector.StructData(Arrays.asList("b")))));
List<HostColumnVector.StructData> list3 = Arrays.asList(
new HostColumnVector.StructData(Arrays.asList(new HostColumnVector.StructData(Arrays.asList("c")))));
HostColumnVector.StructType structType = new HostColumnVector.StructType(true, Arrays.asList(new HostColumnVector.StructType(true,
Arrays.asList(new HostColumnVector.BasicType(true, DType.STRING)))));
HostColumnVector.ListType schema = new HostColumnVector.ListType(true, structType);
try (ColumnVector cv = ColumnVector.fromLists(schema, list1, list2, list3);
HostColumnVector hostColumnVector = cv.copyToHost();
ColumnVector expected = hostColumnVector.copyToDevice()) {
assertColumnsAreEqual(expected, cv);
}
}
@Test
void testCopyToColumnVector() {
List<Integer> list1 = Arrays.asList(10, 11, 12, 13);
List<Integer> list2 = Arrays.asList(16, 12, 14, 15);
List<Integer> list3 = Arrays.asList(0, 7, 3, 4, 2);
try(ColumnVector res = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32)), list1, list2, list3);
ColumnView childColumnView = res.getChildColumnView(0);
ColumnVector copiedChildCv = childColumnView.copyToColumnVector();
ColumnVector expected =
ColumnVector.fromInts(10, 11, 12, 13, 16, 12, 14, 15, 0, 7, 3, 4, 2)) {
assertColumnsAreEqual(expected, copiedChildCv);
}
}
@Test
void testGetJSONObject() {
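// getJSONObject evaluates a JSONPath expression against each row's JSON string; a path
// matching multiple elements (book[*].author) returns them as a single JSON-array string.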
String jsonString = "{ \"store\": {\n" +
" \"book\": [\n" +
" { \"category\": \"reference\",\n" +
" \"author\": \"Nigel Rees\",\n" +
" \"title\": \"Sayings of the Century\",\n" +
" \"price\": 8.95\n" +
" },\n" +
" { \"category\": \"fiction\",\n" +
" \"author\": \"Evelyn Waugh\",\n" +
" \"title\": \"Sword of Honour\",\n" +
" \"price\": 12.99\n" +
" },\n" +
" { \"category\": \"fiction\",\n" +
" \"author\": \"Herman Melville\",\n" +
" \"title\": \"Moby Dick\",\n" +
" \"isbn\": \"0-553-21311-3\",\n" +
" \"price\": 8.99\n" +
" },\n" +
" { \"category\": \"fiction\",\n" +
" \"author\": \"J. R. R. Tolkien\",\n" +
" \"title\": \"The Lord of the Rings\",\n" +
" \"isbn\": \"0-395-19395-8\",\n" +
" \"price\": 22.99\n" +
" }\n" +
" ],\n" +
" \"bicycle\": {\n" +
" \"color\": \"red\",\n" +
" \"price\": 19.95\n" +
" }\n" +
" }\n" +
"}";
try (ColumnVector json = ColumnVector.fromStrings(jsonString, jsonString);
ColumnVector expectedAuthors = ColumnVector.fromStrings("[\"Nigel Rees\",\"Evelyn " +
"Waugh\",\"Herman Melville\",\"J. R. R. Tolkien\"]", "[\"Nigel Rees\",\"Evelyn " +
"Waugh\",\"Herman Melville\",\"J. R. R. Tolkien\"]");
Scalar path = Scalar.fromString("$.store.book[*].author");
ColumnVector gotAuthors = json.getJSONObject(path)) {
assertColumnsAreEqual(expectedAuthors, gotAuthors);
}
}
@Test
void testMakeStructEmpty() {
final int numRows = 10;
try (ColumnVector expected = ColumnVector.emptyStructs(new StructType(false, new ArrayList<>()), numRows);
ColumnVector created = ColumnVector.makeStruct(numRows)) {
assertColumnsAreEqual(expected, created);
}
}
@Test
void testMakeStruct() {
try (ColumnVector expected = ColumnVector.fromStructs(new StructType(false,
Arrays.asList(
new BasicType(false, DType.INT32),
new BasicType(false, DType.INT32),
new BasicType(false, DType.INT32))),
new HostColumnVector.StructData(1, 2, 3),
new HostColumnVector.StructData(4, 5, 6));
ColumnVector child1 = ColumnVector.fromInts(1, 4);
ColumnVector child2 = ColumnVector.fromInts(2, 5);
ColumnVector child3 = ColumnVector.fromInts(3, 6);
ColumnVector created = ColumnVector.makeStruct(child1, child2, child3)) {
assertColumnsAreEqual(expected, created);
}
}
@Test
void testMakeListEmpty() {
final int numRows = 4;
List<List<String>> emptyListOfList = new ArrayList<>();
emptyListOfList.add(Arrays.asList());
try (
ColumnVector expectedList =
ColumnVector.fromLists(
new ListType(false, new BasicType(false, DType.STRING)),
Arrays.asList(),
Arrays.asList(),
Arrays.asList(),
Arrays.asList());
ColumnVector expectedListOfList = ColumnVector.fromLists(new HostColumnVector.ListType(false,
new HostColumnVector.ListType(false,
new HostColumnVector.BasicType(false, DType.STRING))),
emptyListOfList, emptyListOfList, emptyListOfList, emptyListOfList);
ColumnVector createdList = ColumnVector.makeList(numRows, DType.STRING);
ColumnVector createdListOfList = ColumnVector.makeList(createdList)) {
assertColumnsAreEqual(expectedList, createdList);
assertColumnsAreEqual(expectedListOfList, createdListOfList);
}
}
@Test
void testMakeList() {
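// makeList zips its child columns row-wise: row i of the result is the list
// [child1[i], child2[i], ...], so every list has one element per child column.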
List<Integer> list1 = Arrays.asList(1, 3);
List<Integer> list2 = Arrays.asList(2, 4);
List<Integer> list3 = Arrays.asList(5, 7, 9);
List<Integer> list4 = Arrays.asList(6, 8, 10);
List<List<Integer>> mainList1 = new ArrayList<>(Arrays.asList(list1, list3));
List<List<Integer>> mainList2 = new ArrayList<>(Arrays.asList(list2, list4));
try (ColumnVector expectedList1 =
ColumnVector.fromLists(new ListType(false,
new BasicType(false, DType.INT32)), list1, list2);
ColumnVector expectedList2 =
ColumnVector.fromLists(new ListType(false,
new BasicType(false, DType.INT32)), list3, list4);
ColumnVector expectedListOfList = ColumnVector.fromLists(new HostColumnVector.ListType(true,
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32))),
mainList1, mainList2);
ColumnVector child1 = ColumnVector.fromInts(1, 2);
ColumnVector child2 = ColumnVector.fromInts(3, 4);
ColumnVector child3 = ColumnVector.fromInts(5, 6);
ColumnVector child4 = ColumnVector.fromInts(7, 8);
ColumnVector child5 = ColumnVector.fromInts(9, 10);
ColumnVector createdList1 = ColumnVector.makeList(child1, child2);
ColumnVector createdList2 = ColumnVector.makeList(child3, child4, child5);
ColumnVector createdListOfList = ColumnVector.makeList(createdList1, createdList2);
HostColumnVector hcv = createdListOfList.copyToHost()) {
assertColumnsAreEqual(expectedList1, createdList1);
assertColumnsAreEqual(expectedList2, createdList2);
assertColumnsAreEqual(expectedListOfList, createdListOfList);
List<List<Integer>> ret1 = hcv.getList(0);
List<List<Integer>> ret2 = hcv.getList(1);
assertEquals(mainList1, ret1, "Lists don't match");
assertEquals(mainList2, ret2, "Lists don't match");
}
}
@Test
void testReplaceLeafNodeInList() {
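// replaceListChild swaps the flattened leaf child of a LIST column (here 8 decimal
// elements replaced by 8 ints) while reusing the existing offsets and validity; the new
// child must have the same row count as the old one (see the negative test below).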
try (
ColumnVector c1 = ColumnVector.fromInts(1, 2);
ColumnVector c2 = ColumnVector.fromInts(8, 3);
ColumnVector c3 = ColumnVector.fromInts(9, 8);
ColumnVector c4 = ColumnVector.fromInts(2, 6);
ColumnVector expected = ColumnVector.makeList(c1, c2, c3, c4);
ColumnVector child1 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 770.892, 961.110);
ColumnVector child2 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 524.982, 479.946);
ColumnVector child3 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 346.997, 479.946);
ColumnVector child4 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 87.764, 414.239);
ColumnVector created = ColumnVector.makeList(child1, child2, child3, child4);
ColumnVector newChild = ColumnVector.fromInts(1, 8, 9, 2, 2, 3, 8, 6);
ColumnView replacedView = created.replaceListChild(newChild)) {
try (ColumnVector replaced = replacedView.copyToColumnVector()) {
assertColumnsAreEqual(expected, replaced);
}
}
}
@Test
void testReplaceLeafNodeInListWithIllegal() {
Exception e = assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector child1 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 770.892, 961.110);
ColumnVector child2 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 524.982, 479.946);
ColumnVector child3 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 346.997, 479.946);
ColumnVector child4 =
ColumnVector.decimalFromDoubles(DType.create(DType.DTypeEnum.DECIMAL64, 3),
RoundingMode.HALF_UP, 87.764, 414.239);
ColumnVector created = ColumnVector.makeList(child1, child2, child3, child4);
ColumnVector newChild = ColumnVector.fromInts(0, 1, 8, 9, 2, 2, 3, 8, 6);
ColumnView replacedView = created.replaceListChild(newChild)) {
}
});
assertTrue(e.getMessage().contains("Child row count doesn't match the old child"));
}
@Test
void testReplaceColumnInStruct() {
try (ColumnVector expected = ColumnVector.fromStructs(new StructType(false,
Arrays.asList(
new BasicType(false, DType.INT32),
new BasicType(false, DType.INT32),
new BasicType(false, DType.INT32))),
new HostColumnVector.StructData(1, 5, 3),
new HostColumnVector.StructData(4, 9, 6));
ColumnVector child1 = ColumnVector.fromInts(1, 4);
ColumnVector child2 = ColumnVector.fromInts(2, 5);
ColumnVector child3 = ColumnVector.fromInts(3, 6);
ColumnVector created = ColumnVector.makeStruct(child1, child2, child3);
ColumnVector replaceWith = ColumnVector.fromInts(5, 9);
ColumnView replacedView = created.replaceChildrenWithViews(new int[]{1},
new ColumnVector[]{replaceWith})) {
try (ColumnVector replaced = replacedView.copyToColumnVector()) {
assertColumnsAreEqual(expected, replaced);
}
}
}
@Test
void testReplaceIllegalIndexColumnInStruct() {
Exception e = assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector child1 = ColumnVector.fromInts(1, 4);
ColumnVector child2 = ColumnVector.fromInts(2, 5);
ColumnVector child3 = ColumnVector.fromInts(3, 6);
ColumnVector created = ColumnVector.makeStruct(child1, child2, child3);
ColumnVector replaceWith = ColumnVector.fromInts(5, 9);
ColumnView replacedView = created.replaceChildrenWithViews(new int[]{5},
new ColumnVector[]{replaceWith})) {
}
});
assertTrue(e.getMessage().contains("One or more invalid child indices passed to be replaced"));
}
@Test
void testReplaceSameIndexColumnInStruct() {
Exception e = assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector child1 = ColumnVector.fromInts(1, 4);
ColumnVector child2 = ColumnVector.fromInts(2, 5);
ColumnVector child3 = ColumnVector.fromInts(3, 6);
ColumnVector created = ColumnVector.makeStruct(child1, child2, child3);
ColumnVector replaceWith = ColumnVector.fromInts(5, 9);
ColumnView replacedView = created.replaceChildrenWithViews(new int[]{1, 1},
new ColumnVector[]{replaceWith, replaceWith})) {
}
});
assertTrue(e.getMessage().contains("Duplicate mapping found for replacing child index"));
}
@Test
void testCopyWithBooleanColumnAsValidity() {
final Boolean T = true;
final Boolean F = false;
final Integer X = null;
// Straight-line: Invalidate every other row.
try (ColumnVector exemplar = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
ColumnVector validity = ColumnVector.fromBoxedBooleans(F, T, F, T, F, T, F, T, F, T);
ColumnVector expected = ColumnVector.fromBoxedInts(X, 2, X, 4, X, 6, X, 8, X, 10);
ColumnVector result = exemplar.copyWithBooleanColumnAsValidity(validity)) {
assertColumnsAreEqual(expected, result);
}
// Straight-line: Invalidate all rows.
try (ColumnVector exemplar = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
ColumnVector validity = ColumnVector.fromBoxedBooleans(F, F, F, F, F, F, F, F, F, F);
ColumnVector expected = ColumnVector.fromBoxedInts(X, X, X, X, X, X, X, X, X, X);
ColumnVector result = exemplar.copyWithBooleanColumnAsValidity(validity)) {
assertColumnsAreEqual(expected, result);
}
// Nulls in the validity column are treated as invalid.
try (ColumnVector exemplar = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
ColumnVector validity = ColumnVector.fromBoxedBooleans(F, T, F, T, F, T, F, null, F, null);
ColumnVector expected = ColumnVector.fromBoxedInts(X, 2, X, 4, X, 6, X, X, X, X);
ColumnVector result = exemplar.copyWithBooleanColumnAsValidity(validity)) {
assertColumnsAreEqual(expected, result);
}
// Negative case: Mismatch in row count.
Exception x = assertThrows(CudfException.class, () -> {
try (ColumnVector exemplar = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
ColumnVector validity = ColumnVector.fromBoxedBooleans(F, T, F, T);
ColumnVector result = exemplar.copyWithBooleanColumnAsValidity(validity)) {
}
});
assertTrue(x.getMessage().contains("Exemplar and validity columns must have the same size"));
}
@Test
void testSegmentedGather() {
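// Per-row gather: each output list picks elements of the corresponding input list by index.
// Negative indices count back from the end of the list; indices out of range in either
// direction produce nulls (see the last row, where 5 and -6 both yield null).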
HostColumnVector.DataType dt = new ListType(true, new BasicType(true, DType.STRING));
try (ColumnVector source = ColumnVector.fromLists(dt,
Lists.newArrayList("a", "b", null, "c"),
null,
Lists.newArrayList(),
Lists.newArrayList(null, "A", "B", "C", "D"));
ColumnVector gatherMap = ColumnVector.fromLists(
new ListType(false, new BasicType(false, DType.INT32)),
Lists.newArrayList(-3, 0, 2, 3, 4),
Lists.newArrayList(),
Lists.newArrayList(1),
Lists.newArrayList(1, -4, 5, -1, -6));
ColumnVector actual = source.segmentedGather(gatherMap);
ColumnVector expected = ColumnVector.fromLists(dt,
Lists.newArrayList("b", "a", null, "c", null),
null,
Lists.newArrayList((String) null),
Lists.newArrayList("A", "A", null, "D", null))) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testGenerateListOffsets() {
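// generateListOffsets treats each input value as a list size and returns the offsets
// column for those lists: an exclusive prefix sum with one extra trailing entry for the
// total element count (rowCount + 1 entries overall).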
try (ColumnVector index = ColumnVector.fromInts(1, 3, 3, 0, 2, 0, 0, 5, 10, 25);
ColumnVector actual = index.generateListOffsets();
ColumnVector expected = ColumnVector.fromInts(0, 1, 4, 7, 7, 9, 9, 9, 14, 24, 49)) {
assertColumnsAreEqual(expected, actual);
}
try (ColumnVector index = ColumnVector.fromInts(0, 0, 1, 0, 0);
ColumnVector actual = index.generateListOffsets();
ColumnVector expected = ColumnVector.fromInts(0, 0, 0, 1, 1, 1)) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testApplyBooleanMaskFromListOfInt() {
try (
ColumnVector elementCv = ColumnVector.fromBoxedInts(
11, 12, // list1
21, 22, 23, // list2
null, 32, 33, null, 35, // list3
null, 42, 43, null, 45 // list 4
// list5 (empty)
);
ColumnVector offsetsCv = ColumnVector.fromInts(0, 2, 5, 10, 15, 15);
ColumnVector listOfIntCv = elementCv.makeListFromOffsets(5, offsetsCv);
ColumnVector boolCv = ColumnVector.fromBoxedBooleans(
true, false, // list1
true, false, true, // list2
true, false, true, false, true, // list3
true, false, true, false, true // list 4
// list5 (empty)
);
ColumnVector listOfBoolCv = boolCv.makeListFromOffsets(5, offsetsCv);
// apply boolean mask
ColumnVector actualCv = listOfIntCv.applyBooleanMask(listOfBoolCv);
ColumnVector expectedElementCv = ColumnVector.fromBoxedInts(
11, // list1
21, 23, // list2
null, 33, 35, // list3
null, 43, 45 // list 4
// list5 (empty)
);
ColumnVector expectedOffsetsCv = ColumnVector.fromInts(0, 1, 3, 6, 9, 9);
ColumnVector expectedCv = expectedElementCv.makeListFromOffsets(5, expectedOffsetsCv)
) {
assertColumnsAreEqual(expectedCv, actualCv);
}
}
@Test
void testApplyBooleanMaskFromListOfStructure() {
try (
ColumnVector keyCv = ColumnVector.fromBoxedInts(
11, 12, // list1
21, 22, 23, // list2
null, 32, 33, null, 35, // list3
null, 42, 43, null, 45 // list 4
// list5 (empty)
);
ColumnVector valCv = ColumnVector.fromBoxedInts(
11, 12, // list1
21, 22, 23, // list2
31, 32, 33, 34, 35, // list3
41, 42, 43, 44, 45 // list4
// list5 (empty)
);
ColumnVector structCv = ColumnVector.makeStruct(keyCv, valCv);
ColumnVector offsetsCv = ColumnVector.fromInts(0, 2, 5, 10, 15, 15);
ColumnVector listOfStructCv = structCv.makeListFromOffsets(5, offsetsCv);
ColumnVector boolCv = ColumnVector.fromBoxedBooleans(
true, false, // list1
true, false, true, // list2
true, false, true, false, true, // list3
true, false, true, false, true // list 4
// list5 (empty)
);
ColumnVector listOfBoolCv = boolCv.makeListFromOffsets(5, offsetsCv);
// apply boolean mask
ColumnVector actualCv = listOfStructCv.applyBooleanMask(listOfBoolCv);
ColumnVector expectedKeyCv = ColumnVector.fromBoxedInts(
11, // list1
21, 23, // list2
null, 33, 35, // list3
null, 43, 45 // list 4
// list5 (empty)
);
ColumnVector expectedValCv = ColumnVector.fromBoxedInts(
11, // list1
21, 23, // list2
31, 33, 35, // list3
41, 43, 45 // list4
// list5 (empty)
);
ColumnVector expectedStructCv = ColumnVector.makeStruct(expectedKeyCv, expectedValCv);
ColumnVector expectedOffsetsCv = ColumnVector.fromInts(0, 1, 3, 6, 9, 9);
ColumnVector expectedCv = expectedStructCv.makeListFromOffsets(5, expectedOffsetsCv)
) {
assertColumnsAreEqual(expectedCv, actualCv);
}
}
@Test
void testColumnViewWithNonEmptyNullsIsCleared() {
List<Integer> list0 = Arrays.asList(1, 2, 3);
List<Integer> list1 = Arrays.asList(4, 5, null);
List<Integer> list2 = Arrays.asList(7, 8, 9);
List<Integer> list3 = null;
final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();
try (ColumnVector input = ColumnVectorTest.makeListsColumn(DType.INT32, list0, list1, list2, list3);
BaseDeviceMemoryBuffer baseValidityBuffer = input.getDeviceBufferFor(BufferType.VALIDITY);
BaseDeviceMemoryBuffer baseOffsetBuffer = input.getDeviceBufferFor(BufferType.OFFSET);
HostMemoryBuffer newValidity = hostMemoryAllocator.allocate(BitVectorHelper.getValidityAllocationSizeInBytes(4))) {
newValidity.copyFromDeviceBuffer(baseValidityBuffer);
// we are setting list1, which has 3 elements, to null; this results in a non-empty null
// in the ColumnView at index 1
BitVectorHelper.setNullAt(newValidity, 1);
// validityBuffer will be closed by offHeapState later
DeviceMemoryBuffer validityBuffer = DeviceMemoryBuffer.allocate(BitVectorHelper.getValidityAllocationSizeInBytes(4));
try {
// offsetBuffer will be closed by offHeapState later
DeviceMemoryBuffer offsetBuffer = DeviceMemoryBuffer.allocate(baseOffsetBuffer.getLength());
try {
validityBuffer.copyFromHostBuffer(newValidity);
offsetBuffer.copyFromMemoryBuffer(0, baseOffsetBuffer, 0,
baseOffsetBuffer.length, Cuda.DEFAULT_STREAM);
// The new offHeapState will have 2 nulls: one at index 3 inherited from the original
// ColumnVector, and the other at index 1, which is non-empty
ColumnVector.OffHeapState offHeapState = ColumnVector.makeOffHeap(input.type, input.rows, Optional.of(2L),
null, validityBuffer, offsetBuffer,
null, Arrays.stream(input.getChildColumnViews()).mapToLong((c) -> c.viewHandle).toArray());
try {
new ColumnView(offHeapState);
} catch (AssertionError ae) {
assert offHeapState.isClean();
}
} catch (Exception e) {
if (!offsetBuffer.closed) {
offsetBuffer.close();
}
}
} catch (Exception e) {
if (!validityBuffer.closed) {
validityBuffer.close();
}
}
}
}
@Test
public void testEventHandlerIsCalledForEachClose() {
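// The close-event handler should fire on every close(); the single close performed by
// try-with-resources must invoke it exactly once.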
final AtomicInteger onClosedWasCalled = new AtomicInteger(0);
try (ColumnVector cv = ColumnVector.fromInts(1,2,3,4)) {
cv.setEventHandler((col, refCount) -> {
assertEquals(cv, col);
onClosedWasCalled.incrementAndGet();
});
}
assertEquals(1, onClosedWasCalled.get());
}
@Test
public void testHostEventHandlerIsCalledForEachClose() {
final AtomicInteger onClosedWasCalled = new AtomicInteger(0);
try (HostColumnVector cv = HostColumnVector.fromInts(1,2,3,4)) {
cv.setEventHandler((col, refCount) -> {
assertEquals(cv, col);
onClosedWasCalled.incrementAndGet();
});
}
assertEquals(1, onClosedWasCalled.get());
}
@Test
public void testEventHandlerIsNotCalledIfNotSet() {
final AtomicInteger onClosedWasCalled = new AtomicInteger(0);
try (ColumnVector cv = ColumnVector.fromInts(1,2,3,4)) {
assertNull(cv.getEventHandler());
}
assertEquals(0, onClosedWasCalled.get());
try (ColumnVector cv = ColumnVector.fromInts(1,2,3,4)) {
cv.setEventHandler((col, refCount) -> {
onClosedWasCalled.incrementAndGet();
});
cv.setEventHandler(null);
}
assertEquals(0, onClosedWasCalled.get());
}
@Test
public void testHostEventHandlerIsNotCalledIfNotSet() {
final AtomicInteger onClosedWasCalled = new AtomicInteger(0);
try (HostColumnVector cv = HostColumnVector.fromInts(1,2,3,4)) {
assertNull(cv.getEventHandler());
}
assertEquals(0, onClosedWasCalled.get());
try (HostColumnVector cv = HostColumnVector.fromInts(1,2,3,4)) {
cv.setEventHandler((col, refCount) -> {
onClosedWasCalled.incrementAndGet();
});
cv.setEventHandler(null);
}
assertEquals(0, onClosedWasCalled.get());
}
/**
* Test that a ColumnView constructed with an unknown null count still
* returns the correct null count when queried.
*/
@Test
public void testColumnViewNullCount() {
try (ColumnVector vector = ColumnVector.fromBoxedInts(1, 2, null, 3, null, 4, null, 5, null, 6);
ColumnView view = new ColumnView(DType.INT32,
vector.getRowCount(),
Optional.empty(), // Unknown null count.
vector.getDeviceBufferFor(BufferType.DATA),
vector.getDeviceBufferFor(BufferType.VALIDITY),
vector.getDeviceBufferFor(BufferType.OFFSET))) {
assertEquals(vector.getNullCount(), view.getNullCount());
}
}
@Test
public void testUseAfterFree() {
ColumnVector vector = ColumnVector.fromBoxedInts(1, 2, 3);
vector.close();
assertThrows(NullPointerException.class, vector::getDeviceMemorySize);
}
@Test
public void testConvertIntegerToHex() {
try (
ColumnVector input = ColumnVector.fromInts(14, 2621, 50);
ColumnVector expected = ColumnVector.fromStrings("0E", "0A3D", "32");
ColumnVector actual = input.toHex()) {
assertColumnsAreEqual(expected, actual);
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/PinnedMemoryPoolTest.java
|
/*
*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.*;
class PinnedMemoryPoolTest extends CudfTestBase {
private static final Logger log = LoggerFactory.getLogger(PinnedMemoryPoolTest.class);
@AfterEach
void teardown() {
if (PinnedMemoryPool.isInitialized()) {
PinnedMemoryPool.shutdown();
}
}
@Test
void init() {
assertFalse(PinnedMemoryPool.isInitialized());
PinnedMemoryPool.initialize(1024*1024*500L);
assertTrue(PinnedMemoryPool.isInitialized());
PinnedMemoryPool.shutdown();
assertFalse(PinnedMemoryPool.isInitialized());
}
@Test
void allocate() {
PinnedMemoryPool.initialize(1024*1024*500L);
for (int i = 2048000; i < 1024*1024*1024; i = i * 2) {
log.warn("STARTING TEST FOR size = " + i);
HostMemoryBuffer buff = null;
HostMemoryBuffer buff2 = null;
HostMemoryBuffer buff3 = null;
try {
buff = PinnedMemoryPool.allocate(i);
assertEquals(i, buff.length);
buff2 = PinnedMemoryPool.allocate(i / 2);
assertEquals(i/2, buff2.length);
buff.close();
buff = null;
buff3 = PinnedMemoryPool.allocate(i * 2);
assertEquals(i * 2, buff3.length);
} finally {
if (buff != null) {
buff.close();
}
if (buff3 != null) {
buff3.close();
}
if (buff2 != null) {
buff2.close();
}
}
log.warn("DONE TEST FOR size = " + i + "\n");
}
}
@Test
void testFragmentationAndExhaustion() {
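// Exercises sub-allocation from a fixed 15KB pinned pool: freed regions become available
// for reuse, and tryAllocate returns null (rather than throwing) once no free block is
// large enough to satisfy a request.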
final long poolSize = 15 * 1024L;
PinnedMemoryPool.initialize(poolSize);
assertEquals(poolSize, PinnedMemoryPool.getAvailableBytes());
HostMemoryBuffer[] buffers = new HostMemoryBuffer[5];
try {
buffers[0] = PinnedMemoryPool.tryAllocate(1024);
assertNotNull(buffers[0]);
assertEquals(14*1024L, PinnedMemoryPool.getAvailableBytes());
buffers[1] = PinnedMemoryPool.tryAllocate(2048);
assertNotNull(buffers[1]);
assertEquals(12*1024L, PinnedMemoryPool.getAvailableBytes());
buffers[2] = PinnedMemoryPool.tryAllocate(4096);
assertNotNull(buffers[2]);
assertEquals(8*1024L, PinnedMemoryPool.getAvailableBytes());
buffers[1].close();
assertEquals(10*1024L, PinnedMemoryPool.getAvailableBytes());
buffers[1] = null;
buffers[1] = PinnedMemoryPool.tryAllocate(8192);
assertNotNull(buffers[1]);
assertEquals(2*1024L, PinnedMemoryPool.getAvailableBytes());
buffers[3] = PinnedMemoryPool.tryAllocate(2048);
assertNotNull(buffers[3]);
assertEquals(0L, PinnedMemoryPool.getAvailableBytes());
buffers[4] = PinnedMemoryPool.tryAllocate(64);
assertNull(buffers[4]);
buffers[0].close();
assertEquals(1024L, PinnedMemoryPool.getAvailableBytes());
buffers[0] = null;
buffers[4] = PinnedMemoryPool.tryAllocate(64);
assertNotNull(buffers[4]);
assertEquals(1024L - 64, PinnedMemoryPool.getAvailableBytes());
} finally {
for (HostMemoryBuffer buffer : buffers) {
if (buffer != null) {
buffer.close();
}
}
}
assertEquals(poolSize, PinnedMemoryPool.getAvailableBytes());
}
@Test
void testZeroSizedAllocation() {
final long poolSize = 4 * 1024L;
PinnedMemoryPool.initialize(poolSize);
assertEquals(poolSize, PinnedMemoryPool.getAvailableBytes());
try (HostMemoryBuffer buffer = PinnedMemoryPool.tryAllocate(0)) {
assertNotNull(buffer);
assertEquals(0, buffer.getLength());
assertEquals(poolSize, PinnedMemoryPool.getAvailableBytes());
}
assertEquals(poolSize, PinnedMemoryPool.getAvailableBytes());
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ByteColumnVectorTest.java
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.Test;
import java.util.Random;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class ByteColumnVectorTest extends CudfTestBase {
@Test
public void testCreateColumnVectorBuilder() {
try (HostColumnVector byteColumnVector = HostColumnVector.build(DType.INT8, 3,
(b) -> b.append((byte) 1))) {
assertFalse(byteColumnVector.hasNulls());
}
}
@Test
public void testArrayAllocation() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertEquals(cv.getByte(0), 2);
assertEquals(cv.getByte(1), 3);
assertEquals(cv.getByte(2), 5);
};
try (HostColumnVector bcv = HostColumnVector.fromBytes(new byte[]{2, 3, 5})) {
verify.accept(bcv);
}
try (HostColumnVector bcv = ColumnBuilderHelper.fromBytes(true, new byte[]{2, 3, 5})) {
verify.accept(bcv);
}
}
@Test
public void testUnsignedArrayAllocation() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertEquals(0xff, Byte.toUnsignedInt(cv.getByte(0)));
assertEquals(128, Byte.toUnsignedInt(cv.getByte(1)));
assertEquals(5, Byte.toUnsignedInt(cv.getByte(2)));
};
try (HostColumnVector bcv = HostColumnVector.fromUnsignedBytes(new byte[]{(byte)0xff, (byte)128, 5})) {
verify.accept(bcv);
}
try (HostColumnVector bcv = ColumnBuilderHelper.fromBytes(false,
new byte[]{(byte)0xff, (byte)128, 5})) {
verify.accept(bcv);
}
}
@Test
public void testAppendRepeatingValues() {
try (HostColumnVector byteColumnVector = HostColumnVector.build(DType.INT8, 3,
(b) -> b.append((byte) 2, 3L))) {
assertFalse(byteColumnVector.hasNulls());
assertEquals(byteColumnVector.getByte(0), 2);
assertEquals(byteColumnVector.getByte(1), 2);
assertEquals(byteColumnVector.getByte(2), 2);
}
}
@Test
public void testUpperIndexOutOfBoundsException() {
Consumer<HostColumnVector> verify = (cv) -> {
assertThrows(AssertionError.class, () -> cv.getByte(3));
assertFalse(cv.hasNulls());
};
try (HostColumnVector bcv = HostColumnVector.fromBytes(new byte[]{2, 3, 5})) {
verify.accept(bcv);
}
try (HostColumnVector bcv = ColumnBuilderHelper.fromBytes(true, new byte[]{2, 3, 5})) {
verify.accept(bcv);
}
}
@Test
public void testLowerIndexOutOfBoundsException() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertThrows(AssertionError.class, () -> cv.getByte(-1));
};
try (HostColumnVector bcv = HostColumnVector.fromBytes(new byte[]{2, 3, 5})) {
verify.accept(bcv);
}
try (HostColumnVector bcv = ColumnBuilderHelper.fromBytes(true, new byte[]{2, 3, 5})) {
verify.accept(bcv);
}
}
@Test
public void testAddingNullValues() {
Consumer<HostColumnVector> verify = (cv) -> {
assertTrue(cv.hasNulls());
assertEquals(2, cv.getNullCount());
for (int i = 0; i < 6; i++) {
assertFalse(cv.isNull(i));
}
assertTrue(cv.isNull(6));
assertTrue(cv.isNull(7));
};
try (HostColumnVector bcv = HostColumnVector.fromBoxedBytes(
new Byte[]{2, 3, 4, 5, 6, 7, null, null})) {
verify.accept(bcv);
}
try (HostColumnVector bcv = ColumnBuilderHelper.fromBoxedBytes(true,
new Byte[]{2, 3, 4, 5, 6, 7, null, null})) {
verify.accept(bcv);
}
}
@Test
public void testAddingUnsignedNullValues() {
Consumer<HostColumnVector> verify = (cv) -> {
assertTrue(cv.hasNulls());
assertEquals(2, cv.getNullCount());
for (int i = 0; i < 6; i++) {
assertFalse(cv.isNull(i));
}
assertEquals(128, Byte.toUnsignedInt(cv.getByte(4)));
assertEquals(254, Byte.toUnsignedInt(cv.getByte(5)));
assertTrue(cv.isNull(6));
assertTrue(cv.isNull(7));
};
try (HostColumnVector bcv = HostColumnVector.fromBoxedUnsignedBytes(
new Byte[]{2, 3, 4, 5, (byte)128, (byte)254, null, null})) {
verify.accept(bcv);
}
try (HostColumnVector bcv = ColumnBuilderHelper.fromBoxedBytes(false,
new Byte[]{2, 3, 4, 5, (byte)128, (byte)254, null, null})) {
verify.accept(bcv);
}
}
@Test
public void testCastToByte() {
final int[] DATES = {17897}; //Jan 01, 2019
try (ColumnVector doubleColumnVector = ColumnVector.fromDoubles(4.3, 3.8, 8);
ColumnVector shortColumnVector = ColumnVector.fromShorts(new short[]{100});
ColumnVector dateColumnVector = ColumnVector.daysFromInts(DATES);
ColumnVector byteColumnVector1 = doubleColumnVector.asBytes();
ColumnVector byteColumnVector2 = shortColumnVector.asBytes();
ColumnVector byteColumnVector3 = dateColumnVector.asBytes();
ColumnVector expected1 = ColumnVector.fromBytes((byte)4, (byte)3, (byte)8);
ColumnVector expected2 = ColumnVector.fromBytes((byte)100);
ColumnVector expected3 = ColumnVector.fromBytes((byte)-23)) {
AssertUtils.assertColumnsAreEqual(expected1, byteColumnVector1);
AssertUtils.assertColumnsAreEqual(expected2, byteColumnVector2);
AssertUtils.assertColumnsAreEqual(expected3, byteColumnVector3);
}
}
@Test
public void testOverrunningTheBuffer() {
try (Builder builder = HostColumnVector.builder(DType.INT8, 3)) {
assertThrows(AssertionError.class,
() -> builder.append((byte) 2).appendNull().append((byte) 5, (byte) 4).build());
}
}
@Test
void testAppendVector() {
Random random = new Random(192312989128L);
for (int dstSize = 1; dstSize <= 100; dstSize++) {
for (int dstPrefilledSize = 0; dstPrefilledSize < dstSize; dstPrefilledSize++) {
final int srcSize = dstSize - dstPrefilledSize;
for (int sizeOfDataNotToAdd = 0; sizeOfDataNotToAdd <= dstPrefilledSize; sizeOfDataNotToAdd++) {
try (Builder dst = HostColumnVector.builder(DType.INT8, dstSize);
HostColumnVector src = HostColumnVector.build(DType.INT8, srcSize, (b) -> {
for (int i = 0; i < srcSize; i++) {
if (random.nextBoolean()) {
b.appendNull();
} else {
b.append((byte) random.nextInt());
}
}
});
Builder gtBuilder = HostColumnVector.builder(DType.INT8, dstPrefilledSize)) {
assertEquals(dstSize, srcSize + dstPrefilledSize);
// add the prefilled portion of the destination (minus the part intentionally left out),
// mirroring it into the ground-truth builder
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
if (random.nextBoolean()) {
dst.appendNull();
gtBuilder.appendNull();
} else {
byte a = (byte) random.nextInt();
dst.append(a);
gtBuilder.append(a);
}
}
// append the src vector
dst.append(src);
try (HostColumnVector dstVector = dst.build();
HostColumnVector gt = gtBuilder.build()) {
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
assertEquals(gt.isNull(i), dstVector.isNull(i));
if (!gt.isNull(i)) {
assertEquals(gt.getByte(i), dstVector.getByte(i));
}
}
for (int i = dstPrefilledSize - sizeOfDataNotToAdd, j = 0; i < dstSize - sizeOfDataNotToAdd && j < srcSize; i++, j++) {
assertEquals(src.isNull(j), dstVector.isNull(i));
if (!src.isNull(j)) {
assertEquals(src.getByte(j), dstVector.getByte(i));
}
}
if (dstVector.hasValidityVector()) {
long maxIndex =
BitVectorHelper.getValidityAllocationSizeInBytes(dstVector.getRowCount()) * 8;
for (long i = dstSize - sizeOfDataNotToAdd; i < maxIndex; i++) {
assertFalse(dstVector.isNullExtendedRange(i));
}
}
}
}
}
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/BinaryOpTest.java
|
/*
*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.BasicType;
import ai.rapids.cudf.HostColumnVector.Builder;
import ai.rapids.cudf.HostColumnVector.DataType;
import ai.rapids.cudf.HostColumnVector.StructData;
import ai.rapids.cudf.HostColumnVector.StructType;
import org.junit.jupiter.api.Test;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.util.Arrays;
import java.util.List;
import java.util.stream.IntStream;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static ai.rapids.cudf.TestUtils.*;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class BinaryOpTest extends CudfTestBase {
private static final int dec32Scale_1 = 2;
private static final int dec32Scale_2 = -3;
private static final int dec64Scale_1 = 6;
private static final int dec64Scale_2 = -2;
private static final Integer[] INTS_1 = new Integer[]{1, 2, 3, 4, 5, null, 100};
private static final Integer[] INTS_2 = new Integer[]{10, 20, 30, 40, 50, 60, 100};
private static final Integer[] UINTS_1 = new Integer[]{10, -20, 30, -40, 50, -60, 100};
private static final Integer[] UINTS_2 = new Integer[]{-10, 20, -30, 40, 50, -60, 100};
private static final Byte[] BYTES_1 = new Byte[]{-1, 7, 123, null, 50, 60, 100};
private static final Byte[] UBYTES_1 = new Byte[]{-1, 7, 123, null, -50, 60, -100};
private static final Float[] FLOATS_1 = new Float[]{1f, 10f, 100f, 5.3f, 50f, 100f, null};
private static final Float[] FLOATS_2 = new Float[]{10f, 20f, 30f, 40f, 50f, 60f, 100f};
private static final Long[] LONGS_1 = new Long[]{1L, 2L, 3L, 4L, 5L, null, 100L};
private static final Long[] LONGS_2 = new Long[]{10L, 20L, 30L, 40L, 50L, 60L, 100L};
private static final Double[] DOUBLES_1 = new Double[]{1.0, 10.0, 100.0, 5.3, 50.0, 100.0, null};
private static final Double[] DOUBLES_2 = new Double[]{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 100.0};
private static final Boolean[] BOOLEANS_1 = new Boolean[]{true, true, false, false, null};
private static final Boolean[] BOOLEANS_2 = new Boolean[]{true, false, true, false, true};
private static final int[] SHIFT_BY = new int[]{1, 2, 3, 4, 5, 10, 20};
private static final int[] DECIMAL32_1 = new int[]{1000, 2000, 3000, 4000, 5000};
private static final int[] DECIMAL32_2 = new int[]{100, 200, 300, 400, 50};
private static final long[] DECIMAL64_1 = new long[]{10L, 23L, 12L, 24L, 123456789L};
private static final long[] DECIMAL64_2 = new long[]{33041L, 97290L, 36438L, 25379L, 48473L};
private static final StructData INT_SD_1 = new StructData(1);
private static final StructData INT_SD_2 = new StructData(2);
private static final StructData INT_SD_3 = new StructData(3);
private static final StructData INT_SD_4 = new StructData(4);
private static final StructData INT_SD_5 = new StructData(5);
private static final StructData INT_SD_NULL = new StructData((List) null);
private static final StructData INT_SD_100 = new StructData(100);
private static final StructData[] int_struct_data_1 =
new StructData[]{null, INT_SD_1, null, INT_SD_3, INT_SD_4, INT_SD_5, INT_SD_NULL, INT_SD_100};
private static final StructData[] int_struct_data_2 =
new StructData[]{null, null, INT_SD_2, INT_SD_3, INT_SD_100, INT_SD_5, INT_SD_NULL, INT_SD_4};
private static final DataType structType =
new StructType(true, new BasicType(true, DType.INT32));
private static final BigInteger[] DECIMAL128_1 = new BigInteger[]{new BigInteger("1234567891234567"), new BigInteger("1234567891234567"),
new BigInteger("1234567891234567"), new BigInteger("1234567891234567"), new BigInteger("1234567891234567")};
private static final BigInteger[] DECIMAL128_2 = new BigInteger[]{new BigInteger("234567891234567"), new BigInteger("234567891234567"),
new BigInteger("234567891234567"), new BigInteger("234567891234567"), new BigInteger("234567891234567")};
private static final BigDecimal[] BIGDECIMAL32_1 = new BigDecimal[]{
BigDecimal.valueOf(12, dec32Scale_1),
BigDecimal.valueOf(11, dec32Scale_1),
BigDecimal.valueOf(20, dec32Scale_1),
null,
BigDecimal.valueOf(25, dec32Scale_1)
};
private static final BigDecimal[] BIGDECIMAL32_2 = new BigDecimal[]{
BigDecimal.valueOf(12, dec32Scale_2),
BigDecimal.valueOf(2, dec32Scale_2),
null,
BigDecimal.valueOf(16, dec32Scale_2),
BigDecimal.valueOf(10, dec32Scale_2)
};
interface CpuOpVV {
void computeNullSafe(Builder ret, HostColumnVector lhs, HostColumnVector rhs, int index);
}
interface CpuOpVS<S> {
void computeNullSafe(Builder ret, HostColumnVector lhs, S rhs, int index);
}
interface CpuOpSV<S> {
void computeNullSafe(Builder ret, S lhs, HostColumnVector rhs, int index);
}
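// Host-side reference implementations used to build expected results: each helper walks
// the input(s) element by element, appending a null wherever any input is null (unless
// evalNulls is set) and otherwise applying the supplied op.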
public static ColumnVector forEach(DType retType, ColumnVector lhs, ColumnVector rhs, CpuOpVV op) {
return forEach(retType, lhs, rhs, op, false);
}
public static ColumnVector forEach(DType retType, ColumnVector lhs, ColumnVector rhs, CpuOpVV op, boolean evalNulls) {
int len = (int)lhs.getRowCount();
try (HostColumnVector hostLHS = lhs.copyToHost();
HostColumnVector hostRHS = rhs.copyToHost();
Builder builder = HostColumnVector.builder(retType, len)) {
for (int i = 0; i < len; i++) {
if (!evalNulls && (hostLHS.isNull(i) || hostRHS.isNull(i))) {
builder.appendNull();
} else {
op.computeNullSafe(builder, hostLHS, hostRHS, i);
}
}
return builder.buildAndPutOnDevice();
}
}
public static <S> ColumnVector forEachS(DType retType, ColumnVector lhs, S rhs, CpuOpVS<S> op) {
return forEachS(retType, lhs, rhs, op, false);
}
public static <S> ColumnVector forEachS(DType retType, ColumnVector lhs, S rhs, CpuOpVS<S> op, boolean evalNulls) {
int len = (int)lhs.getRowCount();
try (HostColumnVector hostLHS = lhs.copyToHost();
Builder builder = HostColumnVector.builder(retType, len)) {
for (int i = 0; i < len; i++) {
if (!evalNulls && (hostLHS.isNull(i) || rhs == null)) {
builder.appendNull();
} else {
op.computeNullSafe(builder, hostLHS, rhs, i);
}
}
return builder.buildAndPutOnDevice();
}
}
public static <S> ColumnVector forEachS(DType retType, S lhs, ColumnVector rhs, CpuOpSV<S> op) {
return forEachS(retType, lhs, rhs, op, false);
}
public static <S> ColumnVector forEachS(DType retType, S lhs, ColumnVector rhs, CpuOpSV<S> op, boolean evalNulls) {
int len = (int)rhs.getRowCount();
try (HostColumnVector hostRHS = rhs.copyToHost();
Builder builder = HostColumnVector.builder(retType, len)) {
for (int i = 0; i < len; i++) {
if (!evalNulls && (hostRHS.isNull(i) || lhs == null)) {
builder.appendNull();
} else {
op.computeNullSafe(builder, lhs, hostRHS, i);
}
}
return builder.buildAndPutOnDevice();
}
}
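// Reference positive-modulus helpers: unlike Java's % operator, the result is always
// non-negative for a positive divisor, e.g. pmod(-3, 5) == 2 while -3 % 5 == -3 in Java.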
private double pmod(double i1, double i2) {
double r = i1 % i2;
if (r < 0) return (r + i2) % i2;
else return r;
}
private long pmod(long i1, long i2) {
long r = i1 % i2;
if (r < 0) return (r + i2) % i2;
else return r;
}
private int pmod(int i1, int i2) {
int r = i1 % i2;
if (r < 0) return (r + i2) % i2;
else return r;
}
@Test
public void testPmod() {
Double[] d1 = TestUtils.getDoubles(23423423424L, 50, ALL ^ NULL);
Double[] d2 = TestUtils.getDoubles(56456456454L, 50, NULL);
Integer[] i1 = TestUtils.getIntegers(76576554564L, 50, NULL);
Integer[] i2 = TestUtils.getIntegers(34502395934L, 50, NULL);
Long[] l1 = TestUtils.getLongs(29843248234L, 50, NULL);
Long[] l2 = TestUtils.getLongs(23423049234L, 50, NULL);
try (ColumnVector icv1 = ColumnVector.fromBoxedInts(i1);
ColumnVector icv2 = ColumnVector.fromBoxedInts(i2);
ColumnVector lcv1 = ColumnVector.fromBoxedLongs(l1);
ColumnVector lcv2 = ColumnVector.fromBoxedLongs(l2);
ColumnVector dcv1 = ColumnVector.fromBoxedDoubles(d1);
ColumnVector dcv2 = ColumnVector.fromBoxedDoubles(d2)) {
// Ints
try (ColumnVector pmod = icv1.pmod(icv2);
ColumnVector expected = forEach(DType.INT32, icv1, icv2,
(b, l, r, i) -> b.append(pmod(l.getInt(i), r.getInt(i))))) {
assertColumnsAreEqual(expected, pmod, "int32");
}
try (Scalar s = Scalar.fromInt(11);
ColumnVector pmod = icv1.pmod(s);
ColumnVector expected = forEachS(DType.INT32, icv1, 11,
(b, l, r, i) -> b.append(pmod(l.getInt(i) , r)))) {
assertColumnsAreEqual(expected, pmod, "int32 + scalar int32");
}
try (Scalar s = Scalar.fromInt(11);
ColumnVector pmod = s.pmod(icv2);
ColumnVector expected = forEachS(DType.INT32, 11, icv2,
(b, l, r, i) -> b.append(pmod(l , r.getInt(i))))) {
assertColumnsAreEqual(expected, pmod, "scalar int32 + int32");
}
// Long
try (ColumnVector pmod = lcv1.pmod(lcv2);
ColumnVector expected = forEach(DType.INT64, lcv1, lcv2,
(b, l, r, i) -> b.append(pmod(l.getLong(i), r.getLong(i))))) {
assertColumnsAreEqual(expected, pmod, "int64");
}
try (Scalar s = Scalar.fromLong(11L);
ColumnVector pmod = lcv1.pmod(s);
ColumnVector expected = forEachS(DType.INT64, lcv1, 11L,
(b, l, r, i) -> b.append(pmod(l.getLong(i) , r)))) {
assertColumnsAreEqual(expected, pmod, "int64 + scalar int64");
}
try (Scalar s = Scalar.fromLong(11L);
ColumnVector pmod = s.pmod(lcv2);
ColumnVector expected = forEachS(DType.INT64, 11L, lcv2,
(b, l, r, i) -> b.append(pmod(l , r.getLong(i))))) {
assertColumnsAreEqual(expected, pmod, "scalar int64 + int64");
}
// Double
try (ColumnVector pmod = dcv1.pmod(dcv2);
ColumnVector expected = forEach(DType.FLOAT64, dcv1, dcv2,
(b, l, r, i) -> b.append(pmod(l.getDouble(i), r.getDouble(i))))) {
assertColumnsAreEqual(expected, pmod, "float64");
}
try (Scalar s = Scalar.fromDouble(1.1d);
ColumnVector pmod = dcv1.pmod(s);
ColumnVector expected = forEachS(DType.FLOAT64, dcv1, 1.1d,
(b, l, r, i) -> b.append(pmod(l.getDouble(i) , r)))) {
assertColumnsAreEqual(expected, pmod, "float64 + scalar float64");
}
try (Scalar s = Scalar.fromDouble(1.1d);
ColumnVector pmod = s.pmod(dcv2);
ColumnVector expected = forEachS(DType.FLOAT64, 1.1d, dcv2,
(b, l, r, i) -> b.append(pmod(l , r.getDouble(i))))) {
assertColumnsAreEqual(expected, pmod, "scalar float64 + float64");
}
}
}
@Test
public void testAdd() {
try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector icv2 = ColumnVector.fromBoxedInts(INTS_2);
ColumnVector uicv1 = ColumnVector.fromBoxedUnsignedInts(UINTS_1);
ColumnVector uicv2 = ColumnVector.fromBoxedUnsignedInts(UINTS_2);
ColumnVector bcv1 = ColumnVector.fromBoxedBytes(BYTES_1);
ColumnVector ubcv1 = ColumnVector.fromBoxedUnsignedBytes(UBYTES_1);
ColumnVector fcv1 = ColumnVector.fromBoxedFloats(FLOATS_1);
ColumnVector fcv2 = ColumnVector.fromBoxedFloats(FLOATS_2);
ColumnVector lcv1 = ColumnVector.fromBoxedLongs(LONGS_1);
ColumnVector lcv2 = ColumnVector.fromBoxedLongs(LONGS_2);
ColumnVector ulcv1 = ColumnVector.fromBoxedUnsignedLongs(LONGS_1);
ColumnVector dcv1 = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector dcv2 = ColumnVector.fromBoxedDoubles(DOUBLES_2);
ColumnVector dec32cv1 = ColumnVector.fromDecimals(BIGDECIMAL32_1);
ColumnVector dec32cv2 = ColumnVector.fromDecimals(BIGDECIMAL32_2);
ColumnVector dec64cv1 = ColumnVector.decimalFromLongs(-dec64Scale_1, DECIMAL64_1);
ColumnVector dec64cv2 = ColumnVector.decimalFromLongs(-dec64Scale_2, DECIMAL64_2);
ColumnVector dec128cv1 = ColumnVector.decimalFromBigInt(-dec64Scale_1, DECIMAL128_1);
ColumnVector dec128cv2 = ColumnVector.decimalFromBigInt(-dec64Scale_2, DECIMAL128_2)) {
try (ColumnVector add = icv1.add(icv2);
ColumnVector expected = forEach(DType.INT32, icv1, icv2,
(b, l, r, i) -> b.append(l.getInt(i) + r.getInt(i)))) {
assertColumnsAreEqual(expected, add, "int32");
}
try (ColumnVector add = uicv1.add(uicv2);
ColumnVector expected = forEach(DType.UINT32, uicv1, uicv2,
(b, l, r, i) -> b.append(l.getInt(i) + r.getInt(i)))) {
assertColumnsAreEqual(expected, add, "uint32");
}
try (ColumnVector add = icv1.add(bcv1);
ColumnVector expected = forEach(DType.INT32, icv1, bcv1,
(b, l, r, i) -> b.append(l.getInt(i) + r.getByte(i)))) {
assertColumnsAreEqual(expected, add, "int32 + byte");
}
try (ColumnVector add = uicv1.add(ubcv1);
ColumnVector expected = forEach(DType.UINT32, uicv1, ubcv1,
(b, l, r, i) -> b.append(l.getInt(i) + Byte.toUnsignedInt(r.getByte(i))))) {
assertColumnsAreEqual(expected, add, "uint32 + uint8");
}
try (ColumnVector add = fcv1.add(fcv2);
ColumnVector expected = forEach(DType.FLOAT32, fcv1, fcv2,
(b, l, r, i) -> b.append(l.getFloat(i) + r.getFloat(i)))) {
assertColumnsAreEqual(expected, add, "float32");
}
try (ColumnVector addIntFirst = icv1.add(fcv2, DType.FLOAT32);
ColumnVector addFloatFirst = fcv2.add(icv1)) {
assertColumnsAreEqual(addIntFirst, addFloatFirst, "int + float vs float + int");
}
try (ColumnVector add = lcv1.add(lcv2);
ColumnVector expected = forEach(DType.INT64, lcv1, lcv2,
(b, l, r, i) -> b.append(l.getLong(i) + r.getLong(i)))) {
assertColumnsAreEqual(expected, add, "int64");
}
try (ColumnVector add = lcv1.add(bcv1);
ColumnVector expected = forEach(DType.INT64, lcv1, bcv1,
(b, l, r, i) -> b.append(l.getLong(i) + r.getByte(i)))) {
assertColumnsAreEqual(expected, add, "int64 + byte");
}
try (ColumnVector add = ulcv1.add(ubcv1);
ColumnVector expected = forEach(DType.UINT64, ulcv1, ubcv1,
(b, l, r, i) -> b.append(l.getLong(i) + Byte.toUnsignedLong(r.getByte(i))))) {
assertColumnsAreEqual(expected, add, "int64 + byte");
}
try (ColumnVector add = dcv1.add(dcv2);
ColumnVector expected = forEach(DType.FLOAT64, dcv1, dcv2,
(b, l, r, i) -> b.append(l.getDouble(i) + r.getDouble(i)))) {
assertColumnsAreEqual(expected, add, "float64");
}
try (ColumnVector addIntFirst = icv1.add(dcv2, DType.FLOAT64);
ColumnVector addDoubleFirst = dcv2.add(icv1)) {
assertColumnsAreEqual(addIntFirst, addDoubleFirst, "int + double vs double + int");
}
try (ColumnVector add = dec32cv1.add(dec32cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL32, -2), dec32cv1, dec32cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).add(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, add, "dec32");
}
}
try (ColumnVector add = dec64cv1.add(dec64cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL64, -6), dec64cv1, dec64cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).add(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, add, "dec64");
}
}
try (ColumnVector add = dec128cv1.add(dec128cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL128, -6), dec128cv1, dec128cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).add(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, add, "dec128");
}
}
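      // Scalar.fromDecimal uses the cudf sign convention: (scale, unscaled) represents
      // unscaled * 10^scale, so fromDecimal(2, 100) is 10000. java.math.BigDecimal uses the
      // opposite sign for its scale, hence BigDecimal.valueOf(100, -2) for the expected value.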
try (Scalar s = Scalar.fromDecimal(2, 100);
ColumnVector add = dec32cv1.add(s)) {
try (ColumnVector expected = forEachS(
DType.create(DType.DTypeEnum.DECIMAL32, -2), dec32cv1, BigDecimal.valueOf(100, -2),
(b, l, r, i) -> b.append(l.getBigDecimal(i).add(r)))) {
assertColumnsAreEqual(expected, add, "dec32 + scalar");
}
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector add = lcv1.add(s);
ColumnVector expected = forEachS(DType.FLOAT32, lcv1, 1.1f,
(b, l, r, i) -> b.append(l.getLong(i) + r))) {
assertColumnsAreEqual(expected, add, "int64 + scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector add = s.add(bcv1);
ColumnVector expected = forEachS(DType.INT16, (short) 100, bcv1,
(b, l, r, i) -> b.append((short)(l + r.getByte(i))))) {
assertColumnsAreEqual(expected, add, "scalar short + byte");
}
try (Scalar s = Scalar.fromUnsignedShort((short) 0x89ab);
ColumnVector add = s.add(ubcv1);
ColumnVector expected = forEachS(DType.UINT16, (short) 0x89ab, ubcv1,
(b, l, r, i) -> b.append((short)(Short.toUnsignedInt(l) + Byte.toUnsignedInt(r.getByte(i)))))) {
assertColumnsAreEqual(expected, add, "scalar uint16 + uint8");
}
}
}
@Test
public void testSub() {
try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector icv2 = ColumnVector.fromBoxedInts(INTS_2);
ColumnVector uicv1 = ColumnVector.fromBoxedUnsignedInts(UINTS_1);
ColumnVector uicv2 = ColumnVector.fromBoxedUnsignedInts(UINTS_2);
ColumnVector bcv1 = ColumnVector.fromBoxedBytes(BYTES_1);
ColumnVector ubcv1 = ColumnVector.fromBoxedUnsignedBytes(UBYTES_1);
ColumnVector fcv1 = ColumnVector.fromBoxedFloats(FLOATS_1);
ColumnVector fcv2 = ColumnVector.fromBoxedFloats(FLOATS_2);
ColumnVector lcv1 = ColumnVector.fromBoxedLongs(LONGS_1);
ColumnVector lcv2 = ColumnVector.fromBoxedLongs(LONGS_2);
ColumnVector ulcv1 = ColumnVector.fromBoxedUnsignedLongs(LONGS_1);
ColumnVector dcv1 = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector dcv2 = ColumnVector.fromBoxedDoubles(DOUBLES_2);
ColumnVector dec32cv1 = ColumnVector.fromDecimals(BIGDECIMAL32_1);
ColumnVector dec32cv2 = ColumnVector.fromDecimals(BIGDECIMAL32_2);
ColumnVector dec64cv1 = ColumnVector.decimalFromLongs(-dec64Scale_1, DECIMAL64_1);
ColumnVector dec64cv2 = ColumnVector.decimalFromLongs(-dec64Scale_2, DECIMAL64_2);
ColumnVector dec128cv1 = ColumnVector.decimalFromBigInt(-dec64Scale_1, DECIMAL128_1);
ColumnVector dec128cv2 = ColumnVector.decimalFromBigInt(-dec64Scale_2, DECIMAL128_2)) {
try (ColumnVector sub = icv1.sub(icv2);
ColumnVector expected = forEach(DType.INT32, icv1, icv2,
(b, l, r, i) -> b.append(l.getInt(i) - r.getInt(i)))) {
assertColumnsAreEqual(expected, sub, "int32");
}
try (ColumnVector sub = uicv1.sub(uicv2);
ColumnVector expected = forEach(DType.UINT32, uicv1, uicv2,
(b, l, r, i) -> b.append(l.getInt(i) - r.getInt(i)))) {
assertColumnsAreEqual(expected, sub, "uint32");
}
try (ColumnVector sub = icv1.sub(bcv1);
ColumnVector expected = forEach(DType.INT32, icv1, bcv1,
(b, l, r, i) -> b.append(l.getInt(i) - r.getByte(i)))) {
assertColumnsAreEqual(expected, sub, "int32 - byte");
}
try (ColumnVector sub = uicv1.sub(ubcv1);
ColumnVector expected = forEach(DType.UINT32, uicv1, ubcv1,
(b, l, r, i) -> b.append(l.getInt(i) - Byte.toUnsignedInt(r.getByte(i))))) {
assertColumnsAreEqual(expected, sub, "uint32 - uint8");
}
try (ColumnVector sub = fcv1.sub(fcv2);
ColumnVector expected = forEach(DType.FLOAT32, fcv1, fcv2,
(b, l, r, i) -> b.append(l.getFloat(i) - r.getFloat(i)))) {
assertColumnsAreEqual(expected, sub, "float32");
}
try (ColumnVector sub = icv1.sub(fcv2, DType.FLOAT32);
ColumnVector expected = forEach(DType.FLOAT32, icv1, fcv2,
(b, l, r, i) -> b.append(l.getInt(i) - r.getFloat(i)))) {
assertColumnsAreEqual(expected, sub, "int - float");
}
try (ColumnVector sub = lcv1.sub(lcv2);
ColumnVector expected = forEach(DType.INT64, lcv1, lcv2,
(b, l, r, i) -> b.append(l.getLong(i) - r.getLong(i)))) {
assertColumnsAreEqual(expected, sub, "int64");
}
try (ColumnVector sub = lcv1.sub(bcv1);
ColumnVector expected = forEach(DType.INT64, lcv1, bcv1,
(b, l, r, i) -> b.append(l.getLong(i) - r.getByte(i)))) {
assertColumnsAreEqual(expected, sub, "int64 - byte");
}
try (ColumnVector sub = ulcv1.sub(ubcv1);
ColumnVector expected = forEach(DType.UINT64, ulcv1, ubcv1,
(b, l, r, i) -> b.append(l.getLong(i) - Byte.toUnsignedLong(r.getByte(i))))) {
assertColumnsAreEqual(expected, sub, "uint64 - uint8");
}
try (ColumnVector sub = dcv1.sub(dcv2);
ColumnVector expected = forEach(DType.FLOAT64, dcv1, dcv2,
(b, l, r, i) -> b.append(l.getDouble(i) - r.getDouble(i)))) {
assertColumnsAreEqual(expected, sub, "float64");
}
try (ColumnVector sub = dcv2.sub(icv1);
ColumnVector expected = forEach(DType.FLOAT64, dcv2, icv1,
(b, l, r, i) -> b.append(l.getDouble(i) - r.getInt(i)))) {
assertColumnsAreEqual(expected, sub, "double - int");
}
try (ColumnVector sub = dec32cv1.sub(dec32cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL32, -2), dec32cv1, dec32cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).subtract(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, sub, "dec32");
}
}
try (ColumnVector sub = dec64cv1.sub(dec64cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL64, -6), dec64cv1, dec64cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).subtract(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, sub, "dec64");
}
}
try (Scalar s = Scalar.fromDecimal(2, 100);
ColumnVector sub = dec32cv1.sub(s)) {
try (ColumnVector expected = forEachS(
DType.create(DType.DTypeEnum.DECIMAL32, -2), dec32cv1, BigDecimal.valueOf(100, -2),
(b, l, r, i) -> b.append(l.getBigDecimal(i).subtract(r)))) {
assertColumnsAreEqual(expected, sub, "dec32 - scalar");
}
}
try (ColumnVector sub = dec128cv1.sub(dec128cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL128, -6), dec128cv1, dec128cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).subtract(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, sub, "dec128");
}
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector sub = lcv1.sub(s);
ColumnVector expected = forEachS(DType.FLOAT32, lcv1, 1.1f,
(b, l, r, i) -> b.append(l.getLong(i) - r))) {
assertColumnsAreEqual(expected, sub, "int64 - scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector sub = s.sub(bcv1);
ColumnVector expected = forEachS(DType.INT16, (short) 100, bcv1,
(b, l, r, i) -> b.append((short)(l - r.getByte(i))))) {
assertColumnsAreEqual(expected, sub, "scalar short - byte");
}
try (Scalar s = Scalar.fromUnsignedShort((short) 0x89ab);
ColumnVector sub = s.sub(ubcv1);
ColumnVector expected = forEachS(DType.UINT16, (short) 0x89ab, ubcv1,
(b, l, r, i) -> b.append((short)(Short.toUnsignedInt(l) - Byte.toUnsignedInt(r.getByte(i)))))) {
assertColumnsAreEqual(expected, sub, "scalar uint16 - uint8");
}
}
}
  // The rest of the tests are very basic to ensure that operations plumbing is in place, not
  // to exhaustively test the underlying implementation.
@Test
public void testMul() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector dec32cv1 = ColumnVector.fromDecimals(BIGDECIMAL32_1);
ColumnVector dec32cv2 = ColumnVector.fromDecimals(BIGDECIMAL32_2);
ColumnVector dec64cv1 = ColumnVector.decimalFromLongs(-dec64Scale_1, DECIMAL64_1);
ColumnVector dec64cv2 = ColumnVector.decimalFromLongs(-dec64Scale_2, DECIMAL64_2);
ColumnVector dec128cv1 = ColumnVector.decimalFromBigInt(-dec64Scale_1, DECIMAL128_1);
ColumnVector dec128cv2 = ColumnVector.decimalFromBigInt(-dec64Scale_2, DECIMAL128_2)) {
try (ColumnVector answer = icv.mul(dcv);
ColumnVector expected = forEach(DType.FLOAT64, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) * r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 * double");
}
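      // For decimal operands the product's scale is the sum of the input scales; the dec128
      // case at the end of this test computes its expected scale that way explicitly.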
try (ColumnVector mul = dec32cv1.mul(dec32cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL32, 1), dec32cv1, dec32cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).multiply(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, mul, "dec32");
}
}
try (ColumnVector mul = dec64cv1.mul(dec64cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL64, -4), dec64cv1, dec64cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).multiply(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, mul, "dec64");
}
}
try (Scalar s = Scalar.fromDecimal(2, 100);
ColumnVector mul = dec32cv1.mul(s)) {
try (ColumnVector expected = forEachS(
DType.create(DType.DTypeEnum.DECIMAL32, 0), dec32cv1, BigDecimal.valueOf(100, -2),
(b, l, r, i) -> b.append(l.getBigDecimal(i).multiply(r)))) {
assertColumnsAreEqual(expected, mul, "dec32 * scalar");
}
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector answer = icv.mul(s);
ColumnVector expected = forEachS(DType.FLOAT32, icv, 1.1f,
(b, l, r, i) -> b.append(l.getInt(i) * r))) {
assertColumnsAreEqual(expected, answer, "int64 * scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.mul(icv);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv,
(b, l, r, i) -> b.append(l * r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short * int32");
}
try (Scalar s = Scalar.fromUnsignedShort((short) 0x89ab);
ColumnVector uicv = ColumnVector.fromBoxedUnsignedInts(UINTS_1);
ColumnVector answer = s.mul(uicv);
ColumnVector expected = forEachS(DType.UINT32, (short) 0x89ab, uicv,
(b, l, r, i) -> b.append(Short.toUnsignedInt(l) * r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar uint16 * uint32");
}
try (ColumnVector mul = dec128cv1.mul(dec128cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL128, dec128cv1.type.getScale() + dec128cv2.type.getScale()), dec128cv1, dec128cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).multiply(r.getBigDecimal(i))))) {
assertColumnsAreEqual(expected, mul, "dec128");
}
}
}
}
@Test
public void testDiv() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector dec32cv1 = ColumnVector.fromDecimals(BIGDECIMAL32_1);
ColumnVector dec32cv2 = ColumnVector.fromDecimals(BIGDECIMAL32_2);
ColumnVector dec64cv1 = ColumnVector.decimalFromLongs(-dec64Scale_1, DECIMAL64_1);
ColumnVector dec64cv2 = ColumnVector.decimalFromLongs(-dec64Scale_2, DECIMAL64_2)) {
try (ColumnVector answer = icv.div(dcv);
ColumnVector expected = forEach(DType.FLOAT64, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) / r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 / double");
}
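      // Decimal division is computed at scale lhs - rhs (cudf sign convention), so the
      // expected BigDecimal quotients are pinned to the matching scale with RoundingMode.DOWN.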
try (ColumnVector div = dec32cv1.div(dec32cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL32, -5), dec32cv1, dec32cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).divide(
r.getBigDecimal(i), 5, RoundingMode.DOWN), RoundingMode.DOWN))) {
assertColumnsAreEqual(expected, div, "dec32");
}
}
try (ColumnVector div = dec64cv1.div(dec64cv2)) {
try (ColumnVector expected = forEach(
DType.create(DType.DTypeEnum.DECIMAL64, -8), dec64cv1, dec64cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).divide(
r.getBigDecimal(i), 8, RoundingMode.DOWN), RoundingMode.DOWN))) {
assertColumnsAreEqual(expected, div, "dec64");
}
}
try (Scalar s = Scalar.fromDecimal(2, 100);
ColumnVector div = s.div(dec32cv1)) {
try (ColumnVector expected = forEachS(
DType.create(DType.DTypeEnum.DECIMAL32, 4), BigDecimal.valueOf(100, -2), dec32cv1,
(b, l, r, i) -> b.append(l.divide(r.getBigDecimal(i), -4, RoundingMode.DOWN)))) {
assertColumnsAreEqual(expected, div, "scalar dec32 / dec32");
}
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector answer = icv.div(s);
ColumnVector expected = forEachS(DType.FLOAT32, icv, 1.1f,
(b, l, r, i) -> b.append(l.getInt(i) / r))) {
assertColumnsAreEqual(expected, answer, "int64 / scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.div(icv);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv,
(b, l, r, i) -> b.append(l / r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short / int32");
}
try (Scalar s = Scalar.fromUnsignedShort((short) 0x89ab);
ColumnVector uicv = ColumnVector.fromBoxedUnsignedInts(UINTS_1);
ColumnVector answer = s.div(uicv);
ColumnVector expected = forEachS(DType.UINT32, (short) 0x89ab, uicv,
(b, l, r, i) -> b.append((int)(Short.toUnsignedLong(l) / Integer.toUnsignedLong(r.getInt(i)))))) {
assertColumnsAreEqual(expected, answer, "scalar uint16 / uint32");
}
}
}
@Test
public void testTrueDiv() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1)) {
try (ColumnVector answer = icv.trueDiv(dcv);
ColumnVector expected = forEach(DType.FLOAT64, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) / r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 / double");
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector answer = icv.trueDiv(s);
ColumnVector expected = forEachS(DType.FLOAT32, icv, 1.1f,
(b, l, r, i) -> b.append(l.getInt(i) / r))) {
assertColumnsAreEqual(expected, answer, "int64 / scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.trueDiv(icv);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv,
(b, l, r, i) -> b.append(l / r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short / int32");
}
}
}
@Test
public void testFloorDiv() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1)) {
try (ColumnVector answer = icv.floorDiv(dcv);
ColumnVector expected = forEach(DType.FLOAT64, icv, dcv,
(b, l, r, i) -> b.append(Math.floor(l.getInt(i) / r.getDouble(i))))) {
assertColumnsAreEqual(expected, answer, "int32 / double");
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector answer = icv.floorDiv(s);
ColumnVector expected = forEachS(DType.FLOAT32, icv, 1.1f,
(b, l, r, i) -> b.append((float)Math.floor(l.getInt(i) / r)))) {
assertColumnsAreEqual(expected, answer, "int64 / scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.floorDiv(icv);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv,
(b, l, r, i) -> b.append(l / r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short / int32");
}
try (Scalar s = Scalar.fromUnsignedShort((short) 0x89ab);
ColumnVector uicv = ColumnVector.fromBoxedUnsignedInts(UINTS_1);
ColumnVector answer = s.floorDiv(uicv);
ColumnVector expected = forEachS(DType.UINT32, (short) 0x89ab, uicv,
(b, l, r, i) -> b.append((int)(Short.toUnsignedLong(l) / Integer.toUnsignedLong(r.getInt(i)))))) {
assertColumnsAreEqual(expected, answer, "scalar uint16 / uint32");
}
}
}
@Test
public void testMod() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1)) {
try (ColumnVector answer = icv.mod(dcv);
ColumnVector expected = forEach(DType.FLOAT64, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) % r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 % double");
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector answer = icv.mod(s);
ColumnVector expected = forEachS(DType.FLOAT32, icv, 1.1f,
(b, l, r, i) -> b.append(l.getInt(i) % r))) {
assertColumnsAreEqual(expected, answer, "int64 % scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.mod(icv);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv,
(b, l, r, i) -> b.append(l % r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short % int32");
}
}
}
@Test
public void testPow() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1)) {
try (ColumnVector answer = icv.pow(dcv);
ColumnVector expected = forEach(DType.FLOAT64, icv, dcv,
(b, l, r, i) -> b.append(Math.pow(l.getInt(i), r.getDouble(i))))) {
assertColumnsAreEqual(expected, answer, "int32 pow double");
}
try (Scalar s = Scalar.fromFloat(1.1f);
ColumnVector answer = icv.pow(s);
ColumnVector expected = forEachS(DType.FLOAT32, icv, 1.1f,
(b, l, r, i) -> b.append((float)Math.pow(l.getInt(i), r)))) {
assertColumnsAreEqual(expected, answer, "int64 pow scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.pow(icv);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv,
(b, l, r, i) -> b.append((int)Math.pow(l, r.getInt(i))))) {
assertColumnsAreEqual(expected, answer, "scalar short pow int32");
}
}
}
@Test
public void testEqual() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector intscalar = ColumnVector.fromInts(4);
Scalar sscv = Scalar.structFromColumnViews(intscalar);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector structcv1 = ColumnVector.fromStructs(structType, int_struct_data_1);
ColumnVector structcv2 = ColumnVector.fromStructs(structType, int_struct_data_2);
ColumnVector dec32cv_1 = ColumnVector.decimalFromInts(-dec32Scale_1, DECIMAL32_1);
ColumnVector dec32cv_2 = ColumnVector.decimalFromInts(-dec32Scale_2, DECIMAL32_2)) {
try (ColumnVector answer = icv.equalTo(dcv);
ColumnVector expected = forEach(DType.BOOL8, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) == r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 == double");
}
try (ColumnVector answer = dec32cv_1.equalTo(dec32cv_2);
ColumnVector expected = forEach(DType.BOOL8, dec32cv_1, dec32cv_2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r.getBigDecimal(i)) == 0))) {
assertColumnsAreEqual(expected, answer, "dec32 == dec32 ");
}
try (Scalar s = Scalar.fromDecimal(-2, 200);
ColumnVector answer = dec32cv_2.equalTo(s)) {
        try (ColumnVector expected = forEachS(DType.BOOL8, dec32cv_2, BigDecimal.valueOf(200, 2),
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r) == 0))) {
assertColumnsAreEqual(expected, answer, "dec32 == scalar dec32");
}
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.equalTo(s);
ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f,
(b, l, r, i) -> b.append(l.getInt(i) == r))) {
assertColumnsAreEqual(expected, answer, "int64 == scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.equalTo(icv);
ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv,
(b, l, r, i) -> b.append(l == r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short == int32");
}
Short[] unsignedShorts = new Short[]{(short)0x89ab, (short)0xffff, 0, 1};
Integer[] unsignedInts = new Integer[]{0x89ab, 0xffff, 0, 1};
try (ColumnVector uscv = ColumnVector.fromBoxedUnsignedShorts(unsignedShorts);
ColumnVector uicv = ColumnVector.fromBoxedUnsignedInts(unsignedInts);
ColumnVector answer = uscv.equalTo(uicv);
ColumnVector expected = forEach(DType.BOOL8, uscv, uicv,
(b, l, r, i) -> b.append(Short.toUnsignedInt(l.getShort(i)) == r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "uint16 == uint32");
}
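      // The struct comparisons below check boxed values with ==, i.e. reference equality;
      // that only holds for the small test integers thanks to the JVM's Integer cache.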
try (ColumnVector answersv = sscv.equalTo(structcv1);
ColumnVector expectedsv = forEachS(DType.BOOL8, 4, structcv1,
(b, l, r, i) -> b.append(r.isNull(i) ? false :
l == r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 == struct int32");
}
try (ColumnVector answervs = structcv1.equalTo(sscv);
ColumnVector expectedvs = forEachS(DType.BOOL8, structcv1, 4,
(b, l, r, i) -> b.append(l.isNull(i) ? false :
r == l.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedvs, answervs, "struct int32 == scalar struct int32");
}
try (ColumnVector answervv = structcv1.equalTo(structcv2);
ColumnVector expectedvv = forEach(DType.BOOL8, structcv1, structcv2,
(b, l, r, i) -> b.append(l.isNull(i) || r.isNull(i) ||
l.getStruct(i).dataRecord.get(0) == null || r.getStruct(i).dataRecord.get(0) == null ?
false : l.getStruct(i).dataRecord.get(0) == r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedvv, answervv, "struct int32 == struct int32");
}
}
}
@Test
public void testStringEqualScalar() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("b")) {
try (ColumnVector answer = a.equalTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.equalTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.equalTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, null, true, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testStringEqualScalarNotPresent() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("boo")) {
try (ColumnVector answer = a.equalTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.equalTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, null, false, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testNotEqual() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector intscalar = ColumnVector.fromInts(4);
Scalar sscv = Scalar.structFromColumnViews(intscalar);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector structcv1 = ColumnVector.fromStructs(structType, int_struct_data_1);
ColumnVector structcv2 = ColumnVector.fromStructs(structType, int_struct_data_2);
ColumnVector dec32cv_1 = ColumnVector.decimalFromInts(-dec32Scale_1, DECIMAL32_1);
ColumnVector dec32cv_2 = ColumnVector.decimalFromInts(-dec32Scale_2, DECIMAL32_2)) {
try (ColumnVector answer = icv.notEqualTo(dcv);
ColumnVector expected = forEach(DType.BOOL8, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) != r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 != double");
}
try (ColumnVector answer = dec32cv_1.notEqualTo(dec32cv_2);
ColumnVector expected = forEach(DType.BOOL8, dec32cv_1, dec32cv_2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r.getBigDecimal(i)) != 0))) {
assertColumnsAreEqual(expected, answer, "dec32 != dec32 ");
}
try (Scalar s = Scalar.fromDecimal(-2, 200);
ColumnVector answer = dec32cv_2.notEqualTo(s)) {
        try (ColumnVector expected = forEachS(DType.BOOL8, dec32cv_2, BigDecimal.valueOf(200, 2),
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r) != 0))) {
assertColumnsAreEqual(expected, answer, "dec32 != scalar dec32");
}
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.notEqualTo(s);
ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f,
(b, l, r, i) -> b.append(l.getInt(i) != r))) {
assertColumnsAreEqual(expected, answer, "int64 != scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.notEqualTo(icv);
ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv,
(b, l, r, i) -> b.append(l != r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short != int32");
}
try (ColumnVector answersv = sscv.notEqualTo(structcv1);
ColumnVector expectedsv = forEachS(DType.BOOL8, 4, structcv1,
(b, l, r, i) -> b.append(r.isNull(i) ? true : l != r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 != struct int32");
}
try (ColumnVector answervs = structcv1.notEqualTo(sscv);
ColumnVector expectedvs = forEachS(DType.BOOL8, structcv1, 4,
(b, l, r, i) -> b.append(l.isNull(i) ? true : l.getStruct(i).dataRecord.get(0) != r))) {
assertColumnsAreEqual(expectedvs, answervs, "struct int32 != scalar struct int32");
}
try (ColumnVector answervv = structcv1.notEqualTo(structcv2);
ColumnVector expectedvv = forEach(DType.BOOL8, structcv1, structcv2,
(b, l, r, i) -> b.append(l.isNull(i) ? !r.isNull(i) :
r.isNull(i) || l.getStruct(i).dataRecord.get(0) != r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedvv, answervv, "struct int32 != struct int32");
}
}
}
@Test
public void testStringNotEqualScalar() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("b")) {
try (ColumnVector answer = a.notEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.notEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.notEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, null, false, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testStringNotEqualScalarNotPresent() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("abc")) {
try (ColumnVector answer = a.notEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.notEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, null, true, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testLessThan() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector intscalar = ColumnVector.fromInts(4);
Scalar sscv = Scalar.structFromColumnViews(intscalar);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector structcv1 = ColumnVector.fromStructs(structType, int_struct_data_1);
ColumnVector structcv2 = ColumnVector.fromStructs(structType, int_struct_data_2);
ColumnVector dec32cv_1 = ColumnVector.decimalFromInts(-dec32Scale_1, DECIMAL32_1);
ColumnVector dec32cv_2 = ColumnVector.decimalFromInts(-dec32Scale_2, DECIMAL32_2)) {
try (ColumnVector answer = icv.lessThan(dcv);
ColumnVector expected = forEach(DType.BOOL8, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) < r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 < double");
}
try (ColumnVector answer = dec32cv_1.lessThan(dec32cv_2);
ColumnVector expected = forEach(DType.BOOL8, dec32cv_1, dec32cv_2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r.getBigDecimal(i)) < 0))) {
assertColumnsAreEqual(expected, answer, "dec32 < dec32 ");
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.lessThan(s);
ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f,
(b, l, r, i) -> b.append(l.getInt(i) < r))) {
assertColumnsAreEqual(expected, answer, "int64 < scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.lessThan(icv);
ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv,
(b, l, r, i) -> b.append(l < r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short < int32");
}
try (ColumnVector answersv = sscv.lessThan(structcv1);
ColumnVector expectedsv = forEachS(DType.BOOL8, 4, structcv1,
(b, l, r, i) -> b.append(r.isNull(i) ? false :
l < (Integer) r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 < struct int32");
}
try (ColumnVector answervs = structcv1.lessThan(sscv);
ColumnVector expectedvs = forEachS(DType.BOOL8, structcv1, 4,
(b, l, r, i) -> b.append(l.isNull(i) ? true :
(Integer) l.getStruct(i).dataRecord.get(0) < r))) {
assertColumnsAreEqual(expectedvs, answervs, "struct int32 < scalar struct int32");
}
try (ColumnVector answervv = structcv1.lessThan(structcv2);
ColumnVector expectedvv = forEach(DType.BOOL8, structcv1, structcv2,
(b, l, r, i) -> b.append(l.isNull(i) ? true : r.isNull(i) ||
(Integer)l.getStruct(i).dataRecord.get(0) < (Integer)r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedvv, answervv, "struct int32 < struct int32");
}
}
}
@Test
public void testStringLessThanScalar() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("b")) {
try (ColumnVector answer = a.lessThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.lessThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.lessThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, null, false, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testStringLessThanScalarNotPresent() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("abc")) {
try (ColumnVector answer = a.lessThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.lessThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.lessThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, null, false, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testGreaterThan() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector intscalar = ColumnVector.fromInts(4);
Scalar sscv = Scalar.structFromColumnViews(intscalar);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector structcv1 = ColumnVector.fromStructs(structType, int_struct_data_1);
ColumnVector structcv2 = ColumnVector.fromStructs(structType, int_struct_data_2);
ColumnVector dec32cv1 = ColumnVector.fromDecimals(BIGDECIMAL32_1);
ColumnVector dec32cv2 = ColumnVector.fromDecimals(BIGDECIMAL32_2)) {
try (ColumnVector answer = icv.greaterThan(dcv);
ColumnVector expected = forEach(DType.BOOL8, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) > r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 > double");
}
try (ColumnVector answer = dec32cv2.greaterThan(dec32cv1);
ColumnVector expected = forEach(DType.BOOL8, dec32cv2, dec32cv1,
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r.getBigDecimal(i)) > 0))) {
assertColumnsAreEqual(expected, answer, "dec32 > dec32 ");
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.greaterThan(s);
ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f,
(b, l, r, i) -> b.append(l.getInt(i) > r))) {
assertColumnsAreEqual(expected, answer, "int64 > scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.greaterThan(icv);
ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv,
(b, l, r, i) -> b.append(l > r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short > int32");
}
try (ColumnVector answersv = sscv.greaterThan(structcv1);
ColumnVector expectedsv = forEachS(DType.BOOL8, 4, structcv1,
(b, l, r, i) -> b.append(r.isNull(i) ? true :
l > (Integer) r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 > struct int32");
}
try (ColumnVector answervs = structcv1.greaterThan(sscv);
ColumnVector expectedvs = forEachS(DType.BOOL8, structcv1, 4,
(b, l, r, i) -> b.append(l.isNull(i) ? false :
(Integer) l.getStruct(i).dataRecord.get(0) > r))) {
assertColumnsAreEqual(expectedvs, answervs, "struct int32 > scalar struct int32");
}
try (ColumnVector answervv = structcv1.greaterThan(structcv2);
ColumnVector expectedvv = forEach(DType.BOOL8, structcv1, structcv2,
(b, l, r, i) -> b.append(l.isNull(i) ? false : r.isNull(i) ||
(Integer)l.getStruct(i).dataRecord.get(0) > (Integer)r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedvv, answervv, "struct int32 > struct int32");
}
}
}
@Test
public void testStringGreaterThanScalar() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("b")) {
try (ColumnVector answer = a.greaterThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.greaterThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.greaterThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, null, false, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testStringGreaterThanScalarNotPresent() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("boo")) {
try (ColumnVector answer = a.greaterThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.greaterThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.greaterThan(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, null, false, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testLessOrEqualTo() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector intscalar = ColumnVector.fromInts(4);
Scalar sscv = Scalar.structFromColumnViews(intscalar);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector structcv1 = ColumnVector.fromStructs(structType, int_struct_data_1);
ColumnVector structcv2 = ColumnVector.fromStructs(structType, int_struct_data_2);
ColumnVector dec32cv = ColumnVector.decimalFromInts(-dec32Scale_2, DECIMAL32_2)) {
try (ColumnVector answer = icv.lessOrEqualTo(dcv);
ColumnVector expected = forEach(DType.BOOL8, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) <= r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 <= double");
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.lessOrEqualTo(s);
ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f,
(b, l, r, i) -> b.append(l.getInt(i) <= r))) {
assertColumnsAreEqual(expected, answer, "int64 <= scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.lessOrEqualTo(icv);
ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv,
(b, l, r, i) -> b.append(l <= r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short <= int32");
}
try (Scalar s = Scalar.fromDecimal(-2, 200);
ColumnVector answer = dec32cv.lessOrEqualTo(s)) {
try (ColumnVector expected = forEachS(DType.BOOL8, dec32cv, BigDecimal.valueOf(200, 2),
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r) <= 0))) {
assertColumnsAreEqual(expected, answer, "dec32 <= scalar dec32");
}
}
try (ColumnVector answersv = sscv.lessOrEqualTo(structcv1);
ColumnVector expectedsv = forEachS(DType.BOOL8, 4, structcv1,
(b, l, r, i) -> b.append(r.isNull(i) ? false :
l <= (Integer) r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 <= struct int32");
}
try (ColumnVector answervs = structcv1.lessOrEqualTo(sscv);
ColumnVector expectedvs = forEachS(DType.BOOL8, structcv1, 4,
(b, l, r, i) -> b.append(l.isNull(i) ? true :
(Integer) l.getStruct(i).dataRecord.get(0) <= r))) {
assertColumnsAreEqual(expectedvs, answervs, "struct int32 <= scalar struct int32");
}
try (ColumnVector answervv = structcv1.lessOrEqualTo(structcv2);
ColumnVector expectedvv = forEach(DType.BOOL8, structcv1, structcv2,
(b, l, r, i) -> b.append(l.isNull(i) ? true : !r.isNull(i) &&
(Integer)l.getStruct(i).dataRecord.get(0) <= (Integer)r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedvv, answervv, "struct int32 <= struct int32");
}
}
}
@Test
public void testStringLessOrEqualToScalar() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("b")) {
try (ColumnVector answer = a.lessOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.lessOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.lessOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, null, true, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testStringLessOrEqualToScalarNotPresent() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("boo")) {
try (ColumnVector answer = a.lessOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.lessOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.lessOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, null, true, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testGreaterOrEqualTo() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector intscalar = ColumnVector.fromInts(4);
Scalar sscv = Scalar.structFromColumnViews(intscalar);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector structcv1 = ColumnVector.fromStructs(structType, int_struct_data_1);
ColumnVector structcv2 = ColumnVector.fromStructs(structType, int_struct_data_2);
ColumnVector dec32cv = ColumnVector.decimalFromInts(-dec32Scale_2, DECIMAL32_2)) {
try (ColumnVector answer = icv.greaterOrEqualTo(dcv);
ColumnVector expected = forEach(DType.BOOL8, icv, dcv,
(b, l, r, i) -> b.append(l.getInt(i) >= r.getDouble(i)))) {
assertColumnsAreEqual(expected, answer, "int32 >= double");
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.greaterOrEqualTo(s);
ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f,
(b, l, r, i) -> b.append(l.getInt(i) >= r))) {
assertColumnsAreEqual(expected, answer, "int64 >= scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.greaterOrEqualTo(icv);
ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv,
(b, l, r, i) -> b.append(l >= r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short >= int32");
}
try (Scalar s = Scalar.fromDecimal(-2, 200);
ColumnVector answer = dec32cv.greaterOrEqualTo(s)) {
try (ColumnVector expected = forEachS(DType.BOOL8, dec32cv, BigDecimal.valueOf(200, 2),
(b, l, r, i) -> b.append(l.getBigDecimal(i).compareTo(r) >= 0))) {
assertColumnsAreEqual(expected, answer, "dec32 >= scalar dec32");
}
}
try (ColumnVector answersv = sscv.greaterOrEqualTo(structcv1);
ColumnVector expectedsv = forEachS(DType.BOOL8, 4, structcv1,
(b, l, r, i) -> b.append(r.isNull(i) ? true : l >= (Integer) r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 >= struct int32");
}
try (ColumnVector answervs = structcv1.greaterOrEqualTo(sscv);
ColumnVector expectedvs = forEachS(DType.BOOL8, structcv1, 4,
(b, l, r, i) -> b.append(l.isNull(i) ? false : (Integer) l.getStruct(i).dataRecord.get(0) >= r))) {
assertColumnsAreEqual(expectedvs, answervs, "struct int32 >= scalar struct int32");
}
try (ColumnVector answervv = structcv1.greaterOrEqualTo(structcv2);
ColumnVector expectedvv = forEach(DType.BOOL8, structcv1, structcv2,
(b, l, r, i) -> b.append(l.isNull(i) ? false : !r.isNull(i) &&
(Integer)l.getStruct(i).dataRecord.get(0) >= (Integer)r.getStruct(i).dataRecord.get(0)))) {
assertColumnsAreEqual(expectedvv, answervv, "struct int32 >= struct int32");
}
}
}
@Test
public void testStringGreaterOrEqualToScalar() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("b")) {
try (ColumnVector answer = a.greaterOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.greaterOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.greaterOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, null, true, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testStringGreaterOrEqualToScalarNotPresent() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("abc")) {
try (ColumnVector answer = a.greaterOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, true)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.greaterOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.greaterOrEqualTo(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, null, true, null)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testBitAnd() {
try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector icv2 = ColumnVector.fromBoxedInts(INTS_2)) {
try (ColumnVector answer = icv1.bitAnd(icv2);
ColumnVector expected = forEach(DType.INT32, icv1, icv2,
(b, l, r, i) -> b.append(l.getInt(i) & r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "int32 & int32");
}
try (Scalar s = Scalar.fromInt(0x01);
ColumnVector answer = icv1.bitAnd(s);
ColumnVector expected = forEachS(DType.INT32, icv1, 0x01,
(b, l, r, i) -> b.append(l.getInt(i) & r))) {
assertColumnsAreEqual(expected, answer, "int32 & scalar int32");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.bitAnd(icv1);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv1,
(b, l, r, i) -> b.append(l & r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short & int32");
}
}
}
@Test
public void testBitOr() {
try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector icv2 = ColumnVector.fromBoxedInts(INTS_2)) {
try (ColumnVector answer = icv1.bitOr(icv2);
ColumnVector expected = forEach(DType.INT32, icv1, icv2,
(b, l, r, i) -> b.append(l.getInt(i) | r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "int32 | int32");
}
try (Scalar s = Scalar.fromInt(0x01);
ColumnVector answer = icv1.bitOr(s);
ColumnVector expected = forEachS(DType.INT32, icv1, 0x01,
(b, l, r, i) -> b.append(l.getInt(i) | r))) {
assertColumnsAreEqual(expected, answer, "int32 | scalar int32");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.bitOr(icv1);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv1,
(b, l, r, i) -> b.append(l | r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short | int32");
}
}
}
@Test
public void testBitXor() {
try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector icv2 = ColumnVector.fromBoxedInts(INTS_2)) {
try (ColumnVector answer = icv1.bitXor(icv2);
ColumnVector expected = forEach(DType.INT32, icv1, icv2,
(b, l, r, i) -> b.append(l.getInt(i) ^ r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "int32 ^ int32");
}
try (Scalar s = Scalar.fromInt(0x01);
ColumnVector answer = icv1.bitXor(s);
ColumnVector expected = forEachS(DType.INT32, icv1, 0x01,
(b, l, r, i) -> b.append(l.getInt(i) ^ r))) {
assertColumnsAreEqual(expected, answer, "int32 ^ scalar int32");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.bitXor(icv1);
ColumnVector expected = forEachS(DType.INT32, (short) 100, icv1,
(b, l, r, i) -> b.append(l ^ r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "scalar short ^ int32");
}
}
}
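
  // NULL_LOGICAL_AND and NULL_LOGICAL_OR follow SQL/Kleene three-valued logic, where null
  // means "unknown": false AND null is false, true AND null is null, true OR null is true,
  // and false OR null is null.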
@Test
public void testNullAnd() {
try (ColumnVector icv1 = ColumnVector.fromBoxedBooleans(
true, true, true,
false, false, false,
null, null, null);
ColumnVector icv2 = ColumnVector.fromBoxedBooleans(
true, false, null,
true, false, null,
true, false, null)) {
try (ColumnVector answer = icv1.binaryOp(BinaryOp.NULL_LOGICAL_AND, icv2, DType.BOOL8);
ColumnVector expected = ColumnVector.fromBoxedBooleans(
true, false, null,
false, false, false,
null, false, null)) {
assertColumnsAreEqual(expected, answer, "boolean NULL AND boolean");
}
}
}
@Test
public void testNullOr() {
try (ColumnVector icv1 = ColumnVector.fromBoxedBooleans(
true, true, true,
false, false, false,
null, null, null);
ColumnVector icv2 = ColumnVector.fromBoxedBooleans(
true, false, null,
true, false, null,
true, false, null)) {
try (ColumnVector answer = icv1.binaryOp(BinaryOp.NULL_LOGICAL_OR, icv2, DType.BOOL8);
ColumnVector expected = ColumnVector.fromBoxedBooleans(
true, true, true,
true, false, null,
true, null, null)) {
assertColumnsAreEqual(expected, answer, "boolean NULL OR boolean");
}
}
}
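
  // In contrast to the NULL_LOGICAL_* ops above, plain and()/or() simply propagate nulls:
  // if either input row is null, the output row is null.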
@Test
public void testAnd() {
try (ColumnVector icv1 = ColumnVector.fromBoxedBooleans(BOOLEANS_1);
ColumnVector icv2 = ColumnVector.fromBoxedBooleans(BOOLEANS_2)) {
try (ColumnVector answer = icv1.and(icv2);
ColumnVector expected = forEach(DType.BOOL8, icv1, icv2,
(b, l, r, i) -> b.append(l.getBoolean(i) && r.getBoolean(i)))) {
assertColumnsAreEqual(expected, answer, "boolean AND boolean");
}
try (Scalar s = Scalar.fromBool(true);
ColumnVector answer = icv1.and(s);
ColumnVector expected = forEachS(DType.BOOL8, icv1, true,
(b, l, r, i) -> b.append(l.getBoolean(i) && r))) {
assertColumnsAreEqual(expected, answer, "boolean AND true");
}
try (Scalar s = Scalar.fromBool(false);
ColumnVector answer = icv1.and(s);
ColumnVector expected = forEachS(DType.BOOL8, icv1, false,
(b, l, r, i) -> b.append(l.getBoolean(i) && r))) {
assertColumnsAreEqual(expected, answer, "boolean AND false");
}
try (Scalar s = Scalar.fromBool(true);
ColumnVector answer = icv1.and(s);
ColumnVector expected = forEachS(DType.BOOL8, true, icv1,
(b, l, r, i) -> b.append(l && r.getBoolean(i)))) {
assertColumnsAreEqual(expected, answer, "true AND boolean");
}
try (Scalar s = Scalar.fromBool(false);
ColumnVector answer = icv1.and(s);
ColumnVector expected = forEachS(DType.BOOL8, false, icv1,
(b, l, r, i) -> b.append(l && r.getBoolean(i)))) {
assertColumnsAreEqual(expected, answer, "false AND boolean");
}
}
}
@Test
public void testOr() {
try (ColumnVector icv1 = ColumnVector.fromBoxedBooleans(BOOLEANS_1);
ColumnVector icv2 = ColumnVector.fromBoxedBooleans(BOOLEANS_2)) {
try (ColumnVector answer = icv1.or(icv2);
ColumnVector expected = forEach(DType.BOOL8, icv1, icv2,
(b, l, r, i) -> b.append(l.getBoolean(i) || r.getBoolean(i)))) {
assertColumnsAreEqual(expected, answer, "boolean OR boolean");
}
try (Scalar s = Scalar.fromBool(true);
ColumnVector answer = icv1.or(s);
ColumnVector expected = forEachS(DType.BOOL8, icv1, true,
(b, l, r, i) -> b.append(l.getBoolean(i) || r))) {
assertColumnsAreEqual(expected, answer, "boolean OR true");
}
try (Scalar s = Scalar.fromBool(false);
ColumnVector answer = icv1.or(s);
ColumnVector expected = forEachS(DType.BOOL8, icv1, false,
(b, l, r, i) -> b.append(l.getBoolean(i) || r))) {
assertColumnsAreEqual(expected, answer, "boolean OR false");
}
try (Scalar s = Scalar.fromBool(true);
ColumnVector answer = icv1.or(s);
ColumnVector expected = forEachS(DType.BOOL8, true, icv1,
(b, l, r, i) -> b.append(l || r.getBoolean(i)))) {
assertColumnsAreEqual(expected, answer, "true OR boolean");
}
try (Scalar s = Scalar.fromBool(false);
ColumnVector answer = icv1.or(s);
ColumnVector expected = forEachS(DType.BOOL8, false, icv1,
(b, l, r, i) -> b.append(l || r.getBoolean(i)))) {
assertColumnsAreEqual(expected, answer, "false OR boolean");
}
}
}
@Test
public void testShiftLeft() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_2);
ColumnVector shiftBy = ColumnVector.fromInts(SHIFT_BY)) {
try (ColumnVector answer = icv.shiftLeft(shiftBy);
ColumnVector expected = forEach(DType.INT32, icv, shiftBy,
(b, l, r, i) -> b.append(l.getInt(i) << r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "int32 shifted left");
}
try (Scalar s = Scalar.fromInt(4);
ColumnVector answer = icv.shiftLeft(s, DType.INT64);
ColumnVector expected = forEachS(DType.INT64, icv, 4,
(b, l, r, i) -> b.append(((long)l.getInt(i) << r)))) {
assertColumnsAreEqual(expected, answer, "int32 << scalar = int64");
}
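      // The short scalar is shifted in 32-bit int math and then truncated back to 16 bits to
      // match the requested INT16 output type.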
try (Scalar s = Scalar.fromShort((short) 0x0000FFFF);
ColumnVector answer = s.shiftLeft(shiftBy, DType.INT16);
ColumnVector expected = forEachS(DType.INT16, (short) 0x0000FFFF, shiftBy,
(b, l, r, i) -> {
int shifted = l << r.getInt(i);
b.append((short) shifted);
})) {
assertColumnsAreEqual(expected, answer, "scalar short << int32");
}
}
}
@Test
public void testShiftRight() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_2);
ColumnVector shiftBy = ColumnVector.fromInts(SHIFT_BY)) {
try (ColumnVector answer = icv.shiftRight(shiftBy);
ColumnVector expected = forEach(DType.INT32, icv, shiftBy,
(b, l, r, i) -> b.append(l.getInt(i) >> r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "int32 shifted right");
}
try (Scalar s = Scalar.fromInt(4);
ColumnVector answer = icv.shiftRight(s, DType.INT64);
ColumnVector expected = forEachS(DType.INT64, icv, 4,
(b, l, r, i) -> b.append(((long)(l.getInt(i) >> r))))) {
assertColumnsAreEqual(expected, answer, "int32 >> scalar = int64");
}
try (Scalar s = Scalar.fromShort((short) 0x0000FFFF);
ColumnVector answer = s.shiftRight(shiftBy, DType.INT16);
ColumnVector expected = forEachS(DType.INT16, (short) 0x0000FFFF, shiftBy,
(b, l, r, i) -> {
int shifted = l >> r.getInt(i);
b.append((short) shifted);
})) {
assertColumnsAreEqual(expected, answer, "scalar short >> int32 = int16");
}
}
}
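
  // shiftRightUnsigned matches Java's >>> (zero fill): unlike >>, negative int32 values are
  // not sign-extended.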
@Test
public void testShiftRightUnsigned() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_2);
ColumnVector shiftBy = ColumnVector.fromInts(SHIFT_BY)) {
try (ColumnVector answer = icv.shiftRightUnsigned(shiftBy);
ColumnVector expected = forEach(DType.INT32, icv, shiftBy,
(b, l, r, i) -> b.append(l.getInt(i) >>> r.getInt(i)))) {
assertColumnsAreEqual(expected, answer, "int32 shifted right unsigned");
}
try (Scalar s = Scalar.fromInt(4);
ColumnVector answer = icv.shiftRightUnsigned(s, DType.INT64);
ColumnVector expected = forEachS(DType.INT64, icv, 4,
(b, l, r, i) -> b.append(((long)(l.getInt(i) >>> r))))) {
assertColumnsAreEqual(expected, answer, "int32 >>> scalar = int64");
}
}
}
@Test
public void testLogBase10() {
try (ColumnVector dcv1 = ColumnVector.fromBoxedDoubles(DOUBLES_2);
Scalar base = Scalar.fromInt(10);
ColumnVector answer = dcv1.log(base);
ColumnVector expected = ColumnVector.fromBoxedDoubles(Arrays.stream(DOUBLES_2)
.map(Math::log10)
.toArray(Double[]::new))) {
assertColumnsAreEqual(expected, answer, "log10");
}
}
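
  // Java has no Math.log2, so the expected values use the change-of-base identity
  // log2(x) = ln(x) / ln(2).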
@Test
public void testLogBase2() {
try (ColumnVector dcv1 = ColumnVector.fromBoxedDoubles(DOUBLES_2);
Scalar base = Scalar.fromInt(2);
ColumnVector answer = dcv1.log(base);
ColumnVector expected = ColumnVector.fromBoxedDoubles(Arrays.stream(DOUBLES_2)
.map(n -> Math.log(n) / Math.log(2))
.toArray(Double[]::new))) {
assertColumnsAreEqual(expected, answer, "log2");
}
}
@Test
public void testArctan2() {
Integer[] xInt = {7, 1, 2, 10};
Integer[] yInt = {4, 10, 8, 2};
Double[] xDouble = TestUtils.getDoubles(98234234523432423L, 50, ALL ^ NULL);
Double[] yDouble = TestUtils.getDoubles(23623274238423532L, 50, ALL ^ NULL);
try (ColumnVector yDoubleCV = ColumnVector.fromBoxedDoubles(yDouble);
ColumnVector xDoubleCV = ColumnVector.fromBoxedDoubles(xDouble);
ColumnVector yIntCV = ColumnVector.fromBoxedInts(yInt);
ColumnVector xIntCV = ColumnVector.fromBoxedInts(xInt);
ColumnVector resultDouble = yDoubleCV.arctan2(xDoubleCV);
ColumnVector resultInt = yIntCV.arctan2(xIntCV, DType.FLOAT64);
         ColumnVector expectedInt = ColumnVector.fromDoubles(IntStream.range(0, xInt.length)
             .mapToDouble(n -> Math.atan2(yInt[n], xInt[n])).toArray());
         ColumnVector expectedDouble = ColumnVector.fromDoubles(IntStream.range(0, xDouble.length)
             .mapToDouble(n -> Math.atan2(yDouble[n], xDouble[n])).toArray())) {
assertColumnsAreEqual(expectedInt, resultInt);
assertColumnsAreEqual(expectedDouble, resultDouble);
}
}
@Test
public void testEqualNullAware() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector intscalar = ColumnVector.fromInts(4);
Scalar sscv = Scalar.structFromColumnViews(intscalar);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
ColumnVector structcv1 = ColumnVector.fromStructs(structType, int_struct_data_1);
ColumnVector structcv2 = ColumnVector.fromStructs(structType, int_struct_data_2)) {
try (ColumnVector answer = icv.equalToNullAware(dcv);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, false, false,
false, false)) {
assertColumnsAreEqual(expected, answer, "int32 <=> double");
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.equalToNullAware(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, false, false, false, false,
false, false)) {
assertColumnsAreEqual(expected, answer, "int32 <=> scalar float");
}
try (Scalar s = Scalar.fromShort((short) 100);
ColumnVector answer = s.equalToNullAware(icv);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, false, false, false,
false, true)) {
assertColumnsAreEqual(expected, answer, "scalar short <=> int32");
}
try (ColumnVector answersv = sscv.equalToNullAware(structcv1);
ColumnVector expectedsv = forEachS(DType.BOOL8, 4, structcv1,
(b, l, r, i) -> b.append(r.isNull(i) ? false :
l == r.getStruct(i).dataRecord.get(0)), true)) {
assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 <=> struct int32");
}
try (ColumnVector answervs = structcv1.equalToNullAware(sscv);
ColumnVector expectedvs = forEachS(DType.BOOL8, structcv1, 4,
(b, l, r, i) -> b.append(l.isNull(i) ? false :
l.getStruct(i).dataRecord.get(0) == r), true)) {
assertColumnsAreEqual(expectedvs, answervs, "struct int32 <=> scalar struct int32");
}
try (ColumnVector answervv = structcv1.equalToNullAware(structcv2);
ColumnVector expectedvv = forEach(DType.BOOL8, structcv1, structcv2,
(b, l, r, i) -> b.append(l.isNull(i) || r.isNull(i) ? l.isNull(i) && r.isNull(i) :
l.getStruct(i).dataRecord.get(0) == r.getStruct(i).dataRecord.get(0)), true)) {
assertColumnsAreEqual(expectedvv, answervv, "struct int32 <=> struct int32");
}
}
}
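  // Illustrative note (not in the original source): equalToNullAware is the null-safe
  // equality ("<=>") semantics exercised above: null <=> null yields true, null <=> non-null
  // yields false, and non-null operands compare by value, exactly as the struct lambdas
  // above encode with l.isNull(i) || r.isNull(i) ? l.isNull(i) && r.isNull(i) : <value compare>.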
@Test
public void testStringEqualNullAwareScalar() {
try (ColumnVector a = ColumnVector.fromStrings("a", "b", "c", "d");
ColumnVector b = ColumnVector.fromStrings("a", "b", "b", "a");
ColumnVector c = ColumnVector.fromStrings("a", null, "b", null);
Scalar s = Scalar.fromString("b")) {
try (ColumnVector answer = a.equalToNullAware(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, false, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = b.equalToNullAware(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, true, true, false)) {
assertColumnsAreEqual(expected, answer);
}
try (ColumnVector answer = c.equalToNullAware(s);
ColumnVector expected = ColumnVector.fromBoxedBooleans(false, false, true, false)) {
assertColumnsAreEqual(expected, answer);
}
}
}
@Test
public void testMaxNullAware() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1)) {
try (ColumnVector answer = icv.maxNullAware(dcv);
ColumnVector expected = ColumnVector.fromBoxedDoubles(1.0, 10.0, 100.0, 5.3, 50.0,
100.0, 100.0)) {
assertColumnsAreEqual(expected, answer, "max(int32, double)");
}
try (Scalar s = Scalar.fromFloat(1.0f);
ColumnVector answer = icv.maxNullAware(s);
ColumnVector expected = ColumnVector.fromBoxedFloats(1f, 2f, 3f, 4f, 5f, 1f, 100f)) {
assertColumnsAreEqual(expected, answer, "max(int32, scalar float)");
}
try (Scalar s = Scalar.fromShort((short) 99);
ColumnVector answer = s.maxNullAware(icv);
ColumnVector expected = ColumnVector.fromBoxedInts(99, 99, 99, 99, 99, 99, 100)) {
assertColumnsAreEqual(expected, answer, "max(scalar short, int32)");
}
}
}
@Test
public void testMinNullAware() {
try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1)) {
try (ColumnVector answer = icv.minNullAware(dcv);
ColumnVector expected = ColumnVector.fromBoxedDoubles(1.0, 2.0, 3.0, 4.0, 5.0, 100.0, 100.0)) {
assertColumnsAreEqual(expected, answer, "min(int32, double)");
}
try (Scalar s = Scalar.fromFloat(3.1f);
ColumnVector answer = icv.minNullAware(s);
ColumnVector expected = ColumnVector.fromBoxedFloats(1f, 2f, 3f, 3.1f, 3.1f, 3.1f, 3.1f)) {
assertColumnsAreEqual(expected, answer, "min(int32, scalar float)");
}
try (Scalar s = Scalar.fromShort((short) 99);
ColumnVector answer = s.minNullAware(icv);
ColumnVector expected = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5, 99, 99)) {
assertColumnsAreEqual(expected, answer, "min(scalar short, int32)");
}
}
}
@Test
public void testDecimalTypeThrowsException() {
    try (ColumnVector dec64cv1 = ColumnVector.decimalFromLongs(-dec64Scale_1 + 10, DECIMAL64_1);
         ColumnVector dec64cv2 = ColumnVector.decimalFromLongs(-dec64Scale_2 - 10, DECIMAL64_2)) {
assertThrows(ArithmeticException.class,
() -> {
try (ColumnVector expected = forEach
(DType.create(DType.DTypeEnum.DECIMAL64, -6), dec64cv1, dec64cv2,
(b, l, r, i) -> b.append(l.getBigDecimal(i).add(r.getBigDecimal(i))))) {
}
});
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/DoubleColumnVectorTest.java
|
/*
*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.Test;
import java.util.Random;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class DoubleColumnVectorTest extends CudfTestBase {
@Test
public void testCreateColumnVectorBuilder() {
try (ColumnVector doubleColumnVector = ColumnVector.build(DType.FLOAT64, 3,
(b) -> b.append(1.0))) {
assertFalse(doubleColumnVector.hasNulls());
}
}
@Test
public void testArrayAllocation() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertEqualsWithinPercentage(cv.getDouble(0), 2.1, 0.01);
assertEqualsWithinPercentage(cv.getDouble(1), 3.02, 0.01);
assertEqualsWithinPercentage(cv.getDouble(2), 5.003, 0.001);
};
try (HostColumnVector dcv = HostColumnVector.fromDoubles(2.1, 3.02, 5.003)) {
verify.accept(dcv);
}
try (HostColumnVector dcv = ColumnBuilderHelper.fromDoubles(2.1, 3.02, 5.003)) {
verify.accept(dcv);
}
}
@Test
public void testUpperIndexOutOfBoundsException() {
Consumer<HostColumnVector> verify = (cv) -> {
assertThrows(AssertionError.class, () -> cv.getDouble(3));
assertFalse(cv.hasNulls());
};
try (HostColumnVector dcv = HostColumnVector.fromDoubles(2.1, 3.02, 5.003)) {
verify.accept(dcv);
}
try (HostColumnVector dcv = ColumnBuilderHelper.fromDoubles(2.1, 3.02, 5.003)) {
verify.accept(dcv);
}
}
@Test
public void testLowerIndexOutOfBoundsException() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertThrows(AssertionError.class, () -> cv.getDouble(-1));
};
try (HostColumnVector dcv = HostColumnVector.fromDoubles(2.1, 3.02, 5.003)) {
verify.accept(dcv);
}
try (HostColumnVector dcv = ColumnBuilderHelper.fromDoubles(2.1, 3.02, 5.003)) {
verify.accept(dcv);
}
}
@Test
public void testAddingNullValues() {
Consumer<HostColumnVector> verify = (cv) -> {
assertTrue(cv.hasNulls());
assertEquals(2, cv.getNullCount());
for (int i = 0; i < 6; i++) {
assertFalse(cv.isNull(i));
}
assertTrue(cv.isNull(6));
assertTrue(cv.isNull(7));
};
try (HostColumnVector dcv =
HostColumnVector.fromBoxedDoubles(2.0, 3.0, 4.0, 5.0, 6.0, 7.0, null, null)) {
verify.accept(dcv);
}
try (HostColumnVector dcv = ColumnBuilderHelper.fromBoxedDoubles(
2.0, 3.0, 4.0, 5.0, 6.0, 7.0, null, null)) {
verify.accept(dcv);
}
}
@Test
public void testOverrunningTheBuffer() {
try (Builder builder = HostColumnVector.builder(DType.FLOAT64, 3)) {
assertThrows(AssertionError.class,
() -> builder.append(2.1).appendNull().appendArray(new double[]{5.003, 4.0}).build());
}
}
@Test
void testAppendVector() {
Random random = new Random(192312989128L);
for (int dstSize = 1; dstSize <= 100; dstSize++) {
for (int dstPrefilledSize = 0; dstPrefilledSize < dstSize; dstPrefilledSize++) {
final int srcSize = dstSize - dstPrefilledSize;
for (int sizeOfDataNotToAdd = 0; sizeOfDataNotToAdd <= dstPrefilledSize; sizeOfDataNotToAdd++) {
try (Builder dst = HostColumnVector.builder(DType.FLOAT64, dstSize);
HostColumnVector src = HostColumnVector.build(DType.FLOAT64, srcSize, (b) -> {
for (int i = 0; i < srcSize; i++) {
if (random.nextBoolean()) {
b.appendNull();
} else {
b.append(random.nextDouble());
}
}
});
Builder gtBuilder = HostColumnVector.builder(DType.FLOAT64, dstPrefilledSize)) {
assertEquals(dstSize, srcSize + dstPrefilledSize);
//add the first half of the prefilled list
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
if (random.nextBoolean()) {
dst.appendNull();
gtBuilder.appendNull();
} else {
double a = random.nextDouble();
dst.append(a);
gtBuilder.append(a);
}
}
// append the src vector
dst.append(src);
try (HostColumnVector dstVector = dst.build();
HostColumnVector gt = gtBuilder.build()) {
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
assertEquals(gt.isNull(i), dstVector.isNull(i));
if (!gt.isNull(i)) {
assertEquals(gt.getDouble(i), dstVector.getDouble(i));
}
}
for (int i = dstPrefilledSize - sizeOfDataNotToAdd, j = 0; i < dstSize - sizeOfDataNotToAdd && j < srcSize; i++, j++) {
assertEquals(src.isNull(j), dstVector.isNull(i));
if (!src.isNull(j)) {
assertEquals(src.getDouble(j), dstVector.getDouble(i));
}
}
if (dstVector.hasValidityVector()) {
long maxIndex =
BitVectorHelper.getValidityAllocationSizeInBytes(dstVector.getRowCount()) * 8;
for (long i = dstSize - sizeOfDataNotToAdd; i < maxIndex; i++) {
assertFalse(dstVector.isNullExtendedRange(i));
}
}
}
}
}
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/CuFileTest.java
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.*;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
public class CuFileTest extends CudfTestBase {
private static final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();
@AfterEach
void tearDown() {
if (PinnedMemoryPool.isInitialized()) {
PinnedMemoryPool.shutdown();
}
}
@Test
public void testCopyToFile(@TempDir File tempDir) {
assumeTrue(CuFile.libraryLoaded());
File tempFile = new File(tempDir, "tempFile");
assertFalse(tempFile.exists());
verifyCopyToFile(tempFile);
}
@Test
public void testCopyToExistingFile(@TempDir File tempDir) throws IOException {
assumeTrue(CuFile.libraryLoaded());
File tempFile = new File(tempDir, "tempFile");
assertTrue(tempFile.createNewFile());
verifyCopyToFile(tempFile);
}
@Test
public void testAppendToFile(@TempDir File tempDir) {
assumeTrue(CuFile.libraryLoaded());
File tempFile = new File(tempDir, "tempFile");
assertFalse(tempFile.exists());
verifyAppendToFile(tempFile);
}
@Test
public void testAppendToExistingFile(@TempDir File tempDir) throws IOException {
assumeTrue(CuFile.libraryLoaded());
File tempFile = new File(tempDir, "tempFile");
assertTrue(tempFile.createNewFile());
verifyAppendToFile(tempFile);
}
private void verifyCopyToFile(File tempFile) {
try (HostMemoryBuffer orig = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer from = DeviceMemoryBuffer.allocate(16);
DeviceMemoryBuffer to = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer dest = hostMemoryAllocator.allocate(16)) {
orig.setLong(0, 123456789);
from.copyFromHostBuffer(orig);
CuFile.writeDeviceBufferToFile(tempFile, 0, from);
CuFile.readFileToDeviceBuffer(to, tempFile, 0);
dest.copyFromDeviceBuffer(to);
assertEquals(123456789, dest.getLong(0));
}
}
private void verifyAppendToFile(File tempFile) {
try (HostMemoryBuffer orig = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer from = DeviceMemoryBuffer.allocate(16);
DeviceMemoryBuffer to = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer dest = hostMemoryAllocator.allocate(16)) {
orig.setLong(0, 123456789);
from.copyFromHostBuffer(orig);
assertEquals(0, CuFile.appendDeviceBufferToFile(tempFile, from));
orig.setLong(0, 987654321);
from.copyFromHostBuffer(orig);
assertEquals(16, CuFile.appendDeviceBufferToFile(tempFile, from));
CuFile.readFileToDeviceBuffer(to, tempFile, 0);
dest.copyFromDeviceBuffer(to);
assertEquals(123456789, dest.getLong(0));
CuFile.readFileToDeviceBuffer(to, tempFile, 16);
dest.copyFromDeviceBuffer(to);
assertEquals(987654321, dest.getLong(0));
}
}
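  // Illustrative recap (not in the original source): appendDeviceBufferToFile returns the
  // file offset at which the buffer landed, so two sequential 16-byte appends return 0 and
  // 16, and each chunk can then be read back at that offset, e.g.
  //   long off = CuFile.appendDeviceBufferToFile(file, buf);  // 0 on the first append
  //   CuFile.readFileToDeviceBuffer(dst, file, off);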
@Test
public void testRegisteringUnalignedBufferThrowsException() {
assumeTrue(CuFile.libraryLoaded());
assertThrows(IllegalArgumentException.class, () -> {
//noinspection EmptyTryBlock
try (CuFileBuffer ignored = CuFileBuffer.allocate(4095, true)) {
}
});
}
@Test
public void testReadWriteUnregisteredBuffer(@TempDir File tempDir) {
assumeTrue(CuFile.libraryLoaded());
File tempFile = new File(tempDir, "tempFile");
verifyReadWrite(tempFile, 16, false);
}
@Test
public void testReadWriteRegisteredBuffer(@TempDir File tempDir) {
assumeTrue(CuFile.libraryLoaded());
File tempFile = new File(tempDir, "tempFile");
verifyReadWrite(tempFile, 4096, true);
}
private void verifyReadWrite(File tempFile, int length, boolean registerBuffer) {
try (HostMemoryBuffer orig = hostMemoryAllocator.allocate(length);
CuFileBuffer from = CuFileBuffer.allocate(length, registerBuffer);
CuFileWriteHandle writer = new CuFileWriteHandle(tempFile.getAbsolutePath())) {
orig.setLong(0, 123456789);
from.copyFromHostBuffer(orig);
writer.write(from, length, 0);
orig.setLong(0, 987654321);
from.copyFromHostBuffer(orig);
assertEquals(length, writer.append(from, length));
}
try (CuFileBuffer to = CuFileBuffer.allocate(length, registerBuffer);
CuFileReadHandle reader = new CuFileReadHandle(tempFile.getAbsolutePath());
HostMemoryBuffer dest = hostMemoryAllocator.allocate(length)) {
reader.read(to, 0);
dest.copyFromDeviceBuffer(to);
assertEquals(123456789, dest.getLong(0));
reader.read(to, length);
dest.copyFromDeviceBuffer(to);
assertEquals(987654321, dest.getLong(0));
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/HostMemoryBufferTest.java
|
/*
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Random;
import org.junit.jupiter.api.AfterEach;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
public class HostMemoryBufferTest extends CudfTestBase {
@AfterEach
void teardown() {
if (PinnedMemoryPool.isInitialized()) {
PinnedMemoryPool.shutdown();
}
}
@Test
void testRefCountLeak() throws InterruptedException {
assumeTrue(Boolean.getBoolean("ai.rapids.cudf.flaky-tests-enabled"));
long expectedLeakCount = MemoryCleaner.leakCount.get() + 1;
HostMemoryBuffer.allocate(1);
long maxTime = System.currentTimeMillis() + 10_000;
long leakNow;
do {
System.gc();
Thread.sleep(50);
leakNow = MemoryCleaner.leakCount.get();
} while (leakNow != expectedLeakCount && System.currentTimeMillis() < maxTime);
assertEquals(expectedLeakCount, MemoryCleaner.leakCount.get());
}
@Test
void asByteBuffer() {
final long size = 1024;
try (HostMemoryBuffer buff = HostMemoryBuffer.allocate(size)) {
ByteBuffer dbuff = buff.asByteBuffer();
assertEquals(size, dbuff.capacity());
assertEquals(ByteOrder.nativeOrder(), dbuff.order());
dbuff.putInt(101);
dbuff.putDouble(101.1);
assertEquals(101, buff.getInt(0));
assertEquals(101.1, buff.getDouble(4));
}
}
@Test
void testDoubleFree() {
HostMemoryBuffer buffer = HostMemoryBuffer.allocate(1);
buffer.close();
assertThrows(IllegalStateException.class, () -> buffer.close() );
}
@Test
public void testGetInt() {
try (HostMemoryBuffer hostMemoryBuffer = HostMemoryBuffer.allocate(16)) {
long offset = 1;
hostMemoryBuffer.setInt(offset * DType.INT32.getSizeInBytes(), 2);
assertEquals(2, hostMemoryBuffer.getInt(offset * DType.INT32.getSizeInBytes()));
}
}
@Test
public void testGetByte() {
try (HostMemoryBuffer hostMemoryBuffer = HostMemoryBuffer.allocate(16)) {
long offset = 1;
hostMemoryBuffer.setByte(offset * DType.INT8.getSizeInBytes(), (byte) 2);
assertEquals((byte) 2, hostMemoryBuffer.getByte(offset * DType.INT8.getSizeInBytes()));
}
}
@Test
public void testGetLong() {
try (HostMemoryBuffer hostMemoryBuffer = HostMemoryBuffer.allocate(16)) {
long offset = 1;
hostMemoryBuffer.setLong(offset * DType.INT64.getSizeInBytes(), 3);
assertEquals(3, hostMemoryBuffer.getLong(offset * DType.INT64.getSizeInBytes()));
}
}
@Test
public void testGetLongs() {
try (HostMemoryBuffer hostMemoryBuffer = HostMemoryBuffer.allocate(16)) {
hostMemoryBuffer.setLong(0, 3);
hostMemoryBuffer.setLong(DType.INT64.getSizeInBytes(), 10);
long[] results = new long[2];
hostMemoryBuffer.getLongs(results, 0, 0, 2);
assertEquals(3, results[0]);
assertEquals(10, results[1]);
}
}
@Test
public void testGetLength() {
try (HostMemoryBuffer hostMemoryBuffer = HostMemoryBuffer.allocate(16)) {
long length = hostMemoryBuffer.getLength();
assertEquals(16, length);
}
}
@Test
public void testCopyFromDeviceBuffer() {
try (HostMemoryBuffer init = HostMemoryBuffer.allocate(16);
DeviceMemoryBuffer tmp = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer to = HostMemoryBuffer.allocate(16)) {
init.setLong(0, 123456789);
tmp.copyFromHostBuffer(init);
to.copyFromDeviceBuffer(tmp);
assertEquals(123456789, to.getLong(0));
}
}
@Test
public void testFilemap() throws Exception {
Random random = new Random(12345L);
final int pageSize = UnsafeMemoryAccessor.pageSize();
final int bufferSize = pageSize * 5;
byte[] testbuf = new byte[bufferSize];
random.nextBytes(testbuf);
Path tempFile = Files.createTempFile("mmaptest", ".data");
try {
Files.write(tempFile, testbuf);
// verify we can map the whole file
try (HostMemoryBuffer hmb = HostMemoryBuffer.mapFile(tempFile.toFile(),
          FileChannel.MapMode.READ_ONLY, 0, bufferSize)) {
assertEquals(bufferSize, hmb.length);
byte[] bytes = new byte[(int) hmb.length];
hmb.getBytes(bytes, 0, 0, hmb.length);
assertArrayEquals(testbuf, bytes);
}
// verify we can map at offsets that aren't a page boundary
int mapOffset = pageSize + 1;
int mapLength = pageSize * 2 + 7;
try (HostMemoryBuffer hmb = HostMemoryBuffer.mapFile(tempFile.toFile(),
FileChannel.MapMode.READ_ONLY, mapOffset, mapLength)) {
assertEquals(mapLength, hmb.length);
byte[] expected = Arrays.copyOfRange(testbuf, mapOffset, mapOffset + mapLength);
byte[] bytes = new byte[(int) hmb.length];
hmb.getBytes(bytes, 0, 0, hmb.length);
assertArrayEquals(expected, bytes);
}
// verify we can modify the file via a writable mapping
mapOffset = pageSize * 3 + 123;
mapLength = bufferSize - mapOffset - 456;
byte[] newData = new byte[mapLength];
random.nextBytes(newData);
try (HostMemoryBuffer hmb = HostMemoryBuffer.mapFile(tempFile.toFile(),
FileChannel.MapMode.READ_WRITE, mapOffset, mapLength)) {
hmb.setBytes(0, newData, 0, newData.length);
}
byte[] data = Files.readAllBytes(tempFile);
System.arraycopy(newData, 0, testbuf, mapOffset, mapLength);
assertArrayEquals(testbuf, data);
} finally {
Files.delete(tempFile);
}
}
public static void initPinnedPoolIfNeeded(long size) {
long available = PinnedMemoryPool.getAvailableBytes();
if (available < size) {
if (PinnedMemoryPool.isInitialized()) {
PinnedMemoryPool.shutdown();
}
PinnedMemoryPool.initialize(size + 2048);
}
}
  public static byte[] rba(int size, long seed) {
    Random random = new Random(seed);
byte[] data = new byte[size];
random.nextBytes(data);
return data;
}
public static byte[] rba(int size) {
return rba(size, 12345L);
}
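  // Usage sketch (illustrative, not part of the original source): rba ("random byte array")
  // produces deterministic pseudo-random test data, e.g.
  //   byte[] a = rba(1024);           // default seed
  //   byte[] b = rba(1024, 98765L);   // explicit seed (hypothetical value)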
@Test
public void testCopyWithStream() {
long length = 1 * 1024 * 1024;
initPinnedPoolIfNeeded(length * 2);
byte[] data = rba((int)length);
byte[] result = new byte[data.length];
try (Cuda.Stream stream1 = new Cuda.Stream(true);
Cuda.Stream stream2 = new Cuda.Stream(true);
HostMemoryBuffer hostBuffer = PinnedMemoryPool.allocate(data.length);
DeviceMemoryBuffer devBuffer = DeviceMemoryBuffer.allocate(data.length);
HostMemoryBuffer hostBuffer2 = PinnedMemoryPool.allocate(data.length)) {
hostBuffer.setBytes(0, data, 0, data.length);
devBuffer.copyFromHostBuffer(hostBuffer, stream1);
hostBuffer2.copyFromDeviceBuffer(devBuffer, stream2);
hostBuffer2.getBytes(result, 0, 0, result.length);
assertArrayEquals(data, result);
}
}
@Test
public void simpleEventTest() {
long length = 1 * 1024 * 1024;
initPinnedPoolIfNeeded(length * 2);
byte[] data = rba((int)length);
byte[] result = new byte[data.length];
try (Cuda.Stream stream1 = new Cuda.Stream(true);
Cuda.Stream stream2 = new Cuda.Stream(true);
Cuda.Event event1 = new Cuda.Event();
Cuda.Event event2 = new Cuda.Event();
HostMemoryBuffer hostBuffer = PinnedMemoryPool.allocate(data.length);
DeviceMemoryBuffer devBuffer = DeviceMemoryBuffer.allocate(data.length);
HostMemoryBuffer hostBuffer2 = PinnedMemoryPool.allocate(data.length)) {
hostBuffer.setBytes(0, data, 0, data.length);
devBuffer.copyFromHostBufferAsync(hostBuffer, stream1);
event1.record(stream1);
stream2.waitOn(event1);
hostBuffer2.copyFromDeviceBufferAsync(devBuffer, stream2);
event2.record(stream2);
event2.sync();
hostBuffer2.getBytes(result, 0, 0, result.length);
assertArrayEquals(data, result);
}
}
@Test
public void simpleEventQueryTest() throws InterruptedException {
long length = 1 * 1024 * 1024;
initPinnedPoolIfNeeded(length * 2);
byte[] data = rba((int)length);
byte[] result = new byte[data.length];
try (Cuda.Stream stream1 = new Cuda.Stream(true);
Cuda.Stream stream2 = new Cuda.Stream(true);
Cuda.Event event1 = new Cuda.Event();
Cuda.Event event2 = new Cuda.Event();
HostMemoryBuffer hostBuffer = PinnedMemoryPool.allocate(data.length);
DeviceMemoryBuffer devBuffer = DeviceMemoryBuffer.allocate(data.length);
HostMemoryBuffer hostBuffer2 = PinnedMemoryPool.allocate(data.length)) {
hostBuffer.setBytes(0, data, 0, data.length);
devBuffer.copyFromHostBufferAsync(hostBuffer, stream1);
event1.record(stream1);
stream2.waitOn(event1);
hostBuffer2.copyFromDeviceBufferAsync(devBuffer, stream2);
event2.record(stream2);
while (!event2.hasCompleted()) {
Thread.sleep(100);
}
hostBuffer2.getBytes(result, 0, 0, result.length);
assertArrayEquals(data, result);
}
}
@Test
public void simpleStreamSynchTest() {
long length = 1 * 1024 * 1024;
initPinnedPoolIfNeeded(length * 2);
byte[] data = rba((int)length);
byte[] result = new byte[data.length];
try (Cuda.Stream stream1 = new Cuda.Stream(true);
Cuda.Stream stream2 = new Cuda.Stream(true);
HostMemoryBuffer hostBuffer = PinnedMemoryPool.allocate(data.length);
DeviceMemoryBuffer devBuffer = DeviceMemoryBuffer.allocate(data.length);
HostMemoryBuffer hostBuffer2 = PinnedMemoryPool.allocate(data.length)) {
hostBuffer.setBytes(0, data, 0, data.length);
devBuffer.copyFromHostBufferAsync(hostBuffer, stream1);
stream1.sync();
hostBuffer2.copyFromDeviceBufferAsync(devBuffer, stream2);
stream2.sync();
hostBuffer2.getBytes(result, 0, 0, result.length);
assertArrayEquals(data, result);
}
}
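  // Illustrative recap (not in the original source): the three tests above exercise the main
  // cross-stream synchronization idioms used with async copies:
  //   stream2.waitOn(event1); event2.sync();   // blocking on a recorded event
  //   while (!event2.hasCompleted()) { ... }   // polling an event
  //   stream1.sync(); stream2.sync();          // synchronizing whole streams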
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/SegmentedReductionTest.java
|
/*
*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
class SegmentedReductionTest extends CudfTestBase {
@Test
public void testListSum() {
HostColumnVector.DataType dt = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
try (ColumnVector listCv = ColumnVector.fromLists(dt,
Arrays.asList(1, 2, 3),
Arrays.asList(2, 3, 4),
null,
Arrays.asList(null, 1, 2));
ColumnVector excludeExpected = ColumnVector.fromBoxedInts(6, 9, null, 3);
ColumnVector nullExcluded = listCv.listReduce(SegmentedReductionAggregation.sum(), NullPolicy.EXCLUDE, DType.INT32);
ColumnVector includeExpected = ColumnVector.fromBoxedInts(6, 9, null, null);
ColumnVector nullIncluded = listCv.listReduce(SegmentedReductionAggregation.sum(), NullPolicy.INCLUDE, DType.INT32)) {
AssertUtils.assertColumnsAreEqual(excludeExpected, nullExcluded);
AssertUtils.assertColumnsAreEqual(includeExpected, nullIncluded);
}
}
@Test
public void testListMin() {
HostColumnVector.DataType dt = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
try (ColumnVector listCv = ColumnVector.fromLists(dt,
Arrays.asList(1, 2, 3),
Arrays.asList(2, 3, 4),
null,
Arrays.asList(null, 1, 2));
ColumnVector excludeExpected = ColumnVector.fromBoxedInts(1, 2, null, 1);
ColumnVector nullExcluded = listCv.listReduce(SegmentedReductionAggregation.min(), NullPolicy.EXCLUDE, DType.INT32);
ColumnVector includeExpected = ColumnVector.fromBoxedInts(1, 2, null, null);
ColumnVector nullIncluded = listCv.listReduce(SegmentedReductionAggregation.min(), NullPolicy.INCLUDE, DType.INT32)) {
AssertUtils.assertColumnsAreEqual(excludeExpected, nullExcluded);
AssertUtils.assertColumnsAreEqual(includeExpected, nullIncluded);
}
}
@Test
public void testListMax() {
HostColumnVector.DataType dt = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.INT32));
try (ColumnVector listCv = ColumnVector.fromLists(dt,
Arrays.asList(1, 2, 3),
Arrays.asList(2, 3, 4),
null,
Arrays.asList(null, 1, 2));
ColumnVector excludeExpected = ColumnVector.fromBoxedInts(3, 4, null, 2);
ColumnVector nullExcluded = listCv.listReduce(SegmentedReductionAggregation.max(), NullPolicy.EXCLUDE, DType.INT32);
ColumnVector includeExpected = ColumnVector.fromBoxedInts(3, 4, null, null);
ColumnVector nullIncluded = listCv.listReduce(SegmentedReductionAggregation.max(), NullPolicy.INCLUDE, DType.INT32)) {
AssertUtils.assertColumnsAreEqual(excludeExpected, nullExcluded);
AssertUtils.assertColumnsAreEqual(includeExpected, nullIncluded);
}
}
@Test
public void testListAny() {
HostColumnVector.DataType dt = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.BOOL8));
try (ColumnVector listCv = ColumnVector.fromLists(dt,
Arrays.asList(true, false, false),
Arrays.asList(false, false, false),
null,
Arrays.asList(null, true, false));
ColumnVector excludeExpected = ColumnVector.fromBoxedBooleans(true, false, null, true);
ColumnVector nullExcluded = listCv.listReduce(SegmentedReductionAggregation.any(), NullPolicy.EXCLUDE, DType.BOOL8);
ColumnVector includeExpected = ColumnVector.fromBoxedBooleans(true, false, null, null);
ColumnVector nullIncluded = listCv.listReduce(SegmentedReductionAggregation.any(), NullPolicy.INCLUDE, DType.BOOL8)) {
AssertUtils.assertColumnsAreEqual(excludeExpected, nullExcluded);
AssertUtils.assertColumnsAreEqual(includeExpected, nullIncluded);
}
}
@Test
public void testListAll() {
HostColumnVector.DataType dt = new HostColumnVector.ListType(true,
new HostColumnVector.BasicType(true, DType.BOOL8));
try (ColumnVector listCv = ColumnVector.fromLists(dt,
Arrays.asList(true, true, true),
Arrays.asList(false, true, false),
null,
Arrays.asList(null, true, true));
ColumnVector excludeExpected = ColumnVector.fromBoxedBooleans(true, false, null, true);
ColumnVector nullExcluded = listCv.listReduce(SegmentedReductionAggregation.all(), NullPolicy.EXCLUDE, DType.BOOL8);
ColumnVector includeExpected = ColumnVector.fromBoxedBooleans(true, false, null, null);
ColumnVector nullIncluded = listCv.listReduce(SegmentedReductionAggregation.all(), NullPolicy.INCLUDE, DType.BOOL8)) {
AssertUtils.assertColumnsAreEqual(excludeExpected, nullExcluded);
AssertUtils.assertColumnsAreEqual(includeExpected, nullIncluded);
}
}
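  // Illustrative summary (not in the original source): with NullPolicy.EXCLUDE, nulls inside
  // a list are skipped (e.g. the sum of [null, 1, 2] is 3), while NullPolicy.INCLUDE
  // propagates them (the sum of [null, 1, 2] is null); a null list row reduces to null under
  // either policy, as the expected columns above encode.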
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/AssertUtils.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
/** Utility methods for asserting in unit tests */
public class AssertUtils {
/**
* Checks and asserts that passed in columns match
* @param expect The expected result column
* @param cv The input column
*/
public static void assertColumnsAreEqual(ColumnView expect, ColumnView cv) {
assertColumnsAreEqual(expect, cv, "unnamed");
}
/**
* Checks and asserts that passed in columns match
* @param expected The expected result column
* @param cv The input column
* @param colName The name of the column
*/
public static void assertColumnsAreEqual(ColumnView expected, ColumnView cv, String colName) {
assertPartialColumnsAreEqual(expected, 0, expected.getRowCount(), cv, colName, true, false);
}
/**
* Checks and asserts that passed in host columns match
* @param expected The expected result host column
* @param cv The input host column
* @param colName The name of the host column
*/
public static void assertColumnsAreEqual(HostColumnVector expected, HostColumnVector cv, String colName) {
assertPartialColumnsAreEqual(expected, 0, expected.getRowCount(), cv, colName, true, false);
}
/**
* Checks and asserts that passed in Struct columns match
* @param expected The expected result Struct column
* @param cv The input Struct column
*/
public static void assertStructColumnsAreEqual(ColumnView expected, ColumnView cv) {
assertPartialStructColumnsAreEqual(expected, 0, expected.getRowCount(), cv, "unnamed", true, false);
}
/**
* Checks and asserts that passed in Struct columns match
* @param expected The expected result Struct column
* @param rowOffset The row number to look from
* @param length The number of rows to consider
* @param cv The input Struct column
* @param colName The name of the column
* @param enableNullCountCheck Whether to check for nulls in the Struct column
   * @param enableNullabilityCheck Whether to check that the column has a matching validity mask
*/
public static void assertPartialStructColumnsAreEqual(ColumnView expected, long rowOffset, long length,
ColumnView cv, String colName, boolean enableNullCountCheck, boolean enableNullabilityCheck) {
try (HostColumnVector hostExpected = expected.copyToHost();
HostColumnVector hostcv = cv.copyToHost()) {
assertPartialColumnsAreEqual(hostExpected, rowOffset, length, hostcv, colName, enableNullCountCheck, enableNullabilityCheck);
}
}
/**
* Checks and asserts that passed in columns match
   * @param expected The expected result column
   * @param rowOffset The row number to start checking from
   * @param length The number of rows to check
   * @param cv The input column
   * @param colName The name of the column
   * @param enableNullCheck Whether to check for nulls in the column
   * @param enableNullabilityCheck Whether to check that the column has a matching validity mask
*/
public static void assertPartialColumnsAreEqual(ColumnView expected, long rowOffset, long length,
ColumnView cv, String colName, boolean enableNullCheck, boolean enableNullabilityCheck) {
try (HostColumnVector hostExpected = expected.copyToHost();
HostColumnVector hostcv = cv.copyToHost()) {
assertPartialColumnsAreEqual(hostExpected, rowOffset, length, hostcv, colName, enableNullCheck, enableNullabilityCheck);
}
}
/**
* Checks and asserts that passed in host columns match
* @param expected The expected result host column
* @param rowOffset start row index
* @param length number of rows from starting offset
* @param cv The input host column
* @param colName The name of the host column
   * @param enableNullCountCheck Whether to check for nulls in the host column
   * @param enableNullabilityCheck Whether to check that the column has a matching validity mask
   */
public static void assertPartialColumnsAreEqual(HostColumnVectorCore expected, long rowOffset, long length,
HostColumnVectorCore cv, String colName, boolean enableNullCountCheck, boolean enableNullabilityCheck) {
assertEquals(expected.getType(), cv.getType(), "Type For Column " + colName);
assertEquals(length, cv.getRowCount(), "Row Count For Column " + colName);
assertEquals(expected.getNumChildren(), cv.getNumChildren(), "Child Count for Column " + colName);
if (enableNullCountCheck) {
assertEquals(expected.getNullCount(), cv.getNullCount(), "Null Count For Column " + colName);
} else {
// TODO add in a proper check when null counts are supported by serializing a partitioned column
}
if (enableNullabilityCheck) {
assertEquals(expected.hasValidityVector(), cv.hasValidityVector(), "Column nullability is different than expected");
}
DType type = expected.getType();
for (long expectedRow = rowOffset; expectedRow < (rowOffset + length); expectedRow++) {
long tableRow = expectedRow - rowOffset;
assertEquals(expected.isNull(expectedRow), cv.isNull(tableRow),
"NULL for Column " + colName + " Row " + tableRow);
if (!expected.isNull(expectedRow)) {
switch (type.typeId) {
case BOOL8: // fall through
case INT8: // fall through
case UINT8:
assertEquals(expected.getByte(expectedRow), cv.getByte(tableRow),
"Column " + colName + " Row " + tableRow);
break;
case INT16: // fall through
case UINT16:
assertEquals(expected.getShort(expectedRow), cv.getShort(tableRow),
"Column " + colName + " Row " + tableRow);
break;
case INT32: // fall through
case UINT32: // fall through
          case TIMESTAMP_DAYS: // fall through
          case DURATION_DAYS: // fall through
          case DECIMAL32:
assertEquals(expected.getInt(expectedRow), cv.getInt(tableRow),
"Column " + colName + " Row " + tableRow);
break;
case INT64: // fall through
case UINT64: // fall through
case DURATION_MICROSECONDS: // fall through
case DURATION_MILLISECONDS: // fall through
case DURATION_NANOSECONDS: // fall through
case DURATION_SECONDS: // fall through
case TIMESTAMP_MICROSECONDS: // fall through
case TIMESTAMP_MILLISECONDS: // fall through
case TIMESTAMP_NANOSECONDS: // fall through
          case TIMESTAMP_SECONDS: // fall through
          case DECIMAL64:
assertEquals(expected.getLong(expectedRow), cv.getLong(tableRow),
"Column " + colName + " Row " + tableRow);
break;
case DECIMAL128:
assertEquals(expected.getBigDecimal(expectedRow), cv.getBigDecimal(tableRow),
"Column " + colName + " Row " + tableRow);
break;
case FLOAT32:
CudfTestBase.assertEqualsWithinPercentage(expected.getFloat(expectedRow), cv.getFloat(tableRow), 0.0001,
"Column " + colName + " Row " + tableRow);
break;
case FLOAT64:
CudfTestBase.assertEqualsWithinPercentage(expected.getDouble(expectedRow), cv.getDouble(tableRow), 0.0001,
"Column " + colName + " Row " + tableRow);
break;
case STRING:
assertArrayEquals(expected.getUTF8(expectedRow), cv.getUTF8(tableRow),
"Column " + colName + " Row " + tableRow);
break;
case LIST:
HostMemoryBuffer expectedOffsets = expected.getOffsets();
HostMemoryBuffer cvOffsets = cv.getOffsets();
int expectedChildRows = expectedOffsets.getInt((expectedRow + 1) * 4) -
expectedOffsets.getInt(expectedRow * 4);
int cvChildRows = cvOffsets.getInt((tableRow + 1) * 4) -
cvOffsets.getInt(tableRow * 4);
assertEquals(expectedChildRows, cvChildRows, "Child row count for Column " +
colName + " Row " + tableRow);
break;
case STRUCT:
// parent column only has validity which was checked above
break;
default:
throw new IllegalArgumentException(type + " is not supported yet");
}
}
}
if (type.isNestedType()) {
switch (type.typeId) {
case LIST:
int expectedChildRowOffset = 0;
int numChildRows = 0;
if (length > 0) {
HostMemoryBuffer expectedOffsets = expected.getOffsets();
HostMemoryBuffer cvOffsets = cv.getOffsets();
expectedChildRowOffset = expectedOffsets.getInt(rowOffset * 4);
numChildRows = expectedOffsets.getInt((rowOffset + length) * 4) -
expectedChildRowOffset;
}
assertPartialColumnsAreEqual(expected.getNestedChildren().get(0), expectedChildRowOffset,
numChildRows, cv.getNestedChildren().get(0), colName + " list child",
enableNullCountCheck, enableNullabilityCheck);
break;
case STRUCT:
List<HostColumnVectorCore> expectedChildren = expected.getNestedChildren();
List<HostColumnVectorCore> cvChildren = cv.getNestedChildren();
for (int i = 0; i < expectedChildren.size(); i++) {
HostColumnVectorCore expectedChild = expectedChildren.get(i);
HostColumnVectorCore cvChild = cvChildren.get(i);
String childName = colName + " child " + i;
            assertEquals(length, cvChild.getRowCount(), "Row Count for Column " + childName);
            assertPartialColumnsAreEqual(expectedChild, rowOffset, length, cvChild,
                childName, enableNullCountCheck, enableNullabilityCheck);
}
break;
default:
throw new IllegalArgumentException(type + " is not supported yet");
}
}
}
/**
   * Checks and asserts that the two tables match over the given row range
* @param expected the expected result table
* @param rowOffset the row number to start checking from
* @param length the number of rows to check
* @param table the input table to compare against expected
* @param enableNullCheck whether to check for nulls or not
   * @param enableNullabilityCheck whether to check that the columns' nullability matches
*/
public static void assertPartialTablesAreEqual(Table expected, long rowOffset, long length, Table table,
boolean enableNullCheck, boolean enableNullabilityCheck) {
assertEquals(expected.getNumberOfColumns(), table.getNumberOfColumns());
assertEquals(length, table.getRowCount(), "ROW COUNT");
for (int col = 0; col < expected.getNumberOfColumns(); col++) {
ColumnVector expect = expected.getColumn(col);
ColumnVector cv = table.getColumn(col);
String name = String.valueOf(col);
if (rowOffset != 0 || length != expected.getRowCount()) {
name = name + " PART " + rowOffset + "-" + (rowOffset + length - 1);
}
assertPartialColumnsAreEqual(expect, rowOffset, length, cv, name, enableNullCheck, enableNullabilityCheck);
}
}
/**
* Checks and asserts that the two tables match
* @param expected the expected result table
* @param table the input table to compare against expected
*/
public static void assertTablesAreEqual(Table expected, Table table) {
assertPartialTablesAreEqual(expected, 0, expected.getRowCount(), table, true, false);
}
public static void assertTableTypes(DType[] expectedTypes, Table t) {
int len = t.getNumberOfColumns();
assertEquals(expectedTypes.length, len);
for (int i = 0; i < len; i++) {
ColumnVector vec = t.getColumn(i);
DType type = vec.getType();
assertEquals(expectedTypes[i], type, "Types don't match at " + i);
}
}
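  // Usage sketch (illustrative, not part of the original source; the column data is
  // hypothetical):
  //   try (ColumnVector expected = ColumnVector.fromInts(1, 2, 3);
  //        ColumnVector actual = ColumnVector.fromInts(1, 2, 3)) {
  //     AssertUtils.assertColumnsAreEqual(expected, actual, "myColumn");
  //   }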
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/CudaTest.java
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class CudaTest {
@Test
public void testGetCudaRuntimeInfo() {
    // The driver version is not necessarily newer than the runtime version. A driver from an
    // earlier release can still support a later runtime, provided both support the same
    // compute modes.
    assertTrue(Cuda.getDriverVersion() >= 1000);
    assertTrue(Cuda.getRuntimeVersion() >= 1000);
assertEquals(Cuda.getNativeComputeMode(), Cuda.getComputeMode().nativeId);
}
@Tag("noSanitizer")
@Test
public void testCudaException() {
assertThrows(CudaException.class, () -> {
try {
Cuda.memset(Long.MAX_VALUE, (byte) 0, 1024);
} catch (CudaFatalException ignored) {
} catch (CudaException ex) {
assertEquals(CudaException.CudaError.cudaErrorInvalidValue, ex.getCudaError());
throw ex;
}
}
);
// non-fatal CUDA error will not fail subsequent CUDA calls
try (ColumnVector cv = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5)) {
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/TimestampColumnVectorTest.java
|
/*
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import java.util.function.Function;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TimestampColumnVectorTest extends CudfTestBase {
static final int[] TIMES_DAY = {-1528, //1965-10-26
17716, //2018-07-04
19382, //2023-01-25
-1528, //1965-10-26
17716}; //2018-07-04
static final long[] TIMES_S = {-131968728L, //'1965-10-26 14:01:12' Tuesday
1530705600L, //'2018-07-04 12:00:00' Wednesday
1674631932L, //'2023-01-25 07:32:12' Wednesday
-131968728L, //'1965-10-26 14:01:12' Tuesday
1530705600L}; //'2018-07-04 12:00:00' Wednesday
static final long[] TIMES_MS = {-131968727762L, //'1965-10-26 14:01:12.238' Tuesday
1530705600115L, //'2018-07-04 12:00:00.115' Wednesday
1674631932929L, //'2023-01-25 07:32:12.929' Wednesday
-131968727762L, //'1965-10-26 14:01:12.238' Tuesday
1530705600115L}; //'2018-07-04 12:00:00.115' Wednesday
static final long[] TIMES_US = {-131968727761703L, //'1965-10-26 14:01:12.238297'
1530705600115254L, //'2018-07-04 12:00:00.115254'
1674631932929861L, //'2023-01-25 07:32:12.929861'
-131968727761703L, //'1965-10-26 14:01:12.238297'
1530705600115254L}; //'2018-07-04 12:00:00.115254'
static final long[] TIMES_NS = {-131968727761702469L, //'1965-10-26 14:01:12.238297531'
1530705600115254330L, //'2018-07-04 12:00:00.115254330'
1674631932929861604L, //'2023-01-25 07:32:12.929861604'
-131968727761702469L, //'1965-10-26 14:01:12.238297531'
1530705600115254330L}; //'2018-07-04 12:00:00.115254330'
static final String[] TIMES_S_STRING = {"1965-10-26 14:01:12",
"2018-07-04 12:00:00",
"2023-01-25 07:32:12",
"1965-10-26 14:01:12",
"2018-07-04 12:00:00"};
static final String[] TIMES_MS_STRING = {"1965-10-26 14:01:12.238000000",
"2018-07-04 12:00:00.115000000",
"2023-01-25 07:32:12.929000000",
"1965-10-26 14:01:12.238000000",
"2018-07-04 12:00:00.115000000"};
static final String[] TIMES_US_STRING = {"1965-10-26 14:01:12.238297000",
"2018-07-04 12:00:00.115254000",
"2023-01-25 07:32:12.929861000",
"1965-10-26 14:01:12.238297000",
"2018-07-04 12:00:00.115254000"};
static final String[] TIMES_NS_STRING = {"1965-10-26 14:01:12.238297531",
"2018-07-04 12:00:00.115254330",
"2023-01-25 07:32:12.929861604",
"1965-10-26 14:01:12.238297531",
"2018-07-04 12:00:00.115254330"};
static final long[] THOUSAND = {1000L, 1000L, 1000L, 1000L, 1000L};
  /**
   * Multiplies the given column by 1000 the requested number of times, closing the input and
   * all intermediate vectors, and returns the final result.
   */
  public static ColumnVector mulThouAndClose(ColumnVector cv, int times) {
ColumnVector input = cv;
ColumnVector tmp = null;
try (ColumnVector THOU = ColumnVector.fromLongs(THOUSAND)) {
for (int i = 0; i < times; i++) {
tmp = input.mul(THOU);
input.close();
input = tmp;
tmp = null;
}
ColumnVector ret = input;
input = null;
return ret;
} finally {
if (tmp != null) {
tmp.close();
}
if (input != null) {
input.close();
}
}
}
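  // Usage sketch (illustrative, not part of the original source): convert seconds to
  // microseconds by multiplying by 1000 twice; the input vector is consumed by the call.
  //   ColumnVector us = mulThouAndClose(ColumnVector.fromLongs(TIMES_S), 2);
  //   try { /* use us */ } finally { us.close(); }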
  /** Applies the given function to the vector, closing the input and returning the result. */
  public static ColumnVector applyAndClose(ColumnVector cv, Function<ColumnVector, ColumnVector> function) {
try {
return function.apply(cv);
} finally {
cv.close();
}
}
@Test
public void getYear() {
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector tmp = timestampColumnVector.year();
HostColumnVector result = tmp.copyToHost()) {
assert timestampColumnVector.getType().equals(DType.TIMESTAMP_MILLISECONDS);
assertEquals(1965, result.getShort(0));
assertEquals(2018, result.getShort(1));
assertEquals(2023, result.getShort(2));
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector tmp = timestampColumnVector.year();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(1965, result.getShort(0));
assertEquals(2018, result.getShort(1));
assertEquals(2023, result.getShort(2));
}
}
@Test
public void getMonth() {
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector tmp = timestampColumnVector.month();
HostColumnVector result = tmp.copyToHost()) {
assert timestampColumnVector.getType().equals(DType.TIMESTAMP_MILLISECONDS);
assertEquals(10, result.getShort(0));
assertEquals(7, result.getShort(1));
assertEquals(1, result.getShort(2));
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector tmp = timestampColumnVector.month();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(10, result.getShort(0));
assertEquals(7, result.getShort(1));
assertEquals(1, result.getShort(2));
}
}
@Test
public void getDay() {
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS)) {
assert timestampColumnVector.getType().equals(DType.TIMESTAMP_MILLISECONDS);
try (ColumnVector tmp = timestampColumnVector.day();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(26, result.getShort(0));
assertEquals(4, result.getShort(1));
assertEquals(25, result.getShort(2));
}
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector tmp = timestampColumnVector.day();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(26, result.getShort(0));
assertEquals(4, result.getShort(1));
assertEquals(25, result.getShort(2));
}
}
@Test
public void getHour() {
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS)) {
assert timestampColumnVector.getType().equals(DType.TIMESTAMP_MILLISECONDS);
try (ColumnVector tmp = timestampColumnVector.hour();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(14, result.getShort(0));
assertEquals(12, result.getShort(1));
assertEquals(7, result.getShort(2));
}
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector tmp = timestampColumnVector.hour();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(14, result.getShort(0));
assertEquals(12, result.getShort(1));
assertEquals(7, result.getShort(2));
}
}
@Test
public void getMinute() {
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS)) {
assert timestampColumnVector.getType().equals(DType.TIMESTAMP_MILLISECONDS);
try (ColumnVector tmp = timestampColumnVector.minute();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(1, result.getShort(0));
assertEquals(0, result.getShort(1));
assertEquals(32, result.getShort(2));
}
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector tmp = timestampColumnVector.minute();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(1, result.getShort(0));
assertEquals(0, result.getShort(1));
assertEquals(32, result.getShort(2));
}
}
@Test
public void getSecond() {
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS)) {
assert timestampColumnVector.getType().equals(DType.TIMESTAMP_MILLISECONDS);
try (ColumnVector tmp = timestampColumnVector.second();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(12, result.getShort(0));
assertEquals(0, result.getShort(1));
assertEquals(12, result.getShort(2));
}
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector tmp = timestampColumnVector.second();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(12, result.getShort(0));
assertEquals(0, result.getShort(1));
assertEquals(12, result.getShort(2));
}
}
@Test
public void testWeekDay() {
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector result = timestampColumnVector.weekDay();
ColumnVector expected = ColumnVector.fromBoxedShorts(
(short)2, (short)3, (short)3, (short)2, (short)3)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector result = timestampColumnVector.weekDay();
ColumnVector expected = ColumnVector.fromBoxedShorts(
(short)2, (short)3, (short)3, (short)2, (short)3)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampDaysFromBoxedInts(
17713, 17714, 17715, 17716, 17717, 17718, 17719, 17720);
ColumnVector result = timestampColumnVector.weekDay();
ColumnVector expected = ColumnVector.fromBoxedShorts(
(short)7, (short)1, (short)2, (short)3, (short)4, (short)5, (short)6, (short)7)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
public void testLastDayOfMonth() {
int[] EXPECTED = new int[]{
-1523, //1965-10-31
17743, //2018-07-31
19388, //2023-01-31
-1523, //1965-10-31
17743}; //2018-07-31
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector result = timestampColumnVector.lastDayOfMonth();
ColumnVector expected = ColumnVector.daysFromInts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector result = timestampColumnVector.lastDayOfMonth();
ColumnVector expected = ColumnVector.daysFromInts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.daysFromInts(TIMES_DAY);
ColumnVector result = timestampColumnVector.lastDayOfMonth();
ColumnVector expected = ColumnVector.daysFromInts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
public void testDayOfYear() {
short[] EXPECTED = new short[]{299, 185, 25, 299, 185};
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector result = timestampColumnVector.dayOfYear();
ColumnVector expected = ColumnVector.fromShorts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector result = timestampColumnVector.dayOfYear();
ColumnVector expected = ColumnVector.fromShorts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.daysFromInts(TIMES_DAY);
ColumnVector result = timestampColumnVector.dayOfYear();
ColumnVector expected = ColumnVector.fromShorts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
public void testQuarterOfYear() {
short[] EXPECTED = new short[]{4, 3, 1, 4, 3};
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector result = timestampColumnVector.quarterOfYear();
ColumnVector expected = ColumnVector.fromShorts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector result = timestampColumnVector.quarterOfYear();
ColumnVector expected = ColumnVector.fromShorts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.daysFromInts(TIMES_DAY);
ColumnVector result = timestampColumnVector.quarterOfYear();
ColumnVector expected = ColumnVector.fromShorts(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
public void testAddMonths() {
long[] EXPECTED = new long[]{
-131968727762L, //'1965-10-26 14:01:12.238' Tuesday
1533384000115L, //'2018-08-04 12:00:00.115' Saturday
1679729532929L, //'2023-03-25 07:32:12.929' Saturday
-124019927762L, //'1966-01-26 14:01:12.238' Wednesday
1520164800115L}; //'2018-03-04 12:00:00.115' Sunday
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector months = ColumnVector.fromShorts(
(short)0, (short)1, (short)2, (short)3, (short)-4);
ColumnVector result = timestampColumnVector.addCalendricalMonths(months);
ColumnVector expected = ColumnVector.timestampMilliSecondsFromLongs(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
}
@Test
public void testIsLeapYear() {
Boolean[] EXPECTED = new Boolean[]{false, false, false, false, false};
try (ColumnVector timestampColumnVector = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector result = timestampColumnVector.isLeapYear();
ColumnVector expected = ColumnVector.fromBoxedBooleans(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector result = timestampColumnVector.isLeapYear();
ColumnVector expected = ColumnVector.fromBoxedBooleans(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
try (ColumnVector timestampColumnVector = ColumnVector.daysFromInts(TIMES_DAY);
ColumnVector result = timestampColumnVector.isLeapYear();
ColumnVector expected = ColumnVector.fromBoxedBooleans(EXPECTED)) {
assertColumnsAreEqual(expected, result);
}
final long[] LEAP_TIMES_S = {1073865600L, // Monday, January 12, 2004 0:00:00
947635200L, // Wednesday, January 12, 2000 0:00:00
-2208038400L // Friday, January 12, 1900 0:00:00
};
try (ColumnVector timestampColumnVector = ColumnVector.timestampSecondsFromLongs(LEAP_TIMES_S);
ColumnVector result = timestampColumnVector.isLeapYear();
ColumnVector expected = ColumnVector.fromBoxedBooleans(true, true, false)) {
assertColumnsAreEqual(expected, result);
}
}
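// Casting a millisecond timestamp down to seconds floors toward negative infinity:
// -131968727762 ms becomes -131968728 s below, not -131968727 s.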
@Test
public void testCastToTimestamp() {
try (ColumnVector timestampMillis = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector tmp = timestampMillis.asTimestampSeconds();
HostColumnVector result = tmp.copyToHost()) {
assertEquals(-131968728L, result.getLong(0));
assertEquals(1530705600L, result.getLong(1));
assertEquals(1674631932L, result.getLong(2));
}
}
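// Parsing full timestamp strings into TIMESTAMP_DAYS keeps only the date part;
// the %H:%M:%S fields are consumed by the parser but the time of day is dropped.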
@Test
public void testTimestampToDays() {
try (ColumnVector s_string_times = ColumnVector.fromStrings(TIMES_S_STRING);
ColumnVector ms_string_times = ColumnVector.fromStrings(TIMES_MS_STRING);
ColumnVector us_string_times = ColumnVector.fromStrings(TIMES_US_STRING);
ColumnVector ns_string_times = ColumnVector.fromStrings(TIMES_NS_STRING);
ColumnVector day_expected = ColumnVector.daysFromInts(TIMES_DAY);
ColumnVector s_result = s_string_times.asTimestamp(DType.TIMESTAMP_DAYS, "%Y-%m-%d %H:%M:%S");
ColumnVector ms_result = ms_string_times.asTimestamp(DType.TIMESTAMP_DAYS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector us_result = us_string_times.asTimestamp(DType.TIMESTAMP_DAYS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector ns_result = ns_string_times.asTimestamp(DType.TIMESTAMP_DAYS, "%Y-%m-%d %H:%M:%S.%f")) {
assertColumnsAreEqual(day_expected, s_result);
assertColumnsAreEqual(day_expected, ms_result);
assertColumnsAreEqual(day_expected, us_result);
assertColumnsAreEqual(day_expected, ns_result);
}
}
@Test
public void testTimestampToLongSecond() {
try (ColumnVector s_string_times = ColumnVector.fromStrings(TIMES_S_STRING);
ColumnVector ms_string_times = ColumnVector.fromStrings(TIMES_MS_STRING);
ColumnVector us_string_times = ColumnVector.fromStrings(TIMES_US_STRING);
ColumnVector ns_string_times = ColumnVector.fromStrings(TIMES_NS_STRING);
ColumnVector s_expected = ColumnVector.timestampSecondsFromLongs(TIMES_S);
ColumnVector s_result = s_string_times.asTimestamp(DType.TIMESTAMP_SECONDS, "%Y-%m-%d %H:%M:%S");
ColumnVector ms_result = ms_string_times.asTimestamp(DType.TIMESTAMP_SECONDS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector us_result = us_string_times.asTimestamp(DType.TIMESTAMP_SECONDS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector ns_result = ns_string_times.asTimestamp(DType.TIMESTAMP_SECONDS, "%Y-%m-%d %H:%M:%S.%f")) {
assertColumnsAreEqual(s_expected, s_result);
assertColumnsAreEqual(s_expected, ms_result);
assertColumnsAreEqual(s_expected, us_result);
assertColumnsAreEqual(s_expected, ns_result);
}
}
@Test
public void testTimestampToLongMillisecond() {
try (ColumnVector s_string_times = ColumnVector.fromStrings(TIMES_S_STRING);
ColumnVector ms_string_times = ColumnVector.fromStrings(TIMES_MS_STRING);
ColumnVector us_string_times = ColumnVector.fromStrings(TIMES_US_STRING);
ColumnVector ns_string_times = ColumnVector.fromStrings(TIMES_NS_STRING);
ColumnVector s_expected = applyAndClose(mulThouAndClose(ColumnVector.fromLongs(TIMES_S), 1), cv -> cv.asTimestampMilliseconds());
ColumnVector ms_expected = ColumnVector.timestampMilliSecondsFromLongs(TIMES_MS);
ColumnVector s_result = s_string_times.asTimestamp(DType.TIMESTAMP_MILLISECONDS, "%Y-%m-%d %H:%M:%S");
ColumnVector ms_result = ms_string_times.asTimestamp(DType.TIMESTAMP_MILLISECONDS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector us_result = us_string_times.asTimestamp(DType.TIMESTAMP_MILLISECONDS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector ns_result = ns_string_times.asTimestamp(DType.TIMESTAMP_MILLISECONDS, "%Y-%m-%d %H:%M:%S.%f")) {
assertColumnsAreEqual(s_expected, s_result);
assertColumnsAreEqual(ms_expected, ms_result);
assertColumnsAreEqual(ms_expected, us_result);
assertColumnsAreEqual(ms_expected, ns_result);
}
}
@Test
public void testTimestampToLongMicrosecond() {
try (ColumnVector s_string_times = ColumnVector.fromStrings(TIMES_S_STRING);
ColumnVector ms_string_times = ColumnVector.fromStrings(TIMES_MS_STRING);
ColumnVector us_string_times = ColumnVector.fromStrings(TIMES_US_STRING);
ColumnVector ns_string_times = ColumnVector.fromStrings(TIMES_NS_STRING);
ColumnVector s_expected = applyAndClose(mulThouAndClose(ColumnVector.fromLongs(TIMES_S), 2), cv -> cv.asTimestampMicroseconds());
ColumnVector ms_expected = applyAndClose(mulThouAndClose(ColumnVector.fromLongs(TIMES_MS), 1), cv -> cv.asTimestampMicroseconds());
ColumnVector us_expected = ColumnVector.timestampMicroSecondsFromLongs(TIMES_US);
ColumnVector s_result = s_string_times.asTimestamp(DType.TIMESTAMP_MICROSECONDS, "%Y-%m-%d %H:%M:%S");
ColumnVector ms_result = ms_string_times.asTimestamp(DType.TIMESTAMP_MICROSECONDS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector us_result = us_string_times.asTimestamp(DType.TIMESTAMP_MICROSECONDS, "%Y-%m-%d %H:%M:%S.%f");
ColumnVector ns_result = ns_string_times.asTimestamp(DType.TIMESTAMP_MICROSECONDS, "%Y-%m-%d %H:%M:%S.%f")) {
assertColumnsAreEqual(s_expected, s_result);
assertColumnsAreEqual(ms_expected, ms_result);
assertColumnsAreEqual(us_expected, us_result);
assertColumnsAreEqual(us_expected, ns_result);
}
}
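// %9f requests nine fractional-second digits from the parser, which is what
// TIMESTAMP_NANOSECONDS needs so the sub-microsecond part is not truncated.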
@Test
public void testTimestampToLongNanosecond() {
try (ColumnVector s_string_times = ColumnVector.fromStrings(TIMES_S_STRING);
ColumnVector ms_string_times = ColumnVector.fromStrings(TIMES_MS_STRING);
ColumnVector us_string_times = ColumnVector.fromStrings(TIMES_US_STRING);
ColumnVector ns_string_times = ColumnVector.fromStrings(TIMES_NS_STRING);
ColumnVector s_expected = applyAndClose(mulThouAndClose(ColumnVector.fromLongs(TIMES_S), 3), cv -> cv.asTimestampNanoseconds());
ColumnVector ms_expected = applyAndClose(mulThouAndClose(ColumnVector.fromLongs(TIMES_MS), 2), cv -> cv.asTimestampNanoseconds());
ColumnVector us_expected = applyAndClose(mulThouAndClose(ColumnVector.fromLongs(TIMES_US), 1), cv -> cv.asTimestampNanoseconds());
ColumnVector ns_expected = ColumnVector.timestampNanoSecondsFromLongs(TIMES_NS);
ColumnVector s_result = s_string_times.asTimestamp(DType.TIMESTAMP_NANOSECONDS, "%Y-%m-%d %H:%M:%S");
ColumnVector ms_result = ms_string_times.asTimestamp(DType.TIMESTAMP_NANOSECONDS, "%Y-%m-%d %H:%M:%S.%9f");
ColumnVector us_result = us_string_times.asTimestamp(DType.TIMESTAMP_NANOSECONDS, "%Y-%m-%d %H:%M:%S.%9f");
ColumnVector ns_result = ns_string_times.asTimestamp(DType.TIMESTAMP_NANOSECONDS, "%Y-%m-%d %H:%M:%S.%9f")) {
assertColumnsAreEqual(s_expected, s_result);
assertColumnsAreEqual(ms_expected, ms_result);
assertColumnsAreEqual(us_expected, us_result);
assertColumnsAreEqual(ns_expected, ns_result);
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/MemoryBufferTest.java
|
/*
*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.jupiter.api.Assertions.*;
public class MemoryBufferTest extends CudfTestBase {
private static final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();
private static final byte[] BYTES = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
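// EXPECTED is BYTES after copying 3 bytes from source offset 2 into destination
// offset 1: positions 1..3 become {2, 3, 4} and every other byte is untouched.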
private static final byte[] EXPECTED = {0, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@Test
public void testAddressOutOfBoundsExceptionWhenCopying() {
try (HostMemoryBuffer from = hostMemoryAllocator.allocate(16);
HostMemoryBuffer to = hostMemoryAllocator.allocate(16)) {
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(-1, from, 0, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(16, from, 0, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(0, from, -1, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(0, from, 16, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(0, from, 0, -1, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(0, from, 0, 17, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(1, from, 0, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBuffer(0, from, 1, 16, Cuda.DEFAULT_STREAM));
}
}
@Test
public void testAddressOutOfBoundsExceptionWhenCopyingAsync() {
try (HostMemoryBuffer from = hostMemoryAllocator.allocate(16);
HostMemoryBuffer to = hostMemoryAllocator.allocate(16)) {
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(-1, from, 0, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(16, from, 0, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(0, from, -1, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(0, from, 16, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(0, from, 0, -1, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(0, from, 0, 17, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(1, from, 0, 16, Cuda.DEFAULT_STREAM));
assertThrows(AssertionError.class, () -> to.copyFromMemoryBufferAsync(0, from, 1, 16, Cuda.DEFAULT_STREAM));
}
}
@Test
public void testCopyingFromDeviceToDevice() {
try (HostMemoryBuffer in = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer from = DeviceMemoryBuffer.allocate(16);
DeviceMemoryBuffer to = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer out = hostMemoryAllocator.allocate(16)) {
in.setBytes(0, BYTES, 0, 16);
from.copyFromHostBuffer(in);
to.copyFromMemoryBuffer(0, from, 0, 16, Cuda.DEFAULT_STREAM);
to.copyFromMemoryBuffer(1, from, 2, 3, Cuda.DEFAULT_STREAM);
out.copyFromDeviceBuffer(to);
verifyOutput(out);
}
}
@Test
public void testCopyingFromDeviceToDeviceAsync() {
try (HostMemoryBuffer in = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer from = DeviceMemoryBuffer.allocate(16);
DeviceMemoryBuffer to = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer out = hostMemoryAllocator.allocate(16)) {
in.setBytes(0, BYTES, 0, 16);
from.copyFromHostBuffer(in);
to.copyFromMemoryBufferAsync(0, from, 0, 16, Cuda.DEFAULT_STREAM);
to.copyFromMemoryBufferAsync(1, from, 2, 3, Cuda.DEFAULT_STREAM);
out.copyFromDeviceBufferAsync(to, Cuda.DEFAULT_STREAM);
Cuda.DEFAULT_STREAM.sync();
verifyOutput(out);
}
}
@Test
public void testCopyingFromHostToHost() {
try (HostMemoryBuffer from = hostMemoryAllocator.allocate(16);
HostMemoryBuffer to = hostMemoryAllocator.allocate(16)) {
from.setBytes(0, BYTES, 0, 16);
to.setBytes(0, BYTES, 0, 16);
to.copyFromMemoryBuffer(1, from, 2, 3, Cuda.DEFAULT_STREAM);
verifyOutput(to);
}
}
@Test
public void testCopyingFromHostToHostAsync() {
try (HostMemoryBuffer from = hostMemoryAllocator.allocate(16);
HostMemoryBuffer to = hostMemoryAllocator.allocate(16)) {
from.setBytes(0, BYTES, 0, 16);
to.setBytes(0, BYTES, 0, 16);
to.copyFromMemoryBufferAsync(1, from, 2, 3, Cuda.DEFAULT_STREAM);
verifyOutput(to);
}
}
@Test
public void testCopyingFromHostToDevice() {
try (HostMemoryBuffer from = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer to = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer out = hostMemoryAllocator.allocate(16)) {
from.setBytes(0, BYTES, 0, 16);
to.copyFromMemoryBuffer(0, from, 0, 16, Cuda.DEFAULT_STREAM);
to.copyFromMemoryBufferAsync(1, from, 2, 3, Cuda.DEFAULT_STREAM);
out.copyFromDeviceBuffer(to);
verifyOutput(out);
}
}
@Test
public void testCopyingFromHostToDeviceAsync() {
try (HostMemoryBuffer from = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer to = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer out = hostMemoryAllocator.allocate(16)) {
from.setBytes(0, BYTES, 0, 16);
to.copyFromMemoryBufferAsync(0, from, 0, 16, Cuda.DEFAULT_STREAM);
to.copyFromMemoryBufferAsync(1, from, 2, 3, Cuda.DEFAULT_STREAM);
out.copyFromDeviceBufferAsync(to, Cuda.DEFAULT_STREAM);
Cuda.DEFAULT_STREAM.sync();
verifyOutput(out);
}
}
@Test
public void testCopyingFromDeviceToHost() {
try (HostMemoryBuffer in = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer from = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer to = hostMemoryAllocator.allocate(16)) {
in.setBytes(0, BYTES, 0, 16);
from.copyFromHostBuffer(in);
to.setBytes(0, BYTES, 0, 16);
to.copyFromMemoryBuffer(1, from, 2, 3, Cuda.DEFAULT_STREAM);
verifyOutput(to);
}
}
@Test
public void testCopyingFromDeviceToHostAsync() {
try (HostMemoryBuffer in = hostMemoryAllocator.allocate(16);
DeviceMemoryBuffer from = DeviceMemoryBuffer.allocate(16);
HostMemoryBuffer to = hostMemoryAllocator.allocate(16)) {
in.setBytes(0, BYTES, 0, 16);
from.copyFromHostBuffer(in);
to.setBytes(0, BYTES, 0, 16);
to.copyFromMemoryBufferAsync(1, from, 2, 3, Cuda.DEFAULT_STREAM);
Cuda.DEFAULT_STREAM.sync();
verifyOutput(to);
}
}
private void verifyOutput(HostMemoryBuffer out) {
byte[] bytes = new byte[16];
out.getBytes(bytes, 0, 0, 16);
assertArrayEquals(EXPECTED, bytes);
}
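// The close event handler fires each time a reference to the buffer is released:
// closing both a slice and its parent below results in two callbacks.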
@Test
public void testEventHandlerIsCalledForEachClose() {
final AtomicInteger onClosedWasCalled = new AtomicInteger(0);
try (DeviceMemoryBuffer b = DeviceMemoryBuffer.allocate(256)) {
b.setEventHandler(refCount -> onClosedWasCalled.incrementAndGet());
}
assertEquals(1, onClosedWasCalled.get());
onClosedWasCalled.set(0);
try (DeviceMemoryBuffer b = DeviceMemoryBuffer.allocate(256)) {
b.setEventHandler(refCount -> onClosedWasCalled.incrementAndGet());
DeviceMemoryBuffer sliced = b.slice(0, b.getLength());
sliced.close();
}
assertEquals(2, onClosedWasCalled.get());
}
@Test
public void testEventHandlerIsNotCalledIfNotSet() {
final AtomicInteger onClosedWasCalled = new AtomicInteger(0);
try (DeviceMemoryBuffer b = DeviceMemoryBuffer.allocate(256)) {
assertNull(b.getEventHandler());
}
assertEquals(0, onClosedWasCalled.get());
try (DeviceMemoryBuffer b = DeviceMemoryBuffer.allocate(256)) {
b.setEventHandler(refCount -> onClosedWasCalled.incrementAndGet());
b.setEventHandler(null);
}
assertEquals(0, onClosedWasCalled.get());
}
@Test
public void testEventHandlerReturnsPreviousHandlerOnReset() {
try (DeviceMemoryBuffer b = DeviceMemoryBuffer.allocate(256)) {
MemoryBuffer.EventHandler handler = refCount -> {};
MemoryBuffer.EventHandler handler2 = refCount -> {};
assertNull(b.setEventHandler(handler));
assertEquals(handler, b.setEventHandler(null));
assertNull(b.setEventHandler(handler2));
assertEquals(handler2, b.setEventHandler(handler));
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/HashJoinTest.java
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class HashJoinTest {
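// A HashJoin wraps a prebuilt hash table for the build-side table; compareNulls
// controls whether null join keys are considered equal to each other.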
@Test
void testGetNumberOfColumns() {
try (Table t = new Table.TestBuilder().column(1, 2).column(3, 4).column(5, 6).build();
HashJoin hashJoin = new HashJoin(t, false)) {
assertEquals(3, hashJoin.getNumberOfColumns());
}
}
@Test
void testGetCompareNulls() {
try (Table t = new Table.TestBuilder().column(1, 2, 3, 4).column(5, 6, 7, 8).build()) {
try (HashJoin hashJoin = new HashJoin(t, false)) {
assertFalse(hashJoin.getCompareNulls());
}
try (HashJoin hashJoin = new HashJoin(t, true)) {
assertTrue(hashJoin.getCompareNulls());
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/GatherMapTest.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class GatherMapTest {
private static final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();
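// A gather map is a device buffer of INT32 row indices, so its byte length must be
// a multiple of 4: 707 bytes is rejected below, while 700 bytes holds 175 rows.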
@Test
void testInvalidBuffer() {
try (DeviceMemoryBuffer buffer = DeviceMemoryBuffer.allocate(707)) {
assertThrows(IllegalArgumentException.class, () -> new GatherMap(buffer));
}
}
@Test
void testRowCount() {
try (GatherMap map = new GatherMap(DeviceMemoryBuffer.allocate(700))) {
assertEquals(175, map.getRowCount());
}
}
@Test
void testClose() {
DeviceMemoryBuffer mockBuffer = Mockito.mock(DeviceMemoryBuffer.class);
GatherMap map = new GatherMap(mockBuffer);
map.close();
Mockito.verify(mockBuffer).close();
}
@Test
void testReleaseBuffer() {
DeviceMemoryBuffer mockBuffer = Mockito.mock(DeviceMemoryBuffer.class);
GatherMap map = new GatherMap(mockBuffer);
DeviceMemoryBuffer buffer = map.releaseBuffer();
assertSame(mockBuffer, buffer);
map.close();
Mockito.verify(mockBuffer, Mockito.never()).close();
}
@Test
void testInvalidColumnView() {
try (GatherMap map = new GatherMap(DeviceMemoryBuffer.allocate(1024))) {
assertThrows(IllegalArgumentException.class, () -> map.toColumnView(0, 257));
assertThrows(IllegalArgumentException.class, () -> map.toColumnView(257, 0));
assertThrows(IllegalArgumentException.class, () -> map.toColumnView(-4, 253));
assertThrows(IllegalArgumentException.class, () -> map.toColumnView(4, -2));
}
}
@Test
void testToColumnView() {
try (HostMemoryBuffer hostBuffer = hostMemoryAllocator.allocate(8 * 4)) {
hostBuffer.setInts(0, new int[]{10, 11, 12, 13, 14, 15, 16, 17}, 0, 8);
try (DeviceMemoryBuffer devBuffer = DeviceMemoryBuffer.allocate(8 * 4)) {
devBuffer.copyFromHostBuffer(hostBuffer);
devBuffer.incRefCount();
try (GatherMap map = new GatherMap(devBuffer)) {
ColumnView view = map.toColumnView(0, 8);
assertEquals(DType.INT32, view.getType());
assertEquals(0, view.getNullCount());
assertEquals(8, view.getRowCount());
try (HostMemoryBuffer viewHostBuffer = hostMemoryAllocator.allocate(8 * 4)) {
viewHostBuffer.copyFromDeviceBuffer(view.getData());
for (int i = 0; i < 8; i++) {
assertEquals(i + 10, viewHostBuffer.getInt(4*i));
}
}
view = map.toColumnView(3, 2);
assertEquals(DType.INT32, view.getType());
assertEquals(0, view.getNullCount());
assertEquals(2, view.getRowCount());
try (HostMemoryBuffer viewHostBuffer = hostMemoryAllocator.allocate(8)) {
viewHostBuffer.copyFromDeviceBuffer(view.getData());
assertEquals(13, viewHostBuffer.getInt(0));
assertEquals(14, viewHostBuffer.getInt(4));
}
}
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/LongColumnVectorTest.java
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.Test;
import java.util.Random;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class LongColumnVectorTest extends CudfTestBase {
@Test
public void testCreateColumnVectorBuilder() {
try (ColumnVector longColumnVector = ColumnVector.build(DType.INT64, 3, (b) -> b.append(1L))) {
assertFalse(longColumnVector.hasNulls());
}
}
@Test
public void testArrayAllocation() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertEquals(2, cv.getLong(0));
assertEquals(3, cv.getLong(1));
assertEquals(5, cv.getLong(2));
};
try (HostColumnVector lcv = HostColumnVector.fromLongs(2L, 3L, 5L)) {
verify.accept(lcv);
}
try (HostColumnVector lcv = ColumnBuilderHelper.fromLongs(true, 2L, 3L, 5L)) {
verify.accept(lcv);
}
}
@Test
public void testUnsignedArrayAllocation() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertEquals(Long.toUnsignedString(0xfedcba9876543210L),
Long.toUnsignedString(cv.getLong(0)));
assertEquals(Long.toUnsignedString(0x8000000000000000L),
Long.toUnsignedString(cv.getLong(1)));
assertEquals(5L, cv.getLong(2));
};
try (HostColumnVector lcv = HostColumnVector.fromUnsignedLongs(
0xfedcba9876543210L, 0x8000000000000000L, 5L)) {
verify.accept(lcv);
}
try (HostColumnVector lcv = ColumnBuilderHelper.fromLongs(false,
0xfedcba9876543210L, 0x8000000000000000L, 5L)) {
verify.accept(lcv);
}
}
@Test
public void testUpperIndexOutOfBoundsException() {
Consumer<HostColumnVector> verify = (cv) -> {
assertThrows(AssertionError.class, () -> cv.getLong(3));
assertFalse(cv.hasNulls());
};
try (HostColumnVector lcv = HostColumnVector.fromLongs(2L, 3L, 5L)) {
verify.accept(lcv);
}
try (HostColumnVector lcv = ColumnBuilderHelper.fromLongs(true, 2L, 3L, 5L)) {
verify.accept(lcv);
}
}
@Test
public void testLowerIndexOutOfBoundsException() {
Consumer<HostColumnVector> verify = (cv) -> {
assertFalse(cv.hasNulls());
assertThrows(AssertionError.class, () -> cv.getLong(-1));
};
try (HostColumnVector lcv = HostColumnVector.fromLongs(2L, 3L, 5L)) {
verify.accept(lcv);
}
try (HostColumnVector lcv = ColumnBuilderHelper.fromLongs(true, 2L, 3L, 5L)) {
verify.accept(lcv);
}
}
@Test
public void testAddingNullValues() {
Consumer<HostColumnVector> verify = (cv) -> {
assertTrue(cv.hasNulls());
assertEquals(2, cv.getNullCount());
for (int i = 0; i < 6; i++) {
assertFalse(cv.isNull(i));
}
assertTrue(cv.isNull(6));
assertTrue(cv.isNull(7));
};
try (HostColumnVector lcv = HostColumnVector.fromBoxedLongs(2L, 3L, 4L, 5L, 6L, 7L, null, null)) {
verify.accept(lcv);
}
try (HostColumnVector lcv = ColumnBuilderHelper.fromBoxedLongs(true,
2L, 3L, 4L, 5L, 6L, 7L, null, null)) {
verify.accept(lcv);
}
}
@Test
public void testAddingUnsignedNullValues() {
Consumer<HostColumnVector> verify = (cv) -> {
assertTrue(cv.hasNulls());
assertEquals(2, cv.getNullCount());
for (int i = 0; i < 6; i++) {
assertFalse(cv.isNull(i));
}
assertEquals(Long.toUnsignedString(0xfedcba9876543210L),
Long.toUnsignedString(cv.getLong(4)));
assertEquals(Long.toUnsignedString(0x8000000000000000L),
Long.toUnsignedString(cv.getLong(5)));
assertTrue(cv.isNull(6));
assertTrue(cv.isNull(7));
};
try (HostColumnVector lcv = HostColumnVector.fromBoxedUnsignedLongs(
2L, 3L, 4L, 5L, 0xfedcba9876543210L, 0x8000000000000000L, null, null)) {
verify.accept(lcv);
}
try (HostColumnVector lcv = ColumnBuilderHelper.fromBoxedLongs(false,
2L, 3L, 4L, 5L, 0xfedcba9876543210L, 0x8000000000000000L, null, null)) {
verify.accept(lcv);
}
}
@Test
public void testOverrunningTheBuffer() {
try (Builder builder = HostColumnVector.builder(DType.INT64, 3)) {
assertThrows(AssertionError.class,
() -> builder.append(2L).appendNull().append(5L).append(4L).build());
}
}
@Test
void testAppendVector() {
Random random = new Random(192312989128L);
for (int dstSize = 1; dstSize <= 100; dstSize++) {
for (int dstPrefilledSize = 0; dstPrefilledSize < dstSize; dstPrefilledSize++) {
final int srcSize = dstSize - dstPrefilledSize;
for (int sizeOfDataNotToAdd = 0; sizeOfDataNotToAdd <= dstPrefilledSize; sizeOfDataNotToAdd++) {
try (Builder dst = HostColumnVector.builder(DType.INT64, dstSize);
HostColumnVector src = HostColumnVector.build(DType.INT64, srcSize, (b) -> {
for (int i = 0; i < srcSize; i++) {
if (random.nextBoolean()) {
b.appendNull();
} else {
b.append(random.nextLong());
}
}
});
Builder gtBuilder = HostColumnVector.builder(DType.INT64,
dstPrefilledSize)) {
assertEquals(dstSize, srcSize + dstPrefilledSize);
// add the first half of the prefilled list
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
if (random.nextBoolean()) {
dst.appendNull();
gtBuilder.appendNull();
} else {
long a = random.nextLong();
dst.append(a);
gtBuilder.append(a);
}
}
// append the src vector
dst.append(src);
try (HostColumnVector dstVector = dst.build();
HostColumnVector gt = gtBuilder.build()) {
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
assertEquals(gt.isNull(i), dstVector.isNull(i));
if (!gt.isNull(i)) {
assertEquals(gt.getLong(i), dstVector.getLong(i));
}
}
for (int i = dstPrefilledSize - sizeOfDataNotToAdd, j = 0; i < dstSize - sizeOfDataNotToAdd && j < srcSize; i++, j++) {
assertEquals(src.isNull(j), dstVector.isNull(i));
if (!src.isNull(j)) {
assertEquals(src.getLong(j), dstVector.getLong(i));
}
}
if (dstVector.hasValidityVector()) {
long maxIndex =
BitVectorHelper.getValidityAllocationSizeInBytes(dstVector.getRowCount()) * 8;
for (long i = dstSize - sizeOfDataNotToAdd; i < maxIndex; i++) {
assertFalse(dstVector.isNullExtendedRange(i));
}
}
}
}
}
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/DecimalColumnVectorTest.java
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.util.Arrays;
import java.util.Objects;
import java.util.Random;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.*;
public class DecimalColumnVectorTest extends CudfTestBase {
private static final Random rdSeed = new Random(1234);
private static final int dec32Scale = 4;
private static final int dec64Scale = 10;
private static final int dec128Scale = 30;
private static final BigDecimal[] decimal32Zoo = new BigDecimal[20];
private static final BigDecimal[] decimal64Zoo = new BigDecimal[20];
private static final BigDecimal[] decimal128Zoo = new BigDecimal[20];
private static final int[] unscaledDec32Zoo = new int[decimal32Zoo.length];
private static final long[] unscaledDec64Zoo = new long[decimal64Zoo.length];
private static final BigInteger[] unscaledDec128Zoo = new BigInteger[decimal128Zoo.length];
private final BigDecimal[] boundaryDecimal32 = new BigDecimal[]{
new BigDecimal("999999999"), new BigDecimal("-999999999")};
private final BigDecimal[] boundaryDecimal64 = new BigDecimal[]{
new BigDecimal("999999999999999999"), new BigDecimal("-999999999999999999")};
private final BigDecimal[] boundaryDecimal128 = new BigDecimal[]{
new BigDecimal("99999999999999999999999999999999999999"), new BigDecimal("-99999999999999999999999999999999999999")};
private final BigDecimal[] overflowDecimal32 = new BigDecimal[]{
BigDecimal.valueOf(Integer.MAX_VALUE), BigDecimal.valueOf(Integer.MIN_VALUE)};
private final BigDecimal[] overflowDecimal64 = new BigDecimal[]{
BigDecimal.valueOf(Long.MAX_VALUE), BigDecimal.valueOf(Long.MIN_VALUE)};
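// 39-digit values: one digit beyond DECIMAL128's maximum precision of 38,
// so building a column from them must fail validation.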
private final BigDecimal[] overflowDecimal128 = new BigDecimal[]{
new BigDecimal("340282367000000000000000000000000000001"),
new BigDecimal("-340282367000000000000000000000000000001")};
@BeforeAll
public static void setup() {
for (int i = 0; i < decimal32Zoo.length; i++) {
unscaledDec32Zoo[i] = rdSeed.nextInt() / 100;
unscaledDec64Zoo[i] = rdSeed.nextLong() / 100;
unscaledDec128Zoo[i] = BigInteger.valueOf(rdSeed.nextLong()).multiply(BigInteger.valueOf(rdSeed.nextLong()));
if (rdSeed.nextBoolean()) {
// Create BigDecimal with slight variance on scale, in order to test building cv from inputs with different scales.
decimal32Zoo[i] = BigDecimal.valueOf(rdSeed.nextInt() / 100, dec32Scale - rdSeed.nextInt(2));
} else {
decimal32Zoo[i] = null;
}
if (rdSeed.nextBoolean()) {
// Create BigDecimal with slight variance on scale, in order to test building cv from inputs with different scales.
decimal64Zoo[i] = BigDecimal.valueOf(rdSeed.nextLong() / 100, dec64Scale - rdSeed.nextInt(2));
} else {
decimal64Zoo[i] = null;
}
if (rdSeed.nextBoolean()) {
BigInteger unscaledVal = BigInteger.valueOf(rdSeed.nextLong()).multiply(BigInteger.valueOf(rdSeed.nextLong()));
decimal128Zoo[i] = new BigDecimal(unscaledVal, dec128Scale);
} else {
decimal128Zoo[i] = null;
}
}
}
@Test
public void testCreateColumnVectorBuilder() {
try (ColumnVector cv = ColumnVector.build(DType.create(DType.DTypeEnum.DECIMAL32, -5), 3,
(b) -> b.append(BigDecimal.valueOf(123456789, 5)))) {
assertFalse(cv.hasNulls());
}
try (ColumnVector cv = ColumnVector.build(DType.create(DType.DTypeEnum.DECIMAL64, -10), 3,
(b) -> b.append(BigDecimal.valueOf(1023040506070809L, 10)))) {
assertFalse(cv.hasNulls());
}
// test building ColumnVector from BigDecimal values with varying scales
try (ColumnVector cv = ColumnVector.build(DType.create(DType.DTypeEnum.DECIMAL64, -5), 7,
(b) -> b.append(BigDecimal.valueOf(123456, 0), RoundingMode.UNNECESSARY)
.append(BigDecimal.valueOf(123456, 2), RoundingMode.UNNECESSARY)
.append(BigDecimal.valueOf(123456, 5))
.append(BigDecimal.valueOf(123456, 7), RoundingMode.HALF_UP)
.append(BigDecimal.valueOf(123456, 7), RoundingMode.FLOOR)
.append(BigDecimal.valueOf(123456, 9), RoundingMode.HALF_DOWN)
.append(BigDecimal.valueOf(123456, 9), RoundingMode.CEILING))) {
try (HostColumnVector hcv = cv.copyToHost()) {
assertEquals(12345600000L, hcv.getLong(0));
assertEquals(123456000L, hcv.getLong(1));
assertEquals(123456L, hcv.getLong(2));
assertEquals(1235L, hcv.getLong(3));
assertEquals(1234L, hcv.getLong(4));
assertEquals(12L, hcv.getLong(5));
assertEquals(13L, hcv.getLong(6));
}
}
}
@Test
public void testUpperIndexOutOfBoundsException() {
try (HostColumnVector decColumnVector = HostColumnVector.fromDecimals(decimal32Zoo)) {
assertThrows(AssertionError.class, () -> decColumnVector.getBigDecimal(decimal32Zoo.length));
}
}
@Test
public void testLowerIndexOutOfBoundsException() {
try (HostColumnVector doubleColumnVector = HostColumnVector.fromDecimals(decimal32Zoo)) {
assertThrows(AssertionError.class, () -> doubleColumnVector.getBigDecimal(-1));
}
}
@Test
public void testAddingNullValues() {
try (HostColumnVector cv = HostColumnVector.fromDecimals(decimal64Zoo)) {
for (int i = 0; i < decimal64Zoo.length; ++i) {
assertEquals(decimal64Zoo[i] == null, cv.isNull(i));
}
assertEquals(Arrays.stream(decimal64Zoo).filter(Objects::isNull).count(), cv.getNullCount());
}
}
@Test
public void testOverrunningTheBuffer() {
try (Builder builder = HostColumnVector.builder(DType.create(DType.DTypeEnum.DECIMAL32, -dec32Scale), 3)) {
assertThrows(AssertionError.class, () -> builder.appendBoxed(decimal32Zoo).build());
}
try (Builder builder = HostColumnVector.builder(DType.create(DType.DTypeEnum.DECIMAL64, -dec64Scale), 3)) {
assertThrows(AssertionError.class, () -> builder.appendUnscaledDecimalArray(unscaledDec64Zoo).build());
}
}
@Test
public void testDecimalValidation() {
// precision overflow
assertThrows(IllegalArgumentException.class, () -> HostColumnVector.fromDecimals(overflowDecimal128));
assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector ignored = ColumnVector.decimalFromInts(
-(DType.DECIMAL32_MAX_PRECISION + 1), unscaledDec32Zoo)) {
}
});
assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector ignored = ColumnVector.decimalFromLongs(
-(DType.DECIMAL64_MAX_PRECISION + 1), unscaledDec64Zoo)) {
}
});
// precision overflow due to rescaling by min scale
assertThrows(IllegalArgumentException.class, () -> {
try (ColumnVector ignored = ColumnVector.fromDecimals(
BigDecimal.valueOf(1.23e30), BigDecimal.valueOf(1.2e-7))) {
}
});
// exactly hit the MAX_PRECISION_DECIMAL128 after rescaling
assertDoesNotThrow(() -> {
try (ColumnVector ignored = ColumnVector.fromDecimals(
BigDecimal.valueOf(1.23e30), BigDecimal.valueOf(1.2e-6))) {
}
});
}
@Test
public void testDecimalGeneral() {
// The safe max precision of DECIMAL32 is 9, so integers with 10 digits will be backed by DECIMAL64.
try (ColumnVector cv = ColumnVector.fromDecimals(overflowDecimal32)) {
assertEquals(DType.create(DType.DTypeEnum.DECIMAL64, 0), cv.getType());
}
try (ColumnVector cv = ColumnVector.fromDecimals(overflowDecimal64)) {
assertEquals(DType.create(DType.DTypeEnum.DECIMAL128, 0), cv.getType());
}
// Create DECIMAL64 vector with small values
try (ColumnVector cv = ColumnVector.decimalFromLongs(0, 0L)) {
try (HostColumnVector hcv = cv.copyToHost()) {
assertTrue(hcv.getType().isBackedByLong());
assertEquals(0L, hcv.getBigDecimal(0).longValue());
}
}
}
@Test
public void testDecimalFromDecimals() {
DecimalColumnVectorTest.testDecimalImpl(DType.DTypeEnum.DECIMAL32, dec32Scale, decimal32Zoo);
DecimalColumnVectorTest.testDecimalImpl(DType.DTypeEnum.DECIMAL64, dec64Scale, decimal64Zoo);
DecimalColumnVectorTest.testDecimalImpl(DType.DTypeEnum.DECIMAL128, dec128Scale, decimal128Zoo);
DecimalColumnVectorTest.testDecimalImpl(DType.DTypeEnum.DECIMAL32, 0, boundaryDecimal32);
DecimalColumnVectorTest.testDecimalImpl(DType.DTypeEnum.DECIMAL64, 0, boundaryDecimal64);
DecimalColumnVectorTest.testDecimalImpl(DType.DTypeEnum.DECIMAL128, 0, boundaryDecimal128);
}
private static void testDecimalImpl(DType.DTypeEnum decimalType, int scale, BigDecimal[] decimalZoo) {
Consumer<HostColumnVector> assertions = (hcv) -> {
assertEquals(-scale, hcv.getType().getScale());
assertEquals(hcv.getType().typeId, decimalType);
assertEquals(decimalZoo.length, hcv.rows);
for (int i = 0; i < decimalZoo.length; i++) {
assertEquals(decimalZoo[i] == null, hcv.isNull(i));
if (decimalZoo[i] != null) {
BigDecimal actual;
switch (decimalType) {
case DECIMAL32:
actual = BigDecimal.valueOf(hcv.getInt(i), scale);
break;
case DECIMAL64:
actual = BigDecimal.valueOf(hcv.getLong(i), scale);
break;
default:
actual = hcv.getBigDecimal(i);
}
assertEquals(decimalZoo[i].subtract(actual).longValueExact(), 0L);
}
}
};
try (ColumnVector cv = ColumnVector.fromDecimals(decimalZoo)) {
try (HostColumnVector hcv = cv.copyToHost()) {
assertions.accept(hcv);
}
}
try (HostColumnVector hcv = ColumnBuilderHelper.fromDecimals(decimalZoo)) {
assertions.accept(hcv);
}
}
@Test
public void testDecimalFromInts() {
try (ColumnVector cv = ColumnVector.decimalFromInts(-DecimalColumnVectorTest.dec32Scale, DecimalColumnVectorTest.unscaledDec32Zoo)) {
try (HostColumnVector hcv = cv.copyToHost()) {
for (int i = 0; i < DecimalColumnVectorTest.unscaledDec32Zoo.length; i++) {
assertEquals(DecimalColumnVectorTest.unscaledDec32Zoo[i], hcv.getInt(i));
assertEquals(BigDecimal.valueOf(DecimalColumnVectorTest.unscaledDec32Zoo[i], DecimalColumnVectorTest.dec32Scale), hcv.getBigDecimal(i));
}
}
}
}
@Test
public void testDecimalFromLongs() {
try (ColumnVector cv = ColumnVector.decimalFromLongs(-DecimalColumnVectorTest.dec64Scale, DecimalColumnVectorTest.unscaledDec64Zoo)) {
try (HostColumnVector hcv = cv.copyToHost()) {
for (int i = 0; i < DecimalColumnVectorTest.unscaledDec64Zoo.length; i++) {
assertEquals(DecimalColumnVectorTest.unscaledDec64Zoo[i], hcv.getLong(i));
assertEquals(BigDecimal.valueOf(DecimalColumnVectorTest.unscaledDec64Zoo[i], DecimalColumnVectorTest.dec64Scale), hcv.getBigDecimal(i));
}
}
}
}
@Test
public void testDecimalFromBigInts() {
try (ColumnVector cv = ColumnVector.decimalFromBigInt(-DecimalColumnVectorTest.dec128Scale, DecimalColumnVectorTest.unscaledDec128Zoo)) {
try (HostColumnVector hcv = cv.copyToHost()) {
for (int i = 0; i < DecimalColumnVectorTest.unscaledDec128Zoo.length; i++) {
assertEquals(DecimalColumnVectorTest.unscaledDec128Zoo[i], hcv.getBigDecimal(i).unscaledValue());
}
}
}
try (HostColumnVector hcv = ColumnBuilderHelper.decimalFromBigInts(-DecimalColumnVectorTest.dec128Scale, DecimalColumnVectorTest.unscaledDec128Zoo)) {
for (int i = 0; i < DecimalColumnVectorTest.unscaledDec128Zoo.length; i++) {
assertEquals(DecimalColumnVectorTest.unscaledDec128Zoo[i], hcv.getBigDecimal(i).unscaledValue());
}
}
}
@Test
public void testDecimalFromDoubles() {
DType dt = DType.create(DType.DTypeEnum.DECIMAL32, -3);
try (ColumnVector cv = ColumnVector.decimalFromDoubles(dt, RoundingMode.DOWN, 123456, -2.4567, 3.00001, -1111e-5)) {
try (HostColumnVector hcv = cv.copyToHost()) {
assertEquals(123456, hcv.getBigDecimal(0).doubleValue());
assertEquals(-2.456, hcv.getBigDecimal(1).doubleValue());
assertEquals(3, hcv.getBigDecimal(2).doubleValue());
assertEquals(-0.011, hcv.getBigDecimal(3).doubleValue());
}
}
dt = DType.create(DType.DTypeEnum.DECIMAL64, -10);
try (ColumnVector cv = ColumnVector.decimalFromDoubles(dt, RoundingMode.HALF_UP, 1.2345678, -2.45e-9, 3.000012, -51111e-15)) {
try (HostColumnVector hcv = cv.copyToHost()) {
assertEquals(1.2345678, hcv.getBigDecimal(0).doubleValue());
assertEquals(-2.5e-9, hcv.getBigDecimal(1).doubleValue());
assertEquals(3.000012, hcv.getBigDecimal(2).doubleValue());
assertEquals(-1e-10, hcv.getBigDecimal(3).doubleValue());
}
}
dt = DType.create(DType.DTypeEnum.DECIMAL64, 10);
try (ColumnVector cv = ColumnVector.decimalFromDoubles(dt, RoundingMode.UP, 1.234e20, -12.34e8, 1.1e10)) {
try (HostColumnVector hcv = cv.copyToHost()) {
assertEquals(1.234e20, hcv.getBigDecimal(0).doubleValue());
assertEquals(-1e10, hcv.getBigDecimal(1).doubleValue());
assertEquals(2e10, hcv.getBigDecimal(2).doubleValue());
}
}
assertThrows(ArithmeticException.class,
() -> {
final DType dt1 = DType.create(DType.DTypeEnum.DECIMAL32, -5);
try (ColumnVector cv = ColumnVector.decimalFromDoubles(dt1, RoundingMode.UNNECESSARY, 30000)) {
}
});
assertThrows(ArithmeticException.class,
() -> {
final DType dt1 = DType.create(DType.DTypeEnum.DECIMAL64, 10);
try (ColumnVector cv = ColumnVector.decimalFromDoubles(dt1, RoundingMode.FLOOR, 1e100)) {
}
});
}
@Test
public void testAppendVector() {
for (DType decType : new DType[]{
DType.create(DType.DTypeEnum.DECIMAL32, -6),
DType.create(DType.DTypeEnum.DECIMAL64, -10)}) {
for (int dstSize = 1; dstSize <= 100; dstSize++) {
for (int dstPrefilledSize = 0; dstPrefilledSize < dstSize; dstPrefilledSize++) {
final int srcSize = dstSize - dstPrefilledSize;
for (int sizeOfDataNotToAdd = 0; sizeOfDataNotToAdd <= dstPrefilledSize; sizeOfDataNotToAdd++) {
try (Builder dst = HostColumnVector.builder(decType, dstSize);
HostColumnVector src = HostColumnVector.build(decType, srcSize, (b) -> {
for (int i = 0; i < srcSize; i++) {
if (rdSeed.nextBoolean()) {
b.appendNull();
} else {
b.append(BigDecimal.valueOf(rdSeed.nextInt() / 100, -decType.getScale()));
}
}
});
Builder gtBuilder = HostColumnVector.builder(decType, dstPrefilledSize)) {
assertEquals(dstSize, srcSize + dstPrefilledSize);
// add the first half of the prefilled list
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
if (rdSeed.nextBoolean()) {
dst.appendNull();
gtBuilder.appendNull();
} else {
BigDecimal a = BigDecimal.valueOf(rdSeed.nextInt() / 100, -decType.getScale());
if (decType.typeId == DType.DTypeEnum.DECIMAL32) {
dst.appendUnscaledDecimal(a.unscaledValue().intValueExact());
} else {
dst.appendUnscaledDecimal(a.unscaledValue().longValueExact());
}
gtBuilder.append(a);
}
}
// append the src vector
dst.append(src);
try (HostColumnVector dstVector = dst.build();
HostColumnVector gt = gtBuilder.build()) {
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
assertEquals(gt.isNull(i), dstVector.isNull(i));
if (!gt.isNull(i)) {
assertEquals(gt.getBigDecimal(i), dstVector.getBigDecimal(i));
}
}
for (int i = dstPrefilledSize - sizeOfDataNotToAdd, j = 0; i < dstSize - sizeOfDataNotToAdd && j < srcSize; i++, j++) {
assertEquals(src.isNull(j), dstVector.isNull(i));
if (!src.isNull(j)) {
assertEquals(src.getBigDecimal(j), dstVector.getBigDecimal(i));
}
}
if (dstVector.hasValidityVector()) {
long maxIndex =
BitVectorHelper.getValidityAllocationSizeInBytes(dstVector.getRowCount()) * 8;
for (long i = dstSize - sizeOfDataNotToAdd; i < maxIndex; i++) {
assertFalse(dstVector.isNullExtendedRange(i));
}
}
}
}
}
}
}
}
}
@Test
public void testColumnVectorFromScalar() {
try (Scalar s = Scalar.fromDecimal(-3, 1233456)) {
try (ColumnVector cv = ColumnVector.fromScalar(s, 10)) {
assertEquals(s.getType(), cv.getType());
assertEquals(10L, cv.getRowCount());
try (HostColumnVector hcv = cv.copyToHost()) {
for (int i = 0; i < cv.getRowCount(); i++) {
assertEquals(s.getInt(), hcv.getInt(i));
assertEquals(s.getBigDecimal(), hcv.getBigDecimal(i));
}
}
}
}
try (Scalar s = Scalar.fromDecimal(-6, 123456789098L)) {
try (ColumnVector cv = ColumnVector.fromScalar(s, 10)) {
assertEquals(s.getType(), cv.getType());
assertEquals(10L, cv.getRowCount());
try (HostColumnVector hcv = cv.copyToHost()) {
for (int i = 0; i < cv.getRowCount(); i++) {
assertEquals(s.getLong(), hcv.getLong(i));
assertEquals(s.getBigDecimal(), hcv.getBigDecimal(i));
}
}
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/RmmMemoryAccessorTest.java
|
/*
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
class RmmMemoryAccessorTest extends CudfTestBase {
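// Exercises the RMM (RAPIDS Memory Manager) lifecycle: CSV logging of allocations,
// init/shutdown state transitions, and failure modes such as double initialization.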
@Test
public void log() throws IOException {
if (Rmm.isInitialized()) {
Rmm.shutdown();
}
File f = File.createTempFile("ALL_LOG",".csv");
f.deleteOnExit();
Rmm.initialize(RmmAllocationMode.CUDA_DEFAULT, Rmm.logTo(f), 1024*1024*1024);
try (DeviceMemoryBuffer address = Rmm.alloc(10, Cuda.DEFAULT_STREAM)) {
assertNotEquals(0, address);
}
Rmm.shutdown();
StringBuilder log = new StringBuilder();
try (Stream<String> stream = Files.lines(f.toPath(), StandardCharsets.UTF_8))
{
stream.forEach(s -> log.append(s).append("\n"));
}
System.err.println(log);
assertNotNull(log.toString());
assertTrue(0 < log.length());
}
@Test
public void init() {
if (Rmm.isInitialized()) {
Rmm.shutdown();
}
assertFalse(Rmm.isInitialized());
Rmm.initialize(RmmAllocationMode.CUDA_DEFAULT, Rmm.logToStderr(), -1);
assertTrue(Rmm.isInitialized());
Rmm.shutdown();
assertFalse(Rmm.isInitialized());
}
@Test
public void shutdown() {
if (Rmm.isInitialized()) {
Rmm.shutdown();
}
Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), 2048);
try (DeviceMemoryBuffer buffer = DeviceMemoryBuffer.allocate(1024)) {
assertThrows(RmmException.class, () -> Rmm.shutdown(500, 2000, TimeUnit.MILLISECONDS));
}
Rmm.shutdown();
}
@Test
public void allocate() {
try (DeviceMemoryBuffer address = Rmm.alloc(10, Cuda.DEFAULT_STREAM)) {
assertNotEquals(0, address.address);
}
}
@Test
public void doubleInitFails() {
if (!Rmm.isInitialized()) {
Rmm.initialize(RmmAllocationMode.CUDA_DEFAULT, Rmm.logToStderr(), 0);
}
assertThrows(IllegalStateException.class,
() -> Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), 1024 * 1024));
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/CudfTestBase.java
|
/*
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
public class CudfTestBase {
static final long RMM_POOL_SIZE_DEFAULT = 512 * 1024 * 1024;
final int rmmAllocationMode;
final long rmmPoolSize;
public CudfTestBase() {
this(RmmAllocationMode.POOL, RMM_POOL_SIZE_DEFAULT);
}
public CudfTestBase(int allocationMode, long poolSize) {
this.rmmAllocationMode = allocationMode;
this.rmmPoolSize = poolSize;
}
@BeforeEach
void beforeEach() {
assumeTrue(Cuda.isEnvCompatibleForTesting());
if (!Rmm.isInitialized()) {
Rmm.initialize(rmmAllocationMode, Rmm.logToStderr(), rmmPoolSize);
}
}
@AfterAll
static void afterAll() {
if (Rmm.isInitialized()) {
Rmm.shutdown();
}
}
private static boolean doublesAreEqualWithinPercentage(double expected, double actual, double percentage) {
// doubleToLongBits takes care of returning true when both operands have the same long value,
// including +infinity, -infinity, and NaN
if (Double.doubleToLongBits(expected) != Double.doubleToLongBits(actual)) {
if (expected != 0) {
return Math.abs((expected - actual) / expected) <= percentage;
} else {
return Math.abs(expected - actual) <= percentage;
}
} else {
return true;
}
}
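// Example: assertEqualsWithinPercentage(100.0, 101.5, 0.02) passes because
// |100.0 - 101.5| / 100.0 = 0.015 <= 0.02.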
/**
* Fails if the absolute difference between expected and actual values as a percentage of the expected
* value is greater than the threshold
* i.e. Math.abs((expected - actual) / expected) > percentage, if expected != 0
* else Math.abs(expected - actual) > percentage
*/
static void assertEqualsWithinPercentage(double expected, double actual, double percentage) {
assertEqualsWithinPercentage(expected, actual, percentage, "");
}
/**
* Fails if the absolute difference between expected and actual values as a percentage of the expected
* value is greater than the threshold
* i.e. Math.abs((expected - actual) / expected) > percentage, if expected != 0
* else Math.abs(expected - actual) > percentage
*/
static void assertEqualsWithinPercentage(double expected, double actual, double percentage, String message) {
if (!doublesAreEqualWithinPercentage(expected, actual, percentage)) {
String msg = message + " Math.abs(expected - actual)";
String eq = (expected != 0 ?
" / Math.abs(expected) = " + Math.abs((expected - actual) / expected)
: " = " + Math.abs(expected - actual));
fail(msg + eq + " is not <= " + percentage + " expected(" + expected + ") actual(" + actual + ")");
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/Date32ColumnVectorTest.java
|
/*
*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
public class Date32ColumnVectorTest extends CudfTestBase {
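// Dates are INT32 days since the Unix epoch (1970-01-01); for example 17897 days
// lands on Jan 01, 2019 (49 years of 365 days plus 12 leap days).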
private static final int[] DATES = {17897, //Jan 01, 2019
17532, //Jan 01, 2018
17167, //Jan 01, 2017
16802, //Jan 01, 2016
16437}; //Jan 01, 2015
private static final int[] DATES_2 = {17897, //Jan 01, 2019
17898, //Jan 02, 2019
17899, //Jan 03, 2019
17900, //Jan 04, 2019
17901}; //Jan 05, 2019
@Test
public void getYear() {
try (ColumnVector daysColumnVector = ColumnVector.daysFromInts(DATES);
ColumnVector tmp = daysColumnVector.year();
HostColumnVector result = tmp.copyToHost()) {
int expected = 2019;
for (int i = 0; i < DATES.length; i++) {
assertEquals(expected - i, result.getShort(i)); //2019 to 2015
}
}
}
@Test
public void getMonth() {
try (ColumnVector daysColumnVector = ColumnVector.daysFromInts(DATES);
ColumnVector tmp = daysColumnVector.month();
HostColumnVector result = tmp.copyToHost()) {
for (int i = 0; i < DATES.length; i++) {
assertEquals(1, result.getShort(i)); //Jan of every year
}
}
}
@Test
public void getDay() {
try (ColumnVector daysColumnVector = ColumnVector.daysFromInts(DATES_2);
ColumnVector tmp = daysColumnVector.day();
HostColumnVector result = tmp.copyToHost()) {
for (int i = 0; i < DATES_2.length; i++) {
assertEquals(i + 1, result.getShort(i)); //1 to 5
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ShortColumnVectorTest.java
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.Test;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class ShortColumnVectorTest extends CudfTestBase {
@Test
public void testCreateColumnVectorBuilder() {
try (ColumnVector shortColumnVector = ColumnVector.build(DType.INT16, 3,
(b) -> b.append((short) 1))) {
assertFalse(shortColumnVector.hasNulls());
}
}
@Test
public void testArrayAllocation() {
try (HostColumnVector shortColumnVector =
HostColumnVector.fromShorts((short) 2, (short) 3, (short) 5)) {
assertFalse(shortColumnVector.hasNulls());
assertEquals(2, shortColumnVector.getShort(0));
assertEquals(3, shortColumnVector.getShort(1));
assertEquals(5, shortColumnVector.getShort(2));
}
}
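// Java has no unsigned short, so UINT16 values live in the signed bits and are read
// back with Short.toUnsignedInt; (short) 32768 round-trips to 32768, not -32768.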
@Test
public void testUnsignedArrayAllocation() {
try (HostColumnVector v =
HostColumnVector.fromUnsignedShorts((short) 0xfedc, (short) 32768, (short) 5)) {
assertFalse(v.hasNulls());
assertEquals(0xfedc, Short.toUnsignedInt(v.getShort(0)));
assertEquals(32768, Short.toUnsignedInt(v.getShort(1)));
assertEquals(5, Short.toUnsignedInt(v.getShort(2)));
}
}
@Test
public void testUpperIndexOutOfBoundsException() {
try (HostColumnVector shortColumnVector =
HostColumnVector.fromShorts((short) 2, (short) 3, (short) 5)) {
assertThrows(AssertionError.class, () -> shortColumnVector.getShort(3));
assertFalse(shortColumnVector.hasNulls());
}
}
@Test
public void testLowerIndexOutOfBoundsException() {
try (HostColumnVector shortColumnVector =
HostColumnVector.fromShorts((short) 2, (short) 3, (short) 5)) {
assertFalse(shortColumnVector.hasNulls());
assertThrows(AssertionError.class, () -> shortColumnVector.getShort(-1));
}
}
@Test
public void testAddingNullValues() {
try (HostColumnVector cv =
HostColumnVector.fromBoxedShorts(new Short[]{2, 3, 4, 5, 6, 7, null, null})) {
assertTrue(cv.hasNulls());
assertEquals(2, cv.getNullCount());
for (int i = 0; i < 6; i++) {
assertFalse(cv.isNull(i));
}
assertTrue(cv.isNull(6));
assertTrue(cv.isNull(7));
}
}
@Test
public void testAddingUnsignedNullValues() {
try (HostColumnVector cv = HostColumnVector.fromBoxedUnsignedShorts(
new Short[]{2, 3, 4, 5, (short)32768, (short)0xffff, null, null})) {
assertTrue(cv.hasNulls());
assertEquals(2, cv.getNullCount());
for (int i = 0; i < 6; i++) {
assertFalse(cv.isNull(i));
}
assertEquals(32768, Short.toUnsignedInt(cv.getShort(4)));
assertEquals(0xffff, Short.toUnsignedInt(cv.getShort(5)));
assertTrue(cv.isNull(6));
assertTrue(cv.isNull(7));
}
}
@Test
public void testOverrunningTheBuffer() {
try (Builder builder = HostColumnVector.builder(DType.INT16, 3)) {
assertThrows(AssertionError.class,
() -> builder.append((short) 2).appendNull().appendArray(new short[]{5, 4}).build());
}
}
@Test
void testAppendVector() {
Random random = new Random(192312989128L);
for (int dstSize = 1; dstSize <= 100; dstSize++) {
for (int dstPrefilledSize = 0; dstPrefilledSize < dstSize; dstPrefilledSize++) {
final int srcSize = dstSize - dstPrefilledSize;
for (int sizeOfDataNotToAdd = 0; sizeOfDataNotToAdd <= dstPrefilledSize; sizeOfDataNotToAdd++) {
try (Builder dst = HostColumnVector.builder(DType.INT16, dstSize);
HostColumnVector src = HostColumnVector.build(DType.INT16, srcSize, (b) -> {
for (int i = 0; i < srcSize; i++) {
if (random.nextBoolean()) {
b.appendNull();
} else {
b.append((short) random.nextInt());
}
}
});
Builder gtBuilder = HostColumnVector.builder(DType.INT16,
dstPrefilledSize)) {
assertEquals(dstSize, srcSize + dstPrefilledSize);
// add the first half of the prefilled list
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
if (random.nextBoolean()) {
dst.appendNull();
gtBuilder.appendNull();
} else {
short a = (short) random.nextInt();
dst.append(a);
gtBuilder.append(a);
}
}
// append the src vector
dst.append(src);
try (HostColumnVector dstVector = dst.build();
HostColumnVector gt = gtBuilder.build()) {
for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
assertEquals(gt.isNull(i), dstVector.isNull(i));
if (!gt.isNull(i)) {
assertEquals(gt.getShort(i), dstVector.getShort(i));
}
}
for (int i = dstPrefilledSize - sizeOfDataNotToAdd, j = 0; i < dstSize - sizeOfDataNotToAdd && j < srcSize; i++, j++) {
assertEquals(src.isNull(j), dstVector.isNull(i));
if (!src.isNull(j)) {
assertEquals(src.getShort(j), dstVector.getShort(i));
}
}
if (dstVector.hasValidityVector()) {
long maxIndex =
BitVectorHelper.getValidityAllocationSizeInBytes(dstVector.getRowCount()) * 8;
for (long i = dstSize - sizeOfDataNotToAdd; i < maxIndex; i++) {
assertFalse(dstVector.isNullExtendedRange(i));
}
}
}
}
}
}
}
}
}
| 0 |