rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/JSONOptions.java
/*
 *
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import java.util.Collection;

/**
 * Options for reading in JSON encoded data.
 */
public final class JSONOptions extends ColumnFilterOptions {

  public static JSONOptions DEFAULT = new JSONOptions(builder());

  private final boolean dayFirst;
  private final boolean lines;
  private final boolean recoverWithNull;

  private JSONOptions(Builder builder) {
    super(builder);
    dayFirst = builder.dayFirst;
    lines = builder.lines;
    recoverWithNull = builder.recoverWithNull;
  }

  public boolean isDayFirst() {
    return dayFirst;
  }

  public boolean isLines() {
    return lines;
  }

  /** Return the value of the recoverWithNull option */
  public boolean isRecoverWithNull() {
    return recoverWithNull;
  }

  @Override
  String[] getIncludeColumnNames() {
    throw new UnsupportedOperationException("JSON reader does not support column pruning");
  }

  public static Builder builder() {
    return new Builder();
  }

  public static final class Builder extends ColumnFilterOptions.Builder<JSONOptions.Builder> {
    private boolean dayFirst = false;
    private boolean lines = true;
    private boolean recoverWithNull = false;

    /**
     * Whether to parse dates as DD/MM versus MM/DD
     * @param dayFirst true: DD/MM, false: MM/DD
     * @return builder for chaining
     */
    public Builder withDayFirst(boolean dayFirst) {
      this.dayFirst = dayFirst;
      return this;
    }

    /**
     * Whether to read the file as a JSON object per line
     * @param perLine true: per line, false: multi-line
     * @return builder for chaining
     */
    public Builder withLines(boolean perLine) {
      assert perLine : "Cudf does not support multi-line";
      this.lines = perLine;
      return this;
    }

    /**
     * Specify how to handle invalid lines when parsing JSON. Setting
     * recoverWithNull to true will cause null values to be returned
     * for invalid lines. Setting recoverWithNull to false will cause
     * the parsing to fail with an exception.
     *
     * @param recoverWithNull true: return nulls, false: throw exception
     * @return builder for chaining
     */
    public Builder withRecoverWithNull(boolean recoverWithNull) {
      this.recoverWithNull = recoverWithNull;
      return this;
    }

    @Override
    public Builder includeColumn(String... names) {
      throw new UnsupportedOperationException("JSON reader does not support column pruning");
    }

    @Override
    public Builder includeColumn(Collection<String> names) {
      throw new UnsupportedOperationException("JSON reader does not support column pruning");
    }

    public JSONOptions build() {
      return new JSONOptions(this);
    }
  }
}
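A minimal usage sketch for the builder above: reading JSON-lines input while recovering bad records as nulls rather than failing the whole read. The resulting options object is what cudf's JSON read entry points (for example Table.readJSON) consume; the exact read call is up to the caller.

// Build options for JSON-lines input, recovering invalid lines as nulls.
JSONOptions opts = JSONOptions.builder()
    .withLines(true)
    .withRecoverWithNull(true)
    .build();
// opts is then passed to a cudf read entry point such as Table.readJSON.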
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/MemoryBuffer.java
/*
 *
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Abstract class representing a memory buffer.
 *
 * NOTE: MemoryBuffer is public to make it easier to work with the class hierarchy;
 * subclassing beyond what is included in CUDF is not recommended and not supported.
 */
abstract public class MemoryBuffer implements AutoCloseable {
  /**
   * Interface to handle events for this MemoryBuffer. Only invoked during
   * close, hence `onClosed` is the only event.
   */
  public interface EventHandler {
    /**
     * `onClosed` is invoked with the updated `refCount` during `close`.
     * The last invocation of `onClosed` will be with `refCount=0`.
     *
     * @note the callback is invoked with this `MemoryBuffer`'s lock held.
     *
     * @param refCount the updated ref count for this MemoryBuffer at the time
     *                 of invocation
     */
    void onClosed(int refCount);
  }

  private static final Logger log = LoggerFactory.getLogger(MemoryBuffer.class);
  protected final long address;
  protected final long length;
  protected boolean closed = false;
  protected int refCount = 0;
  protected final MemoryBufferCleaner cleaner;
  protected final long id;
  private EventHandler eventHandler;

  public static abstract class MemoryBufferCleaner extends MemoryCleaner.Cleaner {}

  private static final class SlicedBufferCleaner extends MemoryBufferCleaner {
    private MemoryBuffer parent;

    SlicedBufferCleaner(MemoryBuffer parent) {
      this.parent = parent;
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      if (parent != null) {
        if (logErrorIfNotClean) {
          log.error("A SLICED BUFFER WAS LEAKED(ID: " + id + " parent: " + parent + ")");
          logRefCountDebug("Leaked sliced buffer");
        }
        try {
          parent.close();
        } finally {
          // Always mark the resource as freed even if an exception is thrown.
          // We cannot know how far it progressed before the exception, and
          // therefore it is unsafe to retry.
          parent = null;
        }
        return true;
      }
      return false;
    }

    @Override
    public boolean isClean() {
      return parent == null;
    }
  }

  /**
   * This is a really ugly API, but it is possible that a column of data may not have a
   * clear lifecycle thanks to Java and GC. This API informs the leak tracking code that
   * this is expected for this column, and big scary warnings should not be printed
   * when this happens.
   */
  public void noWarnLeakExpected() {
    if (cleaner != null) {
      cleaner.noWarnLeakExpected();
    }
  }

  /**
   * Constructor
   * @param address location in memory
   * @param length size of this buffer
   * @param cleaner used to clean up the memory. May be null if no cleanup is needed.
   */
  protected MemoryBuffer(long address, long length, MemoryBufferCleaner cleaner) {
    this.address = address;
    this.length = length;
    this.cleaner = cleaner;
    if (cleaner != null) {
      this.id = cleaner.id;
      incRefCount();
      MemoryCleaner.register(this, cleaner);
    } else {
      this.id = -1;
    }
  }

  /**
   * Constructor
   * @param address location in memory
   * @param length size of this buffer
   */
  protected MemoryBuffer(long address, long length) {
    this(address, length, (MemoryBufferCleaner) null);
  }

  /**
   * Internal constructor used when creating a slice.
   * @param address location in memory
   * @param length size of this buffer
   * @param parent the buffer that should be closed instead of closing this one.
   */
  protected MemoryBuffer(long address, long length, MemoryBuffer parent) {
    this(address, length, new SlicedBufferCleaner(parent));
  }

  /**
   * Returns the size of this buffer
   * @return size
   */
  public final long getLength() {
    return length;
  }

  protected final void addressOutOfBoundsCheck(long address, long size, String type) {
    assert !closed : "Buffer is already closed " + Long.toHexString(this.address);
    assert size >= 0 : "A positive size is required";
    assert address >= this.address : "Start address is too low for " + type +
        " 0x" + Long.toHexString(address) + " < 0x" + Long.toHexString(this.address);
    assert (address + size) <= (this.address + length) : "End address is too high for " + type +
        " 0x" + Long.toHexString(address + size) + " > 0x" + Long.toHexString(this.address + length);
  }

  /**
   * Returns the location of the data pointed to by this buffer
   * @return data address
   */
  public final long getAddress() {
    return address;
  }

  /**
   * Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
   * The copy has completed when this returns, but the memory copy could overlap with
   * operations occurring on other streams.
   * @param destOffset the offset in this buffer to start copying to.
   * @param src what to copy from
   * @param srcOffset offset into src to start at
   * @param length how many bytes to copy
   * @param stream CUDA stream to use
   */
  public final void copyFromMemoryBuffer(
      long destOffset, MemoryBuffer src, long srcOffset, long length, Cuda.Stream stream) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.memcpy(address + destOffset, src.address + srcOffset, length,
        CudaMemcpyKind.DEFAULT, stream);
  }

  /**
   * Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
   * The copy is async and may not have completed when this returns.
   * @param destOffset the offset in this buffer to start copying to.
   * @param src what to copy from
   * @param srcOffset offset into src to start at
   * @param length how many bytes to copy
   * @param stream CUDA stream to use
   */
  public final void copyFromMemoryBufferAsync(
      long destOffset, MemoryBuffer src, long srcOffset, long length, Cuda.Stream stream) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.asyncMemcpy(address + destOffset, src.address + srcOffset, length,
        CudaMemcpyKind.DEFAULT, stream);
  }

  /**
   * Slice off a part of the buffer. Note that this is a zero copy operation and all
   * slices must be closed along with the original buffer before the memory is released.
   * So use this with some caution.
   *
   * Note that [[DeviceMemoryBuffer]] and [[HostMemoryBuffer]] support slicing, and override this
   * function.
   *
   * @param offset where to start the slice at.
   * @param len how many bytes to slice
   * @return a slice of the original buffer that will need to be closed independently
   */
  public abstract MemoryBuffer slice(long offset, long len);

  /**
   * Set an event handler for this buffer. This method can be invoked with null
   * to unset the handler.
   *
   * @param newHandler the EventHandler to use from this point forward
   * @return the prior event handler, or null if not set.
   */
  public synchronized EventHandler setEventHandler(EventHandler newHandler) {
    EventHandler prev = this.eventHandler;
    this.eventHandler = newHandler;
    return prev;
  }

  /**
   * Returns the current event handler for this buffer or null if no handler
   * is associated or this buffer is closed.
   */
  public synchronized EventHandler getEventHandler() {
    return this.eventHandler;
  }

  /**
   * Close this buffer and free memory
   */
  public synchronized void close() {
    if (cleaner != null) {
      refCount--;
      cleaner.delRef();
      if (eventHandler != null) {
        eventHandler.onClosed(refCount);
      }
      if (refCount == 0) {
        cleaner.clean(false);
        closed = true;
      } else if (refCount < 0) {
        cleaner.logRefCountDebug("double free " + this);
        throw new IllegalStateException("Close called too many times " + this);
      }
    }
  }

  @Override
  public String toString() {
    long id = -1;
    if (cleaner != null) {
      id = cleaner.id;
    }
    String name = this.getClass().getSimpleName();
    return name + "{" +
        "address=0x" + Long.toHexString(address) +
        ", length=" + length +
        ", id=" + id + "}";
  }

  /**
   * Increment the reference count for this buffer. You need to call close on this
   * to decrement the reference count again.
   */
  public synchronized void incRefCount() {
    refCount++;
    cleaner.addRef();
  }

  /**
   * Get the current reference count for this buffer.
   */
  public synchronized int getRefCount() {
    return refCount;
  }
}
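A minimal sketch of the ref-counting contract above, using DeviceMemoryBuffer (a concrete subclass in this package) with try-with-resources; the sizes are arbitrary:

try (DeviceMemoryBuffer buf = DeviceMemoryBuffer.allocate(1024)) {
  // slice() is zero-copy: it bumps the parent's ref count, and the
  // SlicedBufferCleaner closes the parent when the slice is closed,
  // so the allocation stays alive until both closes have happened.
  try (MemoryBuffer slice = buf.slice(256, 128)) {
    assert slice.getLength() == 128;
  } // closing the slice decrements the parent's ref count
} // the device memory is freed once refCount reaches 0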
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/BinaryOperable.java
/*
 *
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

public interface BinaryOperable {
  /**
   * Finds the proper DType for an implicit output. This follows the typical rules of
   * C++, Java, and most SQL implementations.
   * FLOAT64/double >
   * FLOAT32/float >
   * INT64/long >
   * INT32/int >
   * INT16/short >
   * INT8/byte/char
   * <p>
   * Currently most TIMESTAMPs are treated the same as INT64. TIMESTAMP_DAYS is treated the same
   * as INT32. All time information is stripped from them. This may change in the future.
   * <p>
   * BOOL8 is treated like an INT8. Math on boolean operations makes little sense. If
   * you want to stay as a BOOL8 you will need to explicitly specify the output type.
   * For decimal types, DECIMAL32 and DECIMAL64 take another parameter `scale`. The DType is
   * created with scale=0 as scale is required. The DType is discarded for binary operations on
   * decimal types in cudf, as a new DType is created for the output type with the new scale.
   */
  static DType implicitConversion(BinaryOp op, BinaryOperable lhs, BinaryOperable rhs) {
    DType a = lhs.getType();
    DType b = rhs.getType();
    if (a.equals(DType.FLOAT64) || b.equals(DType.FLOAT64)) {
      return DType.FLOAT64;
    }
    if (a.equals(DType.FLOAT32) || b.equals(DType.FLOAT32)) {
      return DType.FLOAT32;
    }
    if (a.equals(DType.UINT64) || b.equals(DType.UINT64)) {
      return DType.UINT64;
    }
    if (a.equals(DType.INT64) || b.equals(DType.INT64) ||
        a.equals(DType.TIMESTAMP_MILLISECONDS) || b.equals(DType.TIMESTAMP_MILLISECONDS) ||
        a.equals(DType.TIMESTAMP_MICROSECONDS) || b.equals(DType.TIMESTAMP_MICROSECONDS) ||
        a.equals(DType.TIMESTAMP_SECONDS) || b.equals(DType.TIMESTAMP_SECONDS) ||
        a.equals(DType.TIMESTAMP_NANOSECONDS) || b.equals(DType.TIMESTAMP_NANOSECONDS)) {
      return DType.INT64;
    }
    if (a.equals(DType.UINT32) || b.equals(DType.UINT32)) {
      return DType.UINT32;
    }
    if (a.equals(DType.INT32) || b.equals(DType.INT32) ||
        a.equals(DType.TIMESTAMP_DAYS) || b.equals(DType.TIMESTAMP_DAYS)) {
      return DType.INT32;
    }
    if (a.equals(DType.UINT16) || b.equals(DType.UINT16)) {
      return DType.UINT16;
    }
    if (a.equals(DType.INT16) || b.equals(DType.INT16)) {
      return DType.INT16;
    }
    if (a.equals(DType.UINT8) || b.equals(DType.UINT8)) {
      return DType.UINT8;
    }
    if (a.equals(DType.INT8) || b.equals(DType.INT8)) {
      return DType.INT8;
    }
    if (a.equals(DType.BOOL8) || b.equals(DType.BOOL8)) {
      return DType.BOOL8;
    }
    if (a.isDecimalType() && b.isDecimalType()) {
      if (a.typeId != b.typeId) {
        throw new IllegalArgumentException("Both columns must be of the same fixed_point type");
      }
      final int scale = ColumnView.getFixedPointOutputScale(op, lhs.getType(), rhs.getType());
      // The output precision/size should be at least as large as the input.
      // It may be larger if room is needed for it based off of the output scale.
      final DType.DTypeEnum outputEnum;
      if (scale <= DType.DECIMAL32_MAX_PRECISION && a.typeId == DType.DTypeEnum.DECIMAL32) {
        outputEnum = DType.DTypeEnum.DECIMAL32;
      } else if (scale <= DType.DECIMAL64_MAX_PRECISION &&
          (a.typeId == DType.DTypeEnum.DECIMAL32 || a.typeId == DType.DTypeEnum.DECIMAL64)) {
        outputEnum = DType.DTypeEnum.DECIMAL64;
      } else {
        outputEnum = DType.DTypeEnum.DECIMAL128;
      }
      return DType.create(outputEnum, scale);
    }
    throw new IllegalArgumentException("Unsupported types " + a + " and " + b);
  }

  /**
   * Get the type of this data.
   */
  DType getType();

  /**
   * Perform one of multiple possible binary operations.
   * @param op the operation to perform
   * @param rhs the rhs of the operation
   * @param outType the type of output you want.
   * @return the result
   */
  ColumnVector binaryOp(BinaryOp op, BinaryOperable rhs, DType outType);

  /**
   * Add one vector to another with the given output type. this + rhs
   * Output type is ignored for operations between decimal types;
   * it is always a decimal type.
   */
  default ColumnVector add(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.ADD, rhs, outType);
  }

  /**
   * Add + operator. this + rhs
   */
  default ColumnVector add(BinaryOperable rhs) {
    return add(rhs, implicitConversion(BinaryOp.ADD, this, rhs));
  }

  /**
   * Subtract one vector from another with the given output type. this - rhs
   * Output type is ignored for operations between decimal types;
   * it is always a decimal type.
   */
  default ColumnVector sub(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.SUB, rhs, outType);
  }

  /**
   * Subtract one vector from another. this - rhs
   */
  default ColumnVector sub(BinaryOperable rhs) {
    return sub(rhs, implicitConversion(BinaryOp.SUB, this, rhs));
  }

  /**
   * Multiply two vectors together with the given output type. this * rhs
   * Output type is ignored for operations between decimal types;
   * it is always a decimal type.
   */
  default ColumnVector mul(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.MUL, rhs, outType);
  }

  /**
   * Multiply two vectors together. this * rhs
   */
  default ColumnVector mul(BinaryOperable rhs) {
    return mul(rhs, implicitConversion(BinaryOp.MUL, this, rhs));
  }

  /**
   * Divide one vector by another with the given output type. this / rhs
   * Output type is ignored for operations between decimal types;
   * it is always a decimal type.
   */
  default ColumnVector div(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.DIV, rhs, outType);
  }

  /**
   * Divide one vector by another. this / rhs
   */
  default ColumnVector div(BinaryOperable rhs) {
    return div(rhs, implicitConversion(BinaryOp.DIV, this, rhs));
  }

  /**
   * Divide one vector by another, converting to FLOAT64 in between, with the given output type.
   * (double)this / (double)rhs
   */
  default ColumnVector trueDiv(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.TRUE_DIV, rhs, outType);
  }

  /**
   * Divide one vector by another, converting to FLOAT64 in between.
   * (double)this / (double)rhs
   */
  default ColumnVector trueDiv(BinaryOperable rhs) {
    return trueDiv(rhs, implicitConversion(BinaryOp.TRUE_DIV, this, rhs));
  }

  /**
   * Divide one vector by another and calculate the floor of the result with the given output type.
   * Math.floor(this/rhs)
   */
  default ColumnVector floorDiv(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.FLOOR_DIV, rhs, outType);
  }

  /**
   * Divide one vector by another and calculate the floor of the result.
   * Math.floor(this/rhs)
   */
  default ColumnVector floorDiv(BinaryOperable rhs) {
    return floorDiv(rhs, implicitConversion(BinaryOp.FLOOR_DIV, this, rhs));
  }

  /**
   * Compute the modulus with the given output type.
   * this % rhs
   */
  default ColumnVector mod(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.MOD, rhs, outType);
  }

  /**
   * Compute the modulus.
   * this % rhs
   */
  default ColumnVector mod(BinaryOperable rhs) {
    return mod(rhs, implicitConversion(BinaryOp.MOD, this, rhs));
  }

  /**
   * Compute the power with the given output type.
   * Math.pow(this, rhs)
   */
  default ColumnVector pow(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.POW, rhs, outType);
  }

  /**
   * Compute the power.
   * Math.pow(this, rhs)
   */
  default ColumnVector pow(BinaryOperable rhs) {
    return pow(rhs, implicitConversion(BinaryOp.POW, this, rhs));
  }

  /**
   * this == rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector equalTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.EQUAL, rhs, outType);
  }

  /**
   * this == rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector equalTo(BinaryOperable rhs) {
    return equalTo(rhs, DType.BOOL8);
  }

  /**
   * this != rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector notEqualTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NOT_EQUAL, rhs, outType);
  }

  /**
   * this != rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector notEqualTo(BinaryOperable rhs) {
    return notEqualTo(rhs, DType.BOOL8);
  }

  /**
   * this < rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector lessThan(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LESS, rhs, outType);
  }

  /**
   * this < rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector lessThan(BinaryOperable rhs) {
    return lessThan(rhs, DType.BOOL8);
  }

  /**
   * this > rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector greaterThan(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.GREATER, rhs, outType);
  }

  /**
   * this > rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector greaterThan(BinaryOperable rhs) {
    return greaterThan(rhs, DType.BOOL8);
  }

  /**
   * this <= rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector lessOrEqualTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LESS_EQUAL, rhs, outType);
  }

  /**
   * this <= rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector lessOrEqualTo(BinaryOperable rhs) {
    return lessOrEqualTo(rhs, DType.BOOL8);
  }

  /**
   * this >= rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector greaterOrEqualTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.GREATER_EQUAL, rhs, outType);
  }

  /**
   * this >= rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector greaterOrEqualTo(BinaryOperable rhs) {
    return greaterOrEqualTo(rhs, DType.BOOL8);
  }

  /**
   * Bit wise and (&) with the given output type. this & rhs
   */
  default ColumnVector bitAnd(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.BITWISE_AND, rhs, outType);
  }

  /**
   * Bit wise and (&). this & rhs
   */
  default ColumnVector bitAnd(BinaryOperable rhs) {
    return bitAnd(rhs, implicitConversion(BinaryOp.BITWISE_AND, this, rhs));
  }

  /**
   * Bit wise or (|) with the given output type. this | rhs
   */
  default ColumnVector bitOr(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.BITWISE_OR, rhs, outType);
  }

  /**
   * Bit wise or (|). this | rhs
   */
  default ColumnVector bitOr(BinaryOperable rhs) {
    return bitOr(rhs, implicitConversion(BinaryOp.BITWISE_OR, this, rhs));
  }

  /**
   * Bit wise xor (^) with the given output type. this ^ rhs
   */
  default ColumnVector bitXor(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.BITWISE_XOR, rhs, outType);
  }

  /**
   * Bit wise xor (^). this ^ rhs
   */
  default ColumnVector bitXor(BinaryOperable rhs) {
    return bitXor(rhs, implicitConversion(BinaryOp.BITWISE_XOR, this, rhs));
  }

  /**
   * Logical and (&&) with the given output type. this && rhs
   */
  default ColumnVector and(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LOGICAL_AND, rhs, outType);
  }

  /**
   * Logical and (&&). this && rhs
   */
  default ColumnVector and(BinaryOperable rhs) {
    return and(rhs, implicitConversion(BinaryOp.LOGICAL_AND, this, rhs));
  }

  /**
   * Logical or (||) with the given output type. this || rhs
   */
  default ColumnVector or(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LOGICAL_OR, rhs, outType);
  }

  /**
   * Logical or (||). this || rhs
   */
  default ColumnVector or(BinaryOperable rhs) {
    return or(rhs, implicitConversion(BinaryOp.LOGICAL_OR, this, rhs));
  }

  /**
   * Bitwise left shifts the values of this vector by shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] << shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   *    with the scalar << shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   *    with this[i] << shiftBy
   */
  default ColumnVector shiftLeft(BinaryOperable shiftBy, DType outType) {
    return binaryOp(BinaryOp.SHIFT_LEFT, shiftBy, outType);
  }

  /**
   * Bitwise left shift the values of this vector by the shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] << shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   *    with the scalar << shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   *    with this[i] << shiftBy
   */
  default ColumnVector shiftLeft(BinaryOperable shiftBy) {
    return shiftLeft(shiftBy, implicitConversion(BinaryOp.SHIFT_LEFT, this, shiftBy));
  }

  /**
   * Bitwise right shift this vector by the shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] >> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   *    with the scalar >> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   *    with this[i] >> shiftBy
   */
  default ColumnVector shiftRight(BinaryOperable shiftBy, DType outType) {
    return binaryOp(BinaryOp.SHIFT_RIGHT, shiftBy, outType);
  }

  /**
   * Bitwise right shift this vector by the shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] >> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   *    with the scalar >> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   *    with this[i] >> shiftBy
   */
  default ColumnVector shiftRight(BinaryOperable shiftBy) {
    return shiftRight(shiftBy, implicitConversion(BinaryOp.SHIFT_RIGHT, this, shiftBy));
  }

  /**
   * This method bitwise right shifts the values of this vector by the shiftBy.
   * This method always fills 0 irrespective of the sign of the number.
   *
   * If "this" and shiftBy are both vectors then, this[i] >>> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   *    with the scalar >>> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   *    with this[i] >>> shiftBy
   */
  default ColumnVector shiftRightUnsigned(BinaryOperable shiftBy, DType outType) {
    return binaryOp(BinaryOp.SHIFT_RIGHT_UNSIGNED, shiftBy, outType);
  }

  /**
   * This method bitwise right shifts the values of this vector by the shiftBy.
   * This method always fills 0 irrespective of the sign of the number.
   *
   * If "this" and shiftBy are both vectors then, this[i] >>> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   *    with the scalar >>> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   *    with this[i] >>> shiftBy
   */
  default ColumnVector shiftRightUnsigned(BinaryOperable shiftBy) {
    return shiftRightUnsigned(shiftBy,
        implicitConversion(BinaryOp.SHIFT_RIGHT_UNSIGNED, this, shiftBy));
  }

  /**
   * Calculate the log with the specified base
   */
  default ColumnVector log(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LOG_BASE, rhs, outType);
  }

  /**
   * Calculate the log with the specified base; the output type is the same as this.
   */
  default ColumnVector log(BinaryOperable rhs) {
    return log(rhs, getType());
  }

  /**
   * The function arctan2(y,x) or atan2(y,x) is defined as the angle in the Euclidean plane, given
   * in radians, between the positive x axis and the ray to the point (x, y) ≠ (0, 0).
   */
  default ColumnVector arctan2(BinaryOperable xCoordinate, DType outType) {
    return binaryOp(BinaryOp.ATAN2, xCoordinate, outType);
  }

  /**
   * The function arctan2(y,x) or atan2(y,x) is defined as the angle in the Euclidean plane, given
   * in radians, between the positive x axis and the ray to the point (x, y) ≠ (0, 0).
   */
  default ColumnVector arctan2(BinaryOperable xCoordinate) {
    return arctan2(xCoordinate, implicitConversion(BinaryOp.ATAN2, this, xCoordinate));
  }

  /**
   * Returns the positive value of lhs mod rhs.
   *
   * r = lhs % rhs
   * if r < 0 then (r + rhs) % rhs
   * else r
   */
  default ColumnVector pmod(BinaryOperable rhs, DType outputType) {
    return binaryOp(BinaryOp.PMOD, rhs, outputType);
  }

  /**
   * Returns the positive value of lhs mod rhs.
   *
   * r = lhs % rhs
   * if r < 0 then (r + rhs) % rhs
   * else r
   */
  default ColumnVector pmod(BinaryOperable rhs) {
    return pmod(rhs, implicitConversion(BinaryOp.PMOD, this, rhs));
  }

  /**
   * Like equalTo but NULL == NULL is TRUE and NULL == not NULL is FALSE
   */
  default ColumnVector equalToNullAware(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NULL_EQUALS, rhs, outType);
  }

  /**
   * Like equalTo but NULL == NULL is TRUE and NULL == not NULL is FALSE
   */
  default ColumnVector equalToNullAware(BinaryOperable rhs) {
    return equalToNullAware(rhs, DType.BOOL8);
  }

  /**
   * Returns the max non null value.
   */
  default ColumnVector maxNullAware(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NULL_MAX, rhs, outType);
  }

  /**
   * Returns the max non null value.
   */
  default ColumnVector maxNullAware(BinaryOperable rhs) {
    return maxNullAware(rhs, implicitConversion(BinaryOp.NULL_MAX, this, rhs));
  }

  /**
   * Returns the min non null value.
   */
  default ColumnVector minNullAware(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NULL_MIN, rhs, outType);
  }

  /**
   * Returns the min non null value.
   */
  default ColumnVector minNullAware(BinaryOperable rhs) {
    return minNullAware(rhs, implicitConversion(BinaryOp.NULL_MIN, this, rhs));
  }
}
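A short sketch of the implicit promotion rules above, assuming the ColumnVector factory methods fromInts and fromDoubles from this package:

try (ColumnVector ints = ColumnVector.fromInts(1, 2, 3);
     ColumnVector doubles = ColumnVector.fromDoubles(0.5, 1.5, 2.5);
     // INT32 + FLOAT64 promotes to FLOAT64 via implicitConversion
     ColumnVector sum = ints.add(doubles);
     // comparison operators default to a BOOL8 output
     ColumnVector cmp = ints.lessThan(doubles)) {
  assert sum.getType().equals(DType.FLOAT64);
  assert cmp.getType().equals(DType.BOOL8);
}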
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CSVOptions.java
/*
 *
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import java.util.HashSet;
import java.util.Set;

/**
 * Options for reading a CSV file
 */
public class CSVOptions extends ColumnFilterOptions {

  public static CSVOptions DEFAULT = new CSVOptions(new Builder());

  private final int headerRow;
  private final byte delim;
  private final byte quote;
  private final byte comment;
  private final String[] nullValues;
  private final String[] trueValues;
  private final String[] falseValues;
  private final QuoteStyle quoteStyle;

  private CSVOptions(Builder builder) {
    super(builder);
    headerRow = builder.headerRow;
    delim = builder.delim;
    quote = builder.quote;
    comment = builder.comment;
    nullValues = builder.nullValues.toArray(
        new String[builder.nullValues.size()]);
    trueValues = builder.trueValues.toArray(
        new String[builder.trueValues.size()]);
    falseValues = builder.falseValues.toArray(
        new String[builder.falseValues.size()]);
    quoteStyle = builder.quoteStyle;
  }

  String[] getNullValues() {
    return nullValues;
  }

  String[] getTrueValues() {
    return trueValues;
  }

  String[] getFalseValues() {
    return falseValues;
  }

  int getHeaderRow() {
    return headerRow;
  }

  byte getDelim() {
    return delim;
  }

  byte getQuote() {
    return quote;
  }

  byte getComment() {
    return comment;
  }

  QuoteStyle getQuoteStyle() {
    return quoteStyle;
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder extends ColumnFilterOptions.Builder<Builder> {
    private static final int NO_HEADER_ROW = -1;
    private final Set<String> nullValues = new HashSet<>();
    private final Set<String> trueValues = new HashSet<>();
    private final Set<String> falseValues = new HashSet<>();
    private byte comment = 0;
    private int headerRow = NO_HEADER_ROW;
    private byte delim = ',';
    private byte quote = '"';
    private QuoteStyle quoteStyle = QuoteStyle.MINIMAL;

    /**
     * Row of the header data (0 based counting). Negative is no header.
     */
    public Builder withHeaderAtRow(int index) {
      headerRow = index;
      return this;
    }

    /**
     * Set the row of the header to 0, the first line, if hasHeader is true; otherwise disable
     * the header.
     */
    public Builder hasHeader(boolean hasHeader) {
      return withHeaderAtRow(hasHeader ? 0 : NO_HEADER_ROW);
    }

    /**
     * Set the row of the header to 0, the first line.
     */
    public Builder hasHeader() {
      return withHeaderAtRow(0);
    }

    /**
     * Set the entry delimiter. Only ASCII chars are currently supported.
     */
    public Builder withDelim(char delim) {
      if (delim > 127) {
        throw new IllegalArgumentException("Only ASCII characters are currently supported");
      }
      this.delim = (byte) delim;
      return this;
    }

    /**
     * Set the quote character. Only ASCII chars are currently supported.
     */
    public Builder withQuote(char quote) {
      if (quote > 127) {
        throw new IllegalArgumentException("Only ASCII characters are currently supported");
      }
      this.quote = (byte) quote;
      return this;
    }

    /**
     * Quote style to expect in the input CSV data.
     *
     * Note: Only the following quoting styles are supported:
     *   1. MINIMAL: String columns containing special characters like
     *      row-delimiters/field-delimiters/quotes will be quoted.
     *   2. NONE: No quoting is done for any columns.
     */
    public Builder withQuoteStyle(QuoteStyle quoteStyle) {
      if (quoteStyle != QuoteStyle.MINIMAL && quoteStyle != QuoteStyle.NONE) {
        throw new IllegalArgumentException("Only MINIMAL and NONE quoting styles are supported");
      }
      this.quoteStyle = quoteStyle;
      return this;
    }

    /**
     * Set the character that starts the beginning of a comment line. Setting this to
     * 0 or '\0' will disable comments. The default is to have no comments.
     */
    public Builder withComment(char comment) {
      if (comment > 127) {
        throw new IllegalArgumentException("Only ASCII characters are currently supported");
      }
      this.comment = (byte) comment;
      return this;
    }

    public Builder withoutComments() {
      this.comment = 0;
      return this;
    }

    public Builder withNullValue(String... nvs) {
      for (String nv : nvs) {
        nullValues.add(nv);
      }
      return this;
    }

    public Builder withTrueValue(String... tvs) {
      for (String tv : tvs) {
        trueValues.add(tv);
      }
      return this;
    }

    public Builder withFalseValue(String... fvs) {
      for (String fv : fvs) {
        falseValues.add(fv);
      }
      return this;
    }

    public CSVOptions build() {
      return new CSVOptions(this);
    }
  }
}
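A minimal sketch building options for a headered, pipe-delimited file. The options are consumed by cudf's CSV read entry points (for example Table.readCSV); which schema and path you pass there is up to the caller.

CSVOptions opts = CSVOptions.builder()
    .hasHeader()                  // header is the first line
    .withDelim('|')               // ASCII field delimiter
    .withNullValue("NA", "null")  // strings decoded as nulls
    .withQuoteStyle(QuoteStyle.MINIMAL)
    .build();
// opts is then passed to a cudf read entry point such as Table.readCSV.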
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CudaFatalException.java
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

/**
 * CudaFatalException is a kind of CudaException which leaves the process in an inconsistent state
 * and any further CUDA work will return the same error.
 * To continue using CUDA, the process must be terminated and relaunched.
 */
public class CudaFatalException extends CudaException {
  CudaFatalException(String message, int errorCode) {
    this(message, "No native stacktrace is available.", errorCode);
  }

  CudaFatalException(String message, String nativeStacktrace, int errorCode) {
    super(message, nativeStacktrace, errorCode);
  }

  CudaFatalException(String message, String nativeStacktrace, int errorCode, Throwable cause) {
    super(message, nativeStacktrace, errorCode, cause);
  }
}
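A sketch of the recovery contract described above: once a fatal CUDA error is seen, stop issuing CUDA work and restart the process. The runGpuWork method is hypothetical, and the logging/exit policy is an illustrative choice, not part of cudf:

try {
  runGpuWork(); // hypothetical method that calls into cudf
} catch (CudaFatalException e) {
  // The CUDA context is unusable; any further CUDA call would fail the same way.
  System.err.println("Fatal CUDA error (" + e.getMessage() + "); terminating for relaunch");
  System.exit(1);
} catch (CudaException e) {
  // Non-fatal CUDA errors may be recoverable; handle or rethrow as appropriate.
  throw e;
}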
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ColumnView.java
/* * * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.util.*; import java.util.stream.IntStream; import static ai.rapids.cudf.HostColumnVector.OFFSET_SIZE; /** * This class represents the column_view of a column analogous to its cudf cpp counterpart. * It holds view information like the native handle and other metadata for a column_view. It also * exposes APIs that would allow operations on a view. */ public class ColumnView implements AutoCloseable, BinaryOperable { static { NativeDepsLoader.loadNativeDeps(); } public static final long UNKNOWN_NULL_COUNT = -1; protected long viewHandle; protected final DType type; protected final long rows; protected final long nullCount; protected final ColumnVector.OffHeapState offHeap; /** * Constructs a Column View given a native view address. This asserts that if the ColumnView is * of nested-type it doesn't contain non-empty nulls * @param address the view handle * @throws AssertionError if the address points to a nested-type view with non-empty nulls */ ColumnView(long address) { this.viewHandle = address; try { this.type = DType.fromNative(ColumnView.getNativeTypeId(viewHandle), ColumnView.getNativeTypeScale(viewHandle)); this.rows = ColumnView.getNativeRowCount(viewHandle); this.nullCount = ColumnView.getNativeNullCount(viewHandle); this.offHeap = null; AssertEmptyNulls.assertNullsAreEmpty(this); } catch (Throwable t) { // offHeap state is null, so there is nothing to clean in offHeap // delete ColumnView to avoid memory leak deleteColumnView(viewHandle); viewHandle = 0; throw t; } } /** * Intended to be called from ColumnVector when it is being constructed. Because state creates a * cudf::column_view instance and will close it in all cases, we don't want to have to double * close it. This asserts that if the offHeapState is of nested-type it doesn't contain non-empty nulls * @param state the state this view is based off of. * @throws AssertionError if offHeapState points to a nested-type view with non-empty nulls */ protected ColumnView(ColumnVector.OffHeapState state) { offHeap = state; try { viewHandle = state.getViewHandle(); type = DType.fromNative(ColumnView.getNativeTypeId(viewHandle), ColumnView.getNativeTypeScale(viewHandle)); rows = ColumnView.getNativeRowCount(viewHandle); nullCount = ColumnView.getNativeNullCount(viewHandle); AssertEmptyNulls.assertNullsAreEmpty(this); } catch (Throwable t) { // cleanup offHeap offHeap.clean(false); viewHandle = 0; throw t; } } /** * Create a new column view based off of data already on the device. Ref count on the buffers * is not incremented and none of the underlying buffers are owned by this view. The returned * ColumnView is only valid as long as the underlying buffers remain valid. If the buffers are * closed before this ColumnView is closed, it will result in undefined behavior. 
* * If ownership is needed, call {@link ColumnView#copyToColumnVector} * * @param type the type of the vector * @param rows the number of rows in this vector. * @param nullCount the number of nulls in the dataset. * @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0. * The ownership doesn't change on this buffer * @param offsetBuffer a host buffer required for nested types including strings and string * categories. The ownership doesn't change on this buffer * @param children an array of ColumnView children */ public ColumnView(DType type, long rows, Optional<Long> nullCount, BaseDeviceMemoryBuffer validityBuffer, BaseDeviceMemoryBuffer offsetBuffer, ColumnView[] children) { this(type, (int) rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(), null, validityBuffer, offsetBuffer, children); assert(type.isNestedType()); assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE) || !nullCount.isPresent(); } /** * Create a new column view based off of data already on the device. Ref count on the buffers * is not incremented and none of the underlying buffers are owned by this view. The returned * ColumnView is only valid as long as the underlying buffers remain valid. If the buffers are * closed before this ColumnView is closed, it will result in undefined behavior. * * If ownership is needed, call {@link ColumnView#copyToColumnVector} * * @param type the type of the vector * @param rows the number of rows in this vector. * @param nullCount the number of nulls in the dataset. * @param dataBuffer a host buffer required for nested types including strings and string * categories. The ownership doesn't change on this buffer * @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0. * The ownership doesn't change on this buffer */ public ColumnView(DType type, long rows, Optional<Long> nullCount, BaseDeviceMemoryBuffer dataBuffer, BaseDeviceMemoryBuffer validityBuffer) { this(type, (int) rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(), dataBuffer, validityBuffer, null, null); assert (!type.isNestedType()); assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE) || !nullCount.isPresent(); } /** * Create a new column view based off of data already on the device. Ref count on the buffers * is not incremented and none of the underlying buffers are owned by this view. The returned * ColumnView is only valid as long as the underlying buffers remain valid. If the buffers are * closed before this ColumnView is closed, it will result in undefined behavior. * * If ownership is needed, call {@link ColumnView#copyToColumnVector} * * @param type the type of the vector * @param rows the number of rows in this vector. * @param nullCount the number of nulls in the dataset. * @param dataBuffer a host buffer required for nested types including strings and string * categories. The ownership doesn't change on this buffer * @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0. 
* The ownership doesn't change on this buffer * @param offsetBuffer The offsetbuffer for columns that need an offset buffer */ public ColumnView(DType type, long rows, Optional<Long> nullCount, BaseDeviceMemoryBuffer dataBuffer, BaseDeviceMemoryBuffer validityBuffer, BaseDeviceMemoryBuffer offsetBuffer) { this(type, (int) rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(), dataBuffer, validityBuffer, offsetBuffer, null); assert (!type.isNestedType()); assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE) || !nullCount.isPresent(); } private ColumnView(DType type, long rows, int nullCount, BaseDeviceMemoryBuffer dataBuffer, BaseDeviceMemoryBuffer validityBuffer, BaseDeviceMemoryBuffer offsetBuffer, ColumnView[] children) { this(ColumnVector.initViewHandle(type, (int) rows, nullCount, dataBuffer, validityBuffer, offsetBuffer, children == null ? new long[]{} : Arrays.stream(children).mapToLong(c -> c.getNativeView()).toArray())); } /** Creates a ColumnVector from a column view handle * @return a new ColumnVector */ public ColumnVector copyToColumnVector() { return new ColumnVector(ColumnView.copyColumnViewToCV(getNativeView())); } /** * USE WITH CAUTION: This method exposes the address of the native cudf::column_view. This allows * writing custom kernels or other cuda operations on the data. DO NOT close this column * vector until you are completely done using the native column_view. DO NOT modify the column in * any way. This should be treated as a read only data structure. This API is unstable as * the underlying C/C++ API is still not stabilized. If the underlying data structure * is renamed this API may be replaced. The underlying data structure can change from release * to release (it is not stable yet) so be sure that your native code is complied against the * exact same version of libcudf as this is released for. */ public final long getNativeView() { return viewHandle; } static int getFixedPointOutputScale(BinaryOp op, DType lhsType, DType rhsType) { assert (lhsType.isDecimalType() && rhsType.isDecimalType()); return fixedPointOutputScale(op.nativeId, lhsType.getScale(), rhsType.getScale()); } private static native int fixedPointOutputScale(int op, int lhsScale, int rhsScale); public final DType getType() { return type; } /** * Returns the child column views for this view * Please note that it is the responsibility of the caller to close these views. * @return an array of child column views */ public final ColumnView[] getChildColumnViews() { int numChildren = getNumChildren(); if (!getType().isNestedType()) { return null; } ColumnView[] views = new ColumnView[numChildren]; try { for (int i = 0; i < numChildren; i++) { views[i] = getChildColumnView(i); } return views; } catch(Throwable t) { for (ColumnView v: views) { if (v != null) { v.close(); } } throw t; } } /** * Returns the child column view at a given index. * Please note that it is the responsibility of the caller to close this view. * @param childIndex the index of the child * @return a column view */ public final ColumnView getChildColumnView(int childIndex) { int numChildren = getNumChildren(); assert childIndex < numChildren : "children index should be less than " + numChildren; if (!getType().isNestedType()) { return null; } long childColumnView = ColumnView.getChildCvPointer(viewHandle, childIndex); return new ColumnView(childColumnView); } /** * Get a ColumnView that is the offsets for this list. 
* Please note that it is the responsibility of the caller to close this view, and the parent * column must out live this view. */ public ColumnView getListOffsetsView() { assert(getType().equals(DType.LIST)); return new ColumnView(getListOffsetCvPointer(viewHandle)); } /** * Gets the data buffer for the current column view (viewHandle). * If the type is LIST, STRUCT it returns null. * @return If the type is LIST, STRUCT or data buffer is empty it returns null, * else return the data device buffer */ public final BaseDeviceMemoryBuffer getData() { return getDataBuffer(viewHandle); } public final BaseDeviceMemoryBuffer getOffsets() { return getOffsetsBuffer(viewHandle); } public final BaseDeviceMemoryBuffer getValid() { return getValidityBuffer(viewHandle); } /** * Returns the number of nulls in the data. Note that this might end up * being a very expensive operation because if the null count is not * known it will be calculated. */ public long getNullCount() { return nullCount; } /** * Returns the number of rows in this vector. */ public final long getRowCount() { return rows; } public final int getNumChildren() { if (!getType().isNestedType()) { return 0; } return ColumnView.getNativeNumChildren(viewHandle); } /** * Returns the amount of device memory used. */ public long getDeviceMemorySize() { return getDeviceMemorySize(getNativeView(), false); } @Override public void close() { // close the view handle so long as offHeap is not going to do it for us. if (offHeap == null) { ColumnView.deleteColumnView(viewHandle); } viewHandle = 0; } @Override public String toString() { return "ColumnView{" + "rows=" + rows + ", type=" + type + ", nullCount=" + nullCount + '}'; } /** * Used for string strip function. * Indicates characters to be stripped from the beginning, end, or both of each string. */ private enum StripType { LEFT(0), // strip characters from the beginning of the string RIGHT(1), // strip characters from the end of the string BOTH(2); // strip characters from the beginning and end of the string final int nativeId; StripType(int nativeId) { this.nativeId = nativeId; } } /** * Returns a new ColumnVector with NaNs converted to nulls, preserving the existing null values. */ public final ColumnVector nansToNulls() { assert type.equals(DType.FLOAT32) || type.equals(DType.FLOAT64); return new ColumnVector(nansToNulls(this.getNativeView())); } ///////////////////////////////////////////////////////////////////////////// // DEVICE METADATA ///////////////////////////////////////////////////////////////////////////// /** * Retrieve the number of characters in each string. Null strings will have value of null. * * @return ColumnVector holding length of string at index 'i' in the original vector */ public final ColumnVector getCharLengths() { assert DType.STRING.equals(type) : "char length only available for String type"; return new ColumnVector(charLengths(getNativeView())); } /** * Retrieve the number of bytes for each string. Null strings will have value of null. * * @return ColumnVector, where each element at i = byte count of string at index 'i' in the original vector */ public final ColumnVector getByteCount() { assert type.equals(DType.STRING) : "type has to be a String"; return new ColumnVector(byteCount(getNativeView())); } /** * Get the number of elements for each list. Null lists will have a value of null. * @return the number of elements in each list as an INT32 value. 
*/ public final ColumnVector countElements() { assert DType.LIST.equals(type) : "Only lists are supported"; return new ColumnVector(countElements(getNativeView())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * TRUE for any entry that is not null, and FALSE for any null entry (as per the validity mask) * * @return - Boolean vector */ public final ColumnVector isNotNull() { return new ColumnVector(isNotNullNative(getNativeView())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * FALSE for any entry that is not null, and TRUE for any null entry (as per the validity mask) * * @return - Boolean vector */ public final ColumnVector isNull() { return new ColumnVector(isNullNative(getNativeView())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * TRUE for any entry that is a fixed-point, and FALSE if its not a fixed-point. * A null will be returned for null entries. * * The sign and the exponent is optional. The decimal point may only appear once. * The integer component must fit within the size limits of the underlying fixed-point * storage type. The value of the integer component is based on the scale of the target * decimalType. * * Example: * vec = ["A", "nan", "Inf", "-Inf", "Infinity", "infinity", "2.1474", "112.383", "-2.14748", * "NULL", "null", null, "1.2", "1.2e-4", "0.00012"] * vec.isFixedPoint() = [false, false, false, false, false, false, true, true, true, false, false, * null, true, true, true] * * @param decimalType the data type that should be used for bounds checking. Note that only * Decimal types (fixed-point) are allowed. * @return Boolean vector */ public final ColumnVector isFixedPoint(DType decimalType) { assert type.equals(DType.STRING); assert decimalType.isDecimalType(); return new ColumnVector(isFixedPoint(getNativeView(), decimalType.getTypeId().getNativeId(), decimalType.getScale())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * TRUE for any entry that is an integer, and FALSE if its not an integer. A null will be returned * for null entries. * * NOTE: Integer doesn't mean a 32-bit integer. It means a number that is not a fraction. * i.e. If this method returns true for a value it could still result in an overflow or underflow * if you convert it to a Java integral type * * @return Boolean vector */ public final ColumnVector isInteger() { assert type.equals(DType.STRING); return new ColumnVector(isInteger(getNativeView())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * TRUE for any entry that is an integer, and FALSE if its not an integer. A null will be returned * for null entries. * * @param intType the data type that should be used for bounds checking. Note that only * cudf integer types are allowed including signed/unsigned int8 through int64 * @return Boolean vector */ public final ColumnVector isInteger(DType intType) { assert type.equals(DType.STRING); assert intType.isBackedByInt() || intType.isBackedByLong() || intType.isBackedByByte() || intType.isBackedByShort(); return new ColumnVector(isIntegerWithType(getNativeView(), intType.getTypeId().getNativeId(), intType.getScale())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * TRUE for any entry that is a float, and FALSE if its not a float. A null will be returned * for null entries * * NOTE: Float doesn't mean a 32-bit float. 
It means a number that is a fraction or can be written * as a fraction. i.e. This method will return true for integers as well as floats. Also note if * this method returns true for a value it could still result in an overflow or underflow if you * convert it to a Java float or double * * @return - Boolean vector */ public final ColumnVector isFloat() { assert type.equals(DType.STRING); return new ColumnVector(isFloat(getNativeView())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * TRUE for any entry that is NaN, and FALSE if null or a valid floating point value * @return - Boolean vector */ public final ColumnVector isNan() { return new ColumnVector(isNanNative(getNativeView())); } /** * Returns a Boolean vector with the same number of rows as this instance, that has * TRUE for any entry that is null or a valid floating point value, FALSE otherwise * @return - Boolean vector */ public final ColumnVector isNotNan() { return new ColumnVector(isNotNanNative(getNativeView())); } ///////////////////////////////////////////////////////////////////////////// // Replacement ///////////////////////////////////////////////////////////////////////////// /** * Returns a vector with all values "oldValues[i]" replaced with "newValues[i]". * Warning: * Currently this function doesn't work for Strings or StringCategories. * NaNs can't be replaced in the original vector but regular values can be replaced with NaNs * Nulls can't be replaced in the original vector but regular values can be replaced with Nulls * Mixing of types isn't allowed, the resulting vector will be the same type as the original. * e.g. You can't replace an integer vector with values from a long vector * * Usage: * this = {1, 4, 5, 1, 5} * oldValues = {1, 5, 7} * newValues = {2, 6, 9} * * result = this.findAndReplaceAll(oldValues, newValues); * result = {2, 4, 6, 2, 6} (1 and 5 replaced with 2 and 6 but 7 wasn't found so no change) * * @param oldValues - A vector containing values that should be replaced * @param newValues - A vector containing new values * @return - A new vector containing the old values replaced with new values */ public final ColumnVector findAndReplaceAll(ColumnView oldValues, ColumnView newValues) { return new ColumnVector(findAndReplaceAll(oldValues.getNativeView(), newValues.getNativeView(), this.getNativeView())); } /** * Returns a ColumnVector with any null values replaced with a scalar. * The types of the input ColumnVector and Scalar must match, else an error is thrown. * * @param scalar - Scalar value to use as replacement * @return - ColumnVector with nulls replaced by scalar */ public final ColumnVector replaceNulls(Scalar scalar) { return new ColumnVector(replaceNullsScalar(getNativeView(), scalar.getScalarHandle())); } /** * Returns a ColumnVector with any null values replaced with the corresponding row in the * specified replacement column. * This column and the replacement column must have the same type and number of rows. 
* * @param replacements column of replacement values * @return column with nulls replaced by corresponding row of replacements column */ public final ColumnVector replaceNulls(ColumnView replacements) { return new ColumnVector(replaceNullsColumn(getNativeView(), replacements.getNativeView())); } public final ColumnVector replaceNulls(ReplacePolicy policy) { return new ColumnVector(replaceNullsPolicy(getNativeView(), policy.isPreceding)); } /** * For a BOOL8 vector, computes a vector whose rows are selected from two other vectors * based on the boolean value of this vector in the corresponding row. * If the boolean value in a row is true, the corresponding row is selected from trueValues * otherwise the corresponding row from falseValues is selected. * Note that trueValues and falseValues vectors must be the same length as this vector, * and trueValues and falseValues must have the same data type. * @param trueValues the values to select if a row in this column is true * @param falseValues the values to select if a row in this column is not true * @return the computed vector */ public final ColumnVector ifElse(ColumnView trueValues, ColumnView falseValues) { if (!type.equals(DType.BOOL8)) { throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type); } long result = ifElseVV(getNativeView(), trueValues.getNativeView(), falseValues.getNativeView()); return new ColumnVector(result); } /** * For a BOOL8 vector, computes a vector whose rows are selected from two other inputs * based on the boolean value of this vector in the corresponding row. * If the boolean value in a row is true, the corresponding row is selected from trueValues * otherwise the value from falseValue is selected. * Note that trueValues must be the same length as this vector, * and trueValues and falseValue must have the same data type. * Note that the trueValues vector and falseValue scalar must have the same data type. * @param trueValues the values to select if a row in this column is true * @param falseValue the value to select if a row in this column is not true * @return the computed vector */ public final ColumnVector ifElse(ColumnView trueValues, Scalar falseValue) { if (!type.equals(DType.BOOL8)) { throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type); } long result = ifElseVS(getNativeView(), trueValues.getNativeView(), falseValue.getScalarHandle()); return new ColumnVector(result); } /** * For a BOOL8 vector, computes a vector whose rows are selected from two other inputs * based on the boolean value of this vector in the corresponding row. * If the boolean value in a row is true, the value from trueValue is selected * otherwise the corresponding row from falseValues is selected. * Note that falseValues must be the same length as this vector, * and trueValue and falseValues must have the same data type. * Note that the trueValue scalar and falseValues vector must have the same data type. 
 * @param trueValue the value to select if a row in this column is true
 * @param falseValues the values to select if a row in this column is not true
 * @return the computed vector
 */
public final ColumnVector ifElse(Scalar trueValue, ColumnView falseValues) {
  if (!type.equals(DType.BOOL8)) {
    throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type);
  }
  long result = ifElseSV(getNativeView(), trueValue.getScalarHandle(),
      falseValues.getNativeView());
  return new ColumnVector(result);
}

/**
 * For a BOOL8 vector, computes a vector whose rows are selected from two other inputs
 * based on the boolean value of this vector in the corresponding row.
 * If the boolean value in a row is true, the value from trueValue is selected,
 * otherwise the value from falseValue is selected.
 * Note that the trueValue and falseValue scalars must have the same data type.
 * @param trueValue the value to select if a row in this column is true
 * @param falseValue the value to select if a row in this column is not true
 * @return the computed vector
 */
public final ColumnVector ifElse(Scalar trueValue, Scalar falseValue) {
  if (!type.equals(DType.BOOL8)) {
    throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type);
  }
  long result = ifElseSS(getNativeView(), trueValue.getScalarHandle(),
      falseValue.getScalarHandle());
  return new ColumnVector(result);
}

/////////////////////////////////////////////////////////////////////////////
// Slice/Split and Concatenate
/////////////////////////////////////////////////////////////////////////////

/**
 * Slices a column (including null values) into a set of columns
 * according to a set of indices. The caller owns the output ColumnVectors and is
 * responsible for closing them.
 *
 * The "slice" function divides part of the input column into multiple intervals
 * of rows using the indices values, and it stores the intervals in the output
 * columns. Regarding the interval of indices, a pair of values is taken from
 * the indices array in a consecutive manner. The pair of indices is left-closed
 * and right-open.
 *
 * The pairs of indices in the array are required to comply with the following
 * conditions:
 * a, b belong to Range[0, input column size]
 * a <= b, where the position of a is less than or equal to the position of b.
 *
 * Exceptional cases for the indices array are:
 * When the values in the pair are equal, the function returns an empty column.
 * When the values in the pair are 'strictly decreasing', the outcome is
 * undefined.
 * When any of the values in the pair don't belong to the range [0, input column
 * size), the outcome is undefined.
 * When the indices array is empty, an empty vector of columns is returned.
 *
 * @param indices the indices delimiting the slices, taken in consecutive pairs
 * @return A new ColumnVector array with slices from the original ColumnVector
 */
public final ColumnVector[] slice(int... indices) {
  long[] nativeHandles = slice(this.getNativeView(), indices);
  ColumnVector[] columnVectors = new ColumnVector[nativeHandles.length];
  try {
    for (int i = 0; i < nativeHandles.length; i++) {
      long nativeHandle = nativeHandles[i];
      // setting address to zero, so we don't clean it in case of an exception as it
      // will be cleaned up by the constructor
      nativeHandles[i] = 0;
      columnVectors[i] = new ColumnVector(nativeHandle);
    }
  } catch (Throwable t) {
    try {
      cleanupColumnViews(nativeHandles, columnVectors, t);
    } catch (Throwable s) {
      t.addSuppressed(s);
    } finally {
      throw t;
    }
  }
  return columnVectors;
}

/**
 * Return a subVector from start inclusive to the end of the vector.
 * @param start the index to start at.
 */
public final ColumnVector subVector(int start) {
  return subVector(start, (int) rows);
}

/**
 * Return a subVector.
 * @param start the index to start at (inclusive).
 * @param end the index to end at (exclusive).
 */
public final ColumnVector subVector(int start, int end) {
  ColumnVector[] tmp = slice(start, end);
  assert tmp.length == 1;
  return tmp[0];
}

/**
 * Splits a column (including null values) into a set of columns
 * according to a set of indices. The caller owns the ColumnVectors and is
 * responsible for closing them.
 *
 * The "split" function divides the input column into multiple intervals
 * of rows using the splits indices values, and it stores the intervals in the
 * output columns. Regarding the interval of indices, a pair of values is taken
 * from the indices array in a consecutive manner. The pair of indices is
 * left-closed and right-open.
 *
 * The indices array ('splits') is required to be a monotonic non-decreasing set.
 * The indices in the array are required to comply with the following conditions:
 * a, b belong to Range[0, input column size]
 * a <= b, where the position of a is less than or equal to the position of b.
 *
 * The split function will take a pair of indices from the indices array
 * ('splits') in a consecutive manner. For the first pair, the function will
 * take the value 0 and the first element of the indices array. For the last pair,
 * the function will take the last element of the indices array and the size of
 * the input column.
 *
 * Exceptional cases for the indices array are:
 * When the values in the pair are equal, the function returns an empty column.
 * When the values in the pair are 'strictly decreasing', the outcome is
 * undefined.
 * When any of the values in the pair don't belong to the range [0, input column
 * size), the outcome is undefined.
 * When the indices array is empty, an empty vector of columns is returned.
 *
 * The output columns may have different sizes. The number of
 * columns must be equal to the number of indices in the array plus one.
 *
 * Example:
 * input:  {10, 12, 14, 16, 18, 20, 22, 24, 26, 28}
 * splits: {2, 5, 9}
 * output: {{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}}
 *
 * Note that this is very similar to the output from a PartitionedTable.
 *
 * @param indices the indexes to split with
 * @return A new ColumnVector array with slices from the original ColumnVector
 */
public final ColumnVector[] split(int...
indices) { ColumnView[] views = splitAsViews(indices); ColumnVector[] columnVectors = new ColumnVector[views.length]; try { for (int i = 0; i < views.length; i++) { columnVectors[i] = views[i].copyToColumnVector(); } return columnVectors; } catch (Throwable t) { for (ColumnVector cv : columnVectors) { if (cv != null) { cv.close(); } } throw t; } finally { for (ColumnView view : views) { view.close(); } } } /** * Splits a ColumnView (including null values) into a set of ColumnViews * according to a set of indices. No data is moved or copied. * * IMPORTANT NOTE: Nothing is copied out from the vector and the slices will only be relevant for * the lifecycle of the underlying ColumnVector. * * The "split" function divides the input column into multiple intervals * of rows using the splits indices values and it stores the intervals into the * output columns. Regarding the interval of indices, a pair of values are taken * from the indices array in a consecutive manner. The pair of indices are * left-closed and right-open. * * The indices array ('splits') is required to be a monotonic non-decreasing set. * The indices in the array are required to comply with the following conditions: * a, b belongs to Range[0, input column size] * a <= b, where the position of 'a' is less or equal to the position of 'b'. * * The split function will take a pair of indices from the indices array * ('splits') in a consecutive manner. For the first pair, the function will * take the value 0 and the first element of the indices array. For the last pair, * the function will take the last element of the indices array and the size of * the input column. * * Exceptional cases for the indices array are: * When the values in the pair are equal, the function return an empty column. * When the values in the pair are 'strictly decreasing', the outcome is * undefined. * When any of the values in the pair don't belong to the range[0, input column * size), the outcome is undefined. * When the indices array is empty, an empty array of ColumnViews is returned. * * The output columns may have different sizes. The number of * columns must be equal to the number of indices in the array plus one. * * Example: * input: {10, 12, 14, 16, 18, 20, 22, 24, 26, 28} * splits: {2, 5, 9} * output: {{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}} * * Note that this is very similar to the output from a PartitionedTable. * * * @param indices the indices to split with * @return A new ColumnView array with slices from the original ColumnView */ public ColumnView[] splitAsViews(int... 
indices) { long[] nativeHandles = split(this.getNativeView(), indices); ColumnView[] columnViews = new ColumnView[nativeHandles.length]; try { for (int i = 0; i < nativeHandles.length; i++) { long nativeHandle = nativeHandles[i]; // setting address to zero, so we don't clean it in case of an exception as it // will be cleaned up by the constructor nativeHandles[i] = 0; columnViews[i] = new ColumnView(nativeHandle); } } catch (Throwable t) { try { cleanupColumnViews(nativeHandles, columnViews, t); } catch (Throwable s) { t.addSuppressed(s); } finally { throw t; } } return columnViews; } static void cleanupColumnViews(long[] nativeHandles, ColumnView[] columnViews, Throwable throwable) { for (ColumnView columnView : columnViews) { if (columnView != null) { try { columnView.close(); } catch (Throwable s) { throwable.addSuppressed(s); } } } for (long nativeHandle : nativeHandles) { if (nativeHandle != 0) { try { deleteColumnView(nativeHandle); } catch (Throwable s) { throwable.addSuppressed(s); } } } } /** * Create a new vector of "normalized" values, where: * 1. All representations of NaN (and -NaN) are replaced with the normalized NaN value * 2. All elements equivalent to 0.0 (including +0.0 and -0.0) are replaced with +0.0. * 3. All elements that are not equivalent to NaN or 0.0 remain unchanged. * * The documentation for {@link Double#longBitsToDouble(long)} * describes how equivalent values of NaN/-NaN might have different bitwise representations. * * This method may be used to compare different bitwise values of 0.0 or NaN as logically * equivalent. For instance, if these values appear in a groupby key column, without normalization * 0.0 and -0.0 would be erroneously treated as distinct groups, as will each representation of NaN. * * @return A new ColumnVector with all elements equivalent to NaN/0.0 replaced with a normalized equivalent. */ public final ColumnVector normalizeNANsAndZeros() { return new ColumnVector(normalizeNANsAndZeros(getNativeView())); } /** * Create a deep copy of the column while replacing the null mask. The resultant null mask is the * bitwise merge of null masks in the columns given as arguments. * The result will be sanitized to not contain any non-empty nulls in case of nested types * * @param mergeOp binary operator (BITWISE_AND and BITWISE_OR only) * @param columns array of columns whose null masks are merged, must have identical number of rows. * @return the new ColumnVector with merged null mask. */ public final ColumnVector mergeAndSetValidity(BinaryOp mergeOp, ColumnView... columns) { assert mergeOp == BinaryOp.BITWISE_AND || mergeOp == BinaryOp.BITWISE_OR : "Only BITWISE_AND and BITWISE_OR supported right now"; long[] columnViews = new long[columns.length]; long size = getRowCount(); for(int i = 0; i < columns.length; i++) { assert columns[i] != null : "Column vectors passed may not be null"; assert columns[i].getRowCount() == size : "Row count mismatch, all columns must be the same size"; columnViews[i] = columns[i].getNativeView(); } return new ColumnVector(bitwiseMergeAndSetValidity(getNativeView(), columnViews, mergeOp.nativeId)); } /** * Creates a deep copy of a column while replacing the validity mask. The validity mask is the * device_vector equivalent of the boolean column given as argument. * * The boolColumn must have the same number of rows as the current column. * The result column will have the same number of rows as the current column. * For all indices `i` where the boolColumn is `true`, the result column will have a valid value at index i. 
* For all other values (i.e. `false` or `null`), the result column will have nulls. * * If the current column has a null at a given index `i`, and the new validity mask is `true` at index `i`, * then the row value is undefined. * * @param boolColumn bool column whose value is to be used as the validity mask. * @return Deep copy of the column with replaced validity mask. */ public final ColumnVector copyWithBooleanColumnAsValidity(ColumnView boolColumn) { return new ColumnVector(copyWithBooleanColumnAsValidity(getNativeView(), boolColumn.getNativeView())); } ///////////////////////////////////////////////////////////////////////////// // DATE/TIME ///////////////////////////////////////////////////////////////////////////// /** * Get year from a timestamp. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return - A new INT16 vector allocated on the GPU. */ public final ColumnVector year() { assert type.isTimestampType(); return new ColumnVector(year(getNativeView())); } /** * Get month from a timestamp. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return - A new INT16 vector allocated on the GPU. */ public final ColumnVector month() { assert type.isTimestampType(); return new ColumnVector(month(getNativeView())); } /** * Get day from a timestamp. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return - A new INT16 vector allocated on the GPU. */ public final ColumnVector day() { assert type.isTimestampType(); return new ColumnVector(day(getNativeView())); } /** * Get hour from a timestamp with time resolution. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return - A new INT16 vector allocated on the GPU. */ public final ColumnVector hour() { assert type.hasTimeResolution(); return new ColumnVector(hour(getNativeView())); } /** * Get minute from a timestamp with time resolution. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return - A new INT16 vector allocated on the GPU. */ public final ColumnVector minute() { assert type.hasTimeResolution(); return new ColumnVector(minute(getNativeView())); } /** * Get second from a timestamp with time resolution. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return A new INT16 vector allocated on the GPU. */ public final ColumnVector second() { assert type.hasTimeResolution(); return new ColumnVector(second(getNativeView())); } /** * Get the day of the week from a timestamp. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return A new INT16 vector allocated on the GPU. Monday=1, ..., Sunday=7 */ public final ColumnVector weekDay() { assert type.isTimestampType(); return new ColumnVector(weekDay(getNativeView())); } /** * Get the date that is the last day of the month for this timestamp. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return A new TIMESTAMP_DAYS vector allocated on the GPU. 
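 *
 * Example sketch (editor's illustration; assumes the timestampDaysFromBoxedInts factory):
 * <pre>{@code
 * try (ColumnVector days = ColumnVector.timestampDaysFromBoxedInts(0); // 1970-01-01
 *      ColumnVector last = days.lastDayOfMonth()) {
 *   // last holds 1970-01-31, i.e. day 30 since the epoch
 * }
 * }</pre>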
*/ public final ColumnVector lastDayOfMonth() { assert type.isTimestampType(); return new ColumnVector(lastDayOfMonth(getNativeView())); } /** * Get the day of the year from a timestamp. * <p> * Postconditions - A new vector is allocated with the result. The caller owns the vector and * is responsible for its lifecycle. * @return A new INT16 vector allocated on the GPU. The value is between [1, {365-366}] */ public final ColumnVector dayOfYear() { assert type.isTimestampType(); return new ColumnVector(dayOfYear(getNativeView())); } /** * Get the quarter of the year from a timestamp. * @return A new INT16 vector allocated on the GPU. It will be a value from {1, 2, 3, 4} * corresponding to the quarter of the year. */ public final ColumnVector quarterOfYear() { assert type.isTimestampType(); return new ColumnVector(quarterOfYear(getNativeView())); } /** * Add the specified number of months to the timestamp. * @param months must be a INT16 column indicating the number of months to add. A negative number * of months works too. * @return the updated timestamp */ public final ColumnVector addCalendricalMonths(ColumnView months) { return new ColumnVector(addCalendricalMonths(getNativeView(), months.getNativeView())); } /** * Check to see if the year for this timestamp is a leap year or not. * @return BOOL8 vector of results */ public final ColumnVector isLeapYear() { return new ColumnVector(isLeapYear(getNativeView())); } /** * Rounds all the values in a column to the specified number of decimal places. * * @param decimalPlaces Number of decimal places to round to. If negative, this * specifies the number of positions to the left of the decimal point. * @param mode Rounding method(either HALF_UP or HALF_EVEN) * @return a new ColumnVector with rounded values. */ public ColumnVector round(int decimalPlaces, RoundMode mode) { return new ColumnVector(round(this.getNativeView(), decimalPlaces, mode.nativeId)); } /** * Rounds all the values in a column with decimal places = 0. Default number of decimal places * to round to is 0. * * @param round Rounding method(either HALF_UP or HALF_EVEN) * @return a new ColumnVector with rounded values. */ public ColumnVector round(RoundMode round) { return round(0, round); } /** * Rounds all the values in a column to the specified number of decimal places with HALF_UP * (default) as Rounding method. * * @param decimalPlaces Number of decimal places to round to. If negative, this * specifies the number of positions to the left of the decimal point. * @return a new ColumnVector with rounded values. */ public ColumnVector round(int decimalPlaces) { return round(decimalPlaces, RoundMode.HALF_UP); } /** * Rounds all the values in a column with these default values: * decimalPlaces = 0 * Rounding method = RoundMode.HALF_UP * * @return a new ColumnVector with rounded values. */ public ColumnVector round() { return round(0, RoundMode.HALF_UP); } ///////////////////////////////////////////////////////////////////////////// // ARITHMETIC ///////////////////////////////////////////////////////////////////////////// /** * Transform a vector using a custom function. Be careful this is not * simple to do. You need to be positive you know what type of data you are * processing and how the data is laid out. This also only works on fixed * length types. * @param udf This function will be applied to every element in the vector * @param isPtx is the code of the function ptx? true or C/C++ false. 
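 *
 * A hedged sketch (editor's example; the UDF string below is hypothetical, and the exact
 * function signature expected by the native transform code should be checked against libcudf):
 * <pre>{@code
 * String udf = "__device__ inline void f(int* out, int in) { *out = in * in; }";
 * // col is assumed to be an existing INT32 ColumnVector
 * try (ColumnVector squared = col.transform(udf, false)) {
 *   // each row of squared would be the square of the corresponding input row
 * }
 * }</pre>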
*/ public final ColumnVector transform(String udf, boolean isPtx) { return new ColumnVector(transform(getNativeView(), udf, isPtx)); } /** * Multiple different unary operations. The output is the same type as input. * @param op the operation to perform * @return the result */ public final ColumnVector unaryOp(UnaryOp op) { return new ColumnVector(unaryOperation(getNativeView(), op.nativeId)); } /** * Calculate the sin, output is the same type as input. */ public final ColumnVector sin() { return unaryOp(UnaryOp.SIN); } /** * Calculate the cos, output is the same type as input. */ public final ColumnVector cos() { return unaryOp(UnaryOp.COS); } /** * Calculate the tan, output is the same type as input. */ public final ColumnVector tan() { return unaryOp(UnaryOp.TAN); } /** * Calculate the arcsin, output is the same type as input. */ public final ColumnVector arcsin() { return unaryOp(UnaryOp.ARCSIN); } /** * Calculate the arccos, output is the same type as input. */ public final ColumnVector arccos() { return unaryOp(UnaryOp.ARCCOS); } /** * Calculate the arctan, output is the same type as input. */ public final ColumnVector arctan() { return unaryOp(UnaryOp.ARCTAN); } /** * Calculate the hyperbolic sin, output is the same type as input. */ public final ColumnVector sinh() { return unaryOp(UnaryOp.SINH); } /** * Calculate the hyperbolic cos, output is the same type as input. */ public final ColumnVector cosh() { return unaryOp(UnaryOp.COSH); } /** * Calculate the hyperbolic tan, output is the same type as input. */ public final ColumnVector tanh() { return unaryOp(UnaryOp.TANH); } /** * Calculate the hyperbolic arcsin, output is the same type as input. */ public final ColumnVector arcsinh() { return unaryOp(UnaryOp.ARCSINH); } /** * Calculate the hyperbolic arccos, output is the same type as input. */ public final ColumnVector arccosh() { return unaryOp(UnaryOp.ARCCOSH); } /** * Calculate the hyperbolic arctan, output is the same type as input. */ public final ColumnVector arctanh() { return unaryOp(UnaryOp.ARCTANH); } /** * Calculate the exp, output is the same type as input. */ public final ColumnVector exp() { return unaryOp(UnaryOp.EXP); } /** * Calculate the log, output is the same type as input. */ public final ColumnVector log() { return unaryOp(UnaryOp.LOG); } /** * Calculate the log with base 2, output is the same type as input. */ public final ColumnVector log2() { try (Scalar base = Scalar.fromInt(2)) { return binaryOp(BinaryOp.LOG_BASE, base, getType()); } } /** * Calculate the log with base 10, output is the same type as input. */ public final ColumnVector log10() { try (Scalar base = Scalar.fromInt(10)) { return binaryOp(BinaryOp.LOG_BASE, base, getType()); } } /** * Calculate the sqrt, output is the same type as input. */ public final ColumnVector sqrt() { return unaryOp(UnaryOp.SQRT); } /** * Calculate the cube root, output is the same type as input. */ public final ColumnVector cbrt() { return unaryOp(UnaryOp.CBRT); } /** * Calculate the ceil, output is the same type as input. */ public final ColumnVector ceil() { return unaryOp(UnaryOp.CEIL); } /** * Calculate the floor, output is the same type as input. */ public final ColumnVector floor() { return unaryOp(UnaryOp.FLOOR); } /** * Calculate the abs, output is the same type as input. */ public final ColumnVector abs() { return unaryOp(UnaryOp.ABS); } /** * Rounds a floating-point argument to the closest integer value, but returns it as a float. 
*/ public final ColumnVector rint() { return unaryOp(UnaryOp.RINT); } /** * invert the bits, output is the same type as input. */ public final ColumnVector bitInvert() { return unaryOp(UnaryOp.BIT_INVERT); } /** * Multiple different binary operations. * @param op the operation to perform * @param rhs the rhs of the operation * @param outType the type of output you want. * @return the result */ @Override public final ColumnVector binaryOp(BinaryOp op, BinaryOperable rhs, DType outType) { if (rhs instanceof ColumnView) { assert rows == ((ColumnView) rhs).getRowCount(); return new ColumnVector(binaryOp(this, (ColumnView) rhs, op, outType)); } else { return new ColumnVector(binaryOp(this, (Scalar) rhs, op, outType)); } } static long binaryOp(ColumnView lhs, ColumnView rhs, BinaryOp op, DType outputType) { return binaryOpVV(lhs.getNativeView(), rhs.getNativeView(), op.nativeId, outputType.typeId.getNativeId(), outputType.getScale()); } static long binaryOp(ColumnView lhs, Scalar rhs, BinaryOp op, DType outputType) { return binaryOpVS(lhs.getNativeView(), rhs.getScalarHandle(), op.nativeId, outputType.typeId.getNativeId(), outputType.getScale()); } ///////////////////////////////////////////////////////////////////////////// // AGGREGATION ///////////////////////////////////////////////////////////////////////////// /** * Computes the sum of all values in the column, returning a scalar * of the same type as this column. */ public Scalar sum() { return sum(type); } /** * Computes the sum of all values in the column, returning a scalar * of the specified type. */ public Scalar sum(DType outType) { return reduce(ReductionAggregation.sum(), outType); } /** * Returns the minimum of all values in the column, returning a scalar * of the same type as this column. */ public Scalar min() { return reduce(ReductionAggregation.min(), type); } /** * Returns the minimum of all values in the column, returning a scalar * of the specified type. * @deprecated the min reduction no longer internally allows for setting the output type, as a * work around this API will cast the input type to the output type for you, but this may not * work in all cases. */ @Deprecated public Scalar min(DType outType) { if (!outType.equals(type)) { try (ColumnVector tmp = this.castTo(outType)) { return tmp.min(outType); } } return reduce(ReductionAggregation.min(), outType); } /** * Returns the maximum of all values in the column, returning a scalar * of the same type as this column. */ public Scalar max() { return reduce(ReductionAggregation.max(), type); } /** * Returns the maximum of all values in the column, returning a scalar * of the specified type. * @deprecated the max reduction no longer internally allows for setting the output type, as a * work around this API will cast the input type to the output type for you, but this may not * work in all cases. */ @Deprecated public Scalar max(DType outType) { if (!outType.equals(type)) { try (ColumnVector tmp = this.castTo(outType)) { return tmp.max(outType); } } return reduce(ReductionAggregation.max(), outType); } /** * Returns the product of all values in the column, returning a scalar * of the same type as this column. */ public Scalar product() { return product(type); } /** * Returns the product of all values in the column, returning a scalar * of the specified type. */ public Scalar product(DType outType) { return reduce(ReductionAggregation.product(), outType); } /** * Returns the sum of squares of all values in the column, returning a * scalar of the same type as this column. 
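 *
 * Reduction example (editor's sketch; assumes the fromInts factory):
 * <pre>{@code
 * try (ColumnVector col = ColumnVector.fromInts(1, 2, 3);
 *      Scalar ss = col.sumOfSquares()) {
 *   // ss.getInt() == 14  (1 + 4 + 9)
 * }
 * }</pre>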
 */
public Scalar sumOfSquares() {
  return sumOfSquares(type);
}

/**
 * Returns the sum of squares of all values in the column, returning a
 * scalar of the specified type.
 */
public Scalar sumOfSquares(DType outType) {
  return reduce(ReductionAggregation.sumOfSquares(), outType);
}

/**
 * Returns the arithmetic mean of all values in the column, returning a
 * FLOAT64 scalar unless the column type is FLOAT32, in which case a FLOAT32
 * scalar is returned. Null values are skipped.
 */
public Scalar mean() {
  DType outType = DType.FLOAT64;
  if (type.equals(DType.FLOAT32)) {
    outType = type;
  }
  return mean(outType);
}

/**
 * Returns the arithmetic mean of all values in the column, returning a
 * scalar of the specified type.
 * Null values are skipped.
 * @param outType the output type to return. Note that only floating point
 *                types are currently supported.
 */
public Scalar mean(DType outType) {
  return reduce(ReductionAggregation.mean(), outType);
}

/**
 * Returns the variance of all values in the column, returning a
 * FLOAT64 scalar unless the column type is FLOAT32, in which case a FLOAT32
 * scalar is returned. Null values are skipped.
 */
public Scalar variance() {
  DType outType = DType.FLOAT64;
  if (type.equals(DType.FLOAT32)) {
    outType = type;
  }
  return variance(outType);
}

/**
 * Returns the variance of all values in the column, returning a
 * scalar of the specified type.
 * Null values are skipped.
 * @param outType the output type to return. Note that only floating point
 *                types are currently supported.
 */
public Scalar variance(DType outType) {
  return reduce(ReductionAggregation.variance(), outType);
}

/**
 * Returns the sample standard deviation of all values in the column,
 * returning a FLOAT64 scalar unless the column type is FLOAT32, in which
 * case a FLOAT32 scalar is returned. Nulls are not counted as an element
 * of the column when calculating the standard deviation.
 */
public Scalar standardDeviation() {
  DType outType = DType.FLOAT64;
  if (type.equals(DType.FLOAT32)) {
    outType = type;
  }
  return standardDeviation(outType);
}

/**
 * Returns the sample standard deviation of all values in the column,
 * returning a scalar of the specified type. Nulls are not counted as
 * an element of the column when calculating the standard deviation.
 * @param outType the output type to return. Note that only floating point
 *                types are currently supported.
 */
public Scalar standardDeviation(DType outType) {
  return reduce(ReductionAggregation.standardDeviation(), outType);
}

/**
 * Returns a boolean scalar that is true if any of the elements in
 * the column are true or non-zero, otherwise false.
 * Null values are skipped.
 */
public Scalar any() {
  return any(DType.BOOL8);
}

/**
 * Returns a scalar that is true or 1, depending on the specified type,
 * if any of the elements in the column are true or non-zero,
 * otherwise false or 0.
 * Null values are skipped.
 */
public Scalar any(DType outType) {
  return reduce(ReductionAggregation.any(), outType);
}

/**
 * Returns a boolean scalar that is true if all of the elements in
 * the column are true or non-zero, otherwise false.
 * Null values are skipped.
 */
public Scalar all() {
  return all(DType.BOOL8);
}

/**
 * Returns a scalar that is true or 1, depending on the specified type,
 * if all of the elements in the column are true or non-zero,
 * otherwise false or 0.
 * Null values are skipped.
 * @deprecated the only output type supported is BOOL8.
 */
@Deprecated
public Scalar all(DType outType) {
  return reduce(ReductionAggregation.all(), outType);
}

/**
 * Computes the reduction of the values in all rows of a column.
 * Overflows in reductions are not detected. Specifying a higher precision
 * output type may prevent overflow. Only the MIN and MAX ops are
 * supported for reduction of non-arithmetic types (TIMESTAMP...).
 * The null values are skipped for the operation.
 * @param aggregation The reduction aggregation to perform
 * @return The scalar result of the reduction operation. If the column is
 * empty or the reduction operation fails then the
 * {@link Scalar#isValid()} method of the result will return false.
 */
public Scalar reduce(ReductionAggregation aggregation) {
  return reduce(aggregation, type);
}

/**
 * Computes the reduction of the values in all rows of a column.
 * Overflows in reductions are not detected. Specifying a higher precision
 * output type may prevent overflow. Only the MIN and MAX ops are
 * supported for reduction of non-arithmetic types (TIMESTAMP...).
 * The null values are skipped for the operation.
 * @param aggregation The reduction aggregation to perform
 * @param outType The type of scalar value to return. Not all output types are supported
 *                by all aggregation operations.
 * @return The scalar result of the reduction operation. If the column is
 * empty or the reduction operation fails then the
 * {@link Scalar#isValid()} method of the result will return false.
 */
public Scalar reduce(ReductionAggregation aggregation, DType outType) {
  long nativeId = aggregation.createNativeInstance();
  try {
    return new Scalar(outType, reduce(getNativeView(), nativeId,
        outType.typeId.getNativeId(), outType.getScale()));
  } finally {
    Aggregation.close(nativeId);
  }
}

/**
 * Do a segmented reduce where the offsets column indicates which groups in this to combine.
 * The output type is the same as the input type.
 * @param offsets an INT32 column with no nulls.
 * @param aggregation the aggregation to do
 * @return the result.
 */
public ColumnVector segmentedReduce(ColumnView offsets,
    SegmentedReductionAggregation aggregation) {
  return segmentedReduce(offsets, aggregation, NullPolicy.EXCLUDE, type);
}

/**
 * Do a segmented reduce where the offsets column indicates which groups in this to combine.
 * @param offsets an INT32 column with no nulls.
 * @param aggregation the aggregation to do
 * @param outType the output data type.
 * @return the result.
 */
public ColumnVector segmentedReduce(ColumnView offsets,
    SegmentedReductionAggregation aggregation, DType outType) {
  return segmentedReduce(offsets, aggregation, NullPolicy.EXCLUDE, outType);
}

/**
 * Do a segmented reduce where the offsets column indicates which groups in this to combine.
 * @param offsets an INT32 column with no nulls.
 * @param aggregation the aggregation to do
 * @param nullPolicy the null policy.
 * @param outType the output data type.
 * @return the result.
 */
public ColumnVector segmentedReduce(ColumnView offsets,
    SegmentedReductionAggregation aggregation, NullPolicy nullPolicy, DType outType) {
  long nativeId = aggregation.createNativeInstance();
  try {
    return new ColumnVector(segmentedReduce(getNativeView(), offsets.getNativeView(), nativeId,
        nullPolicy.includeNulls, outType.typeId.getNativeId(), outType.getScale()));
  } finally {
    Aggregation.close(nativeId);
  }
}

/**
 * Segmented gather of the elements within a list element in each row of a list column.
 * For each list, assuming its size is N, the valid indices of the gather map range
 * in [-N, N). Out of bound indices refer to null.
 * @param gatherMap ListColumnView carrying lists of integral indices which map the
 * elements in each row's list of the source column to rows of lists in the result column.
 * @return the result.
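 *
 * A sketch of segmented gather (editor's example; assumes the fromLists factory with
 * HostColumnVector list/basic types, and java.util.Arrays):
 * <pre>{@code
 * HostColumnVector.DataType listOfInts = new HostColumnVector.ListType(true,
 *     new HostColumnVector.BasicType(true, DType.INT32));
 * try (ColumnVector lists = ColumnVector.fromLists(listOfInts,
 *          Arrays.asList(10, 20, 30), Arrays.asList(40, 50));
 *      ColumnVector map = ColumnVector.fromLists(listOfInts,
 *          Arrays.asList(2, 0), Arrays.asList(1));
 *      ColumnVector out = lists.segmentedGather(map)) {
 *   // out holds {{30, 10}, {50}}
 * }
 * }</pre>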
*/ public ColumnVector segmentedGather(ColumnView gatherMap) { return segmentedGather(gatherMap, OutOfBoundsPolicy.NULLIFY); } /** * Segmented gather of the elements within a list element in each row of a list column. * @param gatherMap ListColumnView carrying lists of integral indices which maps the * element in list of each row in the source columns to rows of lists in the result columns. * @param policy OutOfBoundsPolicy, `DONT_CHECK` leads to undefined behaviour; `NULLIFY` * replaces out of bounds with null. * @return the result. */ public ColumnVector segmentedGather(ColumnView gatherMap, OutOfBoundsPolicy policy) { return new ColumnVector(segmentedGather(getNativeView(), gatherMap.getNativeView(), policy.equals(OutOfBoundsPolicy.NULLIFY))); } /** * Do a reduction on the values in a list. The output type will be the type of the data column * of this list. * @param aggregation the aggregation to perform */ public ColumnVector listReduce(SegmentedReductionAggregation aggregation) { if (!getType().equals(DType.LIST)) { throw new IllegalArgumentException("listReduce only works on list types"); } try (ColumnView offsets = getListOffsetsView(); ColumnView data = getChildColumnView(0)) { return data.segmentedReduce(offsets, aggregation); } } /** * Do a reduction on the values in a list. * @param aggregation the aggregation to perform * @param outType the type of the output. Typically, this should match with the child type * of the list. */ public ColumnVector listReduce(SegmentedReductionAggregation aggregation, DType outType) { return listReduce(aggregation, NullPolicy.EXCLUDE, outType); } /** * Do a reduction on the values in a list. * @param aggregation the aggregation to perform * @param nullPolicy should nulls be included or excluded from the aggregation. * @param outType the type of the output. Typically, this should match with the child type * of the list. */ public ColumnVector listReduce(SegmentedReductionAggregation aggregation, NullPolicy nullPolicy, DType outType) { if (!getType().equals(DType.LIST)) { throw new IllegalArgumentException("listReduce only works on list types"); } try (ColumnView offsets = getListOffsetsView(); ColumnView data = getChildColumnView(0)) { return data.segmentedReduce(offsets, aggregation, nullPolicy, outType); } } /** * Calculate various percentiles of this ColumnVector, which must contain centroids produced by * a t-digest aggregation. * * @param percentiles Required percentiles [0,1] * @return Column containing the approximate percentile values as a list of doubles, in * the same order as the input percentiles */ public final ColumnVector approxPercentile(double[] percentiles) { try (ColumnVector cv = ColumnVector.fromDoubles(percentiles)) { return approxPercentile(cv); } } /** * Calculate various percentiles of this ColumnVector, which must contain centroids produced by * a t-digest aggregation. * * @param percentiles Column containing percentiles [0,1] * @return Column containing the approximate percentile values as a list of doubles, in * the same order as the input percentiles */ public final ColumnVector approxPercentile(ColumnVector percentiles) { return new ColumnVector(approxPercentile(getNativeView(), percentiles.getNativeView())); } /** * Calculate various quantiles of this ColumnVector. It is assumed that this is already sorted * in the desired order. 
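 *
 * Example sketch (editor's illustration; assumes the fromDoubles factory):
 * <pre>{@code
 * try (ColumnVector sorted = ColumnVector.fromDoubles(1, 2, 3, 4);
 *      ColumnVector q = sorted.quantile(QuantileMethod.LINEAR, new double[]{0.5})) {
 *   // q holds a single row with the interpolated median, 2.5
 * }
 * }</pre>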
 * @param method the method used to calculate the quantiles
 * @param quantiles the quantile values [0,1]
 * @return Column containing the requested quantiles as doubles, in
 *         the same order as the input quantiles
 */
public final ColumnVector quantile(QuantileMethod method, double[] quantiles) {
  return new ColumnVector(quantile(getNativeView(), method.nativeId, quantiles));
}

/**
 * This function aggregates values in a window around each element i of the input
 * column. Please refer to WindowOptions for the various options that can be passed.
 * Note: Only rows-based windows are supported.
 * @param op the operation to perform.
 * @param options various window function arguments.
 * @return Column containing the aggregate function result.
 * @throws IllegalArgumentException if an unsupported window specification
 *         (i.e. other than {@link WindowOptions.FrameType#ROWS}) is used.
 */
public final ColumnVector rollingWindow(RollingAggregation op, WindowOptions options) {
  // Check that only row-based windows are used.
  if (!options.getFrameType().equals(WindowOptions.FrameType.ROWS)) {
    throw new IllegalArgumentException("Expected ROWS-based window specification. " +
        "Unexpected window type: " + options.getFrameType());
  }

  long nativePtr = op.createNativeInstance();
  try {
    Scalar p = options.getPrecedingScalar();
    Scalar f = options.getFollowingScalar();
    return new ColumnVector(
        rollingWindow(this.getNativeView(), op.getDefaultOutput(), options.getMinPeriods(),
            nativePtr,
            p == null || !p.isValid() ? 0 : p.getInt(),
            f == null || !f.isValid() ? 0 : f.getInt(),
            options.getPrecedingCol() == null ? 0 : options.getPrecedingCol().getNativeView(),
            options.getFollowingCol() == null ? 0 : options.getFollowingCol().getNativeView()));
  } finally {
    Aggregation.close(nativePtr);
  }
}

/**
 * Compute the prefix sum (aka cumulative sum) of the values in this column.
 * This is just a convenience method for an inclusive scan with a SUM aggregation.
 */
public final ColumnVector prefixSum() {
  return scan(ScanAggregation.sum());
}

/**
 * Computes a scan for a column. This is very similar to a running window on the column.
 * @param aggregation the aggregation to perform
 * @param scanType should the scan be inclusive, include the current row, or exclusive.
 * @param nullPolicy how should nulls be treated. Note that some aggregations also include a
 *                   null policy too. Currently none of those aggregations are supported so
 *                   it is undefined how they would interact with each other.
 */
public final ColumnVector scan(ScanAggregation aggregation, ScanType scanType,
    NullPolicy nullPolicy) {
  long nativeId = aggregation.createNativeInstance();
  try {
    return new ColumnVector(scan(getNativeView(), nativeId,
        scanType.isInclusive, nullPolicy.includeNulls));
  } finally {
    Aggregation.close(nativeId);
  }
}

/**
 * Computes a scan for a column that excludes nulls.
 * @param aggregation the aggregation to perform
 * @param scanType should the scan be inclusive, include the current row, or exclusive.
 */
public final ColumnVector scan(ScanAggregation aggregation, ScanType scanType) {
  return scan(aggregation, scanType, NullPolicy.EXCLUDE);
}

/**
 * Computes an inclusive scan for a column that excludes nulls.
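 *
 * Example sketch (editor's illustration; assumes the fromInts factory):
 * <pre>{@code
 * try (ColumnVector col = ColumnVector.fromInts(1, 2, 3);
 *      ColumnVector running = col.scan(ScanAggregation.sum())) {
 *   // running holds {1, 3, 6}
 * }
 * }</pre>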
* @param aggregation the aggregation to perform */ public final ColumnVector scan(ScanAggregation aggregation) { return scan(aggregation, ScanType.INCLUSIVE, NullPolicy.EXCLUDE); } ///////////////////////////////////////////////////////////////////////////// // LOGICAL ///////////////////////////////////////////////////////////////////////////// /** * Returns a vector of the logical `not` of each value in the input * column (this) */ public final ColumnVector not() { return unaryOp(UnaryOp.NOT); } ///////////////////////////////////////////////////////////////////////////// // SEARCH ///////////////////////////////////////////////////////////////////////////// /** * Find if the `needle` is present in this col * * example: * * Single Column: * idx 0 1 2 3 4 * col = { 10, 20, 20, 30, 50 } * Scalar: * value = { 20 } * result = true * * @param needle * @return true if needle is present else false */ public boolean contains(Scalar needle) { return containsScalar(getNativeView(), needle.getScalarHandle()); } /** * Returns a new column of {@link DType#BOOL8} elements having the same size as this column, * each row value is true if the corresponding entry in this column is contained in the * given searchSpace column and false if it is not. * The caller will be responsible for the lifecycle of the new vector. * * example: * * col = { 10, 20, 30, 40, 50 } * searchSpace = { 20, 40, 60, 80 } * * result = { false, true, false, true, false } * * @param searchSpace * @return A new ColumnVector of type {@link DType#BOOL8} */ public final ColumnVector contains(ColumnView searchSpace) { return new ColumnVector(containsVector(getNativeView(), searchSpace.getNativeView())); } /** * Returns a column of strings where, for each string row in the input, * the first character after spaces is modified to upper-case, * while all the remaining characters in a word are modified to lower-case. * * Any null string entries return corresponding null output column entries */ public final ColumnVector toTitle() { assert type.equals(DType.STRING); return new ColumnVector(title(getNativeView())); } /** * Returns a column of capitalized strings. * * If the `delimiters` is an empty string, then only the first character of each * row is capitalized. Otherwise, a non-delimiter character is capitalized after * any delimiter character is found. * * Example: * input = ["tesT1", "a Test", "Another Test", "a\tb"]; * delimiters = "" * output is ["Test1", "A test", "Another test", "A\tb"] * delimiters = " " * output is ["Test1", "A Test", "Another Test", "A\tb"] * * Any null string entries return corresponding null output column entries. * * @param delimiters Used if identifying words to capitalize. Should not be null. * @return a column of capitalized strings. Users should close the returned column. */ public final ColumnVector capitalize(Scalar delimiters) { if (DType.STRING.equals(type) && DType.STRING.equals(delimiters.getType())) { return new ColumnVector(capitalize(getNativeView(), delimiters.getScalarHandle())); } throw new IllegalArgumentException("Both input column and delimiters scalar should be" + " string type. But got column: " + type + ", scalar: " + delimiters.getType()); } /** * Concatenates all strings in the column into one new string delimited * by an optional separator string. * * This returns a column with one string. Any null entries are ignored unless * the narep parameter specifies a replacement string (not a null value). * * @param separator what to insert to separate each row. 
* @param narep what to replace nulls with * @return a ColumnVector with a single string in it. */ public final ColumnVector joinStrings(Scalar separator, Scalar narep) { if (DType.STRING.equals(type) && DType.STRING.equals(separator.getType()) && DType.STRING.equals(narep.getType())) { return new ColumnVector(joinStrings(getNativeView(), separator.getScalarHandle(), narep.getScalarHandle())); } throw new IllegalArgumentException("The column, separator, and narep all need to be STRINGs"); } ///////////////////////////////////////////////////////////////////////////// // TYPE CAST ///////////////////////////////////////////////////////////////////////////// /** * Generic method to cast ColumnVector * When casting from a Date, Timestamp, or Boolean to a numerical type the underlying numerical * representation of the data will be used for the cast. * * For Strings: * Casting strings from/to timestamp isn't supported atm. * Please look at {@link ColumnVector#asTimestamp(DType, String)} * and {@link ColumnVector#asStrings(String)} for casting string to timestamp when the format * is known * * Float values when converted to String could be different from the expected default behavior in * Java * e.g. * 12.3 => "12.30000019" instead of "12.3" * Double.POSITIVE_INFINITY => "Inf" instead of "INFINITY" * Double.NEGATIVE_INFINITY => "-Inf" instead of "-INFINITY" * * @param type type of the resulting ColumnVector * @return A new vector allocated on the GPU */ public ColumnVector castTo(DType type) { return new ColumnVector(castTo(getNativeView(), type.typeId.getNativeId(), type.getScale())); } /** * This method takes in a nested type and replaces its children with the given views * Note: Make sure the numbers of rows in the leaf node are the same as the child replacing it * otherwise the list can point to elements outside of the column values. * * Note: this method returns a ColumnView that won't live past the ColumnVector that it's * pointing to. 
* * Ex: List<Int> list = col{{1,3}, {9,3,5}} * * validNewChild = col{8, 3, 9, 2, 0} * * list.replaceChildrenWithViews(1, validNewChild) => col{{8, 3}, {9, 2, 0}} * * invalidNewChild = col{3, 2} * list.replaceChildrenWithViews(1, invalidNewChild) => col{{3, 2}, {invalid, invalid, invalid}} * * invalidNewChild = col{8, 3, 9, 2, 0, 0, 7} * list.replaceChildrenWithViews(1, invalidNewChild) => col{{8, 3}, {9, 2, 0}} // undefined result */ public ColumnView replaceChildrenWithViews(int[] indices, ColumnView[] views) { assert (type.isNestedType()); assert (indices.length == views.length); if (type == DType.LIST) { assert (indices.length == 1); } if (indices.length != views.length) { throw new IllegalArgumentException("The indices size and children size should match"); } Map<Integer, ColumnView> map = new HashMap<>(); IntStream.range(0, indices.length).forEach(index -> { if (map.containsKey(indices[index])) { throw new IllegalArgumentException("Duplicate mapping found for replacing child index"); } map.put(indices[index], views[index]); }); List<ColumnView> newChildren = new ArrayList<>(getNumChildren()); List<ColumnView> toClose = new ArrayList<>(getNumChildren()); try { IntStream.range(0, getNumChildren()).forEach(i -> { ColumnView view = map.remove(i); ColumnView child = getChildColumnView(i); toClose.add(child); if (view == null) { newChildren.add(child); } else { if (child.getRowCount() != view.getRowCount()) { throw new IllegalArgumentException("Child row count doesn't match the old child"); } newChildren.add(view); } }); if (!map.isEmpty()) { throw new IllegalArgumentException("One or more invalid child indices passed to be " + "replaced"); } return new ColumnView(type, getRowCount(), Optional.of(getNullCount()), getValid(), getOffsets(), newChildren.stream().toArray(n -> new ColumnView[n])); } finally { for (ColumnView columnView: toClose) { columnView.close(); } } } /** * This method takes in a list and returns a new list with the leaf node replaced with the given * view. Make sure the numbers of rows in the leaf node are the same as the child replacing it * otherwise the list can point to elements outside of the column values. * * Note: this method returns a ColumnView that won't live past the ColumnVector that it's * pointing to. * * Ex: List<Int> list = col{{1,3}, {9,3,5}} * * validNewChild = col{8, 3, 9, 2, 0} * * list.replaceChildrenWithViews(1, validNewChild) => col{{8, 3}, {9, 2, 0}} * * invalidNewChild = col{3, 2} * list.replaceChildrenWithViews(1, invalidNewChild) => * col{{3, 2}, {invalid, invalid, invalid}} throws an exception * * invalidNewChild = col{8, 3, 9, 2, 0, 0, 7} * list.replaceChildrenWithViews(1, invalidNewChild) => * col{{8, 3}, {9, 2, 0}} throws an exception */ public ColumnView replaceListChild(ColumnView child) { assert(type == DType.LIST); return replaceChildrenWithViews(new int[]{0}, new ColumnView[]{child}); } /** * Zero-copy cast between types with the same underlying representation. * * Similar to reinterpret_cast or bit_cast in C++. This will essentially take the underlying data * and update the metadata to reflect a new type. Not all types are supported the width of the * types must match. * @param type the type you want to go to. * @return a ColumnView that cannot outlive the Column that owns the actual data it points to. * @deprecated this has changed to bit_cast in C++ so use that name instead */ @Deprecated public ColumnView logicalCastTo(DType type) { return bitCastTo(type); } /** * Zero-copy cast between types with the same underlying length. 
 *
 * Similar to bit_cast in C++. This will take the underlying data and create new metadata
 * so it is interpreted as a new type. Not all types are supported: the widths of the
 * types must match.
 * @param type the type you want to go to.
 * @return a ColumnView that cannot outlive the Column that owns the actual data it points to.
 */
public ColumnView bitCastTo(DType type) {
  return new ColumnView(bitCastTo(getNativeView(),
      type.typeId.getNativeId(), type.getScale()));
}

/**
 * Cast to Byte - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to byte.
 * When casting from a Date, Timestamp, or Boolean to a byte type the underlying numerical
 * representation of the data will be used for the cast.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asBytes() {
  return castTo(DType.INT8);
}

/**
 * Cast to list of bytes
 * This method converts the rows provided by the ColumnVector and casts each row to a list of
 * bytes with endianness reversed. Numeric and string types supported, but not timestamps.
 *
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asByteList() {
  return new ColumnVector(byteListCast(getNativeView(), true));
}

/**
 * Cast to list of bytes
 * This method converts the rows provided by the ColumnVector and casts each row to a list
 * of bytes. Numeric and string types supported, but not timestamps.
 *
 * @param config Flips the byte order (endianness) if true, retains byte order otherwise
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asByteList(boolean config) {
  return new ColumnVector(byteListCast(getNativeView(), config));
}

/**
 * Cast to unsigned Byte - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to byte.
 * When casting from a Date, Timestamp, or Boolean to a byte type the underlying numerical
 * representation of the data will be used for the cast.
 * <p>
 * Java does not have an unsigned byte type, so properly decoding these values
 * will require extra steps on the part of the application. See
 * {@link Byte#toUnsignedInt(byte)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedBytes() {
  return castTo(DType.UINT8);
}

/**
 * Cast to Short - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to short.
 * When casting from a Date, Timestamp, or Boolean to a short type the underlying numerical
 * representation of the data will be used for the cast.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asShorts() {
  return castTo(DType.INT16);
}

/**
 * Cast to unsigned Short - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to short.
 * When casting from a Date, Timestamp, or Boolean to a short type the underlying numerical
 * representation of the data will be used for the cast.
 * <p>
 * Java does not have an unsigned short type, so properly decoding these values
 * will require extra steps on the part of the application. See
 * {@link Short#toUnsignedInt(short)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedShorts() {
  return castTo(DType.UINT16);
}

/**
 * Cast to Int - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to int.
 * When casting from a Date, Timestamp, or Boolean to an int type the underlying numerical
 * representation of the data will be used for the cast.
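 *
 * Cast sketch (editor's example; assumes the fromBoxedBooleans factory):
 * <pre>{@code
 * try (ColumnVector bools = ColumnVector.fromBoxedBooleans(true, false, null);
 *      ColumnVector ints = bools.asInts()) {
 *   // ints holds {1, 0, null}: the underlying numeric representation is kept
 * }
 * }</pre>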
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asInts() {
  return castTo(DType.INT32);
}

/**
 * Cast to unsigned Int - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to int.
 * When casting from a Date, Timestamp, or Boolean to an int type the underlying numerical
 * representation of the data will be used for the cast.
 * <p>
 * Java does not have an unsigned int type, so properly decoding these values
 * will require extra steps on the part of the application. See
 * {@link Integer#toUnsignedLong(int)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedInts() {
  return castTo(DType.UINT32);
}

/**
 * Cast to Long - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to long.
 * When casting from a Date, Timestamp, or Boolean to a long type the underlying numerical
 * representation of the data will be used for the cast.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asLongs() {
  return castTo(DType.INT64);
}

/**
 * Cast to unsigned Long - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to long.
 * When casting from a Date, Timestamp, or Boolean to a long type the underlying numerical
 * representation of the data will be used for the cast.
 * <p>
 * Java does not have an unsigned long type, so properly decoding these values
 * will require extra steps on the part of the application. See
 * {@link Long#toUnsignedString(long)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedLongs() {
  return castTo(DType.UINT64);
}

/**
 * Cast to Float - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to float.
 * When casting from a Date, Timestamp, or Boolean to a float type the underlying numerical
 * representation of the data will be used for the cast.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asFloats() {
  return castTo(DType.FLOAT32);
}

/**
 * Cast to Double - ColumnVector
 * This method takes the value provided by the ColumnVector and casts to double.
 * When casting from a Date, Timestamp, or Boolean to a double type the underlying numerical
 * representation of the data will be used for the cast.
* @return A new vector allocated on the GPU */ public final ColumnVector asDoubles() { return castTo(DType.FLOAT64); } /** * Cast to TIMESTAMP_DAYS - ColumnVector * This method takes the value provided by the ColumnVector and casts to TIMESTAMP_DAYS * @return A new vector allocated on the GPU */ public final ColumnVector asTimestampDays() { if (type.equals(DType.STRING)) { return asTimestamp(DType.TIMESTAMP_DAYS, "%Y-%m-%dT%H:%M:%SZ%f"); } return castTo(DType.TIMESTAMP_DAYS); } /** * Cast to TIMESTAMP_DAYS - ColumnVector * This method takes the string value provided by the ColumnVector and casts to TIMESTAMP_DAYS * @param format timestamp string format specifier, ignored if the column type is not string * @return A new vector allocated on the GPU */ public final ColumnVector asTimestampDays(String format) { assert type.equals(DType.STRING) : "A column of type string is required when using a format string"; return asTimestamp(DType.TIMESTAMP_DAYS, format); } /** * Cast to TIMESTAMP_SECONDS - ColumnVector * This method takes the value provided by the ColumnVector and casts to TIMESTAMP_SECONDS * @return A new vector allocated on the GPU */ public final ColumnVector asTimestampSeconds() { if (type.equals(DType.STRING)) { return asTimestamp(DType.TIMESTAMP_SECONDS, "%Y-%m-%dT%H:%M:%SZ%f"); } return castTo(DType.TIMESTAMP_SECONDS); } /** * Cast to TIMESTAMP_SECONDS - ColumnVector * This method takes the string value provided by the ColumnVector and casts to TIMESTAMP_SECONDS * @param format timestamp string format specifier, ignored if the column type is not string * @return A new vector allocated on the GPU */ public final ColumnVector asTimestampSeconds(String format) { assert type.equals(DType.STRING) : "A column of type string is required when using a format string"; return asTimestamp(DType.TIMESTAMP_SECONDS, format); } /** * Cast to TIMESTAMP_MICROSECONDS - ColumnVector * This method takes the value provided by the ColumnVector and casts to TIMESTAMP_MICROSECONDS * @return A new vector allocated on the GPU */ public final ColumnVector asTimestampMicroseconds() { if (type.equals(DType.STRING)) { return asTimestamp(DType.TIMESTAMP_MICROSECONDS, "%Y-%m-%dT%H:%M:%SZ%f"); } return castTo(DType.TIMESTAMP_MICROSECONDS); } /** * Cast to TIMESTAMP_MICROSECONDS - ColumnVector * This method takes the string value provided by the ColumnVector and casts to TIMESTAMP_MICROSECONDS * @param format timestamp string format specifier, ignored if the column type is not string * @return A new vector allocated on the GPU */ public final ColumnVector asTimestampMicroseconds(String format) { assert type.equals(DType.STRING) : "A column of type string is required when using a format string"; return asTimestamp(DType.TIMESTAMP_MICROSECONDS, format); } /** * Cast to TIMESTAMP_MILLISECONDS - ColumnVector * This method takes the value provided by the ColumnVector and casts to TIMESTAMP_MILLISECONDS. * @return A new vector allocated on the GPU */ public final ColumnVector asTimestampMilliseconds() { if (type.equals(DType.STRING)) { return asTimestamp(DType.TIMESTAMP_MILLISECONDS, "%Y-%m-%dT%H:%M:%SZ%f"); } return castTo(DType.TIMESTAMP_MILLISECONDS); } /** * Cast to TIMESTAMP_MILLISECONDS - ColumnVector * This method takes the string value provided by the ColumnVector and casts to TIMESTAMP_MILLISECONDS. 
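 *
 * Parsing sketch (editor's example; assumes the fromStrings factory and an
 * illustrative format string):
 * <pre>{@code
 * try (ColumnVector s = ColumnVector.fromStrings("2020-01-01 12:34:56");
 *      ColumnVector ts = s.asTimestampMilliseconds("%Y-%m-%d %H:%M:%S")) {
 *   // ts holds the parsed value as TIMESTAMP_MILLISECONDS
 * }
 * }</pre>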
   * @param format timestamp string format specifier, ignored if the column type is not string
   * @return A new vector allocated on the GPU
   */
  public final ColumnVector asTimestampMilliseconds(String format) {
    assert type.equals(DType.STRING) : "A column of type string is required when using a format string";
    return asTimestamp(DType.TIMESTAMP_MILLISECONDS, format);
  }

  /**
   * Cast to TIMESTAMP_NANOSECONDS - ColumnVector
   * This method takes the value provided by the ColumnVector and casts to TIMESTAMP_NANOSECONDS.
   * @return A new vector allocated on the GPU
   */
  public final ColumnVector asTimestampNanoseconds() {
    if (type.equals(DType.STRING)) {
      return asTimestamp(DType.TIMESTAMP_NANOSECONDS, "%Y-%m-%dT%H:%M:%SZ%9f");
    }
    return castTo(DType.TIMESTAMP_NANOSECONDS);
  }

  /**
   * Cast to TIMESTAMP_NANOSECONDS - ColumnVector
   * This method takes the string value provided by the ColumnVector and casts to TIMESTAMP_NANOSECONDS.
   * @param format timestamp string format specifier, ignored if the column type is not string
   * @return A new vector allocated on the GPU
   */
  public final ColumnVector asTimestampNanoseconds(String format) {
    assert type.equals(DType.STRING) : "A column of type string is required when using a format string";
    return asTimestamp(DType.TIMESTAMP_NANOSECONDS, format);
  }

  /**
   * Parse a string to a timestamp. Strings that fail to parse will default to 0, corresponding
   * to 1970-01-01 00:00:00.000.
   * @param timestampType timestamp DType that includes the time unit to parse the timestamp into.
   * @param format strptime format specifier string of the timestamp. Used to parse and convert
   *               the timestamp with. Supports %Y,%y,%m,%d,%H,%I,%p,%M,%S,%f,%z format specifiers.
   *               See https://github.com/rapidsai/custrings/blob/branch-0.10/docs/source/datetime.md
   *               for full parsing format specification and documentation.
   * @return A new ColumnVector containing the long representations of the timestamps in the
   *         original column vector.
   */
  public final ColumnVector asTimestamp(DType timestampType, String format) {
    assert type.equals(DType.STRING) : "A column of type string " +
        "is required for .to_timestamp() operation";
    assert format != null : "Format string may not be NULL";
    assert timestampType.isTimestampType() : "unsupported conversion to non-timestamp DType";
    // Only nativeID is passed in the below function as timestamp type does not have `scale`.
    return new ColumnVector(stringTimestampToTimestamp(getNativeView(),
        timestampType.typeId.getNativeId(), format));
  }

  /**
   * Cast to Strings.
   * Negative timestamp values are not currently supported and will yield undesired results. See
   * github issue https://github.com/rapidsai/cudf/issues/3116 for details.
   * For timestamps, the following formats are used:
   * {@link DType#TIMESTAMP_DAYS} - "%Y-%m-%d"
   * {@link DType#TIMESTAMP_SECONDS} - "%Y-%m-%d %H:%M:%S"
   * {@link DType#TIMESTAMP_MICROSECONDS} - "%Y-%m-%d %H:%M:%S.%f"
   * {@link DType#TIMESTAMP_MILLISECONDS} - "%Y-%m-%d %H:%M:%S.%f"
   * {@link DType#TIMESTAMP_NANOSECONDS} - "%Y-%m-%d %H:%M:%S.%f"
   *
   * @return A new vector allocated on the GPU.
   */
  public final ColumnVector asStrings() {
    switch(type.typeId) {
      case TIMESTAMP_SECONDS:
        return asStrings("%Y-%m-%d %H:%M:%S");
      case TIMESTAMP_DAYS:
        return asStrings("%Y-%m-%d");
      case TIMESTAMP_MICROSECONDS:
      case TIMESTAMP_MILLISECONDS:
      case TIMESTAMP_NANOSECONDS:
        return asStrings("%Y-%m-%d %H:%M:%S.%f");
      default:
        return castTo(DType.STRING);
    }
  }

  /**
   * Method to parse and convert a timestamp column vector to string column vector.
   * A unix timestamp is a long value representing how many units have elapsed since
   * 1970-01-01 00:00:00.000, in either the positive or negative direction.
   * No checking is done for invalid formats or invalid timestamp units.
   * Negative timestamp values are not currently supported and will yield undesired results. See
   * github issue https://github.com/rapidsai/cudf/issues/3116 for details.
   *
   * @param format - strftime format specifier string of the timestamp. It is used to format
   *               the timestamp. Supports %m,%d,%H,%M,%S,%y,%Y,%f format specifiers.
   *               %d Day of the month: 01-31
   *               %m Month of the year: 01-12
   *               %y Year without century: 00-99
   *               %Y Year with century: 0001-9999
   *               %H 24-hour of the day: 00-23
   *               %M Minute of the hour: 00-59
   *               %S Second of the minute: 00-59
   *               %f 6-digit microsecond: 000000-999999
   *               See https://github.com/rapidsai/custrings/blob/branch-0.10/docs/source/datetime.md
   *
   * Reported bugs:
   * https://github.com/rapidsai/cudf/issues/4160; after the bug is fixed this method should
   * also support
   *               %I 12-hour of the day: 01-12
   *               %p Only 'AM', 'PM'
   *               %j day of the year
   *
   * @return A new vector allocated on the GPU
   */
  public final ColumnVector asStrings(String format) {
    assert type.isTimestampType() : "unsupported conversion from non-timestamp DType";
    assert format != null && !format.isEmpty() : "Format string may not be NULL or empty";
    return new ColumnVector(timestampToStringTimestamp(this.getNativeView(), format));
  }

  /**
   * Verifies that a string column can be parsed to timestamps using the provided format
   * pattern.
   *
   * The format pattern can include the following specifiers: "%Y,%y,%m,%d,%H,%I,%p,%M,%S,%f,%z"
   *
   * | Specifier | Description |
   * | :-------: | ----------- |
   * | \%d | Day of the month: 01-31 |
   * | \%m | Month of the year: 01-12 |
   * | \%y | Year without century: 00-99 |
   * | \%Y | Year with century: 0001-9999 |
   * | \%H | 24-hour of the day: 00-23 |
   * | \%I | 12-hour of the day: 01-12 |
   * | \%M | Minute of the hour: 00-59 |
   * | \%S | Second of the minute: 00-59 |
   * | \%f | 6-digit microsecond: 000000-999999 |
   * | \%z | UTC offset with format ±HHMM Example +0500 |
   * | \%j | Day of the year: 001-366 |
   * | \%p | Only 'AM', 'PM' or 'am', 'pm' are recognized |
   *
   * Other specifiers are not currently supported.
   * The "%f" supports a precision value to read the numeric digits. Specify the
   * precision with a single integer value (1-9) as follows:
   * use "%3f" for milliseconds, "%6f" for microseconds and "%9f" for nanoseconds.
   *
   * Any null string entry will result in a corresponding null row in the output column.
   *
   * This will return a column of type boolean where a `true` row indicates the corresponding
   * input string can be parsed correctly with the given format.
   *
   * @param format String specifying the timestamp format in strings.
   * @return New boolean ColumnVector.
   */
  public final ColumnVector isTimestamp(String format) {
    return new ColumnVector(isTimestamp(getNativeView(), format));
  }

  /////////////////////////////////////////////////////////////////////////////
  // LISTS
  /////////////////////////////////////////////////////////////////////////////

  /**
   * For each list in this column pull out the entry at the given index. If the entry would
   * go off the end of the list a NULL is returned instead.
   * @param index 0 based offset into the list. Negative values go backwards from the end of the
   *              list.
   * @return a new column of the values at those indexes.
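   *
   * Example (an illustrative sketch; assumes a LIST column of INT32 lists):
   * ```
   * cv = [[1, 2], [3], []]
   * r = cv.extractListElement(0)
   * r is now [1, 3, NULL]
   * ```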
   */
  public final ColumnVector extractListElement(int index) {
    assert type.equals(DType.LIST) : "A column of type LIST is required for .extractListElement()";
    return new ColumnVector(extractListElement(getNativeView(), index));
  }

  /**
   * For each list in this column pull out the entry at the corresponding index specified in
   * the index column. If the entry goes off the end of the list a NULL is returned instead.
   *
   * The index column should have the same row count as the list column.
   *
   * @param indices a column of 0 based offsets into the list. Negative values go backwards from
   *                the end of the list.
   * @return a new column of the values at those indexes.
   */
  public final ColumnVector extractListElement(ColumnView indices) {
    assert type.equals(DType.LIST) : "A column of type LIST is required for .extractListElement()";
    assert indices != null && DType.INT32.equals(indices.type)
        : "indices should be non-null and integer type";
    assert indices.getRowCount() == rows : "indices must have the same row count as the list column";
    return new ColumnVector(extractListElementV(getNativeView(), indices.getNativeView()));
  }

  /**
   * Create a new LIST column by copying elements from the current LIST column ignoring duplicates,
   * producing a LIST column in which each list contains only unique elements.
   *
   * The order of the output elements within each list is not guaranteed to be preserved as in the
   * input.
   *
   * @return A new LIST column having unique list elements.
   */
  public final ColumnVector dropListDuplicates() {
    return new ColumnVector(dropListDuplicates(getNativeView()));
  }

  /**
   * Given a LIST column in which each element is a struct containing a <key, value> pair, an
   * output LIST column is generated by copying elements of the current column in a way such that
   * if a list contains multiple elements having the same key then only the last element will be
   * copied.
   *
   * @return A new LIST column having list elements with unique keys.
   */
  public final ColumnVector dropListDuplicatesWithKeysValues() {
    return new ColumnVector(dropListDuplicatesWithKeysValues(getNativeView()));
  }

  /**
   * Flatten each list of lists into a single list.
   *
   * The column must have rows that are lists of lists.
   * Any row containing null list elements will result in a null output row.
   *
   * @return A new column vector containing the flattened result
   */
  public ColumnVector flattenLists() {
    return flattenLists(false);
  }

  /**
   * Flatten each list of lists into a single list.
   *
   * The column must have rows that are lists of lists.
   *
   * @param ignoreNull Whether to ignore null list elements in the input column from the operation,
   *                   or any row containing null list elements will result in a null output row
   * @return A new column vector containing the flattened result
   */
  public ColumnVector flattenLists(boolean ignoreNull) {
    return new ColumnVector(flattenLists(getNativeView(), ignoreNull));
  }

  /////////////////////////////////////////////////////////////////////////////
  // STRINGS
  /////////////////////////////////////////////////////////////////////////////

  /**
   * Copy the current column to a new column; each string or list of the output column will have
   * its characters or elements in reverse order.
   *
   * @return A new column with lists or strings having reverse order.
   */
  public final ColumnVector reverseStringsOrLists() {
    assert type.equals(DType.STRING) || type.equals(DType.LIST) :
        "A column of type string or list is required, actual: " + type;
    return new ColumnVector(reverseStringsOrLists(getNativeView()));
  }

  /**
   * Convert a string to upper case.
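   *
   * Example (an illustrative sketch):
   * ```
   * cv = ["tesT", "Cudf"]
   * r = cv.upper()
   * r is now ["TEST", "CUDF"]
   * ```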
   */
  public final ColumnVector upper() {
    assert type.equals(DType.STRING) : "A column of type string is required for .upper() operation";
    return new ColumnVector(upperStrings(getNativeView()));
  }

  /**
   * Convert a string to lower case.
   */
  public final ColumnVector lower() {
    assert type.equals(DType.STRING) : "A column of type string is required for .lower() operation";
    return new ColumnVector(lowerStrings(getNativeView()));
  }

  /**
   * Locates the starting index of the first instance of the given string in each row of a column.
   * 0 indexing, returns -1 if the substring is not found. Overloading stringLocate to support
   * default values for start (0) and end index.
   * @param substring scalar containing the string to locate within each row.
   */
  public final ColumnVector stringLocate(Scalar substring) {
    return stringLocate(substring, 0);
  }

  /**
   * Locates the starting index of the first instance of the given string in each row of a column.
   * 0 indexing, returns -1 if the substring is not found. Overloading stringLocate to support
   * default value for end index (-1, the end of each string).
   * @param substring scalar containing the string to locate within each row.
   * @param start character index to start the search from (inclusive).
   */
  public final ColumnVector stringLocate(Scalar substring, int start) {
    return stringLocate(substring, start, -1);
  }

  /**
   * Locates the starting index of the first instance of the given string in each row of a column.
   * 0 indexing, returns -1 if the substring is not found. Can be configured to start or end
   * the search mid string.
   * @param substring scalar containing the string to locate within each row.
   * @param start character index to start the search from (inclusive).
   * @param end character index to end the search on (exclusive).
   */
  public final ColumnVector stringLocate(Scalar substring, int start, int end) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert substring != null : "target string may not be null";
    assert substring.getType().equals(DType.STRING) : "substring scalar must be a string scalar";
    assert start >= 0 : "start index must be a non-negative value";
    assert end >= start || end == -1 : "end index must be -1 or >= the start index";
    return new ColumnVector(substringLocate(getNativeView(), substring.getScalarHandle(),
        start, end));
  }

  /**
   * Returns a list of columns by splitting each string using the specified pattern. The number of
   * rows in the output columns will be the same as the input column. Null entries are added for a
   * row where split results have been exhausted. Null input entries result in all nulls in the
   * corresponding rows of the output columns.
   *
   * @param pattern UTF-8 encoded string identifying the split pattern for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   * @param splitByRegex a boolean flag indicating whether the input strings will be split by a
   *                     regular expression pattern or just by a string literal delimiter.
   * @return list of strings columns as a table.
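   *
   * Example (an illustrative sketch; literal-delimiter split):
   * ```
   * cv = ["a:b:c", "x:y"]
   * t = cv.stringSplit(":", -1, false)
   * t columns are ["a", "x"], ["b", "y"], ["c", NULL]
   * ```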
*/ @Deprecated public final Table stringSplit(String pattern, int limit, boolean splitByRegex) { if (splitByRegex) { return stringSplit(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), limit); } else { return stringSplit(pattern, limit); } } /** * Returns a list of columns by splitting each string using the specified regex program pattern. * The number of rows in the output columns will be the same as the input column. Null entries * are added for the rows where split results have been exhausted. Null input entries result in * all nulls in the corresponding rows of the output columns. * * @param regexProg the regex program with UTF-8 encoded string identifying the split pattern * for each input string. * @param limit the maximum size of the list resulting from splitting each input string, * or -1 for all possible splits. Note that limit = 0 (all possible splits without * trailing empty strings) and limit = 1 (no split at all) are not supported. * @return list of strings columns as a table. */ public final Table stringSplit(RegexProgram regexProg, int limit) { assert type.equals(DType.STRING) : "column type must be a String"; assert regexProg != null : "regex program is null"; assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported"; return new Table(stringSplitRe(this.getNativeView(), regexProg.pattern(), regexProg.combinedFlags(), regexProg.capture().nativeId, limit)); } /** * Returns a list of columns by splitting each string using the specified pattern. The number of * rows in the output columns will be the same as the input column. Null entries are added for a * row where split results have been exhausted. Null input entries result in all nulls in the * corresponding rows of the output columns. * * @param pattern UTF-8 encoded string identifying the split pattern for each input string. * @param splitByRegex a boolean flag indicating whether the input strings will be split by a * regular expression pattern or just by a string literal delimiter. * @return list of strings columns as a table. */ @Deprecated public final Table stringSplit(String pattern, boolean splitByRegex) { return stringSplit(pattern, -1, splitByRegex); } /** * Returns a list of columns by splitting each string using the specified string literal * delimiter. The number of rows in the output columns will be the same as the input column. * Null entries are added for a row where split results have been exhausted. Null input entries * result in all nulls in the corresponding rows of the output columns. * * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string. * @param limit the maximum size of the list resulting from splitting each input string, * or -1 for all possible splits. Note that limit = 0 (all possible splits without * trailing empty strings) and limit = 1 (no split at all) are not supported. * @return list of strings columns as a table. */ public final Table stringSplit(String delimiter, int limit) { assert type.equals(DType.STRING) : "column type must be a String"; assert delimiter != null : "delimiter is null"; assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported"; return new Table(stringSplit(this.getNativeView(), delimiter, limit)); } /** * Returns a list of columns by splitting each string using the specified string literal * delimiter. The number of rows in the output columns will be the same as the input column. * Null entries are added for a row where split results have been exhausted. 
   * Null input entries result in all nulls in the corresponding rows of the output columns.
   *
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @return list of strings columns as a table.
   */
  public final Table stringSplit(String delimiter) {
    return stringSplit(delimiter, -1);
  }

  /**
   * Returns a list of columns by splitting each string using the specified regex program pattern.
   * The number of rows in the output columns will be the same as the input column. Null entries
   * are added for the rows where split results have been exhausted. Null input entries result in
   * all nulls in the corresponding rows of the output columns.
   *
   * @param regexProg the regex program with UTF-8 encoded string identifying the split pattern
   *                  for each input string.
   * @return list of strings columns as a table.
   */
  public final Table stringSplit(RegexProgram regexProg) {
    return stringSplit(regexProg, -1);
  }

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified pattern.
   *
   * @param pattern UTF-8 encoded string identifying the split pattern for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   * @param splitByRegex a boolean flag indicating whether the input strings will be split by a
   *                     regular expression pattern or just by a string literal delimiter.
   * @return a LIST column of string elements.
   */
  @Deprecated
  public final ColumnVector stringSplitRecord(String pattern, int limit, boolean splitByRegex) {
    if (splitByRegex) {
      return stringSplitRecord(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), limit);
    } else {
      return stringSplitRecord(pattern, limit);
    }
  }

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified regex program pattern.
   *
   * @param regexProg the regex program with UTF-8 encoded string identifying the split pattern
   *                  for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   * @return a LIST column of string elements.
   */
  public final ColumnVector stringSplitRecord(RegexProgram regexProg, int limit) {
    assert type.equals(DType.STRING) : "column type must be String";
    assert regexProg != null : "regex program is null";
    assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported";
    return new ColumnVector(
        stringSplitRecordRe(this.getNativeView(), regexProg.pattern(), regexProg.combinedFlags(),
                            regexProg.capture().nativeId, limit));
  }

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified pattern.
   *
   * @param pattern UTF-8 encoded string identifying the split pattern for each input string.
   * @param splitByRegex a boolean flag indicating whether the input strings will be split by a
   *                     regular expression pattern or just by a string literal delimiter.
   * @return a LIST column of string elements.
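   *
   * Example (an illustrative sketch; literal-delimiter split):
   * ```
   * cv = ["a:b", "c"]
   * r = cv.stringSplitRecord(":", false)
   * r is now [["a", "b"], ["c"]]
   * ```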
   */
  @Deprecated
  public final ColumnVector stringSplitRecord(String pattern, boolean splitByRegex) {
    return stringSplitRecord(pattern, -1, splitByRegex);
  }

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified string literal delimiter.
   *
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   * @return a LIST column of string elements.
   */
  public final ColumnVector stringSplitRecord(String delimiter, int limit) {
    assert type.equals(DType.STRING) : "column type must be String";
    assert delimiter != null : "delimiter is null";
    assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported";
    return new ColumnVector(stringSplitRecord(this.getNativeView(), delimiter, limit));
  }

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified string literal delimiter.
   *
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @return a LIST column of string elements.
   */
  public final ColumnVector stringSplitRecord(String delimiter) {
    return stringSplitRecord(delimiter, -1);
  }

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified regex program pattern.
   *
   * @param regexProg the regex program with UTF-8 encoded string identifying the split pattern
   *                  for each input string.
   * @return a LIST column of string elements.
   */
  public final ColumnVector stringSplitRecord(RegexProgram regexProg) {
    return stringSplitRecord(regexProg, -1);
  }

  /**
   * Returns a new strings column that contains substrings of the strings in the provided column.
   * The character positions to retrieve in each string are `[start, <the string end>)`.
   *
   * @param start first character index to begin the substring (inclusive).
   */
  public final ColumnVector substring(int start) {
    assert type.equals(DType.STRING) : "column type must be a String";
    return new ColumnVector(substringS(getNativeView(), start));
  }

  /**
   * Returns a new strings column that contains substrings of the strings in the provided column.
   * 0-based indexing. If the stop position is past the end of a string, then the end of the
   * string is used as the stop position for that string.
   * @param start first character index to begin the substring (inclusive).
   * @param end last character index to stop the substring (exclusive).
   * @return A new java column vector containing the substrings.
   */
  public final ColumnVector substring(int start, int end) {
    assert type.equals(DType.STRING) : "column type must be a String";
    return new ColumnVector(substring(getNativeView(), start, end));
  }

  /**
   * Returns a new strings column that contains substrings of the strings in the provided column,
   * using unique ranges for each string.
   * @param start Vector containing start indices of each string.
   * @param end Vector containing end indices of each string. -1 indicates reading to the end of
   *            the string.
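   *
   * Example (an illustrative sketch; `start` and `end` are INT32 columns):
   * ```
   * cv    = ["hello", "world"]
   * start = [1, 0]
   * end   = [4, -1]
   * r = cv.substring(start, end)
   * r is now ["ell", "world"]
   * ```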
   * @return A new java column vector containing the substrings.
   */
  public final ColumnVector substring(ColumnView start, ColumnView end) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert (rows == start.getRowCount() && rows == end.getRowCount()) : "Number of rows must be equal";
    assert (start.getType().equals(DType.INT32) && end.getType().equals(DType.INT32)) : "start and end " +
        "vectors must be of integer type";
    return new ColumnVector(substringColumn(getNativeView(), start.getNativeView(), end.getNativeView()));
  }

  /**
   * Given a lists column of strings (each row is a list of strings), concatenates the strings
   * within each row and returns a single strings column result. Each new string is created by
   * concatenating the strings from the same row (same list element) delimited by the separator
   * provided. This version of the function replaces nulls with an empty string and returns null
   * for an empty list.
   * @param sepCol strings column that provides separators for concatenation.
   * @return A new java column vector containing the concatenated strings with separator between.
   */
  public final ColumnVector stringConcatenateListElements(ColumnView sepCol) {
    try (Scalar nullString = Scalar.fromString(null);
         Scalar emptyString = Scalar.fromString("")) {
      return stringConcatenateListElements(sepCol, nullString, emptyString, false, false);
    }
  }

  /**
   * Given a lists column of strings (each row is a list of strings), concatenates the strings
   * within each row and returns a single strings column result.
   * Each new string is created by concatenating the strings from the same row (same list element)
   * delimited by the row separator provided in the sepCol strings column.
   * @param sepCol strings column that provides separators for concatenation.
   * @param separatorNarep string scalar indicating null behavior when a separator is null.
   *                       If set to null and the separator is null the resulting string will
   *                       be null. If not null, this string will be used in place of a null
   *                       separator.
   * @param stringNarep string that should be used to replace null strings in any non-null list
   *                    row. If set to null and the string is null the resulting string will
   *                    be null. If not null, this string will be used in place of a null value.
   * @param separateNulls if true, then the separator is included for null rows if
   *                      `stringNarep` is valid.
   * @param emptyStringOutputIfEmptyList if set to true, any input row that is an empty list
   *                                     will result in an empty string. Otherwise, it will
   *                                     result in a null.
   * @return A new java column vector containing the concatenated strings with separator between.
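   *
   * Example (an illustrative sketch; `sep` and `narep` are string scalars assumed for this sketch):
   * ```
   * cv     = [["a", "b"], ["c"]]
   * sepCol = ["-", ":"]
   * r = cv.stringConcatenateListElements(sepCol, sep, narep, false, false)
   * r is now ["a-b", "c"]
   * ```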
   */
  public final ColumnVector stringConcatenateListElements(ColumnView sepCol,
      Scalar separatorNarep, Scalar stringNarep, boolean separateNulls,
      boolean emptyStringOutputIfEmptyList) {
    assert type.equals(DType.LIST) : "column type must be a list";
    assert separatorNarep != null : "separator narep scalar provided may not be null";
    assert stringNarep != null : "string narep scalar provided may not be null";
    assert separatorNarep.getType().equals(DType.STRING) : "separator narep scalar must be a string scalar";
    assert stringNarep.getType().equals(DType.STRING) : "string narep scalar must be a string scalar";
    return new ColumnVector(stringConcatenationListElementsSepCol(getNativeView(),
        sepCol.getNativeView(), separatorNarep.getScalarHandle(), stringNarep.getScalarHandle(),
        separateNulls, emptyStringOutputIfEmptyList));
  }

  /**
   * Given a lists column of strings (each row is a list of strings), concatenates the strings
   * within each row and returns a single strings column result. Each new string is created by
   * concatenating the strings from the same row (same list element) delimited by the
   * separator provided.
   * @param separator string scalar inserted between each string being merged.
   * @param narep string scalar indicating null behavior. If set to null and any string in the row
   *              is null the resulting string will be null. If not null, null values in any
   *              column will be replaced by the specified string. The underlying value in the
   *              string scalar may be null, but the object passed in may not.
   * @param separateNulls if true, then the separator is included for null rows if
   *                      `narep` is valid.
   * @param emptyStringOutputIfEmptyList if set to true, any input row that is an empty list
   *                                     will result in an empty string. Otherwise, it will
   *                                     result in a null.
   * @return A new java column vector containing the concatenated strings with separator between.
   */
  public final ColumnVector stringConcatenateListElements(Scalar separator, Scalar narep,
      boolean separateNulls, boolean emptyStringOutputIfEmptyList) {
    assert type.equals(DType.LIST) : "column type must be a list";
    assert separator != null : "separator scalar provided may not be null";
    assert narep != null : "column narep scalar provided may not be null";
    assert narep.getType().equals(DType.STRING) : "narep scalar must be a string scalar";
    return new ColumnVector(stringConcatenationListElements(getNativeView(),
        separator.getScalarHandle(), narep.getScalarHandle(), separateNulls,
        emptyStringOutputIfEmptyList));
  }

  /**
   * Given a strings column, each string in it is repeated a number of times specified by the
   * <code>repeatTimes</code> parameter.
   *
   * In special cases:
   *  - If <code>repeatTimes</code> is not a positive number, a non-null input string will always
   *    result in an empty output string.
   *  - A null input string will always result in a null output string regardless of the value of
   *    the <code>repeatTimes</code> parameter.
   *
   * @param repeatTimes The number of times each input string is repeated.
   * @return A new java column vector containing repeated strings.
   */
  public final ColumnVector repeatStrings(int repeatTimes) {
    assert type.equals(DType.STRING) : "column type must be String";
    return new ColumnVector(repeatStrings(getNativeView(), repeatTimes));
  }

  /**
   * Given a strings column, an output strings column is generated by repeating each of the input
   * strings the number of times given by the corresponding row in a <code>repeatTimes</code>
   * numeric column.
   *
   * In special cases:
   *  - Any null row (from either the input strings column or the <code>repeatTimes</code> column)
   *    will always result in a null output string.
   *  - If any value in the <code>repeatTimes</code> column is not a positive number and its
   *    corresponding input string is not null, the output string will be an empty string.
   *
   * @param repeatTimes The column containing numbers of times each input string is repeated.
   * @return A new java column vector containing repeated strings.
   */
  public final ColumnVector repeatStrings(ColumnView repeatTimes) {
    assert type.equals(DType.STRING) : "column type must be String";
    return new ColumnVector(repeatStringsWithColumnRepeatTimes(getNativeView(),
        repeatTimes.getNativeView()));
  }

  /**
   * Apply a JSONPath string to all rows in an input strings column.
   *
   * Applies a JSONPath string to an incoming strings column where each row in the column
   * is a valid json string. The output is returned by row as a strings column.
   *
   * For reference, https://tools.ietf.org/id/draft-goessner-dispatch-jsonpath-00.html
   * Note: Only implements the operators: $ . [] *
   *
   * @param path The JSONPath string to be applied to each row
   * @return new strings ColumnVector containing the retrieved json object strings
   */
  public final ColumnVector getJSONObject(Scalar path) {
    assert(type.equals(DType.STRING)) : "column type must be a String";
    return new ColumnVector(getJSONObject(getNativeView(), path.getScalarHandle()));
  }

  /**
   * Returns a new strings column where the target string within each string is replaced with the
   * specified replacement string.
   * The replacement proceeds from the beginning of the string to the end, for example,
   * replacing "aa" with "b" in the string "aaa" will result in "ba" rather than "ab".
   * Specifying an empty string for replace will essentially remove the target string if found
   * in each string.
   * Null string entries will return null output string entries.
   * The target Scalar should be a string and should not be empty or null.
   *
   * @param target String to search for within each string.
   * @param replace Replacement string if target is found.
   * @return A new java column vector containing replaced strings
   */
  public final ColumnVector stringReplace(Scalar target, Scalar replace) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert target != null : "target string may not be null";
    assert target.getType().equals(DType.STRING) : "target string must be a string scalar";
    assert !target.getJavaString().isEmpty() : "target scalar may not be empty";
    return new ColumnVector(stringReplace(getNativeView(), target.getScalarHandle(),
        replace.getScalarHandle()));
  }

  /**
   * Returns a new strings column where target strings within each string are replaced with
   * corresponding replacement strings. For each string in the column, the list of targets
   * is searched within that string. If a target string is found, it is replaced by the
   * corresponding entry in the repls column. All occurrences found in each string are replaced.
   * The repls argument can optionally contain a single string. In this case, all matching
   * target substrings will be replaced by that single string.
   *
   * Example:
   * cv = ["hello", "goodbye"]
   * targets = ["e","o"]
   * repls = ["EE","OO"]
   * r1 = cv.stringReplace(targets, repls)
   * r1 is now ["hEEllOO", "gOOOOdbyEE"]
   *
   * targets = ["e", "o"]
   * repls = ["_"]
   * r2 = cv.stringReplace(targets, repls)
   * r2 is now ["h_ll_", "g__dby_"]
   *
   * @param targets Strings to search for in each string.
   * @param repls Corresponding replacement strings for target strings.
   * @return A new java column vector containing the replaced strings.
   */
  public final ColumnVector stringReplace(ColumnView targets, ColumnView repls) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert targets != null : "target list may not be null";
    assert targets.getType().equals(DType.STRING) : "target list must be a string column";
    assert repls != null : "replacement list may not be null";
    assert repls.getType().equals(DType.STRING) : "replacement list must be a string column";
    return new ColumnVector(stringReplaceMulti(getNativeView(), targets.getNativeView(),
        repls.getNativeView()));
  }

  /**
   * For each string, replaces any character sequence matching the given pattern using the
   * replacement string scalar.
   *
   * @param pattern The regular expression pattern to search within each string.
   * @param repl The string scalar to replace for each pattern match.
   * @return A new column vector containing the string results.
   */
  @Deprecated
  public final ColumnVector replaceRegex(String pattern, Scalar repl) {
    return replaceRegex(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), repl);
  }

  /**
   * For each string, replaces any character sequence matching the given regex program pattern
   * using the replacement string scalar.
   *
   * @param regexProg The regex program with pattern to search within each string.
   * @param repl The string scalar to replace for each pattern match.
   * @return A new column vector containing the string results.
   */
  public final ColumnVector replaceRegex(RegexProgram regexProg, Scalar repl) {
    return replaceRegex(regexProg, repl, -1);
  }

  /**
   * For each string, replaces any character sequence matching the given pattern using the
   * replacement string scalar.
   *
   * @param pattern The regular expression pattern to search within each string.
   * @param repl The string scalar to replace for each pattern match.
   * @param maxRepl The maximum number of times a replacement should occur within each string.
   * @return A new column vector containing the string results.
   */
  @Deprecated
  public final ColumnVector replaceRegex(String pattern, Scalar repl, int maxRepl) {
    return replaceRegex(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), repl, maxRepl);
  }

  /**
   * For each string, replaces any character sequence matching the given regex program pattern
   * using the replacement string scalar.
   *
   * @param regexProg The regex program with pattern to search within each string.
   * @param repl The string scalar to replace for each pattern match.
   * @param maxRepl The maximum number of times a replacement should occur within each string.
   * @return A new column vector containing the string results.
   */
  public final ColumnVector replaceRegex(RegexProgram regexProg, Scalar repl, int maxRepl) {
    if (!repl.getType().equals(DType.STRING)) {
      throw new IllegalArgumentException("Replacement must be a string scalar");
    }
    assert regexProg != null : "regex program may not be null";
    return new ColumnVector(replaceRegex(getNativeView(), regexProg.pattern(),
        regexProg.combinedFlags(), regexProg.capture().nativeId, repl.getScalarHandle(),
        maxRepl));
  }

  /**
   * For each string, replaces any character sequence matching any of the regular expression
   * patterns with the corresponding replacement strings.
   *
   * @param patterns The regular expression patterns to search within each string.
   * @param repls The replacement strings, one for each corresponding pattern.
   * @return A new column vector containing the string results.
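   *
   * Example (an illustrative sketch; the patterns here are simple literal regexes):
   * ```
   * cv = ["hello", "goodbye"]
   * patterns = {"e", "o"}
   * repls = ["EE", "OO"]
   * r = cv.replaceMultiRegex(patterns, repls)
   * r is now ["hEEllOO", "gOOOOdbyEE"]
   * ```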
   */
  public final ColumnVector replaceMultiRegex(String[] patterns, ColumnView repls) {
    return new ColumnVector(replaceMultiRegex(getNativeView(), patterns,
        repls.getNativeView()));
  }

  /**
   * For each string, replaces any character sequence matching the given pattern
   * using the replace template for back-references.
   *
   * Any null string entries return corresponding null output column entries.
   *
   * @param pattern The regular expression pattern to search within each string.
   * @param replace The replacement template for creating the output string.
   * @return A new java column vector containing the string results.
   */
  @Deprecated
  public final ColumnVector stringReplaceWithBackrefs(String pattern, String replace) {
    return stringReplaceWithBackrefs(new RegexProgram(pattern), replace);
  }

  /**
   * For each string, replaces any character sequence matching the given regex program
   * pattern using the replace template for back-references.
   *
   * Any null string entries return corresponding null output column entries.
   *
   * @param regexProg The regex program with pattern to search within each string.
   * @param replace The replacement template for creating the output string.
   * @return A new java column vector containing the string results.
   */
  public final ColumnVector stringReplaceWithBackrefs(RegexProgram regexProg, String replace) {
    assert regexProg != null : "regex program may not be null";
    return new ColumnVector(
        stringReplaceWithBackrefs(getNativeView(), regexProg.pattern(),
            regexProg.combinedFlags(), regexProg.capture().nativeId, replace));
  }

  /**
   * Add '0' as padding to the left of each string.
   *
   * If the string is already width or more characters, no padding is performed.
   * No strings are truncated.
   *
   * Null string entries result in null entries in the output column.
   *
   * @param width The minimum number of characters for each string.
   * @return New column of strings.
   */
  public final ColumnVector zfill(int width) {
    return new ColumnVector(zfill(getNativeView(), width));
  }

  /**
   * Pad the Strings column until it reaches the desired length with spaces " " on the right.
   *
   * If the string is already width or more characters, no padding is performed.
   * No strings are truncated.
   *
   * Null string entries result in null entries in the output column.
   *
   * @param width the minimum number of characters for each string.
   * @return the new strings column.
   */
  public final ColumnVector pad(int width) {
    return pad(width, PadSide.RIGHT, " ");
  }

  /**
   * Pad the Strings column until it reaches the desired length with spaces " ".
   *
   * If the string is already width or more characters, no padding is performed.
   * No strings are truncated.
   *
   * Null string entries result in null entries in the output column.
   *
   * @param width the minimum number of characters for each string.
   * @param side where to add new characters.
   * @return the new strings column.
   */
  public final ColumnVector pad(int width, PadSide side) {
    return pad(width, side, " ");
  }

  /**
   * Pad the Strings column until it reaches the desired length.
   *
   * If the string is already width or more characters, no padding is performed.
   * No strings are truncated.
   *
   * Null string entries result in null entries in the output column.
   *
   * @param width the minimum number of characters for each string.
   * @param side where to add new characters.
   * @param fillChar a single character string that holds what should be added.
   * @return the new strings column.
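   *
   * Example (an illustrative sketch):
   * ```
   * cv = ["aa", "bbb"]
   * r = cv.pad(4, PadSide.LEFT, "0")
   * r is now ["00aa", "0bbb"]
   * ```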
*/ public final ColumnVector pad(int width, PadSide side, String fillChar) { assert fillChar != null; assert fillChar.length() == 1; return new ColumnVector(pad(getNativeView(), width, side.getNativeId(), fillChar)); } /** * Checks if each string in a column starts with a specified comparison string, resulting in a * parallel column of the boolean results. * @param pattern scalar containing the string being searched for at the beginning of the column's strings. * @return A new java column vector containing the boolean results. */ public final ColumnVector startsWith(Scalar pattern) { assert type.equals(DType.STRING) : "column type must be a String"; assert pattern != null : "pattern scalar may not be null"; assert pattern.getType().equals(DType.STRING) : "pattern scalar must be a string scalar"; return new ColumnVector(stringStartWith(getNativeView(), pattern.getScalarHandle())); } /** * Checks if each string in a column ends with a specified comparison string, resulting in a * parallel column of the boolean results. * @param pattern scalar containing the string being searched for at the end of the column's strings. * @return A new java column vector containing the boolean results. */ public final ColumnVector endsWith(Scalar pattern) { assert type.equals(DType.STRING) : "column type must be a String"; assert pattern != null : "pattern scalar may not be null"; assert pattern.getType().equals(DType.STRING) : "pattern scalar must be a string scalar"; return new ColumnVector(stringEndWith(getNativeView(), pattern.getScalarHandle())); } /** * Removes whitespace from the beginning and end of a string. * @return A new java column vector containing the stripped strings. */ public final ColumnVector strip() { assert type.equals(DType.STRING) : "column type must be a String"; try (Scalar emptyString = Scalar.fromString("")) { return new ColumnVector(stringStrip(getNativeView(), StripType.BOTH.nativeId, emptyString.getScalarHandle())); } } /** * Removes the specified characters from the beginning and end of each string. * @param toStrip UTF-8 encoded characters to strip from each string. * @return A new java column vector containing the stripped strings. */ public final ColumnVector strip(Scalar toStrip) { assert type.equals(DType.STRING) : "column type must be a String"; assert toStrip != null : "toStrip scalar may not be null"; assert toStrip.getType().equals(DType.STRING) : "toStrip must be a string scalar"; return new ColumnVector(stringStrip(getNativeView(), StripType.BOTH.nativeId, toStrip.getScalarHandle())); } /** * Removes whitespace from the beginning of a string. * @return A new java column vector containing the stripped strings. */ public final ColumnVector lstrip() { assert type.equals(DType.STRING) : "column type must be a String"; try (Scalar emptyString = Scalar.fromString("")) { return new ColumnVector(stringStrip(getNativeView(), StripType.LEFT.nativeId, emptyString.getScalarHandle())); } } /** * Removes the specified characters from the beginning of each string. * @param toStrip UTF-8 encoded characters to strip from each string. * @return A new java column vector containing the stripped strings. 
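   *
   * Example (an illustrative sketch; "x" is stripped from the left):
   * ```
   * cv = ["xxabc", "xbxx"]
   * r = cv.lstrip(Scalar.fromString("x"))
   * r is now ["abc", "bxx"]
   * ```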
   */
  public final ColumnVector lstrip(Scalar toStrip) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert toStrip != null : "toStrip Scalar may not be null";
    assert toStrip.getType().equals(DType.STRING) : "toStrip must be a string scalar";
    return new ColumnVector(stringStrip(getNativeView(), StripType.LEFT.nativeId,
        toStrip.getScalarHandle()));
  }

  /**
   * Removes whitespace from the end of a string.
   * @return A new java column vector containing the stripped strings.
   */
  public final ColumnVector rstrip() {
    assert type.equals(DType.STRING) : "column type must be a String";
    try (Scalar emptyString = Scalar.fromString("")) {
      return new ColumnVector(stringStrip(getNativeView(), StripType.RIGHT.nativeId,
          emptyString.getScalarHandle()));
    }
  }

  /**
   * Removes the specified characters from the end of each string.
   * @param toStrip UTF-8 encoded characters to strip from each string.
   * @return A new java column vector containing the stripped strings.
   */
  public final ColumnVector rstrip(Scalar toStrip) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert toStrip != null : "toStrip Scalar may not be null";
    assert toStrip.getType().equals(DType.STRING) : "toStrip must be a string scalar";
    return new ColumnVector(stringStrip(getNativeView(), StripType.RIGHT.nativeId,
        toStrip.getScalarHandle()));
  }

  /**
   * Checks if each string in a column contains a specified comparison string, resulting in a
   * parallel column of the boolean results.
   * @param compString scalar containing the string being searched for.
   * @return A new java column vector containing the boolean results.
   */
  public final ColumnVector stringContains(Scalar compString) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert compString != null : "compString scalar may not be null";
    assert compString.getType().equals(DType.STRING) : "compString scalar must be a string scalar";
    return new ColumnVector(stringContains(getNativeView(), compString.getScalarHandle()));
  }

  /**
   * Replaces values less than `lo` in `input` with `lo`,
   * and values greater than `hi` with `hi`.
   *
   * if `lo` is invalid, then lo will not be considered while
   * evaluating the input (Essentially considered minimum value of that type).
   * if `hi` is invalid, then hi will not be considered while
   * evaluating the input (Essentially considered maximum value of that type).
   *
   * ```
   * Example:
   * input: {1, 2, 3, NULL, 5, 6, 7}
   *
   * valid lo and hi
   * lo: 3, hi: 5
   * output:{3, 3, 3, NULL, 5, 5, 5}
   *
   * invalid lo
   * lo: NULL, hi: 5
   * output:{1, 2, 3, NULL, 5, 5, 5}
   *
   * invalid hi
   * lo: 3, hi: NULL
   * output:{3, 3, 3, NULL, 5, 6, 7}
   * ```
   * @param lo - Minimum clamp value. All elements less than `lo` will be replaced by `lo`.
   *             Ignored if null.
   * @param hi - Maximum clamp value. All elements greater than `hi` will be replaced by `hi`.
   *             Ignored if null.
   * @return Returns a new clamped column as per `lo` and `hi` boundaries
   */
  public final ColumnVector clamp(Scalar lo, Scalar hi) {
    return new ColumnVector(clamper(this.getNativeView(), lo.getScalarHandle(),
        lo.getScalarHandle(), hi.getScalarHandle(), hi.getScalarHandle()));
  }

  /**
   * Replaces values less than `lo` in `input` with `lo_replace`,
   * and values greater than `hi` with `hi_replace`.
   *
   * if `lo` is invalid, then lo will not be considered while
   * evaluating the input (Essentially considered minimum value of that type).
   * if `hi` is invalid, then hi will not be considered while
   * evaluating the input (Essentially considered maximum value of that type).
   *
   * @note: If `lo` is valid then `lo_replace` should be valid
   *        If `hi` is valid then `hi_replace` should be valid
   *
   * ```
   * Example:
   * input: {1, 2, 3, NULL, 5, 6, 7}
   *
   * valid lo and hi
   * lo: 3, hi: 5, lo_replace : 0, hi_replace : 16
   * output:{0, 0, 3, NULL, 5, 16, 16}
   *
   * invalid lo
   * lo: NULL, hi: 5, lo_replace : 0, hi_replace : 16
   * output:{1, 2, 3, NULL, 5, 16, 16}
   *
   * invalid hi
   * lo: 3, hi: NULL, lo_replace : 0, hi_replace : 16
   * output:{0, 0, 3, NULL, 5, 6, 7}
   * ```
   *
   * @param lo - Minimum clamp value. All elements less than `lo` will be replaced by `loReplace`. Ignored if null.
   * @param loReplace - All elements less than `lo` will be replaced by `loReplace`.
   * @param hi - Maximum clamp value. All elements greater than `hi` will be replaced by `hiReplace`. Ignored if null.
   * @param hiReplace - All elements greater than `hi` will be replaced by `hiReplace`.
   * @return - a new clamped column as per `lo` and `hi` boundaries
   */
  public final ColumnVector clamp(Scalar lo, Scalar loReplace, Scalar hi, Scalar hiReplace) {
    return new ColumnVector(clamper(this.getNativeView(), lo.getScalarHandle(),
        loReplace.getScalarHandle(), hi.getScalarHandle(), hiReplace.getScalarHandle()));
  }

  /**
   * Returns a boolean ColumnVector identifying rows which
   * match the given regex pattern but only at the beginning of the string.
   *
   * ```
   * cv = ["abc", "123", "def456"]
   * r = cv.matchesRe("\\d+")
   * r is now [false, true, false]
   * ```
   * Any null string entries return corresponding null output column entries.
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   *
   * @param pattern Regex pattern to match to each string.
   * @return New ColumnVector of boolean results for each string.
   */
  @Deprecated
  public final ColumnVector matchesRe(String pattern) {
    return matchesRe(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE));
  }

  /**
   * Returns a boolean ColumnVector identifying rows which
   * match the given regex program pattern but only at the beginning of the string.
   *
   * ```
   * cv = ["abc", "123", "def456"]
   * p = new RegexProgram("\\d+", CaptureGroups.NON_CAPTURE)
   * r = cv.matchesRe(p)
   * r is now [false, true, false]
   * ```
   * Any null string entries return corresponding null output column entries.
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   *
   * @param regexProg Regex program to match to each string.
   * @return New ColumnVector of boolean results for each string.
   */
  public final ColumnVector matchesRe(RegexProgram regexProg) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert regexProg != null : "regex program may not be null";
    assert !regexProg.pattern().isEmpty() : "pattern string may not be empty";
    return new ColumnVector(matchesRe(getNativeView(), regexProg.pattern(),
        regexProg.combinedFlags(), regexProg.capture().nativeId));
  }

  /**
   * Returns a boolean ColumnVector identifying rows which
   * match the given regex pattern starting at any location.
   *
   * ```
   * cv = ["abc", "123", "def456"]
   * r = cv.containsRe("\\d+")
   * r is now [false, true, true]
   * ```
   * Any null string entries return corresponding null output column entries.
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   *
   * @param pattern Regex pattern to match to each string.
   * @return New ColumnVector of boolean results for each string.
   */
  @Deprecated
  public final ColumnVector containsRe(String pattern) {
    return containsRe(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE));
  }

  /**
   * Returns a boolean ColumnVector identifying rows which
   * match the given RegexProgram pattern starting at any location.
   *
   * ```
   * cv = ["abc", "123", "def456"]
   * p = new RegexProgram("\\d+", CaptureGroups.NON_CAPTURE)
   * r = cv.containsRe(p)
   * r is now [false, true, true]
   * ```
   * Any null string entries return corresponding null output column entries.
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   *
   * @param regexProg Regex program to match to each string.
   * @return New ColumnVector of boolean results for each string.
   */
  public final ColumnVector containsRe(RegexProgram regexProg) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert regexProg != null : "regex program may not be null";
    assert !regexProg.pattern().isEmpty() : "pattern string may not be empty";
    return new ColumnVector(containsRe(getNativeView(), regexProg.pattern(),
        regexProg.combinedFlags(), regexProg.capture().nativeId));
  }

  /**
   * For each captured group specified in the given regular expression
   * return a column in the table. Null entries are added if the string
   * does not match. Any null inputs also result in null output entries.
   *
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   * @param pattern the pattern to use
   * @return the table of extracted matches
   * @throws CudfException if any error happens including if the RE does
   *                       not contain any capture groups.
   */
  @Deprecated
  public final Table extractRe(String pattern) throws CudfException {
    return extractRe(new RegexProgram(pattern));
  }

  /**
   * For each captured group specified in the given regex program
   * return a column in the table. Null entries are added if the string
   * does not match. Any null inputs also result in null output entries.
   *
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   * @param regexProg the regex program to use
   * @return the table of extracted matches
   * @throws CudfException if any error happens including if the regex
   *                       program does not contain any capture groups.
   */
  public final Table extractRe(RegexProgram regexProg) throws CudfException {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert regexProg != null : "regex program may not be null";
    return new Table(extractRe(this.getNativeView(), regexProg.pattern(),
        regexProg.combinedFlags(), regexProg.capture().nativeId));
  }

  /**
   * Extracts all strings that match the given regular expression and correspond to the
   * regular expression group index. Any null inputs also result in null output entries.
   *
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   * @param pattern The regex pattern
   * @param idx The regex group index
   * @return A new column vector of extracted matches
   */
  @Deprecated
  public final ColumnVector extractAllRecord(String pattern, int idx) {
    if (idx == 0) {
      return extractAllRecord(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), idx);
    }
    return extractAllRecord(new RegexProgram(pattern), idx);
  }

  /**
   * Extracts all strings that match the given regex program pattern and correspond to the
   * regular expression group index. Any null inputs also result in null output entries.
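   *
   * Example (an illustrative sketch; group index 1 selects the first capture group):
   * ```
   * cv = ["a1b2", "c3"]
   * p = new RegexProgram("(\\d)")
   * r = cv.extractAllRecord(p, 1)
   * r is now [["1", "2"], ["3"]]
   * ```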
   *
   * For supported regex patterns refer to:
   * @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
   * @param regexProg The regex program
   * @param idx The regex group index
   * @return A new column vector of extracted matches
   */
  public final ColumnVector extractAllRecord(RegexProgram regexProg, int idx) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert idx >= 0 : "group index must be at least 0";
    assert regexProg != null : "regex program may not be null";
    return new ColumnVector(
        extractAllRecord(this.getNativeView(), regexProg.pattern(), regexProg.combinedFlags(),
            regexProg.capture().nativeId, idx));
  }

  /**
   * Returns a boolean ColumnVector identifying rows which
   * match the given like pattern.
   *
   * The like pattern expects only 2 wildcard special characters:
   * - `%` any number of any character (including no characters)
   * - `_` any single character
   *
   * ```
   * cv = ["azaa", "ababaabba", "aaxa"]
   * r = cv.like("%a_aa%", "\\")
   * r is now [true, true, false]
   * r = cv.like("a__a", "\\")
   * r is now [true, false, true]
   * ```
   *
   * An escape character can be specified to include a literal `%` or `_` in the search;
   * it is expected to be either 0 or 1 character.
   * If more than one character is specified, only the first character is used.
   *
   * ```
   * cv = ["abc_def", "abc1def", "abc_"]
   * r = cv.like("abc/_d%", "/")
   * r is now [true, false, false]
   * ```
   * Any null string entries return corresponding null output column entries.
   *
   * @param pattern Like pattern to match to each string.
   * @param escapeChar String that specifies the escape prefix; the default is "\\".
   * @return New ColumnVector of boolean results for each string.
   */
  public final ColumnVector like(Scalar pattern, Scalar escapeChar) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert pattern != null : "pattern scalar must not be null";
    assert pattern.getType().equals(DType.STRING) : "pattern scalar must be a string scalar";
    assert escapeChar != null : "escapeChar scalar must not be null";
    assert escapeChar.getType().equals(DType.STRING) : "escapeChar scalar must be a string scalar";
    return new ColumnVector(like(getNativeView(), pattern.getScalarHandle(),
        escapeChar.getScalarHandle()));
  }

  /**
   * Converts all character sequences starting with '%' into character code-points
   * interpreting the 2 following characters as hex values to create the code-point.
   * For example, the sequence '%20' is converted into byte (0x20) which is a single
   * space character. Another example converts '%C3%A9' into 2 sequential bytes
   * (0xc3 and 0xa9 respectively) which is the é character. Overall, 3 characters
   * are converted into one byte whenever a '%' (single percent) character
   * is encountered in the string.
   * <p>
   * Any null entries will result in corresponding null entries in the output column.
   *
   * @return a new column instance containing the decoded strings
   */
  public final ColumnVector urlDecode() throws CudfException {
    assert type.equals(DType.STRING) : "column type must be a String";
    return new ColumnVector(urlDecode(getNativeView()));
  }

  /**
   * Converts mostly non-ascii characters and control characters into UTF-8 hex code-points
   * prefixed with '%'. For example, the space character must be converted to characters '%20' where
   * the '20' indicates the hex value for space in UTF-8. Likewise, multi-byte characters are
   * converted to multiple hex characters. For example, the é character is converted to characters
   * '%C3%A9' where 'C3A9' is the UTF-8 bytes 0xC3A9 for this character.
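   *
   * Example (an illustrative sketch, based on the conversions described above):
   * ```
   * cv = ["a b", "é"]
   * r = cv.urlEncode()
   * r is now ["a%20b", "%C3%A9"]
   * ```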
   * <p>
   * Any null entries will result in corresponding null entries in the output column.
   *
   * @return a new column instance containing the encoded strings
   */
  public final ColumnVector urlEncode() throws CudfException {
    assert type.equals(DType.STRING) : "column type must be a String";
    return new ColumnVector(urlEncode(getNativeView()));
  }

  private static void assertIsSupportedMapKeyType(DType keyType) {
    boolean isSupportedKeyType =
        !keyType.equals(DType.EMPTY) && !keyType.equals(DType.LIST) && !keyType.equals(DType.STRUCT);
    assert isSupportedKeyType : "Map lookup by STRUCT and LIST keys is not supported.";
  }

  /**
   * Given a column of type List<Struct<X, Y>> and a key column of type X, return a column of type Y,
   * where each row in the output column is the Y value corresponding to the X key.
   * If the key is not found, the corresponding output value is null.
   * @param keys the column view with keys to lookup in the column
   * @return a column of values or nulls based on the lookup result
   */
  public final ColumnVector getMapValue(ColumnView keys) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    assert keys != null : "Lookup key may not be null";
    return new ColumnVector(mapLookupForKeys(getNativeView(), keys.getNativeView()));
  }

  /**
   * Given a column of type List<Struct<X, Y>> and a key of type X, return a column of type Y,
   * where each row in the output column is the Y value corresponding to the X key.
   * If the key is not found, the corresponding output value is null.
   * @param key the scalar key to lookup in the column
   * @return a column of values or nulls based on the lookup result
   */
  public final ColumnVector getMapValue(Scalar key) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    assert key != null : "Lookup key may not be null";
    assertIsSupportedMapKeyType(key.getType());
    return new ColumnVector(mapLookup(getNativeView(), key.getScalarHandle()));
  }

  /** For a column of type List<Struct<String, String>> and a passed in String key, return a boolean
   * column for all keys in the structs. It is true if the key exists in the corresponding map for
   * that row, false otherwise. It will never return null for a row.
   * @param key the String scalar to lookup in the column
   * @return a boolean column based on the lookup result
   */
  public final ColumnVector getMapKeyExistence(Scalar key) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    assert key != null : "Lookup key may not be null";
    assertIsSupportedMapKeyType(key.getType());
    return new ColumnVector(mapContains(getNativeView(), key.getScalarHandle()));
  }

  /** For a column of type List<Struct<_, _>> and a passed in key column, return a boolean
   * column for all keys in the map. Each output row is true if the key exists in the corresponding map for
   * that row, false otherwise. It will never return null for a row.
   * @param keys the keys to lookup in the column
   * @return a boolean column based on the lookup result
   */
  public final ColumnVector getMapKeyExistence(ColumnView keys) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    assert keys != null : "Lookup key may not be null";
    assertIsSupportedMapKeyType(keys.getType());
    return new ColumnVector(mapContainsKeys(getNativeView(), keys.getNativeView()));
  }

  /**
   * Create a new struct column view of existing column views. Note that this will NOT copy
   * the contents of the input columns to make a new vector, but makes a view that must not
   * outlive the child views that it references. The resulting column cannot be null.
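   * <p>
   * A minimal usage sketch (the columns `a` and `b` here are hypothetical):
   * ```
   * // given two column views `a` and `b` with the same row count
   * try (ColumnView struct = ColumnView.makeStructView(a.getRowCount(), a, b)) {
   *   // the struct view is only valid while `a` and `b` remain open
   * }
   * ```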
   * @param rows the number of rows in the struct column. This is needed if no columns
   *             are provided.
   * @param columns the columns to add to the struct in the order they should be added
   * @return the new column view. It is the responsibility of the caller to close this.
   */
  public static ColumnView makeStructView(long rows, ColumnView... columns) {
    long[] handles = new long[columns.length];
    for (int i = 0; i < columns.length; i++) {
      ColumnView cv = columns[i];
      if (rows != cv.getRowCount()) {
        throw new IllegalArgumentException("All columns must have the same number of rows");
      }
      handles[i] = cv.getNativeView();
    }
    return new ColumnView(makeStructView(handles, rows));
  }

  /**
   * Create a new struct column view of existing column views. Note that this will NOT copy
   * the contents of the input columns to make a new vector, but makes a view that must not
   * outlive the child views that it references. The resulting column cannot be null.
   * @param columns the columns to add to the struct in the order they should be added
   * @return the new column view. It is the responsibility of the caller to close this.
   */
  public static ColumnView makeStructView(ColumnView... columns) {
    if (columns.length <= 0) {
      throw new IllegalArgumentException("At least one column is needed to get the row count");
    }
    return makeStructView(columns[0].rows, columns);
  }

  /**
   * Create a new column view from a raw device buffer. Note that this will NOT copy
   * the contents of the buffer but only creates a view. The view MUST NOT outlive
   * the underlying device buffer. The column view will be created without a validity
   * vector, so it is not possible to create a view containing null elements. Additionally
   * only fixed-width primitive types are supported.
   *
   * @param buffer device memory that will back the column view
   * @param startOffset byte offset into the device buffer where the column data starts
   * @param type type of data in the column view
   * @param rows number of data elements in the column view
   * @return new column view instance that must not outlive the backing device buffer
   */
  public static ColumnView fromDeviceBuffer(BaseDeviceMemoryBuffer buffer,
                                            long startOffset,
                                            DType type,
                                            int rows) {
    if (buffer == null) {
      throw new NullPointerException("buffer is null");
    }
    int typeSize = type.getSizeInBytes();
    if (typeSize <= 0) {
      throw new IllegalArgumentException("Unsupported type: " + type);
    }
    if (startOffset < 0) {
      throw new IllegalArgumentException("Invalid start offset: " + startOffset);
    }
    if (rows < 0) {
      throw new IllegalArgumentException("Invalid row count: " + rows);
    }
    // use long arithmetic to avoid int overflow for large row counts
    long dataSize = (long) typeSize * rows;
    if (startOffset + dataSize > buffer.length) {
      throw new IllegalArgumentException("View extends beyond buffer range");
    }
    long dataAddress = buffer.getAddress() + startOffset;
    if (dataAddress % typeSize != 0) {
      throw new IllegalArgumentException("Data address " + Long.toHexString(dataAddress) +
          " is misaligned relative to type size of " + typeSize + " bytes");
    }
    return new ColumnView(makeCudfColumnView(type.typeId.getNativeId(), type.getScale(),
        dataAddress, dataSize, 0, 0, 0, rows, null));
  }

  /**
   * Create a column of bool values indicating whether the specified scalar
   * is an element of each row of a list column.
   * Output `column[i]` is set to null if one or more of the following are true:
   * 1. The key is null
   * 2. The column vector list value is null
   * @param key the scalar to look up
   * @return a Boolean ColumnVector with the result of the lookup
   */
  public final ColumnVector listContains(Scalar key) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    return new ColumnVector(listContains(getNativeView(), key.getScalarHandle()));
  }

  /**
   * Create a column of bool values indicating whether each list row of this
   * column contains the corresponding value in the given key column.
   * Output `column[i]` is set to null if one or more of the following are true:
   * 1. The key value is null
   * 2. The column vector list value is null
   * @param key the ColumnVector with look up values
   * @return a Boolean ColumnVector with the result of the lookup
   */
  public final ColumnVector listContainsColumn(ColumnView key) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    return new ColumnVector(listContainsColumn(getNativeView(), key.getNativeView()));
  }

  /**
   * Create a column of bool values indicating whether the list rows of the specified
   * column contain null elements.
   * Output `column[i]` is set to null iff the input list row is null.
   * @return a Boolean ColumnVector with the result of the lookup
   */
  public final ColumnVector listContainsNulls() {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    return new ColumnVector(listContainsNulls(getNativeView()));
  }

  /**
   * Enum to choose behaviour of listIndexOf functions:
   * 1. FIND_FIRST finds the first occurrence of a search key.
   * 2. FIND_LAST finds the last occurrence of a search key.
   */
  public enum FindOptions {FIND_FIRST, FIND_LAST}

  /**
   * Create a column of int32 indices, indicating the position of the scalar search key
   * in each list row.
   * All indices are 0-based. If a search key is not found, the index is set to -1.
   * The index is set to null if one of the following is true:
   * 1. The search key is null.
   * 2. The list row is null.
   * @param key The scalar search key
   * @param findOption Whether to find the first index of the key, or the last.
   * @return The resultant column of int32 indices
   */
  public final ColumnVector listIndexOf(Scalar key, FindOptions findOption) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    boolean isFindFirst = findOption == FindOptions.FIND_FIRST;
    return new ColumnVector(listIndexOfScalar(getNativeView(), key.getScalarHandle(), isFindFirst));
  }

  /**
   * Create a column of int32 indices, indicating the position of each row in the
   * search key column in the corresponding row of the lists column.
   * All indices are 0-based. If a search key is not found, the index is set to -1.
   * The index is set to null if one of the following is true:
   * 1. The search key row is null.
   * 2. The list row is null.
   * @param keys ColumnView of search keys.
   * @param findOption Whether to find the first index of the key, or the last.
   * @return The resultant column of int32 indices
   */
  public final ColumnVector listIndexOf(ColumnView keys, FindOptions findOption) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    boolean isFindFirst = findOption == FindOptions.FIND_FIRST;
    return new ColumnVector(listIndexOfColumn(getNativeView(), keys.getNativeView(), isFindFirst));
  }

  /**
   * Segmented sort of the elements within a list in each row of a list column.
   * NOTICE: list columns with nested children are NOT supported yet.
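   *
   * Example (illustrative):
   * ```
   * cv = [[3, 1, 2], [9, 7], null]
   * r = cv.listSortRows(false, true)
   * r is now [[1, 2, 3], [7, 9], null]
   * ```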
   *
   * @param isDescending whether to sort each row in descending order (or ascending order)
   * @param isNullSmallest whether to regard the null value as the min value (or the max value)
   * @return a List ColumnVector with elements in each list sorted
   */
  public final ColumnVector listSortRows(boolean isDescending, boolean isNullSmallest) {
    assert type.equals(DType.LIST) : "column type must be a LIST";
    return new ColumnVector(listSortRows(getNativeView(), isDescending, isNullSmallest));
  }

  /**
   * For each pair of lists from the input lists columns, check if they have any common non-null
   * elements.
   *
   * A null input row in any of the input columns will result in a null output row. During checking
   * for common elements, nulls within each list are considered as different values while
   * floating-point NaN values are considered as equal.
   *
   * The input lists columns must have the same size and same data type.
   *
   * @param lhs The input lists column for one side
   * @param rhs The input lists column for the other side
   * @return A column of type BOOL8 containing the check result
   */
  public static ColumnVector listsHaveOverlap(ColumnView lhs, ColumnView rhs) {
    assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
        "Input columns type must be of type LIST";
    assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
    return new ColumnVector(listsHaveOverlap(lhs.getNativeView(), rhs.getNativeView()));
  }

  /**
   * Find the distinct intersection of the lists at each row of the given lists columns.
   *
   * A null input row in any of the input lists columns will result in a null output row. During
   * finding list intersection, nulls and floating-point NaN values within each list are
   * considered as equal values.
   *
   * The input lists columns must have the same size and same data type.
   *
   * @param lhs The input lists column for one side
   * @param rhs The input lists column for the other side
   * @return A lists column containing the intersection result
   */
  public static ColumnVector listsIntersectDistinct(ColumnView lhs, ColumnView rhs) {
    assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
        "Input columns type must be of type LIST";
    assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
    return new ColumnVector(listsIntersectDistinct(lhs.getNativeView(), rhs.getNativeView()));
  }

  /**
   * Find the distinct union of the lists at each row of the given lists columns.
   *
   * A null input row in any of the input lists columns will result in a null output row. During
   * finding list union, nulls and floating-point NaN values within each list are considered as
   * equal values.
   *
   * The input lists columns must have the same size and same data type.
   *
   * @param lhs The input lists column for one side
   * @param rhs The input lists column for the other side
   * @return A lists column containing the union result
   */
  public static ColumnVector listsUnionDistinct(ColumnView lhs, ColumnView rhs) {
    assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
        "Input columns type must be of type LIST";
    assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
    return new ColumnVector(listsUnionDistinct(lhs.getNativeView(), rhs.getNativeView()));
  }

  /**
   * Find the difference of lists of the left column against lists of the right column.
   * Specifically, find the elements (without duplicates) from each list of the left column that
   * do not exist in the corresponding list of the right column.
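   *
   * Example (illustrative; the order of elements within each output list is unspecified):
   * ```
   * lhs = [[1, 2, 2, 3], [4, 5]]
   * rhs = [[2], [5, 6]]
   * r = ColumnView.listsDifferenceDistinct(lhs, rhs)
   * r is now [[1, 3], [4]]
   * ```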
   *
   * A null input row in any of the input lists columns will result in a null output row. During
   * finding, nulls and floating-point NaN values within each list are considered as equal values.
   *
   * The input lists columns must have the same size and same data type.
   *
   * @param lhs The input lists column for one side
   * @param rhs The input lists column for the other side
   * @return A lists column containing the difference result
   */
  public static ColumnVector listsDifferenceDistinct(ColumnView lhs, ColumnView rhs) {
    assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
        "Input columns type must be of type LIST";
    assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
    return new ColumnVector(listsDifferenceDistinct(lhs.getNativeView(), rhs.getNativeView()));
  }

  /**
   * Generate list offsets from sizes of each list.
   * NOTICE: This API only works on INT32 size columns; otherwise, the behavior is undefined.
   * Null and negative values are not allowed.
   *
   * @return a column of list offsets whose size is N + 1
   */
  public final ColumnVector generateListOffsets() {
    return new ColumnVector(generateListOffsets(getNativeView()));
  }

  /**
   * Get a single item from the column at the specified index as a Scalar.
   *
   * Be careful. This is expensive and may involve running a kernel to copy the data out.
   *
   * @param index the index to look at
   * @return the value at that index as a scalar.
   * @throws CudfException if the index is out of bounds.
   */
  public final Scalar getScalarElement(int index) {
    return new Scalar(getType(), getElement(getNativeView(), index));
  }

  /**
   * Filters elements in each row of this LIST column using `booleanMaskView`
   * LIST of booleans as a mask.
   * <p>
   * Given a list-of-bools column, the function produces
   * a new `LIST` column of the same type as this column, where each element is copied
   * from the row *only* if the corresponding `boolean_mask` is non-null and `true`.
   * <p>
   * E.g.
   * column = { {0,1,2}, {3,4}, {5,6,7}, {8,9} };
   * boolean_mask = { {0,1,1}, {1,0}, {1,1,1}, {0,0} };
   * results = { {1,2}, {3}, {5,6,7}, {} };
   * <p>
   * This column and `boolean_mask` must have the same number of rows.
   * The output column has the same number of rows as this column.
   * An element is copied to an output row *only*
   * if the corresponding boolean_mask element is `true`.
   * An output row is invalid only if the corresponding input row is invalid.
   *
   * @param booleanMaskView A nullable list of bools column used to filter elements in this column
   * @return List column of the same type as this column, containing filtered list rows
   * @throws CudfException if `boolean_mask` is not a "lists of bools" column
   * @throws CudfException if this column and `boolean_mask` have different number of rows
   */
  public final ColumnVector applyBooleanMask(ColumnView booleanMaskView) {
    assert (getType().equals(DType.LIST));
    assert (booleanMaskView.getType().equals(DType.LIST));
    assert (getRowCount() == booleanMaskView.getRowCount());
    return new ColumnVector(applyBooleanMask(getNativeView(), booleanMaskView.getNativeView()));
  }

  /**
   * Get the number of bytes needed to allocate a validity buffer for the given number of rows.
   * According to cudf::bitmask_allocation_size_bytes, the padding boundary for null mask is 64 bytes.
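   * <p>
   * For example (illustrative): 1 row needs 1 byte of bitmask and pads up to 64 bytes,
   * while 513 rows need 65 bytes and pad up to 128 bytes.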
   */
  static long getValidityBufferSize(int numRows) {
    // number of bytes required = Math.ceil(number of bits / 8)
    long actualBytes = ((long) numRows + 7) >> 3;
    // pad to a multiple of the padding boundary (64 bytes)
    return ((actualBytes + 63) >> 6) << 6;
  }

  /**
   * Count how many rows in the column are distinct from one another.
   * @param nullPolicy if nulls should be included or not.
   */
  public int distinctCount(NullPolicy nullPolicy) {
    return distinctCount(getNativeView(), nullPolicy.includeNulls);
  }

  /**
   * Count how many rows in the column are distinct from one another.
   * Nulls are included.
   */
  public int distinctCount() {
    return distinctCount(getNativeView(), true);
  }

  /////////////////////////////////////////////////////////////////////////////
  // INTERNAL/NATIVE ACCESS
  /////////////////////////////////////////////////////////////////////////////

  static DeviceMemoryBufferView getDataBuffer(long viewHandle) {
    long address = getNativeDataAddress(viewHandle);
    if (address == 0) {
      return null;
    }
    long length = getNativeDataLength(viewHandle);
    return new DeviceMemoryBufferView(address, length);
  }

  static DeviceMemoryBufferView getValidityBuffer(long viewHandle) {
    long address = getNativeValidityAddress(viewHandle);
    if (address == 0) {
      return null;
    }
    long length = getNativeValidityLength(viewHandle);
    return new DeviceMemoryBufferView(address, length);
  }

  static DeviceMemoryBufferView getOffsetsBuffer(long viewHandle) {
    long address = getNativeOffsetsAddress(viewHandle);
    if (address == 0) {
      return null;
    }
    long length = getNativeOffsetsLength(viewHandle);
    return new DeviceMemoryBufferView(address, length);
  }

  // Native Methods
  private static native int distinctCount(long handle, boolean nullsIncluded);

  /**
   * Native method to parse and convert a string column vector to unix timestamp. A unix
   * timestamp is a long value representing how many units since 1970-01-01 00:00:00.000 in either
   * positive or negative direction. This mirrors the functionality of Spark SQL's to_unix_timestamp.
   * Strings that fail to parse will default to 0. Supported time units are second, millisecond,
   * microsecond, and nanosecond. Larger time units for column vectors are not supported yet in cudf.
   * No checking is done for invalid formats or invalid timestamp units.
   * Negative timestamp values are not currently supported and will yield undesired results. See
   * github issue https://github.com/rapidsai/cudf/issues/3116 for details
   *
   * @param unit integer native ID of the time unit to parse the timestamp into.
   * @param format strptime format specifier string of the timestamp. Used to parse and convert
   *               the timestamp with. Supports %Y,%y,%m,%d,%H,%I,%p,%M,%S,%f,%z format specifiers.
   *               See https://github.com/rapidsai/custrings/blob/branch-0.10/docs/source/datetime.md
   *               for full parsing format specification and documentation.
   * @return native handle of the resulting cudf column, used to construct the Java column vector
   *         by the timestampToLong method.
   */
  private static native long stringTimestampToTimestamp(long viewHandle, int unit, String format);

  private static native long isFixedPoint(long viewHandle, int nativeTypeId, int scale);

  private static native long toHex(long viewHandle);

  /**
   * Native method to concatenate a list column of strings (each row is a list of strings),
   * concatenates the strings within each row and returns a single strings column result.
   * Each new string is created by concatenating the strings from the same row (same list element)
   * delimited by the row separator provided in the `separators` strings column.
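   *
   * Example (illustrative):
   * ```
   * column     = [["a", "b"], ["c", "d", "e"]]
   * separators = ["-", "+"]
   * result     = ["a-b", "c+d+e"]
   * ```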
   * @param listColumnHandle long holding the native handle of the column containing lists of strings
   *                         to concatenate.
   * @param sepColumn long holding the native handle of the strings column that provides separators
   *                  for concatenation.
   * @param separatorNarep string scalar indicating null behavior when a separator is null.
   *                       If set to null and the separator is null the resulting string will
   *                       be null. If not null, this string will be used in place of a null
   *                       separator.
   * @param colNarep string scalar that should be used in place of any null strings
   *                 found in any column.
   * @param separateNulls boolean if true, then the separator is included for null rows if
   *                      `colNarep` is valid.
   * @param emptyStringOutputIfEmptyList boolean if true, any input row that is an empty list
   *                                     will result in an empty string. Otherwise, it will
   *                                     result in a null.
   * @return native handle of the resulting cudf column, used to construct the Java column.
   */
  private static native long stringConcatenationListElementsSepCol(long listColumnHandle,
                                                                   long sepColumn,
                                                                   long separatorNarep,
                                                                   long colNarep,
                                                                   boolean separateNulls,
                                                                   boolean emptyStringOutputIfEmptyList);

  /**
   * Native method to concatenate a list column of strings (each row is a list of strings),
   * concatenates the strings within each row and returns a single strings column result.
   * Each new string is created by concatenating the strings from the same row (same list element)
   * delimited by the separator provided.
   * @param listColumnHandle long holding the native handle of the column containing lists of strings
   *                         to concatenate.
   * @param separator string scalar inserted between each string being merged, may not be null.
   * @param narep string scalar indicating null behavior. If set to null and any string in the row
   *              is null the resulting string will be null. If not null, null values in any
   *              column will be replaced by the specified string. The underlying value in the
   *              string scalar may be null, but the object passed in may not.
   * @param separateNulls boolean if true, then the separator is included for null rows if
   *                      `narep` is valid.
   * @param emptyStringOutputIfEmptyList boolean if true, any input row that is an empty list
   *                                     will result in an empty string. Otherwise, it will
   *                                     result in a null.
   * @return native handle of the resulting cudf column, used to construct the Java column.
   */
  private static native long stringConcatenationListElements(long listColumnHandle,
                                                             long separator,
                                                             long narep,
                                                             boolean separateNulls,
                                                             boolean emptyStringOutputIfEmptyList);

  /**
   * Native method to repeat each string in the given input strings column a number of times
   * specified by the <code>repeatTimes</code> parameter.
   *
   * In special cases:
   *  - If <code>repeatTimes</code> is not a positive number, a non-null input string will always
   *    result in an empty output string.
   *  - A null input string will always result in a null output string regardless of the value of
   *    the <code>repeatTimes</code> parameter.
   *
   * @param viewHandle long holding the native handle of the column containing strings to repeat.
   * @param repeatTimes The number of times each input string is repeated.
   * @return native handle of the resulting cudf strings column containing repeated strings.
   */
  private static native long repeatStrings(long viewHandle, int repeatTimes);

  /**
   * Native method to repeat strings in the given input strings column, each string is repeated
   * by a different number of times given by the corresponding row in a <code>repeatTimes</code>
   * numeric column.
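   *
   * Example (illustrative):
   * ```
   * strings     = ["a", "bc", null]
   * repeatTimes = [3, 2, 1]
   * result      = ["aaa", "bcbc", null]
   * ```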
   *
   * In special cases:
   *  - Any null row (from either the input strings column or the <code>repeatTimes</code> column)
   *    will always result in a null output string.
   *  - If any value in the <code>repeatTimes</code> column is not a positive number and its
   *    corresponding input string is not null, the output string will be an empty string.
   *
   * If the input <code>repeatTimesHandle</code> column does not have a numeric type, or it has a
   * size that is different from the size of the input strings column, an exception will be thrown.
   *
   * @param stringsHandle long holding the native handle of the column containing strings to repeat.
   * @param repeatTimesHandle long holding the native handle of the column containing the numbers
   *        of times each input string is repeated.
   * @return native handle of the resulting cudf strings column containing repeated strings.
   */
  private static native long repeatStringsWithColumnRepeatTimes(long stringsHandle,
                                                                long repeatTimesHandle);

  private static native long getJSONObject(long viewHandle, long scalarHandle) throws CudfException;

  /**
   * Native method to parse and convert a timestamp column vector to string column vector. A unix
   * timestamp is a long value representing how many units since 1970-01-01 00:00:00.000 in either
   * positive or negative direction. This mirrors the functionality of Spark SQL's from_unixtime.
   * No checking is done for invalid formats or invalid timestamp units.
   * Negative timestamp values are not currently supported and will yield undesired results. See
   * github issue https://github.com/rapidsai/cudf/issues/3116 for details
   *
   * @param format - strftime format specifier string of the timestamp. It is used to parse and
   *               convert the timestamp with. Supports %Y,%y,%m,%d,%H,%M,%S,%f format specifiers.
   *               %d Day of the month: 01-31
   *               %m Month of the year: 01-12
   *               %y Year without century: 00-99
   *               %Y Year with century: 0001-9999
   *               %H 24-hour of the day: 00-23
   *               %M Minute of the hour: 00-59
   *               %S Second of the minute: 00-59
   *               %f 6-digit microsecond: 000000-999999
   *               See http://man7.org/linux/man-pages/man3/strftime.3.html for details
   *
   * Reported bugs
   * https://github.com/rapidsai/cudf/issues/4160 after the bug is fixed this method should
   * also support
   *               %I 12-hour of the day: 01-12
   *               %p Only 'AM', 'PM'
   *               %j day of the year
   *
   * @return - native handle of the resulting cudf column used to construct the Java column vector
   */
  private static native long timestampToStringTimestamp(long viewHandle, String format);

  /**
   * Native method for locating the starting index of the first instance of a given substring
   * in each string in the column. 0 indexing, returns -1 if the substring is not found. Can
   * be configured to start or end the search mid string.
   * @param columnView native handle of the cudf::column_view containing strings being operated on.
   * @param substringScalar string scalar handle containing the string to locate within each row.
   * @param start character index to start the search from (inclusive).
   * @param end character index to end the search on (exclusive).
   */
  private static native long substringLocate(long columnView, long substringScalar, int start, int end);

  /**
   * Returns a list of columns by splitting each string using the specified string literal
   * delimiter. The number of rows in the output columns will be the same as the input column.
   * Null entries are added for the rows where split results have been exhausted. Null input entries
   * result in all nulls in the corresponding rows of the output columns.
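   *
   * Example (illustrative, with delimiter ":" and limit = -1):
   * ```
   * input = ["a:b:c", "d:e", "f"]
   * output columns = [["a", "d", "f"], ["b", "e", null], ["c", null, null]]
   * ```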
   *
   * @param nativeHandle native handle of the input strings column that is being operated on.
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   */
  private static native long[] stringSplit(long nativeHandle, String delimiter, int limit);

  /**
   * Returns a list of columns by splitting each string using the specified regular expression
   * pattern. The number of rows in the output columns will be the same as the input column.
   * Null entries are added for the rows where split results have been exhausted. Null input entries
   * result in all nulls in the corresponding rows of the output columns.
   *
   * @param nativeHandle native handle of the input strings column that is being operated on.
   * @param pattern UTF-8 encoded string identifying the split regular expression pattern for
   *                each input string.
   * @param flags regex flags setting.
   * @param capture capture groups setting.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   */
  private static native long[] stringSplitRe(long nativeHandle, String pattern, int flags,
                                             int capture, int limit);

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified string literal delimiter.
   *
   * @param nativeHandle native handle of the input strings column that is being operated on.
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   */
  private static native long stringSplitRecord(long nativeHandle, String delimiter, int limit);

  /**
   * Returns a column of lists of strings in which each list is made by splitting the
   * corresponding input string using the specified regular expression pattern.
   *
   * @param nativeHandle native handle of the input strings column that is being operated on.
   * @param pattern UTF-8 encoded string identifying the split regular expression pattern for
   *                each input string.
   * @param flags regex flags setting.
   * @param capture capture groups setting.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   */
  private static native long stringSplitRecordRe(long nativeHandle, String pattern, int flags,
                                                 int capture, int limit);

  /**
   * Native method to calculate substring from a given string column. 0 indexing.
   * @param columnView native handle of the cudf::column_view being operated on.
   * @param start first character index to begin the substring (inclusive).
   * @param end last character index to stop the substring (exclusive).
   */
  private static native long substring(long columnView, int start, int end) throws CudfException;

  /**
   * Native method to extract substrings from a given strings column.
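   * <p>
   * For example (illustrative): with start = 2, "hello" becomes "llo" and "ab" becomes "".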
   * @param columnView native handle of the cudf::column_view being operated on.
   * @param start first character index to begin the substrings (inclusive).
   */
  private static native long substringS(long columnView, int start) throws CudfException;

  /**
   * Native method to calculate substring from a given string column.
   * @param columnView native handle of the cudf::column_view being operated on.
   * @param startColumn handle of cudf::column_view which has start indices of each string.
   * @param endColumn handle of cudf::column_view which has end indices of each string.
   */
  private static native long substringColumn(long columnView, long startColumn, long endColumn)
      throws CudfException;

  /**
   * Native method to replace target string by repl string.
   * @param columnView native handle of the cudf::column_view being operated on.
   * @param target handle of scalar containing the string being searched.
   * @param repl handle of scalar containing the string to replace.
   */
  private static native long stringReplace(long columnView, long target, long repl)
      throws CudfException;

  /**
   * Native method to replace target strings by corresponding repl strings.
   * @param inputCV native handle of the cudf::column_view being operated on.
   * @param targetsCV handle of column containing the strings being searched.
   * @param replsCV handle of column containing the strings to replace (can optionally contain a single string).
   */
  private static native long stringReplaceMulti(long inputCV, long targetsCV, long replsCV)
      throws CudfException;

  /**
   * Native method for replacing each regular expression pattern match with the specified
   * replacement string.
   * @param columnView native handle of the cudf::column_view being operated on.
   * @param pattern regular expression pattern to search within each string.
   * @param flags regex flags setting.
   * @param capture capture groups setting.
   * @param repl native handle of the cudf::scalar containing the replacement string.
   * @param maxRepl maximum number of times to replace the pattern within a string
   * @return native handle of the resulting cudf column containing the string results.
   */
  private static native long replaceRegex(long columnView, String pattern, int flags, int capture,
                                          long repl, long maxRepl) throws CudfException;

  /**
   * Native method for multiple instance regular expression replacement.
   * @param columnView native handle of the cudf::column_view being operated on.
   * @param patterns array of regular expression patterns to search within each string.
   * @param repls native handle of the cudf::column_view containing the replacement strings.
   * @return native handle of the resulting cudf column containing the string results.
   */
  private static native long replaceMultiRegex(long columnView, String[] patterns, long repls)
      throws CudfException;

  /**
   * Native method for replacing any character sequence matching the given regex program
   * pattern using the replace template for back-references.
   * @param columnView native handle of the cudf::column_view being operated on.
   * @param pattern The regular expression pattern to search within each string.
   * @param flags Regex flags setting.
   * @param capture Capture groups setting.
   * @param replace The replacement template for creating the output string.
   * @return native handle of the resulting cudf column containing the string results.
   */
  private static native long stringReplaceWithBackrefs(long columnView, String pattern, int flags,
                                                       int capture, String replace) throws CudfException;

  /**
   * Native method for checking if strings in a column start with a specified comparison string.
   * @param cudfViewHandle native handle of the cudf::column_view being operated on.
   * @param compString handle of scalar containing the string being searched for at the beginning
   *                   of each string in the column.
   * @return native handle of the resulting cudf column containing the boolean results.
   */
  private static native long stringStartWith(long cudfViewHandle, long compString)
      throws CudfException;

  /**
   * Native method for checking if strings in a column end with a specified comparison string.
   * @param cudfViewHandle native handle of the cudf::column_view being operated on.
   * @param compString handle of scalar containing the string being searched for at the end
   *                   of each string in the column.
   * @return native handle of the resulting cudf column containing the boolean results.
   */
  private static native long stringEndWith(long cudfViewHandle, long compString)
      throws CudfException;

  /**
   * Native method to strip whitespace or other specified characters from the start and/or end
   * of each string.
   * @param columnView native handle of the cudf::column_view being operated on.
   */
  private static native long stringStrip(long columnView, int type, long toStrip)
      throws CudfException;

  /**
   * Native method for checking if strings match the passed in regex program pattern from the
   * beginning of the string.
   * @param cudfViewHandle native handle of the cudf::column_view being operated on.
   * @param pattern string regex pattern.
   * @param flags regex flags setting.
   * @param capture capture groups setting.
   * @return native handle of the resulting cudf column containing the boolean results.
   */
  private static native long matchesRe(long cudfViewHandle, String pattern, int flags, int capture)
      throws CudfException;

  /**
   * Native method for checking if strings match the passed in regex program pattern starting at any location.
   * @param cudfViewHandle native handle of the cudf::column_view being operated on.
   * @param pattern string regex pattern.
   * @param flags regex flags setting.
   * @param capture capture groups setting.
   * @return native handle of the resulting cudf column containing the boolean results.
   */
  private static native long containsRe(long cudfViewHandle, String pattern, int flags, int capture)
      throws CudfException;

  /**
   * Native method for checking if strings match the passed in like pattern
   * and escape character.
   * @param cudfViewHandle native handle of the cudf::column_view being operated on.
   * @param patternHandle handle of scalar containing the string like pattern.
   * @param escapeCharHandle handle of scalar containing the string escape character.
   * @return native handle of the resulting cudf column containing the boolean results.
   */
  private static native long like(long cudfViewHandle, long patternHandle, long escapeCharHandle)
      throws CudfException;

  /**
   * Native method for checking if strings in a column contain a specified comparison string.
   * @param cudfViewHandle native handle of the cudf::column_view being operated on.
   * @param compString handle of scalar containing the string being searched for.
   * @return native handle of the resulting cudf column containing the boolean results.
   */
  private static native long stringContains(long cudfViewHandle, long compString)
      throws CudfException;

  /**
   * Native method for extracting results from a regex program pattern. Returns a table handle.
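   *
   * Example (illustrative):
   * ```
   * input   = ["a1", "b2", "cc"]
   * pattern = "([ab])(\\d)"
   * output table columns are ["a", "b", null] and ["1", "2", null]
   * ```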
   *
   * @param cudfViewHandle Native handle of the cudf::column_view being operated on.
   * @param pattern String regex pattern.
   * @param flags Regex flags setting.
   * @param capture Capture groups setting.
   */
  private static native long[] extractRe(long cudfViewHandle, String pattern, int flags, int capture)
      throws CudfException;

  /**
   * Native method for extracting all results corresponding to group idx from a regex program pattern.
   *
   * @param nativeHandle Native handle of the cudf::column_view being operated on.
   * @param pattern String regex pattern.
   * @param flags Regex flags setting.
   * @param capture Capture groups setting.
   * @param idx Regex group index. A 0 value means matching the entire regex.
   * @return Native handle of a string column of the result.
   */
  private static native long extractAllRecord(long nativeHandle, String pattern, int flags,
                                              int capture, int idx);

  private static native long urlDecode(long cudfViewHandle);

  private static native long urlEncode(long cudfViewHandle);

  /**
   * Native method for map lookup over a column of List<Struct<String,String>>
   * @param columnView the column view handle of the map
   * @param key the string scalar that is the key for lookup
   * @return handle of the resulting string column
   * @throws CudfException
   */
  private static native long mapLookup(long columnView, long key) throws CudfException;

  /**
   * Native method for map lookup over a column of List<Struct<String,String>>
   * The lookup column must have as many rows as the map column,
   * and must match the key-type of the map.
   * A column of values is returned, with the same number of rows as the map column.
   * If a key is repeated in a map row, the value corresponding to the last matching
   * key is returned.
   * If a lookup key is null or not found, the corresponding value is null.
   * @param columnView the column view handle of the map
   * @param keys the column view holding the keys
   * @return a column of values corresponding to the lookup keys.
   * @throws CudfException
   */
  private static native long mapLookupForKeys(long columnView, long keys) throws CudfException;

  /**
   * Native method for checking the existence of a key over a column of List<Struct<_, _>>
   * @param columnView the column view handle of the map
   * @param key the column view holding the keys
   * @return boolean column handle of the result
   * @throws CudfException
   */
  private static native long mapContainsKeys(long columnView, long key) throws CudfException;

  /**
   * Native method for checking the existence of a key over a column of List<Struct<String,String>>
   * @param columnView the column view handle of the map
   * @param key the string scalar that is the key for lookup
   * @return boolean column handle of the result
   * @throws CudfException
   */
  private static native long mapContains(long columnView, long key) throws CudfException;

  /**
   * Native method to add zeros as padding to the left of each string.
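   * <p>
   * For example (illustrative): with width = 4, "42" becomes "0042" while "12345" is unchanged.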
*/ private static native long zfill(long nativeHandle, int width); private static native long pad(long nativeHandle, int width, int side, String fillChar); private static native long binaryOpVS(long lhs, long rhs, int op, int dtype, int scale); private static native long binaryOpVV(long lhs, long rhs, int op, int dtype, int scale); private static native long countElements(long viewHandle); private static native long byteCount(long viewHandle) throws CudfException; private static native long extractListElement(long nativeView, int index); private static native long extractListElementV(long nativeView, long indicesView); private static native long dropListDuplicates(long nativeView); private static native long dropListDuplicatesWithKeysValues(long nativeHandle); private static native long flattenLists(long inputHandle, boolean ignoreNull); /** * Native method for list lookup * @param nativeView the column view handle of the list * @param key the scalar key handle * @return column handle of the resultant */ private static native long listContains(long nativeView, long key); /** * Native method for list lookup * @param nativeView the column view handle of the list * @param keyColumn the column handle of look up keys * @return column handle of the resultant */ private static native long listContainsColumn(long nativeView, long keyColumn); /** * Native method to search list rows for null elements. * @param nativeView the column view handle of the list * @return column handle of the resultant boolean column */ private static native long listContainsNulls(long nativeView); /** * Native method to find the first (or last) index of a specified scalar key, * in each row of a list column. * @param nativeView the column view handle of the list * @param scalarKeyHandle handle to the scalar search key * @param isFindFirst Whether to find the first index of the key, or the last. * @return column handle of the resultant column of int32 indices */ private static native long listIndexOfScalar(long nativeView, long scalarKeyHandle, boolean isFindFirst); /** * Native method to find the first (or last) index of each search key in the specified column, * in each row of a list column. * @param nativeView the column view handle of the list * @param keyColumnHandle handle to the search key column * @param isFindFirst Whether to find the first index of the key, or the last. 
* @return column handle of the resultant column of int32 indices */ private static native long listIndexOfColumn(long nativeView, long keyColumnHandle, boolean isFindFirst); private static native long listSortRows(long nativeView, boolean isDescending, boolean isNullSmallest); private static native long listsHaveOverlap(long lhsViewHandle, long rhsViewHandle); private static native long listsIntersectDistinct(long lhsViewHandle, long rhsViewHandle); private static native long listsUnionDistinct(long lhsViewHandle, long rhsViewHandle); private static native long listsDifferenceDistinct(long lhsViewHandle, long rhsViewHandle); private static native long getElement(long nativeView, int index); private static native long castTo(long nativeHandle, int type, int scale); private static native long bitCastTo(long nativeHandle, int type, int scale); private static native long byteListCast(long nativeHandle, boolean config); private static native long[] slice(long nativeHandle, int[] indices) throws CudfException; private static native long[] split(long nativeHandle, int[] indices) throws CudfException; private static native long findAndReplaceAll(long valuesHandle, long replaceHandle, long myself) throws CudfException; private static native long round(long nativeHandle, int decimalPlaces, int roundingMethod) throws CudfException; private static native long reverseStringsOrLists(long inputHandle); /** * Native method to switch all characters in a column of strings to lowercase characters. * @param cudfViewHandle native handle of the cudf::column_view being operated on. * @return native handle of the resulting cudf column, used to construct the Java column * by the lower method. */ private static native long lowerStrings(long cudfViewHandle); /** * Native method to switch all characters in a column of strings to uppercase characters. * @param cudfViewHandle native handle of the cudf::column_view being operated on. * @return native handle of the resulting cudf column, used to construct the Java column * by the upper method. */ private static native long upperStrings(long cudfViewHandle); /** * Native method to compute approx percentiles. * @param cudfColumnHandle T-Digest column * @param percentilesHandle Percentiles * @return native handle of the resulting cudf column, used to construct the Java column * by the approxPercentile method. 
*/ private static native long approxPercentile(long cudfColumnHandle, long percentilesHandle) throws CudfException; private static native long quantile(long cudfColumnHandle, int quantileMethod, double[] quantiles) throws CudfException; private static native long rollingWindow( long viewHandle, long defaultOutputHandle, int min_periods, long aggPtr, int preceding, int following, long preceding_col, long following_col); private static native long scan(long viewHandle, long aggregation, boolean isInclusive, boolean includeNulls) throws CudfException; private static native long nansToNulls(long viewHandle) throws CudfException; private static native long charLengths(long viewHandle) throws CudfException; private static native long replaceNullsScalar(long viewHandle, long scalarHandle) throws CudfException; private static native long replaceNullsColumn(long viewHandle, long replaceViewHandle) throws CudfException; private static native long replaceNullsPolicy(long nativeView, boolean isPreceding) throws CudfException; private static native long ifElseVV(long predVec, long trueVec, long falseVec) throws CudfException; private static native long ifElseVS(long predVec, long trueVec, long falseScalar) throws CudfException; private static native long ifElseSV(long predVec, long trueScalar, long falseVec) throws CudfException; private static native long ifElseSS(long predVec, long trueScalar, long falseScalar) throws CudfException; private static native long reduce(long viewHandle, long aggregation, int dtype, int scale) throws CudfException; private static native long segmentedReduce(long dataViewHandle, long offsetsViewHandle, long aggregation, boolean includeNulls, int dtype, int scale) throws CudfException; private static native long segmentedGather(long sourceColumnHandle, long gatherMapListHandle, boolean isNullifyOutBounds) throws CudfException; private static native long isNullNative(long viewHandle); private static native long isNanNative(long viewHandle); private static native long isFloat(long viewHandle); private static native long isInteger(long viewHandle); private static native long isIntegerWithType(long viewHandle, int typeId, int typeScale); private static native long isNotNanNative(long viewHandle); private static native long isNotNullNative(long viewHandle); private static native long unaryOperation(long viewHandle, int op); private static native long year(long viewHandle) throws CudfException; private static native long month(long viewHandle) throws CudfException; private static native long day(long viewHandle) throws CudfException; private static native long hour(long viewHandle) throws CudfException; private static native long minute(long viewHandle) throws CudfException; private static native long second(long viewHandle) throws CudfException; private static native long weekDay(long viewHandle) throws CudfException; private static native long lastDayOfMonth(long viewHandle) throws CudfException; private static native long dayOfYear(long viewHandle) throws CudfException; private static native long quarterOfYear(long viewHandle) throws CudfException; private static native long addCalendricalMonths(long tsViewHandle, long monthsViewHandle); private static native long isLeapYear(long viewHandle) throws CudfException; private static native boolean containsScalar(long columnViewHaystack, long scalarHandle) throws CudfException; private static native long containsVector(long valuesHandle, long searchSpaceHandle) throws CudfException; private static native long transform(long 
viewHandle, String udf, boolean isPtx); private static native long clamper(long nativeView, long loScalarHandle, long loScalarReplaceHandle, long hiScalarHandle, long hiScalarReplaceHandle); protected static native long title(long handle); private static native long capitalize(long strsColHandle, long delimitersHandle); private static native long joinStrings(long strsHandle, long sepHandle, long narepHandle); private static native long makeStructView(long[] handles, long rowCount); private static native long isTimestamp(long nativeView, String format); /** * Native method to normalize the various bitwise representations of NAN and zero. * * All occurrences of -NaN are converted to NaN. Likewise, all -0.0 are converted to 0.0. * * @param viewHandle `long` representation of pointer to input column_view. * @return Pointer to a new `column` of normalized values. * @throws CudfException On failure to normalize. */ private static native long normalizeNANsAndZeros(long viewHandle) throws CudfException; /** * Native method to deep copy a column while replacing the null mask. The null mask is the * bitwise merge of the null masks in the columns given as arguments. * * @param baseHandle column view of the column that is deep copied. * @param viewHandles array of views whose null masks are merged, must have identical row counts. * @return native handle of the copied cudf column with replaced null mask. */ private static native long bitwiseMergeAndSetValidity(long baseHandle, long[] viewHandles, int nullConfig) throws CudfException; /** * Native method to deep copy a column while replacing the null mask. The null mask is the * device_vector equivalent of the boolean column given as argument. * * The boolColumn must have the same number of rows as the exemplar column. * The result column will have the same number of rows as the exemplar. * For all indices `i` where the boolean column is `true`, the result column will have a valid value at index i. * For all other values (i.e. `false` or `null`), the result column will have nulls. * * If the exemplar column has a null at a given index `i`, and the new validity mask is `true` at index `i`, * then the resultant row value is undefined. * * @param exemplarViewHandle column view of the column that is deep copied. * @param boolColumnViewHandle bool column whose value is to be used as the null mask. * @return Deep copy of the column with replaced null mask. */ private static native long copyWithBooleanColumnAsValidity(long exemplarViewHandle, long boolColumnViewHandle) throws CudfException; //////// // Native cudf::column_view life cycle and metadata access methods. Life cycle methods // should typically only be called from the OffHeap inner class. 
//////// static native int getNativeTypeId(long viewHandle) throws CudfException; static native int getNativeTypeScale(long viewHandle) throws CudfException; static native int getNativeRowCount(long viewHandle) throws CudfException; static native int getNativeNullCount(long viewHandle) throws CudfException; static native void deleteColumnView(long viewHandle) throws CudfException; private static native long getNativeDataAddress(long viewHandle) throws CudfException; private static native long getNativeDataLength(long viewHandle) throws CudfException; private static native long getNativeOffsetsAddress(long viewHandle) throws CudfException; private static native long getNativeOffsetsLength(long viewHandle) throws CudfException; private static native long getNativeValidityAddress(long viewHandle) throws CudfException; private static native long getNativeValidityLength(long viewHandle) throws CudfException; static native long makeCudfColumnView(int type, int scale, long data, long dataSize, long offsets, long valid, int nullCount, int size, long[] childHandle); static native long getChildCvPointer(long viewHandle, int childIndex) throws CudfException; private static native long getListOffsetCvPointer(long viewHandle) throws CudfException; static native int getNativeNumChildren(long viewHandle) throws CudfException; // calculate the amount of device memory used by this column including any child columns static native long getDeviceMemorySize(long viewHandle, boolean shouldPadForCpu) throws CudfException; static native long copyColumnViewToCV(long viewHandle) throws CudfException; static native long generateListOffsets(long handle) throws CudfException; static native long applyBooleanMask(long arrayColumnView, long booleanMaskHandle) throws CudfException; static native boolean hasNonEmptyNulls(long handle) throws CudfException; static native long purgeNonEmptyNulls(long handle) throws CudfException; /** * A utility class to create column vector like objects without refcounts and other APIs when * creating the device side vector from host side nested vectors. Eventually this can go away or * be refactored to hold less state like just the handles and the buffers to close. */ static class NestedColumnVector { private final DeviceMemoryBuffer data; private final DeviceMemoryBuffer valid; private final DeviceMemoryBuffer offsets; private final DType dataType; private final long rows; private final Optional<Long> nullCount; List<NestedColumnVector> children; private NestedColumnVector(DType type, long rows, Optional<Long> nullCount, DeviceMemoryBuffer data, DeviceMemoryBuffer valid, DeviceMemoryBuffer offsets, List<NestedColumnVector> children) { this.dataType = type; this.rows = rows; this.nullCount = nullCount; this.data = data; this.valid = valid; this.offsets = offsets; this.children = children; } /** * Returns a LIST ColumnVector, for now, after constructing the NestedColumnVector from the host side * nested Column Vector - children. This is used for host side to device side copying internally. 
* @param type top level dtype, which is LIST currently * @param rows top level number of rows in the LIST column * @param valid validity buffer * @param offsets offsets buffer * @param nullCount nullCount for the LIST column * @param child the host side nested column vector list * @return new ColumnVector of type LIST at the moment */ static ColumnVector createColumnVector(DType type, int rows, HostMemoryBuffer data, HostMemoryBuffer valid, HostMemoryBuffer offsets, Optional<Long> nullCount, List<HostColumnVectorCore> child) { List<NestedColumnVector> devChildren = new ArrayList<>(); for (HostColumnVectorCore c : child) { devChildren.add(createNewNestedColumnVector(c)); } int mainColRows = rows; DType mainColType = type; HostMemoryBuffer mainColValid = valid; HostMemoryBuffer mainColOffsets = offsets; DeviceMemoryBuffer mainDataDevBuff = null; DeviceMemoryBuffer mainValidDevBuff = null; DeviceMemoryBuffer mainOffsetsDevBuff = null; if (mainColValid != null) { long validLen = getValidityBufferSize(mainColRows); mainValidDevBuff = DeviceMemoryBuffer.allocate(validLen); mainValidDevBuff.copyFromHostBuffer(mainColValid, 0, validLen); } if (data != null) { long dataLen = data.length; mainDataDevBuff = DeviceMemoryBuffer.allocate(dataLen); mainDataDevBuff.copyFromHostBuffer(data, 0, dataLen); } if (mainColOffsets != null) { // The offset buffer has (no. of rows + 1) entries, where each entry is INT32.sizeInBytes long offsetsLen = OFFSET_SIZE * (((long)mainColRows) + 1); mainOffsetsDevBuff = DeviceMemoryBuffer.allocate(offsetsLen); mainOffsetsDevBuff.copyFromHostBuffer(mainColOffsets, 0, offsetsLen); } List<DeviceMemoryBuffer> toClose = new ArrayList<>(); long[] childHandles = new long[devChildren.size()]; try { for (ColumnView.NestedColumnVector ncv : devChildren) { toClose.addAll(ncv.getBuffersToClose()); } for (int i = 0; i < devChildren.size(); i++) { childHandles[i] = devChildren.get(i).createViewHandle(); } return new ColumnVector(mainColType, mainColRows, nullCount, mainDataDevBuff, mainValidDevBuff, mainOffsetsDevBuff, toClose, childHandles); } finally { for (int i = 0; i < childHandles.length; i++) { if (childHandles[i] != 0) { ColumnView.deleteColumnView(childHandles[i]); childHandles[i] = 0; } } } } private static NestedColumnVector createNewNestedColumnVector( HostColumnVectorCore nestedChildren) { if (nestedChildren == null) { return null; } DType colType = nestedChildren.getType(); Optional<Long> nullCount = Optional.of(nestedChildren.getNullCount()); long colRows = nestedChildren.getRowCount(); HostMemoryBuffer colData = nestedChildren.getNestedChildren().isEmpty() ? nestedChildren.getData() : null; HostMemoryBuffer colValid = nestedChildren.getValidity(); HostMemoryBuffer colOffsets = nestedChildren.getOffsets(); List<NestedColumnVector> children = new ArrayList<>(); for (HostColumnVectorCore nhcv : nestedChildren.getNestedChildren()) { children.add(createNewNestedColumnVector(nhcv)); } return createNestedColumnVector(colType, colRows, nullCount, colData, colValid, colOffsets, children); } private long createViewHandle() { long[] childrenColViews = null; try { if (children != null) { childrenColViews = new long[children.size()]; for (int i = 0; i < children.size(); i++) { childrenColViews[i] = children.get(i).createViewHandle(); } } long dataAddr = data == null ? 0 : data.address; long dataLen = data == null ? 0 : data.length; long offsetAddr = offsets == null ? 0 : offsets.address; long validAddr = valid == null ? 
0 : valid.address; int nc = nullCount.orElse(ColumnVector.OffHeapState.UNKNOWN_NULL_COUNT).intValue(); return makeCudfColumnView(dataType.typeId.getNativeId(), dataType.getScale(), dataAddr, dataLen, offsetAddr, validAddr, nc, (int) rows, childrenColViews); } finally { if (childrenColViews != null) { for (int i = 0; i < childrenColViews.length; i++) { if (childrenColViews[i] != 0) { deleteColumnView(childrenColViews[i]); childrenColViews[i] = 0; } } } } } List<DeviceMemoryBuffer> getBuffersToClose() { List<DeviceMemoryBuffer> buffers = new ArrayList<>(); if (children != null) { for (NestedColumnVector ncv : children) { buffers.addAll(ncv.getBuffersToClose()); } } if (data != null) { buffers.add(data); } if (valid != null) { buffers.add(valid); } if (offsets != null) { buffers.add(offsets); } return buffers; } private static long getEndStringOffset(long totalRows, long index, HostMemoryBuffer offsets) { assert index < totalRows; return offsets.getInt((index + 1) * 4); } private static NestedColumnVector createNestedColumnVector(DType type, long rows, Optional<Long> nullCount, HostMemoryBuffer dataBuffer, HostMemoryBuffer validityBuffer, HostMemoryBuffer offsetBuffer, List<NestedColumnVector> child) { DeviceMemoryBuffer data = null; DeviceMemoryBuffer valid = null; DeviceMemoryBuffer offsets = null; if (dataBuffer != null) { long dataLen = rows * type.getSizeInBytes(); if (type.equals(DType.STRING)) { // This needs a different type dataLen = getEndStringOffset(rows, rows - 1, offsetBuffer); if (dataLen == 0 && nullCount.get() == 0) { // This is a work around to an issue where a column of all empty strings must have at // least one byte or it will not be interpreted correctly. dataLen = 1; } } data = DeviceMemoryBuffer.allocate(dataLen); data.copyFromHostBuffer(dataBuffer, 0, dataLen); } if (validityBuffer != null) { long validLen = getValidityBufferSize((int)rows); valid = DeviceMemoryBuffer.allocate(validLen); valid.copyFromHostBuffer(validityBuffer, 0, validLen); } if (offsetBuffer != null) { long offsetsLen = OFFSET_SIZE * (rows + 1); offsets = DeviceMemoryBuffer.allocate(offsetsLen); offsets.copyFromHostBuffer(offsetBuffer, 0, offsetsLen); } NestedColumnVector ret = new NestedColumnVector(type, rows, nullCount, data, valid, offsets, child); return ret; } } ///////////////////////////////////////////////////////////////////////////// // DATA MOVEMENT ///////////////////////////////////////////////////////////////////////////// private static HostColumnVectorCore copyToHostNestedHelper( ColumnView deviceCvPointer, HostMemoryAllocator hostMemoryAllocator) { if (deviceCvPointer == null) { return null; } HostMemoryBuffer hostOffsets = null; HostMemoryBuffer hostValid = null; HostMemoryBuffer hostData = null; List<HostColumnVectorCore> children = new ArrayList<>(); BaseDeviceMemoryBuffer currData = null; BaseDeviceMemoryBuffer currOffsets = null; BaseDeviceMemoryBuffer currValidity = null; long currNullCount = 0l; boolean needsCleanup = true; try { long currRows = deviceCvPointer.getRowCount(); DType currType = deviceCvPointer.getType(); currData = deviceCvPointer.getData(); currOffsets = deviceCvPointer.getOffsets(); currValidity = deviceCvPointer.getValid(); if (currData != null) { hostData = hostMemoryAllocator.allocate(currData.length); hostData.copyFromDeviceBuffer(currData); } if (currValidity != null) { hostValid = hostMemoryAllocator.allocate(currValidity.length); hostValid.copyFromDeviceBuffer(currValidity); } if (currOffsets != null) { hostOffsets = 
hostMemoryAllocator.allocate(currOffsets.length); hostOffsets.copyFromDeviceBuffer(currOffsets); } int numChildren = deviceCvPointer.getNumChildren(); for (int i = 0; i < numChildren; i++) { try(ColumnView childDevPtr = deviceCvPointer.getChildColumnView(i)) { children.add(copyToHostNestedHelper(childDevPtr, hostMemoryAllocator)); } } currNullCount = deviceCvPointer.getNullCount(); Optional<Long> nullCount = Optional.of(currNullCount); HostColumnVectorCore ret = new HostColumnVectorCore(currType, currRows, nullCount, hostData, hostValid, hostOffsets, children); needsCleanup = false; return ret; } finally { if (currData != null) { currData.close(); } if (currOffsets != null) { currOffsets.close(); } if (currValidity != null) { currValidity.close(); } if (needsCleanup) { if (hostData != null) { hostData.close(); } if (hostOffsets != null) { hostOffsets.close(); } if (hostValid != null) { hostValid.close(); } } } } /** * Copy the data to the host. */ public HostColumnVector copyToHost(HostMemoryAllocator hostMemoryAllocator) { try (NvtxRange toHost = new NvtxRange("ensureOnHost", NvtxColor.BLUE)) { HostMemoryBuffer hostDataBuffer = null; HostMemoryBuffer hostValidityBuffer = null; HostMemoryBuffer hostOffsetsBuffer = null; BaseDeviceMemoryBuffer valid = getValid(); BaseDeviceMemoryBuffer offsets = getOffsets(); BaseDeviceMemoryBuffer data = null; DType type = this.type; long rows = this.rows; if (!type.isNestedType()) { data = getData(); } boolean needsCleanup = true; try { // We don't have a good way to tell if it is cached on the device or recalculate it on // the host for now, so take the hit here. getNullCount(); if (!type.isNestedType()) { if (valid != null) { hostValidityBuffer = hostMemoryAllocator.allocate(valid.getLength()); hostValidityBuffer.copyFromDeviceBuffer(valid); } if (offsets != null) { hostOffsetsBuffer = hostMemoryAllocator.allocate(offsets.length); hostOffsetsBuffer.copyFromDeviceBuffer(offsets); } // If a strings column is all null values there is no data buffer allocated if (data != null) { hostDataBuffer = hostMemoryAllocator.allocate(data.length); hostDataBuffer.copyFromDeviceBuffer(data); } HostColumnVector ret = new HostColumnVector(type, rows, Optional.of(nullCount), hostDataBuffer, hostValidityBuffer, hostOffsetsBuffer); needsCleanup = false; return ret; } else { if (data != null) { hostDataBuffer = hostMemoryAllocator.allocate(data.length); hostDataBuffer.copyFromDeviceBuffer(data); } if (valid != null) { hostValidityBuffer = hostMemoryAllocator.allocate(valid.getLength()); hostValidityBuffer.copyFromDeviceBuffer(valid); } if (offsets != null) { hostOffsetsBuffer = hostMemoryAllocator.allocate(offsets.getLength()); hostOffsetsBuffer.copyFromDeviceBuffer(offsets); } List<HostColumnVectorCore> children = new ArrayList<>(); for (int i = 0; i < getNumChildren(); i++) { try (ColumnView childDevPtr = getChildColumnView(i)) { children.add(copyToHostNestedHelper(childDevPtr, hostMemoryAllocator)); } } HostColumnVector ret = new HostColumnVector(type, rows, Optional.of(nullCount), hostDataBuffer, hostValidityBuffer, hostOffsetsBuffer, children); needsCleanup = false; return ret; } } finally { if (data != null) { data.close(); } if (offsets != null) { offsets.close(); } if (valid != null) { valid.close(); } if (needsCleanup) { if (hostOffsetsBuffer != null) { hostOffsetsBuffer.close(); } if (hostDataBuffer != null) { hostDataBuffer.close(); } if (hostValidityBuffer != null) { hostValidityBuffer.close(); } } } } } public HostColumnVector copyToHost() { return 
copyToHost(DefaultHostMemoryAllocator.get()); } /** * Calculate the total space required to copy the data to the host. This should be padded to * the alignment that the CPU requires. */ public long getHostBytesRequired() { return getDeviceMemorySize(getNativeView(), true); } /** * Get the size that the host will align memory allocations to in bytes. */ public static native long hostPaddingSizeInBytes(); /** * Exact check if a column or its descendants have non-empty null rows * * @return Whether the column or its descendants have non-empty null rows */ public boolean hasNonEmptyNulls() { return hasNonEmptyNulls(viewHandle); } /** * Copies this column into output while purging any non-empty null rows in the column or its * descendants. * * If this column is not of compound type (LIST/STRING/STRUCT/DICTIONARY), the output will be * the same as input. * * The purge operation only applies directly to LIST and STRING columns, but it applies indirectly * to STRUCT/DICTIONARY columns as well, since these columns may have child columns that * are LIST or STRING. * * Examples: * lists = data: [{{0,1}, {2,3}, {4,5}} validity: {true, false, true}] * lists[1] is null, but the list's child column still stores `{2,3}`. * * After purging the contents of the list's null rows, the column's contents will be: * lists = [data: {{0,1}, {4,5}} validity: {true, false, true}] * * @return A new column with equivalent contents to `input`, but with null rows purged */ public ColumnVector purgeNonEmptyNulls() { return new ColumnVector(purgeNonEmptyNulls(viewHandle)); } static ColumnView[] getColumnViewsFromPointers(long[] nativeHandles) { ColumnView[] columns = new ColumnView[nativeHandles.length]; try { for (int i = 0; i < nativeHandles.length; i++) { long nativeHandle = nativeHandles[i]; // setting address to zero, so we don't clean it in case of an exception as it // will be cleaned up by the constructor nativeHandles[i] = 0; columns[i] = new ColumnView(nativeHandle); } return columns; } catch (Throwable t) { try { cleanupColumnViews(nativeHandles, columns, t); } catch (Throwable s) { t.addSuppressed(s); } finally { throw t; } } } /** * Convert this integer column to hexadecimal column and return a new strings column * * Any null entries will result in corresponding null entries in the output column. * * The output character set is '0'-'9' and 'A'-'F'. The output string width will * be a multiple of 2 depending on the size of the integer type. A single leading * zero is applied to the first non-zero output byte if it is less than 0x10. * * Example: * input = [123, -1, 0, 27, 342718233] * s = input.toHex() * s is [ '04D2', 'FFFFFFFF', '00', '1B', '146D7719'] * * The example above shows an `INT32` type column where each integer is 4 bytes. * Leading zeros are suppressed unless filling out a complete byte as in * `123 -> '04D2'` instead of `000004D2` or `4D2`. * * @return new string ColumnVector */ public ColumnVector toHex() { assert getType().isIntegral() : "Only integers are supported"; return new ColumnVector(toHex(this.getNativeView())); } }
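A minimal usage sketch for the ColumnView APIs above (not part of the dumped file): it exercises toHex() and copyToHost() as defined here, and assumes the ColumnVector.fromInts factory and the HostColumnVector.getJavaString accessor from elsewhere in this package.

import ai.rapids.cudf.ColumnVector;
import ai.rapids.cudf.HostColumnVector;

public class ToHexExample {
  public static void main(String[] args) {
    try (ColumnVector input = ColumnVector.fromInts(123, -1, 0, 27);
         ColumnVector hex = input.toHex();            // INT32 values -> hex strings
         HostColumnVector host = hex.copyToHost()) {  // copy the strings back to the CPU
      for (long i = 0; i < host.getRowCount(); i++) {
        System.out.println(host.getJavaString(i));    // e.g. "04D2" for 123
      }
    }
  }
}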
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CuFileHandle.java
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Represents a cuFile file handle. */ abstract class CuFileHandle implements AutoCloseable { private final CuFileResourceCleaner cleaner; static { CuFile.initialize(); } protected CuFileHandle(long pointer) { cleaner = new CuFileResourceCleaner(pointer, CuFileHandle::destroy); MemoryCleaner.register(this, cleaner); } @Override public void close() { cleaner.close(this); } protected long getPointer() { return cleaner.getPointer(); } private static native void destroy(long pointer); }
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/OutOfBoundsPolicy.java
/* * * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Policy to account for possible out-of-bounds indices * * `NULLIFY` means to nullify output values corresponding to out-of-bounds gather map values. * * `DONT_CHECK` means do not check whether the indices are out-of-bounds, for better * performance. Use `DONT_CHECK` carefully, as it can result in a CUDA exception if * the gather map values are actually out of range. * * @note This enum doesn't have a nativeId because the C++ out_of_bounds_policy is * a boolean enum. It is just added for clarity in the Java API. */ public enum OutOfBoundsPolicy { /* Output values corresponding to out-of-bounds indices are null */ NULLIFY, /* No bounds checking is performed, better performance */ DONT_CHECK }
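A hedged sketch of where OutOfBoundsPolicy is consumed; Table.gather(ColumnView, OutOfBoundsPolicy) and ColumnVector.fromInts are assumed from the wider API of this package.

import ai.rapids.cudf.ColumnVector;
import ai.rapids.cudf.OutOfBoundsPolicy;
import ai.rapids.cudf.Table;

public class GatherPolicyExample {
  public static void main(String[] args) {
    try (ColumnVector data = ColumnVector.fromInts(10, 20, 30);
         Table t = new Table(data);
         // Index 5 is out of bounds for a 3-row table; NULLIFY yields a null row
         // where DONT_CHECK could instead trigger a CUDA error.
         ColumnVector gatherMap = ColumnVector.fromInts(0, 2, 5);
         Table gathered = t.gather(gatherMap, OutOfBoundsPolicy.NULLIFY)) {
      System.out.println(gathered.getRowCount()); // 3, with the last row null
    }
  }
}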
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ParquetOptions.java
/* * * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.util.ArrayList; import java.util.Collection; import java.util.List; /** * Options for reading a Parquet file */ public class ParquetOptions extends ColumnFilterOptions { public static ParquetOptions DEFAULT = new ParquetOptions(new Builder()); private final DType unit; private final boolean[] readBinaryAsString; private ParquetOptions(Builder builder) { super(builder); unit = builder.unit; readBinaryAsString = new boolean[builder.binaryAsStringColumns.size()]; for (int i = 0 ; i < builder.binaryAsStringColumns.size() ; i++) { readBinaryAsString[i] = builder.binaryAsStringColumns.get(i); } } DType timeUnit() { return unit; } boolean[] getReadBinaryAsString() { return readBinaryAsString; } public static ParquetOptions.Builder builder() { return new Builder(); } public static class Builder extends ColumnFilterOptions.Builder<Builder> { private DType unit = DType.EMPTY; final List<Boolean> binaryAsStringColumns = new ArrayList<>(); /** * Specify the time unit to use when returning timestamps. * @param unit default unit of time specified by the user * @return builder for chaining */ public Builder withTimeUnit(DType unit) { assert unit.isTimestampType(); this.unit = unit; return this; } /** * Include one or more specific columns. Any column not included will not be read. * @param names the names of the columns to include */ @Override public Builder includeColumn(String... names) { super.includeColumn(names); for (int i = 0 ; i < names.length ; i++) { binaryAsStringColumns.add(true); } return this; } /** * Include this column. * @param name the name of the column * @param isBinary whether this column is to be read in as binary */ public Builder includeColumn(String name, boolean isBinary) { includeColumnNames.add(name); binaryAsStringColumns.add(!isBinary); return this; } /** * Include one or more specific columns. Any column not included will not be read. * @param names the names of the columns to include */ @Override public Builder includeColumn(Collection<String> names) { super.includeColumn(names); for (int i = 0 ; i < names.size() ; i++) { binaryAsStringColumns.add(true); } return this; } public ParquetOptions build() { return new ParquetOptions(this); } } }
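A short builder sketch for the options above; the file path and column names are placeholders, and Table.readParquet(ParquetOptions, File) is assumed from the wider API.

import java.io.File;
import ai.rapids.cudf.DType;
import ai.rapids.cudf.ParquetOptions;
import ai.rapids.cudf.Table;

public class ParquetOptionsExample {
  public static void main(String[] args) {
    ParquetOptions opts = ParquetOptions.builder()
        .includeColumn("id")                         // read normally (binary read as string)
        .includeColumn("payload", true)              // keep this column as binary
        .withTimeUnit(DType.TIMESTAMP_MICROSECONDS)  // unit for returned timestamps
        .build();
    try (Table t = Table.readParquet(opts, new File("/tmp/example.parquet"))) {
      System.out.println(t.getRowCount());
    }
  }
}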
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/QuoteStyle.java
/* * * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Quote style for CSV records, closely following cudf::io::quote_style. */ public enum QuoteStyle { MINIMAL(0), // Quote only fields which contain special characters ALL(1), // Quote all fields NONNUMERIC(2), // Quote all non-numeric fields NONE(3); // Never quote fields; disable quotation parsing final int nativeId; // Native id, for use with libcudf. QuoteStyle(int nativeId) { this.nativeId = nativeId; } }
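A hedged sketch of how the enum is typically wired up when writing CSV; the withQuoteStyle builder method on CSVWriterOptions is an assumption about the surrounding API, not something defined in this file.

import ai.rapids.cudf.CSVWriterOptions;
import ai.rapids.cudf.QuoteStyle;

public class QuoteStyleExample {
  public static void main(String[] args) {
    // Quote every field on output instead of only the ones containing
    // special characters (MINIMAL, the usual default).
    CSVWriterOptions opts = CSVWriterOptions.builder()
        .withQuoteStyle(QuoteStyle.ALL) // assumed builder method
        .build();
    System.out.println(opts);
  }
}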
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ArrowColumnBuilder.java
/* * * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.nio.ByteBuffer; import java.util.ArrayList; /** * Column builder from Arrow data. This builder takes in byte buffers referencing * Arrow data and allows efficient building of CUDF ColumnVectors from that Arrow data. * The caller can add multiple batches where each batch corresponds to Arrow data * and those batches get concatenated together after being converted to CUDF * ColumnVectors. * This currently only supports primitive types and Strings; Decimals and nested types * such as list and struct are not supported. */ public final class ArrowColumnBuilder implements AutoCloseable { private DType type; private final ArrayList<ByteBuffer> data = new ArrayList<>(); private final ArrayList<ByteBuffer> validity = new ArrayList<>(); private final ArrayList<ByteBuffer> offsets = new ArrayList<>(); private final ArrayList<Long> nullCount = new ArrayList<>(); private final ArrayList<Long> rows = new ArrayList<>(); public ArrowColumnBuilder(HostColumnVector.DataType type) { this.type = type.getType(); } /** * Add an Arrow buffer. This API allows you to add multiple batches if you want them * combined into a single ColumnVector. * Note, this takes all data, validity, and offsets buffers, but they may not all * be needed based on the data type. The buffer should be null if it's not used * for that type. * This API only supports primitive types and Strings; Decimals and nested types * such as list and struct are not supported. * @param rows - number of rows in this Arrow buffer * @param nullCount - number of null values in this Arrow buffer * @param data - ByteBuffer of the Arrow data buffer * @param validity - ByteBuffer of the Arrow validity buffer * @param offsets - ByteBuffer of the Arrow offsets buffer */ public void addBatch(long rows, long nullCount, ByteBuffer data, ByteBuffer validity, ByteBuffer offsets) { this.rows.add(rows); this.nullCount.add(nullCount); this.data.add(data); this.validity.add(validity); this.offsets.add(offsets); } /** * Create the immutable ColumnVector, copied to the device based on the Arrow data.
* @return - new ColumnVector */ public final ColumnVector buildAndPutOnDevice() { int numBatches = rows.size(); ArrayList<ColumnVector> allVecs = new ArrayList<>(numBatches); ColumnVector vecRet; try { for (int i = 0; i < numBatches; i++) { allVecs.add(ColumnVector.fromArrow(type, rows.get(i), nullCount.get(i), data.get(i), validity.get(i), offsets.get(i))); } if (numBatches == 1) { vecRet = allVecs.get(0); } else if (numBatches > 1) { vecRet = ColumnVector.concatenate(allVecs.toArray(new ColumnVector[0])); } else { throw new IllegalStateException("Can't build a ColumnVector when no Arrow batches specified"); } } finally { // close the vectors that were concatenated if (numBatches > 1) { allVecs.forEach(cv -> cv.close()); } } return vecRet; } @Override public void close() { // memory buffers owned outside of this } @Override public String toString() { return "ArrowColumnBuilder{" + "type=" + type + ", data=" + data + ", validity=" + validity + ", offsets=" + offsets + ", nullCount=" + nullCount + ", rows=" + rows + '}'; } }
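A usage sketch for the builder above with a single INT32 batch; Arrow buffers must be direct ByteBuffers, and HostColumnVector.BasicType is assumed from elsewhere in this package.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import ai.rapids.cudf.ArrowColumnBuilder;
import ai.rapids.cudf.ColumnVector;
import ai.rapids.cudf.DType;
import ai.rapids.cudf.HostColumnVector;

public class ArrowColumnBuilderExample {
  public static void main(String[] args) {
    // Four little-endian INT32 values in a direct buffer, Arrow style, no nulls.
    ByteBuffer data = ByteBuffer.allocateDirect(16).order(ByteOrder.LITTLE_ENDIAN);
    data.putInt(1).putInt(2).putInt(3).putInt(4);
    try (ArrowColumnBuilder builder =
             new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.INT32))) {
      builder.addBatch(4, 0, data, null, null); // validity/offsets unused for INT32
      try (ColumnVector cv = builder.buildAndPutOnDevice()) {
        System.out.println(cv.getRowCount()); // 4
      }
    }
  }
}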
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CudaMemInfo.java
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Represent free and total device memory. */ public class CudaMemInfo { /** * free memory in bytes */ public final long free; /** * total memory in bytes */ public final long total; CudaMemInfo(long free, long total) { this.free = free; this.total = total; } }
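A tiny sketch of how this class is obtained; Cuda.memGetInfo() is assumed to be the accessor that fills it in.

import ai.rapids.cudf.Cuda;
import ai.rapids.cudf.CudaMemInfo;

public class MemInfoExample {
  public static void main(String[] args) {
    CudaMemInfo info = Cuda.memGetInfo(); // assumed accessor on the Cuda class
    System.out.println("free: " + info.free + " bytes, total: " + info.total + " bytes");
  }
}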
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostMemoryBuffer.java
/* * * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.EOFException; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.FileChannel.MapMode; /** * This class holds an off-heap buffer in the host/CPU memory. * Please note that instances must be explicitly closed or native memory will be leaked! * * Internally this class will try to use PinnedMemoryPool to allocate and free the memory * it uses by default. To avoid using the pinned memory pool for allocations by default * set the Java system property ai.rapids.cudf.prefer-pinned to false. * * Be aware that the off heap memory limits set by Java do not apply to these buffers. */ public class HostMemoryBuffer extends MemoryBuffer { private static final boolean defaultPreferPinned; private static final Logger log = LoggerFactory.getLogger(HostMemoryBuffer.class); static { boolean preferPinned = true; String propString = System.getProperty("ai.rapids.cudf.prefer-pinned"); if (propString != null) { preferPinned = Boolean.parseBoolean(propString); } defaultPreferPinned = preferPinned; } private static final class HostBufferCleaner extends MemoryBufferCleaner { private long address; private final long length; HostBufferCleaner(long address, long length) { this.address = address; this.length = length; } @Override protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) { boolean neededCleanup = false; long origAddress = address; if (address != 0) { try { UnsafeMemoryAccessor.free(address); } finally { // Always mark the resource as freed even if an exception is thrown. // We cannot know how far it progressed before the exception, and // therefore it is unsafe to retry. address = 0; } neededCleanup = true; } if (neededCleanup && logErrorIfNotClean) { log.error("A HOST BUFFER WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")"); logRefCountDebug("Leaked host buffer"); } return neededCleanup; } @Override public boolean isClean() { return address == 0; } } private static final class MmapCleaner extends MemoryBufferCleaner { private long address; private final long length; MmapCleaner(long address, long length) { this.address = address; this.length = length; } @Override protected boolean cleanImpl(boolean logErrorIfNotClean) { boolean neededCleanup = false; if (address != 0) { try { HostMemoryBufferNativeUtils.munmap(address, length); } finally { // Always mark the resource as freed even if an exception is thrown. // We cannot know how far it progressed before the exception, and // therefore it is unsafe to retry. 
address = 0; } neededCleanup = true; } if (neededCleanup && logErrorIfNotClean) { log.error("A MEMORY MAPPED BUFFER WAS LEAKED!!!!"); logRefCountDebug("Leaked mmap buffer"); } return neededCleanup; } @Override public boolean isClean() { return address == 0; } } /** * Allocate memory, but be sure to close the returned buffer to avoid memory leaks. * @param bytes size in bytes to allocate * @param preferPinned If set to true, the pinned memory pool will be used if possible with a * fallback to off-heap memory. If set to false, the allocation will always * be from off-heap memory. * @return the newly created buffer */ public static HostMemoryBuffer allocate(long bytes, boolean preferPinned) { if (preferPinned) { HostMemoryBuffer pinnedBuffer = PinnedMemoryPool.tryAllocate(bytes); if (pinnedBuffer != null) { return pinnedBuffer; } } return new HostMemoryBuffer(UnsafeMemoryAccessor.allocate(bytes), bytes); } /** * Allocate memory, but be sure to close the returned buffer to avoid memory leaks. Pinned memory * will be preferred for allocations if the java system property ai.rapids.cudf.prefer-pinned is * set to true. * @param bytes size in bytes to allocate * @return the newly created buffer */ public static HostMemoryBuffer allocate(long bytes) { return allocate(bytes, defaultPreferPinned); } /** * Create a host buffer that is memory-mapped to a file. * @param path path to the file to map into host memory * @param mode mapping type * @param offset file offset where the map will start * @param length the number of bytes to map * @return file-mapped buffer */ public static HostMemoryBuffer mapFile(File path, MapMode mode, long offset, long length) throws IOException { // mapping offset must be a multiple of the system page size long offsetDelta = offset & (UnsafeMemoryAccessor.pageSize() - 1); long address; try { address = HostMemoryBufferNativeUtils.mmap(path.getPath(), modeAsInt(mode), offset - offsetDelta, length + offsetDelta); } catch (IOException e) { throw new IOException("Error creating memory map for " + path, e); } return new HostMemoryBuffer(address + offsetDelta, length, new MmapCleaner(address, length + offsetDelta)); } private static int modeAsInt(MapMode mode) { if (MapMode.READ_ONLY.equals(mode)) { return 0; } else if (MapMode.READ_WRITE.equals(mode)) { return 1; } else { throw new UnsupportedOperationException("Unsupported mapping mode: " + mode); } } HostMemoryBuffer(long address, long length) { this(address, length, new HostBufferCleaner(address, length)); } HostMemoryBuffer(long address, long length, MemoryBufferCleaner cleaner) { super(address, length, cleaner); } private HostMemoryBuffer(long address, long lengthInBytes, HostMemoryBuffer parent) { super(address, lengthInBytes, parent); // This is a slice so we are not going to mark it as allocated } /** * Return a ByteBuffer that provides access to the underlying memory. Please note: if the buffer * is larger than a ByteBuffer can handle (2GB) an exception will be thrown. Also * be aware that the ByteBuffer will be in native endian order, which is different from regular * ByteBuffers that are big endian by default. */ public final ByteBuffer asByteBuffer() { assert length <= Integer.MAX_VALUE : "2GB limit on ByteBuffers"; return asByteBuffer(0, (int) length); } /** * Return a ByteBuffer that provides access to the underlying memory. Be aware that the * ByteBuffer will be in native endian order, which is different from regular * ByteBuffers that are big endian by default. 
* @param offset the offset to start at * @param length how many bytes to include. */ public final ByteBuffer asByteBuffer(long offset, int length) { addressOutOfBoundsCheck(address + offset, length, "asByteBuffer"); return HostMemoryBufferNativeUtils.wrapRangeInBuffer(address + offset, length) .order(ByteOrder.nativeOrder()); } /** * Copy the contents of the given buffer to this buffer * @param destOffset offset in bytes in this buffer to start copying to * @param srcData Buffer to be copied from * @param srcOffset offset in bytes to start copying from in srcData * @param length number of bytes to copy */ public final void copyFromHostBuffer(long destOffset, HostMemoryBuffer srcData, long srcOffset, long length) { addressOutOfBoundsCheck(address + destOffset, length, "copy from dest"); srcData.addressOutOfBoundsCheck(srcData.address + srcOffset, length, "copy from source"); UnsafeMemoryAccessor.copyMemory(null, srcData.address + srcOffset, null, address + destOffset, length); } /** * Copy len bytes from in to this buffer. * @param destOffset offset in bytes in this buffer to start copying to * @param in input stream to copy bytes from * @param byteLength number of bytes to copy */ final void copyFromStream(long destOffset, InputStream in, long byteLength) throws IOException { addressOutOfBoundsCheck(address + destOffset, byteLength, "copy from stream"); byte[] arrayBuffer = new byte[(int) Math.min(1024 * 128, byteLength)]; long left = byteLength; while (left > 0) { int amountToCopy = (int) Math.min(arrayBuffer.length, left); int amountRead = in.read(arrayBuffer, 0, amountToCopy); if (amountRead < 0) { throw new EOFException(); } setBytes(destOffset, arrayBuffer, 0, amountRead); destOffset += amountRead; left -= amountRead; } } /** * Returns the byte value at that offset * @param offset - offset from the address * @return - value */ public final byte getByte(long offset) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 1, "getByte"); return UnsafeMemoryAccessor.getByte(requestedAddress); } /** * Sets the byte value at that offset * @param offset - offset from the address * @param value - value to be set */ public final void setByte(long offset, byte value) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 1, "setByte"); UnsafeMemoryAccessor.setByte(requestedAddress, value); } /** * Copy a set of bytes to an array from the buffer starting at offset. * @param dst destination byte array * @param dstOffset starting offset within the destination array * @param srcOffset starting offset within this buffer * @param len number of bytes to copy */ public final void getBytes(byte[] dst, long dstOffset, long srcOffset, long len) { assert len >= 0; assert len <= dst.length - dstOffset; assert srcOffset >= 0; long requestedAddress = this.address + srcOffset; addressOutOfBoundsCheck(requestedAddress, len, "getBytes"); UnsafeMemoryAccessor.getBytes(dst, dstOffset, requestedAddress, len); } /** * Copy a set of bytes from an array into the buffer at offset. * @param offset the offset from the address to start copying to * @param data the data to be copied. 
*/ public final void setBytes(long offset, byte[] data, long srcOffset, long len) { assert len >= 0 : "length is not allowed " + len; assert len <= data.length - srcOffset; assert srcOffset >= 0; long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, len, "setBytes"); UnsafeMemoryAccessor.setBytes(requestedAddress, data, srcOffset, len); } /** * Returns the Short value at that offset * @param offset - offset from the address * @return - value */ public final short getShort(long offset) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 2, "getShort"); return UnsafeMemoryAccessor.getShort(requestedAddress); } /** * Sets the Short value at that offset * @param offset - offset from the address * @param value - value to be set */ public final void setShort(long offset, short value) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 2, "setShort"); UnsafeMemoryAccessor.setShort(requestedAddress, value); } /** * Copy a set of shorts from an array into the buffer at offset. * @param offset the offset from the address to start copying to * @param data the data to be copied. * @param srcOffset index in data to start at. */ public final void setShorts(long offset, short[] data, long srcOffset, long len) { assert len >= 0 : "length is not allowed " + len; assert len <= data.length - srcOffset; long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, len * 2, "setShorts"); UnsafeMemoryAccessor.setShorts(requestedAddress, data, srcOffset, len); } /** * Returns the Integer value at that offset * @param offset - offset from the address * @return - value */ public final int getInt(long offset) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 4, "getInt"); return UnsafeMemoryAccessor.getInt(requestedAddress); } /** * Sets the Integer value at that offset * @param offset - offset from the address * @param value - value to be set */ public final void setInt(long offset, int value) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 4, "setInt"); UnsafeMemoryAccessor.setInt(requestedAddress, value); } /** * Copy a set of ints from an array into the buffer at offset. * @param offset the offset from the address to start copying to * @param data the data to be copied. * @param srcOffset index into data to start at */ public final void setInts(long offset, int[] data, long srcOffset, long len) { assert len >= 0 : "length is not allowed " + len; assert len <= data.length - srcOffset; long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, len * 4, "setInts"); UnsafeMemoryAccessor.setInts(requestedAddress, data, srcOffset, len); } /** * Returns the Long value at that offset * @param offset - offset from the address * @return - value */ public final long getLong(long offset) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 8, "getLong"); return UnsafeMemoryAccessor.getLong(requestedAddress); } /** * Sets the Long value at that offset * @param offset - offset from the address * @param value - value to be set */ public final void setLong(long offset, long value) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 8, "setLong"); UnsafeMemoryAccessor.setLong(requestedAddress, value); } /** * Copy a set of longs to an array from the buffer starting at offset. 
* @param dst destination long array * @param dstIndex starting index within the destination array * @param srcOffset starting offset within this buffer * @param count number of longs to copy */ public final void getLongs(long[] dst, long dstIndex, long srcOffset, int count) { assert count >= 0; assert count <= dst.length - dstIndex; assert srcOffset >= 0; long requestedAddress = this.address + srcOffset; addressOutOfBoundsCheck(requestedAddress, count * 8L, "getLongs"); UnsafeMemoryAccessor.getLongs(dst, dstIndex, requestedAddress, count); } /** * Copy a set of longs from an array into the buffer at offset. * @param offset the offset from the address to start copying to * @param data the data to be copied. * @param srcOffset index into data to start at. */ public final void setLongs(long offset, long[] data, long srcOffset, long len) { assert len >= 0 : "length is not allowed " + len; assert len <= data.length - srcOffset; long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, len * 8, "setLongs"); UnsafeMemoryAccessor.setLongs(requestedAddress, data, srcOffset, len); } /** * Returns the Float value at that offset * @param offset - offset from the address * @return - value */ public final float getFloat(long offset) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 4, "getFloat"); return UnsafeMemoryAccessor.getFloat(requestedAddress); } /** * Sets the Float value at that offset * @param offset - offset from the address * @param value - value to be set */ public final void setFloat(long offset, float value) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 4, "setFloat"); UnsafeMemoryAccessor.setFloat(requestedAddress, value); } /** * Copy a set of floats from an array into the buffer at offset. * @param offset the offset from the address to start copying to * @param data the data to be copied. * @param srcOffset index into data to start at */ public final void setFloats(long offset, float[] data, long srcOffset, long len) { assert len >= 0 : "length is not allowed " + len; assert len <= data.length - srcOffset; long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, len * 4, "setFloats"); UnsafeMemoryAccessor.setFloats(requestedAddress, data, srcOffset, len); } /** * Returns the Double value at that offset * @param offset - offset from the address * @return - value */ public final double getDouble(long offset) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 8, "getDouble"); return UnsafeMemoryAccessor.getDouble(requestedAddress); } /** * Sets the Double value at that offset * @param offset - offset from the address * @param value - value to be set */ public final void setDouble(long offset, double value) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 8, "setDouble"); UnsafeMemoryAccessor.setDouble(requestedAddress, value); } /** * Copy a set of doubles from an array into the buffer at offset. * @param offset the offset from the address to start copying to * @param data the data to be copied. 
* @param srcOffset index into data to start at */ public final void setDoubles(long offset, double[] data, long srcOffset, long len) { assert len >= 0 : "length is not allowed " + len; assert len <= data.length - srcOffset; long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, len * 8, "setDoubles"); UnsafeMemoryAccessor.setDoubles(requestedAddress, data, srcOffset, len); } /** * Returns the Boolean value at that offset * @param offset - offset from the address * @return - value */ public final boolean getBoolean(long offset) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 1, "getBoolean"); return UnsafeMemoryAccessor.getBoolean(requestedAddress); } /** * Sets the Boolean value at that offset * @param offset - offset from the address * @param value - value to be set */ public final void setBoolean(long offset, boolean value) { long requestedAddress = this.address + offset; addressOutOfBoundsCheck(requestedAddress, 1, "setBoolean"); UnsafeMemoryAccessor.setBoolean(requestedAddress, value); } /** * Sets the values in this buffer repeatedly * @param offset - offset from the address * @param length - number of bytes to set * @param value - value to be set */ public final void setMemory(long offset, long length, byte value) { addressOutOfBoundsCheck(address + offset, length, "set memory"); UnsafeMemoryAccessor.setMemory(address + offset, length, value); } final void copyFromMemory(long fromAddress, long len) { addressOutOfBoundsCheck(address, len, "copy from memory"); UnsafeMemoryAccessor.copyMemory(null, fromAddress, null, address, len); } /** * Copy data from this buffer to the given address. * @param toAddress where the data should go * @param len how much data to copy */ final void copyToMemory(long toAddress, long len) { addressOutOfBoundsCheck(address, len, "copy to memory"); UnsafeMemoryAccessor.copyMemory(null, address, null, toAddress, len); } /** * Synchronously copy from a DeviceMemoryBuffer to a HostMemoryBuffer * @param deviceMemoryBuffer buffer to copy data from */ public final void copyFromDeviceBuffer(BaseDeviceMemoryBuffer deviceMemoryBuffer) { addressOutOfBoundsCheck(address, deviceMemoryBuffer.length, "copy range dest"); assert !deviceMemoryBuffer.closed; Cuda.memcpy(address, deviceMemoryBuffer.address, deviceMemoryBuffer.length, CudaMemcpyKind.DEVICE_TO_HOST); } /** * Copy from a DeviceMemoryBuffer to a HostMemoryBuffer using the specified stream. * The copy has completed when this returns, but the memory copy could overlap with * operations occurring on other streams. * @param deviceMemoryBuffer buffer to copy data from * @param stream CUDA stream to use */ public final void copyFromDeviceBuffer(BaseDeviceMemoryBuffer deviceMemoryBuffer, Cuda.Stream stream) { addressOutOfBoundsCheck(address, deviceMemoryBuffer.length, "copy range dest"); assert !deviceMemoryBuffer.closed; Cuda.memcpy(address, deviceMemoryBuffer.address, deviceMemoryBuffer.length, CudaMemcpyKind.DEVICE_TO_HOST, stream); } /** * Copy from a DeviceMemoryBuffer to a HostMemoryBuffer using the specified stream. * The copy is async and may not have completed when this returns. 
* @param deviceMemoryBuffer buffer to copy data from * @param stream CUDA stream to use */ public final void copyFromDeviceBufferAsync(BaseDeviceMemoryBuffer deviceMemoryBuffer, Cuda.Stream stream) { addressOutOfBoundsCheck(address, deviceMemoryBuffer.length, "copy range dest"); assert !deviceMemoryBuffer.closed; Cuda.asyncMemcpy(address, deviceMemoryBuffer.address, deviceMemoryBuffer.length, CudaMemcpyKind.DEVICE_TO_HOST, stream); } /** * Slice off a part of the host buffer. * @param offset where to start the slice at. * @param len how many bytes to slice * @return a host buffer that will need to be closed independently from this buffer. */ @Override public final synchronized HostMemoryBuffer slice(long offset, long len) { addressOutOfBoundsCheck(address + offset, len, "slice"); refCount++; cleaner.addRef(); return new HostMemoryBuffer(address + offset, len, this); } /** * Slice off a part of the host buffer, actually making a copy of the data. * @param offset where to start the slice at. * @param len how many bytes to slice * @return a host buffer that will need to be closed independently from this buffer. */ public final HostMemoryBuffer sliceWithCopy(long offset, long len) { addressOutOfBoundsCheck(address + offset, len, "slice"); HostMemoryBuffer ret = null; boolean success = false; try { ret = allocate(len); UnsafeMemoryAccessor.copyMemory(null, address + offset, null, ret.getAddress(), len); success = true; return ret; } finally { if (!success && ret != null) { ret.close(); } } } /** * WARNING: Debug only method to print buffer. Does not work for buffers over 2GB. */ public void printBuffer() { printBuffer(5); } /** * WARNING: Debug only method to print buffer. Does not work for buffers over 2GB. * @param wordsPerRow the number of 32 bit works to print per row. */ public void printBuffer(int wordsPerRow) { final int bytesPerWord = 4; final int bytesPerRow = bytesPerWord * wordsPerRow; assert (length == (int)length) : "The buffer is too large to be printed"; byte[] data = new byte[(int)length]; System.out.println("BUFFER length = " + data.length); getBytes(data, 0, 0, length); for (int i = 0; i < data.length; i++) { if (i % bytesPerWord == 0) { if (i % bytesPerRow == 0) { System.out.println(); } else { System.out.print(" "); } } System.out.print(String.format("%02x",((long)data[i]) & 0xFF)); } System.out.println(); } }
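A minimal round-trip sketch using only APIs defined in the file above: allocate, write, read, slice, and let try-with-resources release everything.

import ai.rapids.cudf.HostMemoryBuffer;

public class HostMemoryBufferExample {
  public static void main(String[] args) {
    // Allocates 16 bytes (pinned if the pool has room), writes a long and an int.
    try (HostMemoryBuffer buf = HostMemoryBuffer.allocate(16)) {
      buf.setLong(0, 42L);
      buf.setInt(8, 7);
      System.out.println(buf.getLong(0)); // 42
      // Slices share the parent's memory and must be closed independently.
      try (HostMemoryBuffer slice = buf.slice(8, 4)) {
        System.out.println(slice.getInt(0)); // 7
      }
    }
  }
}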
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CompressedMetadataWriterOptions.java
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; class CompressedMetadataWriterOptions extends WriterOptions { private final CompressionType compressionType; private final Map<String, String> metadata; <T extends CMWriterBuilder> CompressedMetadataWriterOptions(T builder) { super(builder); compressionType = builder.compressionType; metadata = Collections.unmodifiableMap(builder.metadata); } public CompressionType getCompressionType() { return compressionType; } public Map<String, String> getMetadata() { return metadata; } String[] getMetadataKeys() { return metadata.keySet().toArray(new String[metadata.size()]); } String[] getMetadataValues() { return metadata.values().toArray(new String[metadata.size()]); } protected static class CMWriterBuilder<T extends CMWriterBuilder> extends WriterBuilder<T> { final Map<String, String> metadata = new LinkedHashMap<>(); CompressionType compressionType = CompressionType.AUTO; /** * Add a metadata key and a value * @param key the metadata key * @param value the metadata value */ public T withMetadata(String key, String value) { this.metadata.put(key, value); return (T) this; } /** * Add a map of metadata keys and values * @param metadata the metadata keys and values to add */ public T withMetadata(Map<String, String> metadata) { this.metadata.putAll(metadata); return (T) this; } /** * Set the compression type to use for writing * @param compression the compression type to use */ public T withCompressionType(CompressionType compression) { this.compressionType = compression; return (T) this; } } }
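A hedged sketch of how these options surface to users; it assumes ParquetWriterOptions extends this class and inherits the builder methods, as the writer options elsewhere in this repo do.

import ai.rapids.cudf.CompressionType;
import ai.rapids.cudf.ParquetWriterOptions;

public class WriterOptionsExample {
  public static void main(String[] args) {
    // ParquetWriterOptions is assumed to extend CompressedMetadataWriterOptions.
    ParquetWriterOptions opts = ParquetWriterOptions.builder()
        .withCompressionType(CompressionType.SNAPPY)   // inherited from CMWriterBuilder
        .withMetadata("created_by", "example")         // key/value file-level metadata
        .build();
    System.out.println(opts.getCompressionType()); // SNAPPY
  }
}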
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CaptureGroups.java
/* * * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Capture groups setting, closely following cudf::strings::capture_groups. * * For processing a regex pattern containing capture groups. These can be used * to optimize the generated regex instructions where the capture groups do not * require extracting the groups. */ public enum CaptureGroups { EXTRACT(0), // capture groups processed normally for extract NON_CAPTURE(1); // convert all capture groups to non-capture groups final int nativeId; // Native id, for use with libcudf. private CaptureGroups(int nativeId) { // Only constant values should be used this.nativeId = nativeId; } }
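A hedged sketch pairing this enum with a compiled regex; the RegexProgram constructor taking a CaptureGroups and the ColumnView.containsRe overload are assumed from the wider API.

import ai.rapids.cudf.CaptureGroups;
import ai.rapids.cudf.ColumnVector;
import ai.rapids.cudf.RegexProgram;

public class CaptureGroupsExample {
  public static void main(String[] args) {
    // The group only exists for alternation, not extraction, so converting it
    // to a non-capture group lets the regex compiler generate fewer instructions.
    RegexProgram prog = new RegexProgram("(ab|cd)+", CaptureGroups.NON_CAPTURE);
    try (ColumnVector strs = ColumnVector.fromStrings("abab", "xyz");
         ColumnVector matches = strs.containsRe(prog)) {
      System.out.println(matches.getRowCount()); // 2 -> [true, false]
    }
  }
}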
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/NvtxColor.java
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; public enum NvtxColor { GREEN(0xff00ff00), BLUE(0xff0000ff), YELLOW(0xffffff00), PURPLE(0xffff00ff), CYAN(0xff00ffff), RED(0xffff0000), WHITE(0xffffffff), DARK_GREEN(0xff006600), ORANGE(0xffffa500); final int colorBits; NvtxColor(int colorBits) { this.colorBits = colorBits; } }
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CuFileDriver.java
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Represents a cuFile driver. */ final class CuFileDriver implements AutoCloseable { private final CuFileResourceCleaner cleaner; CuFileDriver() { cleaner = new CuFileResourceCleaner(create(), CuFileDriver::destroy); MemoryCleaner.register(this, cleaner); } @Override public void close() { cleaner.close(this); } private static native long create(); private static native void destroy(long pointer); }
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/TableWithMeta.java
/* * * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * A table along with some metadata about the table. This is typically returned when * reading data from an input file where the metadata can be important. */ public class TableWithMeta implements AutoCloseable { private long handle; TableWithMeta(long handle) { this.handle = handle; } /** * Get the table out of this metadata. Note that this can only be called once. Later calls * will return null. */ public Table releaseTable() { long[] ptr = releaseTable(handle); if (ptr == null) { return null; } else { return new Table(ptr); } } /** * Get the names of the top level columns. In the future new APIs can be added to get * names of child columns. */ public String[] getColumnNames() { return getColumnNames(handle); } @Override public void close() { if (handle != 0) { close(handle); handle = 0; } } private static native void close(long handle); private static native long[] releaseTable(long handle); private static native String[] getColumnNames(long handle); }
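A small helper sketch built only from the class above; it works with any reader in this package that returns a TableWithMeta.

import ai.rapids.cudf.Table;
import ai.rapids.cudf.TableWithMeta;

public class TableWithMetaExample {
  // Logs the top-level column names and hands back the table itself.
  static Table unwrap(TableWithMeta twm) {
    try (TableWithMeta toClose = twm) {
      for (String name : toClose.getColumnNames()) {
        System.out.println("column: " + name);
      }
      return toClose.releaseTable(); // only the first call returns a table
    }
  }
}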
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RegexFlag.java
/* * * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Regex flags setting, closely following cudf::strings::regex_flags. * * These types can be or'd to combine them. The values are chosen to * leave room for future flags and to match the Python flag values. */ public enum RegexFlag { DEFAULT(0), // default MULTILINE(8), // the '^' and '$' honor new-line characters DOTALL(16), // the '.' matching includes new-line characters ASCII(256); // use only ASCII when matching built-in character classes final int nativeId; // Native id, for use with libcudf. private RegexFlag(int nativeId) { // Only constant values should be used this.nativeId = nativeId; } }
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Schema.java
/* * * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.util.ArrayList; import java.util.List; /** * The schema of data to be read in. */ public class Schema { public static final Schema INFERRED = new Schema(); private final List<String> names; private final List<DType> types; private Schema(List<String> names, List<DType> types) { this.names = new ArrayList<>(names); this.types = new ArrayList<>(types); } /** * Inferred schema. */ private Schema() { names = null; types = null; } public static Builder builder() { return new Builder(); } public String[] getColumnNames() { if (names == null) { return null; } return names.toArray(new String[names.size()]); } int[] getTypeIds() { if (types == null) { return null; } int[] ret = new int[types.size()]; for (int i = 0; i < types.size(); i++) { ret[i] = types.get(i).getTypeId().nativeId; } return ret; } int[] getTypeScales() { if (types == null) { return null; } int[] ret = new int[types.size()]; for (int i = 0; i < types.size(); i++) { ret[i] = types.get(i).getScale(); } return ret; } DType[] getTypes() { if (types == null) { return null; } DType[] ret = new DType[types.size()]; for (int i = 0; i < types.size(); i++) { ret[i] = types.get(i); } return ret; } public static class Builder { private final List<String> names = new ArrayList<>(); private final List<DType> types = new ArrayList<>(); public Builder column(DType type, String name) { types.add(type); names.add(name); return this; } public Schema build() { return new Schema(names, types); } } }
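A builder sketch grounded entirely in the class above; a schema like this would typically be handed to one of the file readers, with Schema.INFERRED used when types should be inferred instead.

import ai.rapids.cudf.DType;
import ai.rapids.cudf.Schema;

public class SchemaExample {
  public static void main(String[] args) {
    Schema schema = Schema.builder()
        .column(DType.INT32, "id")
        .column(DType.STRING, "name")
        .build();
    for (String name : schema.getColumnNames()) {
      System.out.println(name); // "id" then "name"
    }
  }
}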
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostBufferConsumer.java
/* * * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Provides a set of APIs for consuming host buffers. This is typically used * when writing out Tables in various file formats. */ public interface HostBufferConsumer { /** * Consume a buffer. * @param buffer the buffer. Be sure to close this buffer when you are done * with it or it will leak. * @param len the length of the buffer that is valid. The valid data will be 0 until len. */ void handleBuffer(HostMemoryBuffer buffer, long len); /** * Indicates that no more buffers will be supplied. */ default void done() {} }
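A minimal implementation sketch of the interface above; it relies only on the contract documented here (the consumer owns each buffer and only bytes [0, len) are valid).

import ai.rapids.cudf.HostBufferConsumer;
import ai.rapids.cudf.HostMemoryBuffer;

public class CountingConsumer implements HostBufferConsumer {
  private long total = 0;

  @Override
  public void handleBuffer(HostMemoryBuffer buffer, long len) {
    try {
      total += len; // only bytes [0, len) hold valid data
    } finally {
      buffer.close(); // required, or the buffer leaks
    }
  }

  @Override
  public void done() {
    System.out.println("total bytes produced: " + total);
  }
}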
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmPoolMemoryResource.java
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * A device memory resource that will pre-allocate a pool of resources and sub-allocate from this * pool to improve memory performance. */ public class RmmPoolMemoryResource<C extends RmmDeviceMemoryResource> extends RmmWrappingDeviceMemoryResource<C> { private long handle = 0; private final long initSize; private final long maxSize; /** * Create a new pooled memory resource taking ownership of the RmmDeviceMemoryResource that it is * wrapping. * @param wrapped the memory resource to use for the pool. This should not be reused. * @param initSize the size of the initial pool * @param maxSize the maximum size the pool can grow to */ public RmmPoolMemoryResource(C wrapped, long initSize, long maxSize) { super(wrapped); this.initSize = initSize; this.maxSize = maxSize; handle = Rmm.newPoolMemoryResource(wrapped.getHandle(), initSize, maxSize); } public long getMaxSize() { return maxSize; } @Override public long getHandle() { return handle; } @Override public void close() { if (handle != 0) { Rmm.releasePoolMemoryResource(handle); handle = 0; } super.close(); } @Override public String toString() { return Long.toHexString(getHandle()) + "/POOL(" + wrapped + ")"; } }
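A construction sketch; RmmCudaMemoryResource is assumed from elsewhere in this package as the plain cudaMalloc-backed resource to wrap.

import ai.rapids.cudf.RmmCudaMemoryResource;
import ai.rapids.cudf.RmmPoolMemoryResource;

public class PoolResourceExample {
  public static void main(String[] args) {
    long mb = 1024L * 1024L;
    // Pre-allocate 256 MiB up front, allow growth to 1 GiB.
    try (RmmPoolMemoryResource<RmmCudaMemoryResource> pool =
             new RmmPoolMemoryResource<>(new RmmCudaMemoryResource(), 256 * mb, 1024 * mb)) {
      System.out.println(pool); // "<handle>/POOL(...)" per toString above
    }
  }
}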
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/SegmentedReductionAggregation.java
/* * * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * An aggregation that can be used for a segmented reduce. */ public final class SegmentedReductionAggregation { private final Aggregation wrapped; private SegmentedReductionAggregation(Aggregation wrapped) { this.wrapped = wrapped; } long createNativeInstance() { return wrapped.createNativeInstance(); } long getDefaultOutput() { return wrapped.getDefaultOutput(); } Aggregation getWrapped() { return wrapped; } @Override public int hashCode() { return wrapped.hashCode(); } @Override public boolean equals(Object other) { if (other == this) { return true; } else if (other instanceof SegmentedReductionAggregation) { SegmentedReductionAggregation o = (SegmentedReductionAggregation) other; return wrapped.equals(o.wrapped); } return false; } /** * Sum Aggregation */ public static SegmentedReductionAggregation sum() { return new SegmentedReductionAggregation(Aggregation.sum()); } /** * Product Aggregation. */ public static SegmentedReductionAggregation product() { return new SegmentedReductionAggregation(Aggregation.product()); } /** * Min Aggregation */ public static SegmentedReductionAggregation min() { return new SegmentedReductionAggregation(Aggregation.min()); } /** * Max Aggregation */ public static SegmentedReductionAggregation max() { return new SegmentedReductionAggregation(Aggregation.max()); } /** * Any reduction. Produces true or 1, depending on the output type, * if any of the elements in the range are true or non-zero, otherwise produces a false or 0. * Null values are skipped. */ public static SegmentedReductionAggregation any() { return new SegmentedReductionAggregation(Aggregation.any()); } /** * All reduction. Produces true or 1, depending on the output type, if all of the elements in * the range are true or non-zero, otherwise produces a false or 0. * Null values are skipped. */ public static SegmentedReductionAggregation all() { return new SegmentedReductionAggregation(Aggregation.all()); } }
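A hedged fragment showing these factories in use. The ColumnView.segmentedReduce overload and the getListOffsetsView helper are assumptions drawn from the wider Java API rather than this file, and the input data is made up.

```java
// Sum each list in a LIST<INT32> column; the expected per-list results are [6, 9].
try (ColumnVector lists = ColumnVector.fromLists(
         new HostColumnVector.ListType(true,
             new HostColumnVector.BasicType(true, DType.INT32)),
         java.util.Arrays.asList(1, 2, 3),
         java.util.Arrays.asList(4, 5));
     ColumnView offsets = lists.getListOffsetsView();
     ColumnView child = lists.getChildColumnView(0);
     ColumnVector sums = child.segmentedReduce(
         offsets, SegmentedReductionAggregation.sum(), DType.INT64)) {
  // one output row per input list
}
```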
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/NvtxRange.java
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * This class supports push/pop NVTX profiling ranges, or "scoped" ranges. * * The constructor pushes an NVTX range and the close method pops off the most recent range that * was pushed. Therefore instances of this class should always be used in a try-with-resources * block to guarantee that ranges are always closed in the proper order. For example: * <pre> * try (NvtxRange a = new NvtxRange("a", NvtxColor.RED)) { * ... * try (NvtxRange b = new NvtxRange("b", NvtxColor.BLUE)) { * ... * } * ... * } * </pre> * * Instances should be associated with a single thread to avoid pushing an NVTX range in * one thread and then trying to pop the range in a different thread. * * Push/pop ranges show a stacking behavior in tools such as Nsight, where newly pushed * ranges are correlated and enclosed by the prior pushed range (in the example above, * "b" is enclosed by "a"). */ public class NvtxRange implements AutoCloseable { private static final boolean isEnabled = Boolean.getBoolean("ai.rapids.cudf.nvtx.enabled"); static { if (isEnabled) { NativeDepsLoader.loadNativeDeps(); } } public NvtxRange(String name, NvtxColor color) { this(name, color.colorBits); } public NvtxRange(String name, int colorBits) { if (isEnabled) { push(name, colorBits); } } @Override public void close() { if (isEnabled) { pop(); } } private native void push(String name, int colorBits); private native void pop(); }
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ReplacePolicyWithColumn.java
/* * * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * A replacement policy for a specific column */ public class ReplacePolicyWithColumn { final int column; final ReplacePolicy policy; ReplacePolicyWithColumn(int column, ReplacePolicy policy) { this.column = column; this.policy = policy; } @Override public boolean equals(Object other) { if (!(other instanceof ReplacePolicyWithColumn)) { return false; } ReplacePolicyWithColumn ro = (ReplacePolicyWithColumn)other; return this.column == ro.column && this.policy.equals(ro.policy); } @Override public int hashCode() { return 31 * column + policy.hashCode(); } }
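A hedged fragment: instances are normally obtained from ReplacePolicy.onColumn(...) and passed to a grouped replaceNulls. Both entry points are assumptions from the wider Java API, and `input` is a hypothetical Table with the group key in column 0.

```java
// Forward-fill nulls in column 1 within each group keyed by column 0.
ReplacePolicyWithColumn fillForward = ReplacePolicy.PRECEDING.onColumn(1);
try (Table filled = input.groupBy(0).replaceNulls(fillForward)) {
  // nulls in column 1 are replaced by the prior non-null value per group
}
```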
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ParquetChunkedReader.java
/* * * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.io.File; /** * Provide an interface for reading a Parquet file in an iterative manner. */ public class ParquetChunkedReader implements AutoCloseable { static { NativeDepsLoader.loadNativeDeps(); } /** * Construct the reader instance from a read limit and a file path. * * @param chunkSizeByteLimit Limit on total number of bytes to be returned per read, * or 0 if there is no limit. * @param filePath Full path of the input Parquet file to read. */ public ParquetChunkedReader(long chunkSizeByteLimit, File filePath) { this(chunkSizeByteLimit, ParquetOptions.DEFAULT, filePath); } /** * Construct the reader instance from a read limit, a ParquetOptions object, and a file path. * * @param chunkSizeByteLimit Limit on total number of bytes to be returned per read, * or 0 if there is no limit. * @param opts The options for Parquet reading. * @param filePath Full path of the input Parquet file to read. */ public ParquetChunkedReader(long chunkSizeByteLimit, ParquetOptions opts, File filePath) { handle = create(chunkSizeByteLimit, opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), filePath.getAbsolutePath(), 0, 0, opts.timeUnit().typeId.getNativeId()); if (handle == 0) { throw new IllegalStateException("Cannot create native chunked Parquet reader object."); } } /** * Construct the reader instance from a read limit and a file already read in a memory buffer. * * @param chunkSizeByteLimit Limit on total number of bytes to be returned per read, * or 0 if there is no limit. * @param opts The options for Parquet reading. * @param buffer Raw Parquet file content. * @param offset The starting offset into buffer. * @param len The number of bytes to parse from the given buffer. */ public ParquetChunkedReader(long chunkSizeByteLimit, ParquetOptions opts, HostMemoryBuffer buffer, long offset, long len) { handle = create(chunkSizeByteLimit, opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), null, buffer.getAddress() + offset, len, opts.timeUnit().typeId.getNativeId()); if (handle == 0) { throw new IllegalStateException("Cannot create native chunked Parquet reader object."); } } /** * Construct a reader instance from a DataSource. * @param chunkSizeByteLimit Limit on total number of bytes to be returned per read, * or 0 if there is no limit. * @param opts The options for Parquet reading.
* @param ds the data source to read from */ public ParquetChunkedReader(long chunkSizeByteLimit, ParquetOptions opts, DataSource ds) { dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds); if (dataSourceHandle == 0) { throw new IllegalStateException("Cannot create native datasource object"); } boolean passed = false; try { handle = createWithDataSource(chunkSizeByteLimit, opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), opts.timeUnit().typeId.getNativeId(), dataSourceHandle); passed = true; } finally { if (!passed) { DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); dataSourceHandle = 0; } } } /** * Check if the given file has anything left to read. * * @return A boolean value indicating if there is more data to read from the file. */ public boolean hasNext() { if (handle == 0) { throw new IllegalStateException("Native chunked Parquet reader object may have been closed."); } if (firstCall) { // This function needs to return true at least once, so an empty table // (but having empty columns instead of no column) can be returned by readChunk() // if the input file has no row. firstCall = false; return true; } return hasNext(handle); } /** * Read a chunk of rows from the given Parquet file such that the total size of the returned * data does not exceed the given read limit. If the given file has no data, or all data has been read * before by previous calls to this function, a null Table will be returned. * * @return A table of new rows read from the given file. */ public Table readChunk() { if (handle == 0) { throw new IllegalStateException("Native chunked Parquet reader object may have been closed."); } long[] columnPtrs = readChunk(handle); return columnPtrs != null ? new Table(columnPtrs) : null; } @Override public void close() { if (handle != 0) { close(handle); handle = 0; } if (dataSourceHandle != 0) { DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); dataSourceHandle = 0; } } /** * Auxiliary variable to help {@link #hasNext()} return true at least once. */ private boolean firstCall = true; /** * Handle for memory address of the native Parquet chunked reader class. */ private long handle; private long dataSourceHandle = 0; /** * Create a native chunked Parquet reader object on the heap and return its memory address. * * @param chunkSizeByteLimit Limit on total number of bytes to be returned per read, * or 0 if there is no limit. * @param filterColumnNames Name of the columns to read, or an empty array if we want to read all. * @param binaryToString Whether to convert the corresponding column to String if it is binary. * @param filePath Full path of the file to read, or given as null if reading from a buffer. * @param bufferAddrs The address of a buffer to read from, or 0 if we are not using that buffer. * @param length The length of the buffer to read from. * @param timeUnit Return type of time unit for timestamps. */ private static native long create(long chunkSizeByteLimit, String[] filterColumnNames, boolean[] binaryToString, String filePath, long bufferAddrs, long length, int timeUnit); private static native long createWithDataSource(long chunkedSizeByteLimit, String[] filterColumnNames, boolean[] binaryToString, int timeUnit, long dataSourceHandle); private static native boolean hasNext(long handle); private static native long[] readChunk(long handle); private static native void close(long handle); }
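The intended read loop follows directly from the methods above. This fragment uses a placeholder 1 GiB limit and file name:

```java
// Read a large Parquet file in bounded chunks.
try (ParquetChunkedReader reader = new ParquetChunkedReader(
         1024L * 1024 * 1024, new java.io.File("input.parquet"))) {
  while (reader.hasNext()) {
    try (Table chunk = reader.readChunk()) {
      if (chunk != null) {
        // process this chunk before pulling the next one
      }
    }
  }
}
```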
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostBufferProvider.java
/* * * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Provides a set of APIs for providing host buffers to be read. */ public interface HostBufferProvider extends AutoCloseable { /** * Place data into the given buffer. * @param buffer the buffer to put data into. * @param len the maximum amount of data to put into buffer. Less is okay if at EOF. * @return the actual amount of data put into the buffer. */ long readInto(HostMemoryBuffer buffer, long len); /** * Indicates that no more buffers will be supplied. */ @Override default void close() {} }
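A minimal sketch of a provider backed by a Java byte array; HostMemoryBuffer.setBytes copying from a Java array into host memory is assumed from the wider API, and the class name is hypothetical.

```java
import ai.rapids.cudf.HostBufferProvider;
import ai.rapids.cudf.HostMemoryBuffer;

public class ByteArrayProvider implements HostBufferProvider {
  private final byte[] data;
  private long position = 0;

  public ByteArrayProvider(byte[] data) {
    this.data = data;
  }

  @Override
  public long readInto(HostMemoryBuffer buffer, long len) {
    long toCopy = Math.min(len, data.length - position);
    if (toCopy <= 0) {
      return 0; // at EOF, nothing was placed in the buffer
    }
    buffer.setBytes(0, data, position, toCopy);
    position += toCopy;
    return toCopy;
  }
}
```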
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/TableDebug.java
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Locale; import java.util.function.Consumer; public class TableDebug { /** * Specify one of * -Dai.rapids.cudf.debug.output=stderr to print directly to standard error (default) * -Dai.rapids.cudf.debug.output=stdout to print directly to standard output * -Dai.rapids.cudf.debug.output=log[_level] to redirect to a logging subsystem that can * further be * configured. * Supported log levels: * debug (default) * info * warn * error */ public static final String OUTPUT_STREAM = "ai.rapids.cudf.debug.output"; private static final Logger log = LoggerFactory.getLogger(TableDebug.class); public enum Output { STDOUT(System.out::println), STDERR(System.err::println), LOG(log::debug), LOG_DEBUG(log::debug), LOG_INFO(log::info), LOG_WARN(log::warn), LOG_ERROR(log::error); private final Consumer<String> printFunc; Output(Consumer<String> pf) { this.printFunc = pf; } final void println(String s) { printFunc.accept(s); } } public static class Builder { private Output outputMode = Output.STDERR; public Builder() { try { outputMode = Output.valueOf( System.getProperty(OUTPUT_STREAM, Output.STDERR.name()) .toUpperCase(Locale.US)); } catch (Throwable e) { log.warn("Failed to parse the output mode", e); } } public Builder withOutput(Output outputMode) { this.outputMode = outputMode; return this; } public final TableDebug build() { return new TableDebug(outputMode); } } public static Builder builder() { return new Builder(); } private static final TableDebug DEFAULT_DEBUG = builder().build(); public static TableDebug get() { return DEFAULT_DEBUG; } private final Output output; private TableDebug(Output output) { this.output = output; } /** * Print the contents of a table. Note that this should never be * called from production code, as it is very slow. Also note that this is not production * code. You might need/want to update how the data shows up or add in support for more * types as this really is just for debugging. * @param name the name of the table to print out. * @param table the table to print out. */ public synchronized void debug(String name, Table table) { output.println("DEBUG " + name + " " + table); for (int col = 0; col < table.getNumberOfColumns(); col++) { debug(String.valueOf(col), table.getColumn(col)); } } /** * Print the contents of a column. Note that this should never be * called from production code, as it is very slow. Also note that this is not production * code. You might need/want to update how the data shows up or add in support for more * types as this really is just for debugging. * @param name the name of the column to print out. * @param col the column to print out. 
*/ public synchronized void debug(String name, ColumnView col) { debugGPUAddrs(name, col); try (HostColumnVector hostCol = col.copyToHost()) { debug(name, hostCol); } } private synchronized void debugGPUAddrs(String name, ColumnView col) { try (BaseDeviceMemoryBuffer data = col.getData(); BaseDeviceMemoryBuffer validity = col.getValid()) { output.println("GPU COLUMN " + name + " - NC: " + col.getNullCount() + " DATA: " + data + " VAL: " + validity); } if (col.getType() == DType.STRUCT) { for (int i = 0; i < col.getNumChildren(); i++) { try (ColumnView child = col.getChildColumnView(i)) { debugGPUAddrs(name + ":CHILD_" + i, child); } } } else if (col.getType() == DType.LIST) { try (ColumnView child = col.getChildColumnView(0)) { debugGPUAddrs(name + ":DATA", child); } } } /** * Print the contents of a column. Note that this should never be * called from production code, as it is very slow. Also note that this is not production * code. You might need/want to update how the data shows up or add in support for more * types as this really is just for debugging. * @param name the name of the column to print out. * @param hostCol the column to print out. */ public synchronized void debug(String name, HostColumnVectorCore hostCol) { DType type = hostCol.getType(); output.println("COLUMN " + name + " - " + type); if (type.isDecimalType()) { for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } else { output.println(i + " " + hostCol.getBigDecimal(i)); } } } else if (DType.STRING.equals(type)) { for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } else { output.println(i + " \"" + hostCol.getJavaString(i) + "\" " + hexString(hostCol.getUTF8(i))); } } } else if (DType.INT32.equals(type) || DType.INT8.equals(type) || DType.INT16.equals(type) || DType.INT64.equals(type) || DType.TIMESTAMP_DAYS.equals(type) || DType.TIMESTAMP_SECONDS.equals(type) || DType.TIMESTAMP_MICROSECONDS.equals(type) || DType.TIMESTAMP_MILLISECONDS.equals(type) || DType.TIMESTAMP_NANOSECONDS.equals(type) || DType.UINT8.equals(type) || DType.UINT16.equals(type) || DType.UINT32.equals(type) || DType.UINT64.equals(type)) { debugInteger(hostCol, type); } else if (DType.BOOL8.equals(type)) { for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } else { output.println(i + " " + hostCol.getBoolean(i)); } } } else if (DType.FLOAT64.equals(type)) { for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } else { output.println(i + " " + hostCol.getDouble(i)); } } } else if (DType.FLOAT32.equals(type)) { for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } else { output.println(i + " " + hostCol.getFloat(i)); } } } else if (DType.STRUCT.equals(type)) { for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } // The struct child columns are printed out later on. 
} for (int i = 0; i < hostCol.getNumChildren(); i++) { debug(name + ":CHILD_" + i, hostCol.getChildColumnView(i)); } } else if (DType.LIST.equals(type)) { output.println("OFFSETS"); for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } else { output.println(i + " [" + hostCol.getStartListOffset(i) + " - " + hostCol.getEndListOffset(i) + ")"); } } debug(name + ":DATA", hostCol.getChildColumnView(0)); } else { output.println("TYPE " + type + " NOT SUPPORTED FOR DEBUG PRINT"); } } private void debugInteger(HostColumnVectorCore hostCol, DType intType) { for (int i = 0; i < hostCol.getRowCount(); i++) { if (hostCol.isNull(i)) { output.println(i + " NULL"); } else { final int sizeInBytes = intType.getSizeInBytes(); final Object value; switch (sizeInBytes) { case Byte.BYTES: value = hostCol.getByte(i); break; case Short.BYTES: value = hostCol.getShort(i); break; case Integer.BYTES: value = hostCol.getInt(i); break; case Long.BYTES: value = hostCol.getLong(i); break; default: throw new IllegalArgumentException("INFEASIBLE: Unsupported integer-like type " + intType); } output.println(i + " " + value); } } } private static String hexString(byte[] bytes) { StringBuilder str = new StringBuilder(); for (byte b : bytes) { str.append(String.format("%02x", b & 0xff)); } return str.toString(); } }
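A short fragment showing the two usual entry points; it assumes a Table named `table` is in scope and "my_table" is just a label.

```java
// Dump through the default instance (configured via -Dai.rapids.cudf.debug.output=...).
TableDebug.get().debug("my_table", table);

// Or build an instance that routes through SLF4J at info level.
TableDebug info = TableDebug.builder()
    .withOutput(TableDebug.Output.LOG_INFO)
    .build();
info.debug("my_table", table);
```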
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ScanAggregation.java
/* * * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * An aggregation that can be used for a scan. */ public final class ScanAggregation { private final Aggregation wrapped; private ScanAggregation(Aggregation wrapped) { this.wrapped = wrapped; } long createNativeInstance() { return wrapped.createNativeInstance(); } long getDefaultOutput() { return wrapped.getDefaultOutput(); } Aggregation getWrapped() { return wrapped; } @Override public int hashCode() { return wrapped.hashCode(); } @Override public boolean equals(Object other) { if (other == this) { return true; } else if (other instanceof ScanAggregation) { ScanAggregation o = (ScanAggregation) other; return wrapped.equals(o.wrapped); } return false; } /** * Sum Aggregation */ public static ScanAggregation sum() { return new ScanAggregation(Aggregation.sum()); } /** * Product Aggregation. */ public static ScanAggregation product() { return new ScanAggregation(Aggregation.product()); } /** * Min Aggregation */ public static ScanAggregation min() { return new ScanAggregation(Aggregation.min()); } /** * Max Aggregation */ public static ScanAggregation max() { return new ScanAggregation(Aggregation.max()); } /** * Get the row's ranking. */ public static ScanAggregation rank() { return new ScanAggregation(Aggregation.rank()); } /** * Get the row's dense ranking. */ public static ScanAggregation denseRank() { return new ScanAggregation(Aggregation.denseRank()); } /** * Get the row's percent rank. */ public static ScanAggregation percentRank() { return new ScanAggregation(Aggregation.percentRank()); } }
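A hedged fragment: a cumulative sum over an INT32 column. The three-argument ColumnVector.scan(aggregation, scanType, nullPolicy) overload is an assumption from the wider Java API, not defined in this file.

```java
try (ColumnVector vals = ColumnVector.fromBoxedInts(1, 2, null, 4);
     ColumnVector cumsum = vals.scan(ScanAggregation.sum(),
         ScanType.INCLUSIVE, NullPolicy.EXCLUDE)) {
  // cumsum holds 1, 3, null, 7; nulls are excluded from the running total
}
```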
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/PadSide.java
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; public enum PadSide { LEFT(0), RIGHT(1), BOTH(2); private static final PadSide[] PAD_SIDES = PadSide.values(); final int nativeId; PadSide(int nativeId) { this.nativeId = nativeId; } public int getNativeId() { return nativeId; } public static PadSide fromNative(int nativeId) { for (PadSide type : PAD_SIDES) { if (type.nativeId == nativeId) { return type; } } throw new IllegalArgumentException("Could not translate " + nativeId + " into a PadSide"); } }
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ColumnFilterOptions.java
/* * * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.util.ArrayList; import java.util.Collection; import java.util.List; /** * Base options class for input formats that can filter columns. */ public abstract class ColumnFilterOptions { // Names of the columns to be returned (other columns are skipped) // If empty all columns are returned. private final String[] includeColumnNames; protected ColumnFilterOptions(Builder<?> builder) { includeColumnNames = builder.includeColumnNames.toArray( new String[builder.includeColumnNames.size()]); } String[] getIncludeColumnNames() { return includeColumnNames; } public static class Builder<T extends Builder> { final List<String> includeColumnNames = new ArrayList<>(); /** * Include one or more specific columns. Any column not included will not be read. * @param names the name of the column, or more than one if you want. */ public T includeColumn(String... names) { for (String name : names) { includeColumnNames.add(name); } return (T) this; } /** * Include one or more specific columns. Any column not included will not be read. * @param names the name of the column, or more than one if you want. */ public T includeColumn(Collection<String> names) { includeColumnNames.addAll(names); return (T) this; } } }
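A minimal sketch of this builder in use through a concrete subclass; the file name is a placeholder.

```java
// Read only two columns from a Parquet file; all other columns are skipped.
ParquetOptions opts = ParquetOptions.builder()
    .includeColumn("id", "name")
    .build();
try (Table t = Table.readParquet(opts, new java.io.File("input.parquet"))) {
  // t holds just the requested columns
}
```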
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/GroupByScanAggregation.java
/* * * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * An aggregation that can be used for a grouped scan. */ public final class GroupByScanAggregation { private final Aggregation wrapped; private GroupByScanAggregation(Aggregation wrapped) { this.wrapped = wrapped; } long createNativeInstance() { return wrapped.createNativeInstance(); } long getDefaultOutput() { return wrapped.getDefaultOutput(); } Aggregation getWrapped() { return wrapped; } /** * Add a column to the Aggregation so it can be used on a specific column of data. * @param columnIndex the index of the column to operate on. */ public GroupByScanAggregationOnColumn onColumn(int columnIndex) { return new GroupByScanAggregationOnColumn(this, columnIndex); } @Override public int hashCode() { return wrapped.hashCode(); } @Override public boolean equals(Object other) { if (other == this) { return true; } else if (other instanceof GroupByScanAggregation) { GroupByScanAggregation o = (GroupByScanAggregation) other; return wrapped.equals(o.wrapped); } return false; } /** * Sum Aggregation */ public static GroupByScanAggregation sum() { return new GroupByScanAggregation(Aggregation.sum()); } /** * Product Aggregation. */ public static GroupByScanAggregation product() { return new GroupByScanAggregation(Aggregation.product()); } /** * Min Aggregation */ public static GroupByScanAggregation min() { return new GroupByScanAggregation(Aggregation.min()); } /** * Max Aggregation */ public static GroupByScanAggregation max() { return new GroupByScanAggregation(Aggregation.max()); } /** * Count number of elements. * @param nullPolicy INCLUDE if nulls should be counted. EXCLUDE if only non-null values * should be counted. */ public static GroupByScanAggregation count(NullPolicy nullPolicy) { return new GroupByScanAggregation(Aggregation.count(nullPolicy)); } /** * Get the row's ranking. */ public static GroupByScanAggregation rank() { return new GroupByScanAggregation(Aggregation.rank()); } /** * Get the row's dense ranking. */ public static GroupByScanAggregation denseRank() { return new GroupByScanAggregation(Aggregation.denseRank()); } /** * Get the row's percent ranking. */ public static GroupByScanAggregation percentRank() { return new GroupByScanAggregation(Aggregation.percentRank()); } }
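A hedged fragment: a grouped cumulative sum. Table.groupBy(...).scan(...) taking the result of onColumn(...) is an assumption based on the helper above, and `input` is a hypothetical Table with the group key in column 0.

```java
try (Table result = input.groupBy(0)
         .scan(GroupByScanAggregation.sum().onColumn(1))) {
  // result pairs each input row with its running per-group sum of column 1
}
```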
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmLimitingResourceAdaptor.java
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * A device memory resource that will limit the maximum amount allocated. */ public class RmmLimitingResourceAdaptor<C extends RmmDeviceMemoryResource> extends RmmWrappingDeviceMemoryResource<C> { private final long limit; private final long alignment; private long handle = 0; /** * Create a new limiting resource adaptor. * @param wrapped the memory resource to limit. This should not be reused. * @param limit the allocation limit in bytes * @param alignment the alignment */ public RmmLimitingResourceAdaptor(C wrapped, long limit, long alignment) { super(wrapped); this.limit = limit; this.alignment = alignment; handle = Rmm.newLimitingResourceAdaptor(wrapped.getHandle(), limit, alignment); } @Override public long getHandle() { return handle; } @Override public void close() { if (handle != 0) { Rmm.releaseLimitingResourceAdaptor(handle); handle = 0; } super.close(); } @Override public String toString() { return Long.toHexString(getHandle()) + "/LIMIT(" + wrapped + ", " + limit + ", " + alignment + ")"; } }
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Rmm.java
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; import java.io.File; import java.util.concurrent.TimeUnit; /** * This is the binding class for the rmm lib. */ public class Rmm { private static volatile RmmTrackingResourceAdaptor<RmmDeviceMemoryResource> tracker = null; private static volatile RmmDeviceMemoryResource deviceResource = null; private static volatile boolean initialized = false; private static volatile long poolSize = -1; private static volatile boolean poolingEnabled = false; static { NativeDepsLoader.loadNativeDeps(); } enum LogLoc { NONE(0), FILE(1), STDOUT(2), STDERR(3); final int internalId; LogLoc(int internalId) { this.internalId = internalId; } } /** * What to send RMM alloc and free logs to. */ public static class LogConf { final File file; final LogLoc loc; private LogConf(File file, LogLoc loc) { this.file = file; this.loc = loc; } } /** * Create a config that will write alloc/free logs to a file. */ public static LogConf logTo(File location) { return new LogConf(location, LogLoc.FILE); } /** * Create a config that will write alloc/free logs to stdout. */ public static LogConf logToStdout() { return new LogConf(null, LogLoc.STDOUT); } /** * Create a config that will write alloc/free logs to stderr. */ public static LogConf logToStderr() { return new LogConf(null, LogLoc.STDERR); } /** * Get the RmmDeviceMemoryResource that was last set through the java APIs. This will * not return the correct value if the resource was not set using the java APIs. It will * return a null if the resource was never set through the java APIs. */ public static synchronized RmmDeviceMemoryResource getCurrentDeviceResource() { return deviceResource; } /** * Get the RmmTrackingResourceAdaptor that is currently set. This might return null if * RMM has not been initialized. */ public static synchronized RmmTrackingResourceAdaptor<RmmDeviceMemoryResource> getTracker() { return tracker; } /** * Set the current device resource that RMM should use for all allocations and de-allocations. * This should only be done if you feel comfortable that the current device resource has no * pending allocations. Note that the caller of this is responsible for closing the current * RmmDeviceMemoryResource that is returned by this, assuming that it was not used to create * the newResource. Please use the `shutdown` API to clear the resource as it does best * effort clean up before shutting it down. If `newResource` is not null this will initialize * the CUDA context for the calling thread if it is not already set. The caller is responsible * for setting the desired CUDA device prior to this call if a specific device is already set. * <p>NOTE: All cudf methods will set the chosen CUDA device in the CUDA context of the calling * thread after this returns and `newResource` was not null. * <p>If `newResource` is null this will unset the default CUDA device and mark RMM as not * initialized.
* <p>Be aware that for many of these APIs to work the RmmDeviceMemoryResource will need an * `RmmTrackingResourceAdaptor`. If one is not found and `newResource` is not null it will * be added to `newResource`. * <p>Also be very careful with how you set this up. It is possible to set up an * RmmDeviceMemoryResource that is just bad, like multiple pools or pools on top of an * RmmAsyncMemoryResource, that does pooling already. Unless you know what you are doing it is * best to just use the `initialize` API instead. * * @param newResource the new resource to set. If it is null an RmmCudaMemoryResource will be * used, and RMM will be set as not initialized. * @param expectedResource the resource that we expect to be set. This is to let us avoid race * conditions with multiple things trying to set this at once. It should * never happen, but just to be careful. * @param forceChange if true then the expectedResource check is not done. */ public static synchronized RmmDeviceMemoryResource setCurrentDeviceResource( RmmDeviceMemoryResource newResource, RmmDeviceMemoryResource expectedResource, boolean forceChange) { boolean shouldInit = false; boolean shouldDeinit = false; RmmDeviceMemoryResource newResourceToSet = newResource; if (newResourceToSet == null) { // We always want it to be set to something or else it can cause problems... newResourceToSet = new RmmCudaMemoryResource(); if (initialized) { shouldDeinit = true; } } else if (!initialized) { shouldInit = true; } RmmDeviceMemoryResource oldResource = deviceResource; if (!forceChange && expectedResource != null && deviceResource != null) { long expectedOldHandle = expectedResource.getHandle(); long oldHandle = deviceResource.getHandle(); if (oldHandle != expectedOldHandle) { throw new RmmException("The expected device resource is not correct " + Long.toHexString(oldHandle) + " != " + Long.toHexString(expectedOldHandle)); } } poolSize = -1; poolingEnabled = false; setGlobalValsFromResource(newResourceToSet); if (newResource != null && tracker == null) { // No tracker was set, but we need one tracker = new RmmTrackingResourceAdaptor<>(newResourceToSet, 256); newResourceToSet = tracker; } long newHandle = newResourceToSet.getHandle(); setCurrentDeviceResourceInternal(newHandle); deviceResource = newResource; if (shouldInit) { initDefaultCudaDevice(); MemoryCleaner.setDefaultGpu(Cuda.getDevice()); initialized = true; } if (shouldDeinit) { cleanupDefaultCudaDevice(); initialized = false; } return oldResource; } private static void setGlobalValsFromResource(RmmDeviceMemoryResource resource) { if (resource instanceof RmmTrackingResourceAdaptor) { Rmm.tracker = (RmmTrackingResourceAdaptor<RmmDeviceMemoryResource>) resource; } else if (resource instanceof RmmPoolMemoryResource) { Rmm.poolSize = Math.max(((RmmPoolMemoryResource)resource).getMaxSize(), Rmm.poolSize); Rmm.poolingEnabled = true; } else if (resource instanceof RmmArenaMemoryResource) { Rmm.poolSize = Math.max(((RmmArenaMemoryResource)resource).getSize(), Rmm.poolSize); Rmm.poolingEnabled = true; } else if (resource instanceof RmmCudaAsyncMemoryResource) { Rmm.poolSize = Math.max(((RmmCudaAsyncMemoryResource)resource).getSize(), Rmm.poolSize); Rmm.poolingEnabled = true; } // Recurse as needed if (resource instanceof RmmWrappingDeviceMemoryResource) { setGlobalValsFromResource(((RmmWrappingDeviceMemoryResource<RmmDeviceMemoryResource>)resource).getWrapped()); } } /** * Initialize memory manager state and storage. 
This will always initialize * the CUDA context for the calling thread if it is not already set. The * caller is responsible for setting the desired CUDA device prior to this * call if a specific device is already set. * <p>NOTE: All cudf methods will set the chosen CUDA device in the CUDA * context of the calling thread after this returns. * @param allocationMode Allocation strategy to use. Bit set using * {@link RmmAllocationMode#CUDA_DEFAULT}, * {@link RmmAllocationMode#POOL}, * {@link RmmAllocationMode#ARENA}, * {@link RmmAllocationMode#CUDA_ASYNC} and * {@link RmmAllocationMode#CUDA_MANAGED_MEMORY} * @param logConf How to do logging or null if you don't want to * @param poolSize The initial pool size in bytes * @throws IllegalStateException if RMM has already been initialized */ public static synchronized void initialize(int allocationMode, LogConf logConf, long poolSize) throws RmmException { if (initialized) { throw new IllegalStateException("RMM is already initialized"); } boolean isPool = (allocationMode & RmmAllocationMode.POOL) != 0; boolean isArena = (allocationMode & RmmAllocationMode.ARENA) != 0; boolean isAsync = (allocationMode & RmmAllocationMode.CUDA_ASYNC) != 0; boolean isManaged = (allocationMode & RmmAllocationMode.CUDA_MANAGED_MEMORY) != 0; if (isAsync && isManaged) { throw new IllegalArgumentException( "CUDA Unified Memory is not supported in CUDA_ASYNC allocation mode"); } RmmDeviceMemoryResource resource = null; boolean succeeded = false; try { if (isPool) { if (isManaged) { resource = new RmmPoolMemoryResource<>(new RmmManagedMemoryResource(), poolSize, poolSize); } else { resource = new RmmPoolMemoryResource<>(new RmmCudaMemoryResource(), poolSize, poolSize); } } else if (isArena) { if (isManaged) { resource = new RmmArenaMemoryResource<>(new RmmManagedMemoryResource(), poolSize, false); } else { resource = new RmmArenaMemoryResource<>(new RmmCudaMemoryResource(), poolSize, false); } } else if (isAsync) { resource = new RmmLimitingResourceAdaptor<>( new RmmCudaAsyncMemoryResource(poolSize, poolSize), poolSize, 512); } else if (isManaged) { resource = new RmmManagedMemoryResource(); } else { resource = new RmmCudaMemoryResource(); } if (logConf != null && logConf.loc != LogLoc.NONE) { resource = new RmmLoggingResourceAdaptor<>(resource, logConf, true); } resource = new RmmTrackingResourceAdaptor<>(resource, 256); setCurrentDeviceResource(resource, null, false); succeeded = true; } finally { if (!succeeded && resource != null) { resource.close(); } } } /** * Get the most recently set pool size or -1 if RMM has not been initialized or pooling is * not enabled. */ public static synchronized long getPoolSize() { return poolSize; } /** * Return true if rmm is initialized and pooling has been enabled, else false. */ public static synchronized boolean isPoolingEnabled() { return poolingEnabled; } /** * Check if RMM has been initialized already or not. */ public static boolean isInitialized() throws RmmException { return initialized; } /** * Return the amount of RMM memory allocated in bytes. Note that the result * may be less than the actual amount of allocated memory if underlying RMM * allocator decides to return more memory than what was requested. However, * the result will always be a lower bound on the amount allocated. 
*/ public static synchronized long getTotalBytesAllocated() { if (tracker == null) { return 0; } else { return tracker.getTotalBytesAllocated(); } } /** * Returns the maximum amount of RMM memory (Bytes) outstanding during the * lifetime of the process. */ public static synchronized long getMaximumTotalBytesAllocated() { if (tracker == null) { return 0; } else { return tracker.getMaxTotalBytesAllocated(); } } /** * Resets a scoped maximum counter of RMM memory used to keep track of usage between * code sections while debugging. * * @param initialValue an initial value (in Bytes) to use for this scoped counter */ public static synchronized void resetScopedMaximumBytesAllocated(long initialValue) { if (tracker != null) { tracker.resetScopedMaxTotalBytesAllocated(initialValue); } } /** * Resets a scoped maximum counter of RMM memory used to keep track of usage between * code sections while debugging. * * This resets the counter to 0 Bytes. */ public static synchronized void resetScopedMaximumBytesAllocated() { if (tracker != null) { tracker.resetScopedMaxTotalBytesAllocated(0L); } } /** * Returns the maximum amount of RMM memory (Bytes) outstanding since the last * `resetScopedMaximumBytesAllocated` call was issued (it is "scoped" because it's the * maximum amount seen since the last reset). * <p> * If the memory used is net negative (for example if only frees happened since * reset, and we reset to 0), then the result will be 0. * <p> * If `resetScopedMaximumBytesAllocated` is never called, the scope is the whole * program and is equivalent to `getMaximumTotalBytesAllocated`. * * @return the scoped maximum bytes allocated */ public static synchronized long getScopedMaximumBytesAllocated() { if (tracker == null) { return 0L; } else { return tracker.getScopedMaxTotalBytesAllocated(); } } /** * Sets the event handler to be called on RMM events (e.g.: allocation failure). * @param handler event handler to invoke on RMM events or null to clear an existing handler * @throws RmmException if an active handler is already set */ public static void setEventHandler(RmmEventHandler handler) throws RmmException { setEventHandler(handler, false); } /** * Sets the event handler to be called on RMM events (e.g.: allocation failure) and * optionally enable debug mode (callbacks on every allocate and deallocate) * <p> * NOTE: Only enable debug mode when necessary, as code will run much slower! * * @param handler event handler to invoke on RMM events or null to clear an existing handler * @param enableDebug if true enable debug callbacks in RmmEventHandler * (onAllocated, onDeallocated) * @throws RmmException if an active handler is already set */ public static synchronized void setEventHandler(RmmEventHandler handler, boolean enableDebug) throws RmmException { if (!initialized) { throw new RmmException("RMM has not been initialized"); } if (deviceResource instanceof RmmEventHandlerResourceAdaptor) { throw new RmmException("Another event handler is already set"); } if (tracker == null) { // This is just to be safe; it should always be true if this is initialized. throw new RmmException("A tracker must be set for the event handler to work"); } RmmEventHandlerResourceAdaptor<RmmDeviceMemoryResource> newResource = new RmmEventHandlerResourceAdaptor<>(deviceResource, tracker, handler, enableDebug); boolean success = false; try { setCurrentDeviceResource(newResource, deviceResource, false); success = true; } finally { if (!success) { newResource.releaseWrapped(); } } } /** Clears the active RMM event handler if one is set. 
*/ public static synchronized void clearEventHandler() throws RmmException { if (deviceResource != null && deviceResource instanceof RmmEventHandlerResourceAdaptor) { RmmEventHandlerResourceAdaptor<RmmDeviceMemoryResource> orig = (RmmEventHandlerResourceAdaptor<RmmDeviceMemoryResource>)deviceResource; boolean success = false; try { setCurrentDeviceResource(orig.wrapped, orig, false); success = true; } finally { if (success) { orig.releaseWrapped(); } } } } public static native void initDefaultCudaDevice(); public static native void cleanupDefaultCudaDevice(); /** * Shut down any initialized RMM instance. This should be used very rarely. It does not need to * be used when shutting down your process because CUDA will handle releasing all of the * resources when your process exits. This really should only be used if you want to turn off the * memory pool for some reason. As such we make an effort to be sure no resources have been * leaked before shutting down. This may involve forcing a JVM GC to collect any leaked java * objects that still point to CUDA memory. By default this will do a gc every 2 seconds and * wait for up to 4 seconds before throwing an RmmException if not all of the resources are freed. * @throws RmmException on any error. This includes if there are outstanding allocations that * could not be collected. */ public static void shutdown() throws RmmException { shutdown(2, 4, TimeUnit.SECONDS); } /** * Shut down any initialized RMM instance. This should be used very rarely. It does not need to * be used when shutting down your process because CUDA will handle releasing all of the * resources when your process exits. This really should only be used if you want to turn off the * memory pool for some reason. As such we make an effort to be sure no resources have been * leaked before shutting down. This may involve forcing a JVM GC to collect any leaked java * objects that still point to CUDA memory. * * @param forceGCInterval how frequently should we force a JVM GC. This is just a recommendation * to the JVM to do a gc. * @param maxWaitTime the maximum amount of time to wait for all objects to be collected before * throwing an exception. * @param units the units for forceGCInterval and maxWaitTime. * @throws RmmException on any error. This includes if there are outstanding allocations that * could not be collected before maxWaitTime. */ public static synchronized void shutdown(long forceGCInterval, long maxWaitTime, TimeUnit units) throws RmmException { long now = System.currentTimeMillis(); final long endTime = now + units.toMillis(maxWaitTime); long nextGcTime = now; try { if (MemoryCleaner.bestEffortHasRmmBlockers()) { do { if (nextGcTime <= now) { System.gc(); nextGcTime = nextGcTime + units.toMillis(forceGCInterval); } // Check if everything is ready about every 10 ms Thread.sleep(10); now = System.currentTimeMillis(); } while (endTime > now && MemoryCleaner.bestEffortHasRmmBlockers()); } } catch (InterruptedException e) { // Ignored } if (MemoryCleaner.bestEffortHasRmmBlockers()) { throw new RmmException("Could not shut down RMM; there appear to be outstanding allocations"); } if (initialized) { if (deviceResource != null) { setCurrentDeviceResource(null, deviceResource, true).close(); } } } /** * Allocate device memory and return a pointer to device memory, using stream 0. 
* @param size The size in bytes of the allocated memory region * @return Returned pointer to the allocated memory */ public static DeviceMemoryBuffer alloc(long size) { return alloc(size, null); } /** * Allocate device memory and return a pointer to device memory. * @param size The size in bytes of the allocated memory region * @param stream The stream in which to synchronize this command. * @return Returned pointer to the allocated memory */ public static DeviceMemoryBuffer alloc(long size, Cuda.Stream stream) { long s = stream == null ? 0 : stream.getStream(); return new DeviceMemoryBuffer(allocInternal(size, s), size, stream); } private static native long allocInternal(long size, long stream) throws RmmException; static native void free(long ptr, long length, long stream) throws RmmException; /** * Delete an rmm::device_buffer. */ static native void freeDeviceBuffer(long rmmBufferAddress) throws RmmException; /** * Allocate device memory using `cudaMalloc` and return a pointer to device memory. * @param size The size in bytes of the allocated memory region * @param stream The stream in which to synchronize this command. * @return Returned pointer to the allocated memory */ public static CudaMemoryBuffer allocCuda(long size, Cuda.Stream stream) { long s = stream == null ? 0 : stream.getStream(); return new CudaMemoryBuffer(allocCudaInternal(size, s), size, stream); } private static native long allocCudaInternal(long size, long stream) throws RmmException; static native void freeCuda(long ptr, long length, long stream) throws RmmException; static native long newCudaMemoryResource() throws RmmException; static native void releaseCudaMemoryResource(long handle); static native long newManagedMemoryResource() throws RmmException; static native void releaseManagedMemoryResource(long handle); static native long newPoolMemoryResource(long childHandle, long initSize, long maxSize) throws RmmException; static native void releasePoolMemoryResource(long handle); static native long newArenaMemoryResource(long childHandle, long size, boolean dumpOnOOM) throws RmmException; static native void releaseArenaMemoryResource(long handle); static native long newCudaAsyncMemoryResource(long size, long release) throws RmmException; static native void releaseCudaAsyncMemoryResource(long handle); static native long newLimitingResourceAdaptor(long handle, long limit, long align) throws RmmException; static native void releaseLimitingResourceAdaptor(long handle); static native long newLoggingResourceAdaptor(long handle, int type, String path, boolean autoFlush) throws RmmException; static native void releaseLoggingResourceAdaptor(long handle); static native long newTrackingResourceAdaptor(long handle, long alignment) throws RmmException; static native void releaseTrackingResourceAdaptor(long handle); static native long nativeGetTotalBytesAllocated(long handle); static native long nativeGetMaxTotalBytesAllocated(long handle); static native void nativeResetScopedMaxTotalBytesAllocated(long handle, long initValue); static native long nativeGetScopedMaxTotalBytesAllocated(long handle); static native long newEventHandlerResourceAdaptor(long handle, long trackerHandle, RmmEventHandler handler, long[] allocThresholds, long[] deallocThresholds, boolean debug); static native long releaseEventHandlerResourceAdaptor(long handle, boolean debug); private static native void setCurrentDeviceResourceInternal(long newHandle); }
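An illustrative startup/teardown fragment using only methods declared in this class; the pool size is a placeholder.

```java
// Initialize a 1 GiB pooled allocator, allocate a buffer, then shut RMM back down.
long poolSize = 1024L * 1024 * 1024;
Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), poolSize);
try (DeviceMemoryBuffer buf = Rmm.alloc(64 * 1024)) {
  // use the device buffer
}
Rmm.shutdown();
```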
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmTrackingResourceAdaptor.java
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * A device memory resource that will track some basic statistics about the memory usage. */ public class RmmTrackingResourceAdaptor<C extends RmmDeviceMemoryResource> extends RmmWrappingDeviceMemoryResource<C> { private long handle = 0; /** * Create a new tracking resource adaptor. * @param wrapped the memory resource to track allocations. This should not be reused. * @param alignment the alignment to apply. */ public RmmTrackingResourceAdaptor(C wrapped, long alignment) { super(wrapped); handle = Rmm.newTrackingResourceAdaptor(wrapped.getHandle(), alignment); } @Override public long getHandle() { return handle; } public long getTotalBytesAllocated() { return Rmm.nativeGetTotalBytesAllocated(getHandle()); } public long getMaxTotalBytesAllocated() { return Rmm.nativeGetMaxTotalBytesAllocated(getHandle()); } public void resetScopedMaxTotalBytesAllocated(long initValue) { Rmm.nativeResetScopedMaxTotalBytesAllocated(getHandle(), initValue); } public long getScopedMaxTotalBytesAllocated() { return Rmm.nativeGetScopedMaxTotalBytesAllocated(getHandle()); } @Override public void close() { if (handle != 0) { Rmm.releaseTrackingResourceAdaptor(handle); handle = 0; } super.close(); } @Override public String toString() { return Long.toHexString(getHandle()) + "/TRACK(" + wrapped + ")"; } }
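The tracker's counters are normally read through the static helpers on Rmm, which installs one of these adaptors automatically at initialization. A small fragment:

```java
// Measure the high-water mark of a section of code.
Rmm.resetScopedMaximumBytesAllocated();
try (DeviceMemoryBuffer buf = Rmm.alloc(1024 * 1024)) {
  System.out.println("outstanding bytes: " + Rmm.getTotalBytesAllocated());
}
System.out.println("scoped max bytes: " + Rmm.getScopedMaximumBytesAllocated());
```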
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ArrowIPCOptions.java
/* * * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Options for reading data in Arrow IPC format */ public class ArrowIPCOptions { public interface NeedGpu { /** * A callback to indicate that we are about to start putting data on the GPU. */ void needTheGpu(); } public static ArrowIPCOptions DEFAULT = new ArrowIPCOptions(new Builder()); private final NeedGpu callback; private ArrowIPCOptions(Builder builder) { this.callback = builder.callback; } public NeedGpu getCallback() { return callback; } public static Builder builder() { return new Builder(); } public static class Builder { private NeedGpu callback = () -> {}; public Builder withCallback(NeedGpu callback) { if (callback == null) { this.callback = () -> {}; } else { this.callback = callback; } return this; } public ArrowIPCOptions build() { return new ArrowIPCOptions(this); } } }
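A minimal sketch of registering the callback; the println stands in for real work such as acquiring a GPU semaphore before device memory is touched.

```java
// Invoked right before the Arrow IPC reader starts putting data on the GPU.
ArrowIPCOptions opts = ArrowIPCOptions.builder()
    .withCallback(() -> System.out.println("about to use the GPU"))
    .build();
```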
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/UnaryOp.java
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Mathematical unary operations. */ public enum UnaryOp { SIN(0), COS(1), TAN(2), ARCSIN(3), ARCCOS(4), ARCTAN(5), SINH(6), COSH(7), TANH(8), ARCSINH(9), ARCCOSH(10), ARCTANH(11), EXP(12), LOG(13), SQRT(14), CBRT(15), CEIL(16), FLOOR(17), ABS(18), RINT(19), BIT_INVERT(20), NOT(21); private static final UnaryOp[] OPS = UnaryOp.values(); final int nativeId; UnaryOp(int nativeId) { this.nativeId = nativeId; } static UnaryOp fromNative(int nativeId) { for (UnaryOp type : OPS) { if (type.nativeId == nativeId) { return type; } } throw new IllegalArgumentException("Could not translate " + nativeId + " into a UnaryOp"); } }
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/WindowOptions.java
/*
 *
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Options for rolling windows.
 */
public class WindowOptions implements AutoCloseable {
  enum FrameType {ROWS, RANGE}

  /**
   * Extent of (range) window bounds.
   * Analogous to cudf::range_window_bounds::extent_type.
   */
  enum RangeExtentType {
    CURRENT_ROW(0), // Bounds defined as the first/last row that matches the current row.
    BOUNDED(1),     // Bounds defined as the first/last row that falls within
                    // a specified range from the current row.
    UNBOUNDED(2);   // Bounds stretching to the first/last row in the entire group.

    public final int nominalValue;

    RangeExtentType(int n) {
      this.nominalValue = n;
    }
  }

  private final int minPeriods;
  private final Scalar precedingScalar;
  private final Scalar followingScalar;
  private final ColumnVector precedingCol;
  private final ColumnVector followingCol;
  private final int orderByColumnIndex;
  private final boolean orderByOrderAscending;
  private final FrameType frameType;
  private final RangeExtentType precedingBoundsExtent;
  private final RangeExtentType followingBoundsExtent;

  private WindowOptions(Builder builder) {
    this.minPeriods = builder.minPeriods;
    this.precedingScalar = builder.precedingScalar;
    if (precedingScalar != null) {
      precedingScalar.incRefCount();
    }
    this.followingScalar = builder.followingScalar;
    if (followingScalar != null) {
      followingScalar.incRefCount();
    }
    this.precedingCol = builder.precedingCol;
    if (precedingCol != null) {
      precedingCol.incRefCount();
    }
    this.followingCol = builder.followingCol;
    if (followingCol != null) {
      followingCol.incRefCount();
    }
    this.orderByColumnIndex = builder.orderByColumnIndex;
    this.orderByOrderAscending = builder.orderByOrderAscending;
    this.frameType = orderByColumnIndex == -1 ? FrameType.ROWS : FrameType.RANGE;
    this.precedingBoundsExtent = builder.precedingBoundsExtent;
    this.followingBoundsExtent = builder.followingBoundsExtent;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    } else if (other instanceof WindowOptions) {
      WindowOptions o = (WindowOptions) other;
      boolean ret = this.minPeriods == o.minPeriods
          && this.orderByColumnIndex == o.orderByColumnIndex
          && this.orderByOrderAscending == o.orderByOrderAscending
          && this.frameType == o.frameType
          && this.precedingBoundsExtent == o.precedingBoundsExtent
          && this.followingBoundsExtent == o.followingBoundsExtent;
      if (precedingCol != null) {
        ret = ret && precedingCol.equals(o.precedingCol);
      }
      if (followingCol != null) {
        ret = ret && followingCol.equals(o.followingCol);
      }
      if (precedingScalar != null) {
        ret = ret && precedingScalar.equals(o.precedingScalar);
      }
      if (followingScalar != null) {
        ret = ret && followingScalar.equals(o.followingScalar);
      }
      return ret;
    }
    return false;
  }

  @Override
  public int hashCode() {
    int ret = 7;
    ret = 31 * ret + minPeriods;
    ret = 31 * ret + orderByColumnIndex;
    ret = 31 * ret + Boolean.hashCode(orderByOrderAscending);
    ret = 31 * ret + frameType.hashCode();
    if (precedingCol != null) {
      ret = 31 * ret + precedingCol.hashCode();
    }
    if (followingCol != null) {
      ret = 31 * ret + followingCol.hashCode();
    }
    if (precedingScalar != null) {
      ret = 31 * ret + precedingScalar.hashCode();
    }
    if (followingScalar != null) {
      ret = 31 * ret + followingScalar.hashCode();
    }
    ret = 31 * ret + precedingBoundsExtent.hashCode();
    ret = 31 * ret + followingBoundsExtent.hashCode();
    return ret;
  }

  public static Builder builder() {
    return new Builder();
  }

  int getMinPeriods() { return this.minPeriods; }

  Scalar getPrecedingScalar() { return this.precedingScalar; }

  Scalar getFollowingScalar() { return this.followingScalar; }

  ColumnVector getPrecedingCol() { return precedingCol; }

  ColumnVector getFollowingCol() { return this.followingCol; }

  @Deprecated
  int getTimestampColumnIndex() { return getOrderByColumnIndex(); }

  int getOrderByColumnIndex() { return this.orderByColumnIndex; }

  @Deprecated
  boolean isTimestampOrderAscending() { return isOrderByOrderAscending(); }

  boolean isOrderByOrderAscending() { return this.orderByOrderAscending; }

  boolean isUnboundedPreceding() { return this.precedingBoundsExtent == RangeExtentType.UNBOUNDED; }

  boolean isUnboundedFollowing() { return this.followingBoundsExtent == RangeExtentType.UNBOUNDED; }

  boolean isCurrentRowPreceding() { return this.precedingBoundsExtent == RangeExtentType.CURRENT_ROW; }

  boolean isCurrentRowFollowing() { return this.followingBoundsExtent == RangeExtentType.CURRENT_ROW; }

  RangeExtentType getPrecedingBoundsExtent() { return this.precedingBoundsExtent; }

  RangeExtentType getFollowingBoundsExtent() { return this.followingBoundsExtent; }

  FrameType getFrameType() { return frameType; }

  public static class Builder {
    private int minPeriods = 1;
    // for range window
    private Scalar precedingScalar = null;
    private Scalar followingScalar = null;
    private ColumnVector precedingCol = null;
    private ColumnVector followingCol = null;
    private int orderByColumnIndex = -1;
    private boolean orderByOrderAscending = true;
    private RangeExtentType precedingBoundsExtent = RangeExtentType.BOUNDED;
    private RangeExtentType followingBoundsExtent = RangeExtentType.BOUNDED;

    /**
     * Set the minimum number of observations required to evaluate an element. If there are not
     * enough elements for a given window a null is placed in the result instead.
     */
    public Builder minPeriods(int minPeriods) {
      if (minPeriods < 0) {
        throw new IllegalArgumentException("Minimum observations must be non-negative");
      }
      this.minPeriods = minPeriods;
      return this;
    }

    /**
     * Set the size of the window, one entry per row. This does not take ownership of the
     * columns passed in, so you have to be sure that the lifetime of the columns outlives
     * this operation.
     * @param precedingCol the number of rows preceding the current row; the caller must keep
     *                     precedingCol valid for the lifetime of this WindowOptions.
     * @param followingCol the number of rows following the current row; the caller must keep
     *                     followingCol valid for the lifetime of this WindowOptions.
     */
    public Builder window(ColumnVector precedingCol, ColumnVector followingCol) {
      if (precedingCol == null || precedingCol.hasNulls()) {
        throw new IllegalArgumentException("preceding cannot be null or have nulls");
      }
      if (followingCol == null || followingCol.hasNulls()) {
        throw new IllegalArgumentException("following cannot be null or have nulls");
      }
      if (precedingBoundsExtent != RangeExtentType.BOUNDED || precedingScalar != null) {
        throw new IllegalStateException("preceding has already been set a different way");
      }
      if (followingBoundsExtent != RangeExtentType.BOUNDED || followingScalar != null) {
        throw new IllegalStateException("following has already been set a different way");
      }
      this.precedingCol = precedingCol;
      this.followingCol = followingCol;
      return this;
    }

    /**
     * Set the size of the range window.
     * @param precedingScalar the relative number preceding the current row; the caller must keep
     *                        precedingScalar valid for the lifetime of this WindowOptions.
     * @param followingScalar the relative number following the current row; the caller must keep
     *                        followingScalar valid for the lifetime of this WindowOptions.
     */
    public Builder window(Scalar precedingScalar, Scalar followingScalar) {
      return preceding(precedingScalar).following(followingScalar);
    }

    /**
     * @deprecated Use orderByColumnIndex(int index)
     */
    @Deprecated
    public Builder timestampColumnIndex(int index) {
      return orderByColumnIndex(index);
    }

    public Builder orderByColumnIndex(int index) {
      this.orderByColumnIndex = index;
      return this;
    }

    /**
     * @deprecated Use orderByAscending()
     */
    @Deprecated
    public Builder timestampAscending() {
      return orderByAscending();
    }

    public Builder orderByAscending() {
      this.orderByOrderAscending = true;
      return this;
    }

    public Builder orderByDescending() {
      this.orderByOrderAscending = false;
      return this;
    }

    /**
     * @deprecated Use orderByDescending()
     */
    @Deprecated
    public Builder timestampDescending() {
      return orderByDescending();
    }

    public Builder currentRowPreceding() {
      if (precedingCol != null || precedingScalar != null) {
        throw new IllegalStateException("preceding has already been set a different way");
      }
      this.precedingBoundsExtent = RangeExtentType.CURRENT_ROW;
      return this;
    }

    public Builder currentRowFollowing() {
      if (followingCol != null || followingScalar != null) {
        throw new IllegalStateException("following has already been set a different way");
      }
      this.followingBoundsExtent = RangeExtentType.CURRENT_ROW;
      return this;
    }

    public Builder unboundedPreceding() {
      if (precedingCol != null || precedingScalar != null) {
        throw new IllegalStateException("preceding has already been set a different way");
      }
      this.precedingBoundsExtent = RangeExtentType.UNBOUNDED;
      return this;
    }

    public Builder unboundedFollowing() {
      if (followingCol != null || followingScalar != null) {
        throw new IllegalStateException("following has already been set a different way");
      }
      this.followingBoundsExtent = RangeExtentType.UNBOUNDED;
      return this;
    }

    /**
     * Set the relative number preceding the current row for a range window.
     * @return this for chaining
     */
    public Builder preceding(Scalar preceding) {
      if (preceding == null || !preceding.isValid()) {
        throw new IllegalArgumentException("preceding cannot be null");
      }
      if (precedingBoundsExtent != RangeExtentType.BOUNDED || precedingCol != null) {
        throw new IllegalStateException("preceding has already been set a different way");
      }
      this.precedingScalar = preceding;
      return this;
    }

    /**
     * Set the relative number following the current row for a range window.
     * @return this for chaining
     */
    public Builder following(Scalar following) {
      if (following == null || !following.isValid()) {
        throw new IllegalArgumentException("following cannot be null");
      }
      if (followingBoundsExtent != RangeExtentType.BOUNDED || followingCol != null) {
        throw new IllegalStateException("following has already been set a different way");
      }
      this.followingScalar = following;
      return this;
    }

    public WindowOptions build() {
      return new WindowOptions(this);
    }
  }

  public synchronized WindowOptions incRefCount() {
    if (precedingScalar != null) {
      precedingScalar.incRefCount();
    }
    if (followingScalar != null) {
      followingScalar.incRefCount();
    }
    if (precedingCol != null) {
      precedingCol.incRefCount();
    }
    if (followingCol != null) {
      followingCol.incRefCount();
    }
    return this;
  }

  @Override
  public void close() {
    if (precedingScalar != null) {
      precedingScalar.close();
    }
    if (followingScalar != null) {
      followingScalar.close();
    }
    if (precedingCol != null) {
      precedingCol.close();
    }
    if (followingCol != null) {
      followingCol.close();
    }
  }
}
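As a hedged illustration of the builder flow (not taken from this file): a row-based frame bounded by scalar offsets, with at least one observation required. The downstream aggregateWindows call referenced in the comment is an assumption from the related rolling-window API.

try (Scalar two = Scalar.fromInt(2);
     Scalar zero = Scalar.fromInt(0);
     WindowOptions window = WindowOptions.builder()
         .minPeriods(1)
         .window(two, zero) // WindowOptions takes its own references to the scalars
         .build()) {
  // e.g. table.groupBy(0).aggregateWindows(
  //          RollingAggregation.sum().onColumn(1).overWindow(window));
}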
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RollingAggregation.java
/*
 *
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * An aggregation that can be used on rolling windows.
 */
public final class RollingAggregation {
  private final Aggregation wrapped;

  private RollingAggregation(Aggregation wrapped) {
    this.wrapped = wrapped;
  }

  long createNativeInstance() {
    return wrapped.createNativeInstance();
  }

  long getDefaultOutput() {
    return wrapped.getDefaultOutput();
  }

  /**
   * Add a column to the Aggregation so it can be used on a specific column of data.
   * @param columnIndex the index of the column to operate on.
   */
  public RollingAggregationOnColumn onColumn(int columnIndex) {
    return new RollingAggregationOnColumn(this, columnIndex);
  }

  @Override
  public int hashCode() {
    return wrapped.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof RollingAggregation) {
      RollingAggregation o = (RollingAggregation) other;
      return wrapped.equals(o.wrapped);
    }
    return false;
  }

  /**
   * Rolling Window Sum
   */
  public static RollingAggregation sum() {
    return new RollingAggregation(Aggregation.sum());
  }

  /**
   * Rolling Window Min
   */
  public static RollingAggregation min() {
    return new RollingAggregation(Aggregation.min());
  }

  /**
   * Rolling Window Max
   */
  public static RollingAggregation max() {
    return new RollingAggregation(Aggregation.max());
  }

  /**
   * Rolling Window Standard Deviation with 1 as delta degrees of freedom (DDOF).
   */
  public static RollingAggregation standardDeviation() {
    return new RollingAggregation(Aggregation.standardDeviation());
  }

  /**
   * Rolling Window Standard Deviation with configurable delta degrees of freedom (DDOF).
   */
  public static RollingAggregation standardDeviation(int ddof) {
    return new RollingAggregation(Aggregation.standardDeviation(ddof));
  }

  /**
   * Count number of valid, a.k.a. non-null, elements.
   */
  public static RollingAggregation count() {
    return new RollingAggregation(Aggregation.count());
  }

  /**
   * Count number of elements.
   * @param nullPolicy INCLUDE if nulls should be counted. EXCLUDE if only non-null values
   *                   should be counted.
   */
  public static RollingAggregation count(NullPolicy nullPolicy) {
    return new RollingAggregation(Aggregation.count(nullPolicy));
  }

  /**
   * Arithmetic Mean
   */
  public static RollingAggregation mean() {
    return new RollingAggregation(Aggregation.mean());
  }

  /**
   * Index of max element.
   */
  public static RollingAggregation argMax() {
    return new RollingAggregation(Aggregation.argMax());
  }

  /**
   * Index of min element.
   */
  public static RollingAggregation argMin() {
    return new RollingAggregation(Aggregation.argMin());
  }

  /**
   * Get the row number.
   */
  public static RollingAggregation rowNumber() {
    return new RollingAggregation(Aggregation.rowNumber());
  }

  /**
   * In a rolling window return the value offset entries ahead or null if it is outside of the
   * window.
   */
  public static RollingAggregation lead(int offset) {
    return lead(offset, null);
  }

  /**
   * In a rolling window return the value offset entries ahead or the corresponding value from
   * defaultOutput if it is outside of the window. Note that this does not take any ownership of
   * defaultOutput and the caller must ensure that defaultOutput remains valid during the
   * lifetime of this aggregation operation.
   */
  public static RollingAggregation lead(int offset, ColumnVector defaultOutput) {
    return new RollingAggregation(Aggregation.lead(offset, defaultOutput));
  }

  /**
   * In a rolling window return the value offset entries behind or null if it is outside of the
   * window.
   */
  public static RollingAggregation lag(int offset) {
    return lag(offset, null);
  }

  /**
   * In a rolling window return the value offset entries behind or the corresponding value from
   * defaultOutput if it is outside of the window. Note that this does not take any ownership of
   * defaultOutput and the caller must ensure that defaultOutput remains valid during the
   * lifetime of this aggregation operation.
   */
  public static RollingAggregation lag(int offset, ColumnVector defaultOutput) {
    return new RollingAggregation(Aggregation.lag(offset, defaultOutput));
  }

  /**
   * Collect the values into a list. Nulls will be skipped.
   */
  public static RollingAggregation collectList() {
    return new RollingAggregation(Aggregation.collectList());
  }

  /**
   * Collect the values into a list.
   *
   * @param nullPolicy Indicates whether to include/exclude nulls during collection.
   */
  public static RollingAggregation collectList(NullPolicy nullPolicy) {
    return new RollingAggregation(Aggregation.collectList(nullPolicy));
  }

  /**
   * Collect the values into a set. All null values will be excluded, and all NaN values are
   * regarded as unique instances.
   */
  public static RollingAggregation collectSet() {
    return new RollingAggregation(Aggregation.collectSet());
  }

  /**
   * Collect the values into a set.
   *
   * @param nullPolicy   Indicates whether to include/exclude nulls during collection.
   * @param nullEquality Flag to specify whether null entries within each list should be considered equal.
   * @param nanEquality  Flag to specify whether NaN values in floating point column should be considered equal.
   */
  public static RollingAggregation collectSet(NullPolicy nullPolicy, NullEquality nullEquality,
                                              NaNEquality nanEquality) {
    return new RollingAggregation(Aggregation.collectSet(nullPolicy, nullEquality, nanEquality));
  }

  /**
   * Select the nth element from a specified window.
   *
   * @param n          Indicates the index of the element to be selected from the window
   * @param nullPolicy Indicates whether null elements are to be skipped, or not
   */
  public static RollingAggregation nth(int n, NullPolicy nullPolicy) {
    return new RollingAggregation(Aggregation.nth(n, nullPolicy));
  }
}
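A brief hedged sketch of composing an aggregation with a column; the overWindow and aggregateWindows steps are assumptions from the related window APIs.

RollingAggregationOnColumn windowedSum = RollingAggregation.sum().onColumn(0);
RollingAggregationOnColumn lagged = RollingAggregation.lag(1).onColumn(0);
// Both would typically be evaluated via table.groupBy(...).aggregateWindows(
//     windowedSum.overWindow(windowOptions), lagged.overWindow(windowOptions));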
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/QuantileMethod.java
/*
 *
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Interpolation method to use when the desired quantile lies between
 * two data points i and j.
 */
public enum QuantileMethod {
  /**
   * Linear interpolation between i and j
   */
  LINEAR(0),
  /**
   * Lower data point (i)
   */
  LOWER(1),
  /**
   * Higher data point (j)
   */
  HIGHER(2),
  /**
   * (i + j)/2
   */
  MIDPOINT(3),
  /**
   * i or j, whichever is nearest
   */
  NEAREST(4);

  final int nativeId;

  QuantileMethod(int nativeId) {
    this.nativeId = nativeId;
  }
}
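A worked illustration (not from this file): for the sorted data {1, 9}, the 0.25 quantile falls at position 0.25 * (n - 1) = 0.25, between i = 1 and j = 9, so LINEAR yields 1 + 0.25 * (9 - 1) = 3, LOWER yields 1, HIGHER yields 9, MIDPOINT yields 5, and NEAREST yields 1. The quantile entry point on ColumnVector used below is an assumption about the surrounding API.

// Sketch only: assumes ColumnVector exposes quantile(QuantileMethod, double[]).
try (ColumnVector cv = ColumnVector.fromDoubles(1, 9);
     ColumnVector linear = cv.quantile(QuantileMethod.LINEAR, new double[]{0.25});      // 3.0
     ColumnVector midpoint = cv.quantile(QuantileMethod.MIDPOINT, new double[]{0.25})) { // 5.0
  // the single-row results can be copied to the host for inspection
}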
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/PackedColumnMetadata.java
/*
 *
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import java.nio.ByteBuffer;

/**
 * Metadata for a table that is backed by a single contiguous device buffer.
 */
public final class PackedColumnMetadata implements AutoCloseable {
  private long metadataHandle = 0;
  private ByteBuffer metadataBuffer = null;

  // This method is invoked by JNI
  static PackedColumnMetadata fromPackedColumnMeta(long metadataHandle) {
    return new PackedColumnMetadata(metadataHandle);
  }

  /**
   * Construct the PackedColumnMetadata instance given a metadata handle.
   * @param metadataHandle address of the cudf packed_table host-based metadata instance
   */
  PackedColumnMetadata(long metadataHandle) {
    this.metadataHandle = metadataHandle;
  }

  /**
   * Get the byte buffer containing the host metadata describing the schema and layout of the
   * contiguous table.
   * <p>
   * NOTE: This is a direct byte buffer that is backed by the underlying native metadata instance
   * and therefore is only valid to be used while this PackedColumnMetadata instance is valid.
   * Attempts to cache and access the resulting buffer after this instance has been destroyed
   * will result in undefined behavior including the possibility of segmentation faults
   * or data corruption.
   */
  public ByteBuffer getMetadataDirectBuffer() {
    if (metadataBuffer == null) {
      metadataBuffer = createMetadataDirectBuffer(metadataHandle);
    }
    return metadataBuffer.asReadOnlyBuffer();
  }

  /** Close the PackedColumnMetadata instance and its underlying resources. */
  @Override
  public void close() {
    if (metadataHandle != 0) {
      closeMetadata(metadataHandle);
      metadataHandle = 0;
    }
  }

  // create a DirectByteBuffer for the packed metadata
  private static native ByteBuffer createMetadataDirectBuffer(long metadataHandle);

  // release the native metadata resources for a packed table
  private static native void closeMetadata(long metadataHandle);
}
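Since the direct buffer's validity is tied to the metadata's lifetime, a defensive copy is the safe pattern when the bytes must outlive the instance. `meta` below is a placeholder for a metadata instance obtained elsewhere (e.g. via JNI).

// Sketch only: `meta` is a hypothetical, already-obtained PackedColumnMetadata.
ByteBuffer direct = meta.getMetadataDirectBuffer();
byte[] copy = new byte[direct.remaining()];
direct.get(copy);  // heap copy that survives meta.close()
meta.close();      // the direct buffer must not be touched after this point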
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostMemoryAllocator.java
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

public interface HostMemoryAllocator {

  /**
   * Allocate memory, but be sure to close the returned buffer to avoid memory leaks.
   * @param bytes size in bytes to allocate
   * @param preferPinned If set to true, the pinned memory pool will be used if possible with a
   *                     fallback to off-heap memory. If set to false, the allocation will always
   *                     be from off-heap memory.
   * @return the newly created buffer
   */
  HostMemoryBuffer allocate(long bytes, boolean preferPinned);

  /**
   * Allocate memory, but be sure to close the returned buffer to avoid memory leaks. Whether
   * pinned memory is preferred for the allocation is up to the implementor.
   *
   * @param bytes size in bytes to allocate
   * @return the newly created buffer
   */
  HostMemoryBuffer allocate(long bytes);
}
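A minimal hedged sketch of allocating through the interface; DefaultHostMemoryAllocator is assumed to be the stock implementation available in this package.

HostMemoryAllocator allocator = DefaultHostMemoryAllocator.get(); // assumed factory
try (HostMemoryBuffer buf = allocator.allocate(1024, true)) { // prefer pinned memory
  buf.setLong(0, 42L);
}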
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ContiguousTable.java
/*
 *
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import java.nio.ByteBuffer;

/**
 * A table that is backed by a single contiguous device buffer. This makes transfers of the data
 * much simpler.
 */
public final class ContiguousTable implements AutoCloseable {
  private Table table = null;
  private DeviceMemoryBuffer buffer;
  private final long rowCount;
  private PackedColumnMetadata meta;
  private ByteBuffer metadataBuffer;

  // This method is invoked by JNI
  static ContiguousTable fromPackedTable(long metadataHandle,
                                         long dataAddress,
                                         long dataLength,
                                         long rmmBufferAddress,
                                         long rowCount) {
    DeviceMemoryBuffer buffer = DeviceMemoryBuffer.fromRmm(dataAddress, dataLength, rmmBufferAddress);
    return new ContiguousTable(metadataHandle, buffer, rowCount);
  }

  /** Construct a contiguous table instance given a table and the device buffer backing it. */
  ContiguousTable(Table table, DeviceMemoryBuffer buffer) {
    this.meta = new PackedColumnMetadata(createPackedMetadata(table.getNativeView(),
        buffer.getAddress(), buffer.getLength()));
    this.table = table;
    this.buffer = buffer;
    this.rowCount = table.getRowCount();
  }

  /**
   * Construct a contiguous table
   * @param metadataHandle address of the cudf packed_table host-based metadata instance
   * @param buffer buffer containing the packed table data
   * @param rowCount number of rows in the table
   */
  ContiguousTable(long metadataHandle, DeviceMemoryBuffer buffer, long rowCount) {
    this.meta = new PackedColumnMetadata(metadataHandle);
    this.buffer = buffer;
    this.rowCount = rowCount;
  }

  /**
   * Returns the number of rows in the table. This accessor avoids manifesting
   * the Table instance if only the row count is needed.
   */
  public long getRowCount() {
    return rowCount;
  }

  /** Get the table instance, reconstructing it from the metadata if necessary. */
  public synchronized Table getTable() {
    if (table == null) {
      table = Table.fromPackedTable(getMetadataDirectBuffer(), buffer);
    }
    return table;
  }

  /** Get the device buffer backing the contiguous table data. */
  public DeviceMemoryBuffer getBuffer() {
    return buffer;
  }

  /**
   * Get the byte buffer containing the host metadata describing the schema and layout of the
   * contiguous table.
   * <p>
   * NOTE: This is a direct byte buffer that is backed by the underlying native metadata instance
   * and therefore is only valid to be used while this contiguous table instance is valid.
   * Attempts to cache and access the resulting buffer after this instance has been destroyed
   * will result in undefined behavior including the possibility of segmentation faults
   * or data corruption.
   */
  public ByteBuffer getMetadataDirectBuffer() {
    return meta.getMetadataDirectBuffer();
  }

  /** Close the contiguous table instance and its underlying resources. */
  @Override
  public void close() {
    if (meta != null) {
      meta.close();
    }

    if (table != null) {
      table.close();
      table = null;
    }

    if (buffer != null) {
      buffer.close();
      buffer = null;
    }
  }

  // create packed metadata for a table backed by a single data buffer
  private static native long createPackedMetadata(long tableView, long dataAddress, long dataSize);
}
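A hedged sketch of the intended flow: Table.contiguousSplit (assumed from the wider Table API) produces ContiguousTable instances whose row counts can be inspected cheaply before the Table is ever manifested. `t` below is a hypothetical source table.

// Sketch only: `t` is a hypothetical, already-built Table.
ContiguousTable[] parts = t.contiguousSplit(1000, 2000);
for (ContiguousTable part : parts) {
  try (ContiguousTable ct = part) {
    long rows = ct.getRowCount();              // cheap; no Table manifested
    DeviceMemoryBuffer data = ct.getBuffer();  // single buffer backing every column
    // ct.getTable() would reconstruct the Table from metadata when actually needed
  }
}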
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmEventHandlerResourceAdaptor.java
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

import java.util.Arrays;

/**
 * A device memory resource that will give callbacks in specific situations.
 */
public class RmmEventHandlerResourceAdaptor<C extends RmmDeviceMemoryResource>
    extends RmmWrappingDeviceMemoryResource<C> {
  private long handle = 0;
  private final long[] allocThresholds;
  private final long[] deallocThresholds;
  private final boolean debug;

  /**
   * Create a new event handler resource adaptor.
   * @param wrapped the memory resource to get callbacks for. This should not be reused.
   * @param tracker the tracking resource adaptor
   * @param handler the handler that will get the callbacks
   * @param debug true if you want all the callbacks, else false
   */
  public RmmEventHandlerResourceAdaptor(C wrapped, RmmTrackingResourceAdaptor<?> tracker,
      RmmEventHandler handler, boolean debug) {
    super(wrapped);
    this.debug = debug;
    allocThresholds = sortThresholds(handler.getAllocThresholds());
    deallocThresholds = sortThresholds(handler.getDeallocThresholds());
    handle = Rmm.newEventHandlerResourceAdaptor(wrapped.getHandle(), tracker.getHandle(), handler,
        allocThresholds, deallocThresholds, debug);
  }

  private static long[] sortThresholds(long[] thresholds) {
    if (thresholds == null) {
      return null;
    }
    long[] result = Arrays.copyOf(thresholds, thresholds.length);
    Arrays.sort(result);
    return result;
  }

  @Override
  public long getHandle() {
    return handle;
  }

  @Override
  public void close() {
    if (handle != 0) {
      Rmm.releaseEventHandlerResourceAdaptor(handle, debug);
      handle = 0;
    }
    super.close();
  }

  @Override
  public String toString() {
    return Long.toHexString(getHandle()) + "/EVENT(" + wrapped + ", " + debug + ", " +
        Arrays.toString(allocThresholds) + ", " + Arrays.toString(deallocThresholds) + ")";
  }
}
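For orientation, a hedged sketch of how this adaptor layers over a tracker (the tracker serves as both the wrapped resource and the statistics source here); `myHandler`, the base resource, and the alignment are placeholders.

// Sketch only: base resource, alignment, and myHandler are assumptions.
RmmTrackingResourceAdaptor<RmmCudaMemoryResource> tracker =
    new RmmTrackingResourceAdaptor<>(new RmmCudaMemoryResource(), 256);
RmmEventHandlerResourceAdaptor<RmmTrackingResourceAdaptor<RmmCudaMemoryResource>> events =
    new RmmEventHandlerResourceAdaptor<>(tracker, tracker, myHandler, /*debug*/ false);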
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/MaskState.java
/*
 *
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

enum MaskState {
  UNALLOCATED(0),
  UNINITIALIZED(1),
  ALL_VALID(2),
  ALL_NULL(3);

  private static final MaskState[] MASK_STATES = MaskState.values();
  final int nativeId;

  MaskState(int nativeId) {
    this.nativeId = nativeId;
  }

  static MaskState fromNative(int nativeId) {
    for (MaskState type : MASK_STATES) {
      if (type.nativeId == nativeId) {
        return type;
      }
    }
    throw new IllegalArgumentException("Could not translate " + nativeId + " into a MaskState");
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/TableWriter.java
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

/**
 * Provides an interface for writing out Table information in multiple steps.
 * A TableWriter will be returned from one of various factory functions in the Table class that
 * let you set the format of the data and its destination. After that write can be called one or
 * more times. When you are done writing call close to finish.
 */
public abstract class TableWriter implements AutoCloseable {
  protected long writerHandle;

  TableWriter(long writerHandle) {
    this.writerHandle = writerHandle;
  }

  /**
   * Write out a table. Note that all columns must be in the same order each time this is called
   * and the format of each table cannot change.
   * @param table what to write out.
   */
  abstract public void write(Table table) throws CudfException;

  @Override
  abstract public void close() throws CudfException;

  public static class WriteStatistics {
    public final long numCompressedBytes; // The number of bytes that were successfully compressed
    public final long numFailedBytes;     // The number of bytes that failed to compress
    public final long numSkippedBytes;    // The number of bytes that were skipped during compression
    public final double compressionRatio; // The compression ratio for the successfully compressed data

    public WriteStatistics(long numCompressedBytes, long numFailedBytes, long numSkippedBytes,
                           double compressionRatio) {
      this.numCompressedBytes = numCompressedBytes;
      this.numFailedBytes = numFailedBytes;
      this.numSkippedBytes = numSkippedBytes;
      this.compressionRatio = compressionRatio;
    }
  }

  /**
   * Get the write statistics for the writer up to the last write call.
   * Currently, only ORC and Parquet writers support write statistics.
   * Calling this method on other writers will return null.
   * @return The write statistics.
   */
  public WriteStatistics getWriteStatistics() {
    double[] statsData = getWriteStatistics(writerHandle);
    assert statsData.length == 4 : "Unexpected write statistics data length";
    return new WriteStatistics((long) statsData[0], (long) statsData[1], (long) statsData[2],
        statsData[3]);
  }

  /**
   * Get the write statistics for the writer up to the last write call.
   * The data returned from the native method is encoded as an array of doubles.
   * @param writerHandle The handle to the writer.
   * @return The write statistics.
   */
  private static native double[] getWriteStatistics(long writerHandle);
}
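A hedged sketch of the multi-step write flow; the chunked Parquet factory and its options and destination are assumptions standing in for whichever Table factory is actually used.

// Sketch only: writeParquetChunked, options, outputFile and the batches are placeholders.
try (TableWriter writer = Table.writeParquetChunked(options, outputFile)) {
  writer.write(batch1);
  writer.write(batch2); // same column order and types as batch1
  TableWriter.WriteStatistics stats = writer.getWriteStatistics();
  System.out.println("compression ratio: " + stats.compressionRatio);
}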
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostColumnVectorCore.java
/*
 *
 * Copyright (c) 2020-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

/**
 * A class that holds Host side Column Vector APIs and the OffHeapState.
 * Any children of a HostColumnVector will be instantiated via this class.
 */
public class HostColumnVectorCore implements AutoCloseable {

  private static final Logger log = LoggerFactory.getLogger(HostColumnVector.class);

  protected final OffHeapState offHeap;
  protected final DType type;
  protected long rows;
  protected Optional<Long> nullCount;
  protected List<HostColumnVectorCore> children;

  public HostColumnVectorCore(DType type, long rows, Optional<Long> nullCount,
                              HostMemoryBuffer data, HostMemoryBuffer validity,
                              HostMemoryBuffer offsets, List<HostColumnVectorCore> nestedChildren) {
    this.offHeap = new OffHeapState(data, validity, offsets);
    MemoryCleaner.register(this, offHeap);
    this.type = type;
    this.rows = rows;
    this.nullCount = nullCount;
    this.children = nestedChildren;
  }

  /**
   * Returns the type of this vector.
   */
  public DType getType() {
    return type;
  }

  /**
   * Returns the data buffer for a given host side column vector
   */
  public HostMemoryBuffer getData() {
    return offHeap.data;
  }

  /**
   * Returns the validity buffer for a given host side column vector
   */
  public HostMemoryBuffer getValidity() {
    return offHeap.valid;
  }

  /**
   * Returns the offset buffer
   */
  public HostMemoryBuffer getOffsets() {
    return offHeap.offsets;
  }

  public HostColumnVectorCore getChildColumnView(int childIndex) {
    return getNestedChildren().get(childIndex);
  }

  /**
   * Returns the number of nulls in the data. Note that this might end up
   * being a very expensive operation because if the null count is not
   * known it will be calculated.
   */
  public long getNullCount() {
    if (!nullCount.isPresent()) {
      throw new IllegalStateException("Calculating an unknown null count on the host is not currently supported");
    }
    return nullCount.get();
  }

  /**
   * Returns the list of child host column vectors for a given host side column
   */
  List<HostColumnVectorCore> getNestedChildren() {
    return children;
  }

  /**
   * Returns the number of rows for a given host side column vector
   */
  public long getRowCount() {
    return rows;
  }

  /**
   * Returns the number of children for this column
   */
  public int getNumChildren() {
    return children.size();
  }

  /**
   * Return the element at a given row for a given data type
   * @param rowIndex the row number
   * @return an object that would need to be cast to the appropriate type based on this vector's data type
   */
  Object getElement(int rowIndex) {
    if (type.equals(DType.LIST)) {
      return getList(rowIndex);
    } else if (type.equals(DType.STRUCT)) {
      return getStruct(rowIndex);
    } else {
      if (isNull(rowIndex)) {
        return null;
      }
      return readValue(rowIndex);
    }
  }

  private Object getString(int rowIndex) {
    if (isNull(rowIndex)) {
      return null;
    }
    int start = (int) getStartListOffset(rowIndex);
    int end = (int) getEndListOffset(rowIndex);
    int size = end - start;
    byte[] rawData = new byte[size];
    if (size > 0) {
      offHeap.data.getBytes(rawData, 0, start, size);
      return new String(rawData);
    } else {
      return "";
    }
  }

  /////////////////////////////////////////////////////////////////////////////
  // DATA ACCESS
  /////////////////////////////////////////////////////////////////////////////

  /**
   * For testing only. Allows null checks to go past the number of rows, but not past the end
   * of the buffer. NOTE: If the validity vector was allocated by cudf itself it is not
   * guaranteed to have the same padding, but for all practical purposes it does. This is
   * just to verify that the buffer was allocated and initialized properly.
   */
  boolean isNullExtendedRange(long index) {
    long maxNullRow = BitVectorHelper.getValidityAllocationSizeInBytes(rows) * 8;
    assert (index >= 0 && index < maxNullRow) :
        "TEST: index is out of range 0 <= " + index + " < " + maxNullRow;
    if (hasValidityVector()) {
      if (nullCount.isPresent() && !hasNulls()) {
        return false;
      }
      return BitVectorHelper.isNull(offHeap.valid, index);
    }
    return false;
  }

  /**
   * Get access to the raw host buffer for this column. This is intended to be used with a lot
   * of caution. The lifetime of the buffer is tied to the lifetime of the column (Do not close
   * the buffer, as the column will take care of it). Do not modify the contents of the buffer or
   * it might negatively impact what happens on the column. The data must be on the host for this
   * to work.
   * @param type the type of buffer to get access to.
   * @return the underlying buffer or null if no buffer is associated with it for this column.
   * Please note that if the column is empty there may be no buffers at all associated with the
   * column.
   */
  public HostMemoryBuffer getHostBufferFor(BufferType type) {
    HostMemoryBuffer srcBuffer = null;
    switch (type) {
      case VALIDITY:
        srcBuffer = offHeap.valid;
        break;
      case OFFSET:
        srcBuffer = offHeap.offsets;
        break;
      case DATA:
        srcBuffer = offHeap.data;
        break;
      default:
        throw new IllegalArgumentException(type + " is not a supported buffer type.");
    }
    return srcBuffer;
  }

  void copyHostBufferBytes(byte[] dst, int dstOffset, BufferType src, long srcOffset, int length) {
    assert dstOffset >= 0;
    assert srcOffset >= 0;
    assert length >= 0;
    assert dstOffset + length <= dst.length;

    HostMemoryBuffer srcBuffer = getHostBufferFor(src);

    assert srcOffset + length <= srcBuffer.length : "would copy off end of buffer "
        + srcOffset + " + " + length + " > " + srcBuffer.length;
    UnsafeMemoryAccessor.getBytes(dst, dstOffset, srcBuffer.getAddress() + srcOffset, length);
  }

  /**
   * Generic type independent asserts when getting a value from a single index.
   * @param index where to get the data from.
   */
  private void assertsForGet(long index) {
    assert (index >= 0 && index < rows) : "index is out of range 0 <= " + index + " < " + rows;
    assert !isNull(index) : " value at " + index + " is null";
  }

  /**
   * Get the value at index.
   */
  public byte getByte(long index) {
    assert type.isBackedByByte() : type + " is not stored as a byte.";
    assertsForGet(index);
    return offHeap.data.getByte(index * type.getSizeInBytes());
  }

  /**
   * Get the value at index.
   */
  public final short getShort(long index) {
    assert type.isBackedByShort() : type + " is not stored as a short.";
    assertsForGet(index);
    return offHeap.data.getShort(index * type.getSizeInBytes());
  }

  /**
   * Get the value at index.
   */
  public final int getInt(long index) {
    assert type.isBackedByInt() : type + " is not stored as an int.";
    assertsForGet(index);
    return offHeap.data.getInt(index * type.getSizeInBytes());
  }

  /**
   * Get the starting byte offset for the string at index.
   * Wraps getStartListOffset for backwards compatibility.
   */
  long getStartStringOffset(long index) {
    return getStartListOffset(index);
  }

  /**
   * Get the starting element offset for the list or string at index
   */
  public long getStartListOffset(long index) {
    assert type.equals(DType.STRING) || type.equals(DType.LIST) :
        type + " is not a supported string or list type.";
    assert (index >= 0 && index < rows) : "index is out of range 0 <= " + index + " < " + rows;
    return offHeap.offsets.getInt(index * 4);
  }

  /**
   * Get the ending byte offset for the string at index.
   * Wraps getEndListOffset for backwards compatibility.
   */
  long getEndStringOffset(long index) {
    return getEndListOffset(index);
  }

  /**
   * Get the ending element offset for the list or string at index.
   */
  public long getEndListOffset(long index) {
    assert type.equals(DType.STRING) || type.equals(DType.LIST) :
        type + " is not a supported string or list type.";
    assert (index >= 0 && index < rows) : "index is out of range 0 <= " + index + " < " + rows;
    // The offsets buffer has one more entry than there are rows.
    return offHeap.offsets.getInt((index + 1) * 4);
  }

  /**
   * Get the value at index.
   */
  public final long getLong(long index) {
    // Timestamps with time values are stored as longs
    assert type.isBackedByLong() : type + " is not stored as a long.";
    assertsForGet(index);
    return offHeap.data.getLong(index * type.getSizeInBytes());
  }

  /**
   * Get the value at index.
   */
  public final float getFloat(long index) {
    assert type.equals(DType.FLOAT32) : type + " is not a supported float type.";
    assertsForGet(index);
    return offHeap.data.getFloat(index * type.getSizeInBytes());
  }

  /**
   * Get the value at index.
   */
  public final double getDouble(long index) {
    assert type.equals(DType.FLOAT64) : type + " is not a supported double type.";
    assertsForGet(index);
    return offHeap.data.getDouble(index * type.getSizeInBytes());
  }

  /**
   * Get the boolean value at index
   */
  public final boolean getBoolean(long index) {
    assert type.equals(DType.BOOL8) : type + " is not a supported boolean type.";
    assertsForGet(index);
    return offHeap.data.getBoolean(index * type.getSizeInBytes());
  }

  /**
   * Get the BigDecimal value at index.
   */
  public final BigDecimal getBigDecimal(long index) {
    assert type.isDecimalType() : type + " is not a supported decimal type.";
    assertsForGet(index);
    if (type.typeId == DType.DTypeEnum.DECIMAL32) {
      int unscaledValue = offHeap.data.getInt(index * type.getSizeInBytes());
      return BigDecimal.valueOf(unscaledValue, -type.getScale());
    } else if (type.typeId == DType.DTypeEnum.DECIMAL64) {
      long unscaledValue = offHeap.data.getLong(index * type.getSizeInBytes());
      return BigDecimal.valueOf(unscaledValue, -type.getScale());
    } else if (type.typeId == DType.DTypeEnum.DECIMAL128) {
      int sizeInBytes = DType.DTypeEnum.DECIMAL128.sizeInBytes;
      byte[] dst = new byte[sizeInBytes];
      // We need to switch the endianness for decimal128 byte arrays between java and native code.
      offHeap.data.getBytes(dst, 0, (index * sizeInBytes), sizeInBytes);
      convertInPlaceToBigEndian(dst);
      return new BigDecimal(new BigInteger(dst), -type.getScale());
    } else {
      throw new IllegalStateException(type + " is not a supported decimal type.");
    }
  }

  /**
   * Get the raw UTF8 bytes at index. This API is faster than getJavaString, but still not
   * ideal because it is copying the data onto the heap.
   */
  public byte[] getUTF8(long index) {
    assert type.equals(DType.STRING) : type + " is not a supported string type.";
    assertsForGet(index);
    int start = (int) getStartListOffset(index);
    int size = (int) getEndListOffset(index) - start;
    byte[] rawData = new byte[size];
    if (size > 0) {
      offHeap.data.getBytes(rawData, 0, start, size);
    }
    return rawData;
  }

  /**
   * Get the value at index. This API is slow as it has to translate the
   * string representation. Please use it with caution.
   */
  public String getJavaString(long index) {
    byte[] rawData = getUTF8(index);
    return new String(rawData, StandardCharsets.UTF_8);
  }

  /**
   * WARNING: Special case for lists of int8 or uint8, does not support null list values or lists
   *
   * Get array of bytes at index from a list column of int8 or uint8. The column may not be a list
   * of lists and may not have nulls.
   */
  public byte[] getBytesFromList(long rowIndex) {
    assert type.equals(DType.LIST) : type + " is not a supported list of bytes type.";
    HostColumnVectorCore listData = children.get(0);
    assert listData.type.equals(DType.INT8) || listData.type.equals(DType.UINT8) :
        type + " is not a supported list of bytes type.";
    assert !listData.hasNulls() : "byte list columns with nulls are not supported";
    assertsForGet(rowIndex);

    int start = (int) getStartListOffset(rowIndex);
    int end = (int) getEndListOffset(rowIndex);
    int size = end - start;

    byte[] result = new byte[size];
    if (size > 0) {
      listData.offHeap.data.getBytes(result, 0, start, size);
    }
    return result;
  }

  /**
   * WARNING: Strictly for test only. This call is not efficient for production.
   */
  public List getList(long rowIndex) {
    assert rowIndex < rows;
    assert type.equals(DType.LIST);
    List retList = new ArrayList();
    int start = (int) getStartListOffset(rowIndex);
    int end = (int) getEndListOffset(rowIndex);
    // check if null or empty
    if (isNull(rowIndex)) {
      return null;
    }
    for (int j = start; j < end; j++) {
      for (HostColumnVectorCore childHcv : children) {
        // lists have only 1 child
        retList.add(childHcv.getElement(j));
      }
    }
    return retList;
  }

  /**
   * WARNING: Strictly for test only. This call is not efficient for production.
   */
  public HostColumnVector.StructData getStruct(int rowIndex) {
    assert rowIndex < rows;
    assert type.equals(DType.STRUCT);
    List<Object> retList = new ArrayList<>();
    // check if null or empty
    if (isNull(rowIndex)) {
      return null;
    }
    for (int k = 0; k < this.getNumChildren(); k++) {
      retList.add(children.get(k).getElement(rowIndex));
    }
    return new HostColumnVector.StructData(retList);
  }

  /**
   * Method that returns a boolean to indicate if the element at a given row index is null
   * @param rowIndex the row index
   * @return true if null else false
   */
  public boolean isNull(long rowIndex) {
    return rowIndex < 0 || rowIndex >= rows // unknown, hence NULL
        || hasValidityVector() && BitVectorHelper.isNull(offHeap.valid, rowIndex);
  }

  /**
   * Returns if the vector has a validity vector allocated or not.
   */
  public boolean hasValidityVector() {
    return (offHeap.valid != null);
  }

  /**
   * Returns if the vector has nulls. Note that this might end up
   * being a very expensive operation because if the null count is not
   * known it will be calculated.
   */
  public boolean hasNulls() {
    return getNullCount() > 0;
  }

  /**
   * Helper method that reads in a value at a given row index
   * @param rowIndex the row index
   * @return an object that would need to be cast to the appropriate type based on this vector's data type
   */
  private Object readValue(int rowIndex) {
    assert rowIndex < rows;
    int rowOffset = rowIndex * type.getSizeInBytes();
    switch (type.typeId) {
      case INT32: // fall through
      case UINT32: // fall through
      case TIMESTAMP_DAYS:
      case DURATION_DAYS: return offHeap.data.getInt(rowOffset);
      case INT64: // fall through
      case UINT64: // fall through
      case DURATION_MICROSECONDS: // fall through
      case DURATION_MILLISECONDS: // fall through
      case DURATION_NANOSECONDS: // fall through
      case DURATION_SECONDS: // fall through
      case TIMESTAMP_MICROSECONDS: // fall through
      case TIMESTAMP_MILLISECONDS: // fall through
      case TIMESTAMP_NANOSECONDS: // fall through
      case TIMESTAMP_SECONDS: return offHeap.data.getLong(rowOffset);
      case FLOAT32: return offHeap.data.getFloat(rowOffset);
      case FLOAT64: return offHeap.data.getDouble(rowOffset);
      case UINT8: // fall through
      case INT8: return offHeap.data.getByte(rowOffset);
      case UINT16: // fall through
      case INT16: return offHeap.data.getShort(rowOffset);
      case BOOL8: return offHeap.data.getBoolean(rowOffset);
      case STRING: return getString(rowIndex);
      case DECIMAL32: return BigDecimal.valueOf(offHeap.data.getInt(rowOffset), -type.getScale());
      case DECIMAL64: return BigDecimal.valueOf(offHeap.data.getLong(rowOffset), -type.getScale());
      default: throw new UnsupportedOperationException("Do not support " + type);
    }
  }

  /**
   * Returns the amount of host memory used to store column/validity data (not metadata).
   */
  public long getHostMemorySize() {
    long totalSize = offHeap.getHostMemorySize();
    for (HostColumnVectorCore nhcv : children) {
      totalSize += nhcv.getHostMemorySize();
    }
    return totalSize;
  }

  /**
   * Close method for the column
   */
  @Override
  public synchronized void close() {
    for (HostColumnVectorCore child : children) {
      if (child != null) {
        child.close();
      }
    }
    offHeap.delRef();
    offHeap.cleanImpl(false);
  }

  @Override
  public String toString() {
    return "HostColumnVectorCore{" +
        "rows=" + rows +
        ", type=" + type +
        ", nullCount=" + nullCount +
        ", offHeap=" + offHeap +
        '}';
  }

  protected static byte[] convertDecimal128FromJavaToCudf(byte[] bytes) {
    byte[] finalBytes = new byte[DType.DTypeEnum.DECIMAL128.sizeInBytes];
    byte lastByte = bytes[0];
    // Convert to 2's complement representation and make sure the sign bit is extended correctly
    byte setByte = (lastByte & 0x80) > 0 ? (byte) 0xff : (byte) 0x00;
    for (int i = bytes.length; i < finalBytes.length; i++) {
      finalBytes[i] = setByte;
    }
    // After setting the sign bits, reverse the rest of the bytes for endianness
    for (int k = 0; k < bytes.length; k++) {
      finalBytes[k] = bytes[bytes.length - k - 1];
    }
    return finalBytes;
  }

  private void convertInPlaceToBigEndian(byte[] dst) {
    assert ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
    int i = 0;
    int j = dst.length - 1;
    while (j > i) {
      byte tmp = dst[j];
      dst[j] = dst[i];
      dst[i] = tmp;
      j--;
      i++;
    }
  }

  /////////////////////////////////////////////////////////////////////////////
  // HELPER CLASSES
  /////////////////////////////////////////////////////////////////////////////

  /**
   * Holds the off heap state of the column vector so we can clean it up, even if it is leaked.
   */
  protected static final class OffHeapState extends MemoryCleaner.Cleaner {
    public HostMemoryBuffer data;
    public HostMemoryBuffer valid = null;
    public HostMemoryBuffer offsets = null;

    OffHeapState(HostMemoryBuffer data, HostMemoryBuffer valid, HostMemoryBuffer offsets) {
      this.data = data;
      this.valid = valid;
      this.offsets = offsets;
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      boolean neededCleanup = false;
      if (data != null || valid != null || offsets != null) {
        try {
          if (data != null) {
            data.close();
          }
          if (offsets != null) {
            offsets.close();
          }
          if (valid != null) {
            valid.close();
          }
        } finally {
          // Always mark the resource as freed even if an exception is thrown.
          // We cannot know how far it progressed before the exception, and
          // therefore it is unsafe to retry.
          data = null;
          valid = null;
          offsets = null;
        }
        neededCleanup = true;
      }
      if (neededCleanup && logErrorIfNotClean) {
        log.error("A HOST COLUMN VECTOR WAS LEAKED (ID: " + id + ")");
        logRefCountDebug("Leaked vector");
      }
      return neededCleanup;
    }

    @Override
    public void noWarnLeakExpected() {
      super.noWarnLeakExpected();
      if (data != null) {
        data.noWarnLeakExpected();
      }
      if (valid != null) {
        valid.noWarnLeakExpected();
      }
      if (offsets != null) {
        offsets.noWarnLeakExpected();
      }
    }

    @Override
    public boolean isClean() {
      return data == null && valid == null && offsets == null;
    }

    /**
     * This returns total memory allocated on the host for the ColumnVector.
     */
    public long getHostMemorySize() {
      long total = 0;
      if (valid != null) {
        total += valid.length;
      }
      if (data != null) {
        total += data.length;
      }
      if (offsets != null) {
        total += offsets.length;
      }
      return total;
    }

    @Override
    public String toString() {
      return "(ID: " + id + ")";
    }
  }
}
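A small sketch of host-side reads through this API, using HostColumnVector.fromBoxedInts to build a host vector containing a null.

try (HostColumnVector hcv = HostColumnVector.fromBoxedInts(1, null, 3)) {
  for (long i = 0; i < hcv.getRowCount(); i++) {
    System.out.println(hcv.isNull(i) ? "null" : Integer.toString(hcv.getInt(i)));
  }
}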
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostMemoryReservation.java
/*
 *
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Represents some amount of host memory that has been reserved. A reservation guarantees that one
 * or more allocations, up to the reserved amount minus padding for alignment, will succeed. A
 * reservation typically guarantees the amount can be allocated once, meaning that when a buffer
 * allocated from a reservation is freed it is not returned to the reservation, but to the pool of
 * memory the reservation originally came from. If more memory than was reserved is allocated from
 * the reservation, an OutOfMemoryError may be thrown, but it is not guaranteed to happen.
 *
 * When the reservation is closed any unused reservation will be returned to the pool of memory
 * the reservation came from.
 */
public interface HostMemoryReservation extends HostMemoryAllocator, AutoCloseable {}
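A sketch only: how a reservation is obtained depends on the pool in use, so the parameter below stands in for any HostMemoryReservation instance.

static void useReservation(HostMemoryReservation reservation) throws Exception {
  try (HostMemoryBuffer a = reservation.allocate(512);
       HostMemoryBuffer b = reservation.allocate(256)) {
    // Both fit within the reserved amount; freeing them returns memory to the
    // originating pool, not back to the reservation.
  }
  reservation.close(); // any unused portion goes back to the pool here
}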
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/GroupByOptions.java
/*
 *
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Options for groupby (see cudf::groupby::groupby's constructor)
 */
public class GroupByOptions {

  public static GroupByOptions DEFAULT = new GroupByOptions(new Builder());

  private final boolean ignoreNullKeys;
  private final boolean keysSorted;
  private final boolean[] keysDescending;
  private final boolean[] keysNullSmallest;

  private GroupByOptions(Builder builder) {
    ignoreNullKeys = builder.ignoreNullKeys;
    keysSorted = builder.keysSorted;
    keysDescending = builder.keysDescending;
    keysNullSmallest = builder.keysNullSmallest;
  }

  boolean getIgnoreNullKeys() {
    return ignoreNullKeys;
  }

  boolean getKeySorted() {
    return keysSorted;
  }

  boolean[] getKeysDescending() {
    return keysDescending;
  }

  boolean[] getKeysNullSmallest() {
    return keysNullSmallest;
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder {
    private boolean ignoreNullKeys = false;
    private boolean keysSorted = false;
    private boolean[] keysDescending = new boolean[0];
    private boolean[] keysNullSmallest = new boolean[0];

    /**
     * If true, the cudf groupby will ignore grouping keys that are null.
     * The default value is false, so a null in the grouping column will produce a
     * group.
     */
    public Builder withIgnoreNullKeys(boolean ignoreNullKeys) {
      this.ignoreNullKeys = ignoreNullKeys;
      return this;
    }

    /**
     * Indicates whether rows in `keys` are already sorted.
     * The default value is false.
     *
     * If the `keys` are already sorted, better performance may be achieved by
     * passing `keysSorted == true` and indicating the ascending/descending
     * order of each column and null order by calling `withKeysDescending` and
     * `withKeysNullSmallest`, respectively.
     */
    public Builder withKeysSorted(boolean keysSorted) {
      this.keysSorted = keysSorted;
      return this;
    }

    /**
     * If `keysSorted == true`, indicates whether each
     * column is ascending/descending. If empty or null, assumes all columns are
     * ascending. Ignored if `keysSorted == false`.
     */
    public Builder withKeysDescending(boolean... keysDescending) {
      if (keysDescending == null) {
        // Use empty array instead of null
        this.keysDescending = new boolean[0];
      } else {
        this.keysDescending = keysDescending;
      }
      return this;
    }

    /**
     * If `keysSorted == true`, indicates the ordering
     * of null values in each column. If empty or null, assumes all columns
     * use 'null smallest'. Ignored if `keysSorted == false`.
     */
    public Builder withKeysNullSmallest(boolean... keysNullSmallest) {
      if (keysNullSmallest == null) {
        // Use empty array instead of null
        this.keysNullSmallest = new boolean[0];
      } else {
        this.keysNullSmallest = keysNullSmallest;
      }
      return this;
    }

    public GroupByOptions build() {
      return new GroupByOptions(this);
    }
  }
}
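A minimal builder sketch: declaring the keys pre-sorted, with the first key descending and default null ordering. The Table.groupBy entry point referenced in the comment is an assumption about the surrounding API.

GroupByOptions opts = GroupByOptions.builder()
    .withIgnoreNullKeys(false)
    .withKeysSorted(true)
    .withKeysDescending(true, false) // first key descending, second ascending
    .build();
// opts would then be passed to a Table.groupBy(...) style entry point.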
0
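A short sketch of the builder above in use, for a groupby over two key columns that are already sorted. The per-column flags are illustrative values, grounded only in the setters shown in this file.

// Keys are pre-sorted: column 0 ascending with nulls smallest,
// column 1 descending with nulls largest.
GroupByOptions options = GroupByOptions.builder()
    .withIgnoreNullKeys(false)
    .withKeysSorted(true)
    .withKeysDescending(false, true)
    .withKeysNullSmallest(true, false)
    .build();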
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmEventHandler.java
/*
 *
 * Copyright (c) 2020-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

public interface RmmEventHandler {
  /**
   * Invoked on a memory allocation failure.
   * @param sizeRequested number of bytes that failed to allocate
   * @deprecated deprecated in favor of onAllocFailure(long, int)
   * @return true if the memory allocation should be retried or false if it should fail
   */
  default boolean onAllocFailure(long sizeRequested) {
    // this should not be called since it was the previous interface,
    // and it was abstract before, throwing by default for good measure.
    throw new UnsupportedOperationException(
        "Unexpected invocation of deprecated onAllocFailure without retry count.");
  }

  /**
   * Invoked after every memory allocation when debug mode is enabled.
   * @param size number of bytes allocated
   */
  default void onAllocated(long size) {}

  /**
   * Invoked after every memory deallocation when debug mode is enabled.
   * @param size number of bytes deallocated
   */
  default void onDeallocated(long size) {}

  /**
   * Invoked on a memory allocation failure.
   * @param sizeRequested number of bytes that failed to allocate
   * @param retryCount number of times this allocation has been retried after failure
   * @return true if the memory allocation should be retried or false if it should fail
   */
  default boolean onAllocFailure(long sizeRequested, int retryCount) {
    // newer code should override this implementation of `onAllocFailure` to handle
    // `retryCount`. Otherwise, we call the prior implementation to not
    // break existing code.
    return onAllocFailure(sizeRequested);
  }

  /**
   * Get the memory thresholds that will trigger {@link #onAllocThreshold(long)}
   * to be called when one or more of the thresholds is crossed during a memory allocation.
   * A threshold is crossed when the total memory allocated before the RMM allocate operation
   * is less than a threshold value and the threshold value is less than or equal to the
   * total memory allocated after the RMM memory allocate operation.
   * @return allocate memory thresholds or null for no thresholds.
   */
  long[] getAllocThresholds();

  /**
   * Get the memory thresholds that will trigger {@link #onDeallocThreshold(long)}
   * to be called when one or more of the thresholds is crossed during a memory deallocation.
   * A threshold is crossed when the total memory allocated before the RMM deallocate operation
   * is greater than or equal to a threshold value and the threshold value is greater than the
   * total memory allocated after the RMM memory deallocate operation.
   * @return deallocate memory thresholds or null for no thresholds.
   */
  long[] getDeallocThresholds();

  /**
   * Invoked after an RMM memory allocate operation when an allocate threshold is crossed.
   * See {@link #getAllocThresholds()} for details on allocate threshold crossing.
   * <p>NOTE: Any exception thrown by this method will cause the corresponding allocation
   * that triggered the threshold callback to be released before the exception is
   * propagated to the application.
   * @param totalAllocSize total amount of memory allocated after the crossing
   */
  void onAllocThreshold(long totalAllocSize);

  /**
   * Invoked after an RMM memory deallocation operation when a deallocate threshold is crossed.
   * See {@link #getDeallocThresholds()} for details on deallocate threshold crossing.
   * <p>NOTE: Any exception thrown by this method will be propagated to the application
   * after the resource that triggered the threshold was released.
   * @param totalAllocSize total amount of memory allocated after the crossing
   */
  void onDeallocThreshold(long totalAllocSize);
}
0
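A minimal sketch of implementing the interface above: retry a failed allocation a few times and opt out of all threshold callbacks. How the handler is registered with RMM is outside this file and not shown here.

class SimpleRetryHandler implements RmmEventHandler {
  @Override
  public boolean onAllocFailure(long sizeRequested, int retryCount) {
    // A real handler would try to free memory (e.g. spill) before retrying.
    return retryCount < 3;
  }

  @Override
  public long[] getAllocThresholds() {
    return null; // no allocate thresholds
  }

  @Override
  public long[] getDeallocThresholds() {
    return null; // no deallocate thresholds
  }

  @Override
  public void onAllocThreshold(long totalAllocSize) {}

  @Override
  public void onDeallocThreshold(long totalAllocSize) {}
}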
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CudaMemoryBuffer.java
/*
 *
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class represents data allocated using `cudaMalloc` directly instead of the default RMM
 * memory resource. Closing this object will effectively release the memory held by the buffer.
 * Note that because of reference counting if a buffer is sliced it may not actually result in the
 * memory being released.
 */
public class CudaMemoryBuffer extends BaseDeviceMemoryBuffer {
  private static final Logger log = LoggerFactory.getLogger(CudaMemoryBuffer.class);

  private static final class CudaBufferCleaner extends MemoryBufferCleaner {
    private long address;
    private long lengthInBytes;
    private Cuda.Stream stream;

    CudaBufferCleaner(long address, long lengthInBytes, Cuda.Stream stream) {
      this.address = address;
      this.lengthInBytes = lengthInBytes;
      this.stream = stream;
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      boolean neededCleanup = false;
      long origAddress = address;
      if (address != 0) {
        long s = stream == null ? 0 : stream.getStream();
        try {
          Rmm.freeCuda(address, lengthInBytes, s);
        } finally {
          // Always mark the resource as freed even if an exception is thrown.
          // We cannot know how far it progressed before the exception, and
          // therefore it is unsafe to retry.
          address = 0;
          lengthInBytes = 0;
          stream = null;
        }
        neededCleanup = true;
      }
      if (neededCleanup && logErrorIfNotClean) {
        log.error("A CUDA BUFFER WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
        logRefCountDebug("Leaked device buffer");
      }
      return neededCleanup;
    }

    @Override
    public boolean isClean() {
      return address == 0;
    }
  }

  /**
   * Wrap an existing CUDA allocation in a device memory buffer. The CUDA allocation will be freed
   * when the resulting device memory buffer instance frees its memory resource (i.e.: when its
   * reference count goes to zero).
   * @param address device address of the CUDA memory allocation
   * @param lengthInBytes length of the CUDA allocation in bytes
   * @param stream CUDA stream to use for synchronization when freeing the allocation
   */
  public CudaMemoryBuffer(long address, long lengthInBytes, Cuda.Stream stream) {
    super(address, lengthInBytes, new CudaBufferCleaner(address, lengthInBytes, stream));
  }

  private CudaMemoryBuffer(long address, long lengthInBytes, CudaMemoryBuffer parent) {
    super(address, lengthInBytes, parent);
  }

  /**
   * Allocate memory for use on the GPU. You must close it when done.
   * @param bytes size in bytes to allocate
   * @return the buffer
   */
  public static CudaMemoryBuffer allocate(long bytes) {
    return allocate(bytes, Cuda.DEFAULT_STREAM);
  }

  /**
   * Allocate memory for use on the GPU. You must close it when done.
   * @param bytes size in bytes to allocate
   * @param stream The stream in which to synchronize this command
   * @return the buffer
   */
  public static CudaMemoryBuffer allocate(long bytes, Cuda.Stream stream) {
    return Rmm.allocCuda(bytes, stream);
  }

  /**
   * Slice off a part of the device buffer. Note that this is a zero copy operation and all
   * slices must be closed along with the original buffer before the memory is released to RMM.
   * So use this with some caution.
   * @param offset where to start the slice at.
   * @param len how many bytes to slice
   * @return a device buffer that will need to be closed independently from this buffer.
   */
  @Override
  public synchronized final CudaMemoryBuffer slice(long offset, long len) {
    addressOutOfBoundsCheck(address + offset, len, "slice");
    incRefCount();
    return new CudaMemoryBuffer(getAddress() + offset, len, this);
  }
}
0
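A small sketch of allocating and slicing the buffer type above. Because a slice takes a reference on its parent, the underlying cudaMalloc'd memory is only freed once every slice and the parent are closed.

try (CudaMemoryBuffer buffer = CudaMemoryBuffer.allocate(1024, Cuda.DEFAULT_STREAM)) {
  try (CudaMemoryBuffer slice = buffer.slice(256, 128)) {
    // The slice is zero copy: it points into the parent's memory and
    // holds a reference on the parent.
  }
} // the memory is actually freed here, once both slice and parent are closed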
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/AvroOptions.java
/*
 *
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Options for reading an Avro file
 */
public class AvroOptions extends ColumnFilterOptions {
  public static AvroOptions DEFAULT = new AvroOptions(new Builder());

  private AvroOptions(Builder builder) {
    super(builder);
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder extends ColumnFilterOptions.Builder<Builder> {
    public AvroOptions build() {
      return new AvroOptions(this);
    }
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/DeviceMemoryBufferView.java
/*
 *
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * This class represents data in some form on the GPU. The memory pointed at by this buffer is
 * not owned by this buffer. So you have to be sure that this buffer does not outlive the buffer
 * that is backing it.
 */
public class DeviceMemoryBufferView extends BaseDeviceMemoryBuffer {
  DeviceMemoryBufferView(long address, long lengthInBytes) {
    // Set the cleaner to null so we don't end up releasing anything
    super(address, lengthInBytes, (MemoryBufferCleaner) null);
  }

  /**
   * At the moment we don't have a use for slicing a view.
   */
  @Override
  public synchronized final DeviceMemoryBufferView slice(long offset, long len) {
    throw new UnsupportedOperationException("Slice on view is not supported");
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/DecimalUtils.java
/*
 *
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import java.math.BigDecimal;
import java.util.AbstractMap;
import java.util.Map;

public class DecimalUtils {

  /**
   * Creates a cuDF decimal type with precision and scale
   */
  public static DType createDecimalType(int precision, int scale) {
    if (precision <= DType.DECIMAL32_MAX_PRECISION) {
      return DType.create(DType.DTypeEnum.DECIMAL32, -scale);
    } else if (precision <= DType.DECIMAL64_MAX_PRECISION) {
      return DType.create(DType.DTypeEnum.DECIMAL64, -scale);
    } else if (precision <= DType.DECIMAL128_MAX_PRECISION) {
      return DType.create(DType.DTypeEnum.DECIMAL128, -scale);
    }
    throw new IllegalArgumentException("precision overflow: " + precision);
  }

  /**
   * Given decimal precision and scale, returns the lower and upper bound of the current
   * decimal type.
   *
   * Be very careful when comparing against these bounds. CUDF decimal comparisons really only
   * work when both types are already the same precision and scale, and when you change the
   * scale you end up losing information.
   * @param precision the max precision of decimal type
   * @param scale the scale of decimal type
   * @return a Map Entry of BigDecimal, lower bound as the key, upper bound as the value
   */
  public static Map.Entry<BigDecimal, BigDecimal> bounds(int precision, int scale) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < precision; i++) sb.append("9");
    sb.append("e");
    sb.append(-scale);
    String boundStr = sb.toString();
    BigDecimal upperBound = new BigDecimal(boundStr);
    BigDecimal lowerBound = new BigDecimal("-" + boundStr);
    return new AbstractMap.SimpleImmutableEntry<>(lowerBound, upperBound);
  }

  /**
   * With precision and scale, checks each value of the input decimal column for being
   * out of bounds.
   * @return a boolean column indicating whether each value is out of bounds or not
   */
  public static ColumnVector outOfBounds(ColumnView input, int precision, int scale) {
    Map.Entry<BigDecimal, BigDecimal> boundPair = bounds(precision, scale);
    BigDecimal lowerBound = boundPair.getKey();
    BigDecimal upperBound = boundPair.getValue();
    try (ColumnVector over = greaterThan(input, upperBound);
         ColumnVector under = lessThan(input, lowerBound)) {
      return over.or(under);
    }
  }

  /**
   * The native lessThan operator has issues accurately comparing decimal values that have
   * different precision and scale, so this method takes some special steps to get rid of
   * these issues.
   */
  public static ColumnVector lessThan(ColumnView lhs, BigDecimal rhs) {
    assert (lhs.getType().isDecimalType());
    int leftScale = lhs.getType().getScale();
    int leftPrecision = lhs.getType().getDecimalMaxPrecision();

    // First we have to round the scalar (rhs) to the same scale as lhs. Because this is a
    // less than and it is rhs that we are rounding, we will round away from 0 (UP)
    // to make sure we always return the correct value.
    // For example:
    //      100.1 < 100.19
    // If we rounded down the rhs 100.19 would become 100.1, and now 100.1 is not < 100.1
    BigDecimal roundedRhs = rhs.setScale(-leftScale, BigDecimal.ROUND_UP);

    if (roundedRhs.precision() > leftPrecision) {
      // converting rhs to the same precision as lhs would result in an overflow/error, but
      // the scale is the same so we can still figure this out. For example if LHS precision is
      // 4 and RHS precision is 5 we get the following...
      //  9999 <  99999 => true
      // -9999 <  99999 => true
      //  9999 < -99999 => false
      // -9999 < -99999 => false
      // so the result should be the same as RHS > 0
      try (Scalar isPositive = Scalar.fromBool(roundedRhs.compareTo(BigDecimal.ZERO) > 0)) {
        return ColumnVector.fromScalar(isPositive, (int) lhs.getRowCount());
      }
    }
    try (Scalar scalarRhs = Scalar.fromDecimal(roundedRhs.unscaledValue(), lhs.getType())) {
      return lhs.lessThan(scalarRhs);
    }
  }

  /**
   * The native lessThan operator has issues accurately comparing decimal values that have
   * different precision and scale, so this method takes some special steps to get rid of
   * these issues.
   */
  public static ColumnVector lessThan(BinaryOperable lhs, BigDecimal rhs, int numRows) {
    if (lhs instanceof ColumnView) {
      return lessThan((ColumnView) lhs, rhs);
    }
    Scalar scalarLhs = (Scalar) lhs;
    if (scalarLhs.isValid()) {
      try (Scalar isLess = Scalar.fromBool(scalarLhs.getBigDecimal().compareTo(rhs) < 0)) {
        return ColumnVector.fromScalar(isLess, numRows);
      }
    }
    try (Scalar nullScalar = Scalar.fromNull(DType.BOOL8)) {
      return ColumnVector.fromScalar(nullScalar, numRows);
    }
  }

  /**
   * The native greaterThan operator has issues accurately comparing decimal values that have
   * different precision and scale, so this method takes some special steps to get rid of
   * these issues.
   */
  public static ColumnVector greaterThan(ColumnView lhs, BigDecimal rhs) {
    assert (lhs.getType().isDecimalType());
    int cvScale = lhs.getType().getScale();
    int maxPrecision = lhs.getType().getDecimalMaxPrecision();

    // First we have to round the scalar (rhs) to the same scale as lhs. Because this is a
    // greater than and it is rhs that we are rounding, we will round towards 0 (DOWN)
    // to make sure we always return the correct value.
    // For example:
    //      100.2 > 100.19
    // If we rounded up the rhs 100.19 would become 100.2, and now 100.2 is not > 100.2
    BigDecimal roundedRhs = rhs.setScale(-cvScale, BigDecimal.ROUND_DOWN);

    if (roundedRhs.precision() > maxPrecision) {
      // converting rhs to the same precision as lhs would result in an overflow/error, but
      // the scale is the same so we can still figure this out. For example if LHS precision is
      // 4 and RHS precision is 5 we get the following...
      //  9999 >  99999 => false
      // -9999 >  99999 => false
      //  9999 > -99999 => true
      // -9999 > -99999 => true
      // so the result should be the same as RHS < 0
      try (Scalar isNegative = Scalar.fromBool(roundedRhs.compareTo(BigDecimal.ZERO) < 0)) {
        return ColumnVector.fromScalar(isNegative, (int) lhs.getRowCount());
      }
    }
    try (Scalar scalarRhs = Scalar.fromDecimal(roundedRhs.unscaledValue(), lhs.getType())) {
      return lhs.greaterThan(scalarRhs);
    }
  }
}
0
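A sketch of the bounds check above for decimal values with precision 5 and scale 2, i.e. the representable range [-999.99, 999.99]. ColumnVector.fromDecimals is assumed from elsewhere in the cudf Java API rather than shown in this file.

try (ColumnVector values = ColumnVector.fromDecimals(
         new java.math.BigDecimal("1.23"), new java.math.BigDecimal("12345.67"));
     ColumnVector flags = DecimalUtils.outOfBounds(values, 5, 2)) {
  // flags is a BOOL8 column: [false, true], since 12345.67 needs precision 7.
}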
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/PartitionedTable.java
/*
 *
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Class to provide a PartitionedTable
 */
public final class PartitionedTable implements AutoCloseable {

  private final Table table;
  private final int[] partitionsOffsets;

  /**
   * The package-private constructor is only called by the partition method in
   * Table.TableOperation.partition
   * @param table - {@link Table} which contains the partitioned data
   * @param partitionOffsets - This param is used to populate the offsets into the returned table
   *                           where partitionOffsets[i] indicates the starting position of
   *                           partition 'i'
   */
  PartitionedTable(Table table, int[] partitionOffsets) {
    this.table = table;
    this.partitionsOffsets = partitionOffsets;
  }

  public Table getTable() {
    return table;
  }

  public ColumnVector getColumn(int index) {
    return table.getColumn(index);
  }

  public long getNumberOfColumns() {
    return table.getNumberOfColumns();
  }

  public long getRowCount() {
    return table.getRowCount();
  }

  @Override
  public void close() {
    table.close();
  }

  /**
   * This method returns the partitions on this table. partitionOffsets[i] indicates the
   * starting position of partition 'i' in the partitioned table. The size of each partition
   * can be calculated from the next offset.
   * Ex:
   * partitionOffsets = [0, 12, 12, 49] indicates 4 partitions with the following sizes:
   * partition[0] - 12
   * partition[1] - 0 (is empty)
   * partition[2] - 37
   * partition[3] has the remaining values of the table (N-49)
   */
  public int[] getPartitions() {
    return partitionsOffsets;
  }
}
0
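Following the offset convention documented in getPartitions() above, each partition's row range can be recovered from the next offset, using the table's row count for the last partition:

int[] offsets = partitionedTable.getPartitions();
long rowCount = partitionedTable.getRowCount();
for (int i = 0; i < offsets.length; i++) {
  long start = offsets[i];
  long end = (i + 1 < offsets.length) ? offsets[i + 1] : rowCount;
  System.out.println("partition " + i + " has " + (end - start) + " rows");
}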
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CuFileResourceCleaner.java
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Keeps track of and cleans a cuFile native resource.
 */
final class CuFileResourceCleaner extends MemoryCleaner.Cleaner {
  private static final Logger log = LoggerFactory.getLogger(CuFileResourceCleaner.class);

  private long pointer;
  private final CuFileResourceDestroyer destroyer;
  private boolean closed = false;

  CuFileResourceCleaner(long pointer, CuFileResourceDestroyer destroyer) {
    this.pointer = pointer;
    this.destroyer = destroyer;
    addRef();
  }

  long getPointer() {
    return pointer;
  }

  synchronized void close(Object resource) {
    delRef();
    if (closed) {
      logRefCountDebug("double free " + resource);
      throw new IllegalStateException("Close called too many times " + resource);
    }
    clean(false);
    closed = true;
  }

  @Override
  protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
    boolean neededCleanup = false;
    long origAddress = pointer;
    if (pointer != 0) {
      try {
        destroyer.destroy(pointer);
      } finally {
        // Always mark the resource as freed even if an exception is thrown.
        // We cannot know how far it progressed before the exception, and
        // therefore it is unsafe to retry.
        pointer = 0;
      }
      neededCleanup = true;
    }
    if (neededCleanup && logErrorIfNotClean) {
      log.error("A CUFile RESOURCE WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
      logRefCountDebug("Leaked cuFile resource");
    }
    return neededCleanup;
  }

  @Override
  public boolean isClean() {
    return pointer == 0;
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostMemoryBufferNativeUtils.java
/*
 *
 * Copyright (c) 2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import java.io.IOException;
import java.nio.ByteBuffer;

/**
 * Wrapper for {@link HostMemoryBuffer} native callbacks so that the class avoids
 * loading the native libraries unless one of its methods requires it.
 */
class HostMemoryBufferNativeUtils {
  static {
    NativeDepsLoader.loadNativeDeps();
  }

  /**
   * This will turn an address into a ByteBuffer. The buffer will NOT own the memory
   * so closing it has no impact on the underlying memory. It should never
   * be used if the corresponding HostMemoryBuffer is closed.
   */
  static native ByteBuffer wrapRangeInBuffer(long address, long len);

  /**
   * Memory map a portion of a local file
   * @param file path to the local file to be mapped
   * @param mode 0=read, 1=read+write
   * @param offset file offset where the map starts. Must be a system page boundary.
   * @param len number of bytes to map
   * @return address of the memory-mapped region
   * @throws IOException I/O error during mapping
   */
  static native long mmap(String file, int mode, long offset, long len) throws IOException;

  /**
   * Unmap a memory region that was memory-mapped.
   * @param address address of the memory-mapped region
   * @param length size of the mapped region in bytes
   */
  static native void munmap(long address, long length);
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/NullPolicy.java
/*
 *
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Specify whether to include nulls or exclude nulls in an operation.
 */
public enum NullPolicy {
  EXCLUDE(false),
  INCLUDE(true);

  NullPolicy(boolean includeNulls) {
    this.includeNulls = includeNulls;
  }

  final boolean includeNulls;
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/BitVectorHelper.java
/*
 *
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * This class does bit manipulation using byte arithmetic
 */
final class BitVectorHelper {

  /**
   * Shifts src left by the bits required for the destination offset, then appends it to dst.
   */
  static void append(HostMemoryBuffer src, HostMemoryBuffer dst, long dstOffset, long rows) {
    assert dst.length * 8 - dstOffset >= rows : "validity vector bigger than available space on " +
        "dst: " + (dst.length * 8 - dstOffset) + " copying space needed: " + rows;
    long dstByteIndex = dstOffset / 8;
    int shiftBits = (int) (dstOffset % 8);
    if (shiftBits > 0) {
      shiftSrcLeftAndWriteToDst(src, dst, dstByteIndex, shiftBits, rows);
    } else {
      dst.copyFromHostBuffer(dstByteIndex, src, 0, getValidityLengthInBytes(rows));
    }
  }

  /**
   * Shifts the src to the left by the given bits and writes 'length' bytes to the destination
   */
  private static void shiftSrcLeftAndWriteToDst(HostMemoryBuffer src, HostMemoryBuffer dst,
      long dstOffset, int shiftByBits, long length) {
    assert shiftByBits > 0 && shiftByBits < 8 : "shiftByBits out of range";
    int dstMask = 0xFF >> (8 - shiftByBits);
    // the mask to save the left side of the bits before we shift
    int srcLeftMask = dstMask << (8 - shiftByBits);
    int valueFromTheLeftOfTheLastByte = dst.getByte(dstOffset) & dstMask;
    long i;
    long byteLength = getValidityLengthInBytes(length);
    for (i = 0; i < byteLength; i++) {
      int b = src.getByte(i);
      int fallingBitsOnTheLeft = b & srcLeftMask;
      b <<= shiftByBits;
      b |= valueFromTheLeftOfTheLastByte;
      dst.setByte(dstOffset + i, (byte) b);
      valueFromTheLeftOfTheLastByte = fallingBitsOnTheLeft >>> (8 - shiftByBits);
    }
    if (((length % 8) + shiftByBits > 8) || length % 8 == 0) {
      /*
       Only if the last byte has data that has been shifted to spill over to the next byte
       execute the following statement.
       */
      dst.setByte(dstOffset + i, (byte) (valueFromTheLeftOfTheLastByte | ~dstMask));
    }
  }

  /**
   * This method returns the length in bytes needed to represent X number of rows
   * e.g. getValidityLengthInBytes(5) => 1 byte
   * getValidityLengthInBytes(7) => 1 byte
   * getValidityLengthInBytes(14) => 2 bytes
   */
  static long getValidityLengthInBytes(long rows) {
    return (rows + 7) / 8;
  }

  /**
   * This method returns the allocation size of the validity vector which is 64-byte aligned
   * e.g. getValidityAllocationSizeInBytes(5) => 64 bytes
   * getValidityAllocationSizeInBytes(14) => 64 bytes
   * getValidityAllocationSizeInBytes(513) => 128 bytes
   */
  static long getValidityAllocationSizeInBytes(long rows) {
    long numBytes = getValidityLengthInBytes(rows);
    return ((numBytes + 63) / 64) * 64;
  }

  /**
   * Set the validity bit to null for the given index.
   * @param valid the buffer to set it in.
   * @param index the index to set it at.
   * @return 1 if validity changed else 0 if it already was null.
   */
  static int setNullAt(HostMemoryBuffer valid, long index) {
    long bucket = index / 8;
    byte currentByte = valid.getByte(bucket);
    int bitmask = ~(1 << (index % 8));
    int ret = (currentByte >> (index % 8)) & 0x1;
    currentByte &= bitmask;
    valid.setByte(bucket, currentByte);
    return ret;
  }

  static boolean isNull(HostMemoryBuffer valid, long index) {
    int b = valid.getByte(index / 8);
    int i = b & (1 << (index % 8));
    return i == 0;
  }
}
0
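Worked examples of the validity-size arithmetic above: one validity bit per row, rounded up to whole bytes, with allocations rounded up to a 64-byte multiple. The helpers are package-private, so this only compiles inside ai.rapids.cudf.

long len5     = BitVectorHelper.getValidityLengthInBytes(5);            // (5 + 7) / 8 = 1 byte
long len14    = BitVectorHelper.getValidityLengthInBytes(14);           // (14 + 7) / 8 = 2 bytes
long alloc14  = BitVectorHelper.getValidityAllocationSizeInBytes(14);   // 2 bytes -> 64 bytes
long alloc513 = BitVectorHelper.getValidityAllocationSizeInBytes(513);  // 65 bytes -> 128 bytes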
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ORCWriterOptions.java
/*
 *
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * This class represents settings for writing ORC files. It includes meta data information
 * that will be used by the ORC writer to write the file.
 */
public class ORCWriterOptions extends CompressionMetadataWriterOptions {

  private ORCWriterOptions(Builder builder) {
    super(builder);
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder extends
      CompressionMetadataWriterOptions.Builder<Builder, ORCWriterOptions> {

    public ORCWriterOptions build() {
      return new ORCWriterOptions(this);
    }
  }
}
0
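A sketch of building the options above. The withCompressionType and withMetadata setters are assumed to be inherited from CompressionMetadataWriterOptions.Builder; they are not visible in this file, so treat them as assumptions.

ORCWriterOptions options = ORCWriterOptions.builder()
    .withCompressionType(CompressionType.SNAPPY) // assumed inherited setter
    .withMetadata("writerKey", "writerValue")    // assumed inherited setter
    .build();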
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CudfColumnSizeOverflowException.java
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

/**
 * Exception thrown when a CUDF operation results in a column size
 * exceeding CUDF column size limits.
 */
public class CudfColumnSizeOverflowException extends CudfException {
  CudfColumnSizeOverflowException(String message) {
    super(message);
  }

  CudfColumnSizeOverflowException(String message, String nativeStacktrace) {
    super(message, nativeStacktrace);
  }

  CudfColumnSizeOverflowException(String message, String nativeStacktrace, Throwable cause) {
    super(message, nativeStacktrace, cause);
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ReductionAggregation.java
/*
 *
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * An aggregation that can be used for a reduce.
 */
public final class ReductionAggregation {
  private final Aggregation wrapped;

  private ReductionAggregation(Aggregation wrapped) {
    this.wrapped = wrapped;
  }

  long createNativeInstance() {
    return wrapped.createNativeInstance();
  }

  long getDefaultOutput() {
    return wrapped.getDefaultOutput();
  }

  Aggregation getWrapped() {
    return wrapped;
  }

  @Override
  public int hashCode() {
    return wrapped.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof ReductionAggregation) {
      ReductionAggregation o = (ReductionAggregation) other;
      return wrapped.equals(o.wrapped);
    }
    return false;
  }

  /**
   * Sum Aggregation
   */
  public static ReductionAggregation sum() {
    return new ReductionAggregation(Aggregation.sum());
  }

  /**
   * Product Aggregation.
   */
  public static ReductionAggregation product() {
    return new ReductionAggregation(Aggregation.product());
  }

  /**
   * Min Aggregation
   */
  public static ReductionAggregation min() {
    return new ReductionAggregation(Aggregation.min());
  }

  /**
   * Max Aggregation
   */
  public static ReductionAggregation max() {
    return new ReductionAggregation(Aggregation.max());
  }

  /**
   * Any reduction. Produces a true or 1, depending on the output type,
   * if any of the elements in the range are true or non-zero, otherwise produces a false or 0.
   * Null values are skipped.
   */
  public static ReductionAggregation any() {
    return new ReductionAggregation(Aggregation.any());
  }

  /**
   * All reduction. Produces true or 1, depending on the output type, if all of the elements in
   * the range are true or non-zero, otherwise produces a false or 0.
   * Null values are skipped.
   */
  public static ReductionAggregation all() {
    return new ReductionAggregation(Aggregation.all());
  }

  /**
   * Sum of squares reduction.
   */
  public static ReductionAggregation sumOfSquares() {
    return new ReductionAggregation(Aggregation.sumOfSquares());
  }

  /**
   * Arithmetic mean reduction.
   */
  public static ReductionAggregation mean() {
    return new ReductionAggregation(Aggregation.mean());
  }

  /**
   * Variance aggregation with 1 as the delta degrees of freedom.
   */
  public static ReductionAggregation variance() {
    return new ReductionAggregation(Aggregation.variance());
  }

  /**
   * Variance aggregation.
   * @param ddof delta degrees of freedom. The divisor used in calculation of variance is
   *             <code>N - ddof</code>, where N is the population size.
   */
  public static ReductionAggregation variance(int ddof) {
    return new ReductionAggregation(Aggregation.variance(ddof));
  }

  /**
   * Standard deviation aggregation with 1 as the delta degrees of freedom.
   */
  public static ReductionAggregation standardDeviation() {
    return new ReductionAggregation(Aggregation.standardDeviation());
  }

  /**
   * Standard deviation aggregation.
   * @param ddof delta degrees of freedom. The divisor used in calculation of std is
   *             <code>N - ddof</code>, where N is the population size.
   */
  public static ReductionAggregation standardDeviation(int ddof) {
    return new ReductionAggregation(Aggregation.standardDeviation(ddof));
  }

  /**
   * Median reduction.
   */
  public static ReductionAggregation median() {
    return new ReductionAggregation(Aggregation.median());
  }

  /**
   * Aggregate to compute the specified quantiles. Uses linear interpolation by default.
   */
  public static ReductionAggregation quantile(double ... quantiles) {
    return new ReductionAggregation(Aggregation.quantile(quantiles));
  }

  /**
   * Aggregate to compute various quantiles.
   */
  public static ReductionAggregation quantile(QuantileMethod method, double ... quantiles) {
    return new ReductionAggregation(Aggregation.quantile(method, quantiles));
  }

  /**
   * Number of unique, non-null, elements.
   */
  public static ReductionAggregation nunique() {
    return new ReductionAggregation(Aggregation.nunique());
  }

  /**
   * Number of unique elements.
   * @param nullPolicy INCLUDE if nulls should be counted else EXCLUDE. If nulls are counted they
   *                   compare as equal so multiple null values in a range would all only
   *                   increase the count by 1.
   */
  public static ReductionAggregation nunique(NullPolicy nullPolicy) {
    return new ReductionAggregation(Aggregation.nunique(nullPolicy));
  }

  /**
   * Get the nth, non-null, element in a group.
   * @param offset the offset to look at. Negative numbers go from the end of the group. Any
   *               value outside of the group range results in a null.
   */
  public static ReductionAggregation nth(int offset) {
    return new ReductionAggregation(Aggregation.nth(offset));
  }

  /**
   * Get the nth element in a group.
   * @param offset the offset to look at. Negative numbers go from the end of the group. Any
   *               value outside of the group range results in a null.
   * @param nullPolicy INCLUDE if nulls should be included in the aggregation or EXCLUDE if they
   *                   should be skipped.
   */
  public static ReductionAggregation nth(int offset, NullPolicy nullPolicy) {
    return new ReductionAggregation(Aggregation.nth(offset, nullPolicy));
  }

  /**
   * tDigest reduction.
   */
  public static ReductionAggregation createTDigest(int delta) {
    return new ReductionAggregation(Aggregation.createTDigest(delta));
  }

  /**
   * tDigest merge reduction.
   */
  public static ReductionAggregation mergeTDigest(int delta) {
    return new ReductionAggregation(Aggregation.mergeTDigest(delta));
  }

  /**
   * Collect the values into a list. Nulls will be skipped.
   */
  public static ReductionAggregation collectList() {
    return new ReductionAggregation(Aggregation.collectList());
  }

  /**
   * Collect the values into a list.
   *
   * @param nullPolicy Indicates whether to include/exclude nulls during collection.
   */
  public static ReductionAggregation collectList(NullPolicy nullPolicy) {
    return new ReductionAggregation(Aggregation.collectList(nullPolicy));
  }

  /**
   * Collect the values into a set. All null values will be excluded, and all NaN values are
   * regarded as unique instances.
   */
  public static ReductionAggregation collectSet() {
    return new ReductionAggregation(Aggregation.collectSet());
  }

  /**
   * Collect the values into a set.
   *
   * @param nullPolicy Indicates whether to include/exclude nulls during collection.
   * @param nullEquality Flag to specify whether null entries within each list should be considered equal.
   * @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
   */
  public static ReductionAggregation collectSet(NullPolicy nullPolicy,
      NullEquality nullEquality, NaNEquality nanEquality) {
    return new ReductionAggregation(Aggregation.collectSet(nullPolicy, nullEquality, nanEquality));
  }

  /**
   * Merge the partial lists produced by multiple CollectListAggregations.
   * NOTICE: The partial lists to be merged should NOT include any null list element (but can
   * include null list entries).
   */
  public static ReductionAggregation mergeLists() {
    return new ReductionAggregation(Aggregation.mergeLists());
  }

  /**
   * Merge the partial sets produced by multiple CollectSetAggregations. Each null/NaN value will
   * be regarded as a unique instance.
   */
  public static ReductionAggregation mergeSets() {
    return new ReductionAggregation(Aggregation.mergeSets());
  }

  /**
   * Merge the partial sets produced by multiple CollectSetAggregations.
   *
   * @param nullEquality Flag to specify whether null entries within each list should be considered equal.
   * @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
   */
  public static ReductionAggregation mergeSets(NullEquality nullEquality, NaNEquality nanEquality) {
    return new ReductionAggregation(Aggregation.mergeSets(nullEquality, nanEquality));
  }

  /**
   * Create HistogramAggregation, computing the frequencies for each unique row.
   *
   * @return A structs column in which the first child stores unique rows from the input and the
   *         second child stores their corresponding frequencies.
   */
  public static ReductionAggregation histogram() {
    return new ReductionAggregation(Aggregation.histogram());
  }

  /**
   * Create MergeHistogramAggregation, to merge multiple histograms.
   *
   * @return A new histogram in which the frequencies of the unique rows are summed up.
   */
  public static ReductionAggregation mergeHistogram() {
    return new ReductionAggregation(Aggregation.mergeHistogram());
  }
}
0
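A sketch of running reductions built from the factories above. A ColumnView.reduce(ReductionAggregation, DType) method returning a Scalar is assumed from elsewhere in the cudf Java API; it is not part of this file.

try (ColumnVector cv = ColumnVector.fromBoxedInts(1, 2, null, 4);
     Scalar sum = cv.reduce(ReductionAggregation.sum(), DType.INT64);
     Scalar mean = cv.reduce(ReductionAggregation.mean(), DType.FLOAT64)) {
  // sum.getLong() == 7 (nulls are skipped); mean.getDouble() == 7.0 / 3
}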
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/AssertEmptyNulls.java
/*
 *
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * This class is a helper class to assert there are no non-empty nulls in a ColumnView.
 *
 * The reason for the existence of this class is so that we can turn the asserts on/off when needed
 * by passing "-da:ai.rapids.cudf.AssertEmptyNulls". We need that behavior because we have tests
 * that explicitly test with ColumnViews that contain non-empty nulls but more importantly, there
 * could be cases where an external system may not have a requirement of nulls being empty, so for
 * us to work with those systems, we can turn off this assert in the field.
 */
public class AssertEmptyNulls {
  public static void assertNullsAreEmpty(ColumnView cv) {
    if (cv.type.isNestedType() || cv.type.hasOffsets()) {
      assert !cv.hasNonEmptyNulls() : "Column has non-empty nulls";
    }
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Scalar.java
/*
 *
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * A single scalar value.
 */
public final class Scalar implements AutoCloseable, BinaryOperable {
  static {
    NativeDepsLoader.loadNativeDeps();
  }

  private static final Logger LOG = LoggerFactory.getLogger(Scalar.class);

  private final DType type;
  private int refCount;
  private final OffHeapState offHeap;

  public static Scalar fromNull(DType type) {
    switch (type.typeId) {
    case EMPTY:
    case BOOL8:
      return new Scalar(type, makeBool8Scalar(false, false));
    case INT8:
      return new Scalar(type, makeInt8Scalar((byte)0, false));
    case UINT8:
      return new Scalar(type, makeUint8Scalar((byte)0, false));
    case INT16:
      return new Scalar(type, makeInt16Scalar((short)0, false));
    case UINT16:
      return new Scalar(type, makeUint16Scalar((short)0, false));
    case INT32:
      return new Scalar(type, makeInt32Scalar(0, false));
    case UINT32:
      return new Scalar(type, makeUint32Scalar(0, false));
    case TIMESTAMP_DAYS:
      return new Scalar(type, makeTimestampDaysScalar(0, false));
    case FLOAT32:
      return new Scalar(type, makeFloat32Scalar(0, false));
    case FLOAT64:
      return new Scalar(type, makeFloat64Scalar(0, false));
    case INT64:
      return new Scalar(type, makeInt64Scalar(0, false));
    case UINT64:
      return new Scalar(type, makeUint64Scalar(0, false));
    case TIMESTAMP_SECONDS:
    case TIMESTAMP_MILLISECONDS:
    case TIMESTAMP_MICROSECONDS:
    case TIMESTAMP_NANOSECONDS:
      return new Scalar(type, makeTimestampTimeScalar(type.typeId.getNativeId(), 0, false));
    case STRING:
      return new Scalar(type, makeStringScalar(null, false));
    case DURATION_DAYS:
      return new Scalar(type, makeDurationDaysScalar(0, false));
    case DURATION_MICROSECONDS:
    case DURATION_MILLISECONDS:
    case DURATION_NANOSECONDS:
    case DURATION_SECONDS:
      return new Scalar(type, makeDurationTimeScalar(type.typeId.getNativeId(), 0, false));
    case DECIMAL32:
      return new Scalar(type, makeDecimal32Scalar(0, type.getScale(), false));
    case DECIMAL64:
      return new Scalar(type, makeDecimal64Scalar(0L, type.getScale(), false));
    case DECIMAL128:
      return new Scalar(type, makeDecimal128Scalar(BigInteger.ZERO.toByteArray(), type.getScale(), false));
    case LIST:
      throw new IllegalArgumentException("Please call 'listFromNull' to create a null list scalar.");
    default:
      throw new IllegalArgumentException("Unexpected type: " + type);
    }
  }

  public static Scalar fromBool(boolean value) {
    return new Scalar(DType.BOOL8, makeBool8Scalar(value, true));
  }

  public static Scalar fromBool(Boolean value) {
    if (value == null) {
      return Scalar.fromNull(DType.BOOL8);
    }
    return Scalar.fromBool(value.booleanValue());
  }

  public static Scalar fromByte(byte value) {
    return new Scalar(DType.INT8, makeInt8Scalar(value, true));
  }

  public static Scalar fromByte(Byte value) {
    if (value == null) {
      return Scalar.fromNull(DType.INT8);
    }
    return Scalar.fromByte(value.byteValue());
  }

  public static Scalar fromUnsignedByte(byte value) {
    return new Scalar(DType.UINT8, makeUint8Scalar(value, true));
  }

  public static Scalar fromUnsignedByte(Byte value) {
    if (value == null) {
      return Scalar.fromNull(DType.UINT8);
    }
    return Scalar.fromUnsignedByte(value.byteValue());
  }

  public static Scalar fromShort(short value) {
    return new Scalar(DType.INT16, makeInt16Scalar(value, true));
  }

  public static Scalar fromShort(Short value) {
    if (value == null) {
      return Scalar.fromNull(DType.INT16);
    }
    return Scalar.fromShort(value.shortValue());
  }

  public static Scalar fromUnsignedShort(short value) {
    return new Scalar(DType.UINT16, makeUint16Scalar(value, true));
  }

  public static Scalar fromUnsignedShort(Short value) {
    if (value == null) {
      return Scalar.fromNull(DType.UINT16);
    }
    return Scalar.fromUnsignedShort(value.shortValue());
  }

  /**
   * Returns a DURATION_DAYS scalar
   * @param value - days
   * @return - Scalar value
   */
  public static Scalar durationDaysFromInt(int value) {
    return new Scalar(DType.DURATION_DAYS, makeDurationDaysScalar(value, true));
  }

  /**
   * Returns a DURATION_DAYS scalar
   * @param value - days
   * @return - Scalar value
   */
  public static Scalar durationDaysFromInt(Integer value) {
    if (value == null) {
      return Scalar.fromNull(DType.DURATION_DAYS);
    }
    return Scalar.durationDaysFromInt(value.intValue());
  }

  public static Scalar fromInt(int value) {
    return new Scalar(DType.INT32, makeInt32Scalar(value, true));
  }

  public static Scalar fromInt(Integer value) {
    if (value == null) {
      return Scalar.fromNull(DType.INT32);
    }
    return Scalar.fromInt(value.intValue());
  }

  public static Scalar fromUnsignedInt(int value) {
    return new Scalar(DType.UINT32, makeUint32Scalar(value, true));
  }

  public static Scalar fromUnsignedInt(Integer value) {
    if (value == null) {
      return Scalar.fromNull(DType.UINT32);
    }
    return Scalar.fromUnsignedInt(value.intValue());
  }

  public static Scalar fromLong(long value) {
    return new Scalar(DType.INT64, makeInt64Scalar(value, true));
  }

  public static Scalar fromLong(Long value) {
    if (value == null) {
      return Scalar.fromNull(DType.INT64);
    }
    return Scalar.fromLong(value.longValue());
  }

  public static Scalar fromUnsignedLong(long value) {
    return new Scalar(DType.UINT64, makeUint64Scalar(value, true));
  }

  public static Scalar fromUnsignedLong(Long value) {
    if (value == null) {
      return Scalar.fromNull(DType.UINT64);
    }
    return Scalar.fromUnsignedLong(value.longValue());
  }

  public static Scalar fromFloat(float value) {
    return new Scalar(DType.FLOAT32, makeFloat32Scalar(value, true));
  }

  public static Scalar fromDecimal(int scale, int unscaledValue) {
    long handle = makeDecimal32Scalar(unscaledValue, scale, true);
    return new Scalar(DType.create(DType.DTypeEnum.DECIMAL32, scale), handle);
  }

  public static Scalar fromDecimal(int scale, long unscaledValue) {
    long handle = makeDecimal64Scalar(unscaledValue, scale, true);
    return new Scalar(DType.create(DType.DTypeEnum.DECIMAL64, scale), handle);
  }

  public static Scalar fromDecimal(int scale, BigInteger unscaledValue) {
    byte[] unscaledValueBytes = unscaledValue.toByteArray();
    byte[] finalBytes = convertDecimal128FromJavaToCudf(unscaledValueBytes);
    long handle = makeDecimal128Scalar(finalBytes, scale, true);
    return new Scalar(DType.create(DType.DTypeEnum.DECIMAL128, scale), handle);
  }

  public static Scalar fromFloat(Float value) {
    if (value == null) {
      return Scalar.fromNull(DType.FLOAT32);
    }
    return Scalar.fromFloat(value.floatValue());
  }

  public static Scalar fromDouble(double value) {
    return new Scalar(DType.FLOAT64, makeFloat64Scalar(value, true));
  }

  public static Scalar fromDouble(Double value) {
    if (value == null) {
      return Scalar.fromNull(DType.FLOAT64);
    }
    return Scalar.fromDouble(value.doubleValue());
  }

  public static Scalar fromDecimal(BigDecimal value) {
    if (value == null) {
      return Scalar.fromNull(DType.create(DType.DTypeEnum.DECIMAL64, 0));
    }
    DType dt = DType.fromJavaBigDecimal(value);
    return fromDecimal(value.unscaledValue(), dt);
  }

  public static Scalar fromDecimal(BigInteger unscaledValue, DType dt) {
    if (unscaledValue == null) {
      return Scalar.fromNull(dt);
    }
    long handle;
    if (dt.typeId == DType.DTypeEnum.DECIMAL32) {
      handle = makeDecimal32Scalar(unscaledValue.intValueExact(), dt.getScale(), true);
    } else if (dt.typeId == DType.DTypeEnum.DECIMAL64) {
      handle = makeDecimal64Scalar(unscaledValue.longValueExact(), dt.getScale(), true);
    } else {
      byte[] unscaledValueBytes = unscaledValue.toByteArray();
      byte[] finalBytes = convertDecimal128FromJavaToCudf(unscaledValueBytes);
      handle = makeDecimal128Scalar(finalBytes, dt.getScale(), true);
    }
    return new Scalar(dt, handle);
  }

  public static Scalar timestampDaysFromInt(int value) {
    return new Scalar(DType.TIMESTAMP_DAYS, makeTimestampDaysScalar(value, true));
  }

  public static Scalar timestampDaysFromInt(Integer value) {
    if (value == null) {
      return Scalar.fromNull(DType.TIMESTAMP_DAYS);
    }
    return Scalar.timestampDaysFromInt(value.intValue());
  }

  /**
   * Returns a duration scalar based on the type parameter.
   * @param type - dtype of scalar to be returned
   * @param value - corresponding value for the scalar
   * @return - Scalar of the respective type
   */
  public static Scalar durationFromLong(DType type, long value) {
    if (type.isDurationType()) {
      if (type.equals(DType.DURATION_DAYS)) {
        int intValue = (int)value;
        if (value != intValue) {
          throw new IllegalArgumentException("value too large for type " + type + ": " + value);
        }
        return durationDaysFromInt(intValue);
      } else {
        return new Scalar(type, makeDurationTimeScalar(type.typeId.getNativeId(), value, true));
      }
    } else {
      throw new IllegalArgumentException("type is not a duration: " + type);
    }
  }

  /**
   * Returns a duration scalar based on the type parameter.
   * @param type - dtype of scalar to be returned
   * @param value - corresponding value for the scalar
   * @return - Scalar of the respective type
   */
  public static Scalar durationFromLong(DType type, Long value) {
    if (value == null) {
      return Scalar.fromNull(type);
    }
    return Scalar.durationFromLong(type, value.longValue());
  }

  public static Scalar timestampFromLong(DType type, long value) {
    if (type.isTimestampType()) {
      if (type.equals(DType.TIMESTAMP_DAYS)) {
        int intValue = (int)value;
        if (value != intValue) {
          throw new IllegalArgumentException("value too large for type " + type + ": " + value);
        }
        return timestampDaysFromInt(intValue);
      } else {
        return new Scalar(type, makeTimestampTimeScalar(type.typeId.getNativeId(), value, true));
      }
    } else {
      throw new IllegalArgumentException("type is not a timestamp: " + type);
    }
  }

  public static Scalar timestampFromLong(DType type, Long value) {
    if (value == null) {
      return Scalar.fromNull(type);
    }
    return Scalar.timestampFromLong(type, value.longValue());
  }

  public static Scalar fromString(String value) {
    return fromUTF8String(value == null ? null : value.getBytes(StandardCharsets.UTF_8));
  }

  /**
   * Creates a String scalar from an array of UTF8 bytes.
   * @param value the array of UTF8 bytes
   * @return a String scalar
   */
  public static Scalar fromUTF8String(byte[] value) {
    if (value == null) {
      return fromNull(DType.STRING);
    }
    return new Scalar(DType.STRING, makeStringScalar(value, true));
  }

  /**
   * Creates a null scalar of list type.
   *
   * This special API exists because the element type is required to build an empty
   * nested column as the underlying column of the list scalar.
   *
   * @param elementType the data type of the element in the list.
   * @return a null scalar of list type
   */
  public static Scalar listFromNull(HostColumnVector.DataType elementType) {
    try (ColumnVector col = ColumnVector.empty(elementType)) {
      return new Scalar(DType.LIST, makeListScalar(col.getNativeView(), false));
    }
  }

  /**
   * Creates a scalar of list from a ColumnView.
   *
   * All the rows in the ColumnView will be copied into the Scalar. So the ColumnView
   * can be closed after this call completes.
   */
  public static Scalar listFromColumnView(ColumnView list) {
    if (list == null) {
      throw new IllegalArgumentException("'list' should NOT be null." +
          " Please call 'listFromNull' to create a null list scalar.");
    }
    return new Scalar(DType.LIST, makeListScalar(list.getNativeView(), true));
  }

  /**
   * Creates a null scalar of struct type.
   *
   * @param elementTypes data types of children in the struct
   * @return a null scalar of struct type
   */
  public static Scalar structFromNull(HostColumnVector.DataType... elementTypes) {
    ColumnVector[] children = new ColumnVector[elementTypes.length];
    long[] childHandles = new long[elementTypes.length];
    RuntimeException error = null;
    try {
      for (int i = 0; i < elementTypes.length; i++) {
        // Build column vector having single null value rather than empty column vector,
        // because struct scalar requires row count of children columns == 1.
        children[i] = buildNullColumnVector(elementTypes[i]);
        childHandles[i] = children[i].getNativeView();
      }
      return new Scalar(DType.STRUCT, makeStructScalar(childHandles, false));
    } catch (RuntimeException ex) {
      error = ex;
      throw ex;
    } catch (Exception ex) {
      error = new RuntimeException(ex);
      throw ex;
    } finally {
      // close all empty children
      for (ColumnVector child : children) {
        // Children are created in order, so a null entry means no more were
        // created; exit the loop.
        if (child == null) break;
        // suppress exception during the close process to ensure that all elements are closed
        try {
          child.close();
        } catch (Exception ex) {
          if (error == null) {
            error = new RuntimeException(ex);
            continue;
          }
          error.addSuppressed(ex);
        }
      }
      if (error != null) throw error;
    }
  }

  /**
   * Creates a scalar of struct from a ColumnView.
   *
   * @param columns children columns of struct
   * @return a Struct scalar
   */
  public static Scalar structFromColumnViews(ColumnView... columns) {
    if (columns == null) {
      throw new IllegalArgumentException("input columns should NOT be null");
    }
    long[] columnHandles = new long[columns.length];
    for (int i = 0; i < columns.length; i++) {
      columnHandles[i] = columns[i].getNativeView();
    }
    return new Scalar(DType.STRUCT, makeStructScalar(columnHandles, true));
  }

  /**
   * Build a column vector of a single row that holds a null value
   *
   * @param hostType host data type of the null column vector
   * @return the null vector
   */
  private static ColumnVector buildNullColumnVector(HostColumnVector.DataType hostType) {
    DType dt = hostType.getType();
    if (!dt.isNestedType()) {
      try (HostColumnVector.Builder builder = HostColumnVector.builder(dt, 1)) {
        builder.appendNull();
        try (HostColumnVector hcv = builder.build()) {
          return hcv.copyToDevice();
        }
      }
    } else if (dt.typeId == DType.DTypeEnum.LIST) {
      // type of List doesn't matter here because of type erasure in Java
      try (HostColumnVector hcv = HostColumnVector.fromLists(hostType, (List<Integer>) null)) {
        return hcv.copyToDevice();
      }
    } else if (dt.typeId == DType.DTypeEnum.STRUCT) {
      try (HostColumnVector hcv = HostColumnVector.fromStructs(
          hostType, (HostColumnVector.StructData) null)) {
        return hcv.copyToDevice();
      }
    } else {
      throw new IllegalArgumentException("Unsupported data type: " + hostType);
    }
  }

  private static native void closeScalar(long scalarHandle);
  private static native boolean isScalarValid(long scalarHandle);
  private static native byte getByte(long scalarHandle);
  private static native short getShort(long scalarHandle);
  private static native int getInt(long scalarHandle);
  private static native long getLong(long scalarHandle);
  private static native byte[] getBigIntegerBytes(long scalarHandle);
  private static native float getFloat(long scalarHandle);
  private static native double getDouble(long scalarHandle);
  private static native byte[] getUTF8(long scalarHandle);
  private static native long getListAsColumnView(long scalarHandle);
  private static native long[] getChildrenFromStructScalar(long scalarHandle);
  private static native long makeBool8Scalar(boolean value, boolean isValid);
  private static native long makeInt8Scalar(byte value, boolean isValid);
  private static native long makeUint8Scalar(byte value, boolean isValid);
  private static native long makeInt16Scalar(short value, boolean isValid);
  private static native long makeUint16Scalar(short value, boolean isValid);
  private static native long makeInt32Scalar(int value, boolean isValid);
  private static native long makeUint32Scalar(int value, boolean isValid);
  private static native long makeInt64Scalar(long value, boolean isValid);
  private static native long makeUint64Scalar(long value, boolean isValid);
  private static native long makeFloat32Scalar(float value, boolean isValid);
  private static native long makeFloat64Scalar(double value, boolean isValid);
  private static native long makeStringScalar(byte[] value, boolean isValid);
  private static native long makeDurationDaysScalar(int value, boolean isValid);
  private static native long makeDurationTimeScalar(int dtype, long value, boolean isValid);
  private static native long makeTimestampDaysScalar(int value, boolean isValid);
  private static native long makeTimestampTimeScalar(int dtypeNativeId, long value, boolean isValid);
  private static native long makeDecimal32Scalar(int value, int scale, boolean isValid);
  private static native long makeDecimal64Scalar(long value, int scale, boolean isValid);
  private static native long makeDecimal128Scalar(byte[] value, int scale, boolean isValid);
  private static native long makeListScalar(long viewHandle, boolean isValid);
  private static native long makeStructScalar(long[] viewHandles, boolean isValid);
  private static native long repeatString(long scalarHandle, int repeatTimes);

  Scalar(DType type, long scalarHandle) {
    this.type = type;
    this.offHeap = new OffHeapState(scalarHandle);
    incRefCount();
  }

  /**
   * Increment the reference count for this scalar. You need to call close on this
   * to decrement the reference count again.
   */
  public synchronized Scalar incRefCount() {
    if (offHeap.scalarHandle == 0) {
      offHeap.logRefCountDebug("INC AFTER CLOSE " + this);
      throw new IllegalStateException("Scalar is already closed");
    }
    ++refCount;
    return this;
  }

  long getScalarHandle() {
    return offHeap.scalarHandle;
  }

  /**
   * Free the memory associated with a scalar.
   */
  @Override
  public synchronized void close() {
    refCount--;
    offHeap.delRef();
    if (refCount == 0) {
      offHeap.clean(false);
    } else if (refCount < 0) {
      offHeap.logRefCountDebug("double free " + this);
      throw new IllegalStateException("Close called too many times " + this);
    }
  }

  @Override
  public DType getType() {
    return type;
  }

  public boolean isValid() {
    return isScalarValid(getScalarHandle());
  }

  /**
   * Returns the scalar value as a boolean.
   */
  public boolean getBoolean() {
    return getByte(getScalarHandle()) != 0;
  }

  /**
   * Returns the scalar value as a byte.
   */
  public byte getByte() {
    return getByte(getScalarHandle());
  }

  /**
   * Returns the scalar value as a short.
   */
  public short getShort() {
    return getShort(getScalarHandle());
  }

  /**
   * Returns the scalar value as an int.
   */
  public int getInt() {
    return getInt(getScalarHandle());
  }

  /**
   * Returns the scalar value as a long.
   */
  public long getLong() {
    return getLong(getScalarHandle());
  }

  /**
   * Returns the BigDecimal unscaled scalar value as a byte array.
   */
  public byte[] getBigInteger() {
    byte[] res = getBigIntegerBytes(getScalarHandle());
    convertInPlaceToBigEndian(res);
    return res;
  }

  /**
   * Returns the scalar value as a float.
   */
  public float getFloat() {
    return getFloat(getScalarHandle());
  }

  /**
   * Returns the scalar value as a double.
   */
  public double getDouble() {
    return getDouble(getScalarHandle());
  }

  /**
   * Returns the scalar value as a BigDecimal.
   */
  public BigDecimal getBigDecimal() {
    if (this.type.typeId == DType.DTypeEnum.DECIMAL32) {
      return BigDecimal.valueOf(getInt(), -type.getScale());
    } else if (this.type.typeId == DType.DTypeEnum.DECIMAL64) {
      return BigDecimal.valueOf(getLong(), -type.getScale());
    } else if (this.type.typeId == DType.DTypeEnum.DECIMAL128) {
      return new BigDecimal(new BigInteger(getBigInteger()), -type.getScale());
    }
    throw new IllegalArgumentException("Couldn't getBigDecimal from nonDecimal scalar");
  }

  /**
   * Returns the scalar value as a Java string.
   */
  public String getJavaString() {
    return new String(getUTF8(getScalarHandle()), StandardCharsets.UTF_8);
  }

  /**
   * Returns the scalar value as UTF-8 data.
   */
  public byte[] getUTF8() {
    return getUTF8(getScalarHandle());
  }

  /**
   * Returns the scalar value as a ColumnView. Callers should close the returned ColumnView to
   * avoid memory leaks.
   *
   * The returned ColumnView is only valid as long as the Scalar remains valid. If the Scalar
   * is closed before this ColumnView is closed, using this ColumnView will result in undefined
   * behavior.
   */
  public ColumnView getListAsColumnView() {
    assert DType.LIST.equals(type) : "Cannot get list for the vector of type " + type;
    return new ColumnView(getListAsColumnView(getScalarHandle()));
  }

  /**
   * Fetches views of the children columns from a struct scalar.
   * The returned ColumnViews should be closed appropriately. Otherwise, a native memory leak
   * will occur.
   *
   * @return array of column views referring to the children of the struct scalar
   */
  public ColumnView[] getChildrenFromStructScalar() {
    assert DType.STRUCT.equals(type) : "Cannot get table for the vector of type " + type;
    long[] childHandles = getChildrenFromStructScalar(getScalarHandle());
    return ColumnView.getColumnViewsFromPointers(childHandles);
  }

  @Override
  public ColumnVector binaryOp(BinaryOp op, BinaryOperable rhs, DType outType) {
    if (rhs instanceof ColumnView) {
      ColumnView cvRhs = (ColumnView) rhs;
      return new ColumnVector(binaryOp(this, cvRhs, op, outType));
    } else {
      throw new IllegalArgumentException(rhs.getClass() +
          " is not supported as a binary op with Scalar");
    }
  }

  static long binaryOp(Scalar lhs, ColumnView rhs, BinaryOp op, DType outputType) {
    return binaryOpSV(lhs.getScalarHandle(), rhs.getNativeView(),
        op.nativeId, outputType.typeId.getNativeId(), outputType.getScale());
  }

  private static native long binaryOpSV(long lhs, long rhs, int op, int dtype, int scale);

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    Scalar other = (Scalar) o;
    if (!type.equals(other.type)) return false;
    boolean valid = isValid();
    if (valid != other.isValid()) return false;
    if (!valid) return true;
    switch (type.typeId) {
    case EMPTY:
      return true;
    case BOOL8:
      return getBoolean() == other.getBoolean();
    case INT8:
    case UINT8:
      return getByte() == other.getByte();
    case INT16:
    case UINT16:
      return getShort() == other.getShort();
    case INT32:
    case UINT32:
    case TIMESTAMP_DAYS:
    case DECIMAL32:
      return getInt() == other.getInt();
    case FLOAT32:
      return getFloat() == other.getFloat();
    case FLOAT64:
      return getDouble() == other.getDouble();
    case INT64:
    case UINT64:
    case TIMESTAMP_SECONDS:
    case TIMESTAMP_MILLISECONDS:
    case TIMESTAMP_MICROSECONDS:
    case TIMESTAMP_NANOSECONDS:
    case DECIMAL64:
      return getLong() == other.getLong();
    case DECIMAL128:
      return getBigDecimal().equals(other.getBigDecimal());
    case STRING:
      return Arrays.equals(getUTF8(), other.getUTF8());
    case LIST:
      try (ColumnView viewMe = getListAsColumnView();
           ColumnView viewO = other.getListAsColumnView()) {
        return viewMe.equals(viewO);
      }
    default:
      throw new IllegalStateException("Unexpected type: " + type);
    }
  }

  @Override
  public int hashCode() {
    int valueHash = 0;
    if (isValid()) {
      switch (type.typeId) {
      case EMPTY:
        valueHash = 0;
        break;
      case BOOL8:
        valueHash = getBoolean() ?
1 : 0; break; case INT8: case UINT8: valueHash = getByte(); break; case INT16: case UINT16: valueHash = getShort(); break; case INT32: case UINT32: case TIMESTAMP_DAYS: case DECIMAL32: case DURATION_DAYS: valueHash = getInt(); break; case INT64: case UINT64: case TIMESTAMP_SECONDS: case TIMESTAMP_MILLISECONDS: case TIMESTAMP_MICROSECONDS: case TIMESTAMP_NANOSECONDS: case DECIMAL64: case DURATION_MICROSECONDS: case DURATION_SECONDS: case DURATION_MILLISECONDS: case DURATION_NANOSECONDS: valueHash = Long.hashCode(getLong()); break; case FLOAT32: valueHash = Float.hashCode(getFloat()); break; case FLOAT64: valueHash = Double.hashCode(getDouble()); break; case STRING: valueHash = Arrays.hashCode(getUTF8()); break; case LIST: try (ColumnView v = getListAsColumnView()) { valueHash = v.hashCode(); } break; case DECIMAL128: valueHash = getBigDecimal().hashCode(); break; default: throw new IllegalStateException("Unknown scalar type: " + type); } } return Objects.hash(type, valueHash); } @Override public String toString() { StringBuilder sb = new StringBuilder("Scalar{type="); sb.append(type); if (getScalarHandle() != 0) { sb.append(" value="); switch (type.typeId) { case BOOL8: sb.append(getBoolean()); break; case INT8: sb.append(getByte()); break; case UINT8: sb.append(Byte.toUnsignedInt(getByte())); break; case INT16: sb.append(getShort()); break; case UINT16: sb.append(Short.toUnsignedInt(getShort())); break; case INT32: case TIMESTAMP_DAYS: sb.append(getInt()); break; case UINT32: sb.append(Integer.toUnsignedLong(getInt())); break; case INT64: case TIMESTAMP_SECONDS: case TIMESTAMP_MILLISECONDS: case TIMESTAMP_MICROSECONDS: case TIMESTAMP_NANOSECONDS: sb.append(getLong()); break; case UINT64: sb.append(Long.toUnsignedString(getLong())); break; case FLOAT32: sb.append(getFloat()); break; case FLOAT64: sb.append(getDouble()); break; case STRING: sb.append('"'); sb.append(getJavaString()); sb.append('"'); break; case DECIMAL32: // FALL THROUGH case DECIMAL64: // FALL THROUGH case DECIMAL128: sb.append(getBigDecimal()); break; case LIST: try (ColumnView v = getListAsColumnView()) { // It's not easy to pull out the elements so just a simple string of some metadata. sb.append(v.toString()); } break; default: throw new IllegalArgumentException("Unknown scalar type: " + type); } } sb.append("} (ID: "); sb.append(offHeap.id); sb.append(" "); sb.append(Long.toHexString(offHeap.scalarHandle)); sb.append(")"); return sb.toString(); } /** * Repeat the given string scalar a number of times specified by the <code>repeatTimes</code> * parameter. If that parameter has a non-positive value, an empty (valid) string scalar will be * returned. An invalid input scalar will always result in an invalid output scalar regardless * of the value of <code>repeatTimes</code>. * * @param repeatTimes The number of times the input string is copied to the output. * @return The resulting scalar containing repeated result of the current string. */ public Scalar repeatString(int repeatTimes) { return new Scalar(DType.STRING, repeatString(getScalarHandle(), repeatTimes)); } private static byte[] convertDecimal128FromJavaToCudf(byte[] bytes) { byte[] finalBytes = new byte[DType.DTypeEnum.DECIMAL128.sizeInBytes]; byte lastByte = bytes[0]; //Convert to 2's complement representation and make sure the sign bit is extended correctly byte setByte = (lastByte & 0x80) > 0 ? 
(byte)0xff : (byte)0x00; for(int i = bytes.length; i < finalBytes.length; i++) { finalBytes[i] = setByte; } // After setting the sign bits, reverse the rest of the bytes for endianness for(int k = 0; k < bytes.length; k++) { finalBytes[k] = bytes[bytes.length - k - 1]; } return finalBytes; } private void convertInPlaceToBigEndian(byte[] res) { assert ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN); int i =0; int j = res.length -1; while (j > i) { byte tmp; tmp = res[j]; res[j] = res[i]; res[i] = tmp; j--; i++; } } /** * Holds the off-heap state of the scalar so it can be cleaned up, even if it is leaked. */ private static class OffHeapState extends MemoryCleaner.Cleaner { private long scalarHandle; OffHeapState(long scalarHandle) { this.scalarHandle = scalarHandle; } @Override protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) { if (scalarHandle != 0) { if (logErrorIfNotClean) { LOG.error("A SCALAR WAS LEAKED(ID: " + id + " " + Long.toHexString(scalarHandle) + ")"); logRefCountDebug("Leaked scalar"); } try { closeScalar(scalarHandle); } finally { // Always mark the resource as freed even if an exception is thrown. // We cannot know how far it progressed before the exception, and // therefore it is unsafe to retry. scalarHandle = 0; } return true; } return false; } @Override public boolean isClean() { return scalarHandle == 0; } } }
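// Illustrative usage sketch (not part of the cudf sources): a minimal example of the Scalar
// factory and accessor methods defined above. Scalar is reference counted, so
// try-with-resources is used to release the native handles; fromUTF8String, repeatString
// and getJavaString are all declared in this file.
class ScalarUsageExample {
  static String repeatAb() {
    try (Scalar s = Scalar.fromUTF8String("ab".getBytes(java.nio.charset.StandardCharsets.UTF_8));
         Scalar repeated = s.repeatString(3)) {
      // getJavaString converts the UTF-8 payload back into a java.lang.String
      return repeated.getJavaString(); // "ababab"
    }
  }
}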
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CompressionType.java
/* * * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Enumeration of compression formats. */ public enum CompressionType { /** No compression */ NONE(0), /** Automatically detect or select the compression codec */ AUTO(1), /** Snappy format using byte-oriented LZ77 */ SNAPPY(2), /** GZIP format using the DEFLATE algorithm */ GZIP(3), /** BZIP2 format using Burrows-Wheeler transform */ BZIP2(4), /** BROTLI format using LZ77 + Huffman + 2nd order context modeling */ BROTLI(5), /** ZIP format using DEFLATE algorithm */ ZIP(6), /** XZ format using LZMA(2) algorithm */ XZ(7), /** ZLIB format, using DEFLATE algorithm */ ZLIB(8), /** LZ4 format, using LZ77 */ LZ4(9), /** Lempel–Ziv–Oberhumer format */ LZO(10), /** Zstandard format */ ZSTD(11); final int nativeId; CompressionType(int nativeId) { this.nativeId = nativeId; } }
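// Illustrative sketch (not part of the cudf sources): CompressionType values are handed to
// the file writer options. The withCompressionType builder method assumed here comes from
// CompressionMetadataWriterOptions in the wider cudf Java API, not from this file.
class CompressionTypeExample {
  static ParquetWriterOptions snappyOptions() {
    // Select Snappy compression for a Parquet write (assumed builder method)
    return ParquetWriterOptions.builder()
        .withCompressionType(CompressionType.SNAPPY)
        .build();
  }
}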
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmException.java
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Exception from RMM allocator. */ public class RmmException extends RuntimeException { RmmException(String message) { super(message); } RmmException(String message, Throwable cause) { super(message, cause); } }
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ColumnWriterOptions.java
/* * * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.util.ArrayList; import java.util.List; /** * Per column settings for writing Parquet/ORC files. * * The native code uses the same "column_in_metadata" for both Parquet and ORC. */ public class ColumnWriterOptions { // `isTimestampTypeInt96` is ignored in ORC private boolean isTimestampTypeInt96; private int precision; private boolean isNullable; private boolean isMap = false; private boolean isBinary = false; private String columnName; // only for Parquet private boolean hasParquetFieldId; private int parquetFieldId; private ColumnWriterOptions(AbstractStructBuilder builder) { this.columnName = builder.name; this.isNullable = builder.isNullable; this.hasParquetFieldId = builder.hasParquetFieldId; this.parquetFieldId = builder.parquetFieldId; this.childColumnOptions = (ColumnWriterOptions[]) builder.children.toArray(new ColumnWriterOptions[0]); } // The sentinel value of unknown precision (default value) public static int UNKNOWN_PRECISION = -1; /** * Constructor used for list */ private ColumnWriterOptions(ListBuilder builder) { assert(builder.children.size() == 1) : "Lists can only have one child"; this.columnName = builder.name; this.isNullable = builder.isNullable; // we add the child twice even though lists have only one child, because of the way cudf // has implemented this: it requires two children to be set for the list, but it drops the // first one. This is lower priority and might be fixed in the future this.childColumnOptions = new ColumnWriterOptions[]{DUMMY_CHILD, builder.children.get(0)}; } protected ColumnWriterOptions[] childColumnOptions = {}; protected abstract static class AbstractStructBuilder<T extends AbstractStructBuilder, V extends ColumnWriterOptions> extends NestedBuilder<T, V> { /** * Builder specific to build a Struct meta */ public AbstractStructBuilder(String name, boolean isNullable) { super(name, isNullable); } public AbstractStructBuilder(String name, boolean isNullable, int parquetFieldId) { super(name, isNullable, parquetFieldId); } protected AbstractStructBuilder() { super(); } } // This child is needed as the first child of a List column meta due to how cudf has been // implemented. Cudf drops the first child from the meta if a column is a LIST. This is done // this way due to some complications in the parquet reader.
There was a change to fix this here: // https://github.com/rapidsai/cudf/pull/7461/commits/5ce33b40abb87cc7b76b5efeb0a3a0215f9ef6fb // but it was reverted later on here: // https://github.com/rapidsai/cudf/pull/7461/commits/f248eb7265de995a95f998d46d897fb0ae47f53e static ColumnWriterOptions DUMMY_CHILD = new ColumnWriterOptions("DUMMY"); public static abstract class NestedBuilder<T extends NestedBuilder, V extends ColumnWriterOptions> { protected List<ColumnWriterOptions> children = new ArrayList<>(); protected boolean isNullable = true; protected String name = ""; // Parquet field ids, only used by Parquet protected boolean hasParquetFieldId; protected int parquetFieldId; /** * Builder for a nested column meta */ protected NestedBuilder(String name, boolean isNullable) { this.name = name; this.isNullable = isNullable; } protected NestedBuilder(String name, boolean isNullable, int parquetFieldId) { this.name = name; this.isNullable = isNullable; this.hasParquetFieldId = true; this.parquetFieldId = parquetFieldId; } protected NestedBuilder() {} protected ColumnWriterOptions withColumn(String name, boolean isNullable) { return new ColumnWriterOptions(name, isNullable); } protected ColumnWriterOptions withColumn(String name, boolean isNullable, int parquetFieldId) { return new ColumnWriterOptions(name, isNullable, parquetFieldId); } protected ColumnWriterOptions withDecimal(String name, int precision, boolean isNullable) { return new ColumnWriterOptions(name, false, precision, isNullable); } protected ColumnWriterOptions withDecimal(String name, int precision, boolean isNullable, int parquetFieldId) { return new ColumnWriterOptions(name, false, precision, isNullable, parquetFieldId); } protected ColumnWriterOptions withTimestamp(String name, boolean isInt96, boolean isNullable) { return new ColumnWriterOptions(name, isInt96, UNKNOWN_PRECISION, isNullable); } protected ColumnWriterOptions withTimestamp(String name, boolean isInt96, boolean isNullable, int parquetFieldId) { return new ColumnWriterOptions(name, isInt96, UNKNOWN_PRECISION, isNullable, parquetFieldId); } protected ColumnWriterOptions withBinary(String name, boolean isNullable) { ColumnWriterOptions opt = listBuilder(name, isNullable) // The name here does not matter. It will not be included in the final file // This is just to get the metadata to line up properly for the C++ APIs .withColumns(false, "BINARY_DATA") .build(); opt.isBinary = true; return opt; } protected ColumnWriterOptions withBinary(String name, boolean isNullable, int parquetFieldId) { ColumnWriterOptions opt = listBuilder(name, isNullable) // The name here does not matter. It will not be included in the final file // This is just to get the metadata to line up properly for the C++ APIs .withColumn(false, "BINARY_DATA", parquetFieldId) .build(); opt.isBinary = true; return opt; } /** * Set the list column meta. * Lists should have only one child in ColumnVector, but the metadata expects a * LIST column to have two children and the first child to be the * {@link ColumnWriterOptions#DUMMY_CHILD}. * This is the current behavior in cudf and may change in the future. * @return this for chaining.
*/ public T withListColumn(ListColumnWriterOptions child) { assert (child.getChildColumnOptions().length == 2) : "Lists can only have two children"; if (child.getChildColumnOptions()[0] != DUMMY_CHILD) { throw new IllegalArgumentException("First child in the list has to be DUMMY_CHILD"); } if (child.getChildColumnOptions()[1].getColumnName().isEmpty()) { throw new IllegalArgumentException("Column name can't be empty"); } children.add(child); return (T) this; } /** * Set the map column meta. * @return this for chaining. */ public T withMapColumn(ColumnWriterOptions child) { children.add(child); return (T) this; } /** * Set a child struct meta data * @return this for chaining. */ public T withStructColumn(StructColumnWriterOptions child) { for (ColumnWriterOptions opt: child.getChildColumnOptions()) { if (opt.getColumnName().isEmpty()) { throw new IllegalArgumentException("Column name can't be empty"); } } children.add(child); return (T) this; } /** * Set column name */ public T withNonNullableColumns(String... names) { withColumns(false, names); return (T) this; } /** * Set nullable column meta data */ public T withNullableColumns(String... names) { withColumns(true, names); return (T) this; } /** * Set a simple child meta data * @return this for chaining. */ public T withColumns(boolean nullable, String... names) { for (String n : names) { children.add(withColumn(n, nullable)); } return (T) this; } /** * Set a simple child meta data * @return this for chaining. */ public T withColumn(boolean nullable, String name, int parquetFieldId) { children.add(withColumn(name, nullable, parquetFieldId)); return (T) this; } /** * Set a Decimal child meta data * @return this for chaining. */ public T withDecimalColumn(String name, int precision, boolean nullable) { children.add(withDecimal(name, precision, nullable)); return (T) this; } /** * Set a Decimal child meta data * @return this for chaining. */ public T withDecimalColumn(String name, int precision, boolean nullable, int parquetFieldId) { children.add(withDecimal(name, precision, nullable, parquetFieldId)); return (T) this; } /** * Set a Decimal child meta data * @return this for chaining. */ public T withNullableDecimalColumn(String name, int precision) { withDecimalColumn(name, precision, true); return (T) this; } /** * Set a Decimal child meta data * @return this for chaining. */ public T withDecimalColumn(String name, int precision) { withDecimalColumn(name, precision, false); return (T) this; } /** * Set a binary child meta data * @return this for chaining. */ public T withBinaryColumn(String name, boolean nullable, int parquetFieldId) { children.add(withBinary(name, nullable, parquetFieldId)); return (T) this; } /** * Set a binary child meta data * @return this for chaining. */ public T withBinaryColumn(String name, boolean nullable) { children.add(withBinary(name, nullable)); return (T) this; } /** * Set a timestamp child meta data * @return this for chaining. */ public T withTimestampColumn(String name, boolean isInt96, boolean nullable, int parquetFieldId) { children.add(withTimestamp(name, isInt96, nullable, parquetFieldId)); return (T) this; } /** * Set a timestamp child meta data * @return this for chaining. */ public T withTimestampColumn(String name, boolean isInt96, boolean nullable) { children.add(withTimestamp(name, isInt96, nullable)); return (T) this; } /** * Set a timestamp child meta data * @return this for chaining. 
*/ public T withTimestampColumn(String name, boolean isInt96) { withTimestampColumn(name, isInt96, false); return (T) this; } /** * Set a timestamp child meta data * @return this for chaining. */ public T withNullableTimestampColumn(String name, boolean isInt96) { withTimestampColumn(name, isInt96, true); return (T) this; } public abstract V build(); } public ColumnWriterOptions(String columnName, boolean isTimestampTypeInt96, int precision, boolean isNullable) { this.isTimestampTypeInt96 = isTimestampTypeInt96; this.precision = precision; this.isNullable = isNullable; this.columnName = columnName; } public ColumnWriterOptions(String columnName, boolean isTimestampTypeInt96, int precision, boolean isNullable, int parquetFieldId) { this(columnName, isTimestampTypeInt96, precision, isNullable); this.hasParquetFieldId = true; this.parquetFieldId = parquetFieldId; } public ColumnWriterOptions(String columnName, boolean isNullable) { this.isTimestampTypeInt96 = false; this.precision = UNKNOWN_PRECISION; this.isNullable = isNullable; this.columnName = columnName; } public ColumnWriterOptions(String columnName, boolean isNullable, int parquetFieldId) { this(columnName, isNullable); this.hasParquetFieldId = true; this.parquetFieldId = parquetFieldId; } public ColumnWriterOptions(String columnName) { this(columnName, true); } @FunctionalInterface protected interface ByteArrayProducer { boolean[] apply(ColumnWriterOptions opt); } @FunctionalInterface protected interface IntArrayProducer { int[] apply(ColumnWriterOptions opt); } boolean[] getFlatIsTimeTypeInt96() { boolean[] ret = {isTimestampTypeInt96}; if (childColumnOptions.length > 0) { return getFlatBooleans(ret, (opt) -> opt.getFlatIsTimeTypeInt96()); } else { return ret; } } protected boolean[] getFlatBooleans(boolean[] ret, ByteArrayProducer producer) { boolean[][] childResults = new boolean[childColumnOptions.length][]; int totalChildrenFlatLength = ret.length; for (int i = 0 ; i < childColumnOptions.length ; i++) { ColumnWriterOptions opt = childColumnOptions[i]; childResults[i] = producer.apply(opt); totalChildrenFlatLength += childResults[i].length; } boolean[] result = new boolean[totalChildrenFlatLength]; System.arraycopy(ret, 0, result, 0, ret.length); int copiedSoFar = ret.length; for (int i = 0 ; i < childColumnOptions.length ; i++) { System.arraycopy(childResults[i], 0, result, copiedSoFar, childResults[i].length); copiedSoFar += childResults[i].length; } return result; } int[] getFlatPrecision() { int[] ret = {precision}; if (childColumnOptions.length > 0) { return getFlatInts(ret, (opt) -> opt.getFlatPrecision()); } else { return ret; } } boolean[] getFlatHasParquetFieldId() { boolean[] ret = {hasParquetFieldId}; if (childColumnOptions.length > 0) { return getFlatBooleans(ret, (opt) -> opt.getFlatHasParquetFieldId()); } else { return ret; } } int[] getFlatParquetFieldId() { int[] ret = {parquetFieldId}; if (childColumnOptions.length > 0) { return getFlatInts(ret, (opt) -> opt.getFlatParquetFieldId()); } else { return ret; } } boolean[] getFlatIsNullable() { boolean[] ret = {isNullable}; if (childColumnOptions.length > 0) { return getFlatBooleans(ret, (opt) -> opt.getFlatIsNullable()); } else { return ret; } } boolean[] getFlatIsMap() { boolean[] ret = {isMap}; if (childColumnOptions.length > 0) { return getFlatBooleans(ret, (opt) -> opt.getFlatIsMap()); } else { return ret; } } boolean[] getFlatIsBinary() { boolean[] ret = {isBinary}; if (childColumnOptions.length > 0) { return getFlatBooleans(ret, (opt) -> opt.getFlatIsBinary()); 
} else { return ret; } } int[] getFlatNumChildren() { int[] ret = {childColumnOptions.length}; if (childColumnOptions.length > 0) { return getFlatInts(ret, (opt) -> opt.getFlatNumChildren()); } else { return ret; } } protected int[] getFlatInts(int[] ret, IntArrayProducer producer) { int[][] childResults = new int[childColumnOptions.length][]; int totalChildrenFlatLength = ret.length; for (int i = 0 ; i < childColumnOptions.length ; i++) { ColumnWriterOptions opt = childColumnOptions[i]; childResults[i] = producer.apply(opt); totalChildrenFlatLength += childResults[i].length; } int[] result = new int[totalChildrenFlatLength]; System.arraycopy(ret, 0, result, 0, ret.length); int copiedSoFar = ret.length; for (int i = 0 ; i < childColumnOptions.length ; i++) { System.arraycopy(childResults[i], 0, result, copiedSoFar, childResults[i].length); copiedSoFar += childResults[i].length; } return result; } String[] getFlatColumnNames() { String[] ret = {columnName}; if (childColumnOptions.length > 0) { return getFlatColumnNames(ret); } else { return ret; } } protected String[] getFlatColumnNames(String[] ret) { String[][] childResults = new String[childColumnOptions.length][]; int totalChildrenFlatLength = ret.length; for (int i = 0 ; i < childColumnOptions.length ; i++) { ColumnWriterOptions opt = childColumnOptions[i]; childResults[i] = opt.getFlatColumnNames(); totalChildrenFlatLength += childResults[i].length; } String[] result = new String[totalChildrenFlatLength]; System.arraycopy(ret, 0, result, 0, ret.length); int copiedSoFar = ret.length; for (int i = 0 ; i < childColumnOptions.length ; i++) { System.arraycopy(childResults[i], 0, result, copiedSoFar, childResults[i].length); copiedSoFar += childResults[i].length; } return result; } /** * Add a Map Column to the schema. * <p> * Maps are List columns with a Struct named 'key_value' with a child named 'key' and a child * named 'value'. The caller of this method doesn't need to worry about this, as this method * will take care of it. * * Note: This method always returns a nullable column; it cannot return a non-nullable column. * Do not use this; use the overload that takes an `isNullable` parameter instead. */ @Deprecated public static ColumnWriterOptions mapColumn(String name, ColumnWriterOptions key, ColumnWriterOptions value) { StructColumnWriterOptions struct = structBuilder("key_value").build(); if (key.isNullable) { throw new IllegalArgumentException("key column can not be nullable"); } struct.childColumnOptions = new ColumnWriterOptions[]{key, value}; ColumnWriterOptions opt = listBuilder(name) .withStructColumn(struct) .build(); opt.isMap = true; return opt; } /** * Add a Map Column to the schema. * <p> * Maps are List columns with a Struct named 'key_value' with a child named 'key' and a child * named 'value'. The caller of this method doesn't need to worry about this, as this method * will take care of it. * * Note: If this map column is a key of another map, pass isNullable = false. * e.g.: for map1(map2(int, int), int), map2 should be non-nullable. * * @param isNullable is the returned map nullable.
*/ public static ColumnWriterOptions mapColumn(String name, ColumnWriterOptions key, ColumnWriterOptions value, Boolean isNullable) { if (key.isNullable) { throw new IllegalArgumentException("key column can not be nullable"); } StructColumnWriterOptions struct = structBuilder("key_value").build(); struct.childColumnOptions = new ColumnWriterOptions[]{key, value}; ColumnWriterOptions opt = listBuilder(name, isNullable) .withStructColumn(struct) .build(); opt.isMap = true; return opt; } /** * Creates a ListBuilder for column called 'name' */ public static ListBuilder listBuilder(String name) { return new ListBuilder(name, true); } /** * Creates a ListBuilder for column called 'name' */ public static ListBuilder listBuilder(String name, boolean isNullable) { return new ListBuilder(name, isNullable); } /** * Creates a StructBuilder for column called 'name' */ public static StructBuilder structBuilder(String name, boolean isNullable) { return new StructBuilder(name, isNullable); } /** * Creates a StructBuilder for column called 'name' */ public static StructBuilder structBuilder(String name, boolean isNullable, int parquetFieldId) { return new StructBuilder(name, isNullable, parquetFieldId); } /** * Creates a StructBuilder for column called 'name' */ public static StructBuilder structBuilder(String name) { return new StructBuilder(name, true); } /** * Return the name of the column */ public String getColumnName() { return columnName; } /** * Return if the column can have null values */ public boolean isNullable() { return isNullable; } /** * Return the precision for this column */ public int getPrecision() { return precision; } /** * Returns true if the writer is expected to write timestamps in INT96 */ public boolean isTimestampTypeInt96() { return isTimestampTypeInt96; } /** * Return the child columnOptions for this column */ public ColumnWriterOptions[] getChildColumnOptions() { return childColumnOptions; } public static class StructColumnWriterOptions extends ColumnWriterOptions { protected StructColumnWriterOptions(AbstractStructBuilder builder) { super(builder); } } public static class ListColumnWriterOptions extends ColumnWriterOptions { protected ListColumnWriterOptions(ListBuilder builder) { super(builder); } } public static class StructBuilder extends AbstractStructBuilder<StructBuilder, StructColumnWriterOptions> { public StructBuilder(String name, boolean isNullable) { super(name, isNullable); } public StructBuilder(String name, boolean isNullable, int parquetFieldId) { super(name, isNullable, parquetFieldId); } public StructColumnWriterOptions build() { return new StructColumnWriterOptions(this); } } public static class ListBuilder extends NestedBuilder<ListBuilder, ListColumnWriterOptions> { public ListBuilder(String name, boolean isNullable) { super(name, isNullable); } public ListColumnWriterOptions build() { return new ListColumnWriterOptions(this); } } }
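// Illustrative usage sketch (not part of the cudf sources): building per-column writer
// metadata with the builders defined above. Every method used here (structBuilder,
// listBuilder, withNonNullableColumns, withDecimalColumn, withListColumn, withStructColumn,
// withColumns, withNullableColumns) is declared in this file; the column names are made up.
class ColumnWriterOptionsExample {
  static ColumnWriterOptions.StructColumnWriterOptions exampleSchema() {
    return ColumnWriterOptions.structBuilder("top")
        // a required flat column
        .withNonNullableColumns("id")
        // a nullable decimal column with precision 10
        .withDecimalColumn("price", 10, true)
        // a list column; the builder inserts the required DUMMY_CHILD automatically
        .withListColumn(ColumnWriterOptions.listBuilder("tags")
            .withNullableColumns("tag")
            .build())
        // a struct column with two non-nullable children
        .withStructColumn(ColumnWriterOptions.structBuilder("point")
            .withColumns(false, "x", "y")
            .build())
        .build();
  }
}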
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/DefaultHostMemoryAllocator.java
/* * * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; public class DefaultHostMemoryAllocator implements HostMemoryAllocator { private static final HostMemoryAllocator INSTANCE = new DefaultHostMemoryAllocator(); public static HostMemoryAllocator get() { return INSTANCE; } @Override public HostMemoryBuffer allocate(long bytes, boolean preferPinned) { return HostMemoryBuffer.allocate(bytes, preferPinned); } @Override public HostMemoryBuffer allocate(long bytes) { return HostMemoryBuffer.allocate(bytes); } }
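// Illustrative usage sketch (not part of the cudf sources): obtaining the shared default
// allocator and allocating a host buffer with it. HostMemoryBuffer.allocate is the
// underlying call used by this class; setLong is a standard HostMemoryBuffer accessor.
class DefaultHostMemoryAllocatorExample {
  static void allocateExample() {
    HostMemoryAllocator allocator = DefaultHostMemoryAllocator.get();
    // try-with-resources releases the host allocation when done
    try (HostMemoryBuffer buffer = allocator.allocate(1024)) {
      buffer.setLong(0, 42L); // host buffers support direct host-side reads/writes
    }
  }
}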
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ScanType.java
/* * * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Scan operation type. */ public enum ScanType { /** * Include the current row in the scan. */ INCLUSIVE(true), /** * Exclude the current row from the scan. */ EXCLUSIVE(false); ScanType(boolean isInclusive) { this.isInclusive = isInclusive; } final boolean isInclusive; }
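// Illustrative sketch (not part of the cudf sources): ScanType selects between inclusive
// and exclusive scans. The ColumnView.scan signature assumed here,
// scan(ScanAggregation, ScanType, NullPolicy), is from the wider cudf Java API and is not
// defined in this file.
class ScanTypeExample {
  static ColumnVector prefixSums(ColumnVector values) {
    // An inclusive sum scan: row i holds the sum of rows 0..i, skipping nulls
    return values.scan(ScanAggregation.sum(), ScanType.INCLUSIVE, NullPolicy.EXCLUDE);
  }
}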
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CSVWriterOptions.java
/* * * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.util.ArrayList; import java.util.Collections; import java.util.List; /** * Options for writing a CSV file */ public class CSVWriterOptions { private String[] columnNames; private Boolean includeHeader = false; private String rowDelimiter = "\n"; private byte fieldDelimiter = ','; private String nullValue = ""; private String falseValue = "false"; private String trueValue = "true"; // Quote style used for CSV data. // Currently supports only `MINIMAL` and `NONE`. private QuoteStyle quoteStyle = QuoteStyle.MINIMAL; private CSVWriterOptions(Builder builder) { this.columnNames = builder.columnNames.toArray(new String[builder.columnNames.size()]); this.nullValue = builder.nullValue; this.includeHeader = builder.includeHeader; this.fieldDelimiter = builder.fieldDelimiter; this.rowDelimiter = builder.rowDelimiter; this.falseValue = builder.falseValue; this.trueValue = builder.trueValue; this.quoteStyle = builder.quoteStyle; } public String[] getColumnNames() { return columnNames; } public Boolean getIncludeHeader() { return includeHeader; } public String getRowDelimiter() { return rowDelimiter; } public byte getFieldDelimiter() { return fieldDelimiter; } public String getNullValue() { return nullValue; } public String getTrueValue() { return trueValue; } public String getFalseValue() { return falseValue; } /** * Returns the quoting style used for writing CSV. */ public QuoteStyle getQuoteStyle() { return quoteStyle; } public static Builder builder() { return new Builder(); } public static class Builder { private List<String> columnNames = Collections.emptyList(); private Boolean includeHeader = false; private String rowDelimiter = "\n"; private byte fieldDelimiter = ','; private String nullValue = ""; private String falseValue = "false"; private String trueValue = "true"; private QuoteStyle quoteStyle = QuoteStyle.MINIMAL; public CSVWriterOptions build() { return new CSVWriterOptions(this); } public Builder withColumnNames(List<String> columnNames) { this.columnNames = columnNames; return this; } public Builder withColumnNames(String... columnNames) { List<String> columnNamesList = new ArrayList<>(); for (String columnName : columnNames) { columnNamesList.add(columnName); } return withColumnNames(columnNamesList); } public Builder withIncludeHeader(Boolean includeHeader) { this.includeHeader = includeHeader; return this; } public Builder withRowDelimiter(String rowDelimiter) { this.rowDelimiter = rowDelimiter; return this; } public Builder withFieldDelimiter(byte fieldDelimiter) { this.fieldDelimiter = fieldDelimiter; return this; } public Builder withNullValue(String nullValue) { this.nullValue = nullValue; return this; } public Builder withTrueValue(String trueValue) { this.trueValue = trueValue; return this; } public Builder withFalseValue(String falseValue) { this.falseValue = falseValue; return this; } /** * Sets the quote style used when writing CSV. 
* * Note: Only the following quoting styles are supported: * 1. MINIMAL: String columns containing special characters like row-delimiters/ * field-delimiter/quotes will be quoted. * 2. NONE: No quoting is done for any columns. */ public Builder withQuoteStyle(QuoteStyle quoteStyle) { this.quoteStyle = quoteStyle; return this; } } }
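// Illustrative usage sketch (not part of the cudf sources): assembling CSV writer options
// with the builder defined above. All builder methods used here are declared in this file;
// the resulting options object is then handed to the Table CSV writer (not shown).
class CSVWriterOptionsExample {
  static CSVWriterOptions exampleOptions() {
    return CSVWriterOptions.builder()
        .withColumnNames("id", "name")
        .withIncludeHeader(true)
        .withFieldDelimiter((byte) '|')   // pipe-separated output
        .withNullValue("\\N")             // write nulls as \N
        .withQuoteStyle(QuoteStyle.MINIMAL)
        .build();
  }
}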
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CuFileResourceDestroyer.java
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Destroys a cuFile native resource. */ interface CuFileResourceDestroyer { void destroy(long pointer); }
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/BinaryOp.java
/* * Copyright (c) 2019-2020,2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; import java.util.EnumSet; /** * Mathematical binary operations. */ public enum BinaryOp { ADD(0), SUB(1), MUL(2), DIV(3), // divide using common type of lhs and rhs TRUE_DIV(4), // divide after promoting to FLOAT64 FLOOR_DIV(5), // divide after promoting to FLOAT64 and flooring the result MOD(6), PMOD(7), // pmod PYMOD(8), // mod operator % following Python's sign rules for negatives POW(9), INT_POW(10), // int ^ int, used to avoid floating point precision loss LOG_BASE(11), // logarithm to the base ATAN2(12), // atan2 SHIFT_LEFT(13), // bitwise shift left (<<) SHIFT_RIGHT(14), // bitwise shift right (>>) SHIFT_RIGHT_UNSIGNED(15), // bitwise shift right (>>>) BITWISE_AND(16), BITWISE_OR(17), BITWISE_XOR(18), LOGICAL_AND(19), LOGICAL_OR(20), EQUAL(21), NOT_EQUAL(22), LESS(23), GREATER(24), LESS_EQUAL(25), // <= GREATER_EQUAL(26), // >= NULL_EQUALS(27), // like EQUAL but NULL == NULL is TRUE and NULL == not NULL is FALSE NULL_MAX(28), // MAX but NULL < not NULL NULL_MIN(29), // MIN but NULL > not NULL //NOT IMPLEMENTED YET GENERIC_BINARY(30); NULL_LOGICAL_AND(31), NULL_LOGICAL_OR(32); static final EnumSet<BinaryOp> COMPARISON = EnumSet.of( EQUAL, NOT_EQUAL, LESS, GREATER, LESS_EQUAL, GREATER_EQUAL); static final EnumSet<BinaryOp> INEQUALITY_COMPARISON = EnumSet.of( LESS, GREATER, LESS_EQUAL, GREATER_EQUAL); private static final BinaryOp[] OPS = BinaryOp.values(); final int nativeId; BinaryOp(int nativeId) { this.nativeId = nativeId; } static BinaryOp fromNative(int nativeId) { for (BinaryOp type : OPS) { if (type.nativeId == nativeId) { return type; } } throw new IllegalArgumentException("Could not translate " + nativeId + " into a BinaryOp"); } }
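// Illustrative usage sketch (not part of the cudf sources): BinaryOp values are passed to
// BinaryOperable implementations such as Scalar and ColumnVector. Scalar.binaryOp (shown
// earlier in this dump) requires the right-hand side to be a ColumnView; Scalar.fromInt is
// assumed from the wider cudf Java API rather than this file.
class BinaryOpExample {
  static ColumnVector addTen(ColumnVector values) {
    // scalar + column, producing an INT32 result column
    try (Scalar ten = Scalar.fromInt(10)) {
      return ten.binaryOp(BinaryOp.ADD, values, DType.INT32);
    }
  }
}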
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ParquetWriterOptions.java
/* * * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * This class represents settings for writing Parquet files. It includes metadata * that will be used by the Parquet writer to write the file. */ public final class ParquetWriterOptions extends CompressionMetadataWriterOptions { private final StatisticsFrequency statsGranularity; private ParquetWriterOptions(Builder builder) { super(builder); this.statsGranularity = builder.statsGranularity; } public enum StatisticsFrequency { /** Do not generate statistics */ NONE(0), /** Generate column statistics for each rowgroup */ ROWGROUP(1), /** Generate column statistics for each page */ PAGE(2); final int nativeId; StatisticsFrequency(int nativeId) { this.nativeId = nativeId; } } public static Builder builder() { return new Builder(); } public StatisticsFrequency getStatisticsFrequency() { return statsGranularity; } public static class Builder extends CompressionMetadataWriterOptions.Builder <Builder, ParquetWriterOptions> { private StatisticsFrequency statsGranularity = StatisticsFrequency.ROWGROUP; public Builder() { super(); } public Builder withStatisticsFrequency(StatisticsFrequency statsGranularity) { this.statsGranularity = statsGranularity; return this; } public ParquetWriterOptions build() { return new ParquetWriterOptions(this); } } }
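// Illustrative usage sketch (not part of the cudf sources): selecting per-page column
// statistics for a Parquet write. withStatisticsFrequency and build are defined above;
// column and compression settings would come from the CompressionMetadataWriterOptions
// base builder (not shown in this file).
class ParquetWriterOptionsExample {
  static ParquetWriterOptions pageStats() {
    return ParquetWriterOptions.builder()
        .withStatisticsFrequency(ParquetWriterOptions.StatisticsFrequency.PAGE)
        .build();
  }
}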
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HostColumnVector.java
/* * * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.StringJoiner; import java.util.function.BiConsumer; import java.util.function.Consumer; /** * Similar to a ColumnVector, but the data is stored in host memory and accessible directly from * the JVM. This class holds references to off heap memory and is reference counted to know when * to release it. Call close to decrement the reference count when you are done with the column, * and call incRefCount to increment the reference count. */ public final class HostColumnVector extends HostColumnVectorCore { /** * Interface to handle events for this HostColumnVector. Only invoked during * close, hence `onClosed` is the only event. */ public interface EventHandler { /** * `onClosed` is invoked with the updated `refCount` during `close`. * The last invocation of `onClosed` will be with `refCount=0`. * * @note the callback is invoked with this `HostColumnVector`'s lock held. * * @param cv reference to the HostColumnVector we are closing * @param refCount the updated ref count for this HostColumnVector at * the time of invocation */ void onClosed(HostColumnVector cv, int refCount); } /** * The size in bytes of an offset entry */ static final int OFFSET_SIZE = DType.INT32.getSizeInBytes(); private int refCount; private EventHandler eventHandler; /** * Create a new column vector with data populated on the host. */ HostColumnVector(DType type, long rows, Optional<Long> nullCount, HostMemoryBuffer hostDataBuffer, HostMemoryBuffer hostValidityBuffer) { this(type, rows, nullCount, hostDataBuffer, hostValidityBuffer, null); } /** * Create a new column vector with data populated on the host. * @param type the type of the vector * @param rows the number of rows in the vector. * @param nullCount the number of nulls in the vector. * @param hostDataBuffer The host side data for the vector. In the case of STRING * this is the string data stored as bytes. * @param hostValidityBuffer Arrow-like validity buffer 1 bit per row, with padding for * 64-bit alignment. * @param offsetBuffer only valid for STRING this is the offsets into * the hostDataBuffer indicating the start and end of a string * entry. It should be (rows + 1) ints. 
* @param nestedHcv list of child HostColumnVectorCore(s) for complex types */ //Constructor for lists and struct public HostColumnVector(DType type, long rows, Optional<Long> nullCount, HostMemoryBuffer hostDataBuffer, HostMemoryBuffer hostValidityBuffer, HostMemoryBuffer offsetBuffer, List<HostColumnVectorCore> nestedHcv) { super(type, rows, nullCount, hostDataBuffer, hostValidityBuffer, offsetBuffer, nestedHcv); refCount = 0; incRefCountInternal(true); } HostColumnVector(DType type, long rows, Optional<Long> nullCount, HostMemoryBuffer hostDataBuffer, HostMemoryBuffer hostValidityBuffer, HostMemoryBuffer offsetBuffer) { super(type, rows, nullCount, hostDataBuffer, hostValidityBuffer, offsetBuffer, new ArrayList<>()); assert !type.equals(DType.LIST) : "This constructor should not be used for list type"; if (nullCount.isPresent() && nullCount.get() > 0 && hostValidityBuffer == null) { throw new IllegalStateException("Buffer cannot have a nullCount without a validity buffer"); } if (!type.equals(DType.STRING) && !type.equals(DType.LIST)) { assert offsetBuffer == null : "offsets are only supported for STRING and LIST"; } refCount = 0; incRefCountInternal(true); } /** * Set an event handler for this host vector. This method can be invoked with * null to unset the handler. * * @param newHandler - the EventHandler to use from this point forward * @return the prior event handler, or null if not set. */ public synchronized EventHandler setEventHandler(EventHandler newHandler) { EventHandler prev = this.eventHandler; this.eventHandler = newHandler; return prev; } /** * Returns the current event handler for this HostColumnVector or null if no * handler is associated. */ public synchronized EventHandler getEventHandler() { return this.eventHandler; } /** * This is a really ugly API, but it is possible that the lifecycle of a column of * data may not have a clear lifecycle thanks to java and GC. This API informs the leak * tracking code that this is expected for this column, and big scary warnings should * not be printed when this happens. */ public void noWarnLeakExpected() { offHeap.noWarnLeakExpected(); } /** * Close this Vector and free memory allocated for HostMemoryBuffer and DeviceMemoryBuffer */ @Override public synchronized void close() { refCount--; offHeap.delRef(); if (eventHandler != null) { eventHandler.onClosed(this, refCount); } if (refCount == 0) { offHeap.clean(false); for( HostColumnVectorCore child : children) { child.close(); } } else if (refCount < 0) { offHeap.logRefCountDebug("double free " + this); throw new IllegalStateException("Close called too many times " + this); } } @Override public String toString() { return "HostColumnVector{" + "rows=" + rows + ", type=" + type + ", nullCount=" + nullCount + ", offHeap=" + offHeap + '}'; } ///////////////////////////////////////////////////////////////////////////// // METADATA ACCESS ///////////////////////////////////////////////////////////////////////////// /** * Increment the reference count for this column. You need to call close on this * to decrement the reference count again. 
*/ public HostColumnVector incRefCount() { return incRefCountInternal(false); } private synchronized HostColumnVector incRefCountInternal(boolean isFirstTime) { offHeap.addRef(); if (refCount <= 0 && !isFirstTime) { offHeap.logRefCountDebug("INC AFTER CLOSE " + this); throw new IllegalStateException("Column is already closed"); } refCount++; return this; } /** * Returns this column's current refcount */ public synchronized int getRefCount() { return refCount; } ///////////////////////////////////////////////////////////////////////////// // DATA MOVEMENT ///////////////////////////////////////////////////////////////////////////// /** * Copy the data to the device. */ public ColumnVector copyToDevice() { if (rows == 0) { if (type.isNestedType()) { return ColumnView.NestedColumnVector.createColumnVector(type, 0, null, null, null, Optional.of(0L), children); } else { return new ColumnVector(type, 0, Optional.of(0L), null, null, null); } } // The simplest way is just to copy the buffers and pass them down. DeviceMemoryBuffer data = null; DeviceMemoryBuffer valid = null; DeviceMemoryBuffer offsets = null; try { if (!type.isNestedType()) { HostMemoryBuffer hdata = this.offHeap.data; if (hdata != null) { long dataLen = rows * type.getSizeInBytes(); if (type.equals(DType.STRING)) { // This needs a different type dataLen = getEndStringOffset(rows - 1); if (dataLen == 0 && getNullCount() == 0) { // This is a work around to an issue where a column of all empty strings must have at // least one byte or it will not be interpreted correctly. dataLen = 1; } } data = DeviceMemoryBuffer.allocate(dataLen); data.copyFromHostBuffer(hdata, 0, dataLen); } HostMemoryBuffer hvalid = this.offHeap.valid; if (hvalid != null) { long validLen = ColumnView.getValidityBufferSize((int) rows); valid = DeviceMemoryBuffer.allocate(validLen); valid.copyFromHostBuffer(hvalid, 0, validLen); } HostMemoryBuffer hoff = this.offHeap.offsets; if (hoff != null) { long offsetsLen = OFFSET_SIZE * (rows + 1); offsets = DeviceMemoryBuffer.allocate(offsetsLen); offsets.copyFromHostBuffer(hoff, 0, offsetsLen); } ColumnVector ret = new ColumnVector(type, rows, nullCount, data, valid, offsets); data = null; valid = null; offsets = null; return ret; } else { return ColumnView.NestedColumnVector.createColumnVector( type, (int) rows, offHeap.data, offHeap.valid, offHeap.offsets, nullCount, children); } } finally { if (data != null) { data.close(); } if (valid != null) { valid.close(); } if (offsets != null) { offsets.close(); } } } ///////////////////////////////////////////////////////////////////////////// // BUILDER ///////////////////////////////////////////////////////////////////////////// /** * Create a new Builder to hold the specified number of rows. Be sure to close the builder when * done with it. Please try to use {@see #build(int, Consumer)} instead to avoid needing to * close the builder. * @param type the type of vector to build. * @param rows the number of rows this builder can hold * @return the builder to use. */ public static Builder builder(DType type, int rows) { return new Builder(type, rows, 0); } /** * Create a new Builder to hold the specified number of rows and with enough space to hold the * given amount of string data. Be sure to close the builder when done with it. Please try to * use {@see #build(int, int, Consumer)} instead to avoid needing to close the builder. * @param rows the number of rows this builder can hold * @param stringBufferSize the size of the string buffer to allocate. 
* @return the builder to use. */ public static Builder builder(int rows, long stringBufferSize) { return new HostColumnVector.Builder(DType.STRING, rows, stringBufferSize); } /** * Create a new vector. * @param type the type of vector to build. * @param rows maximum number of rows that the vector can hold. * @param init what will initialize the vector. * @return the created vector. */ public static HostColumnVector build(DType type, int rows, Consumer<Builder> init) { try (HostColumnVector.Builder builder = builder(type, rows)) { init.accept(builder); return builder.build(); } } public static HostColumnVector build(int rows, long stringBufferSize, Consumer<Builder> init) { try (HostColumnVector.Builder builder = builder(rows, stringBufferSize)) { init.accept(builder); return builder.build(); } } public static<T> HostColumnVector fromLists(DataType dataType, List<T>... values) { try (ColumnBuilder cb = new ColumnBuilder(dataType, values.length)) { cb.appendLists(values); return cb.build(); } } public static HostColumnVector fromStructs(DataType dataType, List<StructData> values) { try (ColumnBuilder cb = new ColumnBuilder(dataType, values.size())) { cb.appendStructValues(values); return cb.build(); } } public static HostColumnVector fromStructs(DataType dataType, StructData... values) { try (ColumnBuilder cb = new ColumnBuilder(dataType, values.length)) { cb.appendStructValues(values); return cb.build(); } } public static HostColumnVector emptyStructs(DataType dataType, long rows) { StructData sd = new StructData(); try (ColumnBuilder cb = new ColumnBuilder(dataType, rows)) { for (long i = 0; i < rows; i++) { cb.append(sd); } return cb.build(); } } /** * Create a new vector from the given values. */ public static HostColumnVector boolFromBytes(byte... values) { return build(DType.BOOL8, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector fromBytes(byte... values) { return build(DType.INT8, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. * <p> * Java does not have an unsigned byte type, so the values will be * treated as if the bits represent an unsigned value. */ public static HostColumnVector fromUnsignedBytes(byte... values) { return build(DType.UINT8, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector fromShorts(short... values) { return build(DType.INT16, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. * <p> * Java does not have an unsigned short type, so the values will be * treated as if the bits represent an unsigned value. */ public static HostColumnVector fromUnsignedShorts(short... values) { return build(DType.UINT16, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector durationNanosecondsFromLongs(long... values) { return build(DType.DURATION_NANOSECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector durationMicrosecondsFromLongs(long... values) { return build(DType.DURATION_MICROSECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector durationMillisecondsFromLongs(long... 
values) { return build(DType.DURATION_MILLISECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector durationSecondsFromLongs(long... values) { return build(DType.DURATION_SECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector durationDaysFromInts(int... values) { return build(DType.DURATION_DAYS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector fromInts(int... values) { return build(DType.INT32, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. * <p> * Java does not have an unsigned int type, so the values will be * treated as if the bits represent an unsigned value. */ public static HostColumnVector fromUnsignedInts(int... values) { return build(DType.UINT32, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector fromLongs(long... values) { return build(DType.INT64, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. * <p> * Java does not have an unsigned long type, so the values will be * treated as if the bits represent an unsigned value. */ public static HostColumnVector fromUnsignedLongs(long... values) { return build(DType.UINT64, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector fromFloats(float... values) { return build(DType.FLOAT32, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector fromDoubles(double... values) { return build(DType.FLOAT64, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector daysFromInts(int... values) { return build(DType.TIMESTAMP_DAYS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector timestampSecondsFromLongs(long... values) { return build(DType.TIMESTAMP_SECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector timestampMilliSecondsFromLongs(long... values) { return build(DType.TIMESTAMP_MILLISECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector timestampMicroSecondsFromLongs(long... values) { return build(DType.TIMESTAMP_MICROSECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new vector from the given values. */ public static HostColumnVector timestampNanoSecondsFromLongs(long... values) { return build(DType.TIMESTAMP_NANOSECONDS, values.length, (b) -> b.appendArray(values)); } /** * Create a new decimal vector from unscaled values (int array) and scale. * The created vector is of type DType.DECIMAL32, whose max precision is 9. * Compared with scale of [[java.math.BigDecimal]], the scale here represents the opposite meaning. */ public static HostColumnVector decimalFromInts(int scale, int... values) { return build(DType.create(DType.DTypeEnum.DECIMAL32, scale), values.length, (b) -> b.appendUnscaledDecimalArray(values)); } /** * Create a new decimal vector from boxed unscaled values (Integer array) and scale. 
* The created vector is of type DType.DECIMAL32, whose max precision is 9. * Compared with scale of [[java.math.BigDecimal]], the scale here represents the opposite meaning. */ public static HostColumnVector decimalFromBoxedInts(int scale, Integer... values) { return build(DType.create(DType.DTypeEnum.DECIMAL32, scale), values.length, (b) -> { for (Integer v : values) { if (v == null) { b.appendNull(); } else { b.appendUnscaledDecimal(v); } } }); } /** * Create a new decimal vector from unscaled values (long array) and scale. * The created vector is of type DType.DECIMAL64, whose max precision is 18. * Compared with scale of [[java.math.BigDecimal]], the scale here represents the opposite meaning. */ public static HostColumnVector decimalFromLongs(int scale, long... values) { return build(DType.create(DType.DTypeEnum.DECIMAL64, scale), values.length, (b) -> b.appendUnscaledDecimalArray(values)); } /** * Create a new decimal vector from boxed unscaled values (Long array) and scale. * The created vector is of type DType.DECIMAL64, whose max precision is 18. * Compared with scale of [[java.math.BigDecimal]], the scale here represents the opposite meaning. */ public static HostColumnVector decimalFromBoxedLongs(int scale, Long... values) { return build(DType.create(DType.DTypeEnum.DECIMAL64, scale), values.length, (b) -> { for (Long v : values) { if (v == null) { b.appendNull(); } else { b.appendUnscaledDecimal(v); } } }); } /** * Create a new decimal vector from unscaled values (BigInteger array) and scale. * The created vector is of type DType.DECIMAL128. * Compared with scale of [[java.math.BigDecimal]], the scale here represents the opposite meaning. */ public static HostColumnVector decimalFromBigIntegers(int scale, BigInteger... values) { return build(DType.create(DType.DTypeEnum.DECIMAL128, scale), values.length, (b) -> { for (BigInteger v : values) { if (v == null) { b.appendNull(); } else { b.appendUnscaledDecimal(v); } } }); } /** * Create a new decimal vector from double floats with specific DecimalType and RoundingMode. * All doubles will be rescaled if necessary, according to scale of input DecimalType and RoundingMode. * If any overflow occurs in extracting integral part, an IllegalArgumentException will be thrown. * This API is inefficient because of slow double -> decimal conversion, so it is mainly for testing. * Compared with scale of [[java.math.BigDecimal]], the scale here represents the opposite meaning. */ public static HostColumnVector decimalFromDoubles(DType type, RoundingMode mode, double... values) { assert type.isDecimalType(); if (type.typeId == DType.DTypeEnum.DECIMAL64) { long[] data = new long[values.length]; for (int i = 0; i < values.length; i++) { BigDecimal dec = BigDecimal.valueOf(values[i]).setScale(-type.getScale(), mode); data[i] = dec.unscaledValue().longValueExact(); } return build(type, values.length, (b) -> b.appendUnscaledDecimalArray(data)); } else { int[] data = new int[values.length]; for (int i = 0; i < values.length; i++) { BigDecimal dec = BigDecimal.valueOf(values[i]).setScale(-type.getScale(), mode); data[i] = dec.unscaledValue().intValueExact(); } return build(type, values.length, (b) -> b.appendUnscaledDecimalArray(data)); } } /** * Create a new string vector from the given values. This API * supports inline nulls. This is really intended to be used only for testing as * it is slow and memory intensive to translate between java strings and UTF8 strings. */ public static HostColumnVector fromStrings(String... 
values) {
    int rows = values.length;
    long nullCount = 0;
    // How many bytes do we need to hold the data. Sorry this is really expensive
    long bufferSize = 0;
    for (String s: values) {
      if (s == null) {
        nullCount++;
      } else {
        bufferSize += s.getBytes(StandardCharsets.UTF_8).length;
      }
    }
    if (nullCount > 0) {
      return build(rows, bufferSize, (b) -> b.appendBoxed(values));
    }
    return build(rows, bufferSize, (b) -> {
      for (String s: values) {
        b.append(s);
      }
    });
  }

  /**
   * Create a new string vector from the given values. This API
   * supports inline nulls.
   */
  public static HostColumnVector fromUTF8Strings(byte[]... values) {
    int rows = values.length;
    long nullCount = 0;
    long bufferSize = 0; // How many bytes do we need to hold the data.
    for (byte[] s: values) {
      if (s == null) {
        nullCount++;
      } else {
        bufferSize += s.length;
      }
    }
    BiConsumer<Builder, byte[]> appendUTF8 = nullCount == 0 ?
        (b, s) -> b.appendUTF8String(s) :
        (b, s) -> {
          if (s == null) {
            b.appendNull();
          } else {
            b.appendUTF8String(s);
          }
        };
    return build(rows, bufferSize, (b) -> {
      for (byte[] s: values) {
        appendUTF8.accept(b, s);
      }
    });
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than building from a primitive array of unscaled values.
   * Notice:
   *  1. Input values will be rescaled with min scale (max scale in terms of java.math.BigDecimal),
   *  which avoids potential precision loss due to rounding. But there exists risk of precision overflow.
   *  2. The scale will be zero if all input values are null.
   */
  public static HostColumnVector fromDecimals(BigDecimal... values) {
    // 1. Fetch the element with max precision (maxDec). Fill with ZERO if inputs are empty.
    // 2. Fetch the max scale. Fill with ZERO if inputs are empty.
    // 3. Rescale the maxDec with the max scale, so as to work out the max precision capacity we need.
    BigDecimal maxDec = Arrays.stream(values).filter(Objects::nonNull)
        .max(Comparator.comparingInt(BigDecimal::precision))
        .orElse(BigDecimal.ZERO);
    int maxScale = Arrays.stream(values).filter(Objects::nonNull)
        .map(decimal -> decimal.scale())
        .max(Comparator.naturalOrder())
        .orElse(0);
    maxDec = maxDec.setScale(maxScale, RoundingMode.UNNECESSARY);

    return build(DType.fromJavaBigDecimal(maxDec), values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector fromBoxedBooleans(Boolean... values) {
    return build(DType.BOOL8, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector fromBoxedBytes(Byte... values) {
    return build(DType.INT8, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   * <p>
   * Java does not have an unsigned byte type, so the values will be
   * treated as if the bits represent an unsigned value.
   */
  public static HostColumnVector fromBoxedUnsignedBytes(Byte... values) {
    return build(DType.UINT8, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
*/ public static HostColumnVector fromBoxedShorts(Short... values) { return build(DType.INT16, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. * <p> * Java does not have an unsigned short type, so the values will be * treated as if the bits represent an unsigned value. */ public static HostColumnVector fromBoxedUnsignedShorts(Short... values) { return build(DType.UINT16, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. */ public static HostColumnVector durationNanosecondsFromBoxedLongs(Long... values) { return build(DType.DURATION_NANOSECONDS, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. */ public static HostColumnVector durationMicrosecondsFromBoxedLongs(Long... values) { return build(DType.DURATION_MICROSECONDS, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. */ public static HostColumnVector durationMillisecondsFromBoxedLongs(Long... values) { return build(DType.DURATION_MILLISECONDS, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. */ public static HostColumnVector durationSecondsFromBoxedLongs(Long... values) { return build(DType.DURATION_SECONDS, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. */ public static HostColumnVector durationDaysFromBoxedInts(Integer... values) { return build(DType.DURATION_DAYS, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. */ public static HostColumnVector fromBoxedInts(Integer... values) { return build(DType.INT32, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. * <p> * Java does not have an unsigned int type, so the values will be * treated as if the bits represent an unsigned value. */ public static HostColumnVector fromBoxedUnsignedInts(Integer... values) { return build(DType.UINT32, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. This API supports inline nulls, * but is much slower than using a regular array and should really only be used * for tests. */ public static HostColumnVector fromBoxedLongs(Long... values) { return build(DType.INT64, values.length, (b) -> b.appendBoxed(values)); } /** * Create a new vector from the given values. 
This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   * <p>
   * Java does not have an unsigned long type, so the values will be
   * treated as if the bits represent an unsigned value.
   */
  public static HostColumnVector fromBoxedUnsignedLongs(Long... values) {
    return build(DType.UINT64, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector fromBoxedFloats(Float... values) {
    return build(DType.FLOAT32, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector fromBoxedDoubles(Double... values) {
    return build(DType.FLOAT64, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector timestampDaysFromBoxedInts(Integer... values) {
    return build(DType.TIMESTAMP_DAYS, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector timestampSecondsFromBoxedLongs(Long... values) {
    return build(DType.TIMESTAMP_SECONDS, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector timestampMilliSecondsFromBoxedLongs(Long... values) {
    return build(DType.TIMESTAMP_MILLISECONDS, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector timestampMicroSecondsFromBoxedLongs(Long... values) {
    return build(DType.TIMESTAMP_MICROSECONDS, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Create a new vector from the given values. This API supports inline nulls,
   * but is much slower than using a regular array and should really only be used
   * for tests.
   */
  public static HostColumnVector timestampNanoSecondsFromBoxedLongs(Long... values) {
    return build(DType.TIMESTAMP_NANOSECONDS, values.length, (b) -> b.appendBoxed(values));
  }

  /**
   * Build
   */
  public static final class ColumnBuilder implements AutoCloseable {

    private DType type;
    private HostMemoryBuffer data;
    private HostMemoryBuffer valid;
    private HostMemoryBuffer offsets;
    private long nullCount = 0L;
    //TODO nullable currently not used
    private boolean nullable;
    private long rows;
    private long estimatedRows;
    private long rowCapacity = 0L;
    private long validCapacity = 0L;
    private boolean built = false;
    private List<ColumnBuilder> childBuilders = new ArrayList<>();
    private Runnable nullHandler;

    // The value of currentIndex can't exceed Int32.Max. Storing currentIndex as a long is to
    // adapt HostMemoryBuffer.setXXX, which requires a long offset.
    private long currentIndex = 0;

    // Only for Strings: pointer of the byte (data) buffer
    private int currentStringByteIndex = 0;

    // Use bit shift instead of multiply to transform row offset to byte offset
    private int bitShiftBySize = 0;
    private static final int bitShiftByOffset = (int)(Math.log(OFFSET_SIZE) / Math.log(2));

    public ColumnBuilder(HostColumnVector.DataType type, long estimatedRows) {
      this.type = type.getType();
      this.nullable = type.isNullable();
      this.rows = 0;
      this.estimatedRows = Math.max(estimatedRows, 1L);
      this.bitShiftBySize = (int)(Math.log(this.type.getSizeInBytes()) / Math.log(2));

      // initialize the null handler according to the data type
      this.setupNullHandler();

      for (int i = 0; i < type.getNumChildren(); i++) {
        childBuilders.add(new ColumnBuilder(type.getChild(i), estimatedRows));
      }
    }

    private void setupNullHandler() {
      if (this.type == DType.LIST) {
        this.nullHandler = () -> {
          this.growListBuffersAndRows();
          this.growValidBuffer();
          setNullAt(currentIndex++);
          offsets.setInt(currentIndex << bitShiftByOffset, childBuilders.get(0).getCurrentIndex());
        };
      } else if (this.type == DType.STRING) {
        this.nullHandler = () -> {
          this.growStringBuffersAndRows(0);
          this.growValidBuffer();
          setNullAt(currentIndex++);
          offsets.setInt(currentIndex << bitShiftByOffset, currentStringByteIndex);
        };
      } else if (this.type == DType.STRUCT) {
        this.nullHandler = () -> {
          this.growStructBuffersAndRows();
          this.growValidBuffer();
          setNullAt(currentIndex++);
          for (ColumnBuilder childBuilder : childBuilders) {
            childBuilder.appendNull();
          }
        };
      } else {
        this.nullHandler = () -> {
          this.growFixedWidthBuffersAndRows();
          this.growValidBuffer();
          setNullAt(currentIndex++);
        };
      }
    }

    public HostColumnVector build() {
      List<HostColumnVectorCore> hostColumnVectorCoreList = new ArrayList<>();
      for (ColumnBuilder childBuilder : childBuilders) {
        hostColumnVectorCoreList.add(childBuilder.buildNestedInternal());
      }
      // Aligns the valid buffer size with other buffers in terms of row size, because it grows lazily.
      if (valid != null) {
        growValidBuffer();
      }
      HostColumnVector hostColumnVector = new HostColumnVector(type, rows, Optional.of(nullCount),
          data, valid, offsets, hostColumnVectorCoreList);
      built = true;
      return hostColumnVector;
    }

    private HostColumnVectorCore buildNestedInternal() {
      List<HostColumnVectorCore> hostColumnVectorCoreList = new ArrayList<>();
      for (ColumnBuilder childBuilder : childBuilders) {
        hostColumnVectorCoreList.add(childBuilder.buildNestedInternal());
      }
      // Aligns the valid buffer size with other buffers in terms of row size, because it grows lazily.
      if (valid != null) {
        growValidBuffer();
      }
      return new HostColumnVectorCore(type, rows, Optional.of(nullCount), data, valid, offsets,
          hostColumnVectorCoreList);
    }

    public ColumnBuilder appendLists(List... inputLists) {
      for (List inputList : inputLists) {
        // one row
        append(inputList);
      }
      return this;
    }

    public ColumnBuilder appendStructValues(List<StructData> inputList) {
      for (StructData structInput : inputList) {
        // one row
        append(structInput);
      }
      return this;
    }

    public ColumnBuilder appendStructValues(StructData... inputList) {
      for (StructData structInput : inputList) {
        append(structInput);
      }
      return this;
    }

    /**
     * Grows valid buffer lazily. The valid buffer won't be materialized until the first null
     * value appended. This method reuses the rowCapacity to track the sizes of column.
     * Therefore, please call specific growBuffer method to update rowCapacity before calling
     * this method.
*/ private void growValidBuffer() { if (valid == null) { long maskBytes = ColumnView.getValidityBufferSize((int) rowCapacity); valid = HostMemoryBuffer.allocate(maskBytes); valid.setMemory(0, valid.length, (byte) 0xFF); validCapacity = rowCapacity; return; } if (validCapacity < rowCapacity) { long maskBytes = ColumnView.getValidityBufferSize((int) rowCapacity); HostMemoryBuffer newValid = HostMemoryBuffer.allocate(maskBytes); newValid.setMemory(0, newValid.length, (byte) 0xFF); valid = copyBuffer(newValid, valid); validCapacity = rowCapacity; } } /** * A method automatically grows data buffer for fixed-width columns as needed along with * incrementing the row counts. Please call this method before appending any value or null. */ private void growFixedWidthBuffersAndRows() { growFixedWidthBuffersAndRows(1); } /** * A method automatically grows data buffer for fixed-width columns for a given size as needed * along with incrementing the row counts. Please call this method before appending * multiple values or nulls. */ private void growFixedWidthBuffersAndRows(int numRows) { assert rows + numRows <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE"; rows += numRows; if (data == null) { long neededSize = Math.max(rows, estimatedRows); data = HostMemoryBuffer.allocate(neededSize << bitShiftBySize); rowCapacity = neededSize; } else if (rows > rowCapacity) { long neededSize = Math.max(rows, rowCapacity * 2); long newCap = Math.min(neededSize, Integer.MAX_VALUE - 1); data = copyBuffer(HostMemoryBuffer.allocate(newCap << bitShiftBySize), data); rowCapacity = newCap; } } /** * A method automatically grows offsets buffer for list columns as needed along with * incrementing the row counts. Please call this method before appending any value or null. */ private void growListBuffersAndRows() { assert rows + 2 <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE"; rows++; if (offsets == null) { offsets = HostMemoryBuffer.allocate((estimatedRows + 1) << bitShiftByOffset); offsets.setInt(0, 0); rowCapacity = estimatedRows; } else if (rows > rowCapacity) { long newCap = Math.min(rowCapacity * 2, Integer.MAX_VALUE - 2); offsets = copyBuffer(HostMemoryBuffer.allocate((newCap + 1) << bitShiftByOffset), offsets); rowCapacity = newCap; } } /** * A method automatically grows offsets and data buffer for string columns as needed along with * incrementing the row counts. Please call this method before appending any value or null. * * @param stringLength number of bytes required by the next row */ private void growStringBuffersAndRows(int stringLength) { assert rows + 2 <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE"; rows++; if (offsets == null) { // Initialize data buffer with at least 1 byte in case the first appended value is null. 
data = HostMemoryBuffer.allocate(Math.max(1, stringLength));
        offsets = HostMemoryBuffer.allocate((estimatedRows + 1) << bitShiftByOffset);
        offsets.setInt(0, 0);
        rowCapacity = estimatedRows;
        return;
      }
      if (rows > rowCapacity) {
        long newCap = Math.min(rowCapacity * 2, Integer.MAX_VALUE - 2);
        offsets = copyBuffer(HostMemoryBuffer.allocate((newCap + 1) << bitShiftByOffset), offsets);
        rowCapacity = newCap;
      }
      long currentLength = currentStringByteIndex + stringLength;
      if (currentLength > data.length) {
        long requiredLength = data.length;
        do {
          requiredLength = requiredLength * 2;
        } while (currentLength > requiredLength);
        data = copyBuffer(HostMemoryBuffer.allocate(requiredLength), data);
      }
    }

    /**
     * For struct columns, we only need to update rows and rowCapacity (for the growth of
     * valid buffer), because struct columns hold no buffer itself.
     * Please call this method before appending any value or null.
     */
    private void growStructBuffersAndRows() {
      assert rows + 1 <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE";
      rows++;

      if (rowCapacity == 0) {
        rowCapacity = estimatedRows;
      } else if (rows > rowCapacity) {
        rowCapacity = Math.min(rowCapacity * 2, Integer.MAX_VALUE - 1);
      }
    }

    private HostMemoryBuffer copyBuffer(HostMemoryBuffer targetBuffer, HostMemoryBuffer buffer) {
      try {
        targetBuffer.copyFromHostBuffer(0, buffer, 0, buffer.length);
        buffer.close();
        buffer = targetBuffer;
        targetBuffer = null;
      } finally {
        if (targetBuffer != null) {
          targetBuffer.close();
        }
      }
      return buffer;
    }

    /**
     * Method that sets the null bit in the validity vector
     * @param index the row index at which the null is marked
     */
    private void setNullAt(long index) {
      assert index < rows : "Index for null value should fit the column with " + rows + " rows";
      nullCount += BitVectorHelper.setNullAt(valid, index);
    }

    public final ColumnBuilder appendNull() {
      nullHandler.run();
      return this;
    }

    //For structs
    private ColumnBuilder append(StructData structData) {
      assert type.isNestedType();
      if (type.equals(DType.STRUCT)) {
        if (structData == null || structData.dataRecord == null) {
          return appendNull();
        } else {
          for (int i = 0; i < structData.getNumFields(); i++) {
            ColumnBuilder childBuilder = childBuilders.get(i);
            appendChildOrNull(childBuilder, structData.dataRecord.get(i));
          }
          endStruct();
        }
      }
      return this;
    }

    private boolean allChildrenHaveSameIndex() {
      if (childBuilders.size() > 0) {
        int expected = childBuilders.get(0).getCurrentIndex();
        for (ColumnBuilder child: childBuilders) {
          if (child.getCurrentIndex() != expected) {
            return false;
          }
        }
      }
      return true;
    }

    /**
     * If you want to build up a struct column you can get each child `builder.getChild(N)` and
     * append to all of them, then when you are done call `endStruct` to update this builder.
     * Do not start to append to the child and then append a null to this without ending the struct
     * first or you might not get the results that you expected.
     * @return this for chaining.
     */
    public ColumnBuilder endStruct() {
      assert type.equals(DType.STRUCT) : "This only works for structs";
      assert allChildrenHaveSameIndex() : "Appending structs data appears to be off " +
          childBuilders + " should all have the same currentIndex " + type;
      growStructBuffersAndRows();
      currentIndex++;
      return this;
    }

    /**
     * If you want to build up a list column you can get `builder.getChild(0)` and append to that,
     * then when you are done call `endList` and everything that was appended to that builder
     * will now be in the next list. Do not start to append to the child and then append a null
     * to this without ending the list first or you might not get the results that you expected.
     * @return this for chaining.
     */
    public ColumnBuilder endList() {
      assert type.equals(DType.LIST);
      growListBuffersAndRows();
      offsets.setInt(++currentIndex << bitShiftByOffset, childBuilders.get(0).getCurrentIndex());
      return this;
    }

    // For lists
    private <T> ColumnBuilder append(List<T> inputList) {
      if (inputList == null) {
        appendNull();
      } else {
        ColumnBuilder childBuilder = childBuilders.get(0);
        for (Object listElement : inputList) {
          appendChildOrNull(childBuilder, listElement);
        }
        endList();
      }
      return this;
    }

    private void appendChildOrNull(ColumnBuilder childBuilder, Object listElement) {
      if (listElement == null) {
        childBuilder.appendNull();
      } else if (listElement instanceof Integer) {
        childBuilder.append((Integer) listElement);
      } else if (listElement instanceof String) {
        childBuilder.append((String) listElement);
      } else if (listElement instanceof Double) {
        childBuilder.append((Double) listElement);
      } else if (listElement instanceof Float) {
        childBuilder.append((Float) listElement);
      } else if (listElement instanceof Boolean) {
        childBuilder.append((Boolean) listElement);
      } else if (listElement instanceof Long) {
        childBuilder.append((Long) listElement);
      } else if (listElement instanceof Byte) {
        childBuilder.append((Byte) listElement);
      } else if (listElement instanceof Short) {
        childBuilder.append((Short) listElement);
      } else if (listElement instanceof BigDecimal) {
        childBuilder.append((BigDecimal) listElement);
      } else if (listElement instanceof BigInteger) {
        childBuilder.append((BigInteger) listElement);
      } else if (listElement instanceof List) {
        childBuilder.append((List<?>) listElement);
      } else if (listElement instanceof StructData) {
        childBuilder.append((StructData) listElement);
      } else if (listElement instanceof byte[]) {
        childBuilder.appendUTF8String((byte[]) listElement);
      } else {
        throw new IllegalStateException("Unexpected element type: " + listElement.getClass());
      }
    }

    @Deprecated
    public void incrCurrentIndex() {
      currentIndex = currentIndex + 1;
    }

    public int getCurrentIndex() {
      return (int) currentIndex;
    }

    @Deprecated
    public int getCurrentByteIndex() {
      return currentStringByteIndex;
    }

    public final ColumnBuilder append(byte value) {
      growFixedWidthBuffersAndRows();
      assert type.isBackedByByte();
      assert currentIndex < rows;
      data.setByte(currentIndex++ << bitShiftBySize, value);
      return this;
    }

    public final ColumnBuilder append(short value) {
      growFixedWidthBuffersAndRows();
      assert type.isBackedByShort();
      assert currentIndex < rows;
      data.setShort(currentIndex++ << bitShiftBySize, value);
      return this;
    }

    public final ColumnBuilder append(int value) {
      growFixedWidthBuffersAndRows();
      assert type.isBackedByInt();
      assert currentIndex < rows;
      data.setInt(currentIndex++ << bitShiftBySize, value);
      return this;
    }

    public final ColumnBuilder append(long value) {
      growFixedWidthBuffersAndRows();
      assert type.isBackedByLong();
      assert currentIndex < rows;
      data.setLong(currentIndex++ << bitShiftBySize, value);
      return this;
    }

    public final ColumnBuilder append(float value) {
      growFixedWidthBuffersAndRows();
      assert type.equals(DType.FLOAT32);
      assert currentIndex < rows;
      data.setFloat(currentIndex++ << bitShiftBySize, value);
      return this;
    }

    public final ColumnBuilder append(double value) {
      growFixedWidthBuffersAndRows();
      assert type.equals(DType.FLOAT64);
      assert currentIndex < rows;
      data.setDouble(currentIndex++ << bitShiftBySize, value);
      return this;
    }

    public final ColumnBuilder append(boolean value) {
      growFixedWidthBuffersAndRows();
      assert type.equals(DType.BOOL8);
      assert currentIndex < rows;
      data.setBoolean(currentIndex++ << bitShiftBySize, value);
      return this;
    }

    public ColumnBuilder append(BigDecimal value) {
      return append(value.setScale(-type.getScale(), RoundingMode.UNNECESSARY).unscaledValue());
    }

    public ColumnBuilder append(BigInteger unscaledVal) {
      growFixedWidthBuffersAndRows();
      assert currentIndex < rows;
      if (type.typeId == DType.DTypeEnum.DECIMAL32) {
        data.setInt(currentIndex++ << bitShiftBySize, unscaledVal.intValueExact());
      } else if (type.typeId == DType.DTypeEnum.DECIMAL64) {
        data.setLong(currentIndex++ << bitShiftBySize, unscaledVal.longValueExact());
      } else if (type.typeId == DType.DTypeEnum.DECIMAL128) {
        byte[] unscaledValueBytes = unscaledVal.toByteArray();
        byte[] result = convertDecimal128FromJavaToCudf(unscaledValueBytes);
        data.setBytes(currentIndex++ << bitShiftBySize, result, 0, result.length);
      } else {
        throw new IllegalStateException(type + " is not a supported decimal type.");
      }
      return this;
    }

    public ColumnBuilder append(String value) {
      assert value != null : "appendNull must be used to append null strings";
      return appendUTF8String(value.getBytes(StandardCharsets.UTF_8));
    }

    public ColumnBuilder appendUTF8String(byte[] value) {
      return appendUTF8String(value, 0, value.length);
    }

    public ColumnBuilder appendUTF8String(byte[] value, int srcOffset, int length) {
      assert value != null : "appendNull must be used to append null strings";
      assert srcOffset >= 0;
      assert length >= 0;
      assert length + srcOffset <= value.length;
      assert type.equals(DType.STRING) : " type " + type + " is not String";
      growStringBuffersAndRows(length);
      assert currentIndex < rows;
      if (length > 0) {
        data.setBytes(currentStringByteIndex, value, srcOffset, length);
      }
      currentStringByteIndex += length;
      offsets.setInt(++currentIndex << bitShiftByOffset, currentStringByteIndex);
      return this;
    }

    /**
     * Append multiple non-null byte values.
     */
    public ColumnBuilder append(byte[] value, int srcOffset, int length) {
      assert type.isBackedByByte();
      assert srcOffset >= 0;
      assert length >= 0;
      assert length + srcOffset <= value.length;

      if (length > 0) {
        growFixedWidthBuffersAndRows(length);
        assert currentIndex < rows;
        data.setBytes(currentIndex, value, srcOffset, length);
      }
      currentIndex += length;
      return this;
    }

    /**
     * Appends byte to a LIST of INT8/UINT8
     */
    public ColumnBuilder appendByteList(byte[] value) {
      return appendByteList(value, 0, value.length);
    }

    /**
     * Appends bytes to a LIST of INT8/UINT8
     */
    public ColumnBuilder appendByteList(byte[] value, int srcOffset, int length) {
      assert value != null : "appendNull must be used to append null bytes";
      assert type.equals(DType.LIST) : " type " + type + " is not LIST";
      getChild(0).append(value, srcOffset, length);
      return endList();
    }
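
    /*
     * A minimal sketch of how the string-append path above is typically used (the values here
     * are only illustrative):
     *
     *   try (ColumnBuilder cb = new ColumnBuilder(
     *       new HostColumnVector.BasicType(true, DType.STRING), 3)) {
     *     cb.append("a");                                             // encodes to UTF-8 and grows data
     *     cb.appendUTF8String("bc".getBytes(StandardCharsets.UTF_8)); // skips the encode step
     *     cb.appendNull();                                            // offset written, validity bit cleared
     *     try (HostColumnVector hcv = cb.build()) {
     *       // hcv now holds the 3 rows: "a", "bc", null
     *     }
     *   }
     */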

    /**
     * Accepts a byte array containing the two's-complement representation of the unscaled value, which
     * is in big-endian byte-order. Then, transforms it into the representation of cuDF Decimal128 for
     * appending.
     * This method is more efficient than `append(BigInteger unscaledVal)` if we can directly access the
     * two's-complement representation of a BigDecimal without encoding via the method `toByteArray`.
     */
    public ColumnBuilder appendDecimal128(byte[] binary) {
      growFixedWidthBuffersAndRows();
      assert type.getTypeId().equals(DType.DTypeEnum.DECIMAL128);
      assert currentIndex < rows;
      assert binary.length <= type.getSizeInBytes();
      byte[] cuBinary = convertDecimal128FromJavaToCudf(binary);
      data.setBytes(currentIndex++ << bitShiftBySize, cuBinary, 0, cuBinary.length);
      return this;
    }

    public ColumnBuilder getChild(int index) {
      return childBuilders.get(index);
    }

    /**
     * Finish and create the immutable ColumnVector, copied to the device.
     */
    public final ColumnVector buildAndPutOnDevice() {
      try (HostColumnVector tmp = build()) {
        return tmp.copyToDevice();
      }
    }

    @Override
    public void close() {
      if (!built) {
        if (data != null) {
          data.close();
          data = null;
        }
        if (valid != null) {
          valid.close();
          valid = null;
        }
        if (offsets != null) {
          offsets.close();
          offsets = null;
        }
        for (ColumnBuilder childBuilder : childBuilders) {
          childBuilder.close();
        }
        built = true;
      }
    }

    @Override
    public String toString() {
      StringJoiner sj = new StringJoiner(",");
      for (ColumnBuilder cb : childBuilders) {
        sj.add(cb.toString());
      }
      return "ColumnBuilder{" +
          "type=" + type +
          ", children=" + sj +
          ", data=" + data +
          ", valid=" + valid +
          ", currentIndex=" + currentIndex +
          ", nullCount=" + nullCount +
          ", estimatedRows=" + estimatedRows +
          ", populatedRows=" + rows +
          ", built=" + built +
          '}';
    }
  }

  public static final class Builder implements AutoCloseable {
    private final long rows;
    private final DType type;
    private HostMemoryBuffer data;
    private HostMemoryBuffer valid;
    private HostMemoryBuffer offsets;
    private long currentIndex = 0;
    private long nullCount;
    private int currentStringByteIndex = 0;
    private boolean built;

    /**
     * Create a builder with a buffer of size rows
     * @param type datatype
     * @param rows number of rows to allocate.
     * @param stringBufferSize the size of the string data buffer if we are
     *                         working with Strings. It is ignored otherwise.
     */
    Builder(DType type, long rows, long stringBufferSize) {
      this.type = type;
      this.rows = rows;
      if (type.equals(DType.STRING)) {
        if (stringBufferSize <= 0) {
          // We need at least one byte or we will get NULL back for data
          stringBufferSize = 1;
        }
        this.data = HostMemoryBuffer.allocate(stringBufferSize);
        // The offsets are ints and there is 1 more than the number of rows.
        this.offsets = HostMemoryBuffer.allocate((rows + 1) * OFFSET_SIZE);
        // The first offset is always 0
        this.offsets.setInt(0, 0);
      } else {
        this.data = HostMemoryBuffer.allocate(rows * type.getSizeInBytes());
      }
    }

    /**
     * Create a builder with a buffer of size rows (for testing ONLY).
     * @param type datatype
     * @param rows number of rows to allocate.
     * @param testData a buffer to hold the data (should be large enough to hold rows entries).
     * @param testValid a buffer to hold the validity vector (should be large enough to hold
     *                  rows entries or is null).
     * @param testOffsets a buffer to hold the offsets for strings and string categories.
     */
    Builder(DType type, long rows, HostMemoryBuffer testData,
            HostMemoryBuffer testValid, HostMemoryBuffer testOffsets) {
      this.type = type;
      this.rows = rows;
      this.data = testData;
      this.valid = testValid;
    }

    public final Builder append(boolean value) {
      assert type.equals(DType.BOOL8);
      assert currentIndex < rows;
      data.setByte(currentIndex * type.getSizeInBytes(), value ? (byte)1 : (byte)0);
      currentIndex++;
      return this;
    }

    public final Builder append(byte value) {
      assert type.isBackedByByte();
      assert currentIndex < rows;
      data.setByte(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    public final Builder append(byte value, long count) {
      assert (count + currentIndex) <= rows;
      assert type.isBackedByByte();
      data.setMemory(currentIndex * type.getSizeInBytes(), count, value);
      currentIndex += count;
      return this;
    }

    public final Builder append(short value) {
      assert type.isBackedByShort();
      assert currentIndex < rows;
      data.setShort(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    public final Builder append(int value) {
      assert type.isBackedByInt();
      assert currentIndex < rows;
      data.setInt(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    public final Builder append(long value) {
      assert type.isBackedByLong();
      assert currentIndex < rows;
      data.setLong(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    public final Builder append(float value) {
      assert type.equals(DType.FLOAT32);
      assert currentIndex < rows;
      data.setFloat(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    public final Builder append(double value) {
      assert type.equals(DType.FLOAT64);
      assert currentIndex < rows;
      data.setDouble(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    /**
     * Append java.math.BigDecimal into HostColumnVector with UNNECESSARY RoundingMode.
     * Input decimal should have a larger scale than column vector. Otherwise, an
     * ArithmeticException will be thrown while rescaling.
     * If unscaledValue after rescaling exceeds the max precision of rapids type,
     * an ArithmeticException will be thrown while extracting integral.
     *
     * @param value BigDecimal value to be appended
     */
    public final Builder append(BigDecimal value) {
      return append(value, RoundingMode.UNNECESSARY);
    }
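
    /*
     * A short sketch of the decimal scale convention (illustrative values only): the DType scale
     * is the negation of java.math.BigDecimal's scale, so a DECIMAL64 column with scale -2 holds
     * values with two fractional digits.
     *
     *   try (Builder b = HostColumnVector.builder(DType.create(DType.DTypeEnum.DECIMAL64, -2), 2)) {
     *     b.append(new BigDecimal("1.23"));                          // stored as unscaled 123
     *     b.append(new BigDecimal("1.2"), RoundingMode.UNNECESSARY); // rescaled exactly to 120
     *     // b.build() would produce a 2 row DECIMAL64 host column.
     *   }
     */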

    /**
     * Append java.math.BigDecimal into HostColumnVector with user-defined RoundingMode.
     * Input decimal will be rescaled according to scale of column type and RoundingMode before appended.
     * If unscaledValue after rescaling exceeds the max precision of rapids type, an ArithmeticException will be thrown.
     *
     * @param value BigDecimal value to be appended
     * @param roundingMode rounding mode determines rescaling behavior
     */
    public final Builder append(BigDecimal value, RoundingMode roundingMode) {
      assert type.isDecimalType();
      assert currentIndex < rows: "appended too many values " + currentIndex + " out of total rows " + rows;
      BigInteger unscaledValue = value.setScale(-type.getScale(), roundingMode).unscaledValue();
      if (type.typeId == DType.DTypeEnum.DECIMAL32) {
        assert value.precision() <= DType.DECIMAL32_MAX_PRECISION : "value exceeds maximum precision for DECIMAL32";
        data.setInt(currentIndex * type.getSizeInBytes(), unscaledValue.intValueExact());
      } else if (type.typeId == DType.DTypeEnum.DECIMAL64) {
        assert value.precision() <= DType.DECIMAL64_MAX_PRECISION : "value exceeds maximum precision for DECIMAL64 ";
        data.setLong(currentIndex * type.getSizeInBytes(), unscaledValue.longValueExact());
      } else if (type.typeId == DType.DTypeEnum.DECIMAL128) {
        assert value.precision() <= DType.DECIMAL128_MAX_PRECISION : "value exceeds maximum precision for DECIMAL128 ";
        appendUnscaledDecimal(value.unscaledValue());
        return this;
      } else {
        throw new IllegalStateException(type + " is not a supported decimal type.");
      }
      currentIndex++;
      return this;
    }

    public final Builder appendUnscaledDecimal(int value) {
      assert type.typeId == DType.DTypeEnum.DECIMAL32;
      assert currentIndex < rows;
      data.setInt(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    public final Builder appendUnscaledDecimal(long value) {
      assert type.typeId == DType.DTypeEnum.DECIMAL64;
      assert currentIndex < rows;
      data.setLong(currentIndex * type.getSizeInBytes(), value);
      currentIndex++;
      return this;
    }

    public final Builder appendUnscaledDecimal(BigInteger value) {
      assert type.typeId == DType.DTypeEnum.DECIMAL128;
      assert currentIndex < rows;
      byte[] unscaledValueBytes = value.toByteArray();
      byte[] result = convertDecimal128FromJavaToCudf(unscaledValueBytes);
      data.setBytes(currentIndex * DType.DTypeEnum.DECIMAL128.sizeInBytes, result, 0, result.length);
      currentIndex++;
      return this;
    }

    public Builder append(String value) {
      assert value != null : "appendNull must be used to append null strings";
      return appendUTF8String(value.getBytes(StandardCharsets.UTF_8));
    }

    public Builder appendUTF8String(byte[] value) {
      return appendUTF8String(value, 0, value.length);
    }

    public Builder appendUTF8String(byte[] value, int offset, int length) {
      assert value != null : "appendNull must be used to append null strings";
      assert offset >= 0;
      assert length >= 0;
      assert length + offset <= value.length;
      assert type.equals(DType.STRING);
      assert currentIndex < rows;
      // just for strings we want to throw a real exception if we would overrun the buffer
      long oldLen = data.getLength();
      long newLen = oldLen;
      while (currentStringByteIndex + length > newLen) {
        newLen *= 2;
      }
      if (newLen > Integer.MAX_VALUE) {
        throw new IllegalStateException("A string buffer is not supported over 2GB in size");
      }
      if (newLen != oldLen) {
        // need to grow the size of the buffer.
        HostMemoryBuffer newData = HostMemoryBuffer.allocate(newLen);
        try {
          newData.copyFromHostBuffer(0, data, 0, currentStringByteIndex);
          data.close();
          data = newData;
          newData = null;
        } finally {
          if (newData != null) {
            newData.close();
          }
        }
      }
      if (length > 0) {
        data.setBytes(currentStringByteIndex, value, offset, length);
      }
      currentStringByteIndex += length;
      currentIndex++;
      offsets.setInt(currentIndex * OFFSET_SIZE, currentStringByteIndex);
      return this;
    }

    public Builder appendArray(byte...
values) { assert (values.length + currentIndex) <= rows; assert type.isBackedByByte(); data.setBytes(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } public Builder appendArray(short... values) { assert type.isBackedByShort(); assert (values.length + currentIndex) <= rows; data.setShorts(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } public Builder appendArray(int... values) { assert type.isBackedByInt(); assert (values.length + currentIndex) <= rows; data.setInts(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } public Builder appendArray(long... values) { assert type.isBackedByLong(); assert (values.length + currentIndex) <= rows; data.setLongs(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } public Builder appendArray(float... values) { assert type.equals(DType.FLOAT32); assert (values.length + currentIndex) <= rows; data.setFloats(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } public Builder appendArray(double... values) { assert type.equals(DType.FLOAT64); assert (values.length + currentIndex) <= rows; data.setDoubles(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } public Builder appendUnscaledDecimalArray(int... values) { assert type.typeId == DType.DTypeEnum.DECIMAL32; assert (values.length + currentIndex) <= rows; data.setInts(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } public Builder appendUnscaledDecimalArray(long... values) { assert type.typeId == DType.DTypeEnum.DECIMAL64; assert (values.length + currentIndex) <= rows; data.setLongs(currentIndex * type.getSizeInBytes(), values, 0, values.length); currentIndex += values.length; return this; } /** * Append multiple values. This is very slow and should really only be used for tests. * @param values the values to append, including nulls. * @return this for chaining. * @throws {@link IndexOutOfBoundsException} */ public Builder appendBoxed(BigDecimal... values) throws IndexOutOfBoundsException { assert type.isDecimalType(); for (BigDecimal v : values) { if (v == null) { appendNull(); } else { append(v); } } return this; } /** * Append multiple values. This is very slow and should really only be used for tests. * @param values the values to append, including nulls. * @return this for chaining. * @throws {@link IndexOutOfBoundsException} */ public final Builder appendBoxed(Byte... values) throws IndexOutOfBoundsException { for (Byte b : values) { if (b == null) { appendNull(); } else { append(b); } } return this; } /** * Append multiple values. This is very slow and should really only be used for tests. * @param values the values to append, including nulls. * @return this for chaining. * @throws {@link IndexOutOfBoundsException} */ public final Builder appendBoxed(Boolean... values) throws IndexOutOfBoundsException { for (Boolean b : values) { if (b == null) { appendNull(); } else { append(b ? (byte) 1 : (byte) 0); } } return this; } /** * Append multiple values. This is very slow and should really only be used for tests. * @param values the values to append, including nulls. * @return this for chaining. * @throws {@link IndexOutOfBoundsException} */ public final Builder appendBoxed(Short... 
values) throws IndexOutOfBoundsException {
      for (Short b : values) {
        if (b == null) {
          appendNull();
        } else {
          append(b);
        }
      }
      return this;
    }

    /**
     * Append multiple values. This is very slow and should really only be used for tests.
     * @param values the values to append, including nulls.
     * @return this for chaining.
     * @throws {@link IndexOutOfBoundsException}
     */
    public final Builder appendBoxed(Integer... values) throws IndexOutOfBoundsException {
      for (Integer b : values) {
        if (b == null) {
          appendNull();
        } else {
          append(b);
        }
      }
      return this;
    }

    /**
     * Append multiple values. This is very slow and should really only be used for tests.
     * @param values the values to append, including nulls.
     * @return this for chaining.
     * @throws {@link IndexOutOfBoundsException}
     */
    public final Builder appendBoxed(Long... values) throws IndexOutOfBoundsException {
      for (Long b : values) {
        if (b == null) {
          appendNull();
        } else {
          append(b);
        }
      }
      return this;
    }

    /**
     * Append multiple values. This is very slow and should really only be used for tests.
     * @param values the values to append, including nulls.
     * @return this for chaining.
     * @throws {@link IndexOutOfBoundsException}
     */
    public final Builder appendBoxed(Float... values) throws IndexOutOfBoundsException {
      for (Float b : values) {
        if (b == null) {
          appendNull();
        } else {
          append(b);
        }
      }
      return this;
    }

    /**
     * Append multiple values. This is very slow and should really only be used for tests.
     * @param values the values to append, including nulls.
     * @return this for chaining.
     * @throws {@link IndexOutOfBoundsException}
     */
    public final Builder appendBoxed(Double... values) throws IndexOutOfBoundsException {
      for (Double b : values) {
        if (b == null) {
          appendNull();
        } else {
          append(b);
        }
      }
      return this;
    }

    /**
     * Append multiple values. This is very slow and should really only be used for tests.
     * @param values the values to append, including nulls.
     * @return this for chaining.
     * @throws {@link IndexOutOfBoundsException}
     */
    public final Builder appendBoxed(String... values) throws IndexOutOfBoundsException {
      for (String b : values) {
        if (b == null) {
          appendNull();
        } else {
          append(b);
        }
      }
      return this;
    }

    // TODO see if we can remove this...
    /**
     * Append the contents of the given column vector to the end of this builder.
     * @param columnVector - Vector to be added
     * @return - this for chaining
     */
    public final Builder append(HostColumnVector columnVector) {
      assert columnVector.rows <= (rows - currentIndex);
      assert columnVector.type.equals(type);

      if (type.equals(DType.STRING)) {
        throw new UnsupportedOperationException(
            "Appending a string column vector client side is not currently supported");
      } else {
        data.copyFromHostBuffer(currentIndex * type.getSizeInBytes(), columnVector.offHeap.data,
            0L, columnVector.getRowCount() * type.getSizeInBytes());
      }

      // As this is doing the append on the host assume that a null count is available
      long otherNc = columnVector.getNullCount();
      if (otherNc != 0) {
        if (valid == null) {
          allocateBitmaskAndSetDefaultValues();
        }
        // copy the validity values from columnVector to this
        BitVectorHelper.append(columnVector.offHeap.valid, valid, currentIndex,
            columnVector.rows);
        nullCount += otherNc;
      }
      currentIndex += columnVector.rows;
      return this;
    }

    private void allocateBitmaskAndSetDefaultValues() {
      long bitmaskSize = ColumnView.getValidityBufferSize((int) rows);
      valid = HostMemoryBuffer.allocate(bitmaskSize);
      valid.setMemory(0, bitmaskSize, (byte) 0xFF);
    }
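
    /*
     * A sketch of concatenating an existing host vector through append(HostColumnVector) above
     * (illustrative values; both sides must share the same fixed-width type):
     *
     *   try (HostColumnVector tail = HostColumnVector.fromBoxedInts(4, null, 6);
     *        Builder b = HostColumnVector.builder(DType.INT32, 6)) {
     *     b.appendArray(1, 2, 3); // first three rows
     *     b.append(tail);         // copies the data and merges the validity bits
     *     try (HostColumnVector all = b.build()) {
     *       // all == [1, 2, 3, 4, null, 6]
     *     }
     *   }
     */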

    /**
     * Append null value.
     */
    public final Builder appendNull() {
      setNullAt(currentIndex);
      currentIndex++;
      if (type.equals(DType.STRING)) {
        offsets.setInt(currentIndex * OFFSET_SIZE, currentStringByteIndex);
      }
      return this;
    }

    /**
     * Set a specific index to null.
     * @param index the row index to mark as null
     */
    public final Builder setNullAt(long index) {
      assert index < rows;
      // add null
      if (this.valid == null) {
        allocateBitmaskAndSetDefaultValues();
      }
      nullCount += BitVectorHelper.setNullAt(valid, index);
      return this;
    }

    /**
     * Finish and create the immutable CudfColumn.
     */
    public final HostColumnVector build() {
      HostColumnVector cv = new HostColumnVector(type, currentIndex,
          Optional.of(nullCount), data, valid, offsets);
      built = true;
      return cv;
    }

    /**
     * Finish and create the immutable ColumnVector, copied to the device.
     */
    public final ColumnVector buildAndPutOnDevice() {
      try (HostColumnVector tmp = build()) {
        return tmp.copyToDevice();
      }
    }

    /**
     * Close this builder and free memory if the CudfColumn wasn't generated. Verifies that
     * the data was released even in the case of an error.
     */
    @Override
    public final void close() {
      if (!built) {
        data.close();
        data = null;
        if (valid != null) {
          valid.close();
          valid = null;
        }
        if (offsets != null) {
          offsets.close();
          offsets = null;
        }
        built = true;
      }
    }

    @Override
    public String toString() {
      return "Builder{" +
          "data=" + data +
          ", type=" + type +
          ", valid=" + valid +
          ", currentIndex=" + currentIndex +
          ", nullCount=" + nullCount +
          ", rows=" + rows +
          ", built=" + built +
          '}';
    }
  }

  public static abstract class DataType {
    abstract DType getType();
    abstract boolean isNullable();
    abstract DataType getChild(int index);
    abstract int getNumChildren();
  }

  public static class ListType extends HostColumnVector.DataType {
    private boolean isNullable;
    private HostColumnVector.DataType child;

    public ListType(boolean isNullable, DataType child) {
      this.isNullable = isNullable;
      this.child = child;
    }

    @Override
    DType getType() {
      return DType.LIST;
    }

    @Override
    boolean isNullable() {
      return isNullable;
    }

    @Override
    HostColumnVector.DataType getChild(int index) {
      if (index > 0) {
        return null;
      }
      return child;
    }

    @Override
    int getNumChildren() {
      return 1;
    }
  }

  public static class StructData {
    List<Object> dataRecord;

    public StructData(List<Object> dataRecord) {
      this.dataRecord = dataRecord;
    }

    public StructData(Object... data) {
      this(Arrays.asList(data));
    }

    public int getNumFields() {
      if (dataRecord != null) {
        return dataRecord.size();
      } else {
        return 0;
      }
    }
  }

  public static class StructType extends HostColumnVector.DataType {
    private boolean isNullable;
    private List<HostColumnVector.DataType> children;

    public StructType(boolean isNullable, List<HostColumnVector.DataType> children) {
      this.isNullable = isNullable;
      this.children = children;
    }

    public StructType(boolean isNullable, DataType... children) {
      this(isNullable, Arrays.asList(children));
    }

    @Override
    DType getType() {
      return DType.STRUCT;
    }

    @Override
    boolean isNullable() {
      return isNullable;
    }

    @Override
    HostColumnVector.DataType getChild(int index) {
      return children.get(index);
    }

    @Override
    int getNumChildren() {
      return children.size();
    }
  }

  public static class BasicType extends HostColumnVector.DataType {
    private DType type;
    private boolean isNullable;

    public BasicType(boolean isNullable, DType type) {
      this.isNullable = isNullable;
      this.type = type;
    }

    @Override
    DType getType() {
      return type;
    }

    @Override
    boolean isNullable() {
      return isNullable;
    }

    @Override
    HostColumnVector.DataType getChild(int index) {
      return null;
    }

    @Override
    int getNumChildren() {
      return 0;
    }
  }
}
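
/*
 * A compact end-to-end sketch (illustrative only) that ties the pieces above together: build a
 * nested LIST<INT32> column on the host via fromLists, then move it to the device with
 * copyToDevice.
 *
 *   HostColumnVector.DataType listOfInts = new HostColumnVector.ListType(true,
 *       new HostColumnVector.BasicType(true, DType.INT32));
 *   try (HostColumnVector hcv = HostColumnVector.fromLists(listOfInts,
 *            java.util.Arrays.asList(1, 2), null, java.util.Arrays.asList(3));
 *        ColumnVector cv = hcv.copyToDevice()) {
 *     // cv owns device copies of the data, validity and offset buffers
 *   }
 */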
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Aggregation.java
/*
 *
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package ai.rapids.cudf;

import java.util.Arrays;

/**
 * Represents an aggregation operation. Please note that not all aggregations work, or even make
 * sense in all types of aggregation operations.
 */
abstract class Aggregation {
  static {
    NativeDepsLoader.loadNativeDeps();
  }

  /*
   * This should be kept in sync with AggregationJni.cpp. Note that the nativeId here is not the
   * same as the C++ cudf::aggregation::Kind. They are very closely related, but both are
   * implementation details and generally should be hidden from the end user.
   * Visible for testing.
   */
  enum Kind {
    SUM(0),
    PRODUCT(1),
    MIN(2),
    MAX(3),
    COUNT(4),
    ANY(5),
    ALL(6),
    SUM_OF_SQUARES(7),
    MEAN(8),
    VARIANCE(9), // This can take a delta degrees of freedom
    STD(10), // This can take a delta degrees of freedom
    MEDIAN(11),
    QUANTILE(12),
    ARGMAX(13),
    ARGMIN(14),
    NUNIQUE(15),
    NTH_ELEMENT(16),
    ROW_NUMBER(17),
    COLLECT_LIST(18),
    COLLECT_SET(19),
    MERGE_LISTS(20),
    MERGE_SETS(21),
    LEAD(22),
    LAG(23),
    PTX(24),
    CUDA(25),
    M2(26),
    MERGE_M2(27),
    RANK(28),
    DENSE_RANK(29),
    PERCENT_RANK(30),
    TDIGEST(31), // This can take a delta argument for accuracy level
    MERGE_TDIGEST(32), // This can take a delta argument for accuracy level
    HISTOGRAM(33),
    MERGE_HISTOGRAM(34);

    final int nativeId;

    Kind(int nativeId) {this.nativeId = nativeId;}
  }
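
  /*
   * A small illustration of how the table above is used (sketch): the nativeId is the value
   * handed across JNI when an aggregation is instantiated, e.g. the no-parameter aggregations
   * below effectively do
   *
   *   long handle = Aggregation.createNoParamAgg(Kind.SUM.nativeId); // nativeId == 0
   *
   * and the C++ side maps that id back to the matching cudf aggregation.
   */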

  /**
   * An Aggregation that only needs a kind and nothing else.
   */
  private static class NoParamAggregation extends Aggregation {
    public NoParamAggregation(Kind kind) {
      super(kind);
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createNoParamAgg(kind.nativeId);
    }

    @Override
    public int hashCode() {
      return kind.hashCode();
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof NoParamAggregation) {
        NoParamAggregation o = (NoParamAggregation) other;
        return o.kind.equals(this.kind);
      }
      return false;
    }
  }

  static final class NthAggregation extends Aggregation {
    private final int offset;
    private final NullPolicy nullPolicy;

    private NthAggregation(int offset, NullPolicy nullPolicy) {
      super(Kind.NTH_ELEMENT);
      this.offset = offset;
      this.nullPolicy = nullPolicy;
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createNthAgg(offset, nullPolicy.includeNulls);
    }

    @Override
    public int hashCode() {
      return 31 * offset + nullPolicy.hashCode();
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof NthAggregation) {
        NthAggregation o = (NthAggregation) other;
        return o.offset == this.offset && o.nullPolicy == this.nullPolicy;
      }
      return false;
    }
  }

  private static class DdofAggregation extends Aggregation {
    private final int ddof;

    public DdofAggregation(Kind kind, int ddof) {
      super(kind);
      this.ddof = ddof;
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createDdofAgg(kind.nativeId, ddof);
    }

    @Override
    public int hashCode() {
      return 31 * kind.hashCode() + ddof;
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof DdofAggregation) {
        DdofAggregation o = (DdofAggregation) other;
        return o.ddof == this.ddof;
      }
      return false;
    }
  }

  private static class CountLikeAggregation extends Aggregation {
    private final NullPolicy nullPolicy;

    public CountLikeAggregation(Kind kind, NullPolicy nullPolicy) {
      super(kind);
      this.nullPolicy = nullPolicy;
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createCountLikeAgg(kind.nativeId, nullPolicy.includeNulls);
    }

    @Override
    public int hashCode() {
      return 31 * kind.hashCode() + nullPolicy.hashCode();
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof CountLikeAggregation) {
        CountLikeAggregation o = (CountLikeAggregation) other;
        return o.nullPolicy == this.nullPolicy;
      }
      return false;
    }
  }

  private static final class QuantileAggregation extends Aggregation {
    private final QuantileMethod method;
    private final double[] quantiles;

    public QuantileAggregation(QuantileMethod method, double[] quantiles) {
      super(Kind.QUANTILE);
      this.method = method;
      this.quantiles = quantiles;
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createQuantAgg(method.nativeId, quantiles);
    }

    @Override
    public int hashCode() {
      return 31 * (31 * kind.hashCode() + method.hashCode()) + Arrays.hashCode(quantiles);
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof QuantileAggregation) {
        QuantileAggregation o = (QuantileAggregation) other;
        return this.method == o.method && Arrays.equals(this.quantiles, o.quantiles);
      }
      return false;
    }
  }

  private static class LeadLagAggregation extends Aggregation {
    private final int offset;
    private final ColumnVector defaultOutput;

    LeadLagAggregation(Kind kind, int offset, ColumnVector defaultOutput) {
      super(kind);
      this.offset = offset;
      this.defaultOutput = defaultOutput;
    }

    @Override
    long createNativeInstance() {
      // Default output comes from a different path
      return Aggregation.createLeadLagAgg(kind.nativeId, offset);
    }

    @Override
    public int hashCode() {
      int ret = 31 * kind.hashCode() + offset;
      if (defaultOutput != null) {
        ret = 31 * ret + defaultOutput.hashCode();
      }
      return ret;
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof LeadLagAggregation) {
        LeadLagAggregation o = (LeadLagAggregation) other;
        boolean ret = o.kind == this.kind && this.offset == o.offset;
        if (defaultOutput != null) {
          ret = ret && defaultOutput.equals(o.defaultOutput);
        } else if (o.defaultOutput != null) {
          // defaultOutput == null and o.defaultOutput != null so they are not equal
          ret = false;
        } // else they are both null which is the same and a noop.
        return ret;
      }
      return false;
    }

    @Override
    long getDefaultOutput() {
      return defaultOutput == null ? 0 : defaultOutput.getNativeView();
    }
  }

  static final class CollectListAggregation extends Aggregation {
    private final NullPolicy nullPolicy;

    private CollectListAggregation(NullPolicy nullPolicy) {
      super(Kind.COLLECT_LIST);
      this.nullPolicy = nullPolicy;
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createCollectListAgg(nullPolicy.includeNulls);
    }

    @Override
    public int hashCode() {
      return 31 * kind.hashCode() + nullPolicy.hashCode();
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof CollectListAggregation) {
        CollectListAggregation o = (CollectListAggregation) other;
        return o.nullPolicy == this.nullPolicy;
      }
      return false;
    }
  }

  static final class CollectSetAggregation extends Aggregation {
    private final NullPolicy nullPolicy;
    private final NullEquality nullEquality;
    private final NaNEquality nanEquality;

    private CollectSetAggregation(NullPolicy nullPolicy, NullEquality nullEquality,
        NaNEquality nanEquality) {
      super(Kind.COLLECT_SET);
      this.nullPolicy = nullPolicy;
      this.nullEquality = nullEquality;
      this.nanEquality = nanEquality;
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createCollectSetAgg(nullPolicy.includeNulls,
          nullEquality.nullsEqual,
          nanEquality.nansEqual);
    }

    @Override
    public int hashCode() {
      return 31 * kind.hashCode() + Boolean.hashCode(nullPolicy.includeNulls) +
          Boolean.hashCode(nullEquality.nullsEqual) +
          Boolean.hashCode(nanEquality.nansEqual);
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof CollectSetAggregation) {
        CollectSetAggregation o = (CollectSetAggregation) other;
        return o.nullPolicy == this.nullPolicy &&
            o.nullEquality == this.nullEquality &&
            o.nanEquality == this.nanEquality;
      }
      return false;
    }
  }

  static final class MergeSetsAggregation extends Aggregation {
    private final NullEquality nullEquality;
    private final NaNEquality nanEquality;

    private MergeSetsAggregation(NullEquality nullEquality, NaNEquality nanEquality) {
      super(Kind.MERGE_SETS);
      this.nullEquality = nullEquality;
      this.nanEquality = nanEquality;
    }

    @Override
    long createNativeInstance() {
      return Aggregation.createMergeSetsAgg(nullEquality.nullsEqual, nanEquality.nansEqual);
    }

    @Override
    public int hashCode() {
      return 31 * kind.hashCode() + Boolean.hashCode(nullEquality.nullsEqual) +
          Boolean.hashCode(nanEquality.nansEqual);
    }

    @Override
    public boolean equals(Object other) {
      if (this == other) {
        return true;
      } else if (other instanceof MergeSetsAggregation) {
        MergeSetsAggregation o = (MergeSetsAggregation) other;
        return o.nullEquality == this.nullEquality && o.nanEquality == this.nanEquality;
      }
      return false;
    }
} protected final Kind kind; protected Aggregation(Kind kind) { this.kind = kind; } /** * Get the native view of a ColumnVector that provides default values to be used for some window * aggregations when there is not enough data to do the computation. This really only happens * for a small number of window aggregations. Also note that the ownership and life cycle of * the column are controlled outside of this, so don't try to close it. * @return the native view of the column vector or 0. */ long getDefaultOutput() { return 0; } /** * Returns a <code>cudf::aggregation *</code> cast to a long. We don't want to force users to * close an Aggregation. Because of this, Aggregation objects are created in pure Java, but when * it is time to use them this method is called to return a pointer to the C++ aggregation * instance. All values returned by this can be used multiple times, and should be closed by * calling the static close method. Yes, this creates a lot more JNI calls, but it keeps the * user API clean. */ abstract long createNativeInstance(); @Override public abstract int hashCode(); @Override public abstract boolean equals(Object other); static void close(long[] ptrs) { for (long ptr: ptrs) { if (ptr != 0) { close(ptr); } } } static native void close(long ptr); static final class SumAggregation extends NoParamAggregation { private SumAggregation() { super(Kind.SUM); } } /** * Sum reduction. */ static SumAggregation sum() { return new SumAggregation(); } static final class ProductAggregation extends NoParamAggregation { private ProductAggregation() { super(Kind.PRODUCT); } } /** * Product reduction. */ static ProductAggregation product() { return new ProductAggregation(); } static final class MinAggregation extends NoParamAggregation { private MinAggregation() { super(Kind.MIN); } } /** * Min reduction. */ static MinAggregation min() { return new MinAggregation(); } static final class MaxAggregation extends NoParamAggregation { private MaxAggregation() { super(Kind.MAX); } } /** * Max reduction. */ static MaxAggregation max() { return new MaxAggregation(); } static final class CountAggregation extends CountLikeAggregation { private CountAggregation(NullPolicy nullPolicy) { super(Kind.COUNT, nullPolicy); } } /** * Count the number of valid, a.k.a. non-null, elements. */ static CountAggregation count() { return count(NullPolicy.EXCLUDE); } /** * Count the number of elements. * @param nullPolicy INCLUDE if nulls should be counted. EXCLUDE if only non-null values * should be counted. */ static CountAggregation count(NullPolicy nullPolicy) { return new CountAggregation(nullPolicy); } static final class AnyAggregation extends NoParamAggregation { private AnyAggregation() { super(Kind.ANY); } } /** * Any reduction. Produces true or 1, depending on the output type, * if any of the elements in the range are true or non-zero, otherwise produces a false or 0. * Null values are skipped. */ static AnyAggregation any() { return new AnyAggregation(); } static final class AllAggregation extends NoParamAggregation { private AllAggregation() { super(Kind.ALL); } } /** * All reduction. Produces true or 1, depending on the output type, if all of the elements in * the range are true or non-zero, otherwise produces a false or 0. * Null values are skipped. */ static AllAggregation all() { return new AllAggregation(); } static final class SumOfSquaresAggregation extends NoParamAggregation { private SumOfSquaresAggregation() { super(Kind.SUM_OF_SQUARES); } } /** * Sum of squares reduction. 
*/ static SumOfSquaresAggregation sumOfSquares() { return new SumOfSquaresAggregation(); } static final class MeanAggregation extends NoParamAggregation { private MeanAggregation() { super(Kind.MEAN); } } /** * Arithmetic mean reduction. */ static MeanAggregation mean() { return new MeanAggregation(); } static final class M2Aggregation extends NoParamAggregation { private M2Aggregation() { super(Kind.M2); } } /** * Sum of squared differences from the mean. */ static M2Aggregation M2() { return new M2Aggregation(); } static final class VarianceAggregation extends DdofAggregation { private VarianceAggregation(int ddof) { super(Kind.VARIANCE, ddof); } } /** * Variance aggregation with 1 as the delta degrees of freedom. */ static VarianceAggregation variance() { return variance(1); } /** * Variance aggregation. * @param ddof delta degrees of freedom. The divisor used in calculation of variance is * <code>N - ddof</code>, where N is the population size. */ static VarianceAggregation variance(int ddof) { return new VarianceAggregation(ddof); } static final class StandardDeviationAggregation extends DdofAggregation { private StandardDeviationAggregation(int ddof) { super(Kind.STD, ddof); } } /** * Standard deviation aggregation with 1 as the delta degrees of freedom. */ static StandardDeviationAggregation standardDeviation() { return standardDeviation(1); } /** * Standard deviation aggregation. * @param ddof delta degrees of freedom. The divisor used in calculation of std is * <code>N - ddof</code>, where N is the population size. */ static StandardDeviationAggregation standardDeviation(int ddof) { return new StandardDeviationAggregation(ddof); } static final class MedianAggregation extends NoParamAggregation { private MedianAggregation() { super(Kind.MEDIAN); } } /** * Median reduction. */ static MedianAggregation median() { return new MedianAggregation(); } /** * Aggregate to compute the specified quantiles. Uses linear interpolation by default. */ static QuantileAggregation quantile(double ... quantiles) { return quantile(QuantileMethod.LINEAR, quantiles); } /** * Aggregate to compute various quantiles. */ static QuantileAggregation quantile(QuantileMethod method, double ... quantiles) { return new QuantileAggregation(method, quantiles); } static final class ArgMaxAggregation extends NoParamAggregation { private ArgMaxAggregation() { super(Kind.ARGMAX); } } /** * Index of max element. Please note that when using this aggregation with a group by, if the * data is not already sorted by the grouping keys it may be automatically sorted * prior to doing the aggregation. This would result in an index into the sorted data being * returned. */ static ArgMaxAggregation argMax() { return new ArgMaxAggregation(); } static final class ArgMinAggregation extends NoParamAggregation { private ArgMinAggregation() { super(Kind.ARGMIN); } } /** * Index of min element. Please note that when using this aggregation with a group by, if the * data is not already sorted by the grouping keys it may be automatically sorted * prior to doing the aggregation. This would result in an index into the sorted data being * returned. */ static ArgMinAggregation argMin() { return new ArgMinAggregation(); } static final class NuniqueAggregation extends CountLikeAggregation { private NuniqueAggregation(NullPolicy nullPolicy) { super(Kind.NUNIQUE, nullPolicy); } } /** * Number of unique, non-null, elements. */ static NuniqueAggregation nunique() { return nunique(NullPolicy.EXCLUDE); } /** * Number of unique elements. 
* @param nullPolicy INCLUDE if nulls should be counted else EXCLUDE. If nulls are counted they * compare as equal so multiple null values in a range would all only * increase the count by 1. */ static NuniqueAggregation nunique(NullPolicy nullPolicy) { return new NuniqueAggregation(nullPolicy); } /** * Get the nth element in a group; nulls are included by default. * @param offset the offset to look at. Negative numbers go from the end of the group. Any * value outside of the group range results in a null. */ static NthAggregation nth(int offset) { return nth(offset, NullPolicy.INCLUDE); } /** * Get the nth element in a group. * @param offset the offset to look at. Negative numbers go from the end of the group. Any * value outside of the group range results in a null. * @param nullPolicy INCLUDE if nulls should be included in the aggregation or EXCLUDE if they * should be skipped. */ static NthAggregation nth(int offset, NullPolicy nullPolicy) { return new NthAggregation(offset, nullPolicy); } static final class RowNumberAggregation extends NoParamAggregation { private RowNumberAggregation() { super(Kind.ROW_NUMBER); } } /** * Get the row number. This only makes sense for window operations. */ static RowNumberAggregation rowNumber() { return new RowNumberAggregation(); } static final class RankAggregation extends NoParamAggregation { private RankAggregation() { super(Kind.RANK); } } /** * Get the row's ranking. */ static RankAggregation rank() { return new RankAggregation(); } static final class DenseRankAggregation extends NoParamAggregation { private DenseRankAggregation() { super(Kind.DENSE_RANK); } } /** * Get the row's dense ranking. */ static DenseRankAggregation denseRank() { return new DenseRankAggregation(); } static final class PercentRankAggregation extends NoParamAggregation { private PercentRankAggregation() { super(Kind.PERCENT_RANK); } } /** * Get the row's percent ranking. */ static PercentRankAggregation percentRank() { return new PercentRankAggregation(); } /** * Collect the values into a list. Nulls will be skipped. */ static CollectListAggregation collectList() { return collectList(NullPolicy.EXCLUDE); } /** * Collect the values into a list. * * @param nullPolicy Indicates whether to include/exclude nulls during collection. */ static CollectListAggregation collectList(NullPolicy nullPolicy) { return new CollectListAggregation(nullPolicy); } /** * Collect the values into a set. All null values will be excluded, and all NaN values are regarded as * unique instances. */ static CollectSetAggregation collectSet() { return collectSet(NullPolicy.EXCLUDE, NullEquality.UNEQUAL, NaNEquality.UNEQUAL); } /** * Collect the values into a set. * * @param nullPolicy Indicates whether to include/exclude nulls during collection. * @param nullEquality Flag to specify whether null entries within each list should be considered equal. * @param nanEquality Flag to specify whether NaN values in floating point columns should be considered equal. */ static CollectSetAggregation collectSet(NullPolicy nullPolicy, NullEquality nullEquality, NaNEquality nanEquality) { return new CollectSetAggregation(nullPolicy, nullEquality, nanEquality); } static final class MergeListsAggregation extends NoParamAggregation { private MergeListsAggregation() { super(Kind.MERGE_LISTS); } } /** * Merge the partial lists produced by multiple CollectListAggregations. * NOTICE: The partial lists to be merged must NOT contain any null lists, though the lists themselves may contain null entries. 
*/ static MergeListsAggregation mergeLists() { return new MergeListsAggregation(); } /** * Merge the partial sets produced by multiple CollectSetAggregations. Each null/NaN value will be regarded as * a unique instance. */ static MergeSetsAggregation mergeSets() { return mergeSets(NullEquality.UNEQUAL, NaNEquality.UNEQUAL); } /** * Merge the partial sets produced by multiple CollectSetAggregations. * * @param nullEquality Flag to specify whether null entries within each list should be considered equal. * @param nanEquality Flag to specify whether NaN values in floating point columns should be considered equal. */ static MergeSetsAggregation mergeSets(NullEquality nullEquality, NaNEquality nanEquality) { return new MergeSetsAggregation(nullEquality, nanEquality); } static final class LeadAggregation extends LeadLagAggregation { private LeadAggregation(int offset, ColumnVector defaultOutput) { super(Kind.LEAD, offset, defaultOutput); } } /** * In a rolling window, return the value offset entries ahead, or the corresponding value from * defaultOutput if it is outside of the window. Note that this does not take any ownership of * defaultOutput and the caller must ensure that defaultOutput remains valid during the * lifetime of this aggregation operation. */ static LeadAggregation lead(int offset, ColumnVector defaultOutput) { return new LeadAggregation(offset, defaultOutput); } static final class LagAggregation extends LeadLagAggregation { private LagAggregation(int offset, ColumnVector defaultOutput) { super(Kind.LAG, offset, defaultOutput); } } /** * In a rolling window, return the value offset entries behind, or the corresponding value from * defaultOutput if it is outside of the window. Note that this does not take any ownership of * defaultOutput and the caller must ensure that defaultOutput remains valid during the * lifetime of this aggregation operation. */ static LagAggregation lag(int offset, ColumnVector defaultOutput) { return new LagAggregation(offset, defaultOutput); } public static final class MergeM2Aggregation extends NoParamAggregation { private MergeM2Aggregation() { super(Kind.MERGE_M2); } } /** * Merge the partial M2 values produced by multiple instances of M2Aggregation. 
*/ static MergeM2Aggregation mergeM2() { return new MergeM2Aggregation(); } static class TDigestAggregation extends Aggregation { private final int delta; public TDigestAggregation(Kind kind, int delta) { super(kind); this.delta = delta; } @Override long createNativeInstance() { return Aggregation.createTDigestAgg(kind.nativeId, delta); } @Override public int hashCode() { return 31 * kind.hashCode() + delta; } @Override public boolean equals(Object other) { if (this == other) { return true; } else if (other instanceof TDigestAggregation) { TDigestAggregation o = (TDigestAggregation) other; return o.delta == this.delta; } return false; } } static TDigestAggregation createTDigest(int delta) { return new TDigestAggregation(Kind.TDIGEST, delta); } static TDigestAggregation mergeTDigest(int delta) { return new TDigestAggregation(Kind.MERGE_TDIGEST, delta); } static final class HistogramAggregation extends NoParamAggregation { private HistogramAggregation() { super(Kind.HISTOGRAM); } } static final class MergeHistogramAggregation extends NoParamAggregation { private MergeHistogramAggregation() { super(Kind.MERGE_HISTOGRAM); } } static HistogramAggregation histogram() { return new HistogramAggregation(); } static MergeHistogramAggregation mergeHistogram() { return new MergeHistogramAggregation(); } /** * Create one of the aggregations that only needs a kind, no other parameters. This does not * work for all types and for code safety reasons each kind is added separately. */ private static native long createNoParamAgg(int kind); /** * Create an nth aggregation. */ private static native long createNthAgg(int offset, boolean includeNulls); /** * Create an aggregation that uses a ddof */ private static native long createDdofAgg(int kind, int ddof); /** * Create an aggregation that is like count including nulls or not. */ private static native long createCountLikeAgg(int kind, boolean includeNulls); /** * Create quantile aggregation. */ private static native long createQuantAgg(int method, double[] quantiles); /** * Create a lead or lag aggregation. */ private static native long createLeadLagAgg(int kind, int offset); /** * Create a collect list aggregation including nulls or not. */ private static native long createCollectListAgg(boolean includeNulls); /** * Create a collect set aggregation. */ private static native long createCollectSetAgg(boolean includeNulls, boolean nullsEqual, boolean nansEqual); /** * Create a merge sets aggregation. */ private static native long createMergeSetsAgg(boolean nullsEqual, boolean nansEqual); /** * Create a TDigest aggregation. */ private static native long createTDigestAgg(int kind, int delta); }
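These Kind-based classes are package-private; applications reach them through thin public wrappers such as ReductionAggregation, GroupByAggregation and RollingAggregation. A minimal group-by sketch follows, assuming `t` is an existing Table with keys in column 0 and INT64 values in column 1, and that the wrapper methods (GroupByAggregation.sum(), count(NullPolicy), onColumn, Table.groupBy(...).aggregate(...)) behave as in the public cudf Java API:

// Hypothetical input table `t`: column 0 = keys, column 1 = values.
try (Table sums = t.groupBy(0)
         .aggregate(GroupByAggregation.sum().onColumn(1),
                    GroupByAggregation.count(NullPolicy.EXCLUDE).onColumn(1))) {
  // Result: column 0 holds the group keys, column 1 the per-key sums,
  // and column 2 the per-key non-null counts.
}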
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/OrderByArg.java
/* * * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import java.io.Serializable; /** * Provides the ordering for specific columns. */ public final class OrderByArg implements Serializable { final int index; final boolean isDescending; final boolean isNullSmallest; OrderByArg(int index, boolean isDescending, boolean isNullSmallest) { this.index = index; this.isDescending = isDescending; this.isNullSmallest = isNullSmallest; } public static OrderByArg asc(final int index) { return new OrderByArg(index, false, false); } public static OrderByArg desc(final int index) { return new OrderByArg(index, true, false); } public static OrderByArg asc(final int index, final boolean isNullSmallest) { return new OrderByArg(index, false, isNullSmallest); } public static OrderByArg desc(final int index, final boolean isNullSmallest) { return new OrderByArg(index, true, isNullSmallest); } @Override public String toString() { return "ORDER BY " + index + (isDescending ? " DESC " : " ASC ") + (isNullSmallest ? "NULL SMALLEST" : "NULL LARGEST"); } }
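A minimal usage sketch, assuming `t` is an existing Table with at least two columns; Table.orderBy accepts one OrderByArg per sort key:

// Sort ascending by column 0 (nulls largest by default), then
// descending by column 1 with nulls treated as smallest.
try (Table sorted = t.orderBy(OrderByArg.asc(0), OrderByArg.desc(1, true))) {
  // use `sorted`; it must be closed when done, as with any Table
}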
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/BufferType.java
/* * * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * Types of buffers supported by ColumnVectors and HostColumnVectors */ public enum BufferType { VALIDITY, OFFSET, DATA }
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ChunkedPack.java
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * JNI interface to cudf::chunked_pack. * * ChunkedPack has an Iterator-like API with the familiar `hasNext` and `next` * methods. `next` should be used in a loop until `hasNext` returns false. * * However, `ChunkedPack.next` is special because it takes a `DeviceMemoryBuffer` as a * parameter, which means that the caller can call `next` with any bounce buffer it * may have previously allocated. The bounce buffer need not be the same each time; the only * requirement is that every buffer is the same size, matching the size that was passed to * `Table.makeChunkedPack` (which instantiates this class). * * The user of `ChunkedPack` must call `close()` when done using it, to release both * host and device resources. */ public class ChunkedPack implements AutoCloseable { long nativePtr; /** * This constructor is invoked by `Table.makeChunkedPack` after creating a native * `cudf::chunked_pack`. * @param nativePtr pointer to a `cudf::chunked_pack` */ public ChunkedPack(long nativePtr) { this.nativePtr = nativePtr; } /** * Get the final contiguous size of the table we are packing. This is * the size that the final buffer should be, just like if the user called * `cudf::pack` instead. * @return the total number of bytes for the table in contiguous layout */ public long getTotalContiguousSize() { return chunkedPackGetTotalContiguousSize(nativePtr); } /** * Check whether `ChunkedPack` has work left. Each call to this method should be * followed by a call to `next`, until `hasNext` returns false. * @return true if there is work left to be done (`next` should be called), * false otherwise. */ public boolean hasNext() { return chunkedPackHasNext(nativePtr); } /** * Place the next contiguous chunk of our table into `userPtr`. * * This method throws if `hasNext` is false. * @param userPtr the bounce buffer to use for this iteration * @return the number of bytes that we were able to place in `userPtr`. This is * at most `userPtr.getLength()`. */ public long next(DeviceMemoryBuffer userPtr) { return chunkedPackNext(nativePtr, userPtr.getAddress(), userPtr.getLength()); } /** * Generates opaque table metadata that can be unpacked via `cudf::unpack` * at a later time. * @return a `PackedColumnMetadata` instance referencing cuDF packed table metadata */ public PackedColumnMetadata buildMetadata() { return new PackedColumnMetadata(chunkedPackBuildMetadata(nativePtr)); } @Override public void close() { try { chunkedPackDelete(nativePtr); } finally { nativePtr = 0; } } private static native long chunkedPackGetTotalContiguousSize(long nativePtr); private static native boolean chunkedPackHasNext(long nativePtr); private static native long chunkedPackNext(long nativePtr, long userPtr, long userPtrSize); private static native long chunkedPackBuildMetadata(long nativePtr); private static native void chunkedPackDelete(long nativePtr); }
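A minimal iteration sketch, assuming `table` is an existing Table, that Table.makeChunkedPack takes the bounce buffer size (as the javadoc above implies), and that PackedColumnMetadata is closed like other native-backed objects; the destination copy is left abstract:

final long bounceSize = 128L * 1024 * 1024; // one 128 MiB bounce buffer
try (ChunkedPack cp = table.makeChunkedPack(bounceSize);
     DeviceMemoryBuffer bounce = DeviceMemoryBuffer.allocate(bounceSize)) {
  while (cp.hasNext()) {
    long copied = cp.next(bounce);
    // copy the first `copied` bytes out of `bounce` to the destination here
  }
  try (PackedColumnMetadata meta = cp.buildMetadata()) {
    // keep this metadata so the chunks can be reassembled via cudf::unpack later
  }
}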
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CuFileReadHandle.java
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Represents a cuFile file handle for reading. */ public final class CuFileReadHandle extends CuFileHandle { /** * Construct a reader using the specified file path. * * @param path The file path for reading. */ public CuFileReadHandle(String path) { super(create(path)); } /** * Read the file content into the specified cuFile buffer. * * @param buffer The cuFile buffer to store the content. * @param fileOffset The file offset from which to read. */ public void read(CuFileBuffer buffer, long fileOffset) { readIntoBuffer(getPointer(), fileOffset, buffer.getPointer()); } private static native long create(String path); private static native void readIntoBuffer(long file, long fileOffset, long buffer); }
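A minimal read sketch. The CuFileBuffer.allocate(length, registerBuffer) signature is the author's assumption about the companion buffer class, and the path is a placeholder:

try (CuFileReadHandle handle = new CuFileReadHandle("/data/input.bin"); // hypothetical path
     CuFileBuffer buffer = CuFileBuffer.allocate(4096, true)) {         // assumed factory
  handle.read(buffer, 0); // fill `buffer` from file offset 0 via GPUDirect Storage
}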
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/MemoryCleaner.java
/* * * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import ai.rapids.cudf.ast.CompiledExpression; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.lang.ref.ReferenceQueue; import java.lang.ref.WeakReference; import java.text.SimpleDateFormat; import java.util.Arrays; import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import java.util.stream.StreamSupport; /** * ColumnVectors may store data off heap, and because of complicated processing the lifetime of * an individual vector can vary a lot. Typically a Java finalizer could be used for this, but * finalizers can cause a number of performance issues related to GC, and in some cases may * effectively leak resources if the heap is large and GCs end up being delayed. * <p> * To address these issues, the primary way to release the resources of a ColumnVector that is * stored off of the Java heap should be through reference counting. Because memory leaks are * really bad for long-lived daemons, this is intended to be a backup. * <p> * When a ColumnVector first allocates off heap resources it should register itself with this * along with a Cleaner instance. The Cleaner instance should have no direct links to the * ColumnVector that would prevent the ColumnVector from being garbage collected. This will * use WeakReferences internally to know when the resources have been leaked. * A ColumnVector may keep a reference to the Cleaner instance and either update it as new * resources are allocated or use it to release the resources it is holding. Once the * ColumnVector's reference count reaches 0, the resources are released. At some point * later the Cleaner itself will be released. */ public final class MemoryCleaner { private static final boolean REF_COUNT_DEBUG = Boolean.getBoolean("ai.rapids.refcount.debug"); private static final Logger log = LoggerFactory.getLogger(MemoryCleaner.class); private static final AtomicLong idGen = new AtomicLong(0); /** * Check whether the shutdown hook that checks for leaks at shutdown time is configured. * * @return true if configured, false otherwise. 
*/ public static boolean configuredDefaultShutdownHook() { return REF_COUNT_DEBUG; } /** * API that can be used to clean up the resources for a vector, even if there was a leak */ public static abstract class Cleaner { private final List<RefCountDebugItem> refCountDebug; public final long id = idGen.incrementAndGet(); private boolean leakExpected = false; public Cleaner() { if (REF_COUNT_DEBUG) { refCountDebug = new LinkedList<>(); } else { refCountDebug = null; } } public final void addRef() { if (REF_COUNT_DEBUG && refCountDebug != null) { synchronized(this) { refCountDebug.add(new MemoryCleaner.RefCountDebugItem("INC")); } } } public final void delRef() { if (REF_COUNT_DEBUG && refCountDebug != null) { synchronized(this) { refCountDebug.add(new MemoryCleaner.RefCountDebugItem("DEC")); } } } public final void logRefCountDebug(String message) { if (REF_COUNT_DEBUG && refCountDebug != null) { synchronized(this) { log.error("{} (ID: {}): {}", message, id, MemoryCleaner.stringJoin("\n", refCountDebug)); } } } /** * Clean up any resources not previously released. * @param logErrorIfNotClean if true we should log a leak unless it is expected. * @return true if resources were cleaned up else false. */ public final boolean clean(boolean logErrorIfNotClean) { boolean cleaned = cleanImpl(logErrorIfNotClean && !leakExpected); if (cleaned) { all.remove(id); } return cleaned; } /** * Return true if a leak is expected for this object else false. */ public final boolean isLeakExpected() { return leakExpected; } /** * Clean up any resources not previously released. * @param logErrorIfNotClean if true and there are resources to clean up a leak has happened * so log it. * @return true if resources were cleaned up else false. */ protected abstract boolean cleanImpl(boolean logErrorIfNotClean); public void noWarnLeakExpected() { leakExpected = true; } /** * Check if the underlying memory has been cleaned up or not. * @return true this is clean else false. */ public abstract boolean isClean(); } static final AtomicLong leakCount = new AtomicLong(); private static final Map<Long, CleanerWeakReference> all = new ConcurrentHashMap(); // We want to be thread safe private static final ReferenceQueue<?> collected = new ReferenceQueue<>(); private static class CleanerWeakReference<T> extends WeakReference<T> { private final Cleaner cleaner; final boolean isRmmBlocker; public CleanerWeakReference(T orig, Cleaner cleaner, ReferenceQueue collected, boolean isRmmBlocker) { super(orig, collected); this.cleaner = cleaner; this.isRmmBlocker = isRmmBlocker; } public void clean() { if (cleaner.clean(true)) { leakCount.incrementAndGet(); } } } /** * The default GPU as set by user threads. */ private static volatile int defaultGpu = -1; /** * This should be called from RMM when it is initialized. 
*/ static void setDefaultGpu(int defaultGpuId) { defaultGpu = defaultGpuId; } private static final Thread t = new Thread(() -> { try { int currentGpuId = -1; while (true) { CleanerWeakReference next = (CleanerWeakReference)collected.remove(100); if (next != null) { try { if (currentGpuId != defaultGpu) { Cuda.setDevice(defaultGpu); currentGpuId = defaultGpu; } } catch (Throwable t) { log.error("ERROR TRYING TO SET GPU ID TO " + defaultGpu, t); } try { next.clean(); } catch (Throwable t) { log.error("CAUGHT EXCEPTION WHILE TRYING TO CLEAN " + next, t); } all.remove(next.cleaner.id); } } } catch (InterruptedException e) { // Ignored, just exit } }, "Cleaner Thread"); /** * Default shutdown runnable, to be added to the Java default shutdown hook. * It checks for leaks at shutdown time. */ private static final Runnable DEFAULT_SHUTDOWN_RUNNABLE = () -> { // If we are debugging things, do a best-effort check for leaks at the end System.gc(); // Avoid issues on shutdown with the cleaner thread. t.interrupt(); try { t.join(1000); } catch (InterruptedException e) { // Ignored } if (defaultGpu >= 0) { Cuda.setDevice(defaultGpu); } for (CleanerWeakReference cwr : all.values()) { cwr.clean(); } }; private static final Thread DEFAULT_SHUTDOWN_THREAD = new Thread(DEFAULT_SHUTDOWN_RUNNABLE); static { t.setDaemon(true); t.start(); if (REF_COUNT_DEBUG) { Runtime.getRuntime().addShutdownHook(DEFAULT_SHUTDOWN_THREAD); } } /** * De-register the default shutdown hook from the Java default Runtime, then return the * corresponding shutdown runnable. * If you want to register the default shutdown runnable in a custom shutdown hook manager * instead of the Java default Runtime, you should first remove it using this method and then * add the returned runnable to your manager. * * @return the default shutdown runnable */ public static Runnable removeDefaultShutdownHook() { Runtime.getRuntime().removeShutdownHook(DEFAULT_SHUTDOWN_THREAD); return DEFAULT_SHUTDOWN_RUNNABLE; } static void register(ColumnVector vec, Cleaner cleaner) { // It is now registered... all.put(cleaner.id, new CleanerWeakReference(vec, cleaner, collected, true)); } static void register(HostColumnVectorCore vec, Cleaner cleaner) { // It is now registered... all.put(cleaner.id, new CleanerWeakReference(vec, cleaner, collected, false)); } static void register(MemoryBuffer buf, Cleaner cleaner) { // It is now registered... all.put(cleaner.id, new CleanerWeakReference(buf, cleaner, collected, buf instanceof BaseDeviceMemoryBuffer)); } static void register(Cuda.Stream stream, Cleaner cleaner) { // It is now registered... all.put(cleaner.id, new CleanerWeakReference(stream, cleaner, collected, false)); } static void register(Cuda.Event event, Cleaner cleaner) { // It is now registered... all.put(cleaner.id, new CleanerWeakReference(event, cleaner, collected, false)); } static void register(CuFileDriver driver, Cleaner cleaner) { // It is now registered... all.put(cleaner.id, new CleanerWeakReference(driver, cleaner, collected, false)); } static void register(CuFileBuffer buffer, Cleaner cleaner) { // It is now registered... all.put(cleaner.id, new CleanerWeakReference(buffer, cleaner, collected, false)); } static void register(CuFileHandle handle, Cleaner cleaner) { // It is now registered... 
all.put(cleaner.id, new CleanerWeakReference(handle, cleaner, collected, false)); } public static void register(CompiledExpression expr, Cleaner cleaner) { all.put(cleaner.id, new CleanerWeakReference(expr, cleaner, collected, false)); } static void register(HashJoin hashJoin, Cleaner cleaner) { all.put(cleaner.id, new CleanerWeakReference(hashJoin, cleaner, collected, true)); } /** * This is not 100% perfect and we can still run into situations where RMM buffers were not * collected and this returns false because of thread race conditions. This is just a best effort. * @return true if there are RMM blockers else false. */ static boolean bestEffortHasRmmBlockers() { return all.values().stream().anyMatch(cwr -> cwr.isRmmBlocker && !cwr.cleaner.isClean()); } /** * Convert the elements in it to Strings and join them together. Only use for debug messages * where the code execution itself can be disabled, as this is not fast. */ private static <T> String stringJoin(String delim, Iterable<T> it) { return String.join(delim, StreamSupport.stream(it.spliterator(), false) .map((i) -> i.toString()) .collect(Collectors.toList())); } /** * When debug is enabled, holds information about each inc and dec of the ref count. */ private static final class RefCountDebugItem { final StackTraceElement[] stackTrace; final long timeMs; final String op; public RefCountDebugItem(String op) { this.stackTrace = Thread.currentThread().getStackTrace(); this.timeMs = System.currentTimeMillis(); this.op = op; } public String toString() { Date date = new Date(timeMs); // SimpleDateFormat is horribly expensive; only do this when debug is turned on! SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSS z"); return dateFormat.format(date) + ": " + op + "\n" + stringJoin("\n", Arrays.asList(stackTrace)) + "\n"; } } }
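A minimal sketch of rehoming the leak check described by removeDefaultShutdownHook, assuming the JVM was started with -Dai.rapids.refcount.debug=true so the default hook was installed in the first place:

// Move the shutdown leak check from the default Runtime hook to our own thread.
Runnable leakCheck = MemoryCleaner.removeDefaultShutdownHook();
Runtime.getRuntime().addShutdownHook(new Thread(leakCheck, "cudf-leak-check"));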
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmArenaMemoryResource.java
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * A device memory resource that will pre-allocate a pool of resources and sub-allocate from this * pool to improve memory performance. This uses an algorithm to try and reduce fragmentation * much more than the RmmPoolMemoryResource does. */ public class RmmArenaMemoryResource<C extends RmmDeviceMemoryResource> extends RmmWrappingDeviceMemoryResource<C> { private final long size; private final boolean dumpLogOnFailure; private long handle = 0; /** * Create a new arena memory resource taking ownership of the RmmDeviceMemoryResource that it is * wrapping. * @param wrapped the memory resource to use for the pool. This should not be reused. * @param size the size of the pool * @param dumpLogOnFailure if true, dump memory log when running out of memory. */ public RmmArenaMemoryResource(C wrapped, long size, boolean dumpLogOnFailure) { super(wrapped); this.size = size; this.dumpLogOnFailure = dumpLogOnFailure; handle = Rmm.newArenaMemoryResource(wrapped.getHandle(), size, dumpLogOnFailure); } @Override public long getHandle() { return handle; } public long getSize() { return size; } @Override public void close() { if (handle != 0) { Rmm.releaseArenaMemoryResource(handle); handle = 0; } super.close(); } @Override public String toString() { return Long.toHexString(getHandle()) + "/ARENA(" + wrapped + ", " + size + ", " + dumpLogOnFailure + ")"; } }
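A minimal construction sketch, assuming RmmCudaMemoryResource is the plain cudaMalloc-backed resource to wrap and that a 1 GiB arena is appropriate for the workload:

try (RmmArenaMemoryResource<RmmCudaMemoryResource> arena =
         new RmmArenaMemoryResource<>(new RmmCudaMemoryResource(),
                                      1024L * 1024 * 1024,   // 1 GiB pool (assumed size)
                                      false)) {              // no log dump on failure
  // allocations routed through this resource are sub-allocated from the arena
}
// close() releases the arena and, since the adaptor owns it, the wrapped resource too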
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Range.java
/* * * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import ai.rapids.cudf.HostColumnVector.Builder; import java.util.function.Consumer; /** * Helper utility for creating ranges. */ public final class Range { /** * Append a range to the builder. 0 inclusive to end exclusive. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendBytes(byte end) { return appendBytes((byte) 0, end, (byte) 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendBytes(byte start, byte end) { return appendBytes(start, end, (byte) 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @param step how much to step by. * @return the consumer. */ public static final Consumer<Builder> appendBytes(byte start, byte end, byte step) { assert step > 0; assert start <= end; return (b) -> { for (byte i = start; i < end; i += step) { b.append(i); } }; } /** * Append a range to the builder. 0 inclusive to end exclusive. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendShorts(short end) { return appendShorts((short) 0, end, (short) 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendShorts(short start, short end) { return appendShorts(start, end, (short) 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @param step how much to step by. * @return the consumer. */ public static final Consumer<Builder> appendShorts(short start, short end, short step) { assert step > 0; assert start <= end; return (b) -> { for (short i = start; i < end; i += step) { b.append(i); } }; } /** * Append a range to the builder. 0 inclusive to end exclusive. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendInts(int end) { return appendInts(0, end, 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendInts(int start, int end) { return appendInts(start, end, 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @param step how much to step by. * @return the consumer. 
*/ public static final Consumer<Builder> appendInts(int start, int end, int step) { assert step > 0; assert start <= end; return (b) -> { for (int i = start; i < end; i += step) { b.append(i); } }; } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @param step how much to step by. * @return the consumer. */ public static final Consumer<Builder> appendLongs(long start, long end, long step) { assert step > 0; assert start <= end; return (b) -> { for (long i = start; i < end; i += step) { b.append(i); } }; } /** * Append a range to the builder. 0 inclusive to end exclusive. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendLongs(long end) { return appendLongs(0, end, 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendLongs(long start, long end) { return appendLongs(start, end, 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @param step how much to step by. * @return the consumer. */ public static final Consumer<Builder> appendFloats(float start, float end, float step) { assert step > 0; assert start <= end; return (b) -> { for (float i = start; i < end; i += step) { b.append(i); } }; } /** * Append a range to the builder. 0 inclusive to end exclusive. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendFloats(float end) { return appendFloats(0, end, 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendFloats(float start, float end) { return appendFloats(start, end, 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @param step how much to step by. * @return the consumer. */ public static final Consumer<Builder> appendDoubles(double start, double end, double step) { assert step > 0; assert start <= end; return (b) -> { for (double i = start; i < end; i += step) { b.append(i); } }; } /** * Append a range to the builder. 0 inclusive to end exclusive. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendDoubles(double end) { return appendDoubles(0, end, 1); } /** * Append a range to the builder. start inclusive to end exclusive. * @param start first entry. * @param end last entry exclusive. * @return the consumer. */ public static final Consumer<Builder> appendDoubles(double start, double end) { return appendDoubles(start, end, 1); } }
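A minimal sketch pairing these consumers with a builder, assuming HostColumnVector.build(DType, rows, Consumer<Builder>) is the companion factory that drives the returned consumer:

// Build a host vector holding 0, 2, 4, 6, 8.
try (HostColumnVector evens =
         HostColumnVector.build(DType.INT32, 5, Range.appendInts(0, 10, 2))) {
  // the consumer appended exactly 5 rows
}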
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/DeviceMemoryBuffer.java
/* * * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This class represents data in some form on the GPU. Closing this object will effectively release * the memory held by the buffer. Note that because of pooling in RMM or reference counting if a * buffer is sliced it may not actually result in the memory being released. */ public class DeviceMemoryBuffer extends BaseDeviceMemoryBuffer { private static final Logger log = LoggerFactory.getLogger(DeviceMemoryBuffer.class); private static final class DeviceBufferCleaner extends MemoryBufferCleaner { private long address; private long lengthInBytes; private Cuda.Stream stream; DeviceBufferCleaner(long address, long lengthInBytes, Cuda.Stream stream) { this.address = address; this.lengthInBytes = lengthInBytes; this.stream = stream; } @Override protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) { boolean neededCleanup = false; long origAddress = address; if (address != 0) { long s = stream == null ? 0 : stream.getStream(); try { Rmm.free(address, lengthInBytes, s); } finally { // Always mark the resource as freed even if an exception is thrown. // We cannot know how far it progressed before the exception, and // therefore it is unsafe to retry. address = 0; lengthInBytes = 0; stream = null; } neededCleanup = true; } if (neededCleanup && logErrorIfNotClean) { log.error("A DEVICE BUFFER WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")"); logRefCountDebug("Leaked device buffer"); } return neededCleanup; } @Override public boolean isClean() { return address == 0; } } private static final class RmmDeviceBufferCleaner extends MemoryBufferCleaner { private long rmmBufferAddress; RmmDeviceBufferCleaner(long rmmBufferAddress) { this.rmmBufferAddress = rmmBufferAddress; } @Override protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) { boolean neededCleanup = false; if (rmmBufferAddress != 0) { Rmm.freeDeviceBuffer(rmmBufferAddress); rmmBufferAddress = 0; neededCleanup = true; } if (neededCleanup && logErrorIfNotClean) { log.error("WE LEAKED A DEVICE BUFFER!!!!"); logRefCountDebug("Leaked device buffer"); } return neededCleanup; } @Override public boolean isClean() { return rmmBufferAddress == 0; } } /** * Wrap an existing RMM allocation in a device memory buffer. The RMM allocation will be freed * when the resulting device memory buffer instance frees its memory resource (i.e.: when its * reference count goes to zero). 
* @param address device address of the RMM allocation * @param lengthInBytes length of the RMM allocation in bytes * @param rmmBufferAddress host address of the rmm::device_buffer that owns the device memory * @return new device memory buffer instance that wraps the existing RMM allocation */ public static DeviceMemoryBuffer fromRmm(long address, long lengthInBytes, long rmmBufferAddress) { return new DeviceMemoryBuffer(address, lengthInBytes, rmmBufferAddress); } DeviceMemoryBuffer(long address, long lengthInBytes, MemoryBufferCleaner cleaner) { super(address, lengthInBytes, cleaner); } DeviceMemoryBuffer(long address, long lengthInBytes, long rmmBufferAddress) { super(address, lengthInBytes, new RmmDeviceBufferCleaner(rmmBufferAddress)); } DeviceMemoryBuffer(long address, long lengthInBytes, Cuda.Stream stream) { super(address, lengthInBytes, new DeviceBufferCleaner(address, lengthInBytes, stream)); } private DeviceMemoryBuffer(long address, long lengthInBytes, DeviceMemoryBuffer parent) { super(address, lengthInBytes, parent); } /** * Allocate memory for use on the GPU. You must close it when done. * @param bytes size in bytes to allocate * @return the buffer */ public static DeviceMemoryBuffer allocate(long bytes) { return allocate(bytes, Cuda.DEFAULT_STREAM); } /** * Allocate memory for use on the GPU. You must close it when done. * @param bytes size in bytes to allocate * @param stream The stream in which to synchronize this command * @return the buffer */ public static DeviceMemoryBuffer allocate(long bytes, Cuda.Stream stream) { return Rmm.alloc(bytes, stream); } /** * Slice off a part of the device buffer. Note that this is a zero copy operation and all * slices must be closed along with the original buffer before the memory is released to RMM. * So use this with some caution. * @param offset where to start the slice. * @param len how many bytes to slice * @return a device buffer that will need to be closed independently from this buffer. */ @Override public synchronized final DeviceMemoryBuffer slice(long offset, long len) { addressOutOfBoundsCheck(address + offset, len, "slice"); incRefCount(); return new DeviceMemoryBuffer(getAddress() + offset, len, this); } /** * Create a sliced buffer from this buffer that covers the same memory region as the given view. * @param view the view to use as a reference. * @return the sliced buffer. */ synchronized final BaseDeviceMemoryBuffer sliceFrom(DeviceMemoryBufferView view) { if (view == null) { return null; } addressOutOfBoundsCheck(view.address, view.length, "sliceFrom"); incRefCount(); return new DeviceMemoryBuffer(view.address, view.length, this); } }
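A minimal sketch of the slice reference counting described above:

try (DeviceMemoryBuffer buf = DeviceMemoryBuffer.allocate(1024)) {
  try (DeviceMemoryBuffer mid = buf.slice(256, 512)) {
    // `mid` is zero copy: same device memory, offset 256, length 512.
    // The slice bumps the parent's ref count, so the 1024 bytes go back
    // to RMM only after both `mid` and `buf` have been closed.
  }
}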
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RollingAggregationOnColumn.java
/* * * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * A RollingAggregation for a specific column in a table. */ public final class RollingAggregationOnColumn { protected final RollingAggregation wrapped; protected final int columnIndex; RollingAggregationOnColumn(RollingAggregation wrapped, int columnIndex) { this.wrapped = wrapped; this.columnIndex = columnIndex; } public int getColumnIndex() { return columnIndex; } public AggregationOverWindow overWindow(WindowOptions windowOptions) { return new AggregationOverWindow(this, windowOptions); } @Override public int hashCode() { return 31 * wrapped.hashCode() + columnIndex; } @Override public boolean equals(Object other) { if (other == this) { return true; } else if (other instanceof RollingAggregationOnColumn) { RollingAggregationOnColumn o = (RollingAggregationOnColumn) other; return wrapped.equals(o.wrapped) && columnIndex == o.columnIndex; } return false; } long createNativeInstance() { return wrapped.createNativeInstance(); } long getDefaultOutput() { return wrapped.getDefaultOutput(); } }
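A minimal sketch of how this class is produced and consumed. The WindowOptions builder calls and the grouped aggregateWindows entry point are the author's best recollection of the public cudf Java API and may differ across versions; `t` is a hypothetical Table with keys in column 0:

WindowOptions window = WindowOptions.builder()
    .minPeriods(1)
    .window(2, 1)            // assumed: 2 preceding rows (incl. current), 1 following
    .build();
AggregationOverWindow sumOverWindow =
    RollingAggregation.sum() // the rolling aggregation
        .onColumn(1)         // -> RollingAggregationOnColumn
        .overWindow(window); // -> AggregationOverWindow
try (Table result = t.groupBy(0).aggregateWindows(sumOverWindow)) {
  // one output row per input row: the sum over that row's window within its group
}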
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/RmmLoggingResourceAdaptor.java
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * A device memory resource that will log interactions. */ public class RmmLoggingResourceAdaptor<C extends RmmDeviceMemoryResource> extends RmmWrappingDeviceMemoryResource<C> { private long handle = 0; /** * Create a new logging resource adaptor. * @param wrapped the memory resource to log interactions with. This should not be reused. * @param conf the config of where this should be logged to * @param autoFlush should the results be flushed after each entry or not. */ public RmmLoggingResourceAdaptor(C wrapped, Rmm.LogConf conf, boolean autoFlush) { super(wrapped); if (conf.loc == Rmm.LogLoc.NONE) { throw new RmmException("Cannot initialize RmmLoggingResourceAdaptor with no logging"); } handle = Rmm.newLoggingResourceAdaptor(wrapped.getHandle(), conf.loc.internalId, conf.file == null ? null : conf.file.getAbsolutePath(), autoFlush); } @Override public long getHandle() { return handle; } @Override public void close() { if (handle != 0) { Rmm.releaseLoggingResourceAdaptor(handle); handle = 0; } super.close(); } @Override public String toString() { return Long.toHexString(getHandle()) + "/LOG(" + wrapped + ")"; } }
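A minimal construction sketch, assuming Rmm.logTo(java.io.File) produces the LogConf consumed here; the log path is a placeholder:

Rmm.LogConf conf = Rmm.logTo(new java.io.File("/tmp/rmm_log.csv")); // hypothetical path
try (RmmLoggingResourceAdaptor<RmmCudaMemoryResource> logged =
         new RmmLoggingResourceAdaptor<>(new RmmCudaMemoryResource(), conf, true)) {
  // with autoFlush=true every alloc/free through this resource is written immediately
}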
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Aggregation128Utils.java
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; /** * Utility methods for breaking apart and reassembling 128-bit values during aggregations * to enable hash-based aggregations and detect overflows. */ public class Aggregation128Utils { static { NativeDepsLoader.loadNativeDeps(); } /** * Extract a 32-bit chunk from a 128-bit value. * @param col column of 128-bit values (e.g.: DECIMAL128) * @param outType integer type to use for the output column (e.g.: UINT32 or INT32) * @param chunkIdx index of the 32-bit chunk to extract where 0 is the least significant chunk * and 3 is the most significant chunk * @return column containing the specified 32-bit chunk of the input column values. A null input * row will result in a corresponding null output row. */ public static ColumnVector extractInt32Chunk(ColumnView col, DType outType, int chunkIdx) { return new ColumnVector(extractInt32Chunk(col.getNativeView(), outType.getTypeId().getNativeId(), chunkIdx)); } /** * Reassemble a column of 128-bit values from a table of four 64-bit integer columns and check * for overflow. The 128-bit value is reconstructed by overlapping the 64-bit values by 32-bits. * The least significant 32-bits of the least significant 64-bit value are used directly as the * least significant 32-bits of the final 128-bit value, and the remaining 32-bits are added to * the next most significant 64-bit value. The lower 32-bits of that sum become the next most * significant 32-bits in the final 128-bit value, and the remaining 32-bits are added to the * next most significant 64-bit input value, and so on. * * @param chunks table of four 64-bit integer columns with the columns ordered from least * significant to most significant. The last column must be of type INT64. * @param type the type to use for the resulting 128-bit value column * @return table containing a boolean column and a 128-bit value column of the requested type. * The boolean value will be true if an overflow was detected for that row's value when * it was reassembled. A null input row will result in a corresponding null output row. */ public static Table combineInt64SumChunks(Table chunks, DType type) { return new Table(combineInt64SumChunks(chunks.getNativeView(), type.getTypeId().getNativeId(), type.getScale())); } private static native long extractInt32Chunk(long columnView, int outTypeId, int chunkIdx); private static native long[] combineInt64SumChunks(long chunksTableView, int dtype, int scale); }
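A minimal sketch of the overflow-checked DECIMAL128 sum flow these methods enable. `dec128` is a hypothetical DECIMAL128 ColumnVector, `sums` the hypothetical four-column INT64 table produced by summing each chunk in a separate group-by (ordered least to most significant, last column INT64), and the scale is illustrative:

// Step 1: split the 128-bit values into four 32-bit chunks.
try (ColumnVector c0 = Aggregation128Utils.extractInt32Chunk(dec128, DType.UINT32, 0);
     ColumnVector c1 = Aggregation128Utils.extractInt32Chunk(dec128, DType.UINT32, 1);
     ColumnVector c2 = Aggregation128Utils.extractInt32Chunk(dec128, DType.UINT32, 2);
     ColumnVector c3 = Aggregation128Utils.extractInt32Chunk(dec128, DType.INT32, 3)) {
  // Step 2 (not shown): sum each chunk column as INT64, e.g. in a hash group-by,
  // yielding the four-column table `sums`.
}
// Step 3: reassemble into an overflow flag column plus the DECIMAL128 result.
DType resultType = DType.create(DType.DTypeEnum.DECIMAL128, -2); // scale is illustrative
try (Table reassembled = Aggregation128Utils.combineInt64SumChunks(sums, resultType)) {
  // column 0: per-row overflow flags, column 1: the DECIMAL128 sums
}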
0
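To make the chunk-and-reassemble flow concrete, here is a hedged sketch. The group-by that sums each chunk column into INT64 is elided, and decCol, sumChunks, and scale are hypothetical placeholders:

// Hedged sketch of hash-aggregating DECIMAL128 sums via 32-bit chunks.
// `decCol` is a DECIMAL128 column; the three low chunks are extracted as UINT32
// and the most significant chunk as INT32 to preserve the sign.
try (ColumnVector c0 = Aggregation128Utils.extractInt32Chunk(decCol, DType.UINT32, 0);
     ColumnVector c1 = Aggregation128Utils.extractInt32Chunk(decCol, DType.UINT32, 1);
     ColumnVector c2 = Aggregation128Utils.extractInt32Chunk(decCol, DType.UINT32, 2);
     ColumnVector c3 = Aggregation128Utils.extractInt32Chunk(decCol, DType.INT32, 3)) {
  // ... group-by sum each chunk column to INT64, yielding `sumChunks` ordered
  // least to most significant, last column INT64 ... then reassemble:
  try (Table combined = Aggregation128Utils.combineInt64SumChunks(
           sumChunks, DType.create(DType.DTypeEnum.DECIMAL128, scale))) {
    // combined holds a BOOL8 overflow-flag column and a DECIMAL128 sum column.
  }
}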
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CudaComputeMode.java
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

/**
 * This is the Java mapping of CUDA device compute modes.
 */
public enum CudaComputeMode {
  /**
   * Default compute mode
   * Multiple threads can use cudaSetDevice() with this device.
   */
  DEFAULT(0),
  /**
   * Compute-exclusive-thread mode
   * Only one thread in one process will be able to use cudaSetDevice() with this device.
   *
   * WARNING: This mode is deprecated! Use EXCLUSIVE_PROCESS instead.
   */
  EXCLUSIVE(1),
  /**
   * Compute-prohibited mode
   * No threads can use cudaSetDevice() with this device.
   */
  PROHIBITED(2),
  /**
   * Compute-exclusive-process mode
   * Many threads in one process will be able to use cudaSetDevice() with this device.
   */
  EXCLUSIVE_PROCESS(3);

  private CudaComputeMode(int nativeId) {
    this.nativeId = nativeId;
  }

  static CudaComputeMode fromNative(int nativeId) {
    for (CudaComputeMode mode : COMPUTE_MODES) {
      if (mode.nativeId == nativeId) return mode;
    }
    throw new IllegalArgumentException("Could not translate " + nativeId +
        " into a CudaComputeMode");
  }

  // mapping to the value of native mode
  final int nativeId;

  private static final CudaComputeMode[] COMPUTE_MODES = CudaComputeMode.values();
}
0
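A short check against this enum, using the Cuda.getComputeMode() accessor defined elsewhere in this package:

// Warn when the device is not in the default compute mode, since access from
// multiple threads or processes may then be restricted.
CudaComputeMode mode = Cuda.getComputeMode();
if (mode != CudaComputeMode.DEFAULT) {
  System.err.println("Device compute mode is " + mode + "; cudaSetDevice() may be restricted");
}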
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/PinnedMemoryPool.java
/* * * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Comparator; import java.util.Iterator; import java.util.Objects; import java.util.Optional; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; /** * This provides a pool of pinned memory similar to what RMM does for device memory. */ public final class PinnedMemoryPool implements AutoCloseable { private static final Logger log = LoggerFactory.getLogger(PinnedMemoryPool.class); private static final long ALIGNMENT = ColumnView.hostPaddingSizeInBytes(); // These static fields should only ever be accessed when class-synchronized. // Do NOT use singleton_ directly! Use the getSingleton accessor instead. private static volatile PinnedMemoryPool singleton_ = null; private static Future<PinnedMemoryPool> initFuture = null; private final long totalPoolSize; private final long pinnedPoolBase; private final SortedSet<MemorySection> freeHeap = new TreeSet<>(new SortedByAddress()); private int numAllocatedSections = 0; private long availableBytes; private static class SortedBySize implements Comparator<MemorySection> { @Override public int compare(MemorySection s0, MemorySection s1) { return Long.compare(s0.size, s1.size); } } private static class SortedByAddress implements Comparator<MemorySection> { @Override public int compare(MemorySection s0, MemorySection s1) { return Long.compare(s0.baseAddress, s1.baseAddress); } } private static class MemorySection { private long baseAddress; private long size; MemorySection(long baseAddress, long size) { this.baseAddress = baseAddress; this.size = size; } boolean canCombine(MemorySection other) { boolean ret = (other.baseAddress + other.size) == baseAddress || (baseAddress + size) == other.baseAddress; log.trace("CAN {} COMBINE WITH {} ? 
{}", this, other, ret); return ret; } void combineWith(MemorySection other) { assert canCombine(other); log.trace("COMBINING {} AND {}", this, other); this.baseAddress = Math.min(baseAddress, other.baseAddress); this.size = other.size + this.size; log.trace("COMBINED TO {}\n", this); } MemorySection splitOff(long newSize) { assert this.size > newSize; MemorySection ret = new MemorySection(baseAddress, newSize); this.baseAddress += newSize; this.size -= newSize; return ret; } @Override public String toString() { return "PINNED: " + size + " bytes (0x" + Long.toHexString(baseAddress) + " to 0x" + Long.toHexString(baseAddress + size) + ")"; } } private static final class PinnedHostBufferCleaner extends MemoryBuffer.MemoryBufferCleaner { private MemorySection section; private final long origLength; PinnedHostBufferCleaner(MemorySection section, long length) { this.section = section; origLength = length; } @Override protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) { boolean neededCleanup = false; long origAddress = 0; if (section != null) { origAddress = section.baseAddress; try { PinnedMemoryPool.freeInternal(section); } finally { // Always mark the resource as freed even if an exception is thrown. // We cannot know how far it progressed before the exception, and // therefore it is unsafe to retry. section = null; } neededCleanup = true; } if (neededCleanup && logErrorIfNotClean) { log.error("A PINNED HOST BUFFER WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")"); logRefCountDebug("Leaked pinned host buffer"); } return neededCleanup; } @Override public boolean isClean() { return section == null; } } private static PinnedMemoryPool getSingleton() { if (singleton_ == null) { if (initFuture == null) { return null; } synchronized (PinnedMemoryPool.class) { if (singleton_ == null) { try { singleton_ = initFuture.get(); } catch (Exception e) { throw new RuntimeException("Error initializing pinned memory pool", e); } initFuture = null; } } } return singleton_; } private static void freeInternal(MemorySection section) { Objects.requireNonNull(getSingleton()).free(section); } /** * Used to indicate that memory was allocated from a reservation. This primarily is for * keeping track of outstanding allocations. */ private static void reserveAllocInternal(MemorySection section) { Objects.requireNonNull(getSingleton()).reserveAllocHappened(section); } /** * Initialize the pool. * * @param poolSize size of the pool to initialize. */ public static synchronized void initialize(long poolSize) { initialize(poolSize, -1); } /** * Initialize the pool. * * @param poolSize size of the pool to initialize. * @param gpuId gpu id to set to get memory pool from, -1 means to use default */ public static synchronized void initialize(long poolSize, int gpuId) { if (isInitialized()) { throw new IllegalStateException("Can only initialize the pool once."); } ExecutorService initService = Executors.newSingleThreadExecutor(runnable -> { Thread t = new Thread(runnable, "pinned pool init"); t.setDaemon(true); return t; }); initFuture = initService.submit(() -> new PinnedMemoryPool(poolSize, gpuId)); initService.shutdown(); } /** * Check if the pool has been initialized or not. */ public static boolean isInitialized() { return getSingleton() != null; } /** * Shut down the pool of memory. If there are outstanding allocations this may fail. 
*/ public static synchronized void shutdown() { PinnedMemoryPool pool = getSingleton(); if (pool != null) { pool.close(); } initFuture = null; singleton_ = null; } /** * Factory method to create a pinned host memory buffer. * * @param bytes size in bytes to allocate * @return newly created buffer or null if insufficient pinned memory */ public static HostMemoryBuffer tryAllocate(long bytes) { HostMemoryBuffer result = null; PinnedMemoryPool pool = getSingleton(); if (pool != null) { result = pool.tryAllocateInternal(bytes); } return result; } /** * Factory method to create a pinned host memory reservation. * * @param bytes size in bytes to reserve * @return newly created reservation or null if insufficient pinned memory to cover it. */ public static HostMemoryReservation tryReserve(long bytes) { HostMemoryReservation result = null; PinnedMemoryPool pool = getSingleton(); if (pool != null) { result = pool.tryReserveInternal(bytes); } return result; } /** * Factory method to create a host buffer but preferably pointing to pinned memory. * It is not guaranteed that the returned buffer will be pointer to pinned memory. * * @param bytes size in bytes to allocate * @return newly created buffer */ public static HostMemoryBuffer allocate(long bytes, HostMemoryAllocator hostMemoryAllocator) { HostMemoryBuffer result = tryAllocate(bytes); if (result == null) { result = hostMemoryAllocator.allocate(bytes, false); } return result; } /** * Factory method to create a host buffer but preferably pointing to pinned memory. * It is not guaranteed that the returned buffer will be pointer to pinned memory. * * @param bytes size in bytes to allocate * @return newly created buffer */ public static HostMemoryBuffer allocate(long bytes) { return allocate(bytes, DefaultHostMemoryAllocator.get()); } /** * Get the number of bytes free in the pinned memory pool. * * @return amount of free memory in bytes or 0 if the pool is not initialized */ public static long getAvailableBytes() { PinnedMemoryPool pool = getSingleton(); if (pool != null) { return pool.getAvailableBytesInternal(); } return 0; } /** * Get the number of bytes that the pinned memory pool was allocated with. */ public static long getTotalPoolSizeBytes() { PinnedMemoryPool pool = getSingleton(); if (pool != null) { return pool.getTotalPoolSizeInternal(); } return 0; } private PinnedMemoryPool(long poolSize, int gpuId) { if (gpuId > -1) { // set the gpu device to use Cuda.setDevice(gpuId); Cuda.freeZero(); } this.totalPoolSize = poolSize; this.pinnedPoolBase = Cuda.hostAllocPinned(poolSize); freeHeap.add(new MemorySection(pinnedPoolBase, poolSize)); this.availableBytes = poolSize; } @Override public void close() { assert numAllocatedSections == 0 : "Leaked " + numAllocatedSections + " pinned allocations"; Cuda.freePinned(pinnedPoolBase); } /** * Pads a length of bytes to the alignment the CPU wants in the worst case. This helps to * calculate the size needed for a reservation if there are multiple buffers. * @param bytes the size in bytes * @return the new padded size in bytes. 
*/ public static long padToCpuAlignment(long bytes) { return ((bytes + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT; } private synchronized MemorySection tryGetInternal(long bytes, String what) { if (freeHeap.isEmpty()) { log.debug("No free pinned memory left"); return null; } // Align the allocation long alignedBytes = padToCpuAlignment(bytes); Optional<MemorySection> firstFit = freeHeap.stream() .filter(section -> section.size >= alignedBytes) .findFirst(); if (!firstFit.isPresent()) { if (log.isDebugEnabled()) { MemorySection largest = freeHeap.stream() .max(new SortedBySize()) .orElse(new MemorySection(0, 0)); log.debug("Insufficient pinned memory. {} needed, {} found", alignedBytes, largest.size); } return null; } MemorySection first = firstFit.get(); log.debug("{} {}/{} bytes pinned from {} FREE COUNT {} OUTSTANDING COUNT {}", what, bytes, alignedBytes, first, freeHeap.size(), numAllocatedSections); freeHeap.remove(first); MemorySection allocated; if (first.size == alignedBytes) { allocated = first; } else { allocated = first.splitOff(alignedBytes); freeHeap.add(first); } numAllocatedSections++; availableBytes -= allocated.size; log.debug("{} {} free {} outstanding {}", what, allocated, freeHeap, numAllocatedSections); return allocated; } private synchronized HostMemoryBuffer tryAllocateInternal(long bytes) { MemorySection allocated = tryGetInternal(bytes, "allocate"); if (allocated == null) { return null; } else { return new HostMemoryBuffer(allocated.baseAddress, bytes, new PinnedHostBufferCleaner(allocated, bytes)); } } private class PinnedReservation implements HostMemoryReservation { private MemorySection section = null; public PinnedReservation(MemorySection section) { this.section = section; } @Override public synchronized HostMemoryBuffer allocate(long bytes, boolean preferPinned) { return this.allocate(bytes); } @Override public synchronized HostMemoryBuffer allocate(long bytes) { if (section == null || section.size < bytes) { throw new OutOfMemoryError("Reservation didn't have enough space " + bytes + " / " + (section == null ? 0 : section.size)); } long alignedSize = padToCpuAlignment(bytes); MemorySection allocated; if (section.size >= bytes && section.size <= alignedSize) { allocated = section; section = null; // No need for reserveAllocInternal because the original section is already tracked } else { allocated = section.splitOff(alignedSize); PinnedMemoryPool.reserveAllocInternal(allocated); } return new HostMemoryBuffer(allocated.baseAddress, bytes, new PinnedHostBufferCleaner(allocated, bytes)); } @Override public synchronized void close() throws Exception { if (section != null) { try { PinnedMemoryPool.freeInternal(section); } finally { // Always mark the resource as freed even if an exception is thrown. // We cannot know how far it progressed before the exception, and // therefore it is unsafe to retry. 
section = null; } } } } private HostMemoryReservation tryReserveInternal(long bytes) { MemorySection allocated = tryGetInternal(bytes, "allocate"); if (allocated == null) { return null; } else { return new PinnedReservation(allocated); } } private synchronized void free(MemorySection section) { log.debug("Freeing {} with {} outstanding {}", section, freeHeap, numAllocatedSections); availableBytes += section.size; Iterator<MemorySection> it = freeHeap.iterator(); while(it.hasNext()) { MemorySection current = it.next(); if (section.canCombine(current)) { it.remove(); section.combineWith(current); } } freeHeap.add(section); numAllocatedSections--; log.debug("After freeing {} outstanding {}", freeHeap, numAllocatedSections); } private synchronized void reserveAllocHappened(MemorySection section) { if (section != null && section.size > 0) { numAllocatedSections++; } } private synchronized long getAvailableBytesInternal() { return this.availableBytes; } private long getTotalPoolSizeInternal() { return this.totalPoolSize; } }
0
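A minimal lifecycle sketch for the pool; the sizes are illustrative only:

// Initialize once per process; the pinned allocation happens on a background thread.
PinnedMemoryPool.initialize(512L * 1024 * 1024); // 512 MiB pinned pool
try (HostMemoryBuffer buf = PinnedMemoryPool.allocate(64 * 1024)) {
  // `buf` is pinned if the pool had room, otherwise a regular host allocation.
}
PinnedMemoryPool.shutdown(); // may fail if allocations are still outstanding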
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/MultiBufferDataSource.java
/* * * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; /** * This is a DataSource that can take multiple HostMemoryBuffers. They * are treated as if they are all part of a single file connected end to end. */ public class MultiBufferDataSource extends DataSource { private final long sizeInBytes; private final HostMemoryBuffer[] hostBuffers; private final long[] startOffsets; private final HostMemoryAllocator allocator; // Metrics private long hostReads = 0; private long hostReadBytes = 0; private long devReads = 0; private long devReadBytes = 0; /** * Create a new data source backed by multiple buffers. * @param buffers the buffers that will back the data source. */ public MultiBufferDataSource(HostMemoryBuffer ... buffers) { this(DefaultHostMemoryAllocator.get(), buffers); } /** * Create a new data source backed by multiple buffers. * @param allocator the allocator to use for host buffers, if needed. * @param buffers the buffers that will back the data source. */ public MultiBufferDataSource(HostMemoryAllocator allocator, HostMemoryBuffer ... buffers) { int numBuffers = buffers.length; hostBuffers = new HostMemoryBuffer[numBuffers]; startOffsets = new long[numBuffers]; long currentOffset = 0; for (int i = 0; i < numBuffers; i++) { HostMemoryBuffer hmb = buffers[i]; hmb.incRefCount(); hostBuffers[i] = hmb; startOffsets[i] = currentOffset; currentOffset += hmb.getLength(); } sizeInBytes = currentOffset; this.allocator = allocator; } @Override public long size() { return sizeInBytes; } private int getStartBufferIndexForOffset(long offset) { assert (offset >= 0); // It is super common to read from the start or end of a file (the header or footer) // so special case them if (offset == 0) { return 0; } int startIndex = 0; int endIndex = startOffsets.length - 1; if (offset >= startOffsets[endIndex]) { return endIndex; } while (startIndex != endIndex) { int midIndex = (int)(((long)startIndex + endIndex) / 2); long midStartOffset = startOffsets[midIndex]; if (offset >= midStartOffset) { // It is either in mid or after mid. 
if (midIndex == endIndex || offset <= startOffsets[midIndex + 1]) { // We found it in mid return midIndex; } else { // It is after mid startIndex = midIndex + 1; } } else { // It is before mid endIndex = midIndex - 1; } } return startIndex; } interface DoCopy<T extends MemoryBuffer> { void copyFromHostBuffer(T dest, long destOffset, HostMemoryBuffer src, long srcOffset, long srcAmount); } private <T extends MemoryBuffer> long read(long offset, T dest, DoCopy<T> doCopy) { assert (offset >= 0); long realOffset = Math.min(offset, sizeInBytes); long realAmount = Math.min(sizeInBytes - realOffset, dest.getLength()); int index = getStartBufferIndexForOffset(realOffset); HostMemoryBuffer buffer = hostBuffers[index]; long bufferOffset = realOffset - startOffsets[index]; long bufferAmount = Math.min(buffer.length - bufferOffset, realAmount); long remainingAmount = realAmount; long currentOffset = realOffset; long outputOffset = 0; while (remainingAmount > 0) { doCopy.copyFromHostBuffer(dest, outputOffset, buffer, bufferOffset, bufferAmount); remainingAmount -= bufferAmount; outputOffset += bufferAmount; currentOffset += bufferAmount; index++; if (index < hostBuffers.length) { buffer = hostBuffers[index]; bufferOffset = currentOffset - startOffsets[index]; bufferAmount = Math.min(buffer.length - bufferOffset, remainingAmount); } } return realAmount; } @Override public HostMemoryBuffer hostRead(long offset, long amount) { assert (offset >= 0); assert (amount >= 0); long realOffset = Math.min(offset, sizeInBytes); long realAmount = Math.min(sizeInBytes - realOffset, amount); int index = getStartBufferIndexForOffset(realOffset); HostMemoryBuffer buffer = hostBuffers[index]; long bufferOffset = realOffset - startOffsets[index]; long bufferAmount = Math.min(buffer.length - bufferOffset, realAmount); if (bufferAmount == realAmount) { hostReads += 1; hostReadBytes += realAmount; // It all fits in a single buffer, so do a zero copy operation return buffer.slice(bufferOffset, bufferAmount); } else { // We will have to allocate a new buffer and copy data into it. boolean success = false; HostMemoryBuffer ret = allocator.allocate(realAmount, true); try { long amountRead = read(offset, ret, HostMemoryBuffer::copyFromHostBuffer); assert(amountRead == realAmount); hostReads += 1; hostReadBytes += amountRead; success = true; return ret; } finally { if (!success) { ret.close(); } } } } @Override public long hostRead(long offset, HostMemoryBuffer dest) { long ret = read(offset, dest, HostMemoryBuffer::copyFromHostBuffer); hostReads += 1; hostReadBytes += ret; return ret; } @Override public boolean supportsDeviceRead() { return true; } @Override public long deviceRead(long offset, DeviceMemoryBuffer dest, Cuda.Stream stream) { long ret = read(offset, dest, (destParam, destOffset, src, srcOffset, srcAmount) -> destParam.copyFromHostBufferAsync(destOffset, src, srcOffset, srcAmount, stream)); devReads += 1; devReadBytes += ret; return ret; } @Override public void close() { try { super.close(); } finally { for (HostMemoryBuffer hmb: hostBuffers) { if (hmb != null) { hmb.close(); } } } } public long getHostReads() { return hostReads; } public long getHostReadBytes() { return hostReadBytes; } public long getDevReads() { return devReads; } public long getDevReadBytes() { return devReadBytes; } }
0
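A hedged sketch of stitching two host buffers into one logical file; filling the buffers and handing the source to a reader are elided:

// Two buffers are exposed as a single logical byte range of their combined length.
try (HostMemoryBuffer part0 = HostMemoryBuffer.allocate(1024);
     HostMemoryBuffer part1 = HostMemoryBuffer.allocate(2048);
     MultiBufferDataSource source = new MultiBufferDataSource(part0, part1)) {
  assert source.size() == 3072;
  // Reads that stay inside one backing buffer return a zero-copy slice; reads
  // that cross the seam allocate a new buffer and copy into it.
  try (HostMemoryBuffer window = source.hostRead(512, 1024)) {
    // bytes 512..1535 of the logical file (crosses from part0 into part1)
  }
}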
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Cuda.java
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class Cuda { // This needs to happen first before calling any native methods. static { NativeDepsLoader.loadNativeDeps(); } // Defined in driver_types.h in cuda library. static final int CPU_DEVICE_ID = -1; static final long CUDA_STREAM_DEFAULT = 0; static final long CUDA_STREAM_LEGACY = 1; static final long CUDA_STREAM_PER_THREAD = 2; private final static long DEFAULT_STREAM_ID = isPtdsEnabled() ? CUDA_STREAM_PER_THREAD : CUDA_STREAM_LEGACY; private static final Logger log = LoggerFactory.getLogger(Cuda.class); private static Boolean isCompat = null; private static class StreamCleaner extends MemoryCleaner.Cleaner { private long stream; StreamCleaner(long stream) { this.stream = stream; } @Override protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) { boolean neededCleanup = false; long origAddress = stream; if (stream != CUDA_STREAM_DEFAULT && stream != CUDA_STREAM_LEGACY && stream != CUDA_STREAM_PER_THREAD) { destroyStream(stream); stream = 0; neededCleanup = true; } if (neededCleanup && logErrorIfNotClean) { log.error("A CUDA STREAM WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")"); logRefCountDebug("Leaked stream"); } return neededCleanup; } @Override public boolean isClean() { return stream == 0; } } /** A class representing a CUDA stream */ public static final class Stream implements AutoCloseable { private final StreamCleaner cleaner; boolean closed = false; private final long id; /** * Create a new CUDA stream * @param isNonBlocking whether stream should be non-blocking with respect to the default stream */ public Stream(boolean isNonBlocking) { this.cleaner = new StreamCleaner(createStream(isNonBlocking)); this.id = cleaner.id; MemoryCleaner.register(this, cleaner); cleaner.addRef(); } private Stream() { // No cleaner for the default stream... this.cleaner = null; this.id = -1; } private Stream(long id) { this.cleaner = null; this.id = id; } /** * Wrap a given stream ID to make it accessible. */ static Stream wrap(long id) { if (id == -1) { return DEFAULT_STREAM; } return new Stream(id); } /** * Have this stream not execute new work until the work recorded in event completes. * @param event the event to wait on. */ public void waitOn(Event event) { streamWaitEvent(getStream(), event.getEvent()); } public long getStream() { return cleaner == null ? DEFAULT_STREAM_ID : cleaner.stream; } /** * Block the thread to wait until all pending work on this stream completes. Note that this * does not follow any of the java threading standards. Interrupt will not work to wake up * the thread. 
*/ public void sync() { streamSynchronize(getStream()); } @Override public String toString() { return "CUDA STREAM (ID: " + id + " " + Long.toHexString(getStream()) + ")"; } @Override public synchronized void close() { if (cleaner != null) { cleaner.delRef(); } if (closed) { if (cleaner != null) { cleaner.logRefCountDebug("double free " + this); } throw new IllegalStateException("Close called too many times " + this); } if (cleaner != null) { cleaner.clean(false); closed = true; } } } public static final Stream DEFAULT_STREAM = new Stream(); private static class EventCleaner extends MemoryCleaner.Cleaner { private long event; EventCleaner(long event) { this.event = event; } @Override protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) { boolean neededCleanup = false; long origAddress = event; if (event != 0) { try { destroyEvent(event); } finally { // Always mark the resource as freed even if an exception is thrown. // We cannot know how far it progressed before the exception, and // therefore it is unsafe to retry. event = 0; } neededCleanup = true; } if (neededCleanup && logErrorIfNotClean) { log.error("A CUDA EVENT WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")"); logRefCountDebug("Leaked event"); } return neededCleanup; } @Override public boolean isClean() { return event == 0; } } public static final class Event implements AutoCloseable { private final EventCleaner cleaner; boolean closed = false; /** * Create an event that is as fast as possible, timing is disabled and no blockingSync. */ public Event() { this(false, false); } /** * Create an event to be used for CUDA synchronization. * @param enableTiming true if the event should record timing information. * @param blockingSync true if event should use blocking synchronization. * A host thread that calls sync() to wait on an event created with this * flag will block until the event actually completes. */ public Event(boolean enableTiming, boolean blockingSync) { this.cleaner = new EventCleaner(createEvent(enableTiming, blockingSync)); MemoryCleaner.register(this, cleaner); cleaner.addRef(); } long getEvent() { return cleaner.event; } /** * Check to see if the event has completed or not. This is the equivalent of cudaEventQuery. * @return true it has completed else false. */ public boolean hasCompleted() { return eventQuery(getEvent()); } /** * Captures the contents of stream at the time of this call. This event and stream must be on * the same device. Calls such as hasCompleted() or Stream.waitEvent() will then examine or wait for * completion of the work that was captured. Uses of stream after this call do not modify event. * @param stream the stream to record the state of. */ public void record(Stream stream) { eventRecord(getEvent(), stream.getStream()); } /** * Captures the contents of the default stream at the time of this call. */ public void record() { record(DEFAULT_STREAM); } /** * Block the thread to wait for the event to complete. Note that this does not follow any of * the java threading standards. Interrupt will not work to wake up the thread. 
*/ public void sync() { eventSynchronize(getEvent()); } @Override public String toString() { return "CUDA EVENT (ID: " + cleaner.id + " " + Long.toHexString(getEvent()) + ")"; } @Override public synchronized void close() { cleaner.delRef(); if (closed) { cleaner.logRefCountDebug("double free " + this); throw new IllegalStateException("Close called too many times " + this); } cleaner.clean(false); closed = true; } } /** * Gets the CUDA compute mode of the current device. * * @return the enum value of CudaComputeMode */ public static CudaComputeMode getComputeMode() { return CudaComputeMode.fromNative(Cuda.getNativeComputeMode()); } /** * Mapping: cudaMemGetInfo(size_t *free, size_t *total) */ public static native CudaMemInfo memGetInfo() throws CudaException; /** * Allocate pinned memory on the host. This call takes a long time, but can really speed up * memory transfers. * @param size how much memory, in bytes, to allocate. * @return the address to the allocated memory. * @throws CudaException on any error. */ static native long hostAllocPinned(long size) throws CudaException; /** * Free memory allocated with hostAllocPinned. * @param ptr the pointer returned by hostAllocPinned. * @throws CudaException on any error. */ static native void freePinned(long ptr) throws CudaException; /** * Copies bytes between buffers using the default CUDA stream. * The copy has completed when this returns, but the memory copy could overlap with * operations occurring on other streams. * Specifying pointers that do not match the copy direction results in undefined behavior. * @param dst - Destination memory address * @param src - Source memory address * @param count - Size in bytes to copy * @param kind - Type of transfer. {@link CudaMemcpyKind} */ static void memcpy(long dst, long src, long count, CudaMemcpyKind kind) { memcpy(dst, src, count, kind, DEFAULT_STREAM); } /** * Copies bytes between buffers using the default CUDA stream. * The copy has not necessarily completed when this returns, but the memory copy could * overlap with operations occurring on other streams. * Specifying pointers that do not match the copy direction results in undefined behavior. * @param dst - Destination memory address * @param src - Source memory address * @param count - Size in bytes to copy * @param kind - Type of transfer. {@link CudaMemcpyKind} */ static void asyncMemcpy(long dst, long src, long count, CudaMemcpyKind kind) { asyncMemcpy(dst, src, count, kind, DEFAULT_STREAM); } /** * Sets count bytes starting at the memory area pointed to by dst, with value. * The operation has completed when this returns, but it could overlap with operations occurring * on other streams. * @param dst - Destination memory address * @param value - Byte value to set dst with * @param count - Size in bytes to set */ public static native void memset(long dst, byte value, long count) throws CudaException; /** * Sets count bytes starting at the memory area pointed to by dst, with value. * The operation has not necessarily completed when this returns, but it could overlap with * operations occurring on other streams. * @param dst - Destination memory address * @param value - Byte value to set dst with * @param count - Size in bytes to set */ public static native void asyncMemset(long dst, byte value, long count) throws CudaException; /** * Get the id of the current device. * @return the id of the current device * @throws CudaException on any error */ public static native int getDevice() throws CudaException; /** * Get the device count. 
* @return returns the number of compute-capable devices * @throws CudaException on any error */ public static native int getDeviceCount() throws CudaException; /** * Set the id of the current device. * <p>Note this is relative to CUDA_SET_VISIBLE_DEVICES, e.g. if * CUDA_SET_VISIBLE_DEVICES=1,0, and you call setDevice(0), you will get device 1. * <p>Note if RMM has been initialized and the requested device ID does not * match the device used to initialize RMM then this will throw an error. * @throws CudaException on any error */ public static native void setDevice(int device) throws CudaException, CudfException; /** * Set the device for this thread to the appropriate one. Java loves threads, but cuda requires * each thread to have the device set explicitly or it falls back to CUDA_VISIBLE_DEVICES. Most * JNI calls through the cudf API will do this for you, but if you are writing your own JNI * calls that extend cudf you might want to call this before calling into your JNI APIs to * ensure that the device is set correctly. * @throws CudaException on any error */ public static native void autoSetDevice() throws CudaException; /** * Get the CUDA Driver version, which is the latest version of CUDA supported by the driver. * The version is returned as (1000 major + 10 minor). For example, CUDA 9.2 would be * represented by 9020. If no driver is installed,then 0 is returned as the driver version. * * @return the CUDA driver version * @throws CudaException on any error */ public static native int getDriverVersion() throws CudaException; /** * Get the CUDA Runtime version of the current CUDA Runtime instance. The version is returned * as (1000 major + 10 minor). For example, CUDA 9.2 would be represented by 9020. * * @return the CUDA Runtime version * @throws CudaException on any error */ public static native int getRuntimeVersion() throws CudaException; /** * Gets the CUDA device compute mode of the current device. * * @return the value of cudaComputeMode * @throws CudaException on any error */ static native int getNativeComputeMode() throws CudaException; /** * Gets the major CUDA compute capability of the current device. * * For reference: https://developer.nvidia.com/cuda-gpus * Hardware Generation Compute Capability * Ampere 8.x * Turing 7.5 * Volta 7.0, 7.2 * Pascal 6.x * Maxwell 5.x * Kepler 3.x * Fermi 2.x * * @return The Major compute capability version number of the current CUDA device * @throws CudaException on any error */ public static native int getComputeCapabilityMajor() throws CudaException; /** * Gets the minor CUDA compute capability of the current device. * * For reference: https://developer.nvidia.com/cuda-gpus * Hardware Generation Compute Capability * Ampere 8.x * Turing 7.5 * Volta 7.0, 7.2 * Pascal 6.x * Maxwell 5.x * Kepler 3.x * Fermi 2.x * * @return The Minor compute capability version number of the current CUDA device * @throws CudaException on any error */ public static native int getComputeCapabilityMinor() throws CudaException; /** * Calls cudaFree(0). 
This can be used to initialize the GPU after a setDevice() * @throws CudaException on any error */ public static native void freeZero() throws CudaException; /** * Create a CUDA stream * @param isNonBlocking whether stream should be non-blocking with respect to the default stream * @return handle to a CUDA stream * @throws CudaException on any error */ static native long createStream(boolean isNonBlocking) throws CudaException; /** * Destroy a CUDA stream * @param stream handle to the CUDA stream to destroy * @throws CudaException on any error */ static native void destroyStream(long stream) throws CudaException; /** * Have this stream not execute new work until the work recorded in event completes. * @param stream the stream handle. * @param event the event handle. */ static native void streamWaitEvent(long stream, long event) throws CudaException; /** * Block the thread until the pending execution on the stream completes * @param stream the stream handle * @throws CudaException on any error. */ static native void streamSynchronize(long stream) throws CudaException; /** * Create a CUDA event * @param enableTiming true if timing should be enabled. * @param blockingSync true if blocking sync should be enabled. * @return handle to a CUDA event * @throws CudaException on any error */ static native long createEvent(boolean enableTiming, boolean blockingSync) throws CudaException; /** * Destroy a CUDA event * @param event handle to the CUDA event to destroy * @throws CudaException on any error */ static native void destroyEvent(long event) throws CudaException; /** * Check to see if the event happened or not. * @param event the event handle * @return true the event finished else false. * @throws CudaException on any error. */ static native boolean eventQuery(long event) throws CudaException; /** * Reset the state of this event to be what is on the stream right now. * @param event the event handle * @param stream the stream handle * @throws CudaException on any error. */ static native void eventRecord(long event, long stream) throws CudaException; /** * Block the thread until the execution recorded in the event is complete. * @param event the event handle * @throws CudaException on any error. */ static native void eventSynchronize(long event) throws CudaException; /** * Copies bytes between buffers using the specified CUDA stream. * The copy has completed when this returns, but the memory copy could overlap with * operations occurring on other streams. * Specifying pointers that do not match the copy direction results in undefined behavior. * @param dst destination memory address * @param src source memory address * @param count size in bytes to copy * @param kind direction of transfer. {@link CudaMemcpyKind} * @param stream CUDA stream to use for the copy */ static void memcpy(long dst, long src, long count, CudaMemcpyKind kind, Stream stream) { memcpyOnStream(dst, src, count, kind.getValue(), stream.getStream()); } private static native void memcpyOnStream(long dst, long src, long count, int kind, long stream) throws CudaException; /** * Copies bytes between buffers using the specified CUDA stream. * The copy has not necessarily completed when this returns, but the memory copy could * overlap with operations occurring on other streams. * Specifying pointers that do not match the copy direction results in undefined behavior. * @param dst destination memory address * @param src source memory address * @param count size in bytes to copy * @param kind direction of transfer. 
{@link CudaMemcpyKind} * @param stream CUDA stream to use for the copy */ static void asyncMemcpy(long dst, long src, long count, CudaMemcpyKind kind, Stream stream) { asyncMemcpyOnStream(dst, src, count, kind.getValue(), stream.getStream()); } private static native void asyncMemcpyOnStream(long dst, long src, long count, int kind, long stream) throws CudaException; /** * This should only be used for tests, to enable or disable tests if the current environment * is not compatible with this version of the library. Currently it only does some very * basic checks, but these may be expanded in the future depending on needs. * @return true if it is compatible else false. */ public static synchronized boolean isEnvCompatibleForTesting() { if (isCompat == null) { if (NativeDepsLoader.libraryLoaded()) { try { int device = getDevice(); if (device >= 0) { isCompat = true; return isCompat; } } catch (Throwable e) { log.error("Error trying to detect device", e); } } isCompat = false; } return isCompat; } /** * Whether per-thread default stream is enabled. */ public static native boolean isPtdsEnabled(); /** * Copy data from multiple device buffer sources to multiple device buffer destinations. * For each buffer to copy there is a corresponding entry in the destination address, source * address, and copy size vectors. * @param destAddrs vector of device destination addresses * @param srcAddrs vector of device source addresses * @param copySizes vector of copy sizes * @param stream CUDA stream to use for the copy */ public static void multiBufferCopyAsync(long [] destAddrs, long [] srcAddrs, long [] copySizes, Stream stream) { // Temporary sub-par stand-in for a multi-buffer copy CUDA kernel assert(destAddrs.length == srcAddrs.length); assert(copySizes.length == destAddrs.length); try (NvtxRange copyRange = new NvtxRange("multiBufferCopyAsync", NvtxColor.CYAN)){ for (int i = 0; i < destAddrs.length; i++) { asyncMemcpy(destAddrs[i], srcAddrs[i], copySizes[i], CudaMemcpyKind.DEVICE_TO_DEVICE, stream); } } } /** * Begins an Nsight profiling session, if a profiler is currently attached. * @note if a profiler session has a already started, `profilerStart` has * no effect. */ public static native void profilerStart(); /** * Stops an active Nsight profiling session. * @note if a profiler session isn't active, `profilerStop` has * no effect. */ public static native void profilerStop(); /** * Synchronizes the whole device using cudaDeviceSynchronize. * @note this is very expensive and should almost never be used */ public static native void deviceSynchronize(); }
0
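A small sketch tying the stream and event APIs above together:

// Record an event on one non-blocking stream and make another stream wait on it.
try (Cuda.Stream producer = new Cuda.Stream(true);
     Cuda.Stream consumer = new Cuda.Stream(true);
     Cuda.Event ready = new Cuda.Event()) {
  // ... enqueue async work on `producer`, e.g. Cuda.asyncMemcpy(..., producer) ...
  ready.record(producer);  // capture the producer's pending work
  consumer.waitOn(ready);  // consumer defers new work until the event completes
  consumer.sync();         // block this thread until the consumer drains
}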
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/DType.java
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.rapids.cudf; import java.math.BigDecimal; import java.util.EnumSet; import java.util.Objects; public final class DType { public static final int DECIMAL32_MAX_PRECISION = 9; public static final int DECIMAL64_MAX_PRECISION = 18; public static final int DECIMAL128_MAX_PRECISION = 38; /* enum representing various types. Whenever a new non-decimal type is added please make sure below sections are updated as well: 1. Create a singleton object of the new type. 2. Update SINGLETON_DTYPE_LOOKUP to reflect new type. The order should be maintained between DTypeEnum and SINGLETON_DTYPE_LOOKUP */ public enum DTypeEnum { EMPTY(0, 0), INT8(1, 1), INT16(2, 2), INT32(4, 3), INT64(8, 4), UINT8(1, 5), UINT16(2, 6), UINT32(4, 7), UINT64(8, 8), FLOAT32(4, 9), FLOAT64(8, 10), /** * Byte wise true non-0/false 0. In general true will be 1. */ BOOL8(1, 11), /** * Days since the UNIX epoch */ TIMESTAMP_DAYS(4, 12), /** * s since the UNIX epoch */ TIMESTAMP_SECONDS(8, 13), /** * ms since the UNIX epoch */ TIMESTAMP_MILLISECONDS(8, 14), /** * microseconds since the UNIX epoch */ TIMESTAMP_MICROSECONDS(8, 15), /** * ns since the UNIX epoch */ TIMESTAMP_NANOSECONDS(8, 16), DURATION_DAYS(4, 17), DURATION_SECONDS(8, 18), DURATION_MILLISECONDS(8, 19), DURATION_MICROSECONDS(8, 20), DURATION_NANOSECONDS(8, 21), //DICTIONARY32(4, 22), STRING(0, 23), LIST(0, 24), DECIMAL32(4, 25), DECIMAL64(8, 26), DECIMAL128(16, 27), STRUCT(0, 28); final int sizeInBytes; final int nativeId; DTypeEnum(int sizeInBytes, int nativeId) { this.sizeInBytes = sizeInBytes; this.nativeId = nativeId; } public int getNativeId() { return nativeId; } public boolean isDecimalType() { return DType.DECIMALS.contains(this); } } final DTypeEnum typeId; private final int scale; private DType(DTypeEnum id) { typeId = id; scale = 0; } /** * Constructor for Decimal Type * @param id Enum representing data type. 
* @param decimalScale Scale of fixed point decimal type */ private DType(DTypeEnum id, int decimalScale) { typeId = id; scale = decimalScale; } public static final DType EMPTY = new DType(DTypeEnum.EMPTY); public static final DType INT8 = new DType(DTypeEnum.INT8); public static final DType INT16 = new DType(DTypeEnum.INT16); public static final DType INT32 = new DType(DTypeEnum.INT32); public static final DType INT64 = new DType(DTypeEnum.INT64); public static final DType UINT8 = new DType(DTypeEnum.UINT8); public static final DType UINT16 = new DType(DTypeEnum.UINT16); public static final DType UINT32 = new DType(DTypeEnum.UINT32); public static final DType UINT64 = new DType(DTypeEnum.UINT64); public static final DType FLOAT32 = new DType(DTypeEnum.FLOAT32); public static final DType FLOAT64 = new DType(DTypeEnum.FLOAT64); public static final DType BOOL8 = new DType(DTypeEnum.BOOL8); public static final DType TIMESTAMP_DAYS = new DType(DTypeEnum.TIMESTAMP_DAYS); public static final DType TIMESTAMP_SECONDS = new DType(DTypeEnum.TIMESTAMP_SECONDS); public static final DType TIMESTAMP_MILLISECONDS = new DType(DTypeEnum.TIMESTAMP_MILLISECONDS); public static final DType TIMESTAMP_MICROSECONDS = new DType(DTypeEnum.TIMESTAMP_MICROSECONDS); public static final DType TIMESTAMP_NANOSECONDS = new DType(DTypeEnum.TIMESTAMP_NANOSECONDS); public static final DType DURATION_DAYS = new DType(DTypeEnum.DURATION_DAYS); public static final DType DURATION_SECONDS = new DType(DTypeEnum.DURATION_SECONDS); public static final DType DURATION_MILLISECONDS = new DType(DTypeEnum.DURATION_MILLISECONDS); public static final DType DURATION_MICROSECONDS = new DType(DTypeEnum.DURATION_MICROSECONDS); public static final DType DURATION_NANOSECONDS = new DType(DTypeEnum.DURATION_NANOSECONDS); public static final DType STRING = new DType(DTypeEnum.STRING); public static final DType LIST = new DType(DTypeEnum.LIST); public static final DType STRUCT = new DType(DTypeEnum.STRUCT); /* This is used in fromNative method to return singleton object for non-decimal types. Please make sure the order here is same as that of DTypeEnum. Whenever a new non-decimal type is added in DTypeEnum, this array needs to be updated as well.*/ private static final DType[] SINGLETON_DTYPE_LOOKUP = new DType[]{ EMPTY, INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, FLOAT32, FLOAT64, BOOL8, TIMESTAMP_DAYS, TIMESTAMP_SECONDS, TIMESTAMP_MILLISECONDS, TIMESTAMP_MICROSECONDS, TIMESTAMP_NANOSECONDS, DURATION_DAYS, DURATION_SECONDS, DURATION_MILLISECONDS, DURATION_MICROSECONDS, DURATION_NANOSECONDS, null, // DICTIONARY32 STRING, LIST, null, // DECIMAL32 null, // DECIMAL64 null, // DECIMAL128 STRUCT }; /** * Returns max precision for Decimal Type. * @return max precision this Decimal Type can hold */ public int getDecimalMaxPrecision() { if (!isDecimalType()) { throw new IllegalArgumentException("not a decimal type: " + this); } if (typeId == DTypeEnum.DECIMAL32) return DECIMAL32_MAX_PRECISION; if (typeId == DTypeEnum.DECIMAL64) return DECIMAL64_MAX_PRECISION; return DType.DECIMAL128_MAX_PRECISION; } /** * Get the number of decimal places needed to hold the Integral Type. * NOTE: this method is NOT for Decimal Type but for Integral Type. 
* @return the minimum decimal precision (places) for Integral Type */ public int getPrecisionForInt() { // -128 to 127 if (typeId == DTypeEnum.INT8) return 3; // -32768 to 32767 if (typeId == DTypeEnum.INT16) return 5; // -2147483648 to 2147483647 if (typeId == DTypeEnum.INT32) return 10; // -9223372036854775808 to 9223372036854775807 if (typeId == DTypeEnum.INT64) return 19; throw new IllegalArgumentException("not an integral type: " + this); } /** * This only works for fixed width types. Variable width types like strings the value is * undefined and should be ignored. * * @return size of type in bytes. */ public int getSizeInBytes() { return typeId.sizeInBytes; } /** * Returns scale for Decimal Type. * @return scale base-10 exponent to multiply the unscaled value to produce the decimal value. * Example: Consider unscaled value = 123456 * if scale = -2, decimal value = 123456 * 10^-2 = 1234.56 * if scale = 2, decimal value = 123456 * 10^2 = 12345600 */ public int getScale() { return scale; } /** * Return enum for this DType * @return DTypeEnum */ public DTypeEnum getTypeId() { return typeId; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DType type = (DType) o; return scale == type.scale && typeId == type.typeId; } @Override public int hashCode() { return Objects.hash(typeId, scale); } @Override public String toString() { if (isDecimalType()) { return typeId + " scale:" + scale; } else { return String.valueOf(typeId); } } /** * Factory method for non-decimal DType instances. * @param dt enum corresponding to datatype. * @return DType */ public static DType create(DTypeEnum dt) { if (DType.DECIMALS.contains(dt)) { throw new IllegalArgumentException("Could not create a Decimal DType without scale"); } return DType.fromNative(dt.nativeId, 0); } /** * Factory method specialized for decimal DType instances. * @param dt enum corresponding to datatype. * @param scale base-10 exponent to multiply the unscaled value to produce the decimal value. 
* Example: Consider unscaled value = 123456 * if scale = -2, decimal value = 123456 * 10^-2 = 1234.56 * if scale = 2, decimal value = 123456 * 10^2 = 12345600 * @return DType */ public static DType create(DTypeEnum dt, int scale) { if (!DType.DECIMALS.contains(dt)) { throw new IllegalArgumentException("Could not create a non-Decimal DType with scale"); } return DType.fromNative(dt.nativeId, scale); } /** * Factory method for DType instances * @param nativeId nativeId of DataTypeEnun * @param scale base-10 exponent to multiply the unscaled value to produce the decimal value * Example: Consider unscaled value = 123456 * if scale = -2, decimal value = 123456 * 10^-2 = 1234.56 * if scale = 2, decimal value = 123456 * 10^2 = 12345600 * @return DType */ public static DType fromNative(int nativeId, int scale) { if (nativeId >=0 && nativeId < SINGLETON_DTYPE_LOOKUP.length) { DType ret = SINGLETON_DTYPE_LOOKUP[nativeId]; if (ret != null) { assert ret.typeId.nativeId == nativeId : "Something went wrong and it looks like " + "SINGLETON_DTYPE_LOOKUP is out of sync"; return ret; } if (nativeId == DTypeEnum.DECIMAL32.nativeId) { if (-scale > DECIMAL32_MAX_PRECISION) { throw new IllegalArgumentException( "Scale " + (-scale) + " exceeds DECIMAL32_MAX_PRECISION " + DECIMAL32_MAX_PRECISION); } return new DType(DTypeEnum.DECIMAL32, scale); } if (nativeId == DTypeEnum.DECIMAL64.nativeId) { if (-scale > DECIMAL64_MAX_PRECISION) { throw new IllegalArgumentException( "Scale " + (-scale) + " exceeds DECIMAL64_MAX_PRECISION " + DECIMAL64_MAX_PRECISION); } return new DType(DTypeEnum.DECIMAL64, scale); } if (nativeId == DTypeEnum.DECIMAL128.nativeId) { if (-scale > DECIMAL128_MAX_PRECISION) { throw new IllegalArgumentException( "Scale " + (-scale) + " exceeds DECIMAL128_MAX_PRECISION " + DECIMAL128_MAX_PRECISION); } return new DType(DTypeEnum.DECIMAL128, scale); } } throw new IllegalArgumentException("Could not translate " + nativeId + " into a DType"); } /** * Create decimal-like DType using precision and scale of Java BigDecimal. * * @param dec BigDecimal * @return DType */ public static DType fromJavaBigDecimal(BigDecimal dec) { // Notice: Compared to scale of Java BigDecimal, scale of libcudf works in opposite. // So, we negate the scale value before passing it into constructor. 
if (dec.precision() <= DECIMAL32_MAX_PRECISION) { return new DType(DTypeEnum.DECIMAL32, -dec.scale()); } else if (dec.precision() <= DECIMAL64_MAX_PRECISION) { return new DType(DTypeEnum.DECIMAL64, -dec.scale()); } else if (dec.precision() <= DECIMAL128_MAX_PRECISION) { return new DType(DTypeEnum.DECIMAL128, -dec.scale()); } throw new IllegalArgumentException("Precision " + dec.precision() + " exceeds max precision cuDF can support " + DECIMAL128_MAX_PRECISION); } /** * Returns true for timestamps with time level resolution, as opposed to day level resolution */ public boolean hasTimeResolution() { return TIME_RESOLUTION.contains(this.typeId); } /** * Returns true if this type is backed by int type * Namely this method will return true for the following types * DType.INT32, * DType.UINT32, * DType.DURATION_DAYS, * DType.TIMESTAMP_DAYS, * DType.DECIMAL32 */ public boolean isBackedByInt() { return INTS.contains(this.typeId); } /** * Returns true if this type is backed by long type * Namely this method will return true for the following types * DType.INT64, * DType.UINT64, * DType.DURATION_SECONDS, * DType.DURATION_MILLISECONDS, * DType.DURATION_MICROSECONDS, * DType.DURATION_NANOSECONDS, * DType.TIMESTAMP_SECONDS, * DType.TIMESTAMP_MILLISECONDS, * DType.TIMESTAMP_MICROSECONDS, * DType.TIMESTAMP_NANOSECONDS, * DType.DECIMAL64 */ public boolean isBackedByLong() { return LONGS.contains(this.typeId); } /** * Returns true if this type is backed by short type * Namely this method will return true for the following types * DType.INT16, * DType.UINT16 */ public boolean isBackedByShort() { return SHORTS.contains(this.typeId); } /** * Returns true if this type is backed by byte type * Namely this method will return true for the following types * DType.INT8, * DType.UINT8, * DType.BOOL8 */ public boolean isBackedByByte() { return BYTES.contains(this.typeId); } /** * Returns true if this type is of decimal type * Namely this method will return true for the following types * DType.DECIMAL32, * DType.DECIMAL64 */ public boolean isDecimalType() { return this.typeId.isDecimalType(); } /** * Returns true for duration types */ public boolean isDurationType() { return DURATION_TYPE.contains(this.typeId); } /** * Returns true for strictly Integer types not a type backed by * ints */ public boolean isIntegral() { return INTEGRALS.contains(this.typeId); } /** * Returns true for nested types */ public boolean isNestedType() { return NESTED_TYPE.contains(this.typeId); } @Deprecated public boolean isTimestamp() { return TIMESTAMPS.contains(this.typeId); } public boolean isTimestampType() { return TIMESTAMPS.contains(this.typeId); } /** * Returns true if the type uses a vector of offsets */ public boolean hasOffsets() { return OFFSETS_TYPE.contains(this.typeId); } private static final EnumSet<DTypeEnum> TIMESTAMPS = EnumSet.of( DTypeEnum.TIMESTAMP_DAYS, DTypeEnum.TIMESTAMP_SECONDS, DTypeEnum.TIMESTAMP_MILLISECONDS, DTypeEnum.TIMESTAMP_MICROSECONDS, DTypeEnum.TIMESTAMP_NANOSECONDS); private static final EnumSet<DTypeEnum> TIME_RESOLUTION = EnumSet.of( DTypeEnum.TIMESTAMP_SECONDS, DTypeEnum.TIMESTAMP_MILLISECONDS, DTypeEnum.TIMESTAMP_MICROSECONDS, DTypeEnum.TIMESTAMP_NANOSECONDS); private static final EnumSet<DTypeEnum> DURATION_TYPE = EnumSet.of( DTypeEnum.DURATION_DAYS, DTypeEnum.DURATION_MICROSECONDS, DTypeEnum.DURATION_MILLISECONDS, DTypeEnum.DURATION_NANOSECONDS, DTypeEnum.DURATION_SECONDS ); private static final EnumSet<DTypeEnum> LONGS = EnumSet.of( DTypeEnum.INT64, DTypeEnum.UINT64, DTypeEnum.DURATION_SECONDS, 
DTypeEnum.DURATION_MILLISECONDS, DTypeEnum.DURATION_MICROSECONDS, DTypeEnum.DURATION_NANOSECONDS, DTypeEnum.TIMESTAMP_SECONDS, DTypeEnum.TIMESTAMP_MILLISECONDS, DTypeEnum.TIMESTAMP_MICROSECONDS, DTypeEnum.TIMESTAMP_NANOSECONDS, // The unscaledValue of DECIMAL64 is of type INT64, which means it can be fetched by getLong. DTypeEnum.DECIMAL64 ); private static final EnumSet<DTypeEnum> INTS = EnumSet.of( DTypeEnum.INT32, DTypeEnum.UINT32, DTypeEnum.DURATION_DAYS, DTypeEnum.TIMESTAMP_DAYS, // The unscaledValue of DECIMAL32 is of type INT32, which means it can be fetched by getInt. DTypeEnum.DECIMAL32 ); private static final EnumSet<DTypeEnum> SHORTS = EnumSet.of( DTypeEnum.INT16, DTypeEnum.UINT16 ); private static final EnumSet<DTypeEnum> BYTES = EnumSet.of( DTypeEnum.INT8, DTypeEnum.UINT8, DTypeEnum.BOOL8 ); private static final EnumSet<DTypeEnum> DECIMALS = EnumSet.of( DTypeEnum.DECIMAL32, DTypeEnum.DECIMAL64, DTypeEnum.DECIMAL128 ); private static final EnumSet<DTypeEnum> NESTED_TYPE = EnumSet.of( DTypeEnum.LIST, DTypeEnum.STRUCT ); private static final EnumSet<DTypeEnum> OFFSETS_TYPE = EnumSet.of( DTypeEnum.STRING, DTypeEnum.LIST ); private static final EnumSet<DTypeEnum> INTEGRALS = EnumSet.of( DTypeEnum.INT8, DTypeEnum.INT16, DTypeEnum.INT32, DTypeEnum.INT64, DTypeEnum.UINT8, DTypeEnum.UINT16, DTypeEnum.UINT32, DTypeEnum.UINT64 ); }
0
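A worked example of the negated-scale convention described in fromJavaBigDecimal:

// Java BigDecimal scale and libcudf scale have opposite signs.
BigDecimal dec = new BigDecimal("1234.56");  // precision 6, Java scale 2
DType dt = DType.fromJavaBigDecimal(dec);    // precision 6 <= 9, so DECIMAL32
assert dt.getTypeId() == DType.DTypeEnum.DECIMAL32;
assert dt.getScale() == -2;                  // unscaled 123456 * 10^-2 == 1234.56
// Building the same type directly:
DType same = DType.create(DType.DTypeEnum.DECIMAL32, -2);
assert dt.equals(same);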
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/HashJoin.java
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class represents a hash table built from the join keys of the right-side table for a
 * join operation. This hash table can then be reused across a series of left probe tables
 * to compute gather maps for joins more efficiently when the right-side table is not changing.
 * It can also be used to query the output row count of a join and then pass that result to the
 * operation that generates the join gather maps to avoid redundant computation when the output
 * row count must be checked before manifesting the join gather maps.
 */
public class HashJoin implements AutoCloseable {
  static {
    NativeDepsLoader.loadNativeDeps();
  }

  private static final Logger log = LoggerFactory.getLogger(HashJoin.class);

  private static class HashJoinCleaner extends MemoryCleaner.Cleaner {
    private Table buildKeys;
    private long nativeHandle;

    HashJoinCleaner(Table buildKeys, long nativeHandle) {
      this.buildKeys = buildKeys;
      this.nativeHandle = nativeHandle;
      addRef();
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      long origAddress = nativeHandle;
      boolean neededCleanup = nativeHandle != 0;
      if (neededCleanup) {
        try {
          destroy(nativeHandle);
          buildKeys.close();
          buildKeys = null;
        } finally {
          nativeHandle = 0;
        }
        if (logErrorIfNotClean) {
          log.error("A HASH TABLE WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
        }
      }
      return neededCleanup;
    }

    @Override
    public boolean isClean() {
      return nativeHandle == 0;
    }
  }

  private final HashJoinCleaner cleaner;
  private final boolean compareNulls;
  private boolean isClosed = false;

  /**
   * Construct a hash table for a join from a table representing the join key columns from the
   * right-side table in the join. The resulting instance must be closed to release the
   * GPU resources associated with the instance.
   * @param buildKeys table view containing the join keys for the right-side join table
   * @param compareNulls true if null key values should match otherwise false
   */
  public HashJoin(Table buildKeys, boolean compareNulls) {
    this.compareNulls = compareNulls;
    Table buildTable = new Table(buildKeys.getColumns());
    try {
      long handle = create(buildTable.getNativeView(), compareNulls);
      this.cleaner = new HashJoinCleaner(buildTable, handle);
      MemoryCleaner.register(this, cleaner);
    } catch (Throwable t) {
      try {
        buildTable.close();
      } catch (Throwable t2) {
        t.addSuppressed(t2);
      }
      throw t;
    }
  }

  @Override
  public synchronized void close() {
    cleaner.delRef();
    if (isClosed) {
      cleaner.logRefCountDebug("double free " + this);
      throw new IllegalStateException("Close called too many times " + this);
    }
    cleaner.clean(false);
    isClosed = true;
  }

  long getNativeView() {
    return cleaner.nativeHandle;
  }

  /** Get the number of join key columns for the table that was used to generate the hash table. */
  public long getNumberOfColumns() {
    return cleaner.buildKeys.getNumberOfColumns();
  }

  /** Returns true if the hash table was built to match on nulls otherwise false. */
  public boolean getCompareNulls() {
    return compareNulls;
  }

  private static native long create(long tableView, boolean nullEqual);

  private static native void destroy(long handle);
}
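
// ---- Illustrative usage (editor addition, not part of the original file) ----
// A minimal sketch of the reuse pattern described in the class javadoc: build the
// hash table once, then probe it with several left tables. The probe-side methods
// Table.leftJoinRowCount(HashJoin) and Table.leftJoinGatherMaps(HashJoin, long)
// are assumptions about the public API, not verified signatures.
class HashJoinUsageSketch {
  static void probeAll(Table rightKeys, Table[] leftKeyTables) {
    try (HashJoin rightHash = new HashJoin(rightKeys, /* compareNulls */ false)) {
      for (Table leftKeys : leftKeyTables) {
        long outputRows = leftKeys.leftJoinRowCount(rightHash);               // assumed API
        GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightHash, outputRows); // assumed API
        try {
          // ... gather the joined rows using the maps ...
        } finally {
          for (GatherMap map : maps) {
            map.close();
          }
        }
      }
    }
  }
}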
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/UnsafeMemoryAccessor.java
/*
 *
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.reflect.Field;

/**
 * Unsafe memory accessor for accessing memory on the host.
 */
class UnsafeMemoryAccessor {

  public static final long BYTE_ARRAY_OFFSET;
  public static final long SHORT_ARRAY_OFFSET;
  public static final long INT_ARRAY_OFFSET;
  public static final long LONG_ARRAY_OFFSET;
  public static final long FLOAT_ARRAY_OFFSET;
  public static final long DOUBLE_ARRAY_OFFSET;

  private static final sun.misc.Unsafe UNSAFE;

  /**
   * Limits the number of bytes to copy per {@link sun.misc.Unsafe#copyMemory(long, long, long)} to
   * allow safepoint polling during a large copy.
   */
  private static final long UNSAFE_COPY_THRESHOLD = 1024L * 1024L;

  private static Logger log = LoggerFactory.getLogger(UnsafeMemoryAccessor.class);

  static {
    sun.misc.Unsafe unsafe = null;
    try {
      Field unsafeField = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
      unsafeField.setAccessible(true);
      unsafe = (sun.misc.Unsafe) unsafeField.get(null);
      BYTE_ARRAY_OFFSET = unsafe.arrayBaseOffset(byte[].class);
      SHORT_ARRAY_OFFSET = unsafe.arrayBaseOffset(short[].class);
      INT_ARRAY_OFFSET = unsafe.arrayBaseOffset(int[].class);
      LONG_ARRAY_OFFSET = unsafe.arrayBaseOffset(long[].class);
      FLOAT_ARRAY_OFFSET = unsafe.arrayBaseOffset(float[].class);
      DOUBLE_ARRAY_OFFSET = unsafe.arrayBaseOffset(double[].class);
    } catch (Throwable t) {
      log.error("Failed to get unsafe object, got this error: ", t);
      UNSAFE = null;
      throw new NullPointerException("Failed to get unsafe object, got this error: " +
          t.getMessage());
    }
    UNSAFE = unsafe;
  }

  /**
   * Get the system memory page size.
   * @return system memory page size in bytes
   */
  public static int pageSize() {
    return UNSAFE.pageSize();
  }

  /**
   * Allocate bytes on host
   * @param bytes - number of bytes to allocate
   * @return - allocated address
   */
  public static long allocate(long bytes) {
    return UNSAFE.allocateMemory(bytes);
  }

  /**
   * Free memory at that location
   * @param address - memory location
   */
  public static void free(long address) {
    UNSAFE.freeMemory(address);
  }

  /**
   * Sets the values at this address repeatedly
   * @param address - memory location
   * @param size - number of bytes to set
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setMemory(long address, long size, byte value) {
    UNSAFE.setMemory(address, size, value);
  }

  /**
   * Sets the Byte value at that address
   * @param address - memory address
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setByte(long address, byte value) {
    UNSAFE.putByte(address, value);
  }

  /**
   * Sets an array of bytes.
   * @param address - memory address
   * @param values to be set
   * @param offset index into values to start at.
   * @param len the number of bytes to copy
   * @throws IndexOutOfBoundsException
   */
  public static void setBytes(long address, byte[] values, long offset, long len) {
    copyMemory(values, UnsafeMemoryAccessor.BYTE_ARRAY_OFFSET + offset,
        null, address, len);
  }

  /**
   * Returns the Byte value at this address
   * @param address - memory address
   * @return - value
   * @throws IndexOutOfBoundsException
   */
  public static byte getByte(long address) {
    return UNSAFE.getByte(address);
  }

  /**
   * Copy out an array of bytes.
   * @param dst where to write the data
   * @param dstOffset index into values to start writing at.
   * @param address src memory address
   * @param len the number of bytes to copy
   * @throws IndexOutOfBoundsException
   */
  public static void getBytes(byte[] dst, long dstOffset, long address, long len) {
    copyMemory(null, address,
        dst, UnsafeMemoryAccessor.BYTE_ARRAY_OFFSET + dstOffset, len);
  }

  /**
   * Returns the Integer value at this address
   * @param address - memory address
   * @return - value
   * @throws IndexOutOfBoundsException
   */
  public static int getInt(long address) {
    return UNSAFE.getInt(address);
  }

  /**
   * Sets the Integer value at that address
   * @param address - memory address
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setInt(long address, int value) {
    UNSAFE.putInt(address, value);
  }

  /**
   * Sets an array of ints.
   * @param address memory address
   * @param values to be set
   * @param offset index into values to start at.
   * @param len the number of ints to copy
   * @throws IndexOutOfBoundsException
   */
  public static void setInts(long address, int[] values, long offset, long len) {
    copyMemory(values, UnsafeMemoryAccessor.INT_ARRAY_OFFSET + (offset * 4),
        null, address, len * 4);
  }

  /**
   * Sets the Long value at that address
   * @param address - memory address
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setLong(long address, long value) {
    UNSAFE.putLong(address, value);
  }

  /**
   * Sets an array of longs.
   * @param address memory address
   * @param values to be set
   * @param offset index into values to start at
   * @param len the number of longs to copy
   * @throws IndexOutOfBoundsException
   */
  public static void setLongs(long address, long[] values, long offset, long len) {
    copyMemory(values, UnsafeMemoryAccessor.LONG_ARRAY_OFFSET + (offset * 8),
        null, address, len * 8);
  }

  /**
   * Returns the Long value at this address
   * @param address - memory address
   * @return - value
   * @throws IndexOutOfBoundsException
   */
  public static long getLong(long address) {
    return UNSAFE.getLong(address);
  }

  /**
   * Copy out an array of longs.
   * @param dst where to write the data
   * @param dstIndex index into values to start writing at.
   * @param address src memory address
   * @param count the number of longs to copy
   * @throws IndexOutOfBoundsException
   */
  public static void getLongs(long[] dst, long dstIndex, long address, int count) {
    copyMemory(null, address,
        dst, UnsafeMemoryAccessor.LONG_ARRAY_OFFSET + (dstIndex * 8), count * 8);
  }

  /**
   * Returns the Short value at this address
   * @param address - memory address
   * @return - value
   * @throws IndexOutOfBoundsException
   */
  public static short getShort(long address) {
    return UNSAFE.getShort(address);
  }

  /**
   * Sets the Short value at that address
   * @param address - memory address
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setShort(long address, short value) {
    UNSAFE.putShort(address, value);
  }

  /**
   * Sets an array of shorts.
   * @param address memory address
   * @param values to be set
   * @param offset index into values to start at
   * @param len the number of shorts to copy
   * @throws IndexOutOfBoundsException
   */
  public static void setShorts(long address, short[] values, long offset, long len) {
    copyMemory(values, UnsafeMemoryAccessor.SHORT_ARRAY_OFFSET + (offset * 2),
        null, address, len * 2);
  }

  /**
   * Sets the Double value at that address
   * @param address - memory address
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setDouble(long address, double value) {
    UNSAFE.putDouble(address, value);
  }

  /**
   * Sets an array of doubles.
   * @param address memory address
   * @param values to be set
   * @param offset index into values to start at
   * @param len the number of doubles to copy
   * @throws IndexOutOfBoundsException
   */
  public static void setDoubles(long address, double[] values, long offset, long len) {
    copyMemory(values, UnsafeMemoryAccessor.DOUBLE_ARRAY_OFFSET + (offset * 8),
        null, address, len * 8);
  }

  /**
   * Returns the Double value at this address
   * @param address - memory address
   * @return - value
   * @throws IndexOutOfBoundsException
   */
  public static double getDouble(long address) {
    return UNSAFE.getDouble(address);
  }

  /**
   * Returns the Float value at this address
   * @param address - memory address
   * @return - value
   * @throws IndexOutOfBoundsException
   */
  public static float getFloat(long address) {
    return UNSAFE.getFloat(address);
  }

  /**
   * Sets the Float value at that address
   * @param address - memory address
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setFloat(long address, float value) {
    UNSAFE.putFloat(address, value);
  }

  /**
   * Sets an array of floats.
   * @param address memory address
   * @param values to be set
   * @param offset the index in values to start at
   * @param len the number of floats to copy
   * @throws IndexOutOfBoundsException
   */
  public static void setFloats(long address, float[] values, long offset, long len) {
    copyMemory(values, UnsafeMemoryAccessor.FLOAT_ARRAY_OFFSET + (offset * 4),
        null, address, len * 4);
  }

  /**
   * Returns the Boolean value at this address
   * @param address - memory address
   * @return - value
   * @throws IndexOutOfBoundsException
   */
  public static boolean getBoolean(long address) {
    return getByte(address) != 0;
  }

  /**
   * Sets the Boolean value at that address
   * @param address - memory address
   * @param value - value to be set
   * @throws IndexOutOfBoundsException
   */
  public static void setBoolean(long address, boolean value) {
    setByte(address, (byte) (value ? 1 : 0));
  }

  /**
   * Copy memory from one address to the other.
   */
  public static void copyMemory(Object src, long srcOffset, Object dst, long dstOffset,
                                long length) {
    // Check if dstOffset is before or after srcOffset to determine if we should copy
    // forward or backwards. This is necessary in case src and dst overlap.
    if (dstOffset < srcOffset) {
      while (length > 0) {
        long size = Math.min(length, UNSAFE_COPY_THRESHOLD);
        UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size);
        length -= size;
        srcOffset += size;
        dstOffset += size;
      }
    } else {
      srcOffset += length;
      dstOffset += length;
      while (length > 0) {
        long size = Math.min(length, UNSAFE_COPY_THRESHOLD);
        srcOffset -= size;
        dstOffset -= size;
        UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size);
        length -= size;
      }
    }
  }
}
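
// ---- Illustrative usage (editor addition, not part of the original file) ----
// A minimal sketch of an allocate/set/get round trip using only methods declared
// above; every allocate() must be paired with free(), even on error paths.
class UnsafeMemoryAccessorSketch {
  static int[] roundTrip(int[] values) {
    long address = UnsafeMemoryAccessor.allocate(values.length * 4L);
    try {
      UnsafeMemoryAccessor.setInts(address, values, 0, values.length);
      int[] result = new int[values.length];
      for (int i = 0; i < values.length; i++) {
        result[i] = UnsafeMemoryAccessor.getInt(address + (i * 4L));
      }
      return result;
    } finally {
      UnsafeMemoryAccessor.free(address);
    }
  }
}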
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ArrowIPCWriterOptions.java
/*
 *
 * Copyright (c) 2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Settings for writing Arrow IPC data.
 */
public class ArrowIPCWriterOptions extends WriterOptions {

  public interface DoneOnGpu {
    /**
     * A callback to indicate that the table is off of the GPU
     * and may be closed, even if all of the data is not yet written.
     * @param table the table that can be closed.
     */
    void doneWithTheGpu(Table table);
  }

  private final long size;
  private final DoneOnGpu callback;

  private ArrowIPCWriterOptions(Builder builder) {
    super(builder);
    this.size = builder.size;
    this.callback = builder.callback;
  }

  public long getMaxChunkSize() {
    return size;
  }

  public DoneOnGpu getCallback() {
    return callback;
  }

  public static class Builder extends WriterBuilder<Builder> {
    private long size = -1;
    private DoneOnGpu callback = (ignored) -> {};

    public Builder withMaxChunkSize(long size) {
      this.size = size;
      return this;
    }

    public Builder withCallback(DoneOnGpu callback) {
      if (callback == null) {
        this.callback = (ignored) -> {};
      } else {
        this.callback = callback;
      }
      return this;
    }

    /**
     * Add the name(s) for nullable column(s).
     *
     * Please note the column names of the nested struct columns should be flattened in sequence.
     * For examples,
     * <pre>
     *   A table with an int column and a struct column:
     *     ["int_col", "struct_col":{"field_1", "field_2"}]
     *   output:
     *     ["int_col", "struct_col", "field_1", "field_2"]
     *
     *   A table with an int column and a list of non-nested type column:
     *     ["int_col", "list_col":[]]
     *   output:
     *     ["int_col", "list_col"]
     *
     *   A table with an int column and a list of struct column:
     *     ["int_col", "list_struct_col":[{"field_1", "field_2"}]]
     *   output:
     *     ["int_col", "list_struct_col", "field_1", "field_2"]
     * </pre>
     *
     * @param columnNames The column names corresponding to the written table(s).
     */
    @Override
    public Builder withColumnNames(String... columnNames) {
      return super.withColumnNames(columnNames);
    }

    /**
     * Add the name(s) for non-nullable column(s).
     *
     * Please note the column names of the nested struct columns should be flattened in sequence.
     * For examples,
     * <pre>
     *   A table with an int column and a struct column:
     *     ["int_col", "struct_col":{"field_1", "field_2"}]
     *   output:
     *     ["int_col", "struct_col", "field_1", "field_2"]
     *
     *   A table with an int column and a list of non-nested type column:
     *     ["int_col", "list_col":[]]
     *   output:
     *     ["int_col", "list_col"]
     *
     *   A table with an int column and a list of struct column:
     *     ["int_col", "list_struct_col":[{"field_1", "field_2"}]]
     *   output:
     *     ["int_col", "list_struct_col", "field_1", "field_2"]
     * </pre>
     *
     * @param columnNames The column names corresponding to the written table(s).
     */
    @Override
    public Builder withNotNullableColumnNames(String... columnNames) {
      return super.withNotNullableColumnNames(columnNames);
    }

    public ArrowIPCWriterOptions build() {
      return new ArrowIPCWriterOptions(this);
    }
  }

  public static final ArrowIPCWriterOptions DEFAULT = new ArrowIPCWriterOptions(new Builder());

  public static Builder builder() {
    return new Builder();
  }
}
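
// ---- Illustrative usage (editor addition, not part of the original file) ----
// A minimal sketch of building options for the struct example in the javadoc above;
// the column names are hypothetical, and the nested field names are flattened in
// sequence exactly as the builder documentation describes.
class ArrowIPCWriterOptionsSketch {
  static ArrowIPCWriterOptions intAndStructOptions() {
    return ArrowIPCWriterOptions.builder()
        .withColumnNames("int_col", "struct_col", "field_1", "field_2")
        .withMaxChunkSize(1_000_000)
        .withCallback(table -> { /* the table is off the GPU and may be closed early */ })
        .build();
  }
}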
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ReplacePolicy.java
/*
 *
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * Policy to specify the position of replacement values relative to null rows.
 */
public enum ReplacePolicy {
  /**
   * The replacement value is the first non-null value preceding the null row.
   */
  PRECEDING(true),
  /**
   * The replacement value is the first non-null value following the null row.
   */
  FOLLOWING(false);

  ReplacePolicy(boolean isPreceding) {
    this.isPreceding = isPreceding;
  }

  final boolean isPreceding;

  /**
   * Indicate which column the replacement should happen on.
   */
  public ReplacePolicyWithColumn onColumn(int columnNumber) {
    return new ReplacePolicyWithColumn(columnNumber, this);
  }
}
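
// ---- Illustrative usage (editor addition, not part of the original file) ----
// A minimal sketch, assuming ColumnVector exposes replaceNulls(ReplacePolicy):
// PRECEDING forward-fills each null with the last non-null value before it.
class ReplacePolicySketch {
  static ColumnVector forwardFill(ColumnVector input) {
    return input.replaceNulls(ReplacePolicy.PRECEDING); // assumed API
  }
}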
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/CudaException.java
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

import java.util.HashMap;
import java.util.Map;

/**
 * Exception from the cuda language/library. Be aware that, because of how cuda does asynchronous
 * processing, exceptions from cuda can be thrown by method calls that did not cause the exception
 * to take place. These will take place on the same thread that caused the error.
 * <p>
 * Please See
 * <a href="https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__ERROR.html">the cuda docs</a>
 * for more details on how this works.
 * <p>
 * In general you can recover from cuda errors even in async calls if you make sure that you
 * don't switch between threads for different parts of processing that can be retried as a chunk.
 */
public class CudaException extends RuntimeException {
  CudaException(String message, int errorCode) {
    this(message, "No native stacktrace is available.", errorCode);
  }

  CudaException(String message, String nativeStacktrace, int errorCode) {
    super(message);
    this.nativeStacktrace = nativeStacktrace;
    cudaError = CudaError.parseErrorCode(errorCode);
  }

  CudaException(String message, String nativeStacktrace, int errorCode, Throwable cause) {
    super(message, cause);
    this.nativeStacktrace = nativeStacktrace;
    cudaError = CudaError.parseErrorCode(errorCode);
  }

  public String getNativeStacktrace() {
    return nativeStacktrace;
  }

  public CudaError getCudaError() {
    return cudaError;
  }

  private final String nativeStacktrace;
  private final CudaError cudaError;

  /**
   * The Java mirror of cudaError, which facilitates the tracking of CUDA errors in the JVM.
   */
  public enum CudaError {
    UnknownNativeError(-1), // native CUDA error type which Java doesn't have a representation
    cudaErrorInvalidValue(1),
    cudaErrorMemoryAllocation(2),
    cudaErrorInitializationError(3),
    cudaErrorCudartUnloading(4),
    cudaErrorProfilerDisabled(5),
    cudaErrorProfilerNotInitialized(6),
    cudaErrorProfilerAlreadyStarted(7),
    cudaErrorProfilerAlreadyStopped(8),
    cudaErrorInvalidConfiguration(9),
    cudaErrorInvalidPitchValue(12),
    cudaErrorInvalidSymbol(13),
    cudaErrorInvalidHostPointer(16),
    cudaErrorInvalidDevicePointer(17),
    cudaErrorInvalidTexture(18),
    cudaErrorInvalidTextureBinding(19),
    cudaErrorInvalidChannelDescriptor(20),
    cudaErrorInvalidMemcpyDirection(21),
    cudaErrorAddressOfConstant(22),
    cudaErrorTextureFetchFailed(23),
    cudaErrorTextureNotBound(24),
    cudaErrorSynchronizationError(25),
    cudaErrorInvalidFilterSetting(26),
    cudaErrorInvalidNormSetting(27),
    cudaErrorMixedDeviceExecution(28),
    cudaErrorNotYetImplemented(31),
    cudaErrorMemoryValueTooLarge(32),
    cudaErrorStubLibrary(34),
    cudaErrorInsufficientDriver(35),
    cudaErrorCallRequiresNewerDriver(36),
    cudaErrorInvalidSurface(37),
    cudaErrorDuplicateVariableName(43),
    cudaErrorDuplicateTextureName(44),
    cudaErrorDuplicateSurfaceName(45),
    cudaErrorDevicesUnavailable(46),
    cudaErrorIncompatibleDriverContext(49),
    cudaErrorMissingConfiguration(52),
    cudaErrorPriorLaunchFailure(53),
    cudaErrorLaunchMaxDepthExceeded(65),
    cudaErrorLaunchFileScopedTex(66),
    cudaErrorLaunchFileScopedSurf(67),
    cudaErrorSyncDepthExceeded(68),
    cudaErrorLaunchPendingCountExceeded(69),
    cudaErrorInvalidDeviceFunction(98),
    cudaErrorNoDevice(100),
    cudaErrorInvalidDevice(101),
    cudaErrorDeviceNotLicensed(102),
    cudaErrorSoftwareValidityNotEstablished(103),
    cudaErrorStartupFailure(127),
    cudaErrorInvalidKernelImage(200),
    cudaErrorDeviceUninitialized(201),
    cudaErrorMapBufferObjectFailed(205),
    cudaErrorUnmapBufferObjectFailed(206),
    cudaErrorArrayIsMapped(207),
    cudaErrorAlreadyMapped(208),
    cudaErrorNoKernelImageForDevice(209),
    cudaErrorAlreadyAcquired(210),
    cudaErrorNotMapped(211),
    cudaErrorNotMappedAsArray(212),
    cudaErrorNotMappedAsPointer(213),
    cudaErrorECCUncorrectable(214),
    cudaErrorUnsupportedLimit(215),
    cudaErrorDeviceAlreadyInUse(216),
    cudaErrorPeerAccessUnsupported(217),
    cudaErrorInvalidPtx(218),
    cudaErrorInvalidGraphicsContext(219),
    cudaErrorNvlinkUncorrectable(220),
    cudaErrorJitCompilerNotFound(221),
    cudaErrorUnsupportedPtxVersion(222),
    cudaErrorJitCompilationDisabled(223),
    cudaErrorUnsupportedExecAffinity(224),
    cudaErrorInvalidSource(300),
    cudaErrorFileNotFound(301),
    cudaErrorSharedObjectSymbolNotFound(302),
    cudaErrorSharedObjectInitFailed(303),
    cudaErrorOperatingSystem(304),
    cudaErrorInvalidResourceHandle(400),
    cudaErrorIllegalState(401),
    cudaErrorSymbolNotFound(500),
    cudaErrorNotReady(600),
    cudaErrorIllegalAddress(700),
    cudaErrorLaunchOutOfResources(701),
    cudaErrorLaunchTimeout(702),
    cudaErrorLaunchIncompatibleTexturing(703),
    cudaErrorPeerAccessAlreadyEnabled(704),
    cudaErrorPeerAccessNotEnabled(705),
    cudaErrorSetOnActiveProcess(708),
    cudaErrorContextIsDestroyed(709),
    cudaErrorAssert(710),
    cudaErrorTooManyPeers(711),
    cudaErrorHostMemoryAlreadyRegistered(712),
    cudaErrorHostMemoryNotRegistered(713),
    cudaErrorHardwareStackError(714),
    cudaErrorIllegalInstruction(715),
    cudaErrorMisalignedAddress(716),
    cudaErrorInvalidAddressSpace(717),
    cudaErrorInvalidPc(718),
    cudaErrorLaunchFailure(719),
    cudaErrorCooperativeLaunchTooLarge(720),
    cudaErrorNotPermitted(800),
    cudaErrorNotSupported(801),
    cudaErrorSystemNotReady(802),
    cudaErrorSystemDriverMismatch(803),
    cudaErrorCompatNotSupportedOnDevice(804),
    cudaErrorMpsConnectionFailed(805),
    cudaErrorMpsRpcFailure(806),
    cudaErrorMpsServerNotReady(807),
    cudaErrorMpsMaxClientsReached(808),
    cudaErrorMpsMaxConnectionsReached(809),
    cudaErrorStreamCaptureUnsupported(900),
    cudaErrorStreamCaptureInvalidated(901),
    cudaErrorStreamCaptureMerge(902),
    cudaErrorStreamCaptureUnmatched(903),
    cudaErrorStreamCaptureUnjoined(904),
    cudaErrorStreamCaptureIsolation(905),
    cudaErrorStreamCaptureImplicit(906),
    cudaErrorCapturedEvent(907),
    cudaErrorStreamCaptureWrongThread(908),
    cudaErrorTimeout(909),
    cudaErrorGraphExecUpdateFailure(910),
    cudaErrorExternalDevice(911),
    cudaErrorUnknown(999),
    cudaErrorApiFailureBase(10000);

    final int code;

    private static Map<Integer, CudaError> codeToError = new HashMap<Integer, CudaError>(){{
      put(cudaErrorInvalidValue.code, cudaErrorInvalidValue);
      put(cudaErrorMemoryAllocation.code, cudaErrorMemoryAllocation);
      put(cudaErrorInitializationError.code, cudaErrorInitializationError);
      put(cudaErrorCudartUnloading.code, cudaErrorCudartUnloading);
      put(cudaErrorProfilerDisabled.code, cudaErrorProfilerDisabled);
      put(cudaErrorProfilerNotInitialized.code, cudaErrorProfilerNotInitialized);
      put(cudaErrorProfilerAlreadyStarted.code, cudaErrorProfilerAlreadyStarted);
      put(cudaErrorProfilerAlreadyStopped.code, cudaErrorProfilerAlreadyStopped);
      put(cudaErrorInvalidConfiguration.code, cudaErrorInvalidConfiguration);
      put(cudaErrorInvalidPitchValue.code, cudaErrorInvalidPitchValue);
      put(cudaErrorInvalidSymbol.code, cudaErrorInvalidSymbol);
      put(cudaErrorInvalidHostPointer.code, cudaErrorInvalidHostPointer);
      put(cudaErrorInvalidDevicePointer.code, cudaErrorInvalidDevicePointer);
      put(cudaErrorInvalidTexture.code, cudaErrorInvalidTexture);
      put(cudaErrorInvalidTextureBinding.code, cudaErrorInvalidTextureBinding);
      put(cudaErrorInvalidChannelDescriptor.code, cudaErrorInvalidChannelDescriptor);
      put(cudaErrorInvalidMemcpyDirection.code, cudaErrorInvalidMemcpyDirection);
      put(cudaErrorAddressOfConstant.code, cudaErrorAddressOfConstant);
      put(cudaErrorTextureFetchFailed.code, cudaErrorTextureFetchFailed);
      put(cudaErrorTextureNotBound.code, cudaErrorTextureNotBound);
      put(cudaErrorSynchronizationError.code, cudaErrorSynchronizationError);
      put(cudaErrorInvalidFilterSetting.code, cudaErrorInvalidFilterSetting);
      put(cudaErrorInvalidNormSetting.code, cudaErrorInvalidNormSetting);
      put(cudaErrorMixedDeviceExecution.code, cudaErrorMixedDeviceExecution);
      put(cudaErrorNotYetImplemented.code, cudaErrorNotYetImplemented);
      put(cudaErrorMemoryValueTooLarge.code, cudaErrorMemoryValueTooLarge);
      put(cudaErrorStubLibrary.code, cudaErrorStubLibrary);
      put(cudaErrorInsufficientDriver.code, cudaErrorInsufficientDriver);
      put(cudaErrorCallRequiresNewerDriver.code, cudaErrorCallRequiresNewerDriver);
      put(cudaErrorInvalidSurface.code, cudaErrorInvalidSurface);
      put(cudaErrorDuplicateVariableName.code, cudaErrorDuplicateVariableName);
      put(cudaErrorDuplicateTextureName.code, cudaErrorDuplicateTextureName);
      put(cudaErrorDuplicateSurfaceName.code, cudaErrorDuplicateSurfaceName);
      put(cudaErrorDevicesUnavailable.code, cudaErrorDevicesUnavailable);
      put(cudaErrorIncompatibleDriverContext.code, cudaErrorIncompatibleDriverContext);
      put(cudaErrorMissingConfiguration.code, cudaErrorMissingConfiguration);
      put(cudaErrorPriorLaunchFailure.code, cudaErrorPriorLaunchFailure);
      put(cudaErrorLaunchMaxDepthExceeded.code, cudaErrorLaunchMaxDepthExceeded);
      put(cudaErrorLaunchFileScopedTex.code, cudaErrorLaunchFileScopedTex);
      put(cudaErrorLaunchFileScopedSurf.code, cudaErrorLaunchFileScopedSurf);
      put(cudaErrorSyncDepthExceeded.code, cudaErrorSyncDepthExceeded);
      put(cudaErrorLaunchPendingCountExceeded.code, cudaErrorLaunchPendingCountExceeded);
      put(cudaErrorInvalidDeviceFunction.code, cudaErrorInvalidDeviceFunction);
      put(cudaErrorNoDevice.code, cudaErrorNoDevice);
      put(cudaErrorInvalidDevice.code, cudaErrorInvalidDevice);
      put(cudaErrorDeviceNotLicensed.code, cudaErrorDeviceNotLicensed);
      put(cudaErrorSoftwareValidityNotEstablished.code, cudaErrorSoftwareValidityNotEstablished);
      put(cudaErrorStartupFailure.code, cudaErrorStartupFailure);
      put(cudaErrorInvalidKernelImage.code, cudaErrorInvalidKernelImage);
      put(cudaErrorDeviceUninitialized.code, cudaErrorDeviceUninitialized);
      put(cudaErrorMapBufferObjectFailed.code, cudaErrorMapBufferObjectFailed);
      put(cudaErrorUnmapBufferObjectFailed.code, cudaErrorUnmapBufferObjectFailed);
      put(cudaErrorArrayIsMapped.code, cudaErrorArrayIsMapped);
      put(cudaErrorAlreadyMapped.code, cudaErrorAlreadyMapped);
      put(cudaErrorNoKernelImageForDevice.code, cudaErrorNoKernelImageForDevice);
      put(cudaErrorAlreadyAcquired.code, cudaErrorAlreadyAcquired);
      put(cudaErrorNotMapped.code, cudaErrorNotMapped);
      put(cudaErrorNotMappedAsArray.code, cudaErrorNotMappedAsArray);
      put(cudaErrorNotMappedAsPointer.code, cudaErrorNotMappedAsPointer);
      put(cudaErrorECCUncorrectable.code, cudaErrorECCUncorrectable);
      put(cudaErrorUnsupportedLimit.code, cudaErrorUnsupportedLimit);
      put(cudaErrorDeviceAlreadyInUse.code, cudaErrorDeviceAlreadyInUse);
      put(cudaErrorPeerAccessUnsupported.code, cudaErrorPeerAccessUnsupported);
      put(cudaErrorInvalidPtx.code, cudaErrorInvalidPtx);
      put(cudaErrorInvalidGraphicsContext.code, cudaErrorInvalidGraphicsContext);
      put(cudaErrorNvlinkUncorrectable.code, cudaErrorNvlinkUncorrectable);
      put(cudaErrorJitCompilerNotFound.code, cudaErrorJitCompilerNotFound);
      put(cudaErrorUnsupportedPtxVersion.code, cudaErrorUnsupportedPtxVersion);
      put(cudaErrorJitCompilationDisabled.code, cudaErrorJitCompilationDisabled);
      put(cudaErrorUnsupportedExecAffinity.code, cudaErrorUnsupportedExecAffinity);
      put(cudaErrorInvalidSource.code, cudaErrorInvalidSource);
      put(cudaErrorFileNotFound.code, cudaErrorFileNotFound);
      put(cudaErrorSharedObjectSymbolNotFound.code, cudaErrorSharedObjectSymbolNotFound);
      put(cudaErrorSharedObjectInitFailed.code, cudaErrorSharedObjectInitFailed);
      put(cudaErrorOperatingSystem.code, cudaErrorOperatingSystem);
      put(cudaErrorInvalidResourceHandle.code, cudaErrorInvalidResourceHandle);
      put(cudaErrorIllegalState.code, cudaErrorIllegalState);
      put(cudaErrorSymbolNotFound.code, cudaErrorSymbolNotFound);
      put(cudaErrorNotReady.code, cudaErrorNotReady);
      put(cudaErrorIllegalAddress.code, cudaErrorIllegalAddress);
      put(cudaErrorLaunchOutOfResources.code, cudaErrorLaunchOutOfResources);
      put(cudaErrorLaunchTimeout.code, cudaErrorLaunchTimeout);
      put(cudaErrorLaunchIncompatibleTexturing.code, cudaErrorLaunchIncompatibleTexturing);
      put(cudaErrorPeerAccessAlreadyEnabled.code, cudaErrorPeerAccessAlreadyEnabled);
      put(cudaErrorPeerAccessNotEnabled.code, cudaErrorPeerAccessNotEnabled);
      put(cudaErrorSetOnActiveProcess.code, cudaErrorSetOnActiveProcess);
      put(cudaErrorContextIsDestroyed.code, cudaErrorContextIsDestroyed);
      put(cudaErrorAssert.code, cudaErrorAssert);
      put(cudaErrorTooManyPeers.code, cudaErrorTooManyPeers);
      put(cudaErrorHostMemoryAlreadyRegistered.code, cudaErrorHostMemoryAlreadyRegistered);
      put(cudaErrorHostMemoryNotRegistered.code, cudaErrorHostMemoryNotRegistered);
      put(cudaErrorHardwareStackError.code, cudaErrorHardwareStackError);
      put(cudaErrorIllegalInstruction.code, cudaErrorIllegalInstruction);
      put(cudaErrorMisalignedAddress.code, cudaErrorMisalignedAddress);
      put(cudaErrorInvalidAddressSpace.code, cudaErrorInvalidAddressSpace);
      put(cudaErrorInvalidPc.code, cudaErrorInvalidPc);
      put(cudaErrorLaunchFailure.code, cudaErrorLaunchFailure);
      put(cudaErrorCooperativeLaunchTooLarge.code, cudaErrorCooperativeLaunchTooLarge);
      put(cudaErrorNotPermitted.code, cudaErrorNotPermitted);
      put(cudaErrorNotSupported.code, cudaErrorNotSupported);
      put(cudaErrorSystemNotReady.code, cudaErrorSystemNotReady);
      put(cudaErrorSystemDriverMismatch.code, cudaErrorSystemDriverMismatch);
      put(cudaErrorCompatNotSupportedOnDevice.code, cudaErrorCompatNotSupportedOnDevice);
      put(cudaErrorMpsConnectionFailed.code, cudaErrorMpsConnectionFailed);
      put(cudaErrorMpsRpcFailure.code, cudaErrorMpsRpcFailure);
      put(cudaErrorMpsServerNotReady.code, cudaErrorMpsServerNotReady);
      put(cudaErrorMpsMaxClientsReached.code, cudaErrorMpsMaxClientsReached);
      put(cudaErrorMpsMaxConnectionsReached.code, cudaErrorMpsMaxConnectionsReached);
      put(cudaErrorStreamCaptureUnsupported.code, cudaErrorStreamCaptureUnsupported);
      put(cudaErrorStreamCaptureInvalidated.code, cudaErrorStreamCaptureInvalidated);
      put(cudaErrorStreamCaptureMerge.code, cudaErrorStreamCaptureMerge);
      put(cudaErrorStreamCaptureUnmatched.code, cudaErrorStreamCaptureUnmatched);
      put(cudaErrorStreamCaptureUnjoined.code, cudaErrorStreamCaptureUnjoined);
      put(cudaErrorStreamCaptureIsolation.code, cudaErrorStreamCaptureIsolation);
      put(cudaErrorStreamCaptureImplicit.code, cudaErrorStreamCaptureImplicit);
      put(cudaErrorCapturedEvent.code, cudaErrorCapturedEvent);
      put(cudaErrorStreamCaptureWrongThread.code, cudaErrorStreamCaptureWrongThread);
      put(cudaErrorTimeout.code, cudaErrorTimeout);
      put(cudaErrorGraphExecUpdateFailure.code, cudaErrorGraphExecUpdateFailure);
      put(cudaErrorExternalDevice.code, cudaErrorExternalDevice);
      put(cudaErrorUnknown.code, cudaErrorUnknown);
      put(cudaErrorApiFailureBase.code, cudaErrorApiFailureBase);
    }};

    CudaError(int errorCode) {
      this.code = errorCode;
    }

    public static CudaError parseErrorCode(int errorCode) {
      if (!codeToError.containsKey(errorCode)) {
        return UnknownNativeError;
      }
      return codeToError.get(errorCode);
    }
  }
}
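
// ---- Illustrative usage (editor addition, not part of the original file) ----
// A minimal sketch using only parseErrorCode as declared above: unknown codes map
// to UnknownNativeError instead of throwing, so this check is safe for any input.
class CudaErrorSketch {
  static boolean isOutOfMemory(int rawErrorCode) {
    return CudaException.CudaError.parseErrorCode(rawErrorCode)
        == CudaException.CudaError.cudaErrorMemoryAllocation;
  }
}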
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/Table.java
/* * * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ai.rapids.cudf; import ai.rapids.cudf.HostColumnVector.BasicType; import ai.rapids.cudf.HostColumnVector.DataType; import ai.rapids.cudf.HostColumnVector.ListType; import ai.rapids.cudf.HostColumnVector.StructData; import ai.rapids.cudf.HostColumnVector.StructType; import ai.rapids.cudf.ast.CompiledExpression; import java.io.File; import java.math.BigDecimal; import java.math.BigInteger; import java.math.MathContext; import java.math.RoundingMode; import java.nio.ByteBuffer; import java.util.*; /** * Class to represent a collection of ColumnVectors and operations that can be performed on them * collectively. * The refcount on the columns will be increased once they are passed in */ public final class Table implements AutoCloseable { static { NativeDepsLoader.loadNativeDeps(); } private final long rows; private long nativeHandle; private ColumnVector[] columns; /** * Table class makes a copy of the array of {@link ColumnVector}s passed to it. The class * will decrease the refcount * on itself and all its contents when closed and free resources if refcount is zero * @param columns - Array of ColumnVectors */ public Table(ColumnVector... columns) { assert columns != null && columns.length > 0 : "ColumnVectors can't be null or empty"; rows = columns[0].getRowCount(); for (ColumnVector columnVector : columns) { assert (null != columnVector) : "ColumnVectors can't be null"; assert (rows == columnVector.getRowCount()) : "All columns should have the same number of " + "rows " + columnVector.getType(); } // Since Arrays are mutable objects make a copy this.columns = new ColumnVector[columns.length]; long[] viewPointers = new long[columns.length]; for (int i = 0; i < columns.length; i++) { this.columns[i] = columns[i]; columns[i].incRefCount(); viewPointers[i] = columns[i].getNativeView(); } nativeHandle = createCudfTableView(viewPointers); } /** * Create a Table from an array of existing on device cudf::column pointers. Ownership of the * columns is transferred to the ColumnVectors held by the new Table. In the case of an exception * the columns will be deleted. * @param cudfColumns - Array of nativeHandles */ public Table(long[] cudfColumns) { assert cudfColumns != null && cudfColumns.length > 0 : "CudfColumns can't be null or empty"; this.columns = ColumnVector.getColumnVectorsFromPointers(cudfColumns); try { long[] views = new long[columns.length]; for (int i = 0; i < columns.length; i++) { views[i] = columns[i].getNativeView(); } nativeHandle = createCudfTableView(views); this.rows = columns[0].getRowCount(); } catch (Throwable t) { for (ColumnVector column : columns) { try { column.close(); } catch (Throwable s) { t.addSuppressed(s); } } throw t; } } /** * Provides a faster way to get access to the columns. Only to be used internally, and it should * never be modified in anyway. 
*/ ColumnVector[] getColumns() { return columns; } /** Return the native table view handle for this table */ public long getNativeView() { return nativeHandle; } /** * Return the {@link ColumnVector} at the specified index. If you want to keep a reference to * the column around past the life time of the table, you will need to increment the reference * count on the column yourself. */ public ColumnVector getColumn(int index) { assert index < columns.length; return columns[index]; } public final long getRowCount() { return rows; } public final int getNumberOfColumns() { return columns.length; } @Override public void close() { if (nativeHandle != 0) { deleteCudfTable(nativeHandle); nativeHandle = 0; } if (columns != null) { for (int i = 0; i < columns.length; i++) { columns[i].close(); columns[i] = null; } columns = null; } } @Override public String toString() { return "Table{" + "columns=" + Arrays.toString(columns) + ", cudfTable=" + nativeHandle + ", rows=" + rows + '}'; } /** * Returns the Device memory buffer size. */ public long getDeviceMemorySize() { long total = 0; for (ColumnVector cv: columns) { total += cv.getDeviceMemorySize(); } return total; } /** * This method is internal and exposed purely for testing purpopses */ static Table removeNullMasksIfNeeded(Table table) { return new Table(removeNullMasksIfNeeded(table.nativeHandle)); } ///////////////////////////////////////////////////////////////////////////// // NATIVE APIs ///////////////////////////////////////////////////////////////////////////// private static native long[] removeNullMasksIfNeeded(long tableView) throws CudfException; private static native ContiguousTable[] contiguousSplit(long inputTable, int[] indices); private static native long makeChunkedPack(long inputTable, long bounceBufferSize, long tempMemoryResource); private static native long[] partition(long inputTable, long partitionView, int numberOfPartitions, int[] outputOffsets); private static native long[] hashPartition(long inputTable, int[] columnsToHash, int hashTypeId, int numberOfPartitions, int seed, int[] outputOffsets) throws CudfException; private static native long[] roundRobinPartition(long inputTable, int numberOfPartitions, int startPartition, int[] outputOffsets) throws CudfException; private static native void deleteCudfTable(long handle) throws CudfException; private static native long bound(long inputTable, long valueTable, boolean[] descFlags, boolean[] areNullsSmallest, boolean isUpperBound) throws CudfException; /** * Ugly long function to read CSV. This is a long function to avoid the overhead of reaching * into a java * object to try and pull out all of the options. If this becomes unwieldy we can change it. * @param columnNames names of all of the columns, even the ones filtered out * @param dTypeIds native types IDs of all of the columns. * @param dTypeScales scale of the type for all of the columns. * @param filterColumnNames name of the columns to read, or an empty array if we want to read * all of them * @param filePath the path of the file to read, or null if no path should be read. * @param address the address of the buffer to read from or 0 if we should not. * @param length the length of the buffer to read from. * @param headerRow the 0 based index row of the header can be -1 * @param delim character deliminator (must be ASCII). * @param quoteStyle quote style expected to be used in the input (represented as int) * @param quote character quote (must be ASCII). 
* @param comment character that starts a comment line (must be ASCII) use '\0' * @param nullValues values that should be treated as nulls * @param trueValues values that should be treated as boolean true * @param falseValues values that should be treated as boolean false */ private static native long[] readCSV(String[] columnNames, int[] dTypeIds, int[] dTypeScales, String[] filterColumnNames, String filePath, long address, long length, int headerRow, byte delim, int quoteStyle, byte quote, byte comment, String[] nullValues, String[] trueValues, String[] falseValues) throws CudfException; private static native long[] readCSVFromDataSource(String[] columnNames, int[] dTypeIds, int[] dTypeScales, String[] filterColumnNames, int headerRow, byte delim, int quoteStyle, byte quote, byte comment, String[] nullValues, String[] trueValues, String[] falseValues, long dataSourceHandle) throws CudfException; /** * read JSON data and return a pointer to a TableWithMeta object. */ private static native long readJSON(String[] columnNames, int[] dTypeIds, int[] dTypeScales, String filePath, long address, long length, boolean dayFirst, boolean lines, boolean recoverWithNulls) throws CudfException; private static native long readJSONFromDataSource(String[] columnNames, int[] dTypeIds, int[] dTypeScales, boolean dayFirst, boolean lines, boolean recoverWithNulls, long dsHandle) throws CudfException; private static native long readAndInferJSON(long address, long length, boolean dayFirst, boolean lines, boolean recoverWithNulls) throws CudfException; /** * Read in Parquet formatted data. * @param filterColumnNames name of the columns to read, or an empty array if we want to read * all of them * @param binaryToString whether to convert this column to String if binary * @param filePath the path of the file to read, or null if no path should be read. * @param address the address of the buffer to read from or 0 if we should not. * @param length the length of the buffer to read from. * @param timeUnit return type of TimeStamp in units */ private static native long[] readParquet(String[] filterColumnNames, boolean[] binaryToString, String filePath, long address, long length, int timeUnit) throws CudfException; private static native long[] readParquetFromDataSource(String[] filterColumnNames, boolean[] binaryToString, int timeUnit, long dataSourceHandle) throws CudfException; /** * Read in Avro formatted data. * @param filterColumnNames name of the columns to read, or an empty array if we want to read * all of them * @param filePath the path of the file to read, or null if no path should be read. * @param address the address of the buffer to read from or 0 if we should not. * @param length the length of the buffer to read from. */ private static native long[] readAvro(String[] filterColumnNames, String filePath, long address, long length) throws CudfException; private static native long[] readAvroFromDataSource(String[] filterColumnNames, long dataSourceHandle) throws CudfException; /** * Setup everything to write parquet formatted data to a file. 
* @param columnNames names that correspond to the table columns * @param numChildren Children of the top level * @param flatNumChildren flattened list of children per column * @param nullable true if the column can have nulls else false * @param metadataKeys Metadata key names to place in the Parquet file * @param metadataValues Metadata values corresponding to metadataKeys * @param compression native compression codec ID * @param statsFreq native statistics frequency ID * @param isInt96 true if timestamp type is int96 * @param precisions precision list containing all the precisions of the decimal types in * the columns * @param isMapValues true if a column is a map * @param isBinaryValues true if a column is a binary * @param filename local output path * @return a handle that is used in later calls to writeParquetChunk and writeParquetEnd. */ private static native long writeParquetFileBegin(String[] columnNames, int numChildren, int[] flatNumChildren, boolean[] nullable, String[] metadataKeys, String[] metadataValues, int compression, int statsFreq, boolean[] isInt96, int[] precisions, boolean[] isMapValues, boolean[] isBinaryValues, boolean[] hasParquetFieldIds, int[] parquetFieldIds, String filename) throws CudfException; /** * Setup everything to write parquet formatted data to a buffer. * @param columnNames names that correspond to the table columns * @param numChildren Children of the top level * @param flatNumChildren flattened list of children per column * @param nullable true if the column can have nulls else false * @param metadataKeys Metadata key names to place in the Parquet file * @param metadataValues Metadata values corresponding to metadataKeys * @param compression native compression codec ID * @param statsFreq native statistics frequency ID * @param isInt96 true if timestamp type is int96 * @param precisions precision list containing all the precisions of the decimal types in * the columns * @param isMapValues true if a column is a map * @param isBinaryValues true if a column is a binary * @param consumer consumer of host buffers produced. * @return a handle that is used in later calls to writeParquetChunk and writeParquetEnd. */ private static native long writeParquetBufferBegin(String[] columnNames, int numChildren, int[] flatNumChildren, boolean[] nullable, String[] metadataKeys, String[] metadataValues, int compression, int statsFreq, boolean[] isInt96, int[] precisions, boolean[] isMapValues, boolean[] isBinaryValues, boolean[] hasParquetFieldIds, int[] parquetFieldIds, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator ) throws CudfException; /** * Write out a table to an open handle. * @param handle the handle to the writer. * @param table the table to write out. * @param tableMemSize the size of the table in bytes to help with memory allocation. */ private static native void writeParquetChunk(long handle, long table, long tableMemSize); /** * Finish writing out parquet. * @param handle the handle. Do not use again once this returns. */ private static native void writeParquetEnd(long handle); /** * Read in ORC formatted data. * @param filterColumnNames name of the columns to read, or an empty array if we want to read * all of them * @param filePath the path of the file to read, or null if no path should be read. * @param address the address of the buffer to read from or 0 for no buffer. * @param length the length of the buffer to read from. 
* @param usingNumPyTypes whether the parser should implicitly promote TIMESTAMP * columns to TIMESTAMP_MILLISECONDS for compatibility with NumPy. * @param timeUnit return type of TimeStamp in units * @param decimal128Columns name of the columns which are read as Decimal128 rather than Decimal64 */ private static native long[] readORC(String[] filterColumnNames, String filePath, long address, long length, boolean usingNumPyTypes, int timeUnit, String[] decimal128Columns) throws CudfException; private static native long[] readORCFromDataSource(String[] filterColumnNames, boolean usingNumPyTypes, int timeUnit, String[] decimal128Columns, long dataSourceHandle) throws CudfException; /** * Setup everything to write ORC formatted data to a file. * @param columnNames names that correspond to the table columns * @param numChildren Children of the top level * @param flatNumChildren flattened list of children per column * @param nullable true if the column can have nulls else false * @param metadataKeys Metadata key names to place in the Parquet file * @param metadataValues Metadata values corresponding to metadataKeys * @param compression native compression codec ID * @param precisions precision list containing all the precisions of the decimal types in * the columns * @param isMapValues true if a column is a map * @param filename local output path * @return a handle that is used in later calls to writeORCChunk and writeORCEnd. */ private static native long writeORCFileBegin(String[] columnNames, int numChildren, int[] flatNumChildren, boolean[] nullable, String[] metadataKeys, String[] metadataValues, int compression, int[] precisions, boolean[] isMapValues, String filename) throws CudfException; /** * Setup everything to write ORC formatted data to a buffer. * @param columnNames names that correspond to the table columns * @param numChildren Children of the top level * @param flatNumChildren flattened list of children per column * @param nullable true if the column can have nulls else false * @param metadataKeys Metadata key names to place in the Parquet file * @param metadataValues Metadata values corresponding to metadataKeys * @param compression native compression codec ID * @param precisions precision list containing all the precisions of the decimal types in * the columns * @param isMapValues true if a column is a map * @param consumer consumer of host buffers produced. * @return a handle that is used in later calls to writeORCChunk and writeORCEnd. */ private static native long writeORCBufferBegin(String[] columnNames, int numChildren, int[] flatNumChildren, boolean[] nullable, String[] metadataKeys, String[] metadataValues, int compression, int[] precisions, boolean[] isMapValues, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator ) throws CudfException; /** * Write out a table to an open handle. * @param handle the handle to the writer. * @param table the table to write out. * @param tableMemSize the size of the table in bytes to help with memory allocation. */ private static native void writeORCChunk(long handle, long table, long tableMemSize); /** * Finish writing out ORC. * @param handle the handle. Do not use again once this returns. */ private static native void writeORCEnd(long handle); /** * Setup everything to write Arrow IPC formatted data to a file. * @param columnNames names that correspond to the table columns * @param filename local output path * @return a handle that is used in later calls to writeArrowIPCChunk and writeArrowIPCEnd. 
*/ private static native long writeArrowIPCFileBegin(String[] columnNames, String filename); /** * Setup everything to write Arrow IPC formatted data to a buffer. * @param columnNames names that correspond to the table columns * @param consumer consumer of host buffers produced. * @param hostMemoryAllocator allocator for host memory buffers. * @return a handle that is used in later calls to writeArrowIPCChunk and writeArrowIPCEnd. */ private static native long writeArrowIPCBufferBegin(String[] columnNames, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator); /** * Convert a cudf table to an arrow table handle. * @param handle the handle to the writer. * @param tableHandle the table to convert */ private static native long convertCudfToArrowTable(long handle, long tableHandle); /** * Write out a table to an open handle. * @param handle the handle to the writer. * @param arrowHandle the arrow table to write out. * @param maxChunkSize the maximum number of rows that could * be written out in a single chunk. Generally this setting will be * followed unless for some reason the arrow table is not a single group. * This can happen when reading arrow data, but not when converting from * cudf. */ private static native void writeArrowIPCArrowChunk(long handle, long arrowHandle, long maxChunkSize); /** * Finish writing out Arrow IPC. * @param handle the handle. Do not use again once this returns. */ private static native void writeArrowIPCEnd(long handle); /** * Setup everything to read an Arrow IPC formatted data file. * @param path local input path * @return a handle that is used in later calls to readArrowIPCChunk and readArrowIPCEnd. */ private static native long readArrowIPCFileBegin(String path); /** * Setup everything to read Arrow IPC formatted data from a provider. * @param provider the class that will provide the data. * @return a handle that is used in later calls to readArrowIPCChunk and readArrowIPCEnd. */ private static native long readArrowIPCBufferBegin(ArrowReaderWrapper provider); /** * Read the next chunk/table of data. * @param handle the handle that is holding the data. * @param rowTarget the number of rows to read. * @return a pointer to an arrow table handle. */ private static native long readArrowIPCChunkToArrowTable(long handle, int rowTarget); /** * Close the arrow table handle returned by readArrowIPCChunkToArrowTable or * convertCudfToArrowTable */ private static native void closeArrowTable(long arrowHandle); /** * Convert an arrow table handle as returned by readArrowIPCChunkToArrowTable to * cudf table handles. */ private static native long[] convertArrowTableToCudf(long arrowHandle); /** * Finish reading the data. We are done. * @param handle the handle to clean up. 
*/ private static native void readArrowIPCEnd(long handle); private static native long[] groupByAggregate(long inputTable, int[] keyIndices, int[] aggColumnsIndices, long[] aggInstances, boolean ignoreNullKeys, boolean keySorted, boolean[] keysDescending, boolean[] keysNullSmallest) throws CudfException; private static native long[] groupByScan(long inputTable, int[] keyIndices, int[] aggColumnsIndices, long[] aggInstances, boolean ignoreNullKeys, boolean keySorted, boolean[] keysDescending, boolean[] keysNullSmallest) throws CudfException; private static native long[] groupByReplaceNulls(long inputTable, int[] keyIndices, int[] replaceColumnsIndices, boolean[] isPreceding, boolean ignoreNullKeys, boolean keySorted, boolean[] keysDescending, boolean[] keysNullSmallest) throws CudfException; private static native long[] rollingWindowAggregate( long inputTable, int[] keyIndices, long[] defaultOutputs, int[] aggColumnsIndices, long[] aggInstances, int[] minPeriods, int[] preceding, int[] following, boolean[] unboundedPreceding, boolean[] unboundedFollowing, boolean ignoreNullKeys) throws CudfException; private static native long[] rangeRollingWindowAggregate(long inputTable, int[] keyIndices, int[] orderByIndices, boolean[] isOrderByAscending, int[] aggColumnsIndices, long[] aggInstances, int[] minPeriods, long[] preceding, long[] following, int[] precedingRangeExtent, int[] followingRangeExtent, boolean ignoreNullKeys) throws CudfException; private static native long sortOrder(long inputTable, long[] sortKeys, boolean[] isDescending, boolean[] areNullsSmallest) throws CudfException; private static native long[] orderBy(long inputTable, long[] sortKeys, boolean[] isDescending, boolean[] areNullsSmallest) throws CudfException; private static native long[] merge(long[] tableHandles, int[] sortKeyIndexes, boolean[] isDescending, boolean[] areNullsSmallest) throws CudfException; private static native long[] leftJoinGatherMaps(long leftKeys, long rightKeys, boolean compareNullsEqual) throws CudfException; private static native long leftJoinRowCount(long leftTable, long rightHashJoin) throws CudfException; private static native long[] leftHashJoinGatherMaps(long leftTable, long rightHashJoin) throws CudfException; private static native long[] leftHashJoinGatherMapsWithCount(long leftTable, long rightHashJoin, long outputRowCount) throws CudfException; private static native long[] innerJoinGatherMaps(long leftKeys, long rightKeys, boolean compareNullsEqual) throws CudfException; private static native long innerJoinRowCount(long table, long hashJoin) throws CudfException; private static native long[] innerHashJoinGatherMaps(long table, long hashJoin) throws CudfException; private static native long[] innerHashJoinGatherMapsWithCount(long table, long hashJoin, long outputRowCount) throws CudfException; private static native long[] fullJoinGatherMaps(long leftKeys, long rightKeys, boolean compareNullsEqual) throws CudfException; private static native long fullJoinRowCount(long leftTable, long rightHashJoin) throws CudfException; private static native long[] fullHashJoinGatherMaps(long leftTable, long rightHashJoin) throws CudfException; private static native long[] fullHashJoinGatherMapsWithCount(long leftTable, long rightHashJoin, long outputRowCount) throws CudfException; private static native long[] leftSemiJoinGatherMap(long leftKeys, long rightKeys, boolean compareNullsEqual) throws CudfException; private static native long[] leftAntiJoinGatherMap(long leftKeys, long rightKeys, boolean 
compareNullsEqual) throws CudfException; private static native long conditionalLeftJoinRowCount(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalLeftJoinGatherMaps(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalLeftJoinGatherMapsWithCount(long leftTable, long rightTable, long condition, long rowCount) throws CudfException; private static native long conditionalInnerJoinRowCount(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalInnerJoinGatherMaps(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalInnerJoinGatherMapsWithCount(long leftTable, long rightTable, long condition, long rowCount) throws CudfException; private static native long[] conditionalFullJoinGatherMaps(long leftTable, long rightTable, long condition) throws CudfException; private static native long conditionalLeftSemiJoinRowCount(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalLeftSemiJoinGatherMap(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalLeftSemiJoinGatherMapWithCount(long leftTable, long rightTable, long condition, long rowCount) throws CudfException; private static native long conditionalLeftAntiJoinRowCount(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalLeftAntiJoinGatherMap(long leftTable, long rightTable, long condition) throws CudfException; private static native long[] conditionalLeftAntiJoinGatherMapWithCount(long leftTable, long rightTable, long condition, long rowCount) throws CudfException; private static native long[] mixedLeftJoinSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedLeftJoinGatherMaps(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedLeftJoinGatherMapsWithSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual, long outputRowCount, long matchesColumnView); private static native long[] mixedInnerJoinSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedInnerJoinGatherMaps(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedInnerJoinGatherMapsWithSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual, long outputRowCount, long matchesColumnView); private static native long[] mixedFullJoinGatherMaps(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedLeftSemiJoinSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedLeftSemiJoinGatherMap(long leftKeysTable, long rightKeysTable, long 
leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedLeftSemiJoinGatherMapWithSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual, long outputRowCount, long matchesColumnView); private static native long[] mixedLeftAntiJoinSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedLeftAntiJoinGatherMap(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual); private static native long[] mixedLeftAntiJoinGatherMapWithSize(long leftKeysTable, long rightKeysTable, long leftConditionTable, long rightConditionTable, long condition, boolean compareNullsEqual, long outputRowCount, long matchesColumnView); private static native long[] crossJoin(long leftTable, long rightTable) throws CudfException; private static native long[] concatenate(long[] cudfTablePointers) throws CudfException; private static native long interleaveColumns(long input); private static native long[] filter(long input, long mask); private static native long[] dropDuplicates(long nativeHandle, int[] keyColumns, int keepValue, boolean nullsEqual) throws CudfException; private static native long[] gather(long tableHandle, long gatherView, boolean checkBounds); private static native long[] scatterTable(long srcTableHandle, long scatterView, long targetTableHandle) throws CudfException; private static native long[] scatterScalars(long[] srcScalarHandles, long scatterView, long targetTableHandle) throws CudfException; private static native long[] convertToRows(long nativeHandle); private static native long[] convertToRowsFixedWidthOptimized(long nativeHandle); private static native long[] convertFromRows(long nativeColumnView, int[] types, int[] scale); private static native long[] convertFromRowsFixedWidthOptimized(long nativeColumnView, int[] types, int[] scale); private static native long[] repeatStaticCount(long tableHandle, int count); private static native long[] repeatColumnCount(long tableHandle, long columnHandle); private static native long rowBitCount(long tableHandle) throws CudfException; private static native long[] explode(long tableHandle, int index); private static native long[] explodePosition(long tableHandle, int index); private static native long[] explodeOuter(long tableHandle, int index); private static native long[] explodeOuterPosition(long tableHandle, int index); private static native long createCudfTableView(long[] nativeColumnViewHandles); private static native long[] columnViewsFromPacked(ByteBuffer metadata, long dataAddress); private static native ContigSplitGroupByResult contiguousSplitGroups(long inputTable, int[] keyIndices, boolean ignoreNullKeys, boolean keySorted, boolean[] keysDescending, boolean[] keysNullSmallest, boolean genUniqKeys); private static native long[] sample(long tableHandle, long n, boolean replacement, long seed); private static native int distinctCount(long handle, boolean nullsEqual); ///////////////////////////////////////////////////////////////////////////// // TABLE CREATION APIs ///////////////////////////////////////////////////////////////////////////// /** * Read a CSV file using the default CSVOptions. * @param schema the schema of the file. You may use Schema.INFERRED to infer the schema. 
* @param path the local file to read. * @return the file parsed as a table on the GPU. */ public static Table readCSV(Schema schema, File path) { return readCSV(schema, CSVOptions.DEFAULT, path); } /** * Read a CSV file. * @param schema the schema of the file. You may use Schema.INFERRED to infer the schema. * @param opts various CSV parsing options. * @param path the local file to read. * @return the file parsed as a table on the GPU. */ public static Table readCSV(Schema schema, CSVOptions opts, File path) { return new Table( readCSV(schema.getColumnNames(), schema.getTypeIds(), schema.getTypeScales(), opts.getIncludeColumnNames(), path.getAbsolutePath(), 0, 0, opts.getHeaderRow(), opts.getDelim(), opts.getQuoteStyle().nativeId, opts.getQuote(), opts.getComment(), opts.getNullValues(), opts.getTrueValues(), opts.getFalseValues())); } /** * Read CSV formatted data using the default CSVOptions. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param buffer raw UTF8 formatted bytes. * @return the data parsed as a table on the GPU. */ public static Table readCSV(Schema schema, byte[] buffer) { return readCSV(schema, CSVOptions.DEFAULT, buffer, 0, buffer.length); } /** * Read CSV formatted data. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param opts various CSV parsing options. * @param buffer raw UTF8 formatted bytes. * @return the data parsed as a table on the GPU. */ public static Table readCSV(Schema schema, CSVOptions opts, byte[] buffer) { return readCSV(schema, opts, buffer, 0, buffer.length); } /** * Read CSV formatted data. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param opts various CSV parsing options. * @param buffer raw UTF8 formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @param hostMemoryAllocator allocator for host memory buffers * @return the data parsed as a table on the GPU. */ public static Table readCSV(Schema schema, CSVOptions opts, byte[] buffer, long offset, long len, HostMemoryAllocator hostMemoryAllocator) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.length - offset; assert offset >= 0 && offset < buffer.length; try (HostMemoryBuffer newBuf = hostMemoryAllocator.allocate(len)) { newBuf.setBytes(0, buffer, offset, len); return readCSV(schema, opts, newBuf, 0, len); } } public static Table readCSV(Schema schema, CSVOptions opts, byte[] buffer, long offset, long len) { return readCSV(schema, opts, buffer, offset, len, DefaultHostMemoryAllocator.get()); } /** * Read CSV formatted data. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param opts various CSV parsing options. * @param buffer raw UTF8 formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @return the data parsed as a table on the GPU. 
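   * <p>
   * For illustration only, a hedged sketch of the in-memory variant of this reader family
   * (the inline bytes are a made-up example, not part of this API's contract):
   * <pre>
   *   byte[] csv = "1,2\n3,4".getBytes(java.nio.charset.StandardCharsets.UTF_8);
   *   try (Table t = Table.readCSV(Schema.INFERRED, CSVOptions.DEFAULT, csv)) {
   *     // use t; the GPU memory is freed when the try block exits
   *   }
   * </pre>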
*/ public static Table readCSV(Schema schema, CSVOptions opts, HostMemoryBuffer buffer, long offset, long len) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.getLength() - offset; assert offset >= 0 && offset < buffer.length; return new Table(readCSV(schema.getColumnNames(), schema.getTypeIds(), schema.getTypeScales(), opts.getIncludeColumnNames(), null, buffer.getAddress() + offset, len, opts.getHeaderRow(), opts.getDelim(), opts.getQuoteStyle().nativeId, opts.getQuote(), opts.getComment(), opts.getNullValues(), opts.getTrueValues(), opts.getFalseValues())); } public static Table readCSV(Schema schema, CSVOptions opts, DataSource ds) { long dsHandle = DataSourceHelper.createWrapperDataSource(ds); try { return new Table(readCSVFromDataSource(schema.getColumnNames(), schema.getTypeIds(), schema.getTypeScales(), opts.getIncludeColumnNames(), opts.getHeaderRow(), opts.getDelim(), opts.getQuoteStyle().nativeId, opts.getQuote(), opts.getComment(), opts.getNullValues(), opts.getTrueValues(), opts.getFalseValues(), dsHandle)); } finally { DataSourceHelper.destroyWrapperDataSource(dsHandle); } } private static native void writeCSVToFile(long table, String[] columnNames, boolean includeHeader, String rowDelimiter, byte fieldDelimiter, String nullValue, String trueValue, String falseValue, int quoteStyle, String outputPath) throws CudfException; public void writeCSVToFile(CSVWriterOptions options, String outputPath) { writeCSVToFile(nativeHandle, options.getColumnNames(), options.getIncludeHeader(), options.getRowDelimiter(), options.getFieldDelimiter(), options.getNullValue(), options.getTrueValue(), options.getFalseValue(), options.getQuoteStyle().nativeId, outputPath); } private static native long startWriteCSVToBuffer(String[] columnNames, boolean includeHeader, String rowDelimiter, byte fieldDelimiter, String nullValue, String trueValue, String falseValue, int quoteStyle, HostBufferConsumer buffer, HostMemoryAllocator hostMemoryAllocator ) throws CudfException; private static native void writeCSVChunkToBuffer(long writerHandle, long tableHandle); private static native void endWriteCSVToBuffer(long writerHandle); private static class CSVTableWriter extends TableWriter { private HostBufferConsumer consumer; private CSVTableWriter(CSVWriterOptions options, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator) { super(startWriteCSVToBuffer(options.getColumnNames(), options.getIncludeHeader(), options.getRowDelimiter(), options.getFieldDelimiter(), options.getNullValue(), options.getTrueValue(), options.getFalseValue(), options.getQuoteStyle().nativeId, consumer, hostMemoryAllocator)); this.consumer = consumer; } @Override public void write(Table table) { if (writerHandle == 0) { throw new IllegalStateException("Writer was already closed"); } writeCSVChunkToBuffer(writerHandle, table.nativeHandle); } @Override public void close() throws CudfException { if (writerHandle != 0) { endWriteCSVToBuffer(writerHandle); writerHandle = 0; } if (consumer != null) { consumer.done(); consumer = null; } } } public static TableWriter getCSVBufferWriter(CSVWriterOptions options, HostBufferConsumer bufferConsumer, HostMemoryAllocator hostMemoryAllocator) { return new CSVTableWriter(options, bufferConsumer, hostMemoryAllocator); } public static TableWriter getCSVBufferWriter(CSVWriterOptions options, HostBufferConsumer bufferConsumer) { return getCSVBufferWriter(options, bufferConsumer, DefaultHostMemoryAllocator.get()); } /** * Read a JSON file using the 
default JSONOptions. * @param schema the schema of the file. You may use Schema.INFERRED to infer the schema. * @param path the local file to read. * @return the file parsed as a table on the GPU. */ public static Table readJSON(Schema schema, File path) { return readJSON(schema, JSONOptions.DEFAULT, path); } /** * Read JSON formatted data using the default JSONOptions. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param buffer raw UTF8 formatted bytes. * @return the data parsed as a table on the GPU. */ public static Table readJSON(Schema schema, byte[] buffer) { return readJSON(schema, JSONOptions.DEFAULT, buffer, 0, buffer.length); } /** * Read JSON formatted data. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param opts various JSON parsing options. * @param buffer raw UTF8 formatted bytes. * @return the data parsed as a table on the GPU. */ public static Table readJSON(Schema schema, JSONOptions opts, byte[] buffer) { return readJSON(schema, opts, buffer, 0, buffer.length); } private static Table gatherJSONColumns(Schema schema, TableWithMeta twm) { String[] neededColumns = schema.getColumnNames(); if (neededColumns == null || neededColumns.length == 0) { return twm.releaseTable(); } else { String[] foundNames = twm.getColumnNames(); HashMap<String, Integer> indices = new HashMap<>(); for (int i = 0; i < foundNames.length; i++) { indices.put(foundNames[i], i); } // We might need to rearrange the columns to match what we want. DType[] types = schema.getTypes(); ColumnVector[] columns = new ColumnVector[neededColumns.length]; try (Table tbl = twm.releaseTable()) { for (int i = 0; i < columns.length; i++) { String neededColumnName = neededColumns[i]; Integer index = indices.get(neededColumnName); if (index != null) { columns[i] = tbl.getColumn(index).incRefCount(); } else { try (Scalar s = Scalar.fromNull(types[i])) { columns[i] = ColumnVector.fromScalar(s, (int)tbl.getRowCount()); } } } return new Table(columns); } finally { for (ColumnVector c: columns) { if (c != null) { c.close(); } } } } } /** * Read a JSON file. * @param schema the schema of the file. You may use Schema.INFERRED to infer the schema. * @param opts various JSON parsing options. * @param path the local file to read. * @return the file parsed as a table on the GPU. */ public static Table readJSON(Schema schema, JSONOptions opts, File path) { try (TableWithMeta twm = new TableWithMeta( readJSON(schema.getColumnNames(), schema.getTypeIds(), schema.getTypeScales(), path.getAbsolutePath(), 0, 0, opts.isDayFirst(), opts.isLines(), opts.isRecoverWithNull()))) { return gatherJSONColumns(schema, twm); } } /** * Read JSON formatted data. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param opts various JSON parsing options. * @param buffer raw UTF8 formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @param hostMemoryAllocator allocator for host memory buffers * @return the data parsed as a table on the GPU. 
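   * <p>
   * A hedged usage sketch (the JSON payload is hypothetical); JSONOptions.DEFAULT reads
   * records in JSON Lines form, one object per line:
   * <pre>
   *   byte[] json = "{\"a\": 1}\n{\"a\": 2}".getBytes(java.nio.charset.StandardCharsets.UTF_8);
   *   try (Table t = Table.readJSON(Schema.INFERRED, JSONOptions.DEFAULT, json)) {
   *     // t holds the parsed rows on the GPU
   *   }
   * </pre>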
*/ public static Table readJSON(Schema schema, JSONOptions opts, byte[] buffer, long offset, long len, HostMemoryAllocator hostMemoryAllocator) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.length - offset; assert offset >= 0 && offset < buffer.length; try (HostMemoryBuffer newBuf = hostMemoryAllocator.allocate(len)) { newBuf.setBytes(0, buffer, offset, len); return readJSON(schema, opts, newBuf, 0, len); } } public static Table readJSON(Schema schema, JSONOptions opts, byte[] buffer, long offset, long len) { return readJSON(schema, opts, buffer, offset, len, DefaultHostMemoryAllocator.get()); } /** * Read JSON formatted data and infer the column names and schema. * @param opts various JSON parsing options. * @param buffer raw UTF8 formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @return the data parsed as a table on the GPU and the metadata for the table returned. */ public static TableWithMeta readJSON(JSONOptions opts, HostMemoryBuffer buffer, long offset, long len) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.length - offset; assert offset >= 0 && offset < buffer.length; return new TableWithMeta(readAndInferJSON(buffer.getAddress() + offset, len, opts.isDayFirst(), opts.isLines(), opts.isRecoverWithNull())); } /** * Read JSON formatted data. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param opts various JSON parsing options. * @param buffer raw UTF8 formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @return the data parsed as a table on the GPU. */ public static Table readJSON(Schema schema, JSONOptions opts, HostMemoryBuffer buffer, long offset, long len) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.length - offset; assert offset >= 0 && offset < buffer.length; try (TableWithMeta twm = new TableWithMeta(readJSON(schema.getColumnNames(), schema.getTypeIds(), schema.getTypeScales(), null, buffer.getAddress() + offset, len, opts.isDayFirst(), opts.isLines(), opts.isRecoverWithNull()))) { return gatherJSONColumns(schema, twm); } } /** * Read JSON formatted data. * @param schema the schema of the data. You may use Schema.INFERRED to infer the schema. * @param opts various JSON parsing options. * @param ds the DataSource to read from. * @return the data parsed as a table on the GPU. */ public static Table readJSON(Schema schema, JSONOptions opts, DataSource ds) { long dsHandle = DataSourceHelper.createWrapperDataSource(ds); try (TableWithMeta twm = new TableWithMeta(readJSONFromDataSource(schema.getColumnNames(), schema.getTypeIds(), schema.getTypeScales(), opts.isDayFirst(), opts.isLines(), opts.isRecoverWithNull(), dsHandle))) { return gatherJSONColumns(schema, twm); } finally { DataSourceHelper.destroyWrapperDataSource(dsHandle); } } /** * Read a Parquet file using the default ParquetOptions. * @param path the local file to read. * @return the file parsed as a table on the GPU. */ public static Table readParquet(File path) { return readParquet(ParquetOptions.DEFAULT, path); } /** * Read a Parquet file. * @param opts various parquet parsing options. * @param path the local file to read. * @return the file parsed as a table on the GPU. 
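   * <p>
   * A hedged usage sketch (the path below is hypothetical):
   * <pre>
   *   try (Table t = Table.readParquet(ParquetOptions.DEFAULT, new File("/tmp/data.parquet"))) {
   *     // use t
   *   }
   * </pre>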
*/ public static Table readParquet(ParquetOptions opts, File path) { return new Table(readParquet(opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), path.getAbsolutePath(), 0, 0, opts.timeUnit().typeId.getNativeId())); } /** * Read parquet formatted data. * @param buffer raw parquet formatted bytes. * @return the data parsed as a table on the GPU. */ public static Table readParquet(byte[] buffer) { return readParquet(ParquetOptions.DEFAULT, buffer, 0, buffer.length); } /** * Read parquet formatted data. * @param opts various parquet parsing options. * @param buffer raw parquet formatted bytes. * @return the data parsed as a table on the GPU. */ public static Table readParquet(ParquetOptions opts, byte[] buffer) { return readParquet(opts, buffer, 0, buffer.length); } /** * Read parquet formatted data. * @param opts various parquet parsing options. * @param buffer raw parquet formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @param hostMemoryAllocator allocator for host memory buffers * @return the data parsed as a table on the GPU. */ public static Table readParquet(ParquetOptions opts, byte[] buffer, long offset, long len, HostMemoryAllocator hostMemoryAllocator) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.length - offset; assert offset >= 0 && offset < buffer.length; try (HostMemoryBuffer newBuf = hostMemoryAllocator.allocate(len)) { newBuf.setBytes(0, buffer, offset, len); return readParquet(opts, newBuf, 0, len); } } public static Table readParquet(ParquetOptions opts, byte[] buffer, long offset, long len) { return readParquet(opts, buffer, offset, len, DefaultHostMemoryAllocator.get()); } /** * Read parquet formatted data. * @param opts various parquet parsing options. * @param buffer raw parquet formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @return the data parsed as a table on the GPU. */ public static Table readParquet(ParquetOptions opts, HostMemoryBuffer buffer, long offset, long len) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.getLength() - offset; assert offset >= 0 && offset < buffer.length; return new Table(readParquet(opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), null, buffer.getAddress() + offset, len, opts.timeUnit().typeId.getNativeId())); } public static Table readParquet(ParquetOptions opts, DataSource ds) { long dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds); try { return new Table(readParquetFromDataSource(opts.getIncludeColumnNames(), opts.getReadBinaryAsString(), opts.timeUnit().typeId.getNativeId(), dataSourceHandle)); } finally { DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); } } /** * Read an Avro file using the default AvroOptions. * @param path the local file to read. * @return the file parsed as a table on the GPU. */ public static Table readAvro(File path) { return readAvro(AvroOptions.DEFAULT, path); } /** * Read an Avro file. * @param opts various Avro parsing options. * @param path the local file to read. * @return the file parsed as a table on the GPU. */ public static Table readAvro(AvroOptions opts, File path) { return new Table(readAvro(opts.getIncludeColumnNames(), path.getAbsolutePath(), 0, 0)); } /** * Read Avro formatted data. * @param buffer raw Avro formatted bytes. * @return the data parsed as a table on the GPU. 
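   * <p>
   * For illustration (hedged; the file name is hypothetical), the file-based overload of this
   * reader family looks like:
   * <pre>
   *   try (Table t = Table.readAvro(new File("/tmp/data.avro"))) {
   *     // use t
   *   }
   * </pre>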
   */
  public static Table readAvro(byte[] buffer) {
    return readAvro(AvroOptions.DEFAULT, buffer, 0, buffer.length);
  }

  /**
   * Read Avro formatted data.
   * @param opts various Avro parsing options.
   * @param buffer raw Avro formatted bytes.
   * @return the data parsed as a table on the GPU.
   */
  public static Table readAvro(AvroOptions opts, byte[] buffer) {
    return readAvro(opts, buffer, 0, buffer.length);
  }

  /**
   * Read Avro formatted data.
   * @param opts various Avro parsing options.
   * @param buffer raw Avro formatted bytes.
   * @param offset the starting offset into buffer.
   * @param len the number of bytes to parse.
   * @param hostMemoryAllocator allocator for host memory buffers
   * @return the data parsed as a table on the GPU.
   */
  public static Table readAvro(AvroOptions opts, byte[] buffer, long offset, long len,
                               HostMemoryAllocator hostMemoryAllocator) {
    assert offset >= 0 && offset < buffer.length;
    assert len <= buffer.length - offset;
    len = len > 0 ? len : buffer.length - offset;
    try (HostMemoryBuffer newBuf = hostMemoryAllocator.allocate(len)) {
      newBuf.setBytes(0, buffer, offset, len);
      return readAvro(opts, newBuf, 0, len);
    }
  }

  public static Table readAvro(AvroOptions opts, byte[] buffer, long offset, long len) {
    return readAvro(opts, buffer, offset, len, DefaultHostMemoryAllocator.get());
  }

  /**
   * Read Avro formatted data.
   * @param opts various Avro parsing options.
   * @param buffer raw Avro formatted bytes.
   * @param offset the starting offset into buffer.
   * @param len the number of bytes to parse.
   * @return the data parsed as a table on the GPU.
   */
  public static Table readAvro(AvroOptions opts, HostMemoryBuffer buffer, long offset, long len) {
    assert offset >= 0 && offset < buffer.length;
    assert len <= buffer.length - offset;
    len = len > 0 ? len : buffer.length - offset;
    return new Table(readAvro(opts.getIncludeColumnNames(),
        null, buffer.getAddress() + offset, len));
  }

  public static Table readAvro(AvroOptions opts, DataSource ds) {
    long dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds);
    try {
      return new Table(readAvroFromDataSource(opts.getIncludeColumnNames(), dataSourceHandle));
    } finally {
      DataSourceHelper.destroyWrapperDataSource(dataSourceHandle);
    }
  }

  /**
   * Read an ORC file using the default ORCOptions.
   * @param path the local file to read.
   * @return the file parsed as a table on the GPU.
   */
  public static Table readORC(File path) {
    return readORC(ORCOptions.DEFAULT, path);
  }

  /**
   * Read an ORC file.
   * @param opts ORC parsing options.
   * @param path the local file to read.
   * @return the file parsed as a table on the GPU.
   */
  public static Table readORC(ORCOptions opts, File path) {
    return new Table(readORC(opts.getIncludeColumnNames(),
        path.getAbsolutePath(), 0, 0,
        opts.usingNumPyTypes(), opts.timeUnit().typeId.getNativeId(),
        opts.getDecimal128Columns()));
  }

  /**
   * Read ORC formatted data.
   * @param buffer raw ORC formatted bytes.
   * @return the data parsed as a table on the GPU.
   */
  public static Table readORC(byte[] buffer) {
    return readORC(ORCOptions.DEFAULT, buffer, 0, buffer.length);
  }

  /**
   * Read ORC formatted data.
   * @param opts various ORC parsing options.
   * @param buffer raw ORC formatted bytes.
   * @return the data parsed as a table on the GPU.
   */
  public static Table readORC(ORCOptions opts, byte[] buffer) {
    return readORC(opts, buffer, 0, buffer.length);
  }

  /**
   * Read ORC formatted data.
   * @param opts various ORC parsing options.
   * @param buffer raw ORC formatted bytes.
   * @param offset the starting offset into buffer.
   * @param len the number of bytes to parse.
* @param hostMemoryAllocator allocator for host memory buffers * @return the data parsed as a table on the GPU. */ public static Table readORC(ORCOptions opts, byte[] buffer, long offset, long len, HostMemoryAllocator hostMemoryAllocator) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.length - offset; assert offset >= 0 && offset < buffer.length; try (HostMemoryBuffer newBuf = hostMemoryAllocator.allocate(len)) { newBuf.setBytes(0, buffer, offset, len); return readORC(opts, newBuf, 0, len); } } public static Table readORC(ORCOptions opts, byte[] buffer, long offset, long len) { return readORC(opts, buffer, offset, len, DefaultHostMemoryAllocator.get()); } /** * Read ORC formatted data. * @param opts various ORC parsing options. * @param buffer raw ORC formatted bytes. * @param offset the starting offset into buffer. * @param len the number of bytes to parse. * @return the data parsed as a table on the GPU. */ public static Table readORC(ORCOptions opts, HostMemoryBuffer buffer, long offset, long len) { if (len <= 0) { len = buffer.length - offset; } assert len > 0; assert len <= buffer.getLength() - offset; assert offset >= 0 && offset < buffer.length; return new Table(readORC(opts.getIncludeColumnNames(), null, buffer.getAddress() + offset, len, opts.usingNumPyTypes(), opts.timeUnit().typeId.getNativeId(), opts.getDecimal128Columns())); } public static Table readORC(ORCOptions opts, DataSource ds) { long dataSourceHandle = DataSourceHelper.createWrapperDataSource(ds); try { return new Table(readORCFromDataSource(opts.getIncludeColumnNames(), opts.usingNumPyTypes(), opts.timeUnit().typeId.getNativeId(), opts.getDecimal128Columns(), dataSourceHandle)); } finally { DataSourceHelper.destroyWrapperDataSource(dataSourceHandle); } } private static class ParquetTableWriter extends TableWriter { HostBufferConsumer consumer; private ParquetTableWriter(ParquetWriterOptions options, File outputFile) { super(writeParquetFileBegin(options.getFlatColumnNames(), options.getTopLevelChildren(), options.getFlatNumChildren(), options.getFlatIsNullable(), options.getMetadataKeys(), options.getMetadataValues(), options.getCompressionType().nativeId, options.getStatisticsFrequency().nativeId, options.getFlatIsTimeTypeInt96(), options.getFlatPrecision(), options.getFlatIsMap(), options.getFlatIsBinary(), options.getFlatHasParquetFieldId(), options.getFlatParquetFieldId(), outputFile.getAbsolutePath())); this.consumer = null; } private ParquetTableWriter(ParquetWriterOptions options, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator) { super(writeParquetBufferBegin(options.getFlatColumnNames(), options.getTopLevelChildren(), options.getFlatNumChildren(), options.getFlatIsNullable(), options.getMetadataKeys(), options.getMetadataValues(), options.getCompressionType().nativeId, options.getStatisticsFrequency().nativeId, options.getFlatIsTimeTypeInt96(), options.getFlatPrecision(), options.getFlatIsMap(), options.getFlatIsBinary(), options.getFlatHasParquetFieldId(), options.getFlatParquetFieldId(), consumer, hostMemoryAllocator)); this.consumer = consumer; } @Override public void write(Table table) { if (writerHandle == 0) { throw new IllegalStateException("Writer was already closed"); } writeParquetChunk(writerHandle, table.nativeHandle, table.getDeviceMemorySize()); } @Override public void close() throws CudfException { if (writerHandle != 0) { writeParquetEnd(writerHandle); } writerHandle = 0; if (consumer != null) { consumer.done(); consumer = null; } } 
  }

  /**
   * Get a table writer to write parquet data to a file.
   * @param options the parquet writer options.
   * @param outputFile where to write the file.
   * @return a table writer to use for writing out multiple tables.
   */
  public static TableWriter writeParquetChunked(ParquetWriterOptions options, File outputFile) {
    return new ParquetTableWriter(options, outputFile);
  }

  /**
   * Get a table writer to write parquet data and handle each chunk with a callback.
   * @param options the parquet writer options.
   * @param consumer a class that will be called when host buffers are ready with parquet
   *                 formatted data in them.
   * @param hostMemoryAllocator allocator for host memory buffers
   * @return a table writer to use for writing out multiple tables.
   */
  public static TableWriter writeParquetChunked(ParquetWriterOptions options,
                                                HostBufferConsumer consumer,
                                                HostMemoryAllocator hostMemoryAllocator) {
    return new ParquetTableWriter(options, consumer, hostMemoryAllocator);
  }

  public static TableWriter writeParquetChunked(ParquetWriterOptions options,
                                                HostBufferConsumer consumer) {
    return writeParquetChunked(options, consumer, DefaultHostMemoryAllocator.get());
  }

  /**
   * This is an evolving API and will most likely be removed in future releases. Please use it
   * with the caveat that it may not exist in the near future.
   * @param options the Parquet writer options.
   * @param consumer a class that will be called when host buffers are ready with Parquet
   *                 formatted data in them.
   * @param hostMemoryAllocator allocator for host memory buffers
   * @param columnViews ColumnViews to write to Parquet
   */
  public static void writeColumnViewsToParquet(ParquetWriterOptions options,
                                               HostBufferConsumer consumer,
                                               HostMemoryAllocator hostMemoryAllocator,
                                               ColumnView... columnViews) {
    assert columnViews != null && columnViews.length > 0 : "ColumnViews can't be null or empty";
    long rows = columnViews[0].getRowCount();
    for (ColumnView columnView : columnViews) {
      assert (null != columnView) : "ColumnViews can't be null";
      assert (rows == columnView.getRowCount()) : "All columns should have the same number of " +
          "rows " + columnView.getType();
    }
    // Since Arrays are mutable objects make a copy
    long[] viewPointers = new long[columnViews.length];
    for (int i = 0; i < columnViews.length; i++) {
      viewPointers[i] = columnViews[i].getNativeView();
    }
    long nativeHandle = createCudfTableView(viewPointers);
    try {
      try (ParquetTableWriter writer =
               new ParquetTableWriter(options, consumer, hostMemoryAllocator)) {
        long total = 0;
        for (ColumnView cv : columnViews) {
          total += cv.getDeviceMemorySize();
        }
        writeParquetChunk(writer.writerHandle, nativeHandle, total);
      }
    } finally {
      deleteCudfTable(nativeHandle);
    }
  }

  public static void writeColumnViewsToParquet(ParquetWriterOptions options,
                                               HostBufferConsumer consumer,
                                               ColumnView...
columnViews) { writeColumnViewsToParquet(options, consumer, DefaultHostMemoryAllocator.get(), columnViews); } private static class ORCTableWriter extends TableWriter { HostBufferConsumer consumer; private ORCTableWriter(ORCWriterOptions options, File outputFile) { super(writeORCFileBegin(options.getFlatColumnNames(), options.getTopLevelChildren(), options.getFlatNumChildren(), options.getFlatIsNullable(), options.getMetadataKeys(), options.getMetadataValues(), options.getCompressionType().nativeId, options.getFlatPrecision(), options.getFlatIsMap(), outputFile.getAbsolutePath())); this.consumer = null; } private ORCTableWriter(ORCWriterOptions options, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator) { super(writeORCBufferBegin(options.getFlatColumnNames(), options.getTopLevelChildren(), options.getFlatNumChildren(), options.getFlatIsNullable(), options.getMetadataKeys(), options.getMetadataValues(), options.getCompressionType().nativeId, options.getFlatPrecision(), options.getFlatIsMap(), consumer, hostMemoryAllocator)); this.consumer = consumer; } @Override public void write(Table table) { if (writerHandle == 0) { throw new IllegalStateException("Writer was already closed"); } writeORCChunk(writerHandle, table.nativeHandle, table.getDeviceMemorySize()); } @Override public void close() throws CudfException { if (writerHandle != 0) { writeORCEnd(writerHandle); } writerHandle = 0; if (consumer != null) { consumer.done(); consumer = null; } } } /** * Get a table writer to write ORC data to a file. * @param options the ORC writer options. * @param outputFile where to write the file. * @return a table writer to use for writing out multiple tables. */ public static TableWriter writeORCChunked(ORCWriterOptions options, File outputFile) { return new ORCTableWriter(options, outputFile); } /** * Get a table writer to write ORC data and handle each chunk with a callback. * @param options the ORC writer options. * @param consumer a class that will be called when host buffers are ready with ORC * formatted data in them. * @param hostMemoryAllocator allocator for host memory buffers * @return a table writer to use for writing out multiple tables. 
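   * <p>
   * A hedged sketch of the chunked-write pattern (the options, consumer, and tables below are
   * illustrative assumptions): each write call appends one table as a chunk, and close
   * finalizes the stream.
   * <pre>
   *   try (TableWriter writer = Table.writeORCChunked(options, consumer)) {
   *     writer.write(tableA);
   *     writer.write(tableB);
   *   }
   * </pre>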
*/ public static TableWriter writeORCChunked(ORCWriterOptions options, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator) { return new ORCTableWriter(options, consumer, hostMemoryAllocator); } public static TableWriter writeORCChunked(ORCWriterOptions options, HostBufferConsumer consumer) { return writeORCChunked(options, consumer, DefaultHostMemoryAllocator.get()); } private static class ArrowIPCTableWriter extends TableWriter { private final ArrowIPCWriterOptions.DoneOnGpu callback; private HostBufferConsumer consumer; private long maxChunkSize; private ArrowIPCTableWriter(ArrowIPCWriterOptions options, File outputFile) { super(writeArrowIPCFileBegin(options.getColumnNames(), outputFile.getAbsolutePath())); this.callback = options.getCallback(); this.consumer = null; this.maxChunkSize = options.getMaxChunkSize(); } private ArrowIPCTableWriter(ArrowIPCWriterOptions options, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator) { super(writeArrowIPCBufferBegin(options.getColumnNames(), consumer, hostMemoryAllocator)); this.callback = options.getCallback(); this.consumer = consumer; this.maxChunkSize = options.getMaxChunkSize(); } @Override public void write(Table table) { if (writerHandle == 0) { throw new IllegalStateException("Writer was already closed"); } long arrowHandle = convertCudfToArrowTable(writerHandle, table.nativeHandle); try { callback.doneWithTheGpu(table); writeArrowIPCArrowChunk(writerHandle, arrowHandle, maxChunkSize); } finally { closeArrowTable(arrowHandle); } } @Override public void close() throws CudfException { if (writerHandle != 0) { writeArrowIPCEnd(writerHandle); } writerHandle = 0; if (consumer != null) { consumer.done(); consumer = null; } } } /** * Get a table writer to write arrow IPC data to a file. * @param options the arrow IPC writer options. * @param outputFile where to write the file. * @return a table writer to use for writing out multiple tables. */ public static TableWriter writeArrowIPCChunked(ArrowIPCWriterOptions options, File outputFile) { return new ArrowIPCTableWriter(options, outputFile); } /** * Get a table writer to write arrow IPC data and handle each chunk with a callback. * @param options the arrow IPC writer options. * @param consumer a class that will be called when host buffers are ready with arrow IPC * formatted data in them. * @param hostMemoryAllocator allocator for host memory buffers * @return a table writer to use for writing out multiple tables. 
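   * <p>
   * A hedged sketch (the options, consumer, table, and path are illustrative assumptions);
   * the write pattern matches the other chunked writers, and the stream can later be
   * consumed incrementally with the Arrow IPC readers below:
   * <pre>
   *   try (TableWriter writer = Table.writeArrowIPCChunked(options, consumer)) {
   *     writer.write(table);
   *   }
   *   try (StreamedTableReader reader = Table.readArrowIPCChunked(new File("/tmp/t.arrow"))) {
   *     Table chunk;
   *     while ((chunk = reader.getNextIfAvailable()) != null) {
   *       chunk.close(); // or use the chunk before closing it
   *     }
   *   }
   * </pre>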
*/ public static TableWriter writeArrowIPCChunked(ArrowIPCWriterOptions options, HostBufferConsumer consumer, HostMemoryAllocator hostMemoryAllocator) { return new ArrowIPCTableWriter(options, consumer, hostMemoryAllocator); } public static TableWriter writeArrowIPCChunked(ArrowIPCWriterOptions options, HostBufferConsumer consumer) { return writeArrowIPCChunked(options, consumer, DefaultHostMemoryAllocator.get()); } private static class ArrowReaderWrapper implements AutoCloseable { private HostBufferProvider provider; private HostMemoryBuffer buffer; private final HostMemoryAllocator hostMemoryAllocator; private ArrowReaderWrapper(HostBufferProvider provider, HostMemoryAllocator hostMemoryAllocator) { this.provider = provider; this.hostMemoryAllocator = hostMemoryAllocator; buffer = this.hostMemoryAllocator.allocate(10 * 1024 * 1024, false); } // Called From JNI public long readInto(long dstAddress, long amount) { long totalRead = 0; long amountLeft = amount; while (amountLeft > 0) { long amountToCopy = Math.min(amountLeft, buffer.length); long amountRead = provider.readInto(buffer, amountToCopy); buffer.copyToMemory(totalRead + dstAddress, amountRead); amountLeft -= amountRead; totalRead += amountRead; if (amountRead < amountToCopy) { // EOF amountLeft = 0; } } return totalRead; } @Override public void close() { if (provider != null) { provider.close(); provider = null; } if (buffer != null) { buffer.close(); buffer = null; } } } private static class ArrowIPCStreamedTableReader implements StreamedTableReader { private final ArrowIPCOptions.NeedGpu callback; private long handle; private ArrowReaderWrapper provider; private ArrowIPCStreamedTableReader(ArrowIPCOptions options, File inputFile) { this.provider = null; this.handle = readArrowIPCFileBegin( inputFile.getAbsolutePath()); this.callback = options.getCallback(); } private ArrowIPCStreamedTableReader(ArrowIPCOptions options, HostBufferProvider provider, HostMemoryAllocator hostMemoryAllocator) { this.provider = new ArrowReaderWrapper(provider, hostMemoryAllocator); this.handle = readArrowIPCBufferBegin(this.provider); this.callback = options.getCallback(); } @Override public Table getNextIfAvailable() throws CudfException { // In this case rowTarget is the minimum number of rows to read. return getNextIfAvailable(1); } @Override public Table getNextIfAvailable(int rowTarget) throws CudfException { long arrowTableHandle = readArrowIPCChunkToArrowTable(handle, rowTarget); try { if (arrowTableHandle == 0) { return null; } callback.needTheGpu(); return new Table(convertArrowTableToCudf(arrowTableHandle)); } finally { closeArrowTable(arrowTableHandle); } } @Override public void close() throws CudfException { if (handle != 0) { readArrowIPCEnd(handle); } handle = 0; if (provider != null) { provider.close(); provider = null; } } } /** * Get a reader that will return tables. * @param options options for reading. * @param inputFile the file to read the Arrow IPC formatted data from * @return a reader. */ public static StreamedTableReader readArrowIPCChunked(ArrowIPCOptions options, File inputFile) { return new ArrowIPCStreamedTableReader(options, inputFile); } /** * Get a reader that will return tables. * @param inputFile the file to read the Arrow IPC formatted data from * @return a reader. */ public static StreamedTableReader readArrowIPCChunked(File inputFile) { return readArrowIPCChunked(ArrowIPCOptions.DEFAULT, inputFile); } /** * Get a reader that will return tables. * @param options options for reading. 
* @param provider what will provide the data being read. * @return a reader. */ public static StreamedTableReader readArrowIPCChunked(ArrowIPCOptions options, HostBufferProvider provider, HostMemoryAllocator hostMemoryAllocator) { return new ArrowIPCStreamedTableReader(options, provider, hostMemoryAllocator); } public static StreamedTableReader readArrowIPCChunked(ArrowIPCOptions options, HostBufferProvider provider) { return new ArrowIPCStreamedTableReader(options, provider, DefaultHostMemoryAllocator.get()); } /** * Get a reader that will return tables. * @param provider what will provide the data being read. * @return a reader. */ public static StreamedTableReader readArrowIPCChunked(HostBufferProvider provider) { return readArrowIPCChunked(ArrowIPCOptions.DEFAULT, provider); } /** * Concatenate multiple tables together to form a single table. * The schema of each table (i.e.: number of columns and types of each column) must be equal * across all tables and will determine the schema of the resulting table. */ public static Table concatenate(Table... tables) { if (tables.length < 2) { throw new IllegalArgumentException("concatenate requires 2 or more tables"); } int numColumns = tables[0].getNumberOfColumns(); long[] tableHandles = new long[tables.length]; for (int i = 0; i < tables.length; ++i) { tableHandles[i] = tables[i].nativeHandle; assert tables[i].getNumberOfColumns() == numColumns : "all tables must have the same schema"; } return new Table(concatenate(tableHandles)); } /** * Interleave all columns into a single column. Columns must all have the same data type and length. * * Example: * ``` * input = [[A1, A2, A3], [B1, B2, B3]] * return = [A1, B1, A2, B2, A3, B3] * ``` * * @return The interleaved columns as a single column */ public ColumnVector interleaveColumns() { assert this.getNumberOfColumns() >= 2 : ".interleaveColumns() operation requires at least 2 columns"; return new ColumnVector(interleaveColumns(this.nativeHandle)); } /** * Repeat each row of this table count times. * @param count the number of times to repeat each row. * @return the new Table. */ public Table repeat(int count) { return new Table(repeatStaticCount(this.nativeHandle, count)); } /** * Create a new table by repeating each row of this table. The number of * repetitions of each row is defined by the corresponding value in counts. * @param counts the number of times to repeat each row. Cannot have nulls, must be an * Integer type, and must have one entry for each row in the table. * @return the new Table. * @throws CudfException on any error. */ public Table repeat(ColumnView counts) { return new Table(repeatColumnCount(this.nativeHandle, counts.getNativeView())); } /** * Partition this table using the mapping in partitionMap. partitionMap must be an integer * column. The number of rows in partitionMap must be the same as this table. Each row * in the map will indicate which partition the rows in the table belong to. * @param partitionMap the partitions for each row. 
* @param numberOfPartitions number of partitions * @return {@link PartitionedTable} Table that exposes a limited functionality of the * {@link Table} class */ public PartitionedTable partition(ColumnView partitionMap, int numberOfPartitions) { int[] partitionOffsets = new int[numberOfPartitions]; return new PartitionedTable(new Table(partition( getNativeView(), partitionMap.getNativeView(), partitionOffsets.length, partitionOffsets)), partitionOffsets); } /** * Find smallest indices in a sorted table where values should be inserted to maintain order. * <pre> * Example: * * Single column: * idx 0 1 2 3 4 * inputTable = { 10, 20, 20, 30, 50 } * valuesTable = { 20 } * result = { 1 } * * Multi Column: * idx 0 1 2 3 4 * inputTable = {{ 10, 20, 20, 20, 20 }, * { 5.0, .5, .5, .7, .7 }, * { 90, 77, 78, 61, 61 }} * valuesTable = {{ 20 }, * { .7 }, * { 61 }} * result = { 3 } * </pre> * The input table and the values table need to be non-empty (row count > 0) * @param areNullsSmallest per column, true if nulls are assumed smallest * @param valueTable the table of values to find insertion locations for * @param descFlags per column indicates the ordering, true if descending. * @return ColumnVector with lower bound indices for all rows in valueTable */ public ColumnVector lowerBound(boolean[] areNullsSmallest, Table valueTable, boolean[] descFlags) { assertForBounds(valueTable); return new ColumnVector(bound(this.nativeHandle, valueTable.nativeHandle, descFlags, areNullsSmallest, false)); } /** * Find smallest indices in a sorted table where values should be inserted to maintain order. * This is a convenience method. It pulls out the columns indicated by the args and sets up the * ordering properly to call `lowerBound`. * @param valueTable the table of values to find insertion locations for * @param args the sort order used to sort this table. * @return ColumnVector with lower bound indices for all rows in valueTable */ public ColumnVector lowerBound(Table valueTable, OrderByArg... args) { boolean[] areNullsSmallest = new boolean[args.length]; boolean[] descFlags = new boolean[args.length]; ColumnVector[] inputColumns = new ColumnVector[args.length]; ColumnVector[] searchColumns = new ColumnVector[args.length]; for (int i = 0; i < args.length; i++) { areNullsSmallest[i] = args[i].isNullSmallest; descFlags[i] = args[i].isDescending; inputColumns[i] = columns[args[i].index]; searchColumns[i] = valueTable.columns[args[i].index]; } try (Table input = new Table(inputColumns); Table search = new Table(searchColumns)) { return input.lowerBound(areNullsSmallest, search, descFlags); } } /** * Find largest indices in a sorted table where values should be inserted to maintain order. * Given a sorted table return the upper bound. * <pre> * Example: * * Single column: * idx 0 1 2 3 4 * inputTable = { 10, 20, 20, 30, 50 } * valuesTable = { 20 } * result = { 3 } * * Multi Column: * idx 0 1 2 3 4 * inputTable = {{ 10, 20, 20, 20, 20 }, * { 5.0, .5, .5, .7, .7 }, * { 90, 77, 78, 61, 61 }} * valuesTable = {{ 20 }, * { .7 }, * { 61 }} * result = { 5 } * </pre> * The input table and the values table need to be non-empty (row count > 0) * @param areNullsSmallest per column, true if nulls are assumed smallest * @param valueTable the table of values to find insertion locations for * @param descFlags per column indicates the ordering, true if descending. 
   * @return ColumnVector with upper bound indices for all rows in valueTable
   */
  public ColumnVector upperBound(boolean[] areNullsSmallest, Table valueTable,
                                 boolean[] descFlags) {
    assertForBounds(valueTable);
    return new ColumnVector(bound(this.nativeHandle, valueTable.nativeHandle,
        descFlags, areNullsSmallest, true));
  }

  /**
   * Find largest indices in a sorted table where values should be inserted to maintain order.
   * This is a convenience method. It pulls out the columns indicated by the args and sets up the
   * ordering properly to call `upperBound`.
   * @param valueTable the table of values to find insertion locations for
   * @param args the sort order used to sort this table.
   * @return ColumnVector with upper bound indices for all rows in valueTable
   */
  public ColumnVector upperBound(Table valueTable, OrderByArg... args) {
    boolean[] areNullsSmallest = new boolean[args.length];
    boolean[] descFlags = new boolean[args.length];
    ColumnVector[] inputColumns = new ColumnVector[args.length];
    ColumnVector[] searchColumns = new ColumnVector[args.length];
    for (int i = 0; i < args.length; i++) {
      areNullsSmallest[i] = args[i].isNullSmallest;
      descFlags[i] = args[i].isDescending;
      inputColumns[i] = columns[args[i].index];
      searchColumns[i] = valueTable.columns[args[i].index];
    }
    try (Table input = new Table(inputColumns);
         Table search = new Table(searchColumns)) {
      return input.upperBound(areNullsSmallest, search, descFlags);
    }
  }

  private void assertForBounds(Table valueTable) {
    assert this.getRowCount() != 0 : "Input table cannot be empty";
    assert valueTable.getRowCount() != 0 : "Value table cannot be empty";
    for (int i = 0; i < Math.min(columns.length, valueTable.columns.length); i++) {
      assert valueTable.columns[i].getType().equals(this.getColumn(i).getType()) :
          "Input and values tables' data types do not match";
    }
  }

  /**
   * Joins two tables, all of the left against all of the right. Be careful, as the result
   * gets very big and you can easily use up all of the GPU's memory.
   * @param right the right table
   * @return the joined table. The order of the columns returned will be left columns,
   * right columns.
   */
  public Table crossJoin(Table right) {
    return new Table(Table.crossJoin(this.nativeHandle, right.nativeHandle));
  }

  /////////////////////////////////////////////////////////////////////////////
  // TABLE MANIPULATION APIs
  /////////////////////////////////////////////////////////////////////////////

  /**
   * Get back a gather map that can be used to sort the data. This allows you to sort by data
   * that does not appear in the final result and not pay the cost of gathering the data that
   * is only needed for sorting.
   * @param args what order to sort the data by
   * @return a gather map
   */
  public ColumnVector sortOrder(OrderByArg... args) {
    long[] sortKeys = new long[args.length];
    boolean[] isDescending = new boolean[args.length];
    boolean[] areNullsSmallest = new boolean[args.length];
    for (int i = 0; i < args.length; i++) {
      int index = args[i].index;
      assert (index >= 0 && index < columns.length) :
          "index is out of range 0 <= " + index + " < " + columns.length;
      isDescending[i] = args[i].isDescending;
      areNullsSmallest[i] = args[i].isNullSmallest;
      sortKeys[i] = columns[index].getNativeView();
    }
    return new ColumnVector(sortOrder(nativeHandle, sortKeys, isDescending, areNullsSmallest));
  }

  /**
   * Orders the table using the sort keys, returning a newly allocated table. The caller is
   * responsible for cleaning up the {@link ColumnVector} returned as part of the output
   * {@link Table}.
   * <p>
   * Example usage: orderBy(OrderByArg.asc(0), OrderByArg.desc(3)...);
   * @param args the sort keys and their ordering, in order of precedence.
   * @return Sorted Table
   */
  public Table orderBy(OrderByArg... args) {
    long[] sortKeys = new long[args.length];
    boolean[] isDescending = new boolean[args.length];
    boolean[] areNullsSmallest = new boolean[args.length];
    for (int i = 0; i < args.length; i++) {
      int index = args[i].index;
      assert (index >= 0 && index < columns.length) :
          "index is out of range 0 <= " + index + " < " + columns.length;
      isDescending[i] = args[i].isDescending;
      areNullsSmallest[i] = args[i].isNullSmallest;
      sortKeys[i] = columns[index].getNativeView();
    }
    return new Table(orderBy(nativeHandle, sortKeys, isDescending, areNullsSmallest));
  }

  /**
   * Merge multiple already sorted tables keeping the sort order the same.
   * This is a more efficient version of concatenate followed by orderBy, but requires that
   * the input already be sorted.
   * @param tables the tables that should be merged.
   * @param args the ordering of the tables. Should match how they were sorted initially.
   * @return a combined sorted table.
   */
  public static Table merge(Table[] tables, OrderByArg... args) {
    assert tables.length > 0;
    long[] tableHandles = new long[tables.length];
    Table first = tables[0];
    assert args.length <= first.columns.length;
    for (int i = 0; i < tables.length; i++) {
      Table t = tables[i];
      assert t != null;
      assert t.columns.length == first.columns.length;
      tableHandles[i] = t.nativeHandle;
    }
    int[] sortKeyIndexes = new int[args.length];
    boolean[] isDescending = new boolean[args.length];
    boolean[] areNullsSmallest = new boolean[args.length];
    for (int i = 0; i < args.length; i++) {
      int index = args[i].index;
      assert (index >= 0 && index < first.columns.length) :
          "index is out of range 0 <= " + index + " < " + first.columns.length;
      isDescending[i] = args[i].isDescending;
      areNullsSmallest[i] = args[i].isNullSmallest;
      sortKeyIndexes[i] = index;
    }
    return new Table(merge(tableHandles, sortKeyIndexes, isDescending, areNullsSmallest));
  }

  /**
   * Merge multiple already sorted tables keeping the sort order the same.
   * This is a more efficient version of concatenate followed by orderBy, but requires that
   * the input already be sorted.
   * @param tables the tables that should be merged.
   * @param args the ordering of the tables. Should match how they were sorted initially.
   * @return a combined sorted table.
   */
  public static Table merge(List<Table> tables, OrderByArg... args) {
    return merge(tables.toArray(new Table[tables.size()]), args);
  }

  /**
   * Returns aggregate operations grouped by columns provided in indices
   * @param groupByOptions Options provided in the builder
   * @param indices columns to be considered for groupBy
   */
  public GroupByOperation groupBy(GroupByOptions groupByOptions, int... indices) {
    return groupByInternal(groupByOptions, indices);
  }

  /**
   * Returns aggregate operations grouped by columns provided in indices
   * with default options as below:
   *  - null is considered as a key while grouping.
   *  - keys are not presorted.
   *  - empty key order array.
   *  - empty null order array.
   * @param indices columns to be considered for groupBy
   */
  public GroupByOperation groupBy(int... indices) {
    return groupByInternal(GroupByOptions.builder().withIgnoreNullKeys(false).build(), indices);
  }

  private GroupByOperation groupByInternal(GroupByOptions groupByOptions, int[] indices) {
    int[] operationIndicesArray = copyAndValidate(indices);
    return new GroupByOperation(this, groupByOptions, operationIndicesArray);
  }

  /**
   * Round-robin partition a table into the specified number of partitions. The first row is
   * placed in the specified starting partition, the next row is placed in the next partition,
   * and so on. When the last partition is reached then the next partition is partition 0 and
   * the algorithm continues until all rows have been placed in partitions, evenly distributing
   * the rows among the partitions.
   * @param numberOfPartitions - number of partitions to use
   * @param startPartition - starting partition index (i.e.: where first row is placed).
   * @return - {@link PartitionedTable} - Table that exposes a limited functionality of the
   * {@link Table} class
   */
  public PartitionedTable roundRobinPartition(int numberOfPartitions, int startPartition) {
    int[] partitionOffsets = new int[numberOfPartitions];
    return new PartitionedTable(new Table(Table.roundRobinPartition(nativeHandle,
        numberOfPartitions, startPartition, partitionOffsets)), partitionOffsets);
  }

  public TableOperation onColumns(int... indices) {
    int[] operationIndicesArray = copyAndValidate(indices);
    return new TableOperation(this, operationIndicesArray);
  }

  private int[] copyAndValidate(int[] indices) {
    int[] operationIndicesArray = new int[indices.length];
    for (int i = 0; i < indices.length; i++) {
      operationIndicesArray[i] = indices[i];
      assert operationIndicesArray[i] >= 0 && operationIndicesArray[i] < columns.length :
          "operation index is out of range 0 <= " + operationIndicesArray[i] + " < " +
              columns.length;
    }
    return operationIndicesArray;
  }

  /**
   * Filters this table using a column of boolean values as a mask, returning a new one.
   * <p>
   * Given a mask column, each element `i` from the input columns
   * is copied to the output columns if the corresponding element `i` in the mask is
   * non-null and `true`. This operation is stable: the input order is preserved.
   * <p>
   * This table and mask columns must have the same number of rows.
   * <p>
   * The output table has size equal to the number of elements in boolean_mask
   * that are both non-null and `true`.
   * <p>
   * If the original table row count is zero, there is no error, and an empty table is returned.
   * @param mask column of type {@link DType#BOOL8} used as a mask to filter
   * the input column
   * @return table containing copy of all elements of this table passing
   * the filter defined by the boolean mask
   */
  public Table filter(ColumnView mask) {
    assert mask.getType().equals(DType.BOOL8) : "Mask column must be of type BOOL8";
    assert getRowCount() == 0 || getRowCount() == mask.getRowCount() :
        "Mask column has incorrect size";
    return new Table(filter(nativeHandle, mask.getNativeView()));
  }

  /**
   * Enum to specify which of duplicate rows/elements will be copied to the output.
   */
  public enum DuplicateKeepOption {
    KEEP_ANY(0),
    KEEP_FIRST(1),
    KEEP_LAST(2),
    KEEP_NONE(3);

    final int keepValue;

    DuplicateKeepOption(int keepValue) {
      this.keepValue = keepValue;
    }
  }

  /**
   * Copy rows of the current table to an output table such that duplicate rows in the key
   * columns are ignored (i.e., only one row from the duplicate ones will be copied). These key
   * columns are a subset of the current table columns and their indices are specified by an
   * input array.
   * <p>
   * The order of rows in the output table is not specified.
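   * <p>
   * A hedged sketch (the column index is an illustrative assumption): keep one arbitrary row
   * per distinct value of column 0, treating nulls as equal:
   * <pre>
   *   try (Table deduped = table.dropDuplicates(new int[]{0},
   *       DuplicateKeepOption.KEEP_ANY, true)) {
   *     // deduped has at most one row per distinct key
   *   }
   * </pre>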
   *
   * @param keyColumns Array of indices representing key columns from the current table.
   * @param keep Option specifying to keep any, first, last, or none of the found duplicates.
   * @param nullsEqual Flag to denote whether nulls are treated as equal when comparing rows of
   *                   the key columns to check for uniqueness.
   *
   * @return Table with unique keys.
   */
  public Table dropDuplicates(int[] keyColumns, DuplicateKeepOption keep, boolean nullsEqual) {
    assert keyColumns.length >= 1 : "Input keyColumns must contain indices of at least one column";
    return new Table(dropDuplicates(nativeHandle, keyColumns, keep.keepValue, nullsEqual));
  }

  /**
   * Count how many rows in the table are distinct from one another.
   * @param nullsEqual if nulls should be considered equal to each other or not.
   */
  public int distinctCount(NullEquality nullsEqual) {
    return distinctCount(nativeHandle, nullsEqual.nullsEqual);
  }

  /**
   * Count how many rows in the table are distinct from one another.
   * Nulls are considered to be equal to one another.
   */
  public int distinctCount() {
    return distinctCount(nativeHandle, true);
  }

  /**
   * Split a table at given boundaries, but the result of each split has memory that is laid out
   * in a contiguous range of memory. This allows for us to optimize copying the data in a single
   * operation.
   *
   * <code>
   * Example:
   * input:  [{10, 12, 14, 16, 18, 20, 22, 24, 26, 28},
   *          {50, 52, 54, 56, 58, 60, 62, 64, 66, 68}]
   * splits: {2, 5, 9}
   * output: [{{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}},
   *          {{50, 52}, {54, 56, 58}, {60, 62, 64, 66}, {68}}]
   * </code>
   * @param indices A vector of indices where to make the split
   * @return The tables split at those points. NOTE: It is the responsibility of the caller to
   * close the result. Each table and column holds a reference to the original buffer. But both
   * the buffer and the table must be closed for the memory to be released.
   */
  public ContiguousTable[] contiguousSplit(int... indices) {
    return contiguousSplit(nativeHandle, indices);
  }

  /**
   * Create an instance of `ChunkedPack` which can be used to pack this table
   * contiguously in memory utilizing a bounce buffer of size `bounceBufferSize`.
   *
   * This version of `makeChunkedPack` takes a `RmmDeviceMemoryResource`, which can be used
   * to pre-allocate all scratch and temporary space required for the state of
   * `cudf::chunked_pack`.
   *
   * The caller is responsible for calling close on the returned `ChunkedPack` object.
   *
   * @param bounceBufferSize The size of bounce buffer that will be utilized to pack into
   * @param tempMemoryResource A memory resource that is used to satisfy allocations for
   *                           temporary and thrust scratch space.
   * @return An instance of `ChunkedPack` that the caller must use to finish the operation.
   */
  public ChunkedPack makeChunkedPack(
      long bounceBufferSize, RmmDeviceMemoryResource tempMemoryResource) {
    long tempMemoryResourceHandle = tempMemoryResource.getHandle();
    return new ChunkedPack(
        makeChunkedPack(nativeHandle, bounceBufferSize, tempMemoryResourceHandle));
  }

  /**
   * Create an instance of `ChunkedPack` which can be used to pack this table
   * contiguously in memory utilizing a bounce buffer of size `bounceBufferSize`.
   *
   * This version of `makeChunkedPack` makes use of the default per-device memory resource,
   * for scratch and temporary space required for the state of `cudf::chunked_pack`.
   *
   * The caller is responsible for calling close on the returned `ChunkedPack` object.
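   * <p>
   * A hedged usage sketch (the bounce buffer and the hasNext/next loop are assumptions about
   * the ChunkedPack API, for illustration only):
   * <pre>
   *   try (ChunkedPack pack = table.makeChunkedPack(bounceBuffer.getLength())) {
   *     while (pack.hasNext()) {
   *       long copied = pack.next(bounceBuffer);
   *       // copy `copied` bytes out of bounceBuffer before the next iteration
   *     }
   *   }
   * </pre>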
   *
   * @param bounceBufferSize The size of bounce buffer that will be utilized to pack into
   * @return An instance of `ChunkedPack` that the caller must use to finish the operation.
   */
  public ChunkedPack makeChunkedPack(long bounceBufferSize) {
    return new ChunkedPack(
        makeChunkedPack(nativeHandle, bounceBufferSize, 0));
  }

  /**
   * Explodes a list column's elements.
   *
   * Any list is exploded, which means the elements of the list in each row are expanded
   * into new rows in the output. The corresponding rows for other columns in the input
   * are duplicated.
   *
   * <code>
   * Example:
   * input:  [[5,10,15], 100],
   *         [[20,25],   200],
   *         [[30],      300]
   * index: 0
   * output: [5,  100],
   *         [10, 100],
   *         [15, 100],
   *         [20, 200],
   *         [25, 200],
   *         [30, 300]
   * </code>
   *
   * Nulls propagate in different ways depending on what is null.
   * <code>
   * input:  [[5,null,15], 100],
   *         [null,        200]
   * index: 0
   * output: [5,    100],
   *         [null, 100],
   *         [15,   100]
   * </code>
   * Note that null lists are completely removed from the output
   * and nulls inside lists are pulled out and remain.
   *
   * @param index Column index to explode inside the table.
   * @return A new table with explode_col exploded.
   */
  public Table explode(int index) {
    assert 0 <= index && index < columns.length : "Column index is out of range";
    assert columns[index].getType().equals(DType.LIST) : "Column to explode must be of type LIST";
    return new Table(explode(nativeHandle, index));
  }

  /**
   * Explodes a list column's elements and includes a position column.
   *
   * Any list is exploded, which means the elements of the list in each row are expanded into new rows
   * in the output. The corresponding rows for other columns in the input are duplicated. A position
   * column is added that has the index inside the original list for each row. Example:
   * <code>
   * input:  [[5,10,15], 100],
   *         [[20,25],   200],
   *         [[30],      300]
   * index: 0
   * output: [0, 5,  100],
   *         [1, 10, 100],
   *         [2, 15, 100],
   *         [0, 20, 200],
   *         [1, 25, 200],
   *         [0, 30, 300]
   * </code>
   *
   * Nulls and empty lists propagate in different ways depending on what is null or empty.
   * <code>
   * input:  [[5,null,15], 100],
   *         [null,        200]
   * index: 0
   * output: [0, 5,    100],
   *         [1, null, 100],
   *         [2, 15,   100]
   * </code>
   *
   * Note that null lists are not included in the resulting table, but nulls inside
   * lists and empty lists will be represented with a null entry for that column in that row.
   *
   * @param index Column index to explode inside the table.
   * @return A new table with exploded value and position. The column order of return table is
   *         [cols before explode_input, explode_position, explode_value, cols after explode_input].
   */
  public Table explodePosition(int index) {
    assert 0 <= index && index < columns.length : "Column index is out of range";
    assert columns[index].getType().equals(DType.LIST) : "Column to explode must be of type LIST";
    return new Table(explodePosition(nativeHandle, index));
  }

  /**
   * Explodes a list column's elements.
   *
   * Any list is exploded, which means the elements of the list in each row are expanded
   * into new rows in the output. The corresponding rows for other columns in the input
   * are duplicated.
   *
   * <code>
   * Example:
   * input:  [[5,10,15], 100],
   *         [[20,25],   200],
   *         [[30],      300],
   * index: 0
   * output: [5,  100],
   *         [10, 100],
   *         [15, 100],
   *         [20, 200],
   *         [25, 200],
   *         [30, 300]
   * </code>
   *
   * Nulls propagate in different ways depending on what is null.
   * <code>
   * input:  [[5,null,15], 100],
   *         [null,        200]
   * index: 0
   * output: [5,    100],
   *         [null, 100],
   *         [15,   100],
   *         [null, 200]
   * </code>
   * Note that null lists result in a null entry in the output instead of being removed,
   * while nulls inside lists are pulled out and remain.
   *
   * @param index Column index to explode inside the table.
   * @return A new table with explode_col exploded.
   */
  public Table explodeOuter(int index) {
    assert 0 <= index && index < columns.length : "Column index is out of range";
    assert columns[index].getType().equals(DType.LIST) : "Column to explode must be of type LIST";
    return new Table(explodeOuter(nativeHandle, index));
  }

  /**
   * Explodes a list column's elements retaining any null entries or empty lists and includes a
   * position column.
   *
   * Any list is exploded, which means the elements of the list in each row are expanded into new rows
   * in the output. The corresponding rows for other columns in the input are duplicated. A position
   * column is added that has the index inside the original list for each row. Example:
   *
   * <code>
   * Example:
   * input:  [[5,10,15], 100],
   *         [[20,25],   200],
   *         [[30],      300],
   * index: 0
   * output: [0, 5,  100],
   *         [1, 10, 100],
   *         [2, 15, 100],
   *         [0, 20, 200],
   *         [1, 25, 200],
   *         [0, 30, 300]
   * </code>
   *
   * Nulls and empty lists propagate as null entries in the result.
   * <code>
   * input:  [[5,null,15], 100],
   *         [null,        200],
   *         [[],          300]
   * index: 0
   * output: [0, 5,    100],
   *         [1, null, 100],
   *         [2, 15,   100],
   *         [0, null, 200],
   *         [0, null, 300]
   * </code>
   *
   * @param index Column index to explode inside the table.
   * @return A new table with exploded value and position. The column order of return table is
   *         [cols before explode_input, explode_position, explode_value, cols after explode_input].
   */
  public Table explodeOuterPosition(int index) {
    assert 0 <= index && index < columns.length : "Column index is out of range";
    assert columns[index].getType().equals(DType.LIST) : "Column to explode must be of type LIST";
    return new Table(explodeOuterPosition(nativeHandle, index));
  }

  /**
   * Returns an approximate cumulative size in bits of all columns in the `table_view` for each row.
   * This function counts bits instead of bytes to account for the null mask which only has one
   * bit per row. Each row in the returned column is the sum of the per-row bit size for each column
   * in the table.
   *
   * In some cases, this is an inexact approximation. Specifically, columns of lists and strings
   * require N+1 offsets to represent N rows. It is up to the caller to calculate the small
   * additional overhead of the terminating offset for any group of rows being considered.
   *
   * This function returns the per-row bit sizes as the columns are currently formed. This can
   * end up being larger than the number you would get by gathering the rows. Specifically,
   * the push-down of struct column validity masks can nullify rows that contain data for
   * string or list columns. In these cases, the size returned is conservative such that:
   * row_bit_count(column(x)) >= row_bit_count(gather(column(x)))
   *
   * @return INT32 column of bit size per row of the table
   */
  public ColumnVector rowBitCount() {
    return new ColumnVector(rowBitCount(getNativeView()));
  }

  /**
   * Gathers the rows of this table according to `gatherMap` such that row "i"
   * in the resulting table's columns will contain row "gatherMap[i]" from this table.
   * The number of rows in the result table will be equal to the number of elements in
   * `gatherMap`.
   *
   * A negative value `i` in the `gatherMap` is interpreted as `i+n`, where
   * `n` is the number of rows in this table.
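   *
   * For illustration, a minimal sketch; the table name {@code t} is hypothetical and assumed to
   * have at least three rows:
   * <pre>{@code
   * try (ColumnVector map = ColumnVector.fromInts(2, 0, 2);
   *      Table picked = t.gather(map)) {
   *   // picked row 0 is t row 2, picked row 1 is t row 0, picked row 2 is t row 2 again
   * }
   * }</pre>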
* @param gatherMap the map of indexes. Must be non-nullable and integral type. * @return the resulting Table. */ public Table gather(ColumnView gatherMap) { return gather(gatherMap, OutOfBoundsPolicy.NULLIFY); } /** * Gathers the rows of this table according to `gatherMap` such that row "i" * in the resulting table's columns will contain row "gatherMap[i]" from this table. * The number of rows in the result table will be equal to the number of elements in * `gatherMap`. * * A negative value `i` in the `gatherMap` is interpreted as `i+n`, where * `n` is the number of rows in this table. * * @param gatherMap the map of indexes. Must be non-nullable and integral type. * @param outOfBoundsPolicy policy to use when an out-of-range value is in `gatherMap`. * @return the resulting Table. */ public Table gather(ColumnView gatherMap, OutOfBoundsPolicy outOfBoundsPolicy) { boolean checkBounds = outOfBoundsPolicy == OutOfBoundsPolicy.NULLIFY; return new Table(gather(nativeHandle, gatherMap.getNativeView(), checkBounds)); } /** * Scatters values from the source table into the target table out-of-place, returning a new * result table. The scatter is performed according to a scatter map such that row `scatterMap[i]` * of the destination table gets row `i` of the source table. All other rows of the destination * table equal corresponding rows of the target table. * * The number of columns in source must match the number of columns in target and their * corresponding data types must be the same. * * If the same index appears more than once in the scatter map, the result is undefined. * * A negative value `i` in the `scatterMap` is interpreted as `i + n`, where `n` is the number of * rows in the `target` table. * * @param scatterMap The map of indexes. Must be non-nullable and integral type. * @param target The table into which rows from the current table are to be scattered out-of-place. * @return A new table which is the result of out-of-place scattering the source table into the * target table. */ public Table scatter(ColumnView scatterMap, Table target) { return new Table(scatterTable(nativeHandle, scatterMap.getNativeView(), target.getNativeView())); } /** * Scatters values from the source rows into the target table out-of-place, returning a new result * table. The scatter is performed according to a scatter map such that row `scatterMap[i]` of the * destination table is replaced by the source row `i`. All other rows of the destination table * equal corresponding rows of the target table. * * The number of elements in source must match the number of columns in target and their * corresponding data types must be the same. * * If the same index appears more than once in the scatter map, the result is undefined. * * A negative value `i` in the `scatterMap` is interpreted as `i + n`, where `n` is the number of * rows in the `target` table. * * @param source The input scalars containing values to be scattered into the target table. * @param scatterMap The map of indexes. Must be non-nullable and integral type. * @param target The table into which the values from source are to be scattered out-of-place. * @return A new table which is the result of out-of-place scattering the source values into the * target table. 
   */
  public static Table scatter(Scalar[] source, ColumnView scatterMap, Table target) {
    long[] srcScalarHandles = new long[source.length];
    for(int i = 0; i < source.length; ++i) {
      assert source[i] != null : "Input scalars should not be null";
      srcScalarHandles[i] = source[i].getScalarHandle();
    }
    return new Table(scatterScalars(srcScalarHandles, scatterMap.getNativeView(),
        target.getNativeView()));
  }

  private static GatherMap[] buildJoinGatherMaps(long[] gatherMapData) {
    long bufferSize = gatherMapData[0];
    long leftAddr = gatherMapData[1];
    long leftHandle = gatherMapData[2];
    long rightAddr = gatherMapData[3];
    long rightHandle = gatherMapData[4];
    GatherMap[] maps = new GatherMap[2];
    maps[0] = new GatherMap(DeviceMemoryBuffer.fromRmm(leftAddr, bufferSize, leftHandle));
    maps[1] = new GatherMap(DeviceMemoryBuffer.fromRmm(rightAddr, bufferSize, rightHandle));
    return maps;
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a left equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the table argument represents the key columns from the right table. Two {@link GatherMap}
   * instances will be returned that can be used to gather the left and right tables,
   * respectively, to produce the result of the left join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * @param rightKeys join key columns from the right table
   * @param compareNullsEqual true if null key values should match otherwise false
   * @return left and right table gather maps
   */
  public GatherMap[] leftJoinGatherMaps(Table rightKeys, boolean compareNullsEqual) {
    if (getNumberOfColumns() != rightKeys.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightKeys.getNumberOfColumns());
    }
    long[] gatherMapData =
        leftJoinGatherMaps(getNativeView(), rightKeys.getNativeView(), compareNullsEqual);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the number of rows resulting from a left equi-join between two tables.
   * It is assumed this table instance holds the key columns from the left table, and the
   * {@link HashJoin} argument has been constructed from the key columns from the right table.
   * @param rightHash hash table built from join key columns from the right table
   * @return row count of the join result
   */
  public long leftJoinRowCount(HashJoin rightHash) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    return leftJoinRowCount(getNativeView(), rightHash.getNativeView());
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a left equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the {@link HashJoin} argument has been constructed from the key columns from the right table.
   * Two {@link GatherMap} instances will be returned that can be used to gather the left and right
   * tables, respectively, to produce the result of the left join.
   * It is the responsibility of the caller to close the resulting gather map instances.
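   *
   * For illustration, a minimal sketch of the gather-map pattern. The names {@code leftKeys},
   * {@code rightKeys}, {@code left}, and {@code right} are hypothetical, and the
   * {@code HashJoin} construction and {@code GatherMap#toColumnView} accessor shown here are
   * assumptions about those classes. For a left join the right-side map can reference
   * non-matching rows, so the right table is gathered with {@link OutOfBoundsPolicy#NULLIFY}:
   * <pre>{@code
   * try (HashJoin rightHash = new HashJoin(rightKeys, true)) {
   *   GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightHash);
   *   try (ColumnView leftMap = maps[0].toColumnView(0, (int) maps[0].getRowCount());
   *        ColumnView rightMap = maps[1].toColumnView(0, (int) maps[1].getRowCount());
   *        Table joinedLeft = left.gather(leftMap);
   *        Table joinedRight = right.gather(rightMap, OutOfBoundsPolicy.NULLIFY)) {
   *     // stitch the gathered columns together to form the joined table
   *   } finally {
   *     maps[0].close();
   *     maps[1].close();
   *   }
   * }
   * }</pre>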
   * @param rightHash hash table built from join key columns from the right table
   * @return left and right table gather maps
   */
  public GatherMap[] leftJoinGatherMaps(HashJoin rightHash) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    long[] gatherMapData = leftHashJoinGatherMaps(getNativeView(), rightHash.getNativeView());
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a left equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the {@link HashJoin} argument has been constructed from the key columns from the right table.
   * Two {@link GatherMap} instances will be returned that can be used to gather the left and right
   * tables, respectively, to produce the result of the left join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * This interface allows passing an output row count that was previously computed from
   * {@link #leftJoinRowCount(HashJoin)}.
   * WARNING: Passing a row count that is smaller than the actual row count will result
   * in undefined behavior.
   * @param rightHash hash table built from join key columns from the right table
   * @param outputRowCount number of output rows in the join result
   * @return left and right table gather maps
   */
  public GatherMap[] leftJoinGatherMaps(HashJoin rightHash, long outputRowCount) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    long[] gatherMapData = leftHashJoinGatherMapsWithCount(getNativeView(),
        rightHash.getNativeView(), outputRowCount);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the number of rows from the result of a left join between two tables when a
   * conditional expression is true. It is assumed this table instance holds the columns from
   * the left table, and the table argument represents the columns from the right table.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return row count for the join result
   */
  public long conditionalLeftJoinRowCount(Table rightTable, CompiledExpression condition) {
    return conditionalLeftJoinRowCount(getNativeView(), rightTable.getNativeView(),
        condition.getNativeHandle());
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a left join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the left join.
   * It is the responsibility of the caller to close the resulting gather map instances.
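   *
   * For illustration, a hypothetical sketch of a conditional left join on
   * {@code left.col0 > right.col0}; the table names are assumptions, and the expression is
   * built with the AST classes from {@code ai.rapids.cudf.ast}:
   * <pre>{@code
   * BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
   *     new ColumnReference(0, TableReference.LEFT),
   *     new ColumnReference(0, TableReference.RIGHT));
   * try (CompiledExpression condition = expr.compile()) {
   *   GatherMap[] maps = left.conditionalLeftJoinGatherMaps(right, condition);
   *   // ... gather with maps[0] and maps[1], then close both maps
   * }
   * }</pre>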
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return left and right table gather maps
   */
  public GatherMap[] conditionalLeftJoinGatherMaps(Table rightTable,
                                                   CompiledExpression condition) {
    long[] gatherMapData = conditionalLeftJoinGatherMaps(getNativeView(),
        rightTable.getNativeView(), condition.getNativeHandle());
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a left join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the left join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * This interface allows passing an output row count that was previously computed from
   * {@link #conditionalLeftJoinRowCount(Table, CompiledExpression)}.
   * WARNING: Passing a row count that is smaller than the actual row count will result
   * in undefined behavior.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @param outputRowCount number of output rows in the join result
   * @return left and right table gather maps
   */
  public GatherMap[] conditionalLeftJoinGatherMaps(Table rightTable,
                                                   CompiledExpression condition,
                                                   long outputRowCount) {
    long[] gatherMapData = conditionalLeftJoinGatherMapsWithCount(getNativeView(),
        rightTable.getNativeView(), condition.getNativeHandle(), outputRowCount);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes output size information for a left join between two tables using a mix of equality
   * and inequality conditions. The entire join condition is assumed to be a logical AND of the
   * equality condition and inequality condition.
   * NOTE: It is the responsibility of the caller to close the resulting size information object
   * or native resources can be leaked!
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @return size information for the join
   */
  public static MixedJoinSize mixedLeftJoinSize(Table leftKeys, Table rightKeys,
                                                Table leftConditional, Table rightConditional,
                                                CompiledExpression condition,
                                                NullEquality nullEquality) {
    long[] mixedSizeInfo = mixedLeftJoinSize(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(), nullEquality == NullEquality.EQUAL);
    assert mixedSizeInfo.length == 2;
    long outputRowCount = mixedSizeInfo[0];
    long matchesColumnHandle = mixedSizeInfo[1];
    return new MixedJoinSize(outputRowCount, new ColumnVector(matchesColumnHandle));
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a left join between
   * two tables using a mix of equality and inequality conditions.
The entire join condition is * assumed to be a logical AND of the equality condition and inequality condition. * Two {@link GatherMap} instances will be returned that can be used to gather * the left and right tables, respectively, to produce the result of the left join. * It is the responsibility of the caller to close the resulting gather map instances. * @param leftKeys the left table's key columns for the equality condition * @param rightKeys the right table's key columns for the equality condition * @param leftConditional the left table's columns needed to evaluate the inequality condition * @param rightConditional the right table's columns needed to evaluate the inequality condition * @param condition the inequality condition of the join * @param nullEquality whether nulls should compare as equal * @return left and right table gather maps */ public static GatherMap[] mixedLeftJoinGatherMaps(Table leftKeys, Table rightKeys, Table leftConditional, Table rightConditional, CompiledExpression condition, NullEquality nullEquality) { long[] gatherMapData = mixedLeftJoinGatherMaps( leftKeys.getNativeView(), rightKeys.getNativeView(), leftConditional.getNativeView(), rightConditional.getNativeView(), condition.getNativeHandle(), nullEquality == NullEquality.EQUAL); return buildJoinGatherMaps(gatherMapData); } /** * Computes the gather maps that can be used to manifest the result of a left join between * two tables using a mix of equality and inequality conditions. The entire join condition is * assumed to be a logical AND of the equality condition and inequality condition. * Two {@link GatherMap} instances will be returned that can be used to gather * the left and right tables, respectively, to produce the result of the left join. * It is the responsibility of the caller to close the resulting gather map instances. * This interface allows passing the size result from * {@link #mixedLeftJoinSize(Table, Table, Table, Table, CompiledExpression, NullEquality)} * when the output size was computed previously. * @param leftKeys the left table's key columns for the equality condition * @param rightKeys the right table's key columns for the equality condition * @param leftConditional the left table's columns needed to evaluate the inequality condition * @param rightConditional the right table's columns needed to evaluate the inequality condition * @param condition the inequality condition of the join * @param nullEquality whether nulls should compare as equal * @param joinSize mixed join size result * @return left and right table gather maps */ public static GatherMap[] mixedLeftJoinGatherMaps(Table leftKeys, Table rightKeys, Table leftConditional, Table rightConditional, CompiledExpression condition, NullEquality nullEquality, MixedJoinSize joinSize) { long[] gatherMapData = mixedLeftJoinGatherMapsWithSize( leftKeys.getNativeView(), rightKeys.getNativeView(), leftConditional.getNativeView(), rightConditional.getNativeView(), condition.getNativeHandle(), nullEquality == NullEquality.EQUAL, joinSize.getOutputRowCount(), joinSize.getMatches().getNativeView()); return buildJoinGatherMaps(gatherMapData); } /** * Computes the gather maps that can be used to manifest the result of an inner equi-join between * two tables. It is assumed this table instance holds the key columns from the left table, and * the table argument represents the key columns from the right table. 
   * Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the inner join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * @param rightKeys join key columns from the right table
   * @param compareNullsEqual true if null key values should match otherwise false
   * @return left and right table gather maps
   */
  public GatherMap[] innerJoinGatherMaps(Table rightKeys, boolean compareNullsEqual) {
    if (getNumberOfColumns() != rightKeys.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightKeys.getNumberOfColumns());
    }
    long[] gatherMapData =
        innerJoinGatherMaps(getNativeView(), rightKeys.getNativeView(), compareNullsEqual);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the number of rows resulting from an inner equi-join between two tables.
   * @param otherHash hash table built from join key columns from the other table
   * @return row count of the join result
   */
  public long innerJoinRowCount(HashJoin otherHash) {
    if (getNumberOfColumns() != otherHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " otherKeys: " + otherHash.getNumberOfColumns());
    }
    return innerJoinRowCount(getNativeView(), otherHash.getNativeView());
  }

  /**
   * Computes the gather maps that can be used to manifest the result of an inner equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the {@link HashJoin} argument has been constructed from the key columns from the right table.
   * Two {@link GatherMap} instances will be returned that can be used to gather the left and right
   * tables, respectively, to produce the result of the inner join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * @param rightHash hash table built from join key columns from the right table
   * @return left and right table gather maps
   */
  public GatherMap[] innerJoinGatherMaps(HashJoin rightHash) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    long[] gatherMapData = innerHashJoinGatherMaps(getNativeView(), rightHash.getNativeView());
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of an inner equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the {@link HashJoin} argument has been constructed from the key columns from the right table.
   * Two {@link GatherMap} instances will be returned that can be used to gather the left and right
   * tables, respectively, to produce the result of the inner join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * This interface allows passing an output row count that was previously computed from
   * {@link #innerJoinRowCount(HashJoin)}.
   * WARNING: Passing a row count that is smaller than the actual row count will result
   * in undefined behavior.
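   *
   * For illustration, a hypothetical sketch of the two-step pattern: compute the row count once,
   * then reuse it when building the maps (the table names and the {@code HashJoin} construction
   * are assumptions):
   * <pre>{@code
   * try (HashJoin rightHash = new HashJoin(rightKeys, true)) {
   *   long rows = leftKeys.innerJoinRowCount(rightHash);
   *   GatherMap[] maps = leftKeys.innerJoinGatherMaps(rightHash, rows);
   *   // ... gather with the maps, then close maps[0] and maps[1]
   * }
   * }</pre>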
   * @param rightHash hash table built from join key columns from the right table
   * @param outputRowCount number of output rows in the join result
   * @return left and right table gather maps
   */
  public GatherMap[] innerJoinGatherMaps(HashJoin rightHash, long outputRowCount) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    long[] gatherMapData = innerHashJoinGatherMapsWithCount(getNativeView(),
        rightHash.getNativeView(), outputRowCount);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the number of rows from the result of an inner join between two tables when a
   * conditional expression is true. It is assumed this table instance holds the columns from
   * the left table, and the table argument represents the columns from the right table.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return row count for the join result
   */
  public long conditionalInnerJoinRowCount(Table rightTable, CompiledExpression condition) {
    return conditionalInnerJoinRowCount(getNativeView(), rightTable.getNativeView(),
        condition.getNativeHandle());
  }

  /**
   * Computes the gather maps that can be used to manifest the result of an inner join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the inner join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return left and right table gather maps
   */
  public GatherMap[] conditionalInnerJoinGatherMaps(Table rightTable,
                                                    CompiledExpression condition) {
    long[] gatherMapData = conditionalInnerJoinGatherMaps(getNativeView(),
        rightTable.getNativeView(), condition.getNativeHandle());
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of an inner join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the inner join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * This interface allows passing an output row count that was previously computed from
   * {@link #conditionalInnerJoinRowCount(Table, CompiledExpression)}.
   * WARNING: Passing a row count that is smaller than the actual row count will result
   * in undefined behavior.
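   *
   * For illustration, a hypothetical sketch pairing the row-count call with this overload
   * ({@code expr} is an AST expression built elsewhere; all names are assumptions):
   * <pre>{@code
   * try (CompiledExpression condition = expr.compile()) {
   *   long rows = left.conditionalInnerJoinRowCount(right, condition);
   *   GatherMap[] maps = left.conditionalInnerJoinGatherMaps(right, condition, rows);
   *   // ... gather with the maps, then close them
   * }
   * }</pre>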
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @param outputRowCount number of output rows in the join result
   * @return left and right table gather maps
   */
  public GatherMap[] conditionalInnerJoinGatherMaps(Table rightTable,
                                                    CompiledExpression condition,
                                                    long outputRowCount) {
    long[] gatherMapData = conditionalInnerJoinGatherMapsWithCount(getNativeView(),
        rightTable.getNativeView(), condition.getNativeHandle(), outputRowCount);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes output size information for an inner join between two tables using a mix of equality
   * and inequality conditions. The entire join condition is assumed to be a logical AND of the
   * equality condition and inequality condition.
   * NOTE: It is the responsibility of the caller to close the resulting size information object
   * or native resources can be leaked!
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @return size information for the join
   */
  public static MixedJoinSize mixedInnerJoinSize(Table leftKeys, Table rightKeys,
                                                 Table leftConditional, Table rightConditional,
                                                 CompiledExpression condition,
                                                 NullEquality nullEquality) {
    long[] mixedSizeInfo = mixedInnerJoinSize(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(), nullEquality == NullEquality.EQUAL);
    assert mixedSizeInfo.length == 2;
    long outputRowCount = mixedSizeInfo[0];
    long matchesColumnHandle = mixedSizeInfo[1];
    return new MixedJoinSize(outputRowCount, new ColumnVector(matchesColumnHandle));
  }

  /**
   * Computes the gather maps that can be used to manifest the result of an inner join between
   * two tables using a mix of equality and inequality conditions. The entire join condition is
   * assumed to be a logical AND of the equality condition and inequality condition.
   * Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the inner join.
   * It is the responsibility of the caller to close the resulting gather map instances.
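   *
   * For illustration, a hypothetical sketch: the equality part comes from the key tables and the
   * inequality part from a compiled AST expression over the conditional tables (all names are
   * assumptions):
   * <pre>{@code
   * try (CompiledExpression condition = expr.compile()) {
   *   GatherMap[] maps = Table.mixedInnerJoinGatherMaps(
   *       leftKeys, rightKeys, leftCond, rightCond, condition, NullEquality.UNEQUAL);
   *   // ... gather with the maps, then close them
   * }
   * }</pre>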
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @return left and right table gather maps
   */
  public static GatherMap[] mixedInnerJoinGatherMaps(Table leftKeys, Table rightKeys,
                                                     Table leftConditional, Table rightConditional,
                                                     CompiledExpression condition,
                                                     NullEquality nullEquality) {
    long[] gatherMapData = mixedInnerJoinGatherMaps(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(),
        nullEquality == NullEquality.EQUAL);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of an inner join between
   * two tables using a mix of equality and inequality conditions. The entire join condition is
   * assumed to be a logical AND of the equality condition and inequality condition.
   * Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the inner join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * This interface allows passing the size result from
   * {@link #mixedInnerJoinSize(Table, Table, Table, Table, CompiledExpression, NullEquality)}
   * when the output size was computed previously.
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @param joinSize mixed join size result
   * @return left and right table gather maps
   */
  public static GatherMap[] mixedInnerJoinGatherMaps(Table leftKeys, Table rightKeys,
                                                     Table leftConditional, Table rightConditional,
                                                     CompiledExpression condition,
                                                     NullEquality nullEquality,
                                                     MixedJoinSize joinSize) {
    long[] gatherMapData = mixedInnerJoinGatherMapsWithSize(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(),
        nullEquality == NullEquality.EQUAL,
        joinSize.getOutputRowCount(), joinSize.getMatches().getNativeView());
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a full equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the table argument represents the key columns from the right table. Two {@link GatherMap}
   * instances will be returned that can be used to gather the left and right tables,
   * respectively, to produce the result of the full join.
   * It is the responsibility of the caller to close the resulting gather map instances.
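   *
   * For illustration, a hypothetical sketch. In a full join either map can contain out-of-bounds
   * indices for non-matching rows, so both sides are gathered with
   * {@link OutOfBoundsPolicy#NULLIFY} (the table names and the {@code GatherMap#toColumnView}
   * accessor are assumptions):
   * <pre>{@code
   * GatherMap[] maps = leftKeys.fullJoinGatherMaps(rightKeys, true);
   * try (ColumnView leftMap = maps[0].toColumnView(0, (int) maps[0].getRowCount());
   *      ColumnView rightMap = maps[1].toColumnView(0, (int) maps[1].getRowCount());
   *      Table joinedLeft = left.gather(leftMap, OutOfBoundsPolicy.NULLIFY);
   *      Table joinedRight = right.gather(rightMap, OutOfBoundsPolicy.NULLIFY)) {
   *   // stitch the gathered columns together to form the joined table
   * } finally {
   *   maps[0].close();
   *   maps[1].close();
   * }
   * }</pre>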
   * @param rightKeys join key columns from the right table
   * @param compareNullsEqual true if null key values should match otherwise false
   * @return left and right table gather maps
   */
  public GatherMap[] fullJoinGatherMaps(Table rightKeys, boolean compareNullsEqual) {
    if (getNumberOfColumns() != rightKeys.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightKeys.getNumberOfColumns());
    }
    long[] gatherMapData =
        fullJoinGatherMaps(getNativeView(), rightKeys.getNativeView(), compareNullsEqual);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the number of rows resulting from a full equi-join between two tables.
   * It is assumed this table instance holds the key columns from the left table, and the
   * {@link HashJoin} argument has been constructed from the key columns from the right table.
   * Note that unlike {@link #leftJoinRowCount(HashJoin)} and {@link #innerJoinRowCount(HashJoin)},
   * this will perform some redundant calculations compared to
   * {@link #fullJoinGatherMaps(HashJoin, long)}.
   * @param rightHash hash table built from join key columns from the right table
   * @return row count of the join result
   */
  public long fullJoinRowCount(HashJoin rightHash) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    return fullJoinRowCount(getNativeView(), rightHash.getNativeView());
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a full equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the {@link HashJoin} argument has been constructed from the key columns from the right table.
   * Two {@link GatherMap} instances will be returned that can be used to gather the left and right
   * tables, respectively, to produce the result of the full join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * @param rightHash hash table built from join key columns from the right table
   * @return left and right table gather maps
   */
  public GatherMap[] fullJoinGatherMaps(HashJoin rightHash) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    long[] gatherMapData = fullHashJoinGatherMaps(getNativeView(), rightHash.getNativeView());
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a full equi-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the {@link HashJoin} argument has been constructed from the key columns from the right table.
   * Two {@link GatherMap} instances will be returned that can be used to gather the left and right
   * tables, respectively, to produce the result of the full join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * This interface allows passing an output row count that was previously computed from
   * {@link #fullJoinRowCount(HashJoin)}.
   * WARNING: Passing a row count that is smaller than the actual row count will result
   * in undefined behavior.
   * @param rightHash hash table built from join key columns from the right table
   * @param outputRowCount number of output rows in the join result
   * @return left and right table gather maps
   */
  public GatherMap[] fullJoinGatherMaps(HashJoin rightHash, long outputRowCount) {
    if (getNumberOfColumns() != rightHash.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightHash.getNumberOfColumns());
    }
    long[] gatherMapData = fullHashJoinGatherMapsWithCount(getNativeView(),
        rightHash.getNativeView(), outputRowCount);
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a full join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the full join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return left and right table gather maps
   */
  public GatherMap[] conditionalFullJoinGatherMaps(Table rightTable,
                                                   CompiledExpression condition) {
    long[] gatherMapData = conditionalFullJoinGatherMaps(getNativeView(),
        rightTable.getNativeView(), condition.getNativeHandle());
    return buildJoinGatherMaps(gatherMapData);
  }

  /**
   * Computes the gather maps that can be used to manifest the result of a full join between
   * two tables using a mix of equality and inequality conditions. The entire join condition is
   * assumed to be a logical AND of the equality condition and inequality condition.
   * Two {@link GatherMap} instances will be returned that can be used to gather
   * the left and right tables, respectively, to produce the result of the full join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @return left and right table gather maps
   */
  public static GatherMap[] mixedFullJoinGatherMaps(Table leftKeys, Table rightKeys,
                                                    Table leftConditional, Table rightConditional,
                                                    CompiledExpression condition,
                                                    NullEquality nullEquality) {
    long[] gatherMapData = mixedFullJoinGatherMaps(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(),
        nullEquality == NullEquality.EQUAL);
    return buildJoinGatherMaps(gatherMapData);
  }

  private static GatherMap buildSemiJoinGatherMap(long[] gatherMapData) {
    long bufferSize = gatherMapData[0];
    long leftAddr = gatherMapData[1];
    long leftHandle = gatherMapData[2];
    return new GatherMap(DeviceMemoryBuffer.fromRmm(leftAddr, bufferSize, leftHandle));
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left semi-join between
   * two tables.
   * It is assumed this table instance holds the key columns from the left table, and
   * the table argument represents the key columns from the right table. The {@link GatherMap}
   * instance returned can be used to gather the left table to produce the result of the
   * left semi-join.
   * It is the responsibility of the caller to close the resulting gather map instance.
   * @param rightKeys join key columns from the right table
   * @param compareNullsEqual true if null key values should match otherwise false
   * @return left table gather map
   */
  public GatherMap leftSemiJoinGatherMap(Table rightKeys, boolean compareNullsEqual) {
    if (getNumberOfColumns() != rightKeys.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightKeys.getNumberOfColumns());
    }
    long[] gatherMapData =
        leftSemiJoinGatherMap(getNativeView(), rightKeys.getNativeView(), compareNullsEqual);
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * Computes the number of rows from the result of a left semi join between two tables when a
   * conditional expression is true. It is assumed this table instance holds the columns from
   * the left table, and the table argument represents the columns from the right table.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return row count for the join result
   */
  public long conditionalLeftSemiJoinRowCount(Table rightTable, CompiledExpression condition) {
    return conditionalLeftSemiJoinRowCount(getNativeView(), rightTable.getNativeView(),
        condition.getNativeHandle());
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left semi join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. The {@link GatherMap} instance returned can be used to gather the left table
   * to produce the result of the left semi join.
   * It is the responsibility of the caller to close the resulting gather map instance.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return left table gather map
   */
  public GatherMap conditionalLeftSemiJoinGatherMap(Table rightTable,
                                                    CompiledExpression condition) {
    long[] gatherMapData = conditionalLeftSemiJoinGatherMap(getNativeView(),
        rightTable.getNativeView(), condition.getNativeHandle());
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left semi join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. The {@link GatherMap} instance returned can be used to gather the left table
   * to produce the result of the left semi join.
   * It is the responsibility of the caller to close the resulting gather map instance.
   * This interface allows passing an output row count that was previously computed from
   * {@link #conditionalLeftSemiJoinRowCount(Table, CompiledExpression)}.
   * WARNING: Passing a row count that is smaller than the actual row count will result
   * in undefined behavior.
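   *
   * For illustration, a hypothetical sketch; a semi join yields a single map over the left table
   * (the names {@code left}, {@code right}, and {@code expr} are assumptions, as is the
   * {@code GatherMap#toColumnView} accessor):
   * <pre>{@code
   * try (CompiledExpression condition = expr.compile()) {
   *   long rows = left.conditionalLeftSemiJoinRowCount(right, condition);
   *   try (GatherMap map = left.conditionalLeftSemiJoinGatherMap(right, condition, rows);
   *        ColumnView view = map.toColumnView(0, (int) map.getRowCount());
   *        Table result = left.gather(view)) {
   *     // result holds the left rows that have at least one match under the condition
   *   }
   * }
   * }</pre>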
* @param rightTable the right side table of the join * @param condition conditional expression to evaluate during the join * @param outputRowCount number of output rows in the join result * @return left table gather map */ public GatherMap conditionalLeftSemiJoinGatherMap(Table rightTable, CompiledExpression condition, long outputRowCount) { long[] gatherMapData = conditionalLeftSemiJoinGatherMapWithCount(getNativeView(), rightTable.getNativeView(), condition.getNativeHandle(), outputRowCount); return buildSemiJoinGatherMap(gatherMapData); } /** * Computes output size information for a left semi join between two tables using a mix of * equality and inequality conditions. The entire join condition is assumed to be a logical AND * of the equality condition and inequality condition. * NOTE: It is the responsibility of the caller to close the resulting size information object * or native resources can be leaked! * @param leftKeys the left table's key columns for the equality condition * @param rightKeys the right table's key columns for the equality condition * @param leftConditional the left table's columns needed to evaluate the inequality condition * @param rightConditional the right table's columns needed to evaluate the inequality condition * @param condition the inequality condition of the join * @param nullEquality whether nulls should compare as equal * @return size information for the join */ public static MixedJoinSize mixedLeftSemiJoinSize(Table leftKeys, Table rightKeys, Table leftConditional, Table rightConditional, CompiledExpression condition, NullEquality nullEquality) { long[] mixedSizeInfo = mixedLeftSemiJoinSize( leftKeys.getNativeView(), rightKeys.getNativeView(), leftConditional.getNativeView(), rightConditional.getNativeView(), condition.getNativeHandle(), nullEquality == NullEquality.EQUAL); assert mixedSizeInfo.length == 2; long outputRowCount = mixedSizeInfo[0]; long matchesColumnHandle = mixedSizeInfo[1]; return new MixedJoinSize(outputRowCount, new ColumnVector(matchesColumnHandle)); } /** * Computes the gather map that can be used to manifest the result of a left semi join between * two tables using a mix of equality and inequality conditions. The entire join condition is * assumed to be a logical AND of the equality condition and inequality condition. * A {@link GatherMap} instance will be returned that can be used to gather * the left table to produce the result of the left semi join. * It is the responsibility of the caller to close the resulting gather map instances. 
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @return left table gather map
   */
  public static GatherMap mixedLeftSemiJoinGatherMap(Table leftKeys, Table rightKeys,
                                                     Table leftConditional, Table rightConditional,
                                                     CompiledExpression condition,
                                                     NullEquality nullEquality) {
    long[] gatherMapData = mixedLeftSemiJoinGatherMap(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(),
        nullEquality == NullEquality.EQUAL);
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left semi join between
   * two tables using a mix of equality and inequality conditions. The entire join condition is
   * assumed to be a logical AND of the equality condition and inequality condition.
   * A {@link GatherMap} instance will be returned that can be used to gather
   * the left table to produce the result of the left semi join.
   * It is the responsibility of the caller to close the resulting gather map instances.
   * This interface allows passing the size result from
   * {@link #mixedLeftSemiJoinSize(Table, Table, Table, Table, CompiledExpression, NullEquality)}
   * when the output size was computed previously.
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @param joinSize mixed join size result
   * @return left table gather map
   */
  public static GatherMap mixedLeftSemiJoinGatherMap(Table leftKeys, Table rightKeys,
                                                     Table leftConditional, Table rightConditional,
                                                     CompiledExpression condition,
                                                     NullEquality nullEquality,
                                                     MixedJoinSize joinSize) {
    long[] gatherMapData = mixedLeftSemiJoinGatherMapWithSize(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(),
        nullEquality == NullEquality.EQUAL,
        joinSize.getOutputRowCount(), joinSize.getMatches().getNativeView());
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left anti-join between
   * two tables. It is assumed this table instance holds the key columns from the left table, and
   * the table argument represents the key columns from the right table. The {@link GatherMap}
   * instance returned can be used to gather the left table to produce the result of the
   * left anti-join.
   * It is the responsibility of the caller to close the resulting gather map instance.
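   *
   * For illustration, a hypothetical sketch; the anti join keeps the left rows with no match
   * (the names {@code left}, {@code leftKeys}, and {@code rightKeys} are assumptions, as is the
   * {@code GatherMap#toColumnView} accessor):
   * <pre>{@code
   * try (GatherMap map = leftKeys.leftAntiJoinGatherMap(rightKeys, true);
   *      ColumnView view = map.toColumnView(0, (int) map.getRowCount());
   *      Table unmatched = left.gather(view)) {
   *   // unmatched holds the left rows whose keys do not appear in rightKeys
   * }
   * }</pre>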
   * @param rightKeys join key columns from the right table
   * @param compareNullsEqual true if null key values should match otherwise false
   * @return left table gather map
   */
  public GatherMap leftAntiJoinGatherMap(Table rightKeys, boolean compareNullsEqual) {
    if (getNumberOfColumns() != rightKeys.getNumberOfColumns()) {
      throw new IllegalArgumentException("column count mismatch, this: " + getNumberOfColumns() +
          " rightKeys: " + rightKeys.getNumberOfColumns());
    }
    long[] gatherMapData =
        leftAntiJoinGatherMap(getNativeView(), rightKeys.getNativeView(), compareNullsEqual);
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * Computes the number of rows from the result of a left anti join between two tables when a
   * conditional expression is true. It is assumed this table instance holds the columns from
   * the left table, and the table argument represents the columns from the right table.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return row count for the join result
   */
  public long conditionalLeftAntiJoinRowCount(Table rightTable, CompiledExpression condition) {
    return conditionalLeftAntiJoinRowCount(getNativeView(), rightTable.getNativeView(),
        condition.getNativeHandle());
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left anti join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. The {@link GatherMap} instance returned can be used to gather the left table
   * to produce the result of the left anti join.
   * It is the responsibility of the caller to close the resulting gather map instance.
   * @param rightTable the right side table of the join
   * @param condition conditional expression to evaluate during the join
   * @return left table gather map
   */
  public GatherMap conditionalLeftAntiJoinGatherMap(Table rightTable,
                                                    CompiledExpression condition) {
    long[] gatherMapData = conditionalLeftAntiJoinGatherMap(getNativeView(),
        rightTable.getNativeView(), condition.getNativeHandle());
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left anti join between
   * two tables when a conditional expression is true. It is assumed this table instance holds
   * the columns from the left table, and the table argument represents the columns from the
   * right table. The {@link GatherMap} instance returned can be used to gather the left table
   * to produce the result of the left anti join.
   * It is the responsibility of the caller to close the resulting gather map instance.
   * This interface allows passing an output row count that was previously computed from
   * {@link #conditionalLeftAntiJoinRowCount(Table, CompiledExpression)}.
   * WARNING: Passing a row count that is smaller than the actual row count will result
   * in undefined behavior.
* @param rightTable the right side table of the join * @param condition conditional expression to evaluate during the join * @param outputRowCount number of output rows in the join result * @return left table gather map */ public GatherMap conditionalLeftAntiJoinGatherMap(Table rightTable, CompiledExpression condition, long outputRowCount) { long[] gatherMapData = conditionalLeftAntiJoinGatherMapWithCount(getNativeView(), rightTable.getNativeView(), condition.getNativeHandle(), outputRowCount); return buildSemiJoinGatherMap(gatherMapData); } /** * Computes output size information for a left anti join between two tables using a mix of * equality and inequality conditions. The entire join condition is assumed to be a logical AND * of the equality condition and inequality condition. * NOTE: It is the responsibility of the caller to close the resulting size information object * or native resources can be leaked! * @param leftKeys the left table's key columns for the equality condition * @param rightKeys the right table's key columns for the equality condition * @param leftConditional the left table's columns needed to evaluate the inequality condition * @param rightConditional the right table's columns needed to evaluate the inequality condition * @param condition the inequality condition of the join * @param nullEquality whether nulls should compare as equal * @return size information for the join */ public static MixedJoinSize mixedLeftAntiJoinSize(Table leftKeys, Table rightKeys, Table leftConditional, Table rightConditional, CompiledExpression condition, NullEquality nullEquality) { long[] mixedSizeInfo = mixedLeftAntiJoinSize( leftKeys.getNativeView(), rightKeys.getNativeView(), leftConditional.getNativeView(), rightConditional.getNativeView(), condition.getNativeHandle(), nullEquality == NullEquality.EQUAL); assert mixedSizeInfo.length == 2; long outputRowCount = mixedSizeInfo[0]; long matchesColumnHandle = mixedSizeInfo[1]; return new MixedJoinSize(outputRowCount, new ColumnVector(matchesColumnHandle)); } /** * Computes the gather map that can be used to manifest the result of a left anti join between * two tables using a mix of equality and inequality conditions. The entire join condition is * assumed to be a logical AND of the equality condition and inequality condition. * A {@link GatherMap} instance will be returned that can be used to gather * the left table to produce the result of the left anti join. * It is the responsibility of the caller to close the resulting gather map instances. 
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @return left table gather map
   */
  public static GatherMap mixedLeftAntiJoinGatherMap(Table leftKeys, Table rightKeys,
                                                     Table leftConditional,
                                                     Table rightConditional,
                                                     CompiledExpression condition,
                                                     NullEquality nullEquality) {
    long[] gatherMapData = mixedLeftAntiJoinGatherMap(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(), nullEquality == NullEquality.EQUAL);
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * Computes the gather map that can be used to manifest the result of a left anti join between
   * two tables using a mix of equality and inequality conditions. The entire join condition is
   * assumed to be a logical AND of the equality condition and inequality condition.
   * A {@link GatherMap} instance will be returned that can be used to gather
   * the left table to produce the result of the left anti join.
   * It is the responsibility of the caller to close the resulting gather map instance.
   * This interface allows passing the size result from
   * {@link #mixedLeftAntiJoinSize(Table, Table, Table, Table, CompiledExpression, NullEquality)}
   * when the output size was computed previously.
   * @param leftKeys the left table's key columns for the equality condition
   * @param rightKeys the right table's key columns for the equality condition
   * @param leftConditional the left table's columns needed to evaluate the inequality condition
   * @param rightConditional the right table's columns needed to evaluate the inequality condition
   * @param condition the inequality condition of the join
   * @param nullEquality whether nulls should compare as equal
   * @param joinSize mixed join size result
   * @return left table gather map
   */
  public static GatherMap mixedLeftAntiJoinGatherMap(Table leftKeys, Table rightKeys,
                                                     Table leftConditional,
                                                     Table rightConditional,
                                                     CompiledExpression condition,
                                                     NullEquality nullEquality,
                                                     MixedJoinSize joinSize) {
    long[] gatherMapData = mixedLeftAntiJoinGatherMapWithSize(
        leftKeys.getNativeView(), rightKeys.getNativeView(),
        leftConditional.getNativeView(), rightConditional.getNativeView(),
        condition.getNativeHandle(), nullEquality == NullEquality.EQUAL,
        joinSize.getOutputRowCount(), joinSize.getMatches().getNativeView());
    return buildSemiJoinGatherMap(gatherMapData);
  }

  /**
   * For details about how this method functions refer to
   * {@link #convertToRowsFixedWidthOptimized()}.
   *
   * The only difference between this method and {@link #convertToRowsFixedWidthOptimized()}
   * is that this one can handle roughly 250M columns while
   * {@link #convertToRowsFixedWidthOptimized()} can only handle fewer than 100 columns.
   */
  public ColumnVector[] convertToRows() {
    long[] ptrs = convertToRows(nativeHandle);
    return ColumnVector.getColumnVectorsFromPointers(ptrs);
  }

  /**
   * Convert this table of columns into a row major format that is useful for interacting with other
   * systems that do row major processing of the data. Currently only fixed-width column types are
   * supported.
   * <p/>
   * The output is one or more ColumnVectors that are lists of bytes. A ColumnVector that is a
   * list of bytes can have at most 2GB of data stored in it. Multiple ColumnVectors are returned
   * if not all of the data can fit in a single one.
   * <p/>
   * Each row in the returned ColumnVector array corresponds to a row in the input table. The rows
   * will be in the same order as the input Table. The first ColumnVector in the array will hold
   * the first N rows followed by the second ColumnVector and so on. The following illustrates
   * this and also shows some of the internal structure that will be explained later.
   * <p/><pre>
   * result[0]:
   * | row 0 | validity for row 0 | padding |
   * ...
   * | row N | validity for row N | padding |
   * result[1]:
   * |row N+1 | validity for row N+1 | padding |
   * ...
   * </pre>
   *
   * The format of each row is similar in layout to a C struct where each column will have padding
   * in front of it to align it properly. Each row has padding inserted at the end so the next row
   * is aligned to a 64-bit boundary. This is so that the first column will always start at the
   * beginning (first byte) of the list of bytes and each row has a consistent layout for fixed
   * width types.
   * <p/>
   * Validity bytes are added to the end of the row. There will be one byte for each 8 columns in a
   * row. Because the validity is byte aligned there is no padding between it and the last column
   * in the row.
   * <p/>
   * For example a table consisting of the following columns A, B, C with the corresponding types
   * <p/><pre>
   * | A - BOOL8 (8-bit) | B - INT16 (16-bit) | C - DURATION_DAYS (32-bit) |
   * </pre>
   * <p/>
   * will have a layout that looks like
   * <p/><pre>
   * | A_0 | P | B_0 | B_1 | C_0 | C_1 | C_2 | C_3 | V0 | P | P | P | P | P | P | P |
   * </pre>
   * <p/>
   * Here P corresponds to a byte of padding, [LETTER]_[NUMBER] represents the NUMBER
   * byte of the corresponding LETTER column, and V[NUMBER] is a validity byte for the `NUMBER * 8`
   * to `(NUMBER + 1) * 8` columns.
   * <p/>
   * The order of the columns will not be changed, but to reduce the total amount of padding it is
   * recommended to order the columns in the following way.
   * <p/>
   * <ol>
   * <li>64-bit columns</li>
   * <li>32-bit columns</li>
   * <li>16-bit columns</li>
   * <li>8-bit columns</li>
   * </ol>
   * <p/>
   * This way padding is only inserted at the end of a row to make the next column 64-bit aligned.
   * So for the example above, if the columns were ordered C, B, A the layout would be:
   * <pre>
   * | C_0 | C_1 | C_2 | C_3 | B_0 | B_1 | A_0 | V0 |
   * </pre>
   * This would have reduced the overall size of the data transferred by half.
   * <p/>
   * One of the main motivations for doing a row conversion on the GPU is to avoid cache problems
   * when walking through columnar data on the CPU in a row wise manner. If you are not transferring
   * very many columns it is likely to be more efficient to just pull back the columns and walk
   * through them. This is especially true of a single column of fixed width data. The extra
   * padding will slow down the transfer and looking at only a handful of buffers is not likely to
   * cause cache issues.
   * <p/>
   * There are some limits on the size of a single row. If the row is larger than 1KB this will
   * throw an exception.
   */
  public ColumnVector[] convertToRowsFixedWidthOptimized() {
    long[] ptrs = convertToRowsFixedWidthOptimized(nativeHandle);
    return ColumnVector.getColumnVectorsFromPointers(ptrs);
  }

  /**
   * Convert a column of lists of bytes that is formatted like the output from `convertToRows`
   * back into a table.
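   * <p/>
   * A hedged round-trip sketch (the column types are illustrative; any fixed-width
   * schema the reader supports should follow the same pattern):
   * <pre>{@code
   * ColumnVector[] rowChunks = table.convertToRows();
   * try {
   *   try (Table back = Table.convertFromRows(rowChunks[0], DType.INT32, DType.FLOAT64)) {
   *     // `back` holds the rows from the first chunk in columnar form again
   *   }
   * } finally {
   *   for (ColumnVector cv : rowChunks) {
   *     cv.close();
   *   }
   * }
   * }</pre>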
   *
   * NOTE: This method doesn't support nested types.
   *
   * @param vec the row data to process.
   * @param schema the types of each column.
   * @return the parsed table.
   */
  public static Table convertFromRows(ColumnView vec, DType... schema) {
    int[] types = new int[schema.length];
    int[] scale = new int[schema.length];
    for (int i = 0; i < schema.length; i++) {
      types[i] = schema[i].typeId.nativeId;
      scale[i] = schema[i].getScale();
    }
    return new Table(convertFromRows(vec.getNativeView(), types, scale));
  }

  /**
   * Convert a column of lists of bytes that is formatted like the output from `convertToRows`
   * back into a table.
   *
   * NOTE: This method doesn't support nested types.
   *
   * @param vec the row data to process.
   * @param schema the types of each column.
   * @return the parsed table.
   */
  public static Table convertFromRowsFixedWidthOptimized(ColumnView vec, DType... schema) {
    int[] types = new int[schema.length];
    int[] scale = new int[schema.length];
    for (int i = 0; i < schema.length; i++) {
      types[i] = schema[i].typeId.nativeId;
      scale[i] = schema[i].getScale();
    }
    return new Table(convertFromRowsFixedWidthOptimized(vec.getNativeView(), types, scale));
  }

  /**
   * Construct a table from a packed representation.
   * @param metadata host-based metadata for the table
   * @param data GPU data buffer for the table
   * @return table which is zero-copy reconstructed from the packed form
   */
  public static Table fromPackedTable(ByteBuffer metadata, DeviceMemoryBuffer data) {
    // Ensure the metadata buffer is direct so it can be passed to JNI
    ByteBuffer directBuffer = metadata;
    if (!directBuffer.isDirect()) {
      directBuffer = ByteBuffer.allocateDirect(metadata.remaining());
      directBuffer.put(metadata);
      directBuffer.flip();
    }

    long[] columnViewAddresses = columnViewsFromPacked(directBuffer, data.getAddress());
    ColumnVector[] columns = new ColumnVector[columnViewAddresses.length];
    Table result = null;
    try {
      for (int i = 0; i < columns.length; i++) {
        long columnViewAddress = columnViewAddresses[i];
        // setting address to zero, so we don't clean it in case of an exception as it
        // will be cleaned up by the ColumnView constructor
        columnViewAddresses[i] = 0;
        columns[i] = ColumnVector.fromViewWithContiguousAllocation(columnViewAddress, data);
      }
      result = new Table(columns);
    } catch (Throwable t) {
      try {
        ColumnView.cleanupColumnViews(columnViewAddresses, columns, t);
      } catch (Throwable s) {
        t.addSuppressed(s);
      } finally {
        throw t;
      }
    }

    // close columns to leave the resulting table responsible for freeing underlying columns
    for (ColumnVector column : columns) {
      column.close();
    }
    return result;
  }
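  // A hedged sketch of reconstructing a packed table. It assumes `meta` (a host
  // ByteBuffer) and `devBuf` (a DeviceMemoryBuffer) came from a prior pack or
  // contiguous-split step performed by the caller:
  //
  //   try (Table unpacked = Table.fromPackedTable(meta, devBuf)) {
  //     // use the zero-copy reconstructed table
  //   }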
  /**
   * Gather `n` samples from the table randomly.
   * Note: this does not preserve the ordering.
   * Example:
   * input: {col1: {1, 2, 3, 4, 5}, col2: {6, 7, 8, 9, 10}}
   * n: 3
   * replacement: false
   *
   * output: {col1: {3, 1, 4}, col2: {8, 6, 9}}
   *
   * replacement: true
   *
   * output: {col1: {3, 1, 1}, col2: {8, 6, 6}}
   *
   * throws "logic_error" if `n` > table rows and `replacement` == FALSE.
   * throws "logic_error" if `n` < 0.
   *
   * @param n non-negative number of samples expected from table
   * @param replacement Allow or disallow sampling of the same row more than once.
   * @param seed Seed value to initiate random number generator.
   *
   * @return Table containing samples
   */
  public Table sample(long n, boolean replacement, long seed) {
    return new Table(sample(nativeHandle, n, replacement, seed));
  }

  /////////////////////////////////////////////////////////////////////////////
  // HELPER CLASSES
  /////////////////////////////////////////////////////////////////////////////

  /**
   * Class to encapsulate indices and table
   */
  private final static class Operation {
    final int[] indices;
    final Table table;

    Operation(Table table, int... indices) {
      this.indices = indices;
      this.table = table;
    }
  }

  /**
   * Internal class used to keep track of operations on a given column.
   */
  private static final class ColumnOps {
    private final HashMap<Aggregation, List<Integer>> ops = new HashMap<>();

    /**
     * Add an operation on a given column
     * @param op the operation
     * @param index the column index the operation is on.
     * @return 1 if it was not a duplicate or 0 if it was a duplicate. This is mostly for
     * bookkeeping so we can easily allocate the correct data size later on.
     */
    public int add(Aggregation op, int index) {
      int ret = 0;
      List<Integer> indexes = ops.get(op);
      if (indexes == null) {
        ret++;
        indexes = new ArrayList<>();
        ops.put(op, indexes);
      }
      indexes.add(index);
      return ret;
    }

    public Set<Aggregation> operations() {
      return ops.keySet();
    }

    public Collection<List<Integer>> outputIndices() {
      return ops.values();
    }
  }

  /**
   * Internal class used to keep track of window operations on a given column.
   */
  private static final class ColumnWindowOps {
    // Map AggOp -> Output column index.
    private final HashMap<AggregationOverWindow, List<Integer>> ops = new HashMap<>();

    public int add(AggregationOverWindow op, int index) {
      int ret = 0;
      List<Integer> indexes = ops.get(op);
      if (indexes == null) {
        ret++;
        indexes = new ArrayList<>();
        ops.put(op, indexes);
      }
      indexes.add(index);
      return ret;
    }

    public Set<AggregationOverWindow> operations() {
      return ops.keySet();
    }

    public Collection<List<Integer>> outputIndices() {
      return ops.values();
    }
  }

  /**
   * Class representing groupby operations
   */
  public static final class GroupByOperation {

    private final Operation operation;
    private final GroupByOptions groupByOptions;

    GroupByOperation(final Table table, GroupByOptions groupByOptions, final int... indices) {
      operation = new Operation(table, indices);
      this.groupByOptions = groupByOptions;
    }

    /**
     * Aggregates the group of columns represented by indices
     * Usage:
     *      aggregate(count(), max(2),...);
     *      example:
     *        input : 1, 1, 1
     *                1, 2, 1
     *                2, 4, 5
     *
     *        table.groupBy(0, 2).count()
     *
     *                col0, col1
     *        output:   1,   1
     *                  1,   2
     *                  2,   1 ==> aggregated count
     */
    public Table aggregate(GroupByAggregationOnColumn... aggregates) {
      assert aggregates != null;
      // To improve performance and memory we want to remove duplicate operations
      // and also group the operations by column so hopefully cudf can do multiple aggregations
      // in a single pass.

      // Use a tree map to make debugging simpler (columns are all in the same order)
      TreeMap<Integer, ColumnOps> groupedOps = new TreeMap<>();
      // Total number of operations that will need to be done.
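      // Output layout bookkeeping: the grouping key columns occupy the first
      // `keysLength` slots of the result, so each aggregation's final position is
      // its argument index offset by the key count (outputIndex + keysLength below).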
      int keysLength = operation.indices.length;
      int totalOps = 0;
      for (int outputIndex = 0; outputIndex < aggregates.length; outputIndex++) {
        GroupByAggregationOnColumn agg = aggregates[outputIndex];
        ColumnOps ops = groupedOps.computeIfAbsent(agg.getColumnIndex(), (idx) -> new ColumnOps());
        totalOps += ops.add(agg.getWrapped().getWrapped(), outputIndex + keysLength);
      }
      int[] aggColumnIndexes = new int[totalOps];
      long[] aggOperationInstances = new long[totalOps];
      try {
        int opIndex = 0;
        for (Map.Entry<Integer, ColumnOps> entry: groupedOps.entrySet()) {
          int columnIndex = entry.getKey();
          for (Aggregation operation: entry.getValue().operations()) {
            aggColumnIndexes[opIndex] = columnIndex;
            aggOperationInstances[opIndex] = operation.createNativeInstance();
            opIndex++;
          }
        }
        assert opIndex == totalOps : opIndex + " == " + totalOps;

        try (Table aggregate = new Table(groupByAggregate(
            operation.table.nativeHandle,
            operation.indices,
            aggColumnIndexes,
            aggOperationInstances,
            groupByOptions.getIgnoreNullKeys(),
            groupByOptions.getKeySorted(),
            groupByOptions.getKeysDescending(),
            groupByOptions.getKeysNullSmallest()))) {
          // prepare the final table
          ColumnVector[] finalCols = new ColumnVector[keysLength + aggregates.length];

          // get the key columns
          for (int aggIndex = 0; aggIndex < keysLength; aggIndex++) {
            finalCols[aggIndex] = aggregate.getColumn(aggIndex);
          }

          int inputColumn = keysLength;
          // Now get the aggregation columns
          for (ColumnOps ops: groupedOps.values()) {
            for (List<Integer> indices: ops.outputIndices()) {
              for (int outIndex: indices) {
                finalCols[outIndex] = aggregate.getColumn(inputColumn);
              }
              inputColumn++;
            }
          }
          return new Table(finalCols);
        }
      } finally {
        Aggregation.close(aggOperationInstances);
      }
    }

    /**
     * Computes row-based window aggregation functions on the Table/projection,
     * based on windows specified in the argument.
     *
     * This method enables queries such as the following SQL:
     *
     *  SELECT user_id,
     *      MAX(sales_amt) OVER(PARTITION BY user_id ORDER BY date
     *          ROWS BETWEEN 1 PRECEDING and 1 FOLLOWING)
     *  FROM my_sales_table WHERE ...
     *
     * Each window-aggregation is represented by a different {@link AggregationOverWindow}
     * argument, indicating:
     *  1. the {@link Aggregation.Kind},
     *  2. the number of rows preceding and following the current row, within a window,
     *  3. the minimum number of observations within the defined window
     *
     * This method returns a {@link Table} instance, with one result column for each specified
     * window aggregation.
     *
     * In this example, for the following input:
     *
     *  [ // user_id,  sales_amt
     *    { "user1",     10      },
     *    { "user2",     20      },
     *    { "user1",     20      },
     *    { "user1",     10      },
     *    { "user2",     30      },
     *    { "user2",     80      },
     *    { "user1",     50      },
     *    { "user1",     60      },
     *    { "user2",     40      }
     *  ]
     *
     * Partitioning (grouping) by `user_id` yields the following `sales_amt` vector
     * (with 2 groups, one for each distinct `user_id`):
     *
     *    [ 10, 20, 10, 50, 60, 20, 30, 80, 40 ]
     *      <-------user1-------->|<------user2------->
     *
     * The SUM aggregation is applied with 1 preceding and 1 following
     * row, with a minimum of 1 period. The aggregation window is thus 3 rows wide,
     * yielding the following column:
     *
     *    [ 30, 40, 80, 120, 110, 50, 130, 150, 120 ]
     *
     * @param windowAggregates the window-aggregations to be performed
     * @return Table instance, with each column containing the result of each aggregation.
     * @throws IllegalArgumentException if the window arguments are not of type
     * {@link WindowOptions.FrameType#ROWS}, i.e. if a range-based (timestamp) window was
     * specified for a window-aggregation.
     */
    public Table aggregateWindows(AggregationOverWindow... windowAggregates) {
      // To improve performance and memory we want to remove duplicate operations
      // and also group the operations by column so hopefully cudf can do multiple aggregations
      // in a single pass.

      // Use a tree map to make debugging simpler (columns are all in the same order)
      TreeMap<Integer, ColumnWindowOps> groupedOps = new TreeMap<>(); // Map agg-col-id -> Agg ColOp.
      // Total number of operations that will need to be done.
      int totalOps = 0;
      for (int outputIndex = 0; outputIndex < windowAggregates.length; outputIndex++) {
        AggregationOverWindow agg = windowAggregates[outputIndex];
        if (agg.getWindowOptions().getFrameType() != WindowOptions.FrameType.ROWS) {
          throw new IllegalArgumentException("Expected ROWS-based window specification. " +
              "Unexpected window type: " + agg.getWindowOptions().getFrameType());
        }
        ColumnWindowOps ops = groupedOps.computeIfAbsent(agg.getColumnIndex(),
            (idx) -> new ColumnWindowOps());
        totalOps += ops.add(agg, outputIndex);
      }

      int[] aggColumnIndexes = new int[totalOps];
      long[] aggInstances = new long[totalOps];
      try {
        int[] aggPrecedingWindows = new int[totalOps];
        int[] aggFollowingWindows = new int[totalOps];
        boolean[] unboundedPreceding = new boolean[totalOps];
        boolean[] unboundedFollowing = new boolean[totalOps];
        int[] aggMinPeriods = new int[totalOps];
        long[] defaultOutputs = new long[totalOps];
        int opIndex = 0;
        for (Map.Entry<Integer, ColumnWindowOps> entry: groupedOps.entrySet()) {
          int columnIndex = entry.getKey();
          for (AggregationOverWindow operation: entry.getValue().operations()) {
            aggColumnIndexes[opIndex] = columnIndex;
            aggInstances[opIndex] = operation.createNativeInstance();
            Scalar p = operation.getWindowOptions().getPrecedingScalar();
            aggPrecedingWindows[opIndex] = p == null || !p.isValid() ? 0 : p.getInt();
            Scalar f = operation.getWindowOptions().getFollowingScalar();
            aggFollowingWindows[opIndex] = f == null || !f.isValid() ? 1 : f.getInt();
            unboundedPreceding[opIndex] = operation.getWindowOptions().isUnboundedPreceding();
            unboundedFollowing[opIndex] = operation.getWindowOptions().isUnboundedFollowing();
            aggMinPeriods[opIndex] = operation.getWindowOptions().getMinPeriods();
            defaultOutputs[opIndex] = operation.getDefaultOutput();
            opIndex++;
          }
        }
        assert opIndex == totalOps : opIndex + " == " + totalOps;

        try (Table aggregate = new Table(rollingWindowAggregate(
            operation.table.nativeHandle,
            operation.indices,
            defaultOutputs,
            aggColumnIndexes,
            aggInstances, aggMinPeriods, aggPrecedingWindows, aggFollowingWindows,
            unboundedPreceding, unboundedFollowing,
            groupByOptions.getIgnoreNullKeys()))) {
          // prepare the final table
          ColumnVector[] finalCols = new ColumnVector[windowAggregates.length];

          int inputColumn = 0;
          // Now get the aggregation columns
          for (ColumnWindowOps ops: groupedOps.values()) {
            for (List<Integer> indices: ops.outputIndices()) {
              for (int outIndex: indices) {
                finalCols[outIndex] = aggregate.getColumn(inputColumn);
              }
              inputColumn++;
            }
          }
          return new Table(finalCols);
        }
      } finally {
        Aggregation.close(aggInstances);
      }
    }

    /**
     * Computes range-based window aggregation functions on the Table/projection,
     * based on windows specified in the argument.
     *
     * This method enables queries such as the following SQL:
     *
     *  SELECT user_id,
     *      MAX(sales_amt) OVER(PARTITION BY user_id ORDER BY date
     *          RANGE BETWEEN INTERVAL 1 DAY PRECEDING and CURRENT ROW)
     *  FROM my_sales_table WHERE ...
     *
     * Each window-aggregation is represented by a different {@link AggregationOverWindow}
     * argument, indicating:
     *  1. the {@link Aggregation.Kind},
     *  2. the index for the timestamp column to base the window definitions on,
     *  3. the number of DAYS preceding and following the current row's date, to consider in
     *     the window,
     *  4. the minimum number of observations within the defined window
     *
     * This method returns a {@link Table} instance, with one result column for each specified
     * window aggregation.
     *
     * In this example, for the following input:
     *
     *  [ // user,  sales_amt,  YYYYMMDD (date)
     *    { "user1",   10,      20200101    },
     *    { "user2",   20,      20200101    },
     *    { "user1",   20,      20200102    },
     *    { "user1",   10,      20200103    },
     *    { "user2",   30,      20200101    },
     *    { "user2",   80,      20200102    },
     *    { "user1",   50,      20200107    },
     *    { "user1",   60,      20200107    },
     *    { "user2",   40,      20200104    }
     *  ]
     *
     * Partitioning (grouping) by `user_id`, and ordering by `date` yields the following
     * `sales_amt` vector (with 2 groups, one for each distinct `user_id`):
     *
     * Date :(202001-)  [ 01,  02,  03,  07,  07,  01,  01,  02,  04 ]
     * Input:           [ 10,  20,  10,  50,  60,  20,  30,  80,  40 ]
     *                    <-------user1-------->|<---------user2--------->
     *
     * The SUM aggregation is applied, with 1 day preceding, and 1 day following, with a
     * minimum of 1 period. The aggregation window is thus 3 *days* wide, yielding the
     * following output column:
     *
     *  Results:        [ 30,  40,  30, 110, 110, 130, 130, 130,  40 ]
     *
     * @param windowAggregates the window-aggregations to be performed
     * @return Table instance, with each column containing the result of each aggregation.
     * @throws IllegalArgumentException if the window arguments are not of type
     * {@link WindowOptions.FrameType#RANGE}, or if the orderBy column is not of a supported
     * type (an integral type other than boolean, a decimal, a timestamp, or a string).
     */
    public Table aggregateWindowsOverRanges(AggregationOverWindow... windowAggregates) {
      // To improve performance and memory we want to remove duplicate operations
      // and also group the operations by column so hopefully cudf can do multiple aggregations
      // in a single pass.

      // Use a tree map to make debugging simpler (columns are all in the same order)
      TreeMap<Integer, ColumnWindowOps> groupedOps = new TreeMap<>(); // Map agg-col-id -> Agg ColOp.
      // Total number of operations that will need to be done.
      int totalOps = 0;
      for (int outputIndex = 0; outputIndex < windowAggregates.length; outputIndex++) {
        AggregationOverWindow agg = windowAggregates[outputIndex];
        if (agg.getWindowOptions().getFrameType() != WindowOptions.FrameType.RANGE) {
          throw new IllegalArgumentException("Expected range-based window specification. " +
              "Unexpected window type: " + agg.getWindowOptions().getFrameType());
        }

        DType orderByType = operation.table.getColumn(
            agg.getWindowOptions().getOrderByColumnIndex()).getType();
        switch (orderByType.getTypeId()) {
          case INT8:
          case INT16:
          case INT32:
          case INT64:
          case UINT8:
          case UINT16:
          case UINT32:
          case UINT64:
          case FLOAT32:
          case FLOAT64:
          case TIMESTAMP_MILLISECONDS:
          case TIMESTAMP_SECONDS:
          case TIMESTAMP_DAYS:
          case TIMESTAMP_NANOSECONDS:
          case TIMESTAMP_MICROSECONDS:
          case DECIMAL32:
          case DECIMAL64:
          case DECIMAL128:
          case STRING:
            break;
          default:
            throw new IllegalArgumentException("Expected range-based window orderBy's type: " +
                "integral (boolean-exclusive), decimal, timestamp, or string");
        }

        ColumnWindowOps ops = groupedOps.computeIfAbsent(agg.getColumnIndex(),
            (idx) -> new ColumnWindowOps());
        totalOps += ops.add(agg, outputIndex);
      }

      int[] aggColumnIndexes = new int[totalOps];
      int[] orderByColumnIndexes = new int[totalOps];
      boolean[] isOrderByOrderAscending = new boolean[totalOps];
      long[] aggInstances = new long[totalOps];
      long[] aggPrecedingWindows = new long[totalOps];
      long[] aggFollowingWindows = new long[totalOps];
      try {
        int[] aggPrecedingWindowsExtent = new int[totalOps];
        int[] aggFollowingWindowsExtent = new int[totalOps];
        int[] aggMinPeriods = new int[totalOps];
        int opIndex = 0;
        for (Map.Entry<Integer, ColumnWindowOps> entry: groupedOps.entrySet()) {
          int columnIndex = entry.getKey();
          for (AggregationOverWindow op: entry.getValue().operations()) {
            aggColumnIndexes[opIndex] = columnIndex;
            aggInstances[opIndex] = op.createNativeInstance();
            WindowOptions windowOptions = op.getWindowOptions();
            Scalar p = windowOptions.getPrecedingScalar();
            Scalar f = windowOptions.getFollowingScalar();
            if ((p == null || !p.isValid()) &&
                !(windowOptions.isUnboundedPreceding() ||
                    windowOptions.isCurrentRowPreceding())) {
              throw new IllegalArgumentException("Some kind of preceding must be set, and a " +
                  "preceding column is not currently supported");
            }
            if ((f == null || !f.isValid()) &&
                !(windowOptions.isUnboundedFollowing() ||
                    windowOptions.isCurrentRowFollowing())) {
              throw new IllegalArgumentException("Some kind of following must be set, and a " +
                  "following column is not currently supported");
            }
            aggPrecedingWindows[opIndex] = p == null ? 0 : p.getScalarHandle();
            aggFollowingWindows[opIndex] = f == null ? 0 : f.getScalarHandle();
            aggPrecedingWindowsExtent[opIndex] =
                windowOptions.getPrecedingBoundsExtent().nominalValue;
            aggFollowingWindowsExtent[opIndex] =
                windowOptions.getFollowingBoundsExtent().nominalValue;
            aggMinPeriods[opIndex] = op.getWindowOptions().getMinPeriods();
            assert (op.getWindowOptions().getFrameType() == WindowOptions.FrameType.RANGE);
            orderByColumnIndexes[opIndex] = op.getWindowOptions().getOrderByColumnIndex();
            isOrderByOrderAscending[opIndex] = op.getWindowOptions().isOrderByOrderAscending();
            if (op.getDefaultOutput() != 0) {
              throw new IllegalArgumentException("Operations with a default output are not " +
                  "supported on time based rolling windows");
            }

            opIndex++;
          }
        }
        assert opIndex == totalOps : opIndex + " == " + totalOps;

        try (Table aggregate = new Table(rangeRollingWindowAggregate(
            operation.table.nativeHandle,
            operation.indices,
            orderByColumnIndexes,
            isOrderByOrderAscending,
            aggColumnIndexes,
            aggInstances, aggMinPeriods, aggPrecedingWindows, aggFollowingWindows,
            aggPrecedingWindowsExtent, aggFollowingWindowsExtent,
            groupByOptions.getIgnoreNullKeys()))) {
          // prepare the final table
          ColumnVector[] finalCols = new ColumnVector[windowAggregates.length];

          int inputColumn = 0;
          // Now get the aggregation columns
          for (ColumnWindowOps ops: groupedOps.values()) {
            for (List<Integer> indices: ops.outputIndices()) {
              for (int outIndex: indices) {
                finalCols[outIndex] = aggregate.getColumn(inputColumn);
              }
              inputColumn++;
            }
          }
          return new Table(finalCols);
        }
      } finally {
        Aggregation.close(aggInstances);
      }
    }

    /**
     * Computes groupby scan (cumulative) aggregations on this grouping. The result table
     * contains the grouping key columns first, followed by one column per requested
     * scan aggregation.
     */
    public Table scan(GroupByScanAggregationOnColumn... aggregates) {
      assert aggregates != null;
      // To improve performance and memory we want to remove duplicate operations
      // and also group the operations by column so hopefully cudf can do multiple aggregations
      // in a single pass.

      // Use a tree map to make debugging simpler (columns are all in the same order)
      TreeMap<Integer, ColumnOps> groupedOps = new TreeMap<>();
      // Total number of operations that will need to be done.
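      // Same output-layout bookkeeping as aggregate(): duplicate scan aggregations on a
      // column are collapsed here and fanned back out by output index after the native call.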
      int keysLength = operation.indices.length;
      int totalOps = 0;
      for (int outputIndex = 0; outputIndex < aggregates.length; outputIndex++) {
        GroupByScanAggregationOnColumn agg = aggregates[outputIndex];
        ColumnOps ops = groupedOps.computeIfAbsent(agg.getColumnIndex(), (idx) -> new ColumnOps());
        totalOps += ops.add(agg.getWrapped().getWrapped(), outputIndex + keysLength);
      }
      int[] aggColumnIndexes = new int[totalOps];
      long[] aggOperationInstances = new long[totalOps];
      try {
        int opIndex = 0;
        for (Map.Entry<Integer, ColumnOps> entry: groupedOps.entrySet()) {
          int columnIndex = entry.getKey();
          for (Aggregation operation: entry.getValue().operations()) {
            aggColumnIndexes[opIndex] = columnIndex;
            aggOperationInstances[opIndex] = operation.createNativeInstance();
            opIndex++;
          }
        }
        assert opIndex == totalOps : opIndex + " == " + totalOps;

        try (Table aggregate = new Table(groupByScan(
            operation.table.nativeHandle,
            operation.indices,
            aggColumnIndexes,
            aggOperationInstances,
            groupByOptions.getIgnoreNullKeys(),
            groupByOptions.getKeySorted(),
            groupByOptions.getKeysDescending(),
            groupByOptions.getKeysNullSmallest()))) {
          // prepare the final table
          ColumnVector[] finalCols = new ColumnVector[keysLength + aggregates.length];

          // get the key columns
          for (int aggIndex = 0; aggIndex < keysLength; aggIndex++) {
            finalCols[aggIndex] = aggregate.getColumn(aggIndex);
          }

          int inputColumn = keysLength;
          // Now get the aggregation columns
          for (ColumnOps ops: groupedOps.values()) {
            for (List<Integer> indices: ops.outputIndices()) {
              for (int outIndex: indices) {
                finalCols[outIndex] = aggregate.getColumn(inputColumn);
              }
              inputColumn++;
            }
          }
          return new Table(finalCols);
        }
      } finally {
        Aggregation.close(aggOperationInstances);
      }
    }

    /**
     * Replace null values in the requested columns with the preceding or following non-null
     * value within the same group, according to each column's replacement policy.
     */
    public Table replaceNulls(ReplacePolicyWithColumn... replacements) {
      assert replacements != null;

      // TODO: in the future, to improve performance and memory, we may want to
      //       remove duplicate operations.

      boolean[] isPreceding = new boolean[replacements.length];
      int[] columnIndexes = new int[replacements.length];

      for (int index = 0; index < replacements.length; index++) {
        isPreceding[index] = replacements[index].policy.isPreceding;
        columnIndexes[index] = replacements[index].column;
      }

      return new Table(groupByReplaceNulls(
          operation.table.nativeHandle,
          operation.indices,
          columnIndexes,
          isPreceding,
          groupByOptions.getIgnoreNullKeys(),
          groupByOptions.getKeySorted(),
          groupByOptions.getKeysDescending(),
          groupByOptions.getKeysNullSmallest()));
    }

    /**
     * Splits the groups in a single table into separate tables according to the grouping keys.
     * Each split table represents a single group.
     *
     * This API will be used by some grouping related operators to process the data
     * group by group.
     *
     * Example:
     *   Grouping column index: 0
     *   Input: A table of 3 rows (two groups)
     *             a    1
     *             b    2
     *             b    3
     *
     * Result:
     *   Two tables, one table per group.
     *   Result[0]:
     *             a    1
     *
     *   Result[1]:
     *             b    2
     *             b    3
     *
     * Note: the order of the returned groups is NOT always the same as that of the input
     * table. The split is done natively to avoid copying the offset array to the JVM.
     *
     * @return The tables split according to the groups in the table. NOTE: It is the
     *         responsibility of the caller to close the result. Each table and column holds a
     *         reference to the original buffer. But both the buffer and the table must be closed
     *         for the memory to be released.
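     * <p/>
     * A hedged usage sketch (the grouping column index and the cleanup strategy here are
     * illustrative only):
     * <pre>{@code
     * ContiguousTable[] groups = table.groupBy(0).contiguousSplitGroups();
     * try {
     *   // process each per-group table
     * } finally {
     *   for (ContiguousTable group : groups) {
     *     group.close();
     *   }
     * }
     * }</pre>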
     */
    public ContiguousTable[] contiguousSplitGroups() {
      try (ContigSplitGroupByResult ret = Table.contiguousSplitGroups(
          operation.table.nativeHandle,
          operation.indices,
          groupByOptions.getIgnoreNullKeys(),
          groupByOptions.getKeySorted(),
          groupByOptions.getKeysDescending(),
          groupByOptions.getKeysNullSmallest(),
          false) // do not generate the unique key table
      ) {
        // take the ownership of the `groups` in ContigSplitGroupByResult
        return ret.releaseGroups();
      }
    }

    /**
     * Similar to {@link #contiguousSplitGroups}, but also returns an extra unique key table
     * in which each row corresponds to one split group.
     *
     * Splits the groups in a single table into separate tables according to the grouping keys.
     * Each split table represents a single group.
     *
     * Example, see the example in {@link #contiguousSplitGroups}
     * The `uniqKeysTable` in ContigSplitGroupByResult is:
     *    a
     *    b
     * Note: it has only 2 rows because there are only 2 split groups.
     *
     * @return The split groups and the unique key table.
     */
    public ContigSplitGroupByResult contiguousSplitGroupsAndGenUniqKeys() {
      return Table.contiguousSplitGroups(
          operation.table.nativeHandle,
          operation.indices,
          groupByOptions.getIgnoreNullKeys(),
          groupByOptions.getKeySorted(),
          groupByOptions.getKeysDescending(),
          groupByOptions.getKeysNullSmallest(),
          true); // generate the unique key table
    }
  }

  public static final class TableOperation {

    private final Operation operation;

    TableOperation(final Table table, final int... indices) {
      operation = new Operation(table, indices);
    }

    /**
     * Hash partition a table into the specified number of partitions. Uses the default MURMUR3
     * hashing.
     * @param numberOfPartitions number of partitions to use
     * @return {@link PartitionedTable} - Table that exposes a limited functionality of the
     * {@link Table} class
     */
    public PartitionedTable hashPartition(int numberOfPartitions) {
      return hashPartition(HashType.MURMUR3, numberOfPartitions);
    }

    /**
     * Hash partition a table into the specified number of partitions.
     * @param type the type of hash to use. Depending on the type of hash different restrictions
     *             on the hash column(s) may exist. Not all hash functions are guaranteed to work
     *             besides IDENTITY and MURMUR3.
     * @param numberOfPartitions number of partitions to use
     * @return {@link PartitionedTable} - Table that exposes a limited functionality of the
     * {@link Table} class
     */
    public PartitionedTable hashPartition(HashType type, int numberOfPartitions) {
      final int DEFAULT_HASH_SEED = 0;
      return hashPartition(type, numberOfPartitions, DEFAULT_HASH_SEED);
    }

    /**
     * Hash partition a table into the specified number of partitions.
     * @param type the type of hash to use. Depending on the type of hash different restrictions
     *             on the hash column(s) may exist. Not all hash functions are guaranteed to work
     *             besides IDENTITY and MURMUR3.
     * @param numberOfPartitions number of partitions to use
     * @param seed the seed value for hashing
     * @return Table that exposes a limited functionality of the {@link Table} class
     */
    public PartitionedTable hashPartition(HashType type, int numberOfPartitions, int seed) {
      int[] partitionOffsets = new int[numberOfPartitions];
      return new PartitionedTable(new Table(Table.hashPartition(
          operation.table.nativeHandle,
          operation.indices,
          type.nativeId,
          partitionOffsets.length,
          seed,
          partitionOffsets)), partitionOffsets);
    }
  }

  /////////////////////////////////////////////////////////////////////////////
  // BUILDER
  /////////////////////////////////////////////////////////////////////////////

  /**
   * Create a table on the GPU with data from the CPU. This is not fast and is
   * intended mostly for tests.
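   * <p/>
   * For example (a hedged sketch; the column values are purely illustrative):
   * <pre>{@code
   * try (Table t = new Table.TestBuilder()
   *          .column(1, 2, 3)
   *          .column("a", "b", "c")
   *          .build()) {
   *   // use the table
   * }
   * }</pre>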
   */
  public static final class TestBuilder {
    private final List<DataType> types = new ArrayList<>();
    private final List<Object> typeErasedData = new ArrayList<>();

    public TestBuilder column(String... values) {
      types.add(new BasicType(true, DType.STRING));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Boolean... values) {
      types.add(new BasicType(true, DType.BOOL8));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Byte... values) {
      types.add(new BasicType(true, DType.INT8));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Short... values) {
      types.add(new BasicType(true, DType.INT16));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Integer... values) {
      types.add(new BasicType(true, DType.INT32));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Long... values) {
      types.add(new BasicType(true, DType.INT64));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Float... values) {
      types.add(new BasicType(true, DType.FLOAT32));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Double... values) {
      types.add(new BasicType(true, DType.FLOAT64));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(ListType dataType, List<?>... values) {
      types.add(dataType);
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(String[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.STRING)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Boolean[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.BOOL8)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Byte[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.INT8)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Short[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.INT16)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Integer[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.INT32)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Long[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.INT64)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Float[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.FLOAT32)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(Double[]... values) {
      types.add(new ListType(true, new BasicType(true, DType.FLOAT64)));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(StructType dataType, StructData... values) {
      types.add(dataType);
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder column(StructType dataType, StructData[]... values) {
      types.add(new ListType(true, dataType));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder timestampDayColumn(Integer... values) {
      types.add(new BasicType(true, DType.TIMESTAMP_DAYS));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder timestampNanosecondsColumn(Long... values) {
      types.add(new BasicType(true, DType.TIMESTAMP_NANOSECONDS));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder timestampMillisecondsColumn(Long... values) {
      types.add(new BasicType(true, DType.TIMESTAMP_MILLISECONDS));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder timestampMicrosecondsColumn(Long... values) {
      types.add(new BasicType(true, DType.TIMESTAMP_MICROSECONDS));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder timestampSecondsColumn(Long... values) {
      types.add(new BasicType(true, DType.TIMESTAMP_SECONDS));
      typeErasedData.add(values);
      return this;
    }

    public TestBuilder decimal32Column(int scale, Integer... unscaledValues) {
      types.add(new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL32, scale)));
      typeErasedData.add(unscaledValues);
      return this;
    }

    public TestBuilder decimal32Column(int scale, RoundingMode mode, Double... values) {
      types.add(new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL32, scale)));
      BigDecimal[] data = Arrays.stream(values).map((x) -> {
        if (x == null) return null;
        return BigDecimal.valueOf(x).setScale(-scale, mode);
      }).toArray(BigDecimal[]::new);
      typeErasedData.add(data);
      return this;
    }

    public TestBuilder decimal32Column(int scale, RoundingMode mode, String... values) {
      types.add(new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL32, scale)));
      BigDecimal[] data = Arrays.stream(values).map((x) -> {
        if (x == null) return null;
        return new BigDecimal(x).setScale(-scale, mode);
      }).toArray(BigDecimal[]::new);
      typeErasedData.add(data);
      return this;
    }

    public TestBuilder decimal64Column(int scale, Long... unscaledValues) {
      types.add(new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL64, scale)));
      typeErasedData.add(unscaledValues);
      return this;
    }

    public TestBuilder decimal64Column(int scale, RoundingMode mode, Double... values) {
      types.add(new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL64, scale)));
      BigDecimal[] data = Arrays.stream(values).map((x) -> {
        if (x == null) return null;
        return BigDecimal.valueOf(x).setScale(-scale, mode);
      }).toArray(BigDecimal[]::new);
      typeErasedData.add(data);
      return this;
    }

    public TestBuilder decimal64Column(int scale, RoundingMode mode, String... values) {
      types.add(new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL64, scale)));
      BigDecimal[] data = Arrays.stream(values).map((x) -> {
        if (x == null) return null;
        return new BigDecimal(x).setScale(-scale, mode);
      }).toArray(BigDecimal[]::new);
      typeErasedData.add(data);
      return this;
    }

    public TestBuilder decimal128Column(int scale, RoundingMode mode, BigInteger... values) {
      types.add(new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL128, scale)));
      BigDecimal[] data = Arrays.stream(values).map((x) -> {
        if (x == null) return null;
        // negate the DType scale to get the BigDecimal scale, matching the
        // DECIMAL32/DECIMAL64 builders above
        return new BigDecimal(x, -scale, new MathContext(38, mode));
      }).toArray(BigDecimal[]::new);
      typeErasedData.add(data);
      return this;
    }

    private static ColumnVector from(DType type, Object dataArray) {
      ColumnVector ret = null;
      switch (type.typeId) {
        case STRING:
          ret = ColumnVector.fromStrings((String[]) dataArray);
          break;
        case BOOL8:
          ret = ColumnVector.fromBoxedBooleans((Boolean[]) dataArray);
          break;
        case INT8:
          ret = ColumnVector.fromBoxedBytes((Byte[]) dataArray);
          break;
        case INT16:
          ret = ColumnVector.fromBoxedShorts((Short[]) dataArray);
          break;
        case INT32:
          ret = ColumnVector.fromBoxedInts((Integer[]) dataArray);
          break;
        case INT64:
          ret = ColumnVector.fromBoxedLongs((Long[]) dataArray);
          break;
        case TIMESTAMP_DAYS:
          ret = ColumnVector.timestampDaysFromBoxedInts((Integer[]) dataArray);
          break;
        case TIMESTAMP_SECONDS:
          ret = ColumnVector.timestampSecondsFromBoxedLongs((Long[]) dataArray);
          break;
        case TIMESTAMP_MILLISECONDS:
          ret = ColumnVector.timestampMilliSecondsFromBoxedLongs((Long[]) dataArray);
          break;
        case TIMESTAMP_MICROSECONDS:
          ret = ColumnVector.timestampMicroSecondsFromBoxedLongs((Long[]) dataArray);
          break;
        case TIMESTAMP_NANOSECONDS:
          ret = ColumnVector.timestampNanoSecondsFromBoxedLongs((Long[]) dataArray);
          break;
        case FLOAT32:
          ret = ColumnVector.fromBoxedFloats((Float[]) dataArray);
          break;
        case FLOAT64:
          ret = ColumnVector.fromBoxedDoubles((Double[]) dataArray);
          break;
        case DECIMAL32:
        case DECIMAL64:
        case DECIMAL128:
          int scale = type.getScale();
          if (dataArray instanceof Integer[]) {
            BigDecimal[] data = Arrays.stream(((Integer[]) dataArray))
                .map((i) -> i == null ? null : BigDecimal.valueOf(i, -scale))
                .toArray(BigDecimal[]::new);
            ret = ColumnVector.build(type, data.length, (b) -> b.appendBoxed(data));
          } else if (dataArray instanceof Long[]) {
            BigDecimal[] data = Arrays.stream(((Long[]) dataArray))
                .map((i) -> i == null ? null : BigDecimal.valueOf(i, -scale))
                .toArray(BigDecimal[]::new);
            ret = ColumnVector.build(type, data.length, (b) -> b.appendBoxed(data));
          } else if (dataArray instanceof BigDecimal[]) {
            BigDecimal[] data = (BigDecimal[]) dataArray;
            ret = ColumnVector.build(type, data.length, (b) -> b.appendBoxed(data));
          } else {
            throw new IllegalArgumentException(
                "Data array of invalid type(" + dataArray.getClass() + ") to build decimal column");
          }
          break;
        default:
          throw new IllegalArgumentException(type + " is not supported yet");
      }
      return ret;
    }

    @SuppressWarnings("unchecked")
    private static <T> ColumnVector fromLists(DataType dataType, Object[] dataArray) {
      List[] dataLists = new List[dataArray.length];
      for (int i = 0; i < dataLists.length; ++i) {
        // The element in dataArray can be an array or list, because the below overloaded
        // version accepts a List of Array as rows.
        //  `public TestBuilder column(ListType dataType, List<?>... values)`
        Object dataList = dataArray[i];
        dataLists[i] = dataList == null ? null :
            (dataList instanceof List ? (List) dataList : Arrays.asList((Object[]) dataList));
      }
      return ColumnVector.fromLists(dataType, dataLists);
    }

    private static ColumnVector fromStructs(DataType dataType, StructData[] dataArray) {
      return ColumnVector.fromStructs(dataType, dataArray);
    }

    public Table build() {
      List<ColumnVector> columns = new ArrayList<>(types.size());
      try {
        for (int i = 0; i < types.size(); i++) {
          DataType dataType = types.get(i);
          DType dtype = dataType.getType();
          Object dataArray = typeErasedData.get(i);
          if (dtype.isNestedType()) {
            if (dtype.equals(DType.LIST)) {
              columns.add(fromLists(dataType, (Object[]) dataArray));
            } else if (dtype.equals(DType.STRUCT)) {
              columns.add(fromStructs(dataType, (StructData[]) dataArray));
            } else {
              throw new IllegalStateException("Unexpected nested type: " + dtype);
            }
          } else {
            columns.add(from(dtype, dataArray));
          }
        }
        return new Table(columns.toArray(new ColumnVector[columns.size()]));
      } finally {
        for (ColumnVector cv : columns) {
          cv.close();
        }
      }
    }
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/GroupByScanAggregationOnColumn.java
/*
 *
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * A GroupByScanAggregation for a specific column in a table.
 */
public final class GroupByScanAggregationOnColumn {
  protected final GroupByScanAggregation wrapped;
  protected final int columnIndex;

  GroupByScanAggregationOnColumn(GroupByScanAggregation wrapped, int columnIndex) {
    this.wrapped = wrapped;
    this.columnIndex = columnIndex;
  }

  public int getColumnIndex() {
    return columnIndex;
  }

  @Override
  public int hashCode() {
    return 31 * wrapped.hashCode() + columnIndex;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof GroupByScanAggregationOnColumn) {
      GroupByScanAggregationOnColumn o = (GroupByScanAggregationOnColumn) other;
      return wrapped.equals(o.wrapped) && columnIndex == o.columnIndex;
    }
    return false;
  }

  long createNativeInstance() {
    return wrapped.createNativeInstance();
  }

  long getDefaultOutput() {
    return wrapped.getDefaultOutput();
  }

  GroupByScanAggregation getWrapped() {
    return wrapped;
  }
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/NaNEquality.java
/*
 *
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ai.rapids.cudf;

/**
 * How should NaNs be compared in an operation. In floating point there are multiple
 * different binary representations for NaN.
 */
public enum NaNEquality {
  /**
   * No NaN representation is considered equal to any NaN representation, even for the
   * exact same representation.
   */
  UNEQUAL(false),
  /**
   * All representations of NaN are considered to be equal.
   */
  ALL_EQUAL(true);

  NaNEquality(boolean nansEqual) {
    this.nansEqual = nansEqual;
  }

  final boolean nansEqual;
}
0
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids
rapidsai_public_repos/cudf/java/src/main/java/ai/rapids/cudf/ContigSplitGroupByResult.java
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ai.rapids.cudf;

/**
 * Used to save the groups and the unique key table for
 * `Table.contiguousSplitGroupsAndGenUniqKeys`.
 * Each row in the unique key table corresponds to one group.
 *
 * Resource management note:
 * This class owns `groups` and `uniqKeysTable` (or `uniqKeyColumns` if the table has not
 * been constructed yet).
 * 1: Use `closeGroups` and `closeUniqKeyTable` to close the resources separately
 *    if you want to close them eagerly.
 * 2: Or let them be closed automatically through `AutoCloseable`.
 * Use `releaseGroups` to release the ownership of the `groups` to the caller; the caller
 * is then responsible for closing the `groups`.
 */
public class ContigSplitGroupByResult implements AutoCloseable {
  // set by JNI cpp code
  private ContiguousTable[] groups;

  // set by JNI cpp code, used to construct a unique key Table
  private long[] uniqKeyColumns;

  // An additional table is introduced to store the group keys,
  // and each key corresponds to one group.
  private Table uniqKeysTable;

  /**
   * Get the key table, where each row corresponds to one group.
   * Note: Close the key table with `closeUniqKeyTable`.
   *
   * @return the key table; it may be null if the native method `Table.contiguousSplitGroups`
   * was invoked with `genUniqKeys` set to false
   */
  public Table getUniqKeyTable() {
    if (uniqKeysTable == null && uniqKeyColumns != null && uniqKeyColumns.length > 0) {
      // new `Table` asserts uniqKeyColumns.length > 0
      uniqKeysTable = new Table(uniqKeyColumns);
      uniqKeyColumns = null;
    }
    return uniqKeysTable;
  }

  /**
   * Close the key table or the key columns.
   */
  public void closeUniqKeyTable() {
    if (uniqKeysTable != null) {
      uniqKeysTable.close();
      uniqKeysTable = null;
    } else if (uniqKeyColumns != null) {
      for (long handle : uniqKeyColumns) {
        ColumnVector.deleteCudfColumn(handle);
      }
      uniqKeyColumns = null;
    }
  }

  /**
   * Get the split group tables.
   * Note: Close the group tables with `closeGroups`.
   *
   * @return the split group tables
   */
  public ContiguousTable[] getGroups() {
    return groups;
  }

  /**
   * Release the ownership of the `groups`.
   * The caller is responsible for closing the returned groups.
   *
   * @return split group tables
   */
  ContiguousTable[] releaseGroups() {
    ContiguousTable[] copy = groups;
    groups = null;
    return copy;
  }

  /**
   * Close the split group tables.
   */
  public void closeGroups() {
    if (groups != null) {
      for (ContiguousTable contig : groups) {
        contig.close();
      }
      groups = null;
    }
  }

  @Override
  public void close() {
    try {
      closeUniqKeyTable();
    } finally {
      closeGroups();
    }
  }
}
0