repo_id
stringlengths 21
96
| file_path
stringlengths 31
155
| content
stringlengths 1
92.9M
| __index_level_0__
int64 0
0
|
---|---|---|---|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ScalarTest.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.BasicType;
import ai.rapids.cudf.HostColumnVector.ListType;
import ai.rapids.cudf.HostColumnVector.StructType;
import org.junit.jupiter.api.Test;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Tests for creating {@code Scalar} values of every supported type, reading their
 * contents back, and verifying reference counting and string-repeat semantics.
 */
public class ScalarTest extends CudfTestBase {
  @Test
  public void testDoubleClose() {
    // A second close() must throw instead of double-freeing native memory.
    Scalar s = Scalar.fromNull(DType.INT32);
    s.close();
    assertThrows(IllegalStateException.class, s::close);
  }

  @Test
  public void testIncRefAndDoubleFree() {
    // Each incRefCount() adds one reference; the three nested try blocks
    // consume all three references, so one more close() must throw.
    Scalar s = Scalar.fromNull(DType.INT32);
    try (Scalar ignored1 = s) {
      try (Scalar ignored2 = s.incRefCount()) {
        try (Scalar ignored3 = s.incRefCount()) {
        }
      }
    }
    assertThrows(IllegalStateException.class, s::close);
  }

  @Test
  public void testNull() {
    // Exercise null scalars for every type id, including null LIST and STRUCT
    // scalars built around each element type.
    for (DType.DTypeEnum dataType : DType.DTypeEnum.values()) {
      DType type;
      if (dataType.isDecimalType()) {
        // Decimal types need an explicit scale; -3 is arbitrary but fixed.
        type = DType.create(dataType, -3);
      } else {
        type = DType.create(dataType);
      }
      if (!type.isNestedType()) {
        try (Scalar s = Scalar.fromNull(type)) {
          assertEquals(type, s.getType());
          assertFalse(s.isValid(), "null validity for " + type);
        }
      }
      // create elementType for nested types
      HostColumnVector.DataType hDataType;
      if (DType.EMPTY.equals(type)) {
        continue;
      } else if (DType.LIST.equals(type)) {
        // list of int32
        hDataType = new ListType(true, new BasicType(true, DType.INT32));
      } else if (DType.STRUCT.equals(type)) {
        // struct of int32
        hDataType = new StructType(true, new BasicType(true, DType.INT32));
      } else {
        // non nested type
        hDataType = new BasicType(true, type);
      }
      // test list scalar with elementType(`type`): a null list scalar still
      // reports an empty child view of the right type.
      try (Scalar s = Scalar.listFromNull(hDataType);
           ColumnView listCv = s.getListAsColumnView()) {
        assertFalse(s.isValid(), "null validity for " + type);
        assertEquals(DType.LIST, s.getType());
        assertEquals(type, listCv.getType());
        assertEquals(0L, listCv.getRowCount());
        assertEquals(0L, listCv.getNullCount());
        if (type.isNestedType()) {
          try (ColumnView child = listCv.getChildColumnView(0)) {
            assertEquals(DType.INT32, child.getType());
            assertEquals(0L, child.getRowCount());
            assertEquals(0L, child.getNullCount());
          }
        }
      }
      // test struct scalar with elementType(`type`): each child of a null
      // struct scalar is a single null row.
      try (Scalar s = Scalar.structFromNull(hDataType, hDataType, hDataType)) {
        assertFalse(s.isValid(), "null validity for " + type);
        assertEquals(DType.STRUCT, s.getType());
        ColumnView[] children = s.getChildrenFromStructScalar();
        try {
          for (ColumnView child : children) {
            assertEquals(hDataType.getType(), child.getType());
            assertEquals(1L, child.getRowCount());
            assertEquals(1L, child.getNullCount());
          }
        } finally {
          for (ColumnView child : children) child.close();
        }
      }
    }
  }

  @Test
  public void testBool() {
    try (Scalar s = Scalar.fromBool(false)) {
      assertEquals(DType.BOOL8, s.getType());
      assertTrue(s.isValid());
      assertFalse(s.getBoolean());
    }
  }

  @Test
  public void testByte() {
    try (Scalar s = Scalar.fromByte((byte) 1)) {
      assertEquals(DType.INT8, s.getType());
      assertTrue(s.isValid());
      assertEquals(1, s.getByte());
    }
  }

  @Test
  public void testShort() {
    try (Scalar s = Scalar.fromShort((short) 2)) {
      assertEquals(DType.INT16, s.getType());
      assertTrue(s.isValid());
      assertEquals(2, s.getShort());
    }
  }

  @Test
  public void testInt() {
    try (Scalar s = Scalar.fromInt(3)) {
      assertEquals(DType.INT32, s.getType());
      assertTrue(s.isValid());
      assertEquals(3, s.getInt());
    }
  }

  @Test
  public void testLong() {
    try (Scalar s = Scalar.fromLong(4)) {
      assertEquals(DType.INT64, s.getType());
      assertTrue(s.isValid());
      assertEquals(4L, s.getLong());
    }
  }

  @Test
  public void testFloat() {
    try (Scalar s = Scalar.fromFloat(5.1f)) {
      assertEquals(DType.FLOAT32, s.getType());
      assertTrue(s.isValid());
      assertEquals(5.1f, s.getFloat());
    }
  }

  @Test
  public void testDouble() {
    try (Scalar s = Scalar.fromDouble(6.2)) {
      assertEquals(DType.FLOAT64, s.getType());
      assertTrue(s.isValid());
      assertEquals(6.2, s.getDouble());
    }
  }

  @Test
  public void testDecimal() {
    // Values chosen to map to DECIMAL32, DECIMAL64, and DECIMAL128 backings.
    BigDecimal[] bigDecimals = new BigDecimal[] {
        BigDecimal.valueOf(1234, 0),
        BigDecimal.valueOf(12345678, 2),
        BigDecimal.valueOf(1234567890123L, 6),
        new BigDecimal(new BigInteger("12312341234123412341234123412341234120"), 4)
    };
    for (BigDecimal dec : bigDecimals) {
      try (Scalar s = Scalar.fromDecimal(dec)) {
        DType dtype = DType.fromJavaBigDecimal(dec);
        assertEquals(dtype, s.getType());
        assertTrue(s.isValid());
        if (dtype.getTypeId() == DType.DTypeEnum.DECIMAL64) {
          assertEquals(dec.unscaledValue().longValueExact(), s.getLong());
        } else if (dtype.getTypeId() == DType.DTypeEnum.DECIMAL32) {
          assertEquals(dec.unscaledValue().intValueExact(), s.getInt());
        } else if (dtype.getTypeId() == DType.DTypeEnum.DECIMAL128) {
          assertEquals(dec.unscaledValue(), s.getBigDecimal().unscaledValue());
        }
        assertEquals(dec, s.getBigDecimal());
      }
      // Build the scalar from the unscaled value directly, falling back from
      // int to long to BigInteger when the narrower conversion overflows
      // (intValueExact/longValueExact throw ArithmeticException on overflow).
      try (Scalar s = Scalar.fromDecimal(-dec.scale(), dec.unscaledValue().intValueExact())) {
        assertEquals(dec, s.getBigDecimal());
      } catch (java.lang.ArithmeticException ex) {
        try (Scalar s = Scalar.fromDecimal(-dec.scale(), dec.unscaledValue().longValueExact())) {
          assertEquals(dec, s.getBigDecimal());
          assertTrue(s.getType().isBackedByLong());
        } catch (java.lang.ArithmeticException e) {
          try (Scalar s = Scalar.fromDecimal(-dec.scale(), dec.unscaledValue())) {
            assertEquals(dec, s.getBigDecimal());
          }
        }
      }
    }
  }

  @Test
  public void testTimestampDays() {
    try (Scalar s = Scalar.timestampDaysFromInt(7)) {
      assertEquals(DType.TIMESTAMP_DAYS, s.getType());
      assertTrue(s.isValid());
      assertEquals(7, s.getInt());
    }
  }

  @Test
  public void testTimestampSeconds() {
    try (Scalar s = Scalar.timestampFromLong(DType.TIMESTAMP_SECONDS, 8)) {
      assertEquals(DType.TIMESTAMP_SECONDS, s.getType());
      assertTrue(s.isValid());
      assertEquals(8L, s.getLong());
    }
  }

  @Test
  public void testTimestampMilliseconds() {
    try (Scalar s = Scalar.timestampFromLong(DType.TIMESTAMP_MILLISECONDS, 9)) {
      assertEquals(DType.TIMESTAMP_MILLISECONDS, s.getType());
      assertTrue(s.isValid());
      assertEquals(9L, s.getLong());
    }
  }

  @Test
  public void testTimestampMicroseconds() {
    try (Scalar s = Scalar.timestampFromLong(DType.TIMESTAMP_MICROSECONDS, 10)) {
      assertEquals(DType.TIMESTAMP_MICROSECONDS, s.getType());
      assertTrue(s.isValid());
      assertEquals(10L, s.getLong());
    }
  }

  @Test
  public void testTimestampNanoseconds() {
    try (Scalar s = Scalar.timestampFromLong(DType.TIMESTAMP_NANOSECONDS, 11)) {
      assertEquals(DType.TIMESTAMP_NANOSECONDS, s.getType());
      assertTrue(s.isValid());
      assertEquals(11L, s.getLong());
    }
  }

  @Test
  public void testString() {
    try (Scalar s = Scalar.fromString("TEST")) {
      assertEquals(DType.STRING, s.getType());
      assertTrue(s.isValid());
      assertEquals("TEST", s.getJavaString());
      assertArrayEquals(new byte[] {'T', 'E', 'S', 'T'}, s.getUTF8());
    }
  }

  @Test
  public void testUTF8String() {
    try (Scalar s = Scalar.fromUTF8String("TEST".getBytes(StandardCharsets.UTF_8))) {
      assertEquals(DType.STRING, s.getType());
      assertTrue(s.isValid());
      assertEquals("TEST", s.getJavaString());
      assertArrayEquals(new byte[] {'T', 'E', 'S', 'T'}, s.getUTF8());
    }
    // An empty byte array is a valid, empty string scalar (not a null one).
    try (Scalar s = Scalar.fromUTF8String("".getBytes(StandardCharsets.UTF_8))) {
      assertEquals(DType.STRING, s.getType());
      assertTrue(s.isValid());
      assertEquals("", s.getJavaString());
      assertArrayEquals(new byte[] {}, s.getUTF8());
    }
  }

  @Test
  public void testList() {
    // list of int
    try (ColumnVector listInt = ColumnVector.fromInts(1, 2, 3, 4);
         Scalar s = Scalar.listFromColumnView(listInt)) {
      assertEquals(DType.LIST, s.getType());
      assertTrue(s.isValid());
      try (ColumnView v = s.getListAsColumnView()) {
        assertColumnsAreEqual(listInt, v);
      }
    }
    // list of list
    HostColumnVector.DataType listDT =
        new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32));
    try (ColumnVector listList =
             ColumnVector.fromLists(listDT, Arrays.asList(1, 2, 3), Arrays.asList(4, 5, 6));
         Scalar s = Scalar.listFromColumnView(listList)) {
      assertEquals(DType.LIST, s.getType());
      assertTrue(s.isValid());
      try (ColumnView v = s.getListAsColumnView()) {
        assertColumnsAreEqual(listList, v);
      }
    }
  }

  @Test
  public void testStruct() {
    // Mixed child types, with col0/col1 deliberately repeated to verify that
    // the same view can appear more than once in a struct scalar.
    try (ColumnVector col0 = ColumnVector.fromInts(1);
         ColumnVector col1 = ColumnVector.fromBoxedDoubles(1.2);
         ColumnVector col2 = ColumnVector.fromStrings("a");
         ColumnVector col3 = ColumnVector.fromDecimals(BigDecimal.TEN);
         ColumnVector col4 = ColumnVector.daysFromInts(10);
         ColumnVector col5 = ColumnVector.durationSecondsFromLongs(12345L);
         Scalar s = Scalar.structFromColumnViews(col0, col1, col2, col3, col4, col5, col0, col1)) {
      assertEquals(DType.STRUCT, s.getType());
      assertTrue(s.isValid());
      ColumnView[] children = s.getChildrenFromStructScalar();
      try {
        assertColumnsAreEqual(col0, children[0]);
        assertColumnsAreEqual(col1, children[1]);
        assertColumnsAreEqual(col2, children[2]);
        assertColumnsAreEqual(col3, children[3]);
        assertColumnsAreEqual(col4, children[4]);
        assertColumnsAreEqual(col5, children[5]);
        assertColumnsAreEqual(col0, children[6]);
        assertColumnsAreEqual(col1, children[7]);
      } finally {
        for (ColumnView child : children) child.close();
      }
    }
    // test Struct Scalar with null members
    try (ColumnVector col0 = ColumnVector.fromInts(1);
         ColumnVector col1 = ColumnVector.fromBoxedDoubles((Double) null);
         ColumnVector col2 = ColumnVector.fromStrings((String) null);
         Scalar s1 = Scalar.structFromColumnViews(col0, col1, col2);
         Scalar s2 = Scalar.structFromColumnViews(col1, col2)) {
      ColumnView[] children = s1.getChildrenFromStructScalar();
      try {
        assertColumnsAreEqual(col0, children[0]);
        assertColumnsAreEqual(col1, children[1]);
        assertColumnsAreEqual(col2, children[2]);
      } finally {
        for (ColumnView child : children) child.close();
      }
      ColumnView[] children2 = s2.getChildrenFromStructScalar();
      try {
        assertColumnsAreEqual(col1, children2[0]);
        assertColumnsAreEqual(col2, children2[1]);
      } finally {
        for (ColumnView child : children2) child.close();
      }
    }
    // test Struct Scalar with single column
    try (ColumnVector col0 = ColumnVector.fromInts(1234);
         Scalar s = Scalar.structFromColumnViews(col0)) {
      ColumnView[] children = s.getChildrenFromStructScalar();
      try {
        assertColumnsAreEqual(col0, children[0]);
      } finally {
        children[0].close();
      }
    }
    // test Struct Scalar without column
    try (Scalar s = Scalar.structFromColumnViews()) {
      assertEquals(DType.STRUCT, s.getType());
      assertTrue(s.isValid());
      ColumnView[] children = s.getChildrenFromStructScalar();
      assertEquals(0, children.length);
    }
    // test Struct Scalar with nested types
    HostColumnVector.DataType listType =
        new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32));
    HostColumnVector.DataType structType =
        new HostColumnVector.StructType(true, new HostColumnVector.BasicType(true, DType.INT32),
            new HostColumnVector.BasicType(true, DType.INT64));
    HostColumnVector.DataType nestedStructType = new HostColumnVector.StructType(
        true, new HostColumnVector.BasicType(true, DType.STRING), listType, structType);
    try (ColumnVector strCol = ColumnVector.fromStrings("AAAAAA");
         ColumnVector listCol = ColumnVector.fromLists(listType, Arrays.asList(1, 2, 3, 4, 5));
         ColumnVector structCol =
             ColumnVector.fromStructs(structType, new HostColumnVector.StructData(1, -1L));
         ColumnVector nestedStructCol = ColumnVector.fromStructs(nestedStructType,
             new HostColumnVector.StructData(
                 null, Arrays.asList(1, 2, null), new HostColumnVector.StructData(null, 10L)));
         Scalar s = Scalar.structFromColumnViews(strCol, listCol, structCol, nestedStructCol)) {
      assertEquals(DType.STRUCT, s.getType());
      assertTrue(s.isValid());
      ColumnView[] children = s.getChildrenFromStructScalar();
      try {
        assertColumnsAreEqual(strCol, children[0]);
        assertColumnsAreEqual(listCol, children[1]);
        assertColumnsAreEqual(structCol, children[2]);
        assertColumnsAreEqual(nestedStructCol, children[3]);
      } finally {
        for (ColumnView child : children) child.close();
      }
    }
  }

  @Test
  public void testRepeatString() {
    // Invalid scalar. The result scalar is now managed by try-with-resources;
    // the previous code leaked it (and the one in the empty-string case).
    try (Scalar nullString = Scalar.fromString(null);
         Scalar result = nullString.repeatString(5)) {
      assertFalse(result.isValid());
    }
    // Empty string.
    try (Scalar emptyString = Scalar.fromString("");
         Scalar result = emptyString.repeatString(5)) {
      assertTrue(result.isValid());
      assertEquals("", result.getJavaString());
    }
    // Negative repeatTimes yields an empty string.
    try (Scalar s = Scalar.fromString("Hello World");
         Scalar result = s.repeatString(-100)) {
      assertTrue(result.isValid());
      assertEquals("", result.getJavaString());
    }
    // Zero repeatTimes.
    try (Scalar s = Scalar.fromString("Hello World");
         Scalar result = s.repeatString(0)) {
      assertTrue(result.isValid());
      assertEquals("", result.getJavaString());
    }
    // Trivial input, output is copied exactly from input.
    try (Scalar s = Scalar.fromString("Hello World");
         Scalar result = s.repeatString(1)) {
      assertTrue(result.isValid());
      assertEquals(s.getJavaString(), result.getJavaString());
    }
    // Trivial input.
    try (Scalar s = Scalar.fromString("abcxyz-");
         Scalar result = s.repeatString(3)) {
      assertTrue(result.isValid());
      assertEquals("abcxyz-abcxyz-abcxyz-", result.getJavaString());
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/LargeTableTest.java
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
/**
* Test for operations on tables with large row counts.
*/
public class LargeTableTest extends CudfTestBase {
  /** 10 GiB RMM pool so that large-row-count tests do not run out of memory. */
  static final long RMM_POOL_SIZE_LARGE = 10L * 1024 * 1024 * 1024;

  public LargeTableTest() {
    // Set large RMM pool size. Ensure that the test does not run out of memory,
    // for large row counts.
    super(RmmAllocationMode.POOL, RMM_POOL_SIZE_LARGE);
  }

  /**
   * Tests that exploding large array columns will result in CudfColumnOverflowException
   * if the column size limit is crossed.
   */
  @Test
  public void testExplodeOverflow() {
    int numRows = 1000_000;
    int arraySize = 1000;
    String str = "abc";
    // 1 Million rows, each row being { "abc", [ false, false, ... ] },
    // with 1000 elements in the array in each row.
    // When the second column is exploded, it produces 1 Billion rows.
    // The string row is repeated once for each element in the array,
    // thus producing a 1 Billion row string column, with 3 Billion chars
    // in the child column. This should cause an overflow exception.
    // Note: Java boolean arrays are zero-initialized (all false), so no
    // explicit fill is needed. The original code filled the array with a
    // `char` loop index, which would spin forever if arraySize ever exceeded
    // Character.MAX_VALUE.
    boolean[] arrBools = new boolean[arraySize];
    Exception exception = assertThrows(CudfColumnSizeOverflowException.class, () -> {
      try (Scalar strScalar = Scalar.fromString(str);
           ColumnVector arrRow = ColumnVector.fromBooleans(arrBools);
           Scalar arrScalar = Scalar.listFromColumnView(arrRow);
           ColumnVector strVector = ColumnVector.fromScalar(strScalar, numRows);
           ColumnVector arrVector = ColumnVector.fromScalar(arrScalar, numRows);
           Table inputTable = new Table(strVector, arrVector);
           Table outputTable = inputTable.explode(1)) {
        // Use long arithmetic: numRows * arraySize as ints would overflow for
        // larger parameters, silently comparing against a wrapped value.
        assertEquals(outputTable.getColumns()[0].getRowCount(), (long) numRows * arraySize);
        fail("Exploding this large table should have caused a CudfColumnSizeOverflowException.");
      }
    });
    assertTrue(exception.getMessage().contains("Size of output exceeds the column size limit"));
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/FloatColumnVectorTest.java
|
/*
*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.Test;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Tests for building, reading, casting, and appending FLOAT32 column vectors,
 * on both the host and the device.
 */
public class FloatColumnVectorTest extends CudfTestBase {
  @Test
  public void testCreateColumnVectorBuilder() {
    // Build a device column via the builder callback; appending only 1 of the
    // 3 reserved rows must still yield a column with no nulls.
    try (ColumnVector floatColumnVector = ColumnVector.build(DType.FLOAT32, 3,
        (b) -> b.append(1.0f))) {
      assertFalse(floatColumnVector.hasNulls());
    }
  }

  @Test
  public void testArrayAllocation() {
    // Float values are checked within a percentage tolerance rather than exactly.
    try (HostColumnVector floatColumnVector = HostColumnVector.fromFloats(2.1f, 3.02f, 5.003f)) {
      assertFalse(floatColumnVector.hasNulls());
      assertEqualsWithinPercentage(floatColumnVector.getFloat(0), 2.1, 0.01);
      assertEqualsWithinPercentage(floatColumnVector.getFloat(1), 3.02, 0.01);
      assertEqualsWithinPercentage(floatColumnVector.getFloat(2), 5.003, 0.001);
    }
  }

  @Test
  public void testUpperIndexOutOfBoundsException() {
    // Reading one past the last row trips an assertion (tests run with -ea).
    try (HostColumnVector floatColumnVector = HostColumnVector.fromFloats(2.1f, 3.02f, 5.003f)) {
      assertThrows(AssertionError.class, () -> floatColumnVector.getFloat(3));
      assertFalse(floatColumnVector.hasNulls());
    }
  }

  @Test
  public void testLowerIndexOutOfBoundsException() {
    // A negative index likewise trips an assertion.
    try (HostColumnVector floatColumnVector = HostColumnVector.fromFloats(2.1f, 3.02f, 5.003f)) {
      assertFalse(floatColumnVector.hasNulls());
      assertThrows(AssertionError.class, () -> floatColumnVector.getFloat(-1));
    }
  }

  @Test
  public void testAddingNullValues() {
    // Boxed nulls become null rows; the first six entries stay valid.
    try (HostColumnVector cv = HostColumnVector.fromBoxedFloats(
        new Float[]{2f, 3f, 4f, 5f, 6f, 7f, null, null})) {
      assertTrue(cv.hasNulls());
      assertEquals(2, cv.getNullCount());
      for (int i = 0; i < 6; i++) {
        assertFalse(cv.isNull(i));
      }
      assertTrue(cv.isNull(6));
      assertTrue(cv.isNull(7));
    }
  }

  @Test
  public void testOverrunningTheBuffer() {
    // Appending 4 values into a 3-row builder must fail an assertion.
    try (Builder builder = HostColumnVector.builder(DType.FLOAT32, 3)) {
      assertThrows(AssertionError.class,
          () -> builder.append(2.1f).appendNull().appendArray(5.003f, 4.0f).build());
    }
  }

  @Test
  public void testCastToFloat() {
    // Casting from FLOAT64 and INT16 to FLOAT32, verified on the host copies.
    try (ColumnVector doubleColumnVector = ColumnVector.fromDoubles(new double[]{4.3, 3.8, 8});
         ColumnVector shortColumnVector = ColumnVector.fromShorts(new short[]{100});
         ColumnVector tmp1 = doubleColumnVector.asFloats();
         HostColumnVector floatColumnVector1 = tmp1.copyToHost();
         ColumnVector tmp2 = shortColumnVector.asFloats();
         HostColumnVector floatColumnVector2 = tmp2.copyToHost()) {
      assertEqualsWithinPercentage(4.3, floatColumnVector1.getFloat(0), 0.001);
      assertEqualsWithinPercentage(3.8, floatColumnVector1.getFloat(1), 0.001);
      assertEquals(8, floatColumnVector1.getFloat(2));
      assertEquals(100, floatColumnVector2.getFloat(0));
    }
  }

  @Test
  void testAppendVector() {
    // Randomized exhaustive check of Builder.append(HostColumnVector):
    // for every destination size, prefill amount, and "data withheld" amount,
    // build a random source vector, prefill the destination (mirrored into a
    // ground-truth builder), append the source, and verify rows and validity.
    // NOTE: the single shared Random drives both the source build and the
    // prefill loop, so the draw order is significant for reproducibility.
    Random random = new Random(192312989128L);
    for (int dstSize = 1; dstSize <= 100; dstSize++) {
      for (int dstPrefilledSize = 0; dstPrefilledSize < dstSize; dstPrefilledSize++) {
        final int srcSize = dstSize - dstPrefilledSize;
        for (int sizeOfDataNotToAdd = 0; sizeOfDataNotToAdd <= dstPrefilledSize; sizeOfDataNotToAdd++) {
          try (Builder dst = HostColumnVector.builder(DType.FLOAT32, dstSize);
               HostColumnVector src = HostColumnVector.build(DType.FLOAT32, srcSize, (b) -> {
                 for (int i = 0; i < srcSize; i++) {
                   if (random.nextBoolean()) {
                     b.appendNull();
                   } else {
                     b.append(random.nextFloat());
                   }
                 }
               });
               Builder gtBuilder = HostColumnVector.builder(DType.FLOAT32,
                   dstPrefilledSize)) {
            assertEquals(dstSize, srcSize + dstPrefilledSize);
            //add the first half of the prefilled list
            for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
              if (random.nextBoolean()) {
                dst.appendNull();
                gtBuilder.appendNull();
              } else {
                float a = random.nextFloat();
                dst.append(a);
                gtBuilder.append(a);
              }
            }
            // append the src vector
            dst.append(src);
            try (HostColumnVector dstVector = dst.build();
                 HostColumnVector gt = gtBuilder.build()) {
              // prefilled region must match the ground truth
              for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
                assertEquals(gt.isNull(i), dstVector.isNull(i));
                if (!gt.isNull(i)) {
                  assertEquals(gt.getFloat(i), dstVector.getFloat(i));
                }
              }
              // appended region must match the source vector
              for (int i = dstPrefilledSize - sizeOfDataNotToAdd, j = 0; i < dstSize - sizeOfDataNotToAdd && j < srcSize; i++, j++) {
                assertEquals(src.isNull(j), dstVector.isNull(i));
                if (!src.isNull(j)) {
                  assertEquals(src.getFloat(j), dstVector.getFloat(i));
                }
              }
              // validity bits past the last row must read as valid (not null)
              if (dstVector.hasValidityVector()) {
                long maxIndex =
                    BitVectorHelper.getValidityAllocationSizeInBytes(dstVector.getRowCount()) * 8;
                for (long i = dstSize - sizeOfDataNotToAdd; i < maxIndex; i++) {
                  assertFalse(dstVector.isNullExtendedRange(i));
                }
              }
            }
          }
        }
      }
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/IntColumnVectorTest.java
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.Test;
import java.util.Random;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Tests for building, reading, casting, and appending INT32 column vectors,
 * covering both signed and unsigned interpretations and both the classic
 * builder and the ColumnBuilderHelper path.
 */
public class IntColumnVectorTest extends CudfTestBase {
  @Test
  public void testCreateColumnVectorBuilder() {
    // Build through both builder APIs; a partially filled builder (1 of 3
    // rows) must still produce a column with no nulls.
    try (ColumnVector intColumnVector = ColumnVector.build(DType.INT32, 3, (b) -> b.append(1))) {
      assertFalse(intColumnVector.hasNulls());
    }
    try (ColumnVector intColumnVector = ColumnBuilderHelper.buildOnDevice(
        new HostColumnVector.BasicType(true, DType.INT32), 3, (b) -> b.append(1))) {
      assertFalse(intColumnVector.hasNulls());
    }
  }

  @Test
  public void testArrayAllocation() {
    // Shared verification applied to vectors built via both APIs.
    Consumer<HostColumnVector> verify = (cv) -> {
      assertFalse(cv.hasNulls());
      assertEquals(cv.getInt(0), 2);
      assertEquals(cv.getInt(1), 3);
      assertEquals(cv.getInt(2), 5);
    };
    try (HostColumnVector cv = HostColumnVector.fromInts(2, 3, 5)) {
      verify.accept(cv);
    }
    try (HostColumnVector cv = ColumnBuilderHelper.fromInts(true, 2, 3, 5)) {
      verify.accept(cv);
    }
  }

  @Test
  public void testUnsignedArrayAllocation() {
    // Values above Integer.MAX_VALUE round-trip via Integer.toUnsignedLong.
    Consumer<HostColumnVector> verify = (cv) -> {
      assertFalse(cv.hasNulls());
      assertEquals(0xfedcba98L, Integer.toUnsignedLong(cv.getInt(0)));
      assertEquals(0x80000000L, Integer.toUnsignedLong(cv.getInt(1)));
      assertEquals(5, Integer.toUnsignedLong(cv.getInt(2)));
    };
    try (HostColumnVector cv = HostColumnVector.fromUnsignedInts(0xfedcba98, 0x80000000, 5)) {
      verify.accept(cv);
    }
    try (HostColumnVector cv = ColumnBuilderHelper.fromInts(false, 0xfedcba98, 0x80000000, 5)) {
      verify.accept(cv);
    }
  }

  @Test
  public void testUpperIndexOutOfBoundsException() {
    // Reading one past the last row trips an assertion (tests run with -ea).
    Consumer<HostColumnVector> verify = (cv) -> {
      assertThrows(AssertionError.class, () -> cv.getInt(3));
      assertFalse(cv.hasNulls());
    };
    try (HostColumnVector icv = HostColumnVector.fromInts(2, 3, 5)) {
      verify.accept(icv);
    }
    try (HostColumnVector icv = ColumnBuilderHelper.fromInts(true, 2, 3, 5)) {
      verify.accept(icv);
    }
  }

  @Test
  public void testLowerIndexOutOfBoundsException() {
    // A negative index likewise trips an assertion.
    Consumer<HostColumnVector> verify = (cv) -> {
      assertFalse(cv.hasNulls());
      assertThrows(AssertionError.class, () -> cv.getInt(-1));
    };
    try (HostColumnVector icv = HostColumnVector.fromInts(2, 3, 5)) {
      verify.accept(icv);
    }
    try (HostColumnVector icv = ColumnBuilderHelper.fromInts(true, 2, 3, 5)) {
      verify.accept(icv);
    }
  }

  @Test
  public void testAddingNullValues() {
    // Boxed nulls become null rows; the first six entries stay valid.
    Consumer<HostColumnVector> verify = (cv) -> {
      assertTrue(cv.hasNulls());
      assertEquals(2, cv.getNullCount());
      for (int i = 0; i < 6; i++) {
        assertFalse(cv.isNull(i));
      }
      assertTrue(cv.isNull(6));
      assertTrue(cv.isNull(7));
    };
    try (HostColumnVector cv = HostColumnVector.fromBoxedInts(2, 3, 4, 5, 6, 7, null, null)) {
      verify.accept(cv);
    }
    try (HostColumnVector cv = ColumnBuilderHelper.fromBoxedInts(true, 2, 3, 4, 5, 6, 7, null, null)) {
      verify.accept(cv);
    }
  }

  @Test
  public void testAddingUnsignedNullValues() {
    // Mix of nulls and large unsigned values (note: 0xfedbca98 here is a
    // different constant from the 0xfedcba98 used in
    // testUnsignedArrayAllocation, but it is self-consistent within this test).
    Consumer<HostColumnVector> verify = (cv) -> {
      assertTrue(cv.hasNulls());
      assertEquals(2, cv.getNullCount());
      for (int i = 0; i < 6; i++) {
        assertFalse(cv.isNull(i));
      }
      assertEquals(0xfedbca98L, Integer.toUnsignedLong(cv.getInt(4)));
      assertEquals(0x80000000L, Integer.toUnsignedLong(cv.getInt(5)));
      assertTrue(cv.isNull(6));
      assertTrue(cv.isNull(7));
    };
    try (HostColumnVector cv = HostColumnVector.fromBoxedUnsignedInts(
        2, 3, 4, 5, 0xfedbca98, 0x80000000, null, null)) {
      verify.accept(cv);
    }
    try (HostColumnVector cv = ColumnBuilderHelper.fromBoxedInts(false,
        2, 3, 4, 5, 0xfedbca98, 0x80000000, null, null)) {
      verify.accept(cv);
    }
  }

  @Test
  public void testOverrunningTheBuffer() {
    // Appending 4 values into a 3-row builder must fail an assertion.
    try (Builder builder = HostColumnVector.builder(DType.INT32, 3)) {
      assertThrows(AssertionError.class,
          () -> builder.append(2).appendNull().appendArray(new int[]{5, 4}).build());
    }
  }

  @Test
  public void testCastToInt() {
    // FLOAT64 -> INT32 truncates toward zero; INT16 -> INT32 widens.
    try (ColumnVector doubleColumnVector = ColumnVector.fromDoubles(new double[]{4.3, 3.8, 8});
         ColumnVector shortColumnVector = ColumnVector.fromShorts(new short[]{100});
         ColumnVector intColumnVector1 = doubleColumnVector.asInts();
         ColumnVector expected1 = ColumnVector.fromInts(4, 3, 8);
         ColumnVector intColumnVector2 = shortColumnVector.asInts();
         ColumnVector expected2 = ColumnVector.fromInts(100)) {
      AssertUtils.assertColumnsAreEqual(expected1, intColumnVector1);
      AssertUtils.assertColumnsAreEqual(expected2, intColumnVector2);
    }
  }

  @Test
  void testAppendVector() {
    // Randomized exhaustive check of Builder.append(HostColumnVector):
    // for every destination size, prefill amount, and "data withheld" amount,
    // build a random source vector, prefill the destination (mirrored into a
    // ground-truth builder), append the source, and verify rows and validity.
    // NOTE: the single shared Random drives both the source build and the
    // prefill loop, so the draw order is significant for reproducibility.
    Random random = new Random(192312989128L);
    for (int dstSize = 1; dstSize <= 100; dstSize++) {
      for (int dstPrefilledSize = 0; dstPrefilledSize < dstSize; dstPrefilledSize++) {
        final int srcSize = dstSize - dstPrefilledSize;
        for (int sizeOfDataNotToAdd = 0; sizeOfDataNotToAdd <= dstPrefilledSize; sizeOfDataNotToAdd++) {
          try (Builder dst = HostColumnVector.builder(DType.INT32, dstSize);
               HostColumnVector src = HostColumnVector.build(DType.INT32, srcSize, (b) -> {
                 for (int i = 0; i < srcSize; i++) {
                   if (random.nextBoolean()) {
                     b.appendNull();
                   } else {
                     b.append(random.nextInt());
                   }
                 }
               });
               Builder gtBuilder = HostColumnVector.builder(DType.INT32,
                   dstPrefilledSize)) {
            assertEquals(dstSize, srcSize + dstPrefilledSize);
            //add the first half of the prefilled list
            for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
              if (random.nextBoolean()) {
                dst.appendNull();
                gtBuilder.appendNull();
              } else {
                int a = random.nextInt();
                dst.append(a);
                gtBuilder.append(a);
              }
            }
            // append the src vector
            dst.append(src);
            try (HostColumnVector dstVector = dst.build();
                 HostColumnVector gt = gtBuilder.build()) {
              // prefilled region must match the ground truth
              for (int i = 0; i < dstPrefilledSize - sizeOfDataNotToAdd; i++) {
                assertEquals(gt.isNull(i), dstVector.isNull(i));
                if (!gt.isNull(i)) {
                  assertEquals(gt.getInt(i), dstVector.getInt(i));
                }
              }
              // appended region must match the source vector
              for (int i = dstPrefilledSize - sizeOfDataNotToAdd, j = 0; i < dstSize - sizeOfDataNotToAdd && j < srcSize; i++, j++) {
                assertEquals(src.isNull(j), dstVector.isNull(i));
                if (!src.isNull(j)) {
                  assertEquals(src.getInt(j), dstVector.getInt(i));
                }
              }
              // validity bits past the last row must read as valid (not null)
              if (dstVector.hasValidityVector()) {
                long maxIndex =
                    BitVectorHelper.getValidityAllocationSizeInBytes(dstVector.getRowCount()) * 8;
                for (long i = dstSize - sizeOfDataNotToAdd; i < maxIndex; i++) {
                  assertFalse(dstVector.isNullExtendedRange(i));
                }
              }
            }
          }
        }
      }
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/CudaFatalTest.java
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Verifies that once an unrecoverable (sticky) CUDA error has occurred,
 * subsequent CUDA work surfaces as CudaFatalException rather than a plain
 * CudaException.
 *
 * NOTE(review): this test deliberately corrupts device state, so the order of
 * operations matters and it cannot share a context with other tests
 * (it intentionally does not extend CudfTestBase).
 */
public class CudaFatalTest {
  @Test
  public void testCudaFatalException() {
    try (ColumnVector cv = ColumnVector.fromInts(1, 2, 3, 4, 5)) {
      // Trigger an illegal device memory access via a bogus device buffer.
      // The immediate failure may be a recoverable-looking CudaException,
      // which is intentionally ignored here.
      try (ColumnView badCv = ColumnView.fromDeviceBuffer(new BadDeviceBuffer(), 0, DType.INT8, 256);
           ColumnView ret = badCv.sub(badCv);
           HostColumnVector hcv = ret.copyToHost()) {
      } catch (CudaException ignored) {
      }
      // CUDA API invoked by libcudf failed because of previous unrecoverable fatal error
      assertThrows(CudaFatalException.class, () -> {
        try (ColumnVector cv2 = cv.asLongs()) {
        } catch (CudaFatalException ex) {
          assertEquals(CudaException.CudaError.cudaErrorIllegalAddress, ex.getCudaError());
          throw ex;
        }
      });
    }
    // CUDA API invoked by RMM failed because of previous unrecoverable fatal error
    assertThrows(CudaFatalException.class, () -> {
      try (ColumnVector cv = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5)) {
      } catch (CudaFatalException ex) {
        assertEquals(CudaException.CudaError.cudaErrorIllegalAddress, ex.getCudaError());
        throw ex;
      }
    });
  }

  /**
   * A device buffer that claims 256 bytes at an invalid address, used to
   * provoke an illegal-address fault on the device.
   */
  private static class BadDeviceBuffer extends BaseDeviceMemoryBuffer {
    public BadDeviceBuffer() {
      super(256L, 256L, (MemoryBufferCleaner) null);
    }

    @Override
    public MemoryBuffer slice(long offset, long len) {
      // Slicing is unsupported for this intentionally-broken buffer.
      return null;
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ColumnViewNonEmptyNullsTest.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This class will house only tests that need to explicitly set non-empty nulls
*/
/**
 * This class will house only tests that need to explicitly set non-empty nulls.
 *
 * A "non-empty null" is a null row of a list column whose offsets still span
 * child elements; purgeNonEmptyNulls() is expected to drop those elements.
 */
public class ColumnViewNonEmptyNullsTest extends CudfTestBase {

  private static final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();

  @Test
  void testAndNullReconfigureNulls() {
    // BITWISE_AND merge: a row stays valid only when it is valid in the target
    // and in every supplied column, so v0's nulls propagate everywhere below.
    try (ColumnVector v0 = ColumnVector.fromBoxedInts(0, 100, null, null, Integer.MIN_VALUE, null);
         ColumnVector v1 = ColumnVector.fromBoxedInts(0, 100, 1, 2, Integer.MIN_VALUE, null);
         ColumnVector intResult = v1.mergeAndSetValidity(BinaryOp.BITWISE_AND, v0);
         ColumnVector v2 = ColumnVector.fromStrings("0", "100", "1", "2", "MIN_VALUE", "3");
         ColumnVector v3 = v0.mergeAndSetValidity(BinaryOp.BITWISE_AND, v1, v2);
         ColumnVector stringResult = v2.mergeAndSetValidity(BinaryOp.BITWISE_AND, v0, v1);
         ColumnVector stringExpected = ColumnVector.fromStrings("0", "100", null, null, "MIN_VALUE", null);
         // Merging with no columns must leave the validity mask untouched.
         ColumnVector noMaskResult = v2.mergeAndSetValidity(BinaryOp.BITWISE_AND)) {
      assertColumnsAreEqual(v0, intResult);
      assertColumnsAreEqual(v0, v3);
      assertColumnsAreEqual(stringExpected, stringResult);
      assertColumnsAreEqual(v2, noMaskResult);
    }
  }

  @Test
  void testOrNullReconfigureNulls() {
    // BITWISE_OR merge: a row is valid when it is valid in any participating
    // column, so only rows null in every input stay null.
    try (ColumnVector v0 = ColumnVector.fromBoxedInts(0, 100, null, null, Integer.MIN_VALUE, null);
         ColumnVector v1 = ColumnVector.fromBoxedInts(0, 100, 1, 2, Integer.MIN_VALUE, null);
         ColumnVector v2 = ColumnVector.fromBoxedInts(0, 100, 1, 2, Integer.MIN_VALUE, Integer.MAX_VALUE);
         ColumnVector intResultV0 = v1.mergeAndSetValidity(BinaryOp.BITWISE_OR, v0);
         ColumnVector intResultV0V1 = v1.mergeAndSetValidity(BinaryOp.BITWISE_OR, v0, v1);
         // Repeating columns must not change the result of the OR.
         ColumnVector intResultMulti = v1.mergeAndSetValidity(BinaryOp.BITWISE_OR, v0, v0, v1, v1, v0, v1, v0);
         ColumnVector intResultv0v1v2 = v2.mergeAndSetValidity(BinaryOp.BITWISE_OR, v0, v1, v2);
         ColumnVector v3 = ColumnVector.fromStrings("0", "100", "1", "2", "MIN_VALUE", "3");
         ColumnVector stringResult = v3.mergeAndSetValidity(BinaryOp.BITWISE_OR, v0, v1);
         ColumnVector stringExpected = ColumnVector.fromStrings("0", "100", "1", "2", "MIN_VALUE", null);
         // Merging with no columns must leave the validity mask untouched.
         ColumnVector noMaskResult = v3.mergeAndSetValidity(BinaryOp.BITWISE_OR)) {
      assertColumnsAreEqual(v0, intResultV0);
      assertColumnsAreEqual(v1, intResultV0V1);
      assertColumnsAreEqual(v1, intResultMulti);
      assertColumnsAreEqual(v2, intResultv0v1v2);
      assertColumnsAreEqual(stringExpected, stringResult);
      assertColumnsAreEqual(v3, noMaskResult);
    }
  }

  /**
   * Builds a list column and then manually clears a validity bit so that row 1
   * becomes a null that still owns child elements (a non-empty null).
   *
   * The caller needs to make sure to close the returned ColumnViews (both
   * elements of the array).
   *
   * @return {input, colWithNonEmptyNulls}
   */
  private ColumnView[] getColumnViewWithNonEmptyNulls() {
    List<Integer> list0 = Arrays.asList(1, 2, 3);
    List<Integer> list1 = Arrays.asList(4, 5, null);
    List<Integer> list2 = Arrays.asList(7, 8, 9);
    List<Integer> list3 = null;
    ColumnVector input = ColumnVectorTest.makeListsColumn(DType.INT32, list0, list1, list2, list3);

    // Modify the validity buffer directly on the device: mark row 1 null without
    // touching offsets, which turns it into a non-empty null.
    BaseDeviceMemoryBuffer dmb = input.getDeviceBufferFor(BufferType.VALIDITY);
    try (HostMemoryBuffer newValidity = hostMemoryAllocator.allocate(64)) {
      newValidity.copyFromDeviceBuffer(dmb);
      BitVectorHelper.setNullAt(newValidity, 1);
      dmb.copyFromHostBuffer(newValidity);
    }

    try (HostColumnVector hostColumnVector = input.copyToHost()) {
      assert (hostColumnVector.isNull(1));
      assert (hostColumnVector.isNull(3));
    }

    // Offsets are unchanged, so null row 1 still spans elements [3, 6).
    // Fix: getListOffsetsView() returns a resource that must be closed
    // (it was previously leaked).
    try (ColumnVector expectedOffsetsBeforePurge = ColumnVector.fromInts(0, 3, 6, 9, 9);
         ColumnView offsetsCvBeforePurge = input.getListOffsetsView()) {
      assertColumnsAreEqual(expectedOffsetsBeforePurge, offsetsCvBeforePurge);
    }

    ColumnView colWithNonEmptyNulls = new ColumnView(input.type, input.rows, Optional.of(2L), dmb,
        input.getDeviceBufferFor(BufferType.OFFSET), input.getChildColumnViews());
    assertEquals(2, colWithNonEmptyNulls.nullCount);
    return new ColumnView[]{input, colWithNonEmptyNulls};
  }

  @Test
  void testPurgeNonEmptyNullsList() {
    ColumnView[] values = getColumnViewWithNonEmptyNulls();
    try (ColumnView colWithNonEmptyNulls = values[1];
         ColumnView input = values[0];
         // purge non-empty nulls
         ColumnView colWithEmptyNulls = colWithNonEmptyNulls.purgeNonEmptyNulls();
         // After the purge, null rows 1 and 3 must span no elements.
         ColumnVector expectedOffsetsAfterPurge = ColumnVector.fromInts(0, 3, 3, 6, 6);
         ColumnView offsetsCvAfterPurge = colWithEmptyNulls.getListOffsetsView()) {
      assertTrue(colWithNonEmptyNulls.hasNonEmptyNulls());
      assertColumnsAreEqual(expectedOffsetsAfterPurge, offsetsCvAfterPurge);
      assertFalse(colWithEmptyNulls.hasNonEmptyNulls());
    }
  }

  @Test
  void testPurgeNonEmptyNullsStruct() {
    ColumnView[] values = getColumnViewWithNonEmptyNulls();
    try (ColumnView listCol = values[1];
         ColumnView input = values[0];
         ColumnView stringsCol = ColumnVector.fromStrings("A", "col", "of", "Strings");
         ColumnView structView = ColumnView.makeStructView(stringsCol, listCol);
         // Purging the parent struct must also purge its list child.
         ColumnView structWithEmptyNulls = structView.purgeNonEmptyNulls();
         ColumnView newListChild = structWithEmptyNulls.getChildColumnView(1);
         ColumnVector expectedOffsetsAfterPurge = ColumnVector.fromInts(0, 3, 3, 6, 6);
         ColumnView offsetsCvAfterPurge = newListChild.getListOffsetsView()) {
      assertColumnsAreEqual(expectedOffsetsAfterPurge, offsetsCvAfterPurge);
      assertFalse(newListChild.hasNonEmptyNulls());
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/TestUtils.java
|
/*
*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.io.File;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.stream.IntStream;
import static java.lang.Double.NEGATIVE_INFINITY;
import static java.lang.Double.POSITIVE_INFINITY;
/**
* Utility class for generating test data
*/
/**
 * Utility class for generating test data.
 */
class TestUtils {
  // Bit flags selecting which "special" values may be injected into the output.
  static int NULL = 0x00000001;
  static int ZERO = 0x00000002;
  static int INF = 0x00000004;
  static int NAN = 0x00000008;
  static int NEG_ZERO = 0x00000010;
  static int ALL = NULL|ZERO|INF|NAN|NEG_ZERO;
  static int NONE = 0;

  private static boolean hasZero(int v) {
    return (v & ZERO) > 0;
  }

  private static boolean hasNegativeZero(int v) {
    return (v & NEG_ZERO) > 0;
  }

  private static boolean hasNan(int v) {
    return (v & NAN) > 0;
  }

  private static boolean hasNull(int v) {
    return (v & NULL) > 0;
  }

  private static boolean hasInf(int v) {
    return (v & INF) > 0;
  }

  /** Generates {@code size} Longs with all special values enabled; see {@link #getLongs(long, int, int)}. */
  static Long[] getLongs(final long seed, final int size) {
    return getLongs(seed, size, ALL);
  }

  /** Generates {@code size} Doubles with all special values enabled; see {@link #getDoubles(long, int, int)}. */
  static Double[] getDoubles(final long seed, final int size) {
    return getDoubles(seed, size, ALL);
  }

  /** Generates {@code size} Integers with all special values enabled; see {@link #getIntegers(long, int, int)}. */
  static Integer[] getIntegers(final long seed, final int size) {
    return getIntegers(seed, size, ALL);
  }

  /**
   * A convenience method for generating a fixed set of Long values. This is by no means uniformly
   * distributed. i.e. some values have more probability of occurrence than others.
   * (Fixed copy-paste Javadoc: this method produces Longs, not Integers.)
   *
   * @param seed Seed value to be used to generate values
   * @param size Number of values to be generated
   * @param specialValues Values to include. Please refer to {@link TestUtils#ALL} for possible values
   */
  static Long[] getLongs(final long seed, final int size, int specialValues) {
    Random r = new Random(seed);
    Long[] result = new Long[size];
    // Requested special values extend the switch below with extra cases.
    List<Long> v = new ArrayList<>();
    if (hasZero(specialValues)) v.add(0L);
    if (hasNull(specialValues)) v.add(null);
    Long[] v_arr = v.toArray(new Long[0]);
    IntStream.range(0, size).forEach(index -> {
      // Cases 0-3 produce random/extreme values; higher cases pick special
      // values. Case k maps to v_arr[k - 4], so each case is only reachable
      // when enough special values were requested.
      switch (r.nextInt(v_arr.length + 4)) {
        case 0:
          result[index] = (long) (Long.MAX_VALUE * r.nextDouble());
          break;
        case 1:
          result[index] = (long) (Long.MIN_VALUE * r.nextDouble());
          break;
        case 2:
          result[index] = Long.MIN_VALUE;
          break;
        case 3:
          result[index] = Long.MAX_VALUE;
          break;
        case 4:
          result[index] = v_arr[0];
          break;
        default:
          result[index] = v_arr[1];
      }
    });
    return result;
  }

  /**
   * A convenience method for generating a fixed set of Integer values. This is by no means uniformly
   * distributed. i.e. some values have more probability of occurrence than others.
   *
   * @param seed Seed value to be used to generate values
   * @param size Number of values to be generated
   * @param specialValues Values to include. Please refer to {@link TestUtils#ALL} for possible values
   */
  static Integer[] getIntegers(final long seed, final int size, int specialValues) {
    Random r = new Random(seed);
    Integer[] result = new Integer[size];
    // Requested special values extend the switch below with extra cases.
    List<Integer> v = new ArrayList<>();
    if (hasZero(specialValues)) v.add(0);
    if (hasNull(specialValues)) v.add(null);
    Integer[] v_arr = v.toArray(new Integer[0]);
    IntStream.range(0, size).forEach(index -> {
      switch (r.nextInt(v_arr.length + 4)) {
        case 0:
          result[index] = (int) (Integer.MAX_VALUE * r.nextDouble());
          break;
        case 1:
          result[index] = (int) (Integer.MIN_VALUE * r.nextDouble());
          break;
        case 2:
          result[index] = Integer.MIN_VALUE;
          break;
        case 3:
          result[index] = Integer.MAX_VALUE;
          break;
        case 4:
          result[index] = v_arr[0];
          break;
        default:
          result[index] = v_arr[1];
      }
    });
    return result;
  }

  /**
   * A convenience method for generating a fixed set of Double values. This is by no means uniformly
   * distributed. i.e. some values have more probability of occurrence than others.
   *
   * @param seed Seed value to be used to generate values
   * @param size Number of values to be generated
   * @param specialValues Values to include. Please refer to {@link TestUtils#ALL} for possible values
   */
  static Double[] getDoubles(final long seed, final int size, int specialValues) {
    Random r = new Random(seed);
    Double[] result = new Double[size];
    // Requested special values extend the switch below with extra cases
    // (up to six: 0.0, -0.0, +Inf, -Inf, NaN, null).
    List<Double> v = new ArrayList<>();
    if (hasZero(specialValues)) v.add(0.0);
    if (hasNegativeZero(specialValues)) v.add(-0.0);
    if (hasInf(specialValues)) {
      v.add(POSITIVE_INFINITY);
      v.add(NEGATIVE_INFINITY);
    }
    if (hasNan(specialValues)) v.add(Double.NaN);
    if (hasNull(specialValues)) v.add(null);
    Double[] v_arr = v.toArray(new Double[0]);
    IntStream.range(0, size).forEach(index -> {
      // Case k (k >= 4) maps to v_arr[k - 4] and is only reachable when the
      // corresponding special value was requested.
      switch (r.nextInt(v_arr.length + 4)) {
        case 0:
          result[index] = 1 + (Double.MAX_VALUE * r.nextDouble() - 2);
          break;
        case 1:
          result[index] = 1 + (Double.MIN_VALUE * r.nextDouble() - 2);
          break;
        case 2:
          result[index] = Double.MIN_VALUE;
          break;
        case 3:
          result[index] = Double.MAX_VALUE;
          break;
        case 4:
          result[index] = v_arr[0];
          break;
        case 5:
          result[index] = v_arr[1];
          break;
        case 6:
          result[index] = v_arr[2];
          break;
        case 7:
          result[index] = v_arr[3];
          break;
        case 8:
          result[index] = v_arr[4];
          break;
        default:
          result[index] = v_arr[5];
      }
    });
    return result;
  }

  /**
   * Resolves a classpath resource to a {@link File}.
   *
   * @param resourceName resource path relative to the classpath root
   * @throws IllegalArgumentException if the resource does not exist
   */
  public static File getResourceAsFile(String resourceName) {
    URL url = TestUtils.class.getClassLoader().getResource(resourceName);
    if (url == null) {
      throw new IllegalArgumentException("Unable to locate resource: " + resourceName);
    }

    try {
      return new File(url.toURI());
    } catch (URISyntaxException e) {
      // Preserve the cause so the bad URI is visible in the stack trace.
      throw new RuntimeException(e);
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ArrowColumnVectorTest.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import ai.rapids.cudf.HostColumnVector.ListType;
import ai.rapids.cudf.HostColumnVector.StructType;
import org.apache.arrow.memory.BufferAllocator;
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.DateDayVector;
import org.apache.arrow.vector.DecimalVector;
import org.apache.arrow.vector.Float4Vector;
import org.apache.arrow.vector.Float8Vector;
import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.VarCharVector;
import org.apache.arrow.vector.complex.ListVector;
import org.apache.arrow.vector.complex.StructVector;
import org.apache.arrow.vector.util.Text;
import org.junit.jupiter.api.Test;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Tests building cudf columns from Arrow vector buffers via ArrowColumnBuilder.
 * Each test fills an Arrow vector, hands its data/validity/offset buffers to the
 * builder, and compares the resulting device column with an expected one.
 */
public class ArrowColumnVectorTest extends CudfTestBase {

  @Test
  void testArrowIntMultiBatches() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.INT32));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    int numVecs = 4;
    IntVector[] vectors = new IntVector[numVecs];
    try {
      ArrayList<Integer> expectedArr = new ArrayList<>();
      for (int j = 0; j < numVecs; j++) {
        int pos = 0;
        int count = 10000;
        IntVector vector = new IntVector("intVec", allocator);
        int start = count * j;
        int end = count * (j + 1);
        for (int i = start; i < end; i++) {
          expectedArr.add(i);
          vector.setSafe(pos, i);
          pos++;
        }
        vector.setValueCount(count);
        vectors[j] = vector;
        ByteBuffer data = vector.getDataBuffer().nioBuffer();
        ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
        // Multiple batches must concatenate into a single column.
        builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, null);
      }
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.fromBoxedInts(expectedArr.toArray(new Integer[0]))) {
        assertEquals(cv.getType(), DType.INT32);
        assertColumnsAreEqual(expected, cv, "ints");
      }
    } finally {
      // Arrow vectors must be released to keep the allocator accounting clean.
      for (int i = 0; i < numVecs; i++) {
        vectors[i].close();
      }
    }
  }

  @Test
  void testArrowLong() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.INT64));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (BigIntVector vector = new BigIntVector("vec", allocator)) {
      ArrayList<Long> expectedArr = new ArrayList<>();
      int count = 10000;
      for (int i = 0; i < count; i++) {
        // Deprecated `new Long(i)` replaced with autoboxing (Long.valueOf).
        expectedArr.add((long) i);
        vector.setSafe(i, i);
      }
      vector.setValueCount(count);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, null);
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.fromBoxedLongs(expectedArr.toArray(new Long[0]))) {
        assertEquals(cv.getType(), DType.INT64);
        assertColumnsAreEqual(expected, cv, "Longs");
      }
    }
  }

  @Test
  void testArrowLongOnHeap() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.INT64));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (BigIntVector vector = new BigIntVector("vec", allocator)) {
      ArrayList<Long> expectedArr = new ArrayList<>();
      int count = 10000;
      for (int i = 0; i < count; i++) {
        expectedArr.add((long) i);
        vector.setSafe(i, i);
      }
      vector.setValueCount(count);
      // test that we handle convert buffer to direct byte buffer if its on the heap
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer dataOnHeap = ByteBuffer.allocate(data.remaining());
      dataOnHeap.put(data);
      dataOnHeap.flip();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      ByteBuffer validOnHeap = ByteBuffer.allocate(valid.remaining());
      // BUG FIX: this previously copied `data` (already drained by the put
      // above, so nothing was transferred) instead of the validity buffer,
      // leaving validOnHeap zero-filled. Copy the validity bytes.
      validOnHeap.put(valid);
      validOnHeap.flip();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), dataOnHeap, validOnHeap, null);
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.fromBoxedLongs(expectedArr.toArray(new Long[0]))) {
        assertEquals(cv.getType(), DType.INT64);
        assertColumnsAreEqual(expected, cv, "Longs");
      }
    }
  }

  @Test
  void testArrowDouble() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.FLOAT64));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (Float8Vector vector = new Float8Vector("vec", allocator)) {
      ArrayList<Double> expectedArr = new ArrayList<>();
      int count = 10000;
      for (int i = 0; i < count; i++) {
        expectedArr.add((double) i);
        vector.setSafe(i, i);
      }
      vector.setValueCount(count);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, null);
      double[] array = expectedArr.stream().mapToDouble(i -> i).toArray();
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.fromDoubles(array)) {
        assertEquals(cv.getType(), DType.FLOAT64);
        assertColumnsAreEqual(expected, cv, "doubles");
      }
    }
  }

  @Test
  void testArrowFloat() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.FLOAT32));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (Float4Vector vector = new Float4Vector("vec", allocator)) {
      ArrayList<Float> expectedArr = new ArrayList<>();
      int count = 10000;
      for (int i = 0; i < count; i++) {
        expectedArr.add((float) i);
        vector.setSafe(i, i);
      }
      vector.setValueCount(count);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, null);
      // Unbox to a primitive array; the list never contains nulls here, the
      // NaN fallback only guards against a null element.
      float[] floatArray = new float[expectedArr.size()];
      int i = 0;
      for (Float f : expectedArr) {
        floatArray[i++] = (f != null ? f : Float.NaN);
      }
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.fromFloats(floatArray)) {
        assertEquals(cv.getType(), DType.FLOAT32);
        assertColumnsAreEqual(expected, cv, "floats");
      }
    }
  }

  @Test
  void testArrowString() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.STRING));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (VarCharVector vector = new VarCharVector("vec", allocator)) {
      ArrayList<String> expectedArr = new ArrayList<>();
      int count = 10000;
      for (int i = 0; i < count; i++) {
        String toAdd = i + "testString";
        expectedArr.add(toAdd);
        vector.setSafe(i, new Text(toAdd));
      }
      vector.setValueCount(count);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      // Variable-width types additionally require the offsets buffer.
      ByteBuffer offsets = vector.getOffsetBuffer().nioBuffer();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, offsets);
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.fromStrings(expectedArr.toArray(new String[0]))) {
        assertEquals(cv.getType(), DType.STRING);
        assertColumnsAreEqual(expected, cv, "Strings");
      }
    }
  }

  @Test
  void testArrowStringOnHeap() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.STRING));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (VarCharVector vector = new VarCharVector("vec", allocator)) {
      ArrayList<String> expectedArr = new ArrayList<>();
      int count = 10000;
      for (int i = 0; i < count; i++) {
        String toAdd = i + "testString";
        expectedArr.add(toAdd);
        vector.setSafe(i, new Text(toAdd));
      }
      vector.setValueCount(count);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      ByteBuffer offsets = vector.getOffsetBuffer().nioBuffer();
      ByteBuffer dataOnHeap = ByteBuffer.allocate(data.remaining());
      dataOnHeap.put(data);
      dataOnHeap.flip();
      ByteBuffer validOnHeap = ByteBuffer.allocate(valid.remaining());
      // BUG FIX: previously copied the (already-drained) data buffer here;
      // copy the validity buffer so the heap copy actually holds validity bits.
      validOnHeap.put(valid);
      validOnHeap.flip();
      ByteBuffer offsetsOnHeap = ByteBuffer.allocate(offsets.remaining());
      offsetsOnHeap.put(offsets);
      offsetsOnHeap.flip();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), dataOnHeap, validOnHeap, offsetsOnHeap);
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.fromStrings(expectedArr.toArray(new String[0]))) {
        assertEquals(cv.getType(), DType.STRING);
        assertColumnsAreEqual(expected, cv, "Strings");
      }
    }
  }

  @Test
  void testArrowDays() {
    ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.TIMESTAMP_DAYS));
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (DateDayVector vector = new DateDayVector("vec", allocator)) {
      ArrayList<Integer> expectedArr = new ArrayList<>();
      int count = 10000;
      for (int i = 0; i < count; i++) {
        expectedArr.add(i);
        vector.setSafe(i, i);
      }
      vector.setValueCount(count);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, null);
      int[] array = expectedArr.stream().mapToInt(i -> i).toArray();
      try (ColumnVector cv = builder.buildAndPutOnDevice();
           ColumnVector expected = ColumnVector.daysFromInts(array)) {
        assertEquals(cv.getType(), DType.TIMESTAMP_DAYS);
        assertColumnsAreEqual(expected, cv, "timestamp days");
      }
    }
  }

  @Test
  void testArrowDecimalThrows() {
    // DECIMAL32 is not supported by the Arrow builder and must be rejected.
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (DecimalVector vector = new DecimalVector("vec", allocator, 7, 3)) {
      ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.create(DType.DTypeEnum.DECIMAL32, 3)));
      vector.setSafe(0, -3);
      vector.setSafe(1, 1);
      vector.setSafe(2, 2);
      vector.setSafe(3, 3);
      vector.setSafe(4, 4);
      vector.setSafe(5, 5);
      vector.setValueCount(6);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, null);
      assertThrows(IllegalArgumentException.class, () -> {
        builder.buildAndPutOnDevice();
      });
    }
  }

  @Test
  void testArrowDecimal64Throws() {
    // DECIMAL64 is not supported by the Arrow builder and must be rejected.
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (DecimalVector vector = new DecimalVector("vec", allocator, 18, 0)) {
      ArrowColumnBuilder builder = new ArrowColumnBuilder(new HostColumnVector.BasicType(true, DType.create(DType.DTypeEnum.DECIMAL64, -11)));
      vector.setSafe(0, -3);
      vector.setSafe(1, 1);
      vector.setSafe(2, 2);
      vector.setValueCount(3);
      ByteBuffer data = vector.getDataBuffer().nioBuffer();
      ByteBuffer valid = vector.getValidityBuffer().nioBuffer();
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), data, valid, null);
      assertThrows(IllegalArgumentException.class, () -> {
        builder.buildAndPutOnDevice();
      });
    }
  }

  @Test
  void testArrowListThrows() {
    // Nested list types are not supported by the Arrow builder.
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (ListVector vector = ListVector.empty("list", allocator)) {
      ArrowColumnBuilder builder = new ArrowColumnBuilder(new ListType(true, new HostColumnVector.BasicType(true, DType.STRING)));
      // buffer don't matter as we expect it to throw anyway
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), null, null, null);
      assertThrows(IllegalArgumentException.class, () -> {
        builder.buildAndPutOnDevice();
      });
    }
  }

  @Test
  void testArrowStructThrows() {
    // Nested struct types are not supported by the Arrow builder.
    BufferAllocator allocator = new RootAllocator(Long.MAX_VALUE);
    try (StructVector vector = StructVector.empty("struct", allocator)) {
      ArrowColumnBuilder builder = new ArrowColumnBuilder(new StructType(true, new HostColumnVector.BasicType(true, DType.STRING)));
      // buffer don't matter as we expect it to throw anyway
      builder.addBatch(vector.getValueCount(), vector.getNullCount(), null, null, null);
      assertThrows(IllegalArgumentException.class, () -> {
        builder.buildAndPutOnDevice();
      });
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ColumnBuilderHelper.java
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Objects;
import java.util.function.Consumer;
/**
* ColumnBuilderHelper helps to test ColumnBuilder with existed ColumnVector tests.
*/
/**
 * Helper for exercising {@code HostColumnVector.ColumnBuilder} through the
 * existing ColumnVector test suites. Every factory delegates to
 * {@link #build(HostColumnVector.DataType, int, Consumer)} with a callback
 * that appends the supplied values (nulls included where boxed).
 */
public class ColumnBuilderHelper {

  public static HostColumnVector build(
      HostColumnVector.DataType type,
      int rows,
      Consumer<HostColumnVector.ColumnBuilder> init) {
    // The builder is closed by try-with-resources; the built vector outlives it.
    try (HostColumnVector.ColumnBuilder builder = new HostColumnVector.ColumnBuilder(type, rows)) {
      init.accept(builder);
      return builder.build();
    }
  }

  public static ColumnVector buildOnDevice(
      HostColumnVector.DataType type,
      int rows,
      Consumer<HostColumnVector.ColumnBuilder> init) {
    // Same as build(), but the result is materialized on the device.
    try (HostColumnVector.ColumnBuilder builder = new HostColumnVector.ColumnBuilder(type, rows)) {
      init.accept(builder);
      return builder.buildAndPutOnDevice();
    }
  }

  public static HostColumnVector decimalFromBigInts(int scale, BigInteger... values) {
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(true, DType.create(DType.DTypeEnum.DECIMAL128, -scale));
    return build(type, values.length, builder -> {
      for (BigInteger value : values) {
        if (value == null) {
          builder.appendNull();
        } else {
          builder.appendDecimal128(value.toByteArray());
        }
      }
    });
  }

  public static HostColumnVector fromBoxedBytes(boolean signed, Byte... values) {
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(true, signed ? DType.INT8 : DType.UINT8);
    return build(type, values.length, builder -> {
      for (Byte value : values) {
        if (value == null) {
          builder.appendNull();
        } else {
          builder.append(value);
        }
      }
    });
  }

  public static HostColumnVector fromBoxedDoubles(Double... values) {
    HostColumnVector.BasicType type = new HostColumnVector.BasicType(true, DType.FLOAT64);
    return build(type, values.length, builder -> {
      for (Double value : values) {
        if (value == null) {
          builder.appendNull();
        } else {
          builder.append(value);
        }
      }
    });
  }

  public static HostColumnVector fromBoxedInts(boolean signed, Integer... values) {
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(true, signed ? DType.INT32 : DType.UINT32);
    return build(type, values.length, builder -> {
      for (Integer value : values) {
        if (value == null) {
          builder.appendNull();
        } else {
          builder.append(value);
        }
      }
    });
  }

  public static HostColumnVector fromBoxedLongs(boolean signed, Long... values) {
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(true, signed ? DType.INT64 : DType.UINT64);
    return build(type, values.length, builder -> {
      for (Long value : values) {
        if (value == null) {
          builder.appendNull();
        } else {
          builder.append(value);
        }
      }
    });
  }

  public static HostColumnVector fromBytes(boolean signed, byte... values) {
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(false, signed ? DType.INT8 : DType.UINT8);
    return build(type, values.length, builder -> {
      for (byte value : values) {
        builder.append(value);
      }
    });
  }

  public static HostColumnVector fromDecimals(BigDecimal... values) {
    // Mirrors HostColumnVector.fromDecimals: pick the widest precision and the
    // largest scale among the non-null inputs, then rescale that reference
    // value to derive the column's decimal type.
    BigDecimal widest = Arrays.stream(values)
        .filter(Objects::nonNull)
        .max(Comparator.comparingInt(BigDecimal::precision))
        .orElse(BigDecimal.ZERO);
    int maxScale = Arrays.stream(values)
        .filter(Objects::nonNull)
        .map(BigDecimal::scale)
        .max(Comparator.naturalOrder())
        .orElse(0);
    BigDecimal reference = widest.setScale(maxScale, RoundingMode.UNNECESSARY);
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(true, DType.fromJavaBigDecimal(reference));
    return build(type, values.length, builder -> {
      for (BigDecimal value : values) {
        if (value == null) {
          builder.appendNull();
        } else {
          builder.append(value);
        }
      }
    });
  }

  public static HostColumnVector fromDoubles(double... values) {
    HostColumnVector.BasicType type = new HostColumnVector.BasicType(false, DType.FLOAT64);
    return build(type, values.length, builder -> {
      for (double value : values) {
        builder.append(value);
      }
    });
  }

  public static HostColumnVector fromInts(boolean signed, int... values) {
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(false, signed ? DType.INT32 : DType.UINT32);
    return build(type, values.length, builder -> {
      for (int value : values) {
        builder.append(value);
      }
    });
  }

  public static HostColumnVector fromLongs(boolean signed, long... values) {
    HostColumnVector.BasicType type =
        new HostColumnVector.BasicType(false, signed ? DType.INT64 : DType.UINT64);
    return build(type, values.length, builder -> {
      for (long value : values) {
        builder.append(value);
      }
    });
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/TableTest.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.BasicType;
import ai.rapids.cudf.HostColumnVector.Builder;
import ai.rapids.cudf.HostColumnVector.DataType;
import ai.rapids.cudf.HostColumnVector.ListType;
import ai.rapids.cudf.HostColumnVector.StructData;
import ai.rapids.cudf.HostColumnVector.StructType;
import ai.rapids.cudf.ast.BinaryOperation;
import ai.rapids.cudf.ast.BinaryOperator;
import ai.rapids.cudf.ast.ColumnReference;
import ai.rapids.cudf.ast.CompiledExpression;
import ai.rapids.cudf.ast.TableReference;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.OriginalType;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import java.io.*;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static ai.rapids.cudf.AssertUtils.assertPartialColumnsAreEqual;
import static ai.rapids.cudf.AssertUtils.assertPartialTablesAreEqual;
import static ai.rapids.cudf.AssertUtils.assertTableTypes;
import static ai.rapids.cudf.AssertUtils.assertTablesAreEqual;
import static ai.rapids.cudf.ColumnWriterOptions.mapColumn;
import static ai.rapids.cudf.ParquetWriterOptions.listBuilder;
import static ai.rapids.cudf.ParquetWriterOptions.structBuilder;
import static ai.rapids.cudf.Table.TestBuilder;
import static ai.rapids.cudf.Table.removeNullMasksIfNeeded;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TableTest extends CudfTestBase {
  // Host-side allocator used when tests need to stage bytes in host memory before reading.
  private static final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();
  // Test resource files bundled with the test jar; each exercises a different reader path.
  private static final File TEST_PARQUET_FILE = TestUtils.getResourceAsFile("acq.parquet");
  private static final File TEST_PARQUET_FILE_CHUNKED_READ = TestUtils.getResourceAsFile("splittable.parquet");
  private static final File TEST_PARQUET_FILE_BINARY = TestUtils.getResourceAsFile("binary.parquet");
  private static final File TEST_ORC_FILE = TestUtils.getResourceAsFile("TestOrcFile.orc");
  private static final File TEST_ORC_TIMESTAMP_DATE_FILE = TestUtils.getResourceAsFile("timestamp-date-test.orc");
  private static final File TEST_DECIMAL_PARQUET_FILE = TestUtils.getResourceAsFile("decimal.parquet");
  private static final File TEST_ALL_TYPES_PLAIN_AVRO_FILE = TestUtils.getResourceAsFile("alltypes_plain.avro");
  private static final File TEST_SIMPLE_CSV_FILE = TestUtils.getResourceAsFile("simple.csv");
  private static final File TEST_SIMPLE_JSON_FILE = TestUtils.getResourceAsFile("people.json");
  private static final File TEST_JSON_ERROR_FILE = TestUtils.getResourceAsFile("people_with_invalid_lines.json");
  // Schema matching CSV_DATA_BUFFER below; shared by several CSV reader tests.
  private static final Schema CSV_DATA_BUFFER_SCHEMA = Schema.builder()
      .column(DType.INT32, "A")
      .column(DType.FLOAT64, "B")
      .column(DType.INT64, "C")
      .build();
  // In-memory CSV payload: '|' delimited, single-quoted values on the first data row,
  // and a literal NULL in column B of row 7 to exercise null parsing.
  private static final byte[] CSV_DATA_BUFFER = ("A|B|C\n" +
      "'0'|'110.0'|'120'\n" +
      "1|111.0|121\n" +
      "2|112.0|122\n" +
      "3|113.0|123\n" +
      "4|114.0|124\n" +
      "5|115.0|125\n" +
      "6|116.0|126\n" +
      "7|NULL|127\n" +
      "8|118.2|128\n" +
      "9|119.8|129").getBytes(StandardCharsets.UTF_8);
void assertTablesHaveSameValues(HashMap<Object, Integer>[] expectedTable, Table table) {
assertEquals(expectedTable.length, table.getNumberOfColumns());
int numCols = table.getNumberOfColumns();
long numRows = table.getRowCount();
for (int col = 0; col < numCols; col++) {
for (long row = 0; row < numRows; row++) {
try (HostColumnVector cv = table.getColumn(col).copyToHost()) {
Object key = 0;
if (cv.getType().equals(DType.INT32)) {
key = cv.getInt(row);
} else {
key = cv.getDouble(row);
}
assertTrue(expectedTable[col].containsKey(key));
Integer count = expectedTable[col].get(key);
if (count == 1) {
expectedTable[col].remove(key);
} else {
expectedTable[col].put(key, count - 1);
}
}
}
}
for (int i = 0 ; i < expectedTable.length ; i++) {
assertTrue(expectedTable[i].isEmpty());
}
}
@Test
void testDistinctCount() {
try (Table table1 = new Table.TestBuilder()
.column(5, 3, null, null, 5)
.build()) {
assertEquals(3, table1.distinctCount());
assertEquals(4, table1.distinctCount(NullEquality.UNEQUAL));
}
}
@Test
void testMergeSimple() {
try (Table table1 = new Table.TestBuilder()
.column(5, 3, 3, 1, 1)
.column(5, 3, null, 1, 2)
.column(1, 3, 5, 7, 9)
.build();
Table table2 = new Table.TestBuilder()
.column(1, 2, 7)
.column(3, 2, 2)
.column(1, 3, 10)
.build();
Table expected = new Table.TestBuilder()
.column(1, 1, 1, 2, 3, 3, 5, 7)
.column(3, 2, 1, 2, null, 3, 5, 2)
.column(1, 9, 7, 3, 5, 3, 1, 10)
.build();
Table sortedTable1 = table1.orderBy(OrderByArg.asc(0), OrderByArg.desc(1));
Table sortedTable2 = table2.orderBy(OrderByArg.asc(0), OrderByArg.desc(1));
Table merged = Table.merge(Arrays.asList(sortedTable1, sortedTable2), OrderByArg.asc(0), OrderByArg.desc(1))) {
assertTablesAreEqual(expected, merged);
}
}
@Test
void testOrderByAD() {
try (Table table = new Table.TestBuilder()
.column(5, 3, 3, 1, 1)
.column(5, 3, 4, 1, 2)
.column(1, 3, 5, 7, 9)
.build();
Table expected = new Table.TestBuilder()
.column(1, 1, 3, 3, 5)
.column(2, 1, 4, 3, 5)
.column(9, 7, 5, 3, 1)
.build();
Table sortedTable = table.orderBy(OrderByArg.asc(0), OrderByArg.desc(1))) {
assertTablesAreEqual(expected, sortedTable);
}
}
@Test
void testSortOrderSimple() {
try (Table table = new Table.TestBuilder()
.column(5, 3, 3, 1, 1)
.column(5, 3, 4, 1, 2)
.column(1, 3, 5, 7, 9)
.build();
Table expected = new Table.TestBuilder()
.column(1, 1, 3, 3, 5)
.column(2, 1, 4, 3, 5)
.column(9, 7, 5, 3, 1)
.build();
ColumnVector gatherMap = table.sortOrder(OrderByArg.asc(0), OrderByArg.desc(1));
Table sortedTable = table.gather(gatherMap)) {
assertTablesAreEqual(expected, sortedTable);
}
}
@Test
void testOrderByDD() {
try (Table table = new Table.TestBuilder()
.column(5, 3, 3, 1, 1)
.column(5, 3, 4, 1, 2)
.column(1, 3, 5, 7, 9)
.build();
Table expected = new Table.TestBuilder()
.column(5, 3, 3, 1, 1)
.column(5, 4, 3, 2, 1)
.column(1, 5, 3, 9, 7)
.build();
Table sortedTable = table.orderBy(OrderByArg.desc(0), OrderByArg.desc(1))) {
assertTablesAreEqual(expected, sortedTable);
}
}
@Test
void testOrderByWithNulls() {
try (Table table = new Table.TestBuilder()
.column(5, null, 3, 1, 1)
.column(5, 3, 4, null, null)
.column("4", "3", "2", "1", "0")
.column(1, 3, 5, 7, 9)
.build();
Table expected = new Table.TestBuilder()
.column(1, 1, 3, 5, null)
.column(null, null, 4, 5, 3)
.column("1", "0", "2", "4", "3")
.column(7, 9, 5, 1, 3)
.build();
Table sortedTable = table.orderBy(OrderByArg.asc(0), OrderByArg.desc(1))) {
assertTablesAreEqual(expected, sortedTable);
}
}
@Test
void testOrderByWithNullsAndStrings() {
try (Table table = new Table.TestBuilder()
.column("4", "3", "2", "1", "0")
.column(5, null, 3, 1, 1)
.column(5, 3, 4, null, null)
.column(1, 3, 5, 7, 9)
.build();
Table expected = new Table.TestBuilder()
.column("0", "1", "2", "3", "4")
.column(1, 1, 3, null, 5)
.column(null, null, 4, 3, 5)
.column(9, 7, 5, 3, 1)
.build();
Table sortedTable = table.orderBy(OrderByArg.asc(0))) {
assertTablesAreEqual(expected, sortedTable);
}
}
  @Test
  void testTableCreationIncreasesRefCountWithDoubleFree() {
    //tests the Table increases the refcount on column vectors
    // Creating the Table bumps each vector's refcount, so the explicit closes inside
    // the inner try succeed (dropping the Table's references when it closes).
    // The outer try-with-resources then closes v1/v2 a second time, which must throw.
    assertThrows(IllegalStateException.class, () -> {
      try (ColumnVector v1 = ColumnVector.build(DType.INT32, 5, Range.appendInts(5));
           ColumnVector v2 = ColumnVector.build(DType.INT32, 5, Range.appendInts(5))) {
        assertDoesNotThrow(() -> {
          try (Table t = new Table(new ColumnVector[]{v1, v2})) {
            v1.close();
            v2.close();
          }
        });
      }
    });
  }
@Test
void testGetRows() {
try (ColumnVector v1 = ColumnVector.build(DType.INT32, 5, Range.appendInts(5));
ColumnVector v2 = ColumnVector.build(DType.INT32, 5, Range.appendInts(5));
Table t = new Table(new ColumnVector[]{v1, v2})) {
assertEquals(5, t.getRowCount());
}
}
@Test
void testSettingNullVectors() {
ColumnVector[] columnVectors = null;
assertThrows(AssertionError.class, () -> new Table(columnVectors));
}
@Test
void testAllRowsSize() {
try (ColumnVector v1 = ColumnVector.build(DType.INT32, 4, Range.appendInts(4));
ColumnVector v2 = ColumnVector.build(DType.INT32, 5, Range.appendInts(5))) {
assertThrows(AssertionError.class, () -> {
try (Table t = new Table(new ColumnVector[]{v1, v2})) {
}
});
}
}
@Test
void testGetNumberOfColumns() {
try (ColumnVector v1 = ColumnVector.build(DType.INT32, 5, Range.appendInts(5));
ColumnVector v2 = ColumnVector.build(DType.INT32, 5, Range.appendInts(5));
Table t = new Table(new ColumnVector[]{v1, v2})) {
assertEquals(2, t.getNumberOfColumns());
}
}
@Test
void testReadJSONFile() {
Schema schema = Schema.builder()
.column(DType.STRING, "name")
.column(DType.INT32, "age")
.build();
JSONOptions opts = JSONOptions.builder()
.withLines(true)
.build();
try (Table expected = new Table.TestBuilder()
.column("Michael", "Andy", "Justin")
.column(null, 30, 19)
.build();
Table table = Table.readJSON(schema, opts, TEST_SIMPLE_JSON_FILE)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadJSONFromDataSource() throws IOException {
Schema schema = Schema.builder()
.column(DType.STRING, "name")
.column(DType.INT32, "age")
.build();
JSONOptions opts = JSONOptions.builder()
.withLines(true)
.build();
try (Table expected = new Table.TestBuilder()
.column("Michael", "Andy", "Justin")
.column(null, 30, 19)
.build();
MultiBufferDataSource source = sourceFrom(TEST_SIMPLE_JSON_FILE);
Table table = Table.readJSON(schema, opts, source)) {
assertTablesAreEqual(expected, table);
}
}
  /**
   * Reading a JSON file containing malformed lines: with recovery enabled the bad
   * lines become all-null rows, without it the read must fail outright.
   */
  @Test
  void testReadJSONFileWithInvalidLines() {
    Schema schema = Schema.builder()
        .column(DType.STRING, "name")
        .column(DType.INT32, "age")
        .build();
    // test with recoverWithNulls=true
    {
      JSONOptions opts = JSONOptions.builder()
          .withLines(true)
          .withRecoverWithNull(true)
          .build();
      // The invalid line shows up as a row of nulls in position 2.
      try (Table expected = new Table.TestBuilder()
              .column("Michael", "Andy", null, "Justin")
              .column(null, 30, null, 19)
              .build();
           Table table = Table.readJSON(schema, opts, TEST_JSON_ERROR_FILE)) {
        assertTablesAreEqual(expected, table);
      }
    }
    // test with recoverWithNulls=false
    {
      JSONOptions opts = JSONOptions.builder()
          .withLines(true)
          .withRecoverWithNull(false)
          .build();
      assertThrows(CudfException.class, () ->
          Table.readJSON(schema, opts, TEST_JSON_ERROR_FILE));
    }
  }
@Test
void testReadJSONFileWithDifferentColumnOrder() {
Schema schema = Schema.builder()
.column(DType.INT32, "age")
.column(DType.STRING, "name")
.build();
JSONOptions opts = JSONOptions.builder()
.withLines(true)
.build();
try (Table expected = new Table.TestBuilder()
.column(null, 30, 19)
.column("Michael", "Andy", "Justin")
.build();
Table table = Table.readJSON(schema, opts, TEST_SIMPLE_JSON_FILE)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadJSONBufferInferred() {
JSONOptions opts = JSONOptions.builder()
.withDayFirst(true)
.build();
byte[] data = ("[false,A,1,2]\n" +
"[true,B,2,3]\n" +
"[false,C,3,4]\n" +
"[true,D,4,5]").getBytes(StandardCharsets.UTF_8);
try (Table expected = new Table.TestBuilder()
.column(false, true, false, true)
.column("A", "B", "C", "D")
.column(1L, 2L, 3L, 4L)
.column(2L, 3L, 4L, 5L)
.build();
Table table = Table.readJSON(Schema.INFERRED, opts, data)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadJSONSubColumns() {
// JSON file has 2 columns, here only read 1 column
Schema schema = Schema.builder()
.column(DType.INT32, "age")
.build();
JSONOptions opts = JSONOptions.builder()
.withLines(true)
.build();
try (Table expected = new Table.TestBuilder()
.column(null, 30, 19)
.build();
Table table = Table.readJSON(schema, opts, TEST_SIMPLE_JSON_FILE)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadJSONBuffer() {
// JSON reader will set the column according to the iterator if can't infer the name
// So we must set the same name accordingly
Schema schema = Schema.builder()
.column(DType.STRING, "0")
.column(DType.INT32, "1")
.column(DType.INT32, "2")
.build();
JSONOptions opts = JSONOptions.builder()
.build();
byte[] data = ("[A,1,2]\n" +
"[B,2,3]\n" +
"[C,3,4]\n" +
"[D,4,5]").getBytes(StandardCharsets.UTF_8);
try (Table expected = new Table.TestBuilder()
.column("A", "B", "C", "D")
.column(1, 2, 3, 4)
.column(2, 3, 4, 5)
.build();
Table table = Table.readJSON(schema, opts, data)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadJSONBufferWithOffset() {
// JSON reader will set the column according to the iterator if can't infer the name
// So we must set the same name accordingly
Schema schema = Schema.builder()
.column(DType.STRING, "0")
.column(DType.INT32, "1")
.column(DType.INT32, "2")
.build();
JSONOptions opts = JSONOptions.builder()
.build();
int bytesToIgnore = 8;
byte[] data = ("[A,1,2]\n" +
"[B,2,3]\n" +
"[C,3,4]\n" +
"[D,4,5]").getBytes(StandardCharsets.UTF_8);
try (Table expected = new Table.TestBuilder()
.column("B", "C", "D")
.column(2, 3, 4)
.column(3, 4, 5)
.build();
Table table = Table.readJSON(schema, opts, data,
bytesToIgnore, data.length - bytesToIgnore)) {
assertTablesAreEqual(expected, table);
}
}
  /**
   * Reads object-style JSON from a host buffer via the TableWithMeta API and checks
   * both the released table contents and the column names recovered from the data.
   */
  @Test
  void testReadJSONTableWithMeta() {
    JSONOptions opts = JSONOptions.builder()
        .build();
    byte[] data = ("{ \"A\": 1, \"B\": 2, \"C\": \"X\"}\n" +
        "{ \"A\": 2, \"B\": 4, \"C\": \"Y\"}\n" +
        "{ \"A\": 3, \"B\": 6, \"C\": \"Z\"}\n" +
        "{ \"A\": 4, \"B\": 8, \"C\": \"W\"}\n").getBytes(StandardCharsets.UTF_8);
    final int numBytes = data.length;
    // Stage the JSON bytes in host memory; the buffer must outlive the read call.
    try (HostMemoryBuffer hostbuf = hostMemoryAllocator.allocate(numBytes)) {
      hostbuf.setBytes(0, data, 0, numBytes);
      // Integer values are inferred as INT64 (hence the long literals below).
      try (Table expected = new Table.TestBuilder()
              .column(1L, 2L, 3L, 4L)
              .column(2L, 4L, 6L, 8L)
              .column("X", "Y", "Z", "W")
              .build();
           TableWithMeta tablemeta = Table.readJSON(opts, hostbuf, 0, numBytes);
           Table table = tablemeta.releaseTable()) {
        assertArrayEquals(new String[] { "A", "B", "C" }, tablemeta.getColumnNames());
        assertTablesAreEqual(expected, table);
      }
    }
  }
@Test
void testReadCSVPrune() {
Schema schema = Schema.builder()
.column(DType.INT32, "A")
.column(DType.FLOAT64, "B")
.column(DType.INT64, "C")
.build();
CSVOptions opts = CSVOptions.builder()
.includeColumn("A")
.includeColumn("B")
.build();
try (Table expected = new Table.TestBuilder()
.column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
.column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.2, 119.8)
.build();
Table table = Table.readCSV(schema, opts, TEST_SIMPLE_CSV_FILE)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadCSVBufferInferred() {
CSVOptions opts = CSVOptions.builder()
.includeColumn("A")
.includeColumn("B")
.hasHeader()
.withComment('#')
.build();
byte[] data = ("A,B,C\n" +
"0,110.0,120'\n" +
"#0.5,1.0,200\n" +
"1,111.0,121\n" +
"2,112.0,122\n" +
"3,113.0,123\n" +
"4,114.0,124\n" +
"5,115.0,125\n" +
"6,116.0,126\n" +
"7,117.0,127\n" +
"8,118.2,128\n" +
"9,119.8,129").getBytes(StandardCharsets.UTF_8);
try (Table expected = new Table.TestBuilder()
.column(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L)
.column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.2, 119.8)
.build();
Table table = Table.readCSV(Schema.INFERRED, opts, data)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadCSVBuffer() {
CSVOptions opts = CSVOptions.builder()
.includeColumn("A")
.includeColumn("B")
.hasHeader()
.withDelim('|')
.withQuote('\'')
.withNullValue("NULL")
.build();
try (Table expected = new Table.TestBuilder()
.column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
.column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, null, 118.2, 119.8)
.build();
Table table = Table.readCSV(TableTest.CSV_DATA_BUFFER_SCHEMA, opts,
TableTest.CSV_DATA_BUFFER)) {
assertTablesAreEqual(expected, table);
}
}
byte[][] sliceBytes(byte[] data, int slices) {
slices = Math.min(data.length, slices);
// We are not going to worry about making it super even here.
// The last one gets the extras.
int bytesPerSlice = data.length / slices;
byte[][] ret = new byte[slices][];
int startingAt = 0;
for (int i = 0; i < (slices - 1); i++) {
ret[i] = new byte[bytesPerSlice];
System.arraycopy(data, startingAt, ret[i], 0, bytesPerSlice);
startingAt += bytesPerSlice;
}
// Now for the last one
ret[slices - 1] = new byte[data.length - startingAt];
System.arraycopy(data, startingAt, ret[slices - 1], 0, data.length - startingAt);
return ret;
}
@Test
void testReadCSVBufferMultiBuffer() {
CSVOptions opts = CSVOptions.builder()
.includeColumn("A")
.includeColumn("B")
.hasHeader()
.withDelim('|')
.withQuote('\'')
.withNullValue("NULL")
.build();
byte[][] data = sliceBytes(CSV_DATA_BUFFER, 10);
try (Table expected = new Table.TestBuilder()
.column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
.column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, null, 118.2, 119.8)
.build();
MultiBufferDataSource source = sourceFrom(data);
Table table = Table.readCSV(TableTest.CSV_DATA_BUFFER_SCHEMA, opts, source)) {
assertTablesAreEqual(expected, table);
}
}
public static byte[] arrayFrom(File f) throws IOException {
long len = f.length();
if (len > Integer.MAX_VALUE) {
throw new IllegalArgumentException("Sorry cannot read " + f +
" into an array it does not fit");
}
int remaining = (int)len;
byte[] ret = new byte[remaining];
try (java.io.FileInputStream fin = new java.io.FileInputStream(f)) {
int at = 0;
while (remaining > 0) {
int amount = fin.read(ret, at, remaining);
at += amount;
remaining -= amount;
}
}
return ret;
}
  /**
   * Builds a MultiBufferDataSource backed by the full contents of {@code f}.
   * The staging HostMemoryBuffer is closed before returning; this relies on the
   * MultiBufferDataSource retaining its own reference to the buffer
   * (NOTE(review): presumably via ref-counting — confirm against MultiBufferDataSource).
   *
   * @param f file to load into host memory
   * @return a data source over the file's bytes
   * @throws IOException if reading the file fails
   */
  public static MultiBufferDataSource sourceFrom(File f) throws IOException {
    long len = f.length();
    // Copy through a bounded scratch array (at most 32 KiB per read).
    byte[] tmp = new byte[(int)Math.min(32 * 1024, len)];
    try (HostMemoryBuffer buffer = HostMemoryBuffer.allocate(len)) {
      try (java.io.FileInputStream fin = new java.io.FileInputStream(f)) {
        long at = 0;
        while (at < len) {
          int amount = fin.read(tmp);
          buffer.setBytes(at, tmp, 0, amount);
          at += amount;
        }
      }
      return new MultiBufferDataSource(buffer);
    }
  }
  /**
   * Builds a MultiBufferDataSource backed by a copy of {@code data} in host memory.
   * The staging buffer is closed on return; the data source is assumed to keep
   * its own reference (see sourceFrom(File)).
   *
   * @param data bytes to expose through the data source
   * @return a data source over the bytes
   */
  public static MultiBufferDataSource sourceFrom(byte[] data) {
    long len = data.length;
    try (HostMemoryBuffer buffer = HostMemoryBuffer.allocate(len)) {
      buffer.setBytes(0, data, 0, len);
      return new MultiBufferDataSource(buffer);
    }
  }
  /**
   * Builds a MultiBufferDataSource from multiple byte chunks, one host buffer per chunk.
   * The local buffers are closed in the finally block after the data source is built;
   * as with the other overloads the data source is assumed to retain its own references.
   *
   * @param data chunks to expose, in order
   * @return a data source spanning all the chunks
   */
  public static MultiBufferDataSource sourceFrom(byte[][] data) {
    HostMemoryBuffer[] buffers = new HostMemoryBuffer[data.length];
    try {
      for (int i = 0; i < data.length; i++) {
        byte[] subData = data[i];
        buffers[i] = HostMemoryBuffer.allocate(subData.length);
        buffers[i].setBytes(0, subData, 0, subData.length);
      }
      return new MultiBufferDataSource(buffers);
    } finally {
      // Drop our local references whether or not construction succeeded;
      // null entries mean allocation failed partway through.
      for (HostMemoryBuffer buffer: buffers) {
        if (buffer != null) {
          buffer.close();
        }
      }
    }
  }
@Test
void testReadCSVDataSource() {
CSVOptions opts = CSVOptions.builder()
.includeColumn("A")
.includeColumn("B")
.hasHeader()
.withDelim('|')
.withQuote('\'')
.withNullValue("NULL")
.build();
try (Table expected = new Table.TestBuilder()
.column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
.column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, null, 118.2, 119.8)
.build();
MultiBufferDataSource source = sourceFrom(TableTest.CSV_DATA_BUFFER);
Table table = Table.readCSV(TableTest.CSV_DATA_BUFFER_SCHEMA, opts, source)) {
assertTablesAreEqual(expected, table);
}
}
@Test
void testReadCSVWithOffset() {
CSVOptions opts = CSVOptions.builder()
.includeColumn("A")
.includeColumn("B")
.hasHeader(false)
.withDelim('|')
.withNullValue("NULL")
.build();
int bytesToIgnore = 24;
try (Table expected = new Table.TestBuilder()
.column(1, 2, 3, 4, 5, 6, 7, 8, 9)
.column(111.0, 112.0, 113.0, 114.0, 115.0, 116.0, null, 118.2, 119.8)
.build();
Table table = Table.readCSV(TableTest.CSV_DATA_BUFFER_SCHEMA, opts,
TableTest.CSV_DATA_BUFFER, bytesToIgnore, CSV_DATA_BUFFER.length - bytesToIgnore)) {
assertTablesAreEqual(expected, table);
}
}
  /**
   * CSV parsing of booleans (with multiple true spellings), quoted strings
   * (including a surrogate-pair character), and NULL markers, while pruning
   * column C out of the result.
   */
  @Test
  void testReadCSVOtherTypes() {
    final byte[] CSV_DATA_WITH_TYPES = ("A,B,C,D\n" +
        "0,true,120,\"zero\"\n" +
        "1,True,121,\"one\"\n" +
        "2,false,122,\"two\"\n" +
        "3,false,123,\"three\"\n" +
        "4,TRUE,124,\"four\"\n" +
        "5,true,125,\"five\"\n" +
        "6,true,126,\"six\"\n" +
        "7,NULL,127,NULL\n" +
        "8,false,128,\"eight\"\n" +
        "9,false,129,\"nine\uD80C\uDC3F\"").getBytes(StandardCharsets.UTF_8);
    final Schema CSV_DATA_WITH_TYPES_SCHEMA = Schema.builder()
        .column(DType.INT32, "A")
        .column(DType.BOOL8, "B")
        .column(DType.INT64, "C")
        .column(DType.STRING, "D")
        .build();
    // Column C is deliberately excluded; "true"/"True"/"TRUE" all parse as true.
    CSVOptions opts = CSVOptions.builder()
        .includeColumn("A", "B", "D")
        .hasHeader(true)
        .withNullValue("NULL")
        .withQuote('"')
        .withTrueValue("true", "True", "TRUE")
        .withFalseValue("false")
        .build();
    try (Table expected = new Table.TestBuilder()
             .column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
             .column(true, true, false, false, true, true, true, null, false, false)
             .column("zero", "one", "two", "three", "four", "five", "six", null, "eight", "nine\uD80C\uDC3F")
             .build();
         Table table = Table.readCSV(CSV_DATA_WITH_TYPES_SCHEMA, opts, CSV_DATA_WITH_TYPES)) {
      assertTablesAreEqual(expected, table);
    }
  }
@Test
void testReadCSV() {
Schema schema = Schema.builder()
.column(DType.INT32, "A")
.column(DType.FLOAT64, "B")
.column(DType.INT64, "C")
.column(DType.STRING, "D")
.build();
try (Table expected = new Table.TestBuilder()
.column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
.column(110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.2, 119.8)
.column(120L, 121L, 122L, 123L, 124L, 125L, 126L, 127L, 128L, 129L)
.column("one", "two", "three", "four", "five", "six", "seven\ud801\uddb8", "eight\uBF68", "nine\u03E8", "ten")
.build();
Table table = Table.readCSV(schema, TEST_SIMPLE_CSV_FILE)) {
assertTablesAreEqual(expected, table);
}
}
  /**
   * Round-trips a four-column table through CSV on disk and asserts the re-read
   * table equals the original, for a given delimiter/header/boolean-spelling combo.
   *
   * @param fieldDelim field delimiter used for both write and read
   * @param includeHeader whether to emit (and expect) a header row
   * @param trueValue spelling written and parsed for boolean true
   * @param falseValue spelling written and parsed for boolean false
   * @throws IOException if the temp file cannot be created or read
   */
  private void testWriteCSVToFileImpl(char fieldDelim, boolean includeHeader,
                                      String trueValue, String falseValue) throws IOException {
    File outputFile = File.createTempFile("testWriteCSVToFile", ".csv");
    Schema schema = Schema.builder()
        .column(DType.INT32, "i")
        .column(DType.FLOAT64, "f")
        .column(DType.BOOL8, "b")
        .column(DType.STRING, "str")
        .build();
    // Writer and reader options must agree on delimiter and boolean spellings.
    CSVWriterOptions writeOptions = CSVWriterOptions.builder()
        .withColumnNames(schema.getColumnNames())
        .withIncludeHeader(includeHeader)
        .withFieldDelimiter((byte)fieldDelim)
        .withRowDelimiter("\n")
        .withNullValue("\\N")
        .withTrueValue(trueValue)
        .withFalseValue(falseValue)
        .build();
    try (Table inputTable
             = new Table.TestBuilder()
             .column(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
             .column(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
             .column(false, true, false, true, false, true, false, true, false, true)
             .column("All", "the", "leaves", "are", "brown", "and", "the", "sky", "is", "grey")
             .build()) {
      inputTable.writeCSVToFile(writeOptions, outputFile.getAbsolutePath());
      // Read back.
      CSVOptions readOptions = CSVOptions.builder()
          .includeColumn("i")
          .includeColumn("f")
          .includeColumn("b")
          .includeColumn("str")
          .hasHeader(includeHeader)
          .withDelim(fieldDelim)
          .withTrueValue(trueValue)
          .withFalseValue(falseValue)
          .build();
      try (Table readTable = Table.readCSV(schema, readOptions, outputFile)) {
        assertTablesAreEqual(inputTable, readTable);
      }
    } finally {
      // Always clean up the temp file, even if the round trip failed.
      outputFile.delete();
    }
  }
@Test
void testWriteCSVToFile() throws IOException {
final boolean INCLUDE_HEADER = true;
final boolean NO_HEADER = false;
testWriteCSVToFileImpl(',', INCLUDE_HEADER, "true", "false");
testWriteCSVToFileImpl(',', NO_HEADER, "TRUE", "FALSE");
testWriteCSVToFileImpl('\u0001', INCLUDE_HEADER, "T", "F");
testWriteCSVToFileImpl('\u0001', NO_HEADER, "True", "False");
}
  /**
   * Writes strings containing the delimiter, quotes and newlines with QuoteStyle.NONE,
   * then reads them back the same way.  Because nothing is quoted, embedded delimiters
   * and newlines split the data: the expected table reflects that lossy round trip.
   *
   * @param fieldDelim field delimiter to embed in the data and use for write/read
   * @throws IOException if the temp file cannot be created or read
   */
  private void testWriteUnquotedCSVToFileImpl(char fieldDelim) throws IOException {
    File outputFile = File.createTempFile("testWriteUnquotedCSVToFile", ".csv");
    Schema schema = Schema.builder()
        .column(DType.STRING, "str")
        .build();
    CSVWriterOptions writeOptions = CSVWriterOptions.builder()
        .withColumnNames(schema.getColumnNames())
        .withIncludeHeader(false)
        .withFieldDelimiter((byte)fieldDelim)
        .withRowDelimiter("\n")
        .withNullValue("\\N")
        .withQuoteStyle(QuoteStyle.NONE)
        .build();
    // Deliberately hostile strings: embedded delimiters, a quote, and newlines.
    try (Table inputTable
             = new Table.TestBuilder()
             .column("All" + fieldDelim + "the" + fieldDelim + "leaves",
                     "are\"brown",
                     "and\nthe\nsky\nis\ngrey")
             .build()) {
      inputTable.writeCSVToFile(writeOptions, outputFile.getAbsolutePath());
      // Read back.
      CSVOptions readOptions = CSVOptions.builder()
          .includeColumn("str")
          .hasHeader(false)
          .withDelim(fieldDelim)
          .withQuoteStyle(QuoteStyle.NONE)
          .build();
      // Only the first field of the delimiter-bearing row survives in column "str";
      // each embedded newline became its own row.
      try (Table readTable = Table.readCSV(schema, readOptions, outputFile);
           Table expected = new Table.TestBuilder()
               .column("All", "are\"brown", "and", "the", "sky", "is", "grey")
               .build()) {
        assertTablesAreEqual(expected, readTable);
      }
    } finally {
      outputFile.delete();
    }
  }
@Test
void testWriteUnquotedCSVToFile() throws IOException {
testWriteUnquotedCSVToFileImpl(',');
testWriteUnquotedCSVToFileImpl('\u0001');
}
  /**
   * Chunked (streaming) CSV writes with QuoteStyle.NONE: the same table is written
   * three times through one TableWriter into a memory buffer, then read back and
   * compared against three concatenated copies of the expected (lossy) section.
   *
   * @param fieldDelim field delimiter to embed in the data and use for write/read
   * @throws IOException declared for symmetry with the other writer tests
   */
  private void testChunkedCSVWriterUnquotedImpl(char fieldDelim) throws IOException {
    Schema schema = Schema.builder()
        .column(DType.STRING, "str")
        .build();
    CSVWriterOptions writeOptions = CSVWriterOptions.builder()
        .withColumnNames(schema.getColumnNames())
        .withIncludeHeader(false)
        .withFieldDelimiter((byte)fieldDelim)
        .withRowDelimiter("\n")
        .withNullValue("\\N")
        .withQuoteStyle(QuoteStyle.NONE)
        .build();
    try (Table inputTable
             = new Table.TestBuilder()
             .column("All" + fieldDelim + "the" + fieldDelim + "leaves",
                     "are\"brown",
                     "and\nthe\nsky\nis\ngrey")
             .build();
         MyBufferConsumer consumer = new MyBufferConsumer()) {
      // Stream the same table three times through one writer.
      try (TableWriter writer = Table.getCSVBufferWriter(writeOptions, consumer)) {
        writer.write(inputTable);
        writer.write(inputTable);
        writer.write(inputTable);
      }
      // Read back.
      CSVOptions readOptions = CSVOptions.builder()
          .includeColumn("str")
          .hasHeader(false)
          .withDelim(fieldDelim)
          .withNullValue("\\N")
          .withQuoteStyle(QuoteStyle.NONE)
          .build();
      // Unquoted writes lose the embedded-delimiter tail and split on newlines
      // (see testWriteUnquotedCSVToFileImpl); expect three copies of that section.
      try (Table readTable = Table.readCSV(schema, readOptions, consumer.buffer, 0, consumer.offset);
           Table section = new Table.TestBuilder()
               .column("All", "are\"brown", "and", "the", "sky", "is", "grey")
               .build();
           Table expected = Table.concatenate(section, section, section)) {
        assertTablesAreEqual(expected, readTable);
      }
    }
  }
@Test
void testChunkedCSVWriterUnquoted() throws IOException {
testChunkedCSVWriterUnquotedImpl(',');
testChunkedCSVWriterUnquotedImpl('\u0001');
}
  /**
   * Chunked CSV round trip for a four-column table containing nulls in every column:
   * writes the table three times through one buffer writer, reads the buffer back,
   * and expects three concatenated copies of the input.
   *
   * @param fieldDelim field delimiter used for both write and read
   * @param includeHeader whether to emit (and expect) a header row
   * @param trueValue spelling written and parsed for boolean true
   * @param falseValue spelling written and parsed for boolean false
   * @throws IOException declared for symmetry with the other writer tests
   */
  private void testChunkedCSVWriterImpl(char fieldDelim, boolean includeHeader,
                                        String trueValue, String falseValue) throws IOException {
    Schema schema = Schema.builder()
        .column(DType.INT32, "i")
        .column(DType.FLOAT64, "f")
        .column(DType.BOOL8, "b")
        .column(DType.STRING, "str")
        .build();
    CSVWriterOptions writeOptions = CSVWriterOptions.builder()
        .withColumnNames(schema.getColumnNames())
        .withIncludeHeader(includeHeader)
        .withFieldDelimiter((byte)fieldDelim)
        .withRowDelimiter("\n")
        .withNullValue("\\N")
        .withTrueValue(trueValue)
        .withFalseValue(falseValue)
        .build();
    // Each column carries at least one null to exercise the "\\N" marker end to end.
    try (Table inputTable
             = new Table.TestBuilder()
             .column(0, 1, 2, 3, 4, 5, 6, 7, 8, null)
             .column(0.0, 1.0, 2.0, 3.0, 4.0, null, 6.0, 7.0, 8.0, 9.0)
             .column(false, true, null, true, false, true, null, true, false, true)
             .column("All", "the", "leaves", "are", "brown", "and", "the", "sky", "is", null)
             .build();
         MyBufferConsumer consumer = new MyBufferConsumer()) {
      try (TableWriter writer = Table.getCSVBufferWriter(writeOptions, consumer)) {
        writer.write(inputTable);
        writer.write(inputTable);
        writer.write(inputTable);
      }
      // Read back.
      CSVOptions readOptions = CSVOptions.builder()
          .includeColumn("i")
          .includeColumn("f")
          .includeColumn("b")
          .includeColumn("str")
          .hasHeader(includeHeader)
          .withDelim(fieldDelim)
          .withNullValue("\\N")
          .withTrueValue(trueValue)
          .withFalseValue(falseValue)
          .build();
      try (Table readTable = Table.readCSV(schema, readOptions, consumer.buffer, 0, consumer.offset);
           Table expected = Table.concatenate(inputTable, inputTable, inputTable)) {
        assertTablesAreEqual(expected, readTable);
      }
    }
  }
@Test
void testChunkedCSVWriter() throws IOException {
final boolean INCLUDE_HEADER = true;
final boolean NO_HEADER = false;
testChunkedCSVWriterImpl(',', NO_HEADER, "true", "false");
testChunkedCSVWriterImpl(',', INCLUDE_HEADER, "TRUE", "FALSE");
testChunkedCSVWriterImpl('\u0001', NO_HEADER, "T", "F");
testChunkedCSVWriterImpl('\u0001', INCLUDE_HEADER, "True", "False");
}
@Test
void testReadParquet() {
ParquetOptions opts = ParquetOptions.builder()
.includeColumn("loan_id")
.includeColumn("zip")
.includeColumn("num_units")
.build();
try (Table table = Table.readParquet(opts, TEST_PARQUET_FILE)) {
long rows = table.getRowCount();
assertEquals(1000, rows);
assertTableTypes(new DType[]{DType.INT64, DType.INT32, DType.INT32}, table);
}
}
@Test
void testReadParquetFromDataSource() throws IOException {
ParquetOptions opts = ParquetOptions.builder()
.includeColumn("loan_id")
.includeColumn("zip")
.includeColumn("num_units")
.build();
try (MultiBufferDataSource source = sourceFrom(TEST_PARQUET_FILE);
Table table = Table.readParquet(opts, source)) {
long rows = table.getRowCount();
assertEquals(1000, rows);
assertTableTypes(new DType[]{DType.INT64, DType.INT32, DType.INT32}, table);
}
}
@Test
void testReadParquetMultiBuffer() throws IOException {
ParquetOptions opts = ParquetOptions.builder()
.includeColumn("loan_id")
.includeColumn("zip")
.includeColumn("num_units")
.build();
byte [][] data = sliceBytes(arrayFrom(TEST_PARQUET_FILE), 10);
try (MultiBufferDataSource source = sourceFrom(data);
Table table = Table.readParquet(opts, source)) {
long rows = table.getRowCount();
assertEquals(1000, rows);
assertTableTypes(new DType[]{DType.INT64, DType.INT32, DType.INT32}, table);
}
}
@Test
void testReadParquetBinary() {
ParquetOptions opts = ParquetOptions.builder()
.includeColumn("value1", true)
.includeColumn("value2", false)
.build();
try (Table table = Table.readParquet(opts, TEST_PARQUET_FILE_BINARY)) {
assertTableTypes(new DType[]{DType.STRING, DType.STRING}, table);
ColumnView columnView = table.getColumn(0);
assertEquals(DType.STRING, columnView.getType());
}
}
List<Byte> asList(String str) {
byte[] bytes = str.getBytes(Charsets.UTF_8);
List<Byte> ret = new ArrayList<>(bytes.length);
for(int i = 0; i < bytes.length; i++) {
ret.add(bytes[i]);
}
return ret;
}
  /**
   * Writes a binary (LIST of UINT8) column through the chunked Parquet writer and
   * verifies it by reading the result back and comparing against an equivalent
   * STRING table, because the binary read API is not yet stable enough to use here.
   */
  @Test
  void testParquetWriteToBufferChunkedBinary() {
    // We create a String table and a Binary table with the same data in them to
    // avoid trying to read the binary data back in the same way. At least until the
    // API for that is stable
    String string1 = "ABC";
    String string2 = "DEF";
    List<Byte> bin1 = asList(string1);
    List<Byte> bin2 = asList(string2);
    try (Table binTable = new Table.TestBuilder()
            .column(new ListType(true, new BasicType(false, DType.UINT8)),
                bin1, bin2)
            .build();
         Table stringTable = new Table.TestBuilder()
             .column(string1, string2)
             .build();
         MyBufferConsumer consumer = new MyBufferConsumer()) {
      // Mark column "_c0" as binary so the writer emits it as Parquet BYTE_ARRAY.
      ParquetWriterOptions options = ParquetWriterOptions.builder()
          .withBinaryColumn("_c0", true)
          .build();
      // Three chunked writes -> three copies of the data in the output buffer.
      try (TableWriter writer = Table.writeParquetChunked(options, consumer)) {
        writer.write(binTable);
        writer.write(binTable);
        writer.write(binTable);
      }
      ParquetOptions opts = ParquetOptions.builder()
          .includeColumn("_c0")
          .build();
      try (Table table1 = Table.readParquet(opts, consumer.buffer, 0, consumer.offset);
           Table concat = Table.concatenate(stringTable, stringTable, stringTable)) {
        assertTablesAreEqual(concat, table1);
      }
    }
  }
@Test
void testReadParquetBuffer() throws IOException {
ParquetOptions opts = ParquetOptions.builder()
.includeColumn("loan_id")
.includeColumn("coborrow_credit_score")
.includeColumn("borrower_credit_score")
.build();
byte[] buffer = new byte[(int) TEST_PARQUET_FILE.length() + 1024];
int bufferLen = 0;
try (FileInputStream in = new FileInputStream(TEST_PARQUET_FILE)) {
bufferLen = in.read(buffer);
}
try (Table table = Table.readParquet(opts, buffer, 0, bufferLen)) {
long rows = table.getRowCount();
assertEquals(1000, rows);
assertTableTypes(new DType[]{DType.INT64, DType.FLOAT64, DType.FLOAT64}, table);
}
}
  /**
   * Reads the whole mortgage test file with no column pruning and verifies the
   * row count and the inferred type of every one of its 26 columns.
   */
  @Test
  void testReadParquetFull() {
    try (Table table = Table.readParquet(TEST_PARQUET_FILE)) {
      long rows = table.getRowCount();
      assertEquals(1000, rows);
      // One entry per file column, in file order.
      DType[] expectedTypes = new DType[]{
          DType.INT64, // loan_id
          DType.INT32, // orig_channel
          DType.FLOAT64, // orig_interest_rate
          DType.INT32, // orig_upb
          DType.INT32, // orig_loan_term
          DType.TIMESTAMP_DAYS, // orig_date
          DType.TIMESTAMP_DAYS, // first_pay_date
          DType.FLOAT64, // orig_ltv
          DType.FLOAT64, // orig_cltv
          DType.FLOAT64, // num_borrowers
          DType.FLOAT64, // dti
          DType.FLOAT64, // borrower_credit_score
          DType.INT32, // first_home_buyer
          DType.INT32, // loan_purpose
          DType.INT32, // property_type
          DType.INT32, // num_units
          DType.INT32, // occupancy_status
          DType.INT32, // property_state
          DType.INT32, // zip
          DType.FLOAT64, // mortgage_insurance_percent
          DType.INT32, // product_type
          DType.FLOAT64, // coborrow_credit_score
          DType.FLOAT64, // mortgage_insurance_type
          DType.INT32, // relocation_mortgage_indicator
          DType.INT32, // quarter
          DType.INT32 // seller_id
      };
      assertTableTypes(expectedTypes, table);
    }
  }
  /**
   * Decimal columns map to DECIMAL32/64/128 based on precision, with the scale
   * carried as a negative exponent on the DType.
   */
  @Test
  void testReadParquetContainsDecimalData() {
    try (Table table = Table.readParquet(TEST_DECIMAL_PARQUET_FILE)) {
      long rows = table.getRowCount();
      assertEquals(100, rows);
      DType[] expectedTypes = new DType[]{
          DType.create(DType.DTypeEnum.DECIMAL64, 0), // Decimal(18, 0)
          DType.create(DType.DTypeEnum.DECIMAL32, -3), // Decimal(7, 3)
          DType.create(DType.DTypeEnum.DECIMAL64, -10), // Decimal(10, 10)
          DType.create(DType.DTypeEnum.DECIMAL32, 0), // Decimal(1, 0)
          DType.create(DType.DTypeEnum.DECIMAL64, -15), // Decimal(18, 15)
          DType.create(DType.DTypeEnum.DECIMAL128, -10), // Decimal(20, 10)
          DType.INT64,
          DType.FLOAT32
      };
      assertTableTypes(expectedTypes, table);
    }
  }
@Test
void testChunkedReadParquet() {
try (ParquetChunkedReader reader = new ParquetChunkedReader(240000,
TEST_PARQUET_FILE_CHUNKED_READ)) {
int numChunks = 0;
long totalRows = 0;
while(reader.hasNext()) {
++numChunks;
try(Table chunk = reader.readChunk()) {
totalRows += chunk.getRowCount();
}
}
assertEquals(2, numChunks);
assertEquals(40000, totalRows);
}
}
@Test
void testChunkedReadParquetFromDataSource() throws IOException {
try (MultiBufferDataSource source = sourceFrom(TEST_PARQUET_FILE_CHUNKED_READ);
ParquetChunkedReader reader = new ParquetChunkedReader(240000, ParquetOptions.DEFAULT, source)) {
int numChunks = 0;
long totalRows = 0;
while(reader.hasNext()) {
++numChunks;
try(Table chunk = reader.readChunk()) {
totalRows += chunk.getRowCount();
}
}
assertEquals(2, numChunks);
assertEquals(40000, totalRows);
}
}
  /** Reads three selected columns from the Avro test file and compares against expected values. */
  @Test
  void testReadAvro() {
    AvroOptions opts = AvroOptions.builder()
        .includeColumn("bool_col")
        .includeColumn("int_col")
        .includeColumn("timestamp_col")
        .build();

    try (Table expected = new Table.TestBuilder()
        .column(true, false, true, false, true, false, true, false)
        .column(0, 1, 0, 1, 0, 1, 0, 1)
        .column(1235865600000000L, 1235865660000000L, 1238544000000000L, 1238544060000000L,
                1233446400000000L, 1233446460000000L, 1230768000000000L, 1230768060000000L)
        .build();
         Table table = Table.readAvro(opts, TEST_ALL_TYPES_PLAIN_AVRO_FILE)) {
      assertTablesAreEqual(expected, table);
    }
  }
  /** Same as testReadAvro but reads via a MultiBufferDataSource instead of a file path. */
  @Test
  void testReadAvroFromDataSource() throws IOException {
    AvroOptions opts = AvroOptions.builder()
        .includeColumn("bool_col")
        .includeColumn("int_col")
        .includeColumn("timestamp_col")
        .build();

    try (Table expected = new Table.TestBuilder()
        .column(true, false, true, false, true, false, true, false)
        .column(0, 1, 0, 1, 0, 1, 0, 1)
        .column(1235865600000000L, 1235865660000000L, 1238544000000000L, 1238544060000000L,
            1233446400000000L, 1233446460000000L, 1230768000000000L, 1230768060000000L)
        .build();
         MultiBufferDataSource source = sourceFrom(TEST_ALL_TYPES_PLAIN_AVRO_FILE);
         Table table = Table.readAvro(opts, source)) {
      assertTablesAreEqual(expected, table);
    }
  }
  /** Reads selected Avro columns from an in-memory byte buffer rather than from a file. */
  @Test
  void testReadAvroBuffer() throws IOException{
    AvroOptions opts = AvroOptions.builder()
        .includeColumn("bool_col")
        .includeColumn("timestamp_col")
        .build();

    // Load the whole file into memory to exercise the buffer-based read path.
    byte[] buffer = Files.readAllBytes(TEST_ALL_TYPES_PLAIN_AVRO_FILE.toPath());
    int bufferLen = buffer.length;
    try (Table expected = new Table.TestBuilder()
        .column(true, false, true, false, true, false, true, false)
        .column(1235865600000000L, 1235865660000000L, 1238544000000000L, 1238544060000000L,
                1233446400000000L, 1233446460000000L, 1230768000000000L, 1230768060000000L)
        .build();
         Table table = Table.readAvro(opts, buffer, 0, bufferLen)) {
      assertTablesAreEqual(expected, table);
    }
  }
  /** Reads the full Avro test file (no column selection) and compares every column's values. */
  @Test
  void testReadAvroFull() {
    try (Table expected = new Table.TestBuilder()
        .column(4, 5, 6, 7, 2, 3, 0, 1)
        .column(true, false, true, false, true, false, true, false)
        .column(0, 1, 0, 1, 0, 1, 0, 1)
        .column(0, 1, 0, 1, 0, 1, 0, 1)
        .column(0, 1, 0, 1, 0, 1, 0, 1)
        .column(0L, 10L, 0L, 10L, 0L, 10L, 0L, 10L)
        .column(0.0f, 1.100000023841858f, 0.0f, 1.100000023841858f, 0.0f, 1.100000023841858f, 0.0f, 1.100000023841858f)
        .column(0.0d, 10.1d, 0.0d, 10.1d, 0.0d, 10.1d, 0.0d, 10.1d)
        .column("03/01/09", "03/01/09", "04/01/09", "04/01/09", "02/01/09", "02/01/09", "01/01/09", "01/01/09")
        .column("0", "1", "0", "1", "0", "1", "0", "1")
        .column(1235865600000000L, 1235865660000000L, 1238544000000000L, 1238544060000000L,
                1233446400000000L, 1233446460000000L, 1230768000000000L, 1230768060000000L)
        .build();
         Table table = Table.readAvro(TEST_ALL_TYPES_PLAIN_AVRO_FILE)) {
      assertTablesAreEqual(expected, table);
    }
  }
  /** Reads three selected columns from the ORC test file and compares against expected values. */
  @Test
  void testReadORC() {
    ORCOptions opts = ORCOptions.builder()
        .includeColumn("string1")
        .includeColumn("float1")
        .includeColumn("int1")
        .build();
    try (Table expected = new Table.TestBuilder()
        .column("hi","bye")
        .column(1.0f,2.0f)
        .column(65536,65536)
        .build();
         Table table = Table.readORC(opts, TEST_ORC_FILE)) {
      assertTablesAreEqual(expected, table);
    }
  }
  /** Same as testReadORC but reads via a MultiBufferDataSource instead of a file path. */
  @Test
  void testReadORCFromDataSource() throws IOException {
    ORCOptions opts = ORCOptions.builder()
        .includeColumn("string1")
        .includeColumn("float1")
        .includeColumn("int1")
        .build();
    try (Table expected = new Table.TestBuilder()
        .column("hi","bye")
        .column(1.0f,2.0f)
        .column(65536,65536)
        .build();
         MultiBufferDataSource source = sourceFrom(TEST_ORC_FILE);
         Table table = Table.readORC(opts, source)) {
      assertTablesAreEqual(expected, table);
    }
  }
@Test
void testReadORCBuffer() throws IOException {
ORCOptions opts = ORCOptions.builder()
.includeColumn("string1")
.includeColumn("float1")
.includeColumn("int1")
.build();
int bufferLen = 0;
byte[] buffer = Files.readAllBytes(TEST_ORC_FILE.toPath());
bufferLen = buffer.length;
try (Table expected = new Table.TestBuilder()
.column("hi","bye")
.column(1.0f,2.0f)
.column(65536,65536)
.build();
Table table = Table.readORC(opts, buffer, 0, bufferLen)) {
assertTablesAreEqual(expected, table);
}
}
  /** Reads the full ORC test file (no column selection) and compares every column's values. */
  @Test
  void testReadORCFull() {
    try (Table expected = new Table.TestBuilder()
        .column(false, true)
        .column((byte)1, (byte)100)
        .column((short)1024, (short)2048)
        .column(65536, 65536)
        .column(9223372036854775807L,9223372036854775807L)
        .column(1.0f, 2.0f)
        .column(-15.0, -5.0)
        .column("hi", "bye")
        .build();
         Table table = Table.readORC(TEST_ORC_FILE)) {
      assertTablesAreEqual(expected, table);
    }
  }
  /**
   * Verifies the NumPy-types option: by default ORC promotes TIMESTAMP_DAYS to
   * TIMESTAMP_MILLISECONDS; with withNumPyTypes(false) the date column stays TIMESTAMP_DAYS.
   */
  @Test
  void testReadORCNumPyTypes() {
    // by default ORC will promote TIMESTAMP_DAYS to TIMESTAMP_MILLISECONDS
    DType found;
    try (Table table = Table.readORC(TEST_ORC_TIMESTAMP_DATE_FILE)) {
      assertEquals(2, table.getNumberOfColumns());
      found = table.getColumn(0).getType();
      assertTrue(found.isTimestampType());
      assertEquals(DType.TIMESTAMP_MILLISECONDS, table.getColumn(1).getType());
    }

    // specifying no NumPy types should load them as TIMESTAMP_DAYS
    ORCOptions opts = ORCOptions.builder().withNumPyTypes(false).build();
    try (Table table = Table.readORC(opts, TEST_ORC_TIMESTAMP_DATE_FILE)) {
      assertEquals(2, table.getNumberOfColumns());
      // column 0's type should be unaffected by the NumPy-types setting
      assertEquals(found, table.getColumn(0).getType());
      assertEquals(DType.TIMESTAMP_DAYS, table.getColumn(1).getType());
    }
  }
  /**
   * Verifies that an explicit time unit overrides the default: with NumPy types disabled and
   * TIMESTAMP_SECONDS requested, the timestamp column is returned in seconds while the date
   * column remains TIMESTAMP_DAYS.
   */
  @Test
  void testReadORCTimeUnit() {
    // specifying no NumPy types should load them as TIMESTAMP_DAYS.
    // specifying a specific type will return the result in that unit
    ORCOptions opts = ORCOptions.builder()
        .withNumPyTypes(false)
        .withTimeUnit(DType.TIMESTAMP_SECONDS)
        .build();
    try (Table table = Table.readORC(opts, TEST_ORC_TIMESTAMP_DATE_FILE)) {
      assertEquals(2, table.getNumberOfColumns());
      assertEquals(DType.TIMESTAMP_SECONDS, table.getColumn(0).getType());
      assertEquals(DType.TIMESTAMP_DAYS, table.getColumn(1).getType());
    }
  }
  /**
   * Cross join of a 3-row left table with a 2-row right table (containing a null): expects the
   * full 6-row cartesian product, compared after sorting on both output columns.
   */
  @Test
  void testCrossJoin() {
    try (Table leftTable = new Table.TestBuilder()
        .column(100, 101, 102)
        .build();
         Table rightTable = new Table.TestBuilder()
             .column(200, null)
             .build();
         Table expected = new Table.TestBuilder()
             .column( 100,  100,  101,  101,  102,  102) // left
             .column(null,  200, null,  200, null,  200) // right
             .build();
         Table joinedTable = leftTable.crossJoin(rightTable);
         Table orderedJoinedTable =
             joinedTable.orderBy(
                 OrderByArg.asc(0, true),
                 OrderByArg.asc(1, true))) {
      assertTablesAreEqual(expected, orderedJoinedTable);
    }
  }
private void verifyJoinGatherMaps(GatherMap[] maps, Table expected) {
assertEquals(2, maps.length);
int numRows = (int) expected.getRowCount();
assertEquals(numRows, maps[0].getRowCount());
assertEquals(numRows, maps[1].getRowCount());
try (ColumnVector leftMap = maps[0].toColumnView(0, numRows).copyToColumnVector();
ColumnVector rightMap = maps[1].toColumnView(0, numRows).copyToColumnVector();
Table result = new Table(leftMap, rightMap);
Table orderedResult = result.orderBy(OrderByArg.asc(0, true))) {
assertTablesAreEqual(expected, orderedResult);
}
}
private void verifySemiJoinGatherMap(GatherMap map, Table expected) {
int numRows = (int) expected.getRowCount();
assertEquals(numRows, map.getRowCount());
try (ColumnVector leftMap = map.toColumnView(0, numRows).copyToColumnVector();
Table result = new Table(leftMap);
Table orderedResult = result.orderBy(OrderByArg.asc(0, true))) {
assertTablesAreEqual(expected, orderedResult);
}
}
  /**
   * Left equi-join gather maps without null equality. {@code inv} (Integer.MIN_VALUE) in the
   * right column marks left rows that found no match.
   */
  @Test
  void testLeftJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 8, 9)
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 3)
             .build()) {
      GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightKeys, false);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Left equi-join gather maps with compareNullsEqual=true: the null left keys match both null
   * right keys, producing duplicated left rows 7 and 8 in the output.
   */
  @Test
  void testLeftJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightKeys, true);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Left join gather maps via a prebuilt HashJoin on the right keys (nulls not equal). */
  @Test
  void testLeftHashJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
         HashJoin rightHash = new HashJoin(rightKeys, false);
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 8, 9)
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 3)
             .build()) {
      GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightHash);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testLeftHashJoinGatherMaps but first computes the output row count via
   * leftJoinRowCount and passes it to the gather-map call.
   */
  @Test
  void testLeftHashJoinGatherMapsWithCount() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
         HashJoin rightHash = new HashJoin(rightKeys, false);
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 8, 9)
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 3)
             .build()) {
      long rowCount = leftKeys.leftJoinRowCount(rightHash);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightHash, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Left hash-join gather maps with compareNullsEqual=true: null keys match null keys. */
  @Test
  void testLeftHashJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         HashJoin rightHash = new HashJoin(rightKeys, true);
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightHash);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testLeftHashJoinGatherMapsNulls but verifies leftJoinRowCount first and passes the
   * count to the gather-map call.
   */
  @Test
  void testLeftHashJoinGatherMapsNullsWithCount() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         HashJoin rightHash = new HashJoin(rightKeys,true);
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build()) {
      long rowCount = leftKeys.leftJoinRowCount(rightHash);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = leftKeys.leftJoinGatherMaps(rightHash, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Conditional left join gather maps using the AST predicate left.col0 > right.col0. */
  @Test
  void testConditionalLeftJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2, 2, 2,   3,   4, 5, 5,   6, 7,   8, 9, 9)
             .column(inv, inv, 0, 1, 3, inv, inv, 0, 1, inv, 1, inv, 0, 1)
             .build();
         CompiledExpression condition = expr.compile()) {
      GatherMap[] maps = left.conditionalLeftJoinGatherMaps(right, condition);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Conditional left join using NULL_EQUAL as the predicate, so null keys on both sides compare
   * as matching.
   */
  @Test
  void testConditionalLeftJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build();
         CompiledExpression condition = expr.compile()) {
      GatherMap[] maps = left.conditionalLeftJoinGatherMaps(right, condition);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testConditionalLeftJoinGatherMaps but verifies conditionalLeftJoinRowCount first
   * and passes the count to the gather-map call.
   */
  @Test
  void testConditionalLeftJoinGatherMapsWithCount() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2, 2, 2,   3,   4, 5, 5,   6, 7,   8, 9, 9)
             .column(inv, inv, 0, 1, 3, inv, inv, 0, 1, inv, 1, inv, 0, 1)
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalLeftJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = left.conditionalLeftJoinGatherMaps(right, condition, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testConditionalLeftJoinGatherMapsNulls but verifies conditionalLeftJoinRowCount
   * first and passes the count to the gather-map call.
   */
  @Test
  void testConditionalLeftJoinGatherMapsNullsWithCount() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalLeftJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = left.conditionalLeftJoinGatherMaps(right, condition, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Mixed left join: equality on key column 0 plus the AST condition left.col1 > right.col1,
   * with nulls not equal.
   */
  @Test
  void testMixedLeftJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 8,   9)
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, inv)
             .build()) {
      GatherMap[] maps = Table.mixedLeftJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.UNEQUAL);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Mixed left join with NullEquality.EQUAL: null equality keys match each other, then the
   * AST condition left.col1 > right.col1 filters the matched pairs.
   */
  @Test
  void testMixedLeftJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0,   1,   2,   3,   4,   5,   6, 7, 7, 8,   9)
             .column(0, inv, inv, inv, inv, inv, inv, 0, 2, 1, inv)
             .build()) {
      GatherMap[] maps = Table.mixedLeftJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.EQUAL);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testMixedLeftJoinGatherMaps but verifies mixedLeftJoinSize first and passes the
   * size info to the gather-map call.
   */
  @Test
  void testMixedLeftJoinGatherMapsWithSize() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(  0,   1, 2,   3,   4,   5,   6, 7, 8,   9)
             .column(inv, inv, 2, inv, inv, inv, inv, 0, 1, inv)
             .build();
         MixedJoinSize sizeInfo = Table.mixedLeftJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.UNEQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      GatherMap[] maps = Table.mixedLeftJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.UNEQUAL, sizeInfo);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testMixedLeftJoinGatherMapsNulls but verifies mixedLeftJoinSize first and passes
   * the size info to the gather-map call.
   */
  @Test
  void testMixedLeftJoinGatherMapsNullsWithSize() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0,   1,   2,   3,   4,   5,   6, 7, 7, 8,   9)
             .column(0, inv, inv, inv, inv, inv, inv, 0, 2, 1, inv)
             .build();
         MixedJoinSize sizeInfo = Table.mixedLeftJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.EQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      GatherMap[] maps = Table.mixedLeftJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.EQUAL, sizeInfo);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Inner equi-join gather maps without null equality: only matching key pairs are emitted. */
  @Test
  void testInnerJoinGatherMaps() {
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8, 9) // left
             .column(2, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.innerJoinGatherMaps(rightKeys, false);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Inner equi-join gather maps with compareNullsEqual=true: null keys match null keys. */
  @Test
  void testInnerJoinGatherMapsNulls() {
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(2, 7, 7, 8, 8, 9) // left
             .column(2, 0, 1, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.innerJoinGatherMaps(rightKeys, true);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Inner join gather maps via a prebuilt HashJoin on the right keys (nulls not equal). */
  @Test
  void testInnerHashJoinGatherMaps() {
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
         HashJoin rightHash = new HashJoin(rightKeys, false);
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8, 9) // left
             .column(2, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.innerJoinGatherMaps(rightHash);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testInnerHashJoinGatherMaps but verifies innerJoinRowCount first and passes the
   * count to the gather-map call.
   */
  @Test
  void testInnerHashJoinGatherMapsWithCount() {
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
         HashJoin rightHash = new HashJoin(rightKeys, false);
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8, 9) // left
             .column(2, 0, 1, 3) // right
             .build()) {
      long rowCount = leftKeys.innerJoinRowCount(rightHash);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = leftKeys.innerJoinGatherMaps(rightHash, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Inner hash-join gather maps with compareNullsEqual=true: null keys match null keys. */
  @Test
  void testInnerHashJoinGatherMapsNulls() {
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         HashJoin rightHash = new HashJoin(rightKeys, true);
         Table expected = new Table.TestBuilder()
             .column(2, 7, 7, 8, 8, 9) // left
             .column(2, 0, 1, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.innerJoinGatherMaps(rightHash);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testInnerHashJoinGatherMapsNulls but verifies innerJoinRowCount first and passes
   * the count to the gather-map call.
   */
  @Test
  void testInnerHashJoinGatherMapsNullsWithCount() {
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         HashJoin rightHash = new HashJoin(rightKeys, true);
         Table expected = new Table.TestBuilder()
             .column(2, 7, 7, 8, 8, 9) // left
             .column(2, 0, 1, 0, 1, 3) // right
             .build()) {
      long rowCount = leftKeys.innerJoinRowCount(rightHash);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = leftKeys.innerJoinGatherMaps(rightHash, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /** Conditional inner join gather maps using the AST predicate left.col0 > right.col0. */
  @Test
  void testConditionalInnerJoinGatherMaps() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(2, 2, 2, 5, 5, 7, 9, 9)
             .column(0, 1, 3, 0, 1, 1, 0, 1)
             .build();
         CompiledExpression condition = expr.compile()) {
      GatherMap[] maps = left.conditionalInnerJoinGatherMaps(right, condition);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  // Test non-null-supporting equality at least once.
  /**
   * Conditional inner join with plain EQUAL: nulls do not compare equal, so only the non-null
   * matching keys (2 and 9) join.
   */
  @Test
  void testConditionalInnerJoinGatherMapsEqual() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(2, 9) // left
             .column(2, 3) // right
             .build();
         CompiledExpression condition = expr.compile()) {
      GatherMap[] maps = left.conditionalInnerJoinGatherMaps(right, condition);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Conditional inner join using NULL_EQUAL as the predicate, so null keys on both sides
   * compare as matching.
   */
  @Test
  void testConditionalInnerJoinGatherMapsNulls() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(2, 7, 7, 8, 8, 9) // left
             .column(2, 0, 1, 0, 1, 3) // right
             .build();
         CompiledExpression condition = expr.compile()) {
      GatherMap[] maps = left.conditionalInnerJoinGatherMaps(right, condition);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testConditionalInnerJoinGatherMaps but verifies conditionalInnerJoinRowCount first
   * and passes the count to the gather-map call.
   */
  @Test
  void testConditionalInnerJoinGatherMapsWithCount() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(2, 2, 2, 5, 5, 7, 9, 9)
             .column(0, 1, 3, 0, 1, 1, 0, 1)
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalInnerJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = left.conditionalInnerJoinGatherMaps(right, condition, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testConditionalInnerJoinGatherMapsNulls but verifies conditionalInnerJoinRowCount
   * first and passes the count to the gather-map call.
   */
  @Test
  void testConditionalInnerJoinGatherMapsNullsWithCount() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(2, 7, 7, 8, 8, 9) // left
             .column(2, 0, 1, 0, 1, 3) // right
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalInnerJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = left.conditionalInnerJoinGatherMaps(right, condition, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Mixed inner join: equality on key column 0 plus the AST condition left.col1 > right.col1,
   * with nulls not equal.
   */
  @Test
  void testMixedInnerJoinGatherMaps() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8)
             .column(2, 0, 1)
             .build()) {
      GatherMap[] maps = Table.mixedInnerJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.UNEQUAL);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Mixed inner join with NullEquality.EQUAL: null equality keys match each other, then the
   * AST condition left.col1 > right.col1 filters the matched pairs.
   */
  @Test
  void testMixedInnerJoinGatherMapsNulls() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0, 7, 7, 8)
             .column(0, 0, 2, 1)
             .build()) {
      GatherMap[] maps = Table.mixedInnerJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.EQUAL);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testMixedInnerJoinGatherMaps but verifies mixedInnerJoinSize first and passes the
   * size info to the gather-map call.
   */
  @Test
  void testMixedInnerJoinGatherMapsWithSize() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8)
             .column(2, 0, 1)
             .build();
         MixedJoinSize sizeInfo = Table.mixedInnerJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.UNEQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      GatherMap[] maps = Table.mixedInnerJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.UNEQUAL, sizeInfo);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Same as testMixedInnerJoinGatherMapsNulls but verifies mixedInnerJoinSize first and passes
   * the size info to the gather-map call.
   */
  @Test
  void testMixedInnerJoinGatherMapsNullsWithSize() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0, 7, 7, 8)
             .column(0, 0, 2, 1)
             .build();
         MixedJoinSize sizeInfo = Table.mixedInnerJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.EQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      GatherMap[] maps = Table.mixedInnerJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.EQUAL, sizeInfo);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Full equi-join gather maps without null equality: unmatched rows on either side appear with
   * {@code inv} (Integer.MIN_VALUE) on the opposite side.
   */
  @Test
  void testFullJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, null, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, null).build();
         Table expected = new Table.TestBuilder()
             .column(inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 8, 9) // left
             .column(  4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.fullJoinGatherMaps(rightKeys, false);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Equality full join with compareNullsEqual=true: null keys on the left match the null
   * keys on the right, producing a cross product of the null rows.
   */
  @Test
  void testFullJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(  4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.fullJoinGatherMaps(rightKeys, true);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Full join via a prebuilt {@link HashJoin} on the right table; expected maps match the
   * non-hash {@code testFullJoinGatherMaps} case.
   */
  @Test
  void testFullHashJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, null, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, null).build();
         HashJoin rightHash = new HashJoin(rightKeys, false);
         Table expected = new Table.TestBuilder()
             .column(inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 8, 9) // left
             .column(  4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.fullJoinGatherMaps(rightHash);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Full hash join where the output row count is computed first via {@code fullJoinRowCount}
   * and then supplied to {@code fullJoinGatherMaps} to avoid recomputing it.
   */
  @Test
  void testFullHashJoinGatherMapsWithCount() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, null, 1, 7, 4, 6, 5, 8).build();
         Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, null).build();
         HashJoin rightHash = new HashJoin(rightKeys, false);
         Table expected = new Table.TestBuilder()
             .column(inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 8, 9) // left
             .column(  4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, 3) // right
             .build()) {
      long rowCount = leftKeys.fullJoinRowCount(rightHash);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = leftKeys.fullJoinGatherMaps(rightHash, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Full hash join with nulls compared as equal (HashJoin built with compareNullsEqual=true);
   * the two null left rows each match both null right rows.
   */
  @Test
  void testFullHashJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         HashJoin rightHash = new HashJoin(rightKeys, true);
         Table expected = new Table.TestBuilder()
             .column(inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(  4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build()) {
      GatherMap[] maps = leftKeys.fullJoinGatherMaps(rightHash);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Full hash join with nulls equal, computing the row count up front and passing it to
   * the gather-map call.
   */
  @Test
  void testFullHashJoinGatherMapsNullsWithCount() {
    final int inv = Integer.MIN_VALUE;
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         HashJoin rightHash = new HashJoin(rightKeys, true);
         Table expected = new Table.TestBuilder()
             .column(inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(  4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build()) {
      long rowCount = leftKeys.fullJoinRowCount(rightHash);
      assertEquals(expected.getRowCount(), rowCount);
      GatherMap[] maps = leftKeys.fullJoinGatherMaps(rightHash, rowCount);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Conditional (AST-only, no equality keys) full join with condition left.col0 > right.col0.
   */
  @Test
  void testConditionalFullJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(inv, inv, inv,   0,   1, 2, 2, 2,   3,   4, 5, 5,   6, 7,   8, 9, 9)
             .column(  2,   4,   5, inv, inv, 0, 1, 3, inv, inv, 0, 1, inv, 1, inv, 0, 1)
             .build();
         CompiledExpression condition = expr.compile()) {
      GatherMap[] maps = left.conditionalFullJoinGatherMaps(right, condition);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Conditional full join using NULL_EQUAL in the AST condition, so null keys compare equal;
   * the expected maps mirror the equality full join with compareNullsEqual=true.
   */
  @Test
  void testConditionalFullJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 7, 8, 8, 9) // left
             .column(  4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, 0, 1, 3) // right
             .build();
         CompiledExpression condition = expr.compile()) {
      GatherMap[] maps = left.conditionalFullJoinGatherMaps(right, condition);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Mixed full join: equality on column 0 plus AST condition left.col1 > right.col1,
   * nulls unequal. Unmatched rows on either side map to {@code inv}.
   */
  @Test
  void testMixedFullJoinGatherMaps() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(inv, inv, inv,   0,   1, 2,   3,   4,   5,   6, 7, 8,   9)
             .column(  3,   4,   5, inv, inv, 2, inv, inv, inv, inv, 0, 1, inv)
             .build()) {
      GatherMap[] maps = Table.mixedFullJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.UNEQUAL);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Mixed full join with null keys on both sides and NullEquality.EQUAL, so null keys
   * pair up subject to the AST condition on column 1.
   */
  @Test
  void testMixedFullJoinGatherMapsNulls() {
    final int inv = Integer.MIN_VALUE;
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(inv, inv, inv, 0,   1,   2,   3,   4,   5,   6, 7, 7, 8,   9)
             .column(  3,   4,   5, 0, inv, inv, inv, inv, inv, inv, 0, 2, 1, inv)
             .build()) {
      GatherMap[] maps = Table.mixedFullJoinGatherMaps(leftKeys, rightKeys, left, right, condition,
          NullEquality.EQUAL);
      try {
        verifyJoinGatherMaps(maps, expected);
      } finally {
        for (GatherMap map : maps) {
          map.close();
        }
      }
    }
  }
  /**
   * Mixed left semi join: left rows whose key matches a right key AND whose column 1
   * value exceeds the matching right row's column 1 value; nulls unequal.
   */
  @Test
  void testMixedLeftSemiJoinGatherMap() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8) // qualifying left row indices
             .build();
         GatherMap map = Table.mixedLeftSemiJoinGatherMap(leftKeys, rightKeys, left, right,
             condition, NullEquality.UNEQUAL)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Mixed left semi join with NullEquality.EQUAL so the null left keys can match the
   * null right keys (subject to the column-1 GREATER condition).
   */
  @Test
  void testMixedLeftSemiJoinGatherMapNulls() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0, 7, 8) // qualifying left row indices
             .build();
         GatherMap map = Table.mixedLeftSemiJoinGatherMap(leftKeys, rightKeys, left, right,
             condition, NullEquality.EQUAL)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Mixed left semi join where the output size is computed first with
   * {@code mixedLeftSemiJoinSize} and reused when building the gather map.
   */
  @Test
  void testMixedLeftSemiJoinGatherMapWithSize() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8)
             .build();
         MixedJoinSize sizeInfo = Table.mixedLeftSemiJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.UNEQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      try (GatherMap map = Table.mixedLeftSemiJoinGatherMap(leftKeys, rightKeys, left, right,
          condition, NullEquality.UNEQUAL, sizeInfo)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
  /**
   * Mixed left semi join with nulls equal, pre-sizing the output and passing the size
   * info to the gather-map call.
   */
  @Test
  void testMixedLeftSemiJoinGatherMapNullsWithSize() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0, 7, 8)
             .build();
         MixedJoinSize sizeInfo = Table.mixedLeftSemiJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.EQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      try (GatherMap map = Table.mixedLeftSemiJoinGatherMap(leftKeys, rightKeys, left, right,
          condition, NullEquality.EQUAL, sizeInfo)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
  /**
   * Mixed left anti join: the complement of the mixed left semi case above — left rows
   * with no key+condition match on the right; nulls unequal.
   */
  @Test
  void testMixedLeftAntiJoinGatherMap() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0, 1, 3, 4, 5, 6, 9) // unmatched left row indices
             .build();
         GatherMap map = Table.mixedLeftAntiJoinGatherMap(leftKeys, rightKeys, left, right,
             condition, NullEquality.UNEQUAL)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Mixed left anti join with nulls equal: null left keys that satisfy the condition
   * against a null right key are excluded from the anti result.
   */
  @Test
  void testMixedLeftAntiJoinGatherMapNulls() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(1, 2, 3, 4, 5, 6, 9) // unmatched left row indices
             .build();
         GatherMap map = Table.mixedLeftAntiJoinGatherMap(leftKeys, rightKeys, left, right,
             condition, NullEquality.EQUAL)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Mixed left anti join using a precomputed {@code mixedLeftAntiJoinSize} result to size
   * the gather map.
   */
  @Test
  void testMixedLeftAntiJoinGatherMapWithSize() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8)
             .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5)
             .column(7, 8, 9, 0, 1, 2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(0, 1, 3, 4, 5, 6, 9)
             .build();
         MixedJoinSize sizeInfo = Table.mixedLeftAntiJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.UNEQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      try (GatherMap map = Table.mixedLeftAntiJoinGatherMap(leftKeys, rightKeys, left, right,
          condition, NullEquality.UNEQUAL, sizeInfo)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
  /**
   * Mixed left anti join with nulls equal and a precomputed output size.
   */
  @Test
  void testMixedLeftAntiJoinGatherMapNullsWithSize() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(1, TableReference.LEFT),
        new ColumnReference(1, TableReference.RIGHT));
    try (CompiledExpression condition = expr.compile();
         Table left = new Table.TestBuilder()
             .column(null, 3, 9, 0, 1, 7, 4, null, 5, 8)
             .column(   1, 2, 3, 4, 5, 6, 7,    8, 9, 0)
             .build();
         Table leftKeys = new Table(left.getColumn(0));
         Table right = new Table.TestBuilder()
             .column(null, 5, null, 8, 10, 32)
             .column(   0, 1,    2, 3,  4,  5)
             .column(   7, 8,    9, 0,  1,  2).build();
         Table rightKeys = new Table(right.getColumn(0));
         Table expected = new Table.TestBuilder()
             .column(1, 2, 3, 4, 5, 6, 9)
             .build();
         MixedJoinSize sizeInfo = Table.mixedLeftAntiJoinSize(leftKeys, rightKeys, left, right,
             condition, NullEquality.EQUAL)) {
      assertEquals(expected.getRowCount(), sizeInfo.getOutputRowCount());
      try (GatherMap map = Table.mixedLeftAntiJoinGatherMap(leftKeys, rightKeys, left, right,
          condition, NullEquality.EQUAL, sizeInfo)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
@Test
void testLeftSemiJoinGatherMap() {
try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
Table expected = new Table.TestBuilder()
.column(2, 7, 8, 9) // left
.build();
GatherMap map = leftKeys.leftSemiJoinGatherMap(rightKeys, false)) {
verifySemiJoinGatherMap(map, expected);
}
}
  /**
   * Equality left semi join with compareNullsEqual=true: null left keys match the null
   * right keys and so appear in the semi result.
   */
  @Test
  void testLeftSemiJoinGatherMapNulls() {
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8, 9) // left
             .build();
         GatherMap map = leftKeys.leftSemiJoinGatherMap(rightKeys, true)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Conditional (AST-only) left semi join with condition left.col0 > right.col0.
   */
  @Test
  void testConditionalLeftSemiJoinGatherMap() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(2, 5, 7, 9) // left
             .build();
         CompiledExpression condition = expr.compile();
         GatherMap map = left.conditionalLeftSemiJoinGatherMap(right, condition)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Conditional left semi join with NULL_EQUAL, so left null keys match right null keys.
   */
  @Test
  void testConditionalLeftSemiJoinGatherMapNulls() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8, 9) // left
             .build();
         CompiledExpression condition = expr.compile();
         GatherMap map = left.conditionalLeftSemiJoinGatherMap(right, condition)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Conditional left semi join where the row count is computed first and passed to the
   * gather-map call so the output buffer can be sized exactly.
   */
  @Test
  void testConditionalLeftSemiJoinGatherMapWithCount() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(2, 5, 7, 9) // left
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalLeftSemiJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      try (GatherMap map =
               left.conditionalLeftSemiJoinGatherMap(right, condition, rowCount)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
  /**
   * Conditional left semi join with NULL_EQUAL and a precomputed row count.
   */
  @Test
  void testConditionalLeftSemiJoinGatherMapNullsWithCount() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(2, 7, 8, 9) // left
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalLeftSemiJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      try (GatherMap map =
               left.conditionalLeftSemiJoinGatherMap(right, condition, rowCount)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
@Test
void testAntiSemiJoinGatherMap() {
try (Table leftKeys = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
Table rightKeys = new Table.TestBuilder().column(6, 5, 9, 8, 10, 32).build();
Table expected = new Table.TestBuilder()
.column(0, 1, 3, 4, 5, 6) // left
.build();
GatherMap map = leftKeys.leftAntiJoinGatherMap(rightKeys, false)) {
verifySemiJoinGatherMap(map, expected);
}
}
  /**
   * Equality left anti join with compareNullsEqual=true: null left keys match the null
   * right keys and are therefore excluded from the anti result.
   */
  @Test
  void testAntiSemiJoinGatherMapNulls() {
    try (Table leftKeys = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table rightKeys = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(0, 1, 3, 4, 5, 6) // left
             .build();
         GatherMap map = leftKeys.leftAntiJoinGatherMap(rightKeys, true)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Conditional (AST-only) left anti join with condition left.col0 > right.col0.
   */
  @Test
  void testConditionalLeftAntiJoinGatherMap() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(0, 1, 3, 4, 6, 8) // left
             .build();
         CompiledExpression condition = expr.compile();
         GatherMap map = left.conditionalLeftAntiJoinGatherMap(right, condition)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Conditional left anti join with NULL_EQUAL: left null keys match right null keys and
   * are excluded from the anti result.
   * NOTE(review): the name says "AntiSemi" while sibling tests say "LeftAnti" — consider
   * renaming for consistency in a dedicated change.
   */
  @Test
  void testConditionalAntiSemiJoinGatherMapNulls() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(0, 1, 3, 4, 5, 6) // left
             .build();
         CompiledExpression condition = expr.compile();
         GatherMap map = left.conditionalLeftAntiJoinGatherMap(right, condition)) {
      verifySemiJoinGatherMap(map, expected);
    }
  }
  /**
   * Conditional left anti join with the row count computed up front via
   * {@code conditionalLeftAntiJoinRowCount}.
   */
  @Test
  void testConditionalLeftAntiJoinGatherMapWithCount() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.GREATER,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder().column(2, 3, 9, 0, 1, 7, 4, 6, 5, 8).build();
         Table right = new Table.TestBuilder()
             .column(6, 5, 9, 8, 10, 32)
             .column(0, 1, 2, 3, 4, 5).build();
         Table expected = new Table.TestBuilder()
             .column(0, 1, 3, 4, 6, 8) // left
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalLeftAntiJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      try (GatherMap map =
               left.conditionalLeftAntiJoinGatherMap(right, condition, rowCount)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
  /**
   * Conditional left anti join with NULL_EQUAL and a precomputed row count.
   */
  @Test
  void testConditionalAntiSemiJoinGatherMapNullsWithCount() {
    BinaryOperation expr = new BinaryOperation(BinaryOperator.NULL_EQUAL,
        new ColumnReference(0, TableReference.LEFT),
        new ColumnReference(0, TableReference.RIGHT));
    try (Table left = new Table.TestBuilder()
        .column(2, 3, 9, 0, 1, 7, 4, null, null, 8)
        .build();
         Table right = new Table.TestBuilder()
             .column(null, null, 9, 8, 10, 32)
             .build();
         Table expected = new Table.TestBuilder()
             .column(0, 1, 3, 4, 5, 6) // left
             .build();
         CompiledExpression condition = expr.compile()) {
      long rowCount = left.conditionalLeftAntiJoinRowCount(right, condition);
      assertEquals(expected.getRowCount(), rowCount);
      try (GatherMap map =
               left.conditionalLeftAntiJoinGatherMap(right, condition, rowCount)) {
        verifySemiJoinGatherMap(map, expected);
      }
    }
  }
  /**
   * upper/lower bound search on a sorted column whose first element is null; with nulls
   * treated as smallest (see getBoundsCv), value 15 lands after the null at index 1
   * for both bounds.
   */
  @Test
  void testBoundsNulls() {
    boolean[] descFlags = new boolean[1]; // all-false: ascending order
    try (Table table = new TestBuilder()
        .column(null, 20, 20, 20, 30)
        .build();
         Table values = new TestBuilder()
             .column(15)
             .build();
         ColumnVector expected = ColumnVector.fromBoxedInts(1)) {
      try (ColumnVector columnVector = getBoundsCv(descFlags, true, table, values)) {
        assertColumnsAreEqual(expected, columnVector);
      }
      try (ColumnVector columnVector = getBoundsCv(descFlags, false, table, values)) {
        assertColumnsAreEqual(expected, columnVector);
      }
    }
  }
  /**
   * Bound search must fail when the values table has more columns than the input table.
   */
  @Test
  void testBoundsValuesSizeBigger() {
    boolean[] descFlags = new boolean[2];
    try(Table table = new TestBuilder()
        .column(90, 100, 120, 130, 135)
        .column(.5, .5, .5, .7, .7)
        .build();
        Table values = new TestBuilder()
            .column(120)
            .column(.3)
            .column(.7)
            .build()) {
      assertThrows(CudfException.class, () -> getBoundsCv(descFlags, true, table, values));
      assertThrows(CudfException.class, () -> getBoundsCv(descFlags, false, table, values));
    }
  }
  /**
   * Bound search must fail when the input table has more columns than the values table.
   */
  @Test
  void testBoundsInputSizeBigger() {
    boolean[] descFlags = new boolean[3];
    try(Table table = new TestBuilder()
        .column(90, 100, 120, 130, 135)
        .column(.5, .5, .5, .7, .7)
        .column(90, 100, 120, 130, 135)
        .build();
        Table values = new TestBuilder()
            .column(120)
            .column(.3)
            .build()) {
      assertThrows(CudfException.class, () -> getBoundsCv(descFlags, true, table, values));
      assertThrows(CudfException.class, () -> getBoundsCv(descFlags, false, table, values));
    }
  }
  /**
   * Bound search over four mixed-type sort columns: the value row matches rows 3-4, so
   * upperBound returns 5 (past the run) and lowerBound returns 3 (start of the run).
   */
  @Test
  void testBoundsMultiCol() {
    boolean[] descFlags = new boolean[4]; // all ascending
    try (Table table = new TestBuilder()
        .column(10, 20, 20, 20, 20)
        .column(5.0, .5, .5, .7, .7)
        .column("1","2","3","4","4")
        .column(90, 77, 78, 61, 61)
        .build();
         Table values = new TestBuilder()
             .column(20)
             .column(0.7)
             .column("4")
             .column(61)
             .build()) {
      try (ColumnVector columnVector = getBoundsCv(descFlags, true, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(5)) {
        assertColumnsAreEqual(expected, columnVector);
      }
      try (ColumnVector columnVector = getBoundsCv(descFlags, false, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(3)) {
        assertColumnsAreEqual(expected, columnVector);
      }
    }
  }
  /**
   * Bound search with multiple search values: neither 20.3 nor 20.8 is present, so
   * upper and lower bounds coincide (insertion points 1 and 3).
   */
  @Test
  void testBoundsFloatsMultiVal() {
    boolean[] descFlags = new boolean[1];
    try (Table table = new TestBuilder()
        .column(10.0, 20.6, 20.7)
        .build();
         Table values = new TestBuilder()
             .column(20.3, 20.8)
             .build();
         ColumnVector expected = ColumnVector.fromBoxedInts(1, 3)) {
      try (ColumnVector columnVector = getBoundsCv(descFlags, true, table, values)) {
        assertColumnsAreEqual(expected, columnVector);
      }
      try (ColumnVector columnVector = getBoundsCv(descFlags, false, table, values)) {
        assertColumnsAreEqual(expected, columnVector);
      }
    }
  }
  /**
   * Bound search on an ascending float column where the value (20.6) is present:
   * upperBound is past it (2), lowerBound is at it (1).
   */
  @Test
  void testBoundsFloatsSingleCol() {
    boolean[] descFlags = {false};
    try(Table table = new TestBuilder()
        .column(10.0, 20.6, 20.7)
        .build();
        Table values = new TestBuilder()
            .column(20.6)
            .build()) {
      try (ColumnVector columnVector = getBoundsCv(descFlags, true, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(2)) {
        assertColumnsAreEqual(expected, columnVector);
      }
      try (ColumnVector columnVector = getBoundsCv(descFlags, false, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(1)) {
        assertColumnsAreEqual(expected, columnVector);
      }
    }
  }
  /**
   * Same search as testBoundsFloatsSingleCol but on a descending column with the
   * matching desc flag set; results are identical positions in the reversed data.
   */
  @Test
  void testBoundsFloatsSingleColDesc() {
    boolean[] descFlags = new boolean[] {true};
    try(Table table = new TestBuilder()
        .column(20.7, 20.6, 10.0)
        .build();
        Table values = new TestBuilder()
            .column(20.6)
            .build()) {
      try (ColumnVector columnVector = getBoundsCv(descFlags, true, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(2)) {
        assertColumnsAreEqual(expected, columnVector);
      }
      try (ColumnVector columnVector = getBoundsCv(descFlags, false, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(1)) {
        assertColumnsAreEqual(expected, columnVector);
      }
    }
  }
  /**
   * Bound search on an int column with a run of equal values (20 x4): upperBound is past
   * the run (5), lowerBound is at its start (1).
   */
  @Test
  void testBoundsIntsSingleCol() {
    boolean[] descFlags = new boolean[1];
    try(Table table = new TestBuilder()
        .column(10, 20, 20, 20, 20)
        .build();
        Table values = new TestBuilder()
            .column(20)
            .build()) {
      try (ColumnVector columnVector = getBoundsCv(descFlags, true, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(5)) {
        assertColumnsAreEqual(expected, columnVector);
      }
      try (ColumnVector columnVector = getBoundsCv(descFlags, false, table, values);
           ColumnVector expected = ColumnVector.fromBoxedInts(1)) {
        assertColumnsAreEqual(expected, columnVector);
      }
    }
  }
  /**
   * Bound search on a descending int column for a value (5) smaller than every element:
   * both bounds are the end of the column (5).
   */
  @Test
  void testBoundsIntsSingleColDesc() {
    boolean[] descFlags = new boolean[]{true};
    try (Table table = new TestBuilder()
        .column(20, 20, 20, 20, 10)
        .build();
         Table values = new TestBuilder()
             .column(5)
             .build();
         ColumnVector expected = ColumnVector.fromBoxedInts(5)) {
      try (ColumnVector columnVector = getBoundsCv(descFlags, true, table, values)) {
        assertColumnsAreEqual(expected, columnVector);
      }
      try (ColumnVector columnVector = getBoundsCv(descFlags, false, table, values)) {
        assertColumnsAreEqual(expected, columnVector);
      }
    }
  }
  /**
   * Bound search on a string column built from "0".."3"; searching for "0" gives
   * upperBound 1 and lowerBound 0.
   */
  @Test
  void testBoundsString() {
    boolean[] descFlags = new boolean[1];
    try (ColumnVector cIn = ColumnVector.build(DType.STRING, 4, (b) -> {
      for (int i = 0; i < 4; i++) {
        b.appendUTF8String(String.valueOf(i).getBytes());
      }
    });
         Table table = new Table(cIn);
         ColumnVector cVal = ColumnVector.fromStrings("0");
         Table values = new Table(cVal)) {
      try (ColumnVector cv = getBoundsCv(descFlags, true, table, values);
           ColumnVector expected = ColumnVector.fromInts(1)) {
        assertColumnsAreEqual(expected, cv);
      }
      try (ColumnVector cv = getBoundsCv(descFlags, false, table, values);
           ColumnVector expected = ColumnVector.fromInts(0)) {
        assertColumnsAreEqual(expected, cv);
      }
    }
  }
  /**
   * An empty values table is rejected by a Java-side assertion (hence AssertionError,
   * not CudfException; requires tests to run with -ea).
   */
  @Test
  void testBoundsEmptyValues() {
    boolean[] descFlags = new boolean[1];
    try (ColumnVector cv = ColumnVector.fromBoxedLongs();
         Table table = new TestBuilder()
             .column(10, 20, 20, 20, 20)
             .build();
         Table values = new Table(cv)) {
      assertThrows(AssertionError.class,
          () -> getBoundsCv(descFlags, true, table, values).close());
      assertThrows(AssertionError.class,
          () -> getBoundsCv(descFlags, false, table, values).close());
    }
  }
  /**
   * An empty input table is likewise rejected by a Java-side assertion.
   */
  @Test
  void testBoundsEmptyInput() {
    boolean[] descFlags = new boolean[1];
    try (ColumnVector cv = ColumnVector.fromBoxedLongs();
         Table table = new Table(cv);
         Table values = new TestBuilder()
             .column(20)
             .build()) {
      assertThrows(AssertionError.class,
          () -> getBoundsCv(descFlags, true, table, values).close());
      assertThrows(AssertionError.class,
          () -> getBoundsCv(descFlags, false, table, values).close());
    }
  }
private ColumnVector getBoundsCv(boolean[] descFlags, boolean isUpperBound,
Table table, Table values) {
boolean[] nullsAreSmallest = new boolean[descFlags.length];
Arrays.fill(nullsAreSmallest, true);
return isUpperBound ?
table.upperBound(nullsAreSmallest, values, descFlags) :
table.lowerBound(nullsAreSmallest, values, descFlags);
}
  /**
   * Table.repeat(int) repeats every row a fixed number of times, preserving row order,
   * across int, string, and decimal columns.
   */
  @Test
  void testRepeat() {
    try (Table t = new Table.TestBuilder()
        .column(1, 2)
        .column("a", "b")
        .decimal32Column(-3, 12, -25)
        .decimal64Column(2, 11111L, -22222L)
        .build();
         Table expected = new Table.TestBuilder()
             .column(1, 1, 1, 2, 2, 2)
             .column("a", "a", "a", "b", "b", "b")
             .decimal32Column(-3, 12, 12, 12, -25, -25, -25)
             .decimal64Column(2, 11111L, 11111L, 11111L, -22222L, -22222L, -22222L)
             .build();
         Table repeated = t.repeat(3)) {
      assertTablesAreEqual(expected, repeated);
    }
  }
  /**
   * Table.repeat(ColumnVector) repeats each row a per-row number of times taken from the
   * counts column (here 1x for row 0 and 4x for row 1).
   */
  @Test
  void testRepeatColumn() {
    try (Table t = new Table.TestBuilder()
        .column(1, 2)
        .column("a", "b")
        .decimal32Column(-3, 12, -25)
        .decimal64Column(2, 11111L, -22222L)
        .build();
         ColumnVector repeats = ColumnVector.fromBytes((byte)1, (byte)4);
         Table expected = new Table.TestBuilder()
             .column(1, 2, 2, 2, 2)
             .column("a", "b", "b", "b", "b")
             .decimal32Column(-3, 12, -25, -25, -25, -25)
             .decimal64Column(2, 11111L, -22222L, -22222L, -22222L, -22222L)
             .build();
         Table repeated = t.repeat(repeats)) {
      assertTablesAreEqual(expected, repeated);
    }
  }
@Test
void testInterleaveIntColumns() {
try (Table t = new Table.TestBuilder()
.column(1,2,3,4,5)
.column(6,7,8,9,10)
.build();
ColumnVector expected = ColumnVector.fromInts(1,6,2,7,3,8,4,9,5,10);
ColumnVector actual = t.interleaveColumns()) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testInterleaveFloatColumns() {
try (Table t = new Table.TestBuilder()
.column(1f,2f,3f,4f,5f)
.column(6f,7f,8f,9f,10f)
.build();
ColumnVector expected = ColumnVector.fromFloats(1f,6f,2f,7f,3f,8f,4f,9f,5f,10f);
ColumnVector actual = t.interleaveColumns()) {
assertColumnsAreEqual(expected, actual);
}
}
  /**
   * Interleaving decimal columns, covering both DECIMAL32 and DECIMAL64 with matching
   * scales in each sub-case.
   */
  @Test
  void testInterleaveDecimalColumns() {
    try (Table t = new Table.TestBuilder()
        .decimal32Column(-2, 123, 456, 789)
        .decimal32Column(-2,-100, -200, -300)
        .build();
         ColumnVector expected = ColumnVector.decimalFromInts(-2, 123, -100, 456, -200, 789, -300);
         ColumnVector actual = t.interleaveColumns()) {
      assertColumnsAreEqual(expected, actual);
    }
    try (Table t = new Table.TestBuilder()
        .decimal64Column(-5, 123456790L, 987654321L)
        .decimal64Column(-5,-123456790L, -987654321L)
        .build();
         ColumnVector expected = ColumnVector.decimalFromLongs(-5, 123456790L, -123456790L, 987654321L, -987654321L);
         ColumnVector actual = t.interleaveColumns()) {
      assertColumnsAreEqual(expected, actual);
    }
  }
@Test
void testInterleaveStringColumns() {
try (Table t = new Table.TestBuilder()
.column("a", "b", "c")
.column("d", "e", "f")
.build();
ColumnVector expected = ColumnVector.fromStrings("a", "d", "b", "e", "c", "f");
ColumnVector actual = t.interleaveColumns()) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testInterleaveMixedColumns() {
try (Table t = new Table.TestBuilder()
.column(1f,2f,3f,4f,5f)
.column(6,7,8,9,10)
.build()) {
assertThrows(CudfException.class, () -> t.interleaveColumns(),
"All columns must have the same data type in interleave_columns");
}
}
  @Test
  void testConcatNoNulls() {
    // Concatenating three fully-valid tables (3 + 2 + 4 rows) must append their
    // rows in argument order for every column type involved.
    try (Table t1 = new Table.TestBuilder()
        .column(1, 2, 3)
        .column("1", "2", "3")
        .timestampMicrosecondsColumn(1L, 2L, 3L)
        .column(11.0, 12.0, 13.0)
        .decimal32Column(-3, 1, 2, 3)
        .decimal64Column(-10, 1L, 2L, 3L)
        .build();
        Table t2 = new Table.TestBuilder()
            .column(4, 5)
            .column("4", "3")
            .timestampMicrosecondsColumn(4L, 3L)
            .column(14.0, 15.0)
            .decimal32Column(-3, 4, 5)
            .decimal64Column(-10, 4L, 5L)
            .build();
        Table t3 = new Table.TestBuilder()
            .column(6, 7, 8, 9)
            .column("4", "1", "2", "2")
            .timestampMicrosecondsColumn(4L, 1L, 2L, 2L)
            .column(16.0, 17.0, 18.0, 19.0)
            .decimal32Column(-3, 6, 7, 8, 9)
            .decimal64Column(-10, 6L, 7L, 8L, 9L)
            .build();
        Table concat = Table.concatenate(t1, t2, t3);
        Table expected = new Table.TestBuilder()
            .column(1, 2, 3, 4, 5, 6, 7, 8, 9)
            .column("1", "2", "3", "4", "3", "4", "1", "2", "2")
            .timestampMicrosecondsColumn(1L, 2L, 3L, 4L, 3L, 4L, 1L, 2L, 2L)
            .column(11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0)
            .decimal32Column(-3, 1, 2, 3, 4, 5, 6, 7, 8, 9)
            .decimal64Column(-10, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L)
            .build()) {
      assertTablesAreEqual(expected, concat);
    }
  }
  @Test
  void testConcatWithNulls() {
    // Same as testConcatNoNulls but with nulls scattered through each input;
    // validity must be preserved and stitched together across table boundaries.
    try (Table t1 = new Table.TestBuilder()
        .column(1, null, 3)
        .column(11.0, 12.0, 13.0)
        .decimal32Column(-3, 1, null, 3)
        .decimal64Column(-10, 11L, 12L, 13L)
        .build();
        Table t2 = new Table.TestBuilder()
            .column(4, null)
            .column(14.0, 15.0)
            .decimal32Column(-3, 4, null)
            .decimal64Column(-10, 14L, 15L)
            .build();
        Table t3 = new Table.TestBuilder()
            .column(6, 7, 8, 9)
            .column(null, null, 18.0, 19.0)
            .decimal32Column(-3, 6, 7, 8, 9)
            .decimal64Column(-10, null, null, 18L, 19L)
            .build();
        Table concat = Table.concatenate(t1, t2, t3);
        Table expected = new Table.TestBuilder()
            .column(1, null, 3, 4, null, 6, 7, 8, 9)
            .column(11.0, 12.0, 13.0, 14.0, 15.0, null, null, 18.0, 19.0)
            .decimal32Column(-3, 1, null, 3, 4, null, 6, 7, 8, 9)
            .decimal64Column(-10, 11L, 12L, 13L, 14L, 15L, null, null, 18L, 19L)
            .build()) {
      assertTablesAreEqual(expected, concat);
    }
  }
@Test
void testContiguousSplit() {
ContiguousTable[] splits = null;
try (Table t1 = new Table.TestBuilder()
.column(10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.column(50, 52, 54, 56, 58, 60, 62, 64, 66, null)
.decimal32Column(-3, 10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.decimal64Column(-8, 50L, 52L, 54L, 56L, 58L, 60L, 62L, 64L, 66L, null)
.build()) {
splits = t1.contiguousSplit(2, 5, 9);
assertEquals(4, splits.length);
assertEquals(2, splits[0].getRowCount());
assertEquals(2, splits[0].getTable().getRowCount());
assertEquals(3, splits[1].getRowCount());
assertEquals(3, splits[1].getTable().getRowCount());
assertEquals(4, splits[2].getRowCount());
assertEquals(4, splits[2].getTable().getRowCount());
assertEquals(1, splits[3].getRowCount());
assertEquals(1, splits[3].getTable().getRowCount());
} finally {
if (splits != null) {
for (int i = 0; i < splits.length; i++) {
splits[i].close();
}
}
}
}
@Test
void testChunkedPackBasic() {
try (Table t1 = new Table.TestBuilder()
.column(10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.column(50, 52, 54, 56, 58, 60, 62, 64, 66, null)
.decimal32Column(-3, 10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.decimal64Column(-8, 50L, 52L, 54L, 56L, 58L, 60L, 62L, 64L, 66L, null)
.build();
DeviceMemoryBuffer bounceBuffer = DeviceMemoryBuffer.allocate(10L*1024*1024);
ChunkedPack cp = t1.makeChunkedPack(10L*1024*1024);
PackedColumnMetadata meta = cp.buildMetadata()) {
// unpack to bounce buffer
assertEquals(true, cp.hasNext());
assertEquals(cp.getTotalContiguousSize(), cp.next(bounceBuffer));
assertEquals(false, cp.hasNext());
try (Table unpacked = Table.fromPackedTable(meta.getMetadataDirectBuffer(), bounceBuffer)) {
assertTablesAreEqual(t1, unpacked);
}
}
}
@Test
void testChunkedPackTwoPasses() {
// this test packes ~2MB worth of long into a 1MB bounce buffer
// this is 3 iterations because of the validity buffer
Long[] longs = new Long[256*1024];
try (Table t1 = new Table.TestBuilder().column(longs).build();
DeviceMemoryBuffer bounceBuffer = DeviceMemoryBuffer.allocate(1L*1024*1024);
ChunkedPack cp = t1.makeChunkedPack(1L*1024*1024);
PackedColumnMetadata meta = cp.buildMetadata();
DeviceMemoryBuffer target = DeviceMemoryBuffer.allocate(cp.getTotalContiguousSize())) {
long offset = 0;
// unpack to bounce buffer
assertEquals(true, cp.hasNext());
while (cp.hasNext()) {
long copied = cp.next(bounceBuffer);
target.copyFromDeviceBufferAsync(
offset, target, 0, copied, Cuda.DEFAULT_STREAM);
offset += copied;
}
assertEquals(offset, cp.getTotalContiguousSize());
try (Table unpacked = Table.fromPackedTable(meta.getMetadataDirectBuffer(), target)) {
assertTablesAreEqual(t1, unpacked);
}
}
}
@Test
void testContiguousSplitWithStrings() {
ContiguousTable[] splits = null;
try (Table t1 = new Table.TestBuilder()
.column(10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.column(50, 52, 54, 56, 58, 60, 62, 64, 66, null)
.column("A", "B", "C", "D", "E", "F", "G", "H", "I", "J")
.decimal32Column(-3, 10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.decimal64Column(-8, 50L, 52L, 54L, 56L, 58L, 60L, 62L, 64L, 66L, null)
.build()) {
splits = t1.contiguousSplit(2, 5, 9);
assertEquals(4, splits.length);
assertEquals(2, splits[0].getRowCount());
assertEquals(2, splits[0].getTable().getRowCount());
assertEquals(3, splits[1].getRowCount());
assertEquals(3, splits[1].getTable().getRowCount());
assertEquals(4, splits[2].getRowCount());
assertEquals(4, splits[2].getTable().getRowCount());
assertEquals(1, splits[3].getRowCount());
assertEquals(1, splits[3].getTable().getRowCount());
} finally {
if (splits != null) {
for (int i = 0; i < splits.length; i++) {
splits[i].close();
}
}
}
}
@Test
void testContiguousSplitWithStringsChunked() {
try (Table t1 = new Table.TestBuilder()
.column(10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.column(50, 52, 54, 56, 58, 60, 62, 64, 66, null)
.column("A", "B", "C", "D", "E", "F", "G", "H", "I", "J")
.decimal32Column(-3, 10, 12, 14, 16, 18, 20, 22, 24, null, 28)
.decimal64Column(-8, 50L, 52L, 54L, 56L, 58L, 60L, 62L, 64L, 66L, null)
.build();
DeviceMemoryBuffer bounceBuffer = DeviceMemoryBuffer.allocate(2L*1024*1024);
ChunkedPack cp = t1.makeChunkedPack(2L*1024*1024);
PackedColumnMetadata meta = cp.buildMetadata()) {
// unpack to bounce buffer
assertEquals(true, cp.hasNext());
assertEquals(cp.getTotalContiguousSize(), cp.next(bounceBuffer));
assertEquals(false, cp.hasNext());
try (Table unpacked = Table.fromPackedTable(meta.getMetadataDirectBuffer(), bounceBuffer)) {
assertTablesAreEqual(t1, unpacked);
}
}
}
  @Test
  void testPartStability() {
    // Verifies hash partitioning is stable: the value 0 must always hash into the
    // same partition regardless of how many other rows are in the table.
    final int PARTS = 5;
    int expectedPart = -1;
    // First partition a single-row table holding 0 to discover which partition it maps to.
    try (Table start = new Table.TestBuilder().column(0).build();
        PartitionedTable out = start.onColumns(0).hashPartition(PARTS)) {
      // Lets figure out what partitions this is a part of.
      // NOTE(review): assumes getPartitions() entries are offsets that become
      // non-zero once the single row has been placed — confirm against
      // PartitionedTable.getPartitions() semantics.
      int[] parts = out.getPartitions();
      for (int i = 0; i < parts.length; i++) {
        if (parts[i] > 0) {
          expectedPart = i;
        }
      }
    }
    // Now partition tables of 1..19 rows (values 0..n-1) and verify 0 still lands
    // in the partition discovered above.
    final int COUNT = 20;
    for (int numEntries = 1; numEntries < COUNT; numEntries++) {
      try (ColumnVector data = ColumnVector.build(DType.INT32, numEntries, Range.appendInts(0, numEntries));
          Table t = new Table(data);
          PartitionedTable out = t.onColumns(0).hashPartition(PARTS);
          HostColumnVector tmp = out.getColumn(0).copyToHost()) {
        // Now we need to get the range out for the partition we expect
        int[] parts = out.getPartitions();
        int start = expectedPart == 0 ? 0 : parts[expectedPart - 1];
        int end = parts[expectedPart];
        boolean found = false;
        // Scan only the rows belonging to the expected partition for the value 0.
        for (int i = start; i < end; i++) {
          if (tmp.getInt(i) == 0) {
            found = true;
            break;
          }
        }
        assertTrue(found);
      }
    }
  }
  @Test
  void testPartition() {
    // Explicit partition map: odd values go to partition 1, even values to
    // partition 2, partition 0 stays empty (offsets {0, 0, 5}).
    try (Table t = new Table.TestBuilder()
        .column(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        .build();
        ColumnVector parts = ColumnVector
            .fromInts(1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
        PartitionedTable pt = t.partition(parts, 3)) {
      assertEquals(0, pt.getPartitions()[0]);
      // (kept as in original) verify the partition offsets first
      assertArrayEquals(new int[]{0, 0, 5}, pt.getPartitions());
      // order within partitions is not guaranteed, so sort each partition to compare
      ColumnVector[] slicedColumns = pt.getTable().getColumn(0).slice(0, 5, 5, 10);
      try (Table part1 = new Table(slicedColumns[0]);
          Table part1Sorted = part1.orderBy(OrderByArg.asc(0));
          Table part1Expected = new Table.TestBuilder().column(1, 3, 5, 7, 9).build();
          Table part2 = new Table(slicedColumns[1]);
          Table part2Sorted = part2.orderBy(OrderByArg.asc(0));
          Table part2Expected = new Table.TestBuilder().column(2, 4, 6, 8, 10).build()) {
        assertTablesAreEqual(part1Expected, part1Sorted);
        assertTablesAreEqual(part2Expected, part2Sorted);
      } finally {
        // slice() hands back owned columns; close them even if assertions fail
        for (ColumnVector c : slicedColumns) {
          c.close();
        }
      }
    }
  }
@Test
void testIdentityHashPartition() {
final int count = 1024 * 1024;
try (ColumnVector aIn = ColumnVector.build(DType.INT64, count, Range.appendLongs(count));
ColumnVector bIn = ColumnVector.build(DType.INT32, count, (b) -> {
for (int i = 0; i < count; i++) {
b.append(i / 2);
}
});
ColumnVector cIn = ColumnVector.build(DType.STRING, count, (b) -> {
for (int i = 0; i < count; i++) {
b.appendUTF8String(String.valueOf(i).getBytes());
}
})) {
HashSet<Long> expected = new HashSet<>();
for (long i = 0; i < count; i++) {
expected.add(i);
}
try (Table input = new Table(new ColumnVector[]{aIn, bIn, cIn});
PartitionedTable output = input.onColumns(0).hashPartition(HashType.IDENTITY, 5)) {
int[] parts = output.getPartitions();
assertEquals(5, parts.length);
assertEquals(0, parts[0]);
int previous = 0;
long rows = 0;
for (int i = 1; i < parts.length; i++) {
assertTrue(parts[i] >= previous);
rows += parts[i] - previous;
previous = parts[i];
}
assertTrue(rows <= count);
try (HostColumnVector aOut = output.getColumn(0).copyToHost();
HostColumnVector bOut = output.getColumn(1).copyToHost();
HostColumnVector cOut = output.getColumn(2).copyToHost()) {
for (int i = 0; i < count; i++) {
long fromA = aOut.getLong(i);
long fromB = bOut.getInt(i);
String fromC = cOut.getJavaString(i);
assertTrue(expected.remove(fromA));
assertEquals(fromA / 2, fromB);
assertEquals(String.valueOf(fromA), fromC, "At Index " + i);
}
assertTrue(expected.isEmpty());
}
}
}
}
@Test
void testHashPartition() {
final int count = 1024 * 1024;
try (ColumnVector aIn = ColumnVector.build(DType.INT64, count, Range.appendLongs(count));
ColumnVector bIn = ColumnVector.build(DType.INT32, count, (b) -> {
for (int i = 0; i < count; i++) {
b.append(i / 2);
}
});
ColumnVector cIn = ColumnVector.build(DType.STRING, count, (b) -> {
for (int i = 0; i < count; i++) {
b.appendUTF8String(String.valueOf(i).getBytes());
}
})) {
HashSet<Long> expected = new HashSet<>();
for (long i = 0; i < count; i++) {
expected.add(i);
}
try (Table input = new Table(new ColumnVector[]{aIn, bIn, cIn});
PartitionedTable output = input.onColumns(0).hashPartition(5)) {
int[] parts = output.getPartitions();
assertEquals(5, parts.length);
assertEquals(0, parts[0]);
int previous = 0;
long rows = 0;
for (int i = 1; i < parts.length; i++) {
assertTrue(parts[i] >= previous);
rows += parts[i] - previous;
previous = parts[i];
}
assertTrue(rows <= count);
try (HostColumnVector aOut = output.getColumn(0).copyToHost();
HostColumnVector bOut = output.getColumn(1).copyToHost();
HostColumnVector cOut = output.getColumn(2).copyToHost()) {
for (int i = 0; i < count; i++) {
long fromA = aOut.getLong(i);
long fromB = bOut.getInt(i);
String fromC = cOut.getJavaString(i);
assertTrue(expected.remove(fromA));
assertEquals(fromA / 2, fromB);
assertEquals(String.valueOf(fromA), fromC, "At Index " + i);
}
assertTrue(expected.isEmpty());
}
}
}
}
  @Test
  void testSerializationRoundTripEmpty() throws IOException {
    // Zero-row tables — including nested list-of-string, map (list of struct of
    // two strings) and struct columns — must survive a serialize/deserialize
    // round trip and come back as a contiguous table with a backing buffer.
    DataType listStringsType = new ListType(true, new BasicType(true, DType.STRING));
    DataType mapType = new ListType(true,
        new StructType(true,
            new BasicType(false, DType.STRING),
            new BasicType(false, DType.STRING)));
    DataType structType = new StructType(true,
        new BasicType(true, DType.INT8),
        new BasicType(false, DType.FLOAT32));
    try (ColumnVector emptyInt = ColumnVector.fromInts();
        ColumnVector emptyDouble = ColumnVector.fromDoubles();
        ColumnVector emptyString = ColumnVector.fromStrings();
        ColumnVector emptyListString = ColumnVector.fromLists(listStringsType);
        ColumnVector emptyMap = ColumnVector.fromLists(mapType);
        ColumnVector emptyStruct = ColumnVector.fromStructs(structType);
        Table t = new Table(emptyInt, emptyInt, emptyDouble, emptyString,
            emptyListString, emptyMap, emptyStruct)) {
      ByteArrayOutputStream bout = new ByteArrayOutputStream();
      JCudfSerialization.writeToStream(t, bout, 0, 0);
      ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
      DataInputStream din = new DataInputStream(bin);
      try (JCudfSerialization.TableAndRowCountPair result = JCudfSerialization.readTableFrom(din)) {
        assertTablesAreEqual(t, result.getTable());
        // the plain table and the contiguous table must be the same object graph
        assertEquals(result.getTable(), result.getContiguousTable().getTable());
        assertNotNull(result.getContiguousTable().getBuffer());
      }
    }
  }
@Test
void testSerializationZeroColumns() throws IOException {
ByteArrayOutputStream bout = new ByteArrayOutputStream();
JCudfSerialization.writeRowsToStream(bout, 10);
ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
try (JCudfSerialization.TableAndRowCountPair result = JCudfSerialization.readTableFrom(bin)) {
assertNull(result.getTable());
assertNull(result.getContiguousTable());
assertEquals(10, result.getNumRows());
}
}
@Test
void testSerializationZeroColsZeroRows() throws IOException {
ByteArrayOutputStream bout = new ByteArrayOutputStream();
JCudfSerialization.writeRowsToStream(bout, 0);
ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
try (JCudfSerialization.TableAndRowCountPair result = JCudfSerialization.readTableFrom(bin)) {
assertNull(result.getTable());
assertNull(result.getContiguousTable());
assertEquals(0, result.getNumRows());
}
}
  @Test
  void testSerializationRoundTripConcatOnHostEmpty() throws IOException {
    // Writes the same zero-row table (with nested types) to the stream twice,
    // reads both headers+buffers back on the host, and verifies that
    // readAndConcat of the two empty pieces reproduces the empty table.
    DataType listStringsType = new ListType(true, new BasicType(true, DType.STRING));
    DataType mapType = new ListType(true,
        new StructType(true,
            new BasicType(false, DType.STRING),
            new BasicType(false, DType.STRING)));
    DataType structType = new StructType(true,
        new BasicType(true, DType.INT8),
        new BasicType(false, DType.FLOAT32));
    try (ColumnVector emptyInt = ColumnVector.fromInts();
        ColumnVector emptyDouble = ColumnVector.fromDoubles();
        ColumnVector emptyString = ColumnVector.fromStrings();
        ColumnVector emptyListString = ColumnVector.fromLists(listStringsType);
        ColumnVector emptyMap = ColumnVector.fromLists(mapType);
        ColumnVector emptyStruct = ColumnVector.fromStructs(structType);
        Table t = new Table(emptyInt, emptyInt, emptyDouble, emptyString,
            emptyListString, emptyMap, emptyStruct)) {
      ByteArrayOutputStream bout = new ByteArrayOutputStream();
      JCudfSerialization.writeToStream(t, bout, 0, 0);
      JCudfSerialization.writeToStream(t, bout, 0, 0);
      ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
      DataInputStream din = new DataInputStream(bin);
      ArrayList<JCudfSerialization.SerializedTableHeader> headers = new ArrayList<>();
      List<HostMemoryBuffer> buffers = new ArrayList<>();
      try {
        JCudfSerialization.SerializedTableHeader head;
        long numRows = 0;
        // Read header/buffer pairs until an uninitialized header marks end-of-stream.
        do {
          head = new JCudfSerialization.SerializedTableHeader(din);
          if (head.wasInitialized()) {
            HostMemoryBuffer buff = hostMemoryAllocator.allocate(head.getDataLen());
            buffers.add(buff);
            JCudfSerialization.readTableIntoBuffer(din, head, buff);
            assert head.wasDataRead();
            numRows += head.getNumRows();
            assert numRows <= Integer.MAX_VALUE;
            headers.add(head);
          }
        } while (head.wasInitialized());
        assert numRows == t.getRowCount();
        // Concatenating the two empty pieces must equal the original empty table.
        try (Table found = JCudfSerialization.readAndConcat(
            headers.toArray(new JCudfSerialization.SerializedTableHeader[headers.size()]),
            buffers.toArray(new HostMemoryBuffer[buffers.size()]))) {
          assertTablesAreEqual(t, found);
        }
      } finally {
        // host buffers are manually managed; always release them
        for (HostMemoryBuffer buff: buffers) {
          buff.close();
        }
      }
    }
  }
  @Test
  void testSerializationRoundTripToHostEmpty() throws IOException {
    // Host-side round trip (unpackHostColumnVectors) for a zero-row table with
    // nested list/map/struct columns; delegates to the shared helper below.
    DataType listStringsType = new ListType(true, new BasicType(true, DType.STRING));
    DataType mapType = new ListType(true,
        new StructType(true,
            new BasicType(false, DType.STRING),
            new BasicType(false, DType.STRING)));
    DataType structType = new StructType(true,
        new BasicType(true, DType.INT8),
        new BasicType(false, DType.FLOAT32));
    try (ColumnVector emptyInt = ColumnVector.fromInts();
        ColumnVector emptyDouble = ColumnVector.fromDoubles();
        ColumnVector emptyString = ColumnVector.fromStrings();
        ColumnVector emptyListString = ColumnVector.fromLists(listStringsType);
        ColumnVector emptyMap = ColumnVector.fromLists(mapType);
        ColumnVector emptyStruct = ColumnVector.fromStructs(structType);
        Table t = new Table(emptyInt, emptyInt, emptyDouble, emptyString,
            emptyListString, emptyMap, emptyStruct)) {
      testSerializationRoundTripToHost(t);
    }
  }
  @Test
  void testRoundRobinPartition() {
    // Round-robin partitions 21 rows of mixed types into 3 partitions with start
    // offsets 0/7/14, exercising start offsets 0, 1 and 2. The expected tables
    // spell out the full reordered data for each start offset.
    try (Table t = new Table.TestBuilder()
        .column( 100, 202, 3003, 40004, 5, -60, 1, null, 3, null, 5, null, 7, null, 9, null, 11, null, 13, null, 15)
        .column( true, true, false, false, true, null, true, true, null, false, false, null, true, true, null, false, false, null, true, true, null)
        .column( (byte)1, (byte)2, null, (byte)4, (byte)5, (byte)6, (byte)1, (byte)2, (byte)3, null, (byte)5, (byte)6, (byte)7, null, (byte)9, (byte)10, (byte)11, null, (byte)13, (byte)14, (byte)15)
        .column((short)6, (short)5, (short)4, null, (short)2, (short)1, (short)1, (short)2, (short)3, null, (short)5, (short)6, (short)7, null, (short)9, (short)10, null, (short)12, (short)13, (short)14, null)
        .column( 1L, null, 1001L, 50L, -2000L, null, 1L, 2L, 3L, 4L, null, 6L, 7L, 8L, 9L, null, 11L, 12L, 13L, 14L, null)
        .column( 10.1f, 20f, Float.NaN, 3.1415f, -60f, null, 1f, 2f, 3f, 4f, 5f, null, 7f, 8f, 9f, 10f, 11f, null, 13f, 14f, 15f)
        .column( 10.1, 20.0, 33.1, 3.1415, -60.5, null, 1., 2., 3., 4., 5., 6., null, 8., 9., 10., 11., 12., null, 14., 15.)
        .timestampDayColumn(99, 100, 101, 102, 103, 104, 1, 2, 3, 4, 5, 6, 7, null, 9, 10, 11, 12, 13, null, 15)
        .timestampMillisecondsColumn(9L, 1006L, 101L, 5092L, null, 88L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, null, 10L, 11L, 12L, 13L, 14L, 15L)
        .timestampSecondsColumn(1L, null, 3L, 4L, 5L, 6L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, null, 11L, 12L, 13L, 14L, 15L)
        .column( "A", "B", "C", "D", null, "TESTING", "1", "2", "3", "4", "5", "6", "7", null, "9", "10", "11", "12", "13", null, "15")
        .column( "A", "A", "C", "C", null, "TESTING", "1", "2", "3", "4", "5", "6", "7", null, "9", "10", "11", "12", "13", null, "15")
        .decimal32Column(-3, 100, 202, 3003, 40004, 5, -60, 1, null, 3, null, 5, null, 7, null, 9, null, 11, null, 13, null, 15)
        .decimal64Column( -8, 1L, null, 1001L, 50L, -2000L, null, 1L, 2L, 3L, 4L, null, 6L, 7L, 8L, 9L, null, 11L, 12L, 13L, 14L, null)
        .build()) {
      // start offset 0: rows 0,3,6,... land in partition 0, rows 1,4,7,... in 1, etc.
      try (Table expectedTable = new Table.TestBuilder()
          .column( 100, 40004, 1, null, 7, null, 13, 202, 5, null, 5, null, 11, null, 3003, -60, 3, null, 9, null, 15)
          .column( true, false, true, false, true, false, true, true, true, true, false, true, false, true, false, null, null, null, null, null, null)
          .column( (byte)1, (byte)4, (byte)1, null, (byte)7, (byte)10, (byte)13, (byte)2, (byte)5, (byte)2, (byte)5, null, (byte)11, (byte)14, null, (byte)6, (byte)3, (byte)6, (byte)9, null, (byte)15)
          .column((short)6, null, (short)1, null, (short)7, (short)10, (short)13, (short)5, (short)2, (short)2, (short)5, null, null, (short)14, (short)4, (short)1, (short)3, (short)6, (short)9, (short)12, null)
          .column( 1L, 50L, 1L, 4L, 7L, null, 13L, null, -2000L, 2L, null, 8L, 11L, 14L, 1001L, null, 3L, 6L, 9L, 12L, null)
          .column( 10.1f, 3.1415f, 1f, 4f, 7f, 10f, 13f, 20f, -60f, 2f, 5f, 8f, 11f, 14f, Float.NaN, null, 3f, null, 9f, null, 15f)
          .column( 10.1, 3.1415, 1., 4., null, 10., null, 20.0, -60.5, 2., 5., 8., 11., 14., 33.1, null, 3., 6., 9., 12., 15.)
          .timestampDayColumn(99, 102, 1, 4, 7, 10, 13, 100, 103, 2, 5, null, 11, null, 101, 104, 3, 6, 9, 12, 15)
          .timestampMillisecondsColumn(9L, 5092L, 1L, 4L, 7L, 10L, 13L, 1006L, null, 2L, 5L, 8L, 11L, 14L, 101L, 88L, 3L, 6L, null, 12L, 15L)
          .timestampSecondsColumn(1L, 4L, 1L, 4L, 7L, null, 13L, null, 5L, 2L, 5L, 8L, 11L, 14L, 3L, 6L, 3L, 6L, 9L, 12L, 15L)
          .column( "A", "D", "1", "4", "7", "10", "13", "B", null, "2", "5", null, "11", null, "C", "TESTING", "3", "6", "9", "12", "15")
          .column( "A", "C", "1", "4", "7", "10", "13", "A", null, "2", "5", null, "11", null, "C", "TESTING", "3", "6", "9", "12", "15")
          .decimal32Column(-3, 100, 40004, 1, null, 7, null, 13, 202, 5, null, 5, null, 11, null, 3003, -60, 3, null, 9, null, 15)
          .decimal64Column(-8, 1L, 50L, 1L, 4L, 7L, null, 13L, null, -2000L, 2L, null, 8L, 11L, 14L, 1001L, null, 3L, 6L, 9L, 12L, null)
          .build();
          PartitionedTable pt = t.roundRobinPartition(3, 0)) {
        assertTablesAreEqual(expectedTable, pt.getTable());
        int[] parts = pt.getPartitions();
        assertEquals(3, parts.length);
        assertEquals(0, parts[0]);
        assertEquals(7, parts[1]);
        assertEquals(14, parts[2]);
      }
      // start offset 1: same rotation shifted by one partition.
      try (Table expectedTable = new Table.TestBuilder()
          .column( 3003, -60, 3, null, 9, null, 15, 100, 40004, 1, null, 7, null, 13, 202, 5, null, 5, null, 11, null)
          .column( false, null, null, null, null, null, null, true, false, true, false, true, false, true, true, true, true, false, true, false, true)
          .column( null, (byte)6, (byte)3, (byte)6, (byte)9, null, (byte)15, (byte)1, (byte)4, (byte)1, null, (byte)7, (byte)10, (byte)13, (byte)2, (byte)5, (byte)2, (byte)5, null, (byte)11, (byte)14)
          .column( (short)4, (short)1, (short)3, (short)6, (short)9, (short)12, null,(short)6, null, (short)1, null, (short)7, (short)10, (short)13, (short)5, (short)2, (short)2, (short)5, null, null, (short)14)
          .column( 1001L, null, 3L, 6L, 9L, 12L, null, 1L, 50L, 1L, 4L, 7L, null, 13L, null, -2000L, 2L, null, 8L, 11L, 14L)
          .column( Float.NaN, null, 3f, null, 9f, null, 15f, 10.1f, 3.1415f, 1f, 4f, 7f, 10f, 13f, 20f, -60f, 2f, 5f, 8f, 11f, 14f)
          .column( 33.1, null, 3., 6., 9., 12., 15., 10.1, 3.1415, 1., 4., null, 10., null, 20.0, -60.5, 2., 5., 8., 11., 14.)
          .timestampDayColumn(101, 104, 3, 6, 9, 12, 15, 99, 102, 1, 4, 7, 10, 13, 100, 103, 2, 5, null, 11, null)
          .timestampMillisecondsColumn(101L, 88L, 3L, 6L, null, 12L, 15L, 9L, 5092L, 1L, 4L, 7L, 10L, 13L, 1006L, null, 2L, 5L, 8L, 11L, 14L)
          .timestampSecondsColumn(3L, 6L, 3L, 6L, 9L, 12L, 15L, 1L, 4L, 1L, 4L, 7L, null, 13L, null, 5L, 2L, 5L, 8L, 11L, 14L)
          .column( "C", "TESTING", "3", "6", "9", "12", "15", "A", "D", "1", "4", "7", "10", "13", "B", null, "2", "5", null, "11", null)
          .column( "C", "TESTING", "3", "6", "9", "12", "15", "A", "C", "1", "4", "7", "10", "13", "A", null, "2", "5", null, "11", null)
          .decimal32Column(-3, 3003, -60, 3, null, 9, null, 15, 100, 40004, 1, null, 7, null, 13, 202, 5, null, 5, null, 11, null)
          .decimal64Column(-8, 1001L, null, 3L, 6L, 9L, 12L, null, 1L, 50L, 1L, 4L, 7L, null, 13L, null, -2000L, 2L, null, 8L, 11L, 14L)
          .build();
          PartitionedTable pt = t.roundRobinPartition(3, 1)) {
        assertTablesAreEqual(expectedTable, pt.getTable());
        int[] parts = pt.getPartitions();
        assertEquals(3, parts.length);
        assertEquals(0, parts[0]);
        assertEquals(7, parts[1]);
        assertEquals(14, parts[2]);
      }
      // start offset 2: rotation shifted by two partitions.
      try (Table expectedTable = new Table.TestBuilder()
          .column( 202, 5, null, 5, null, 11, null, 3003, -60, 3, null, 9, null, 15, 100, 40004, 1, null, 7, null, 13)
          .column( true, true, true, false, true, false, true, false, null, null, null, null, null, null, true, false, true, false, true, false, true)
          .column( (byte)2, (byte)5, (byte)2, (byte)5, null, (byte)11, (byte)14, null, (byte)6, (byte)3, (byte)6, (byte)9, null, (byte)15, (byte)1, (byte)4, (byte)1, null, (byte)7, (byte)10, (byte)13)
          .column( (short)5, (short)2, (short)2, (short)5, null, null, (short)14, (short)4, (short)1, (short)3, (short)6, (short)9, (short)12, null,(short)6, null, (short)1, null, (short)7, (short)10, (short)13)
          .column( null, -2000L, 2L, null, 8L, 11L, 14L, 1001L, null, 3L, 6L, 9L, 12L, null, 1L, 50L, 1L, 4L, 7L, null, 13L)
          .column( 20f, -60f, 2f, 5f, 8f, 11f, 14f, Float.NaN, null, 3f, null, 9f, null, 15f, 10.1f, 3.1415f, 1f, 4f, 7f, 10f, 13f)
          .column( 20.0, -60.5, 2., 5., 8., 11., 14., 33.1, null, 3., 6., 9., 12., 15., 10.1, 3.1415, 1., 4., null, 10., null)
          .timestampDayColumn(100, 103, 2, 5, null, 11, null, 101, 104, 3, 6, 9, 12, 15, 99, 102, 1, 4, 7, 10, 13)
          .timestampMillisecondsColumn(1006L, null, 2L, 5L, 8L, 11L, 14L, 101L, 88L, 3L, 6L, null, 12L, 15L, 9L, 5092L, 1L, 4L, 7L, 10L, 13L)
          .timestampSecondsColumn(null, 5L, 2L, 5L, 8L, 11L, 14L, 3L, 6L, 3L, 6L, 9L, 12L, 15L, 1L, 4L, 1L, 4L, 7L, null, 13L)
          .column( "B", null, "2", "5", null, "11", null, "C", "TESTING", "3", "6", "9", "12", "15", "A", "D", "1", "4", "7", "10", "13")
          .column( "A", null, "2", "5", null, "11", null, "C", "TESTING", "3", "6", "9", "12", "15", "A", "C", "1", "4", "7", "10", "13")
          .decimal32Column(-3, 202, 5, null, 5, null, 11, null, 3003, -60, 3, null, 9, null, 15, 100, 40004, 1, null, 7, null, 13)
          .decimal64Column(-8, null, -2000L, 2L, null, 8L, 11L, 14L, 1001L, null, 3L, 6L, 9L, 12L, null, 1L, 50L, 1L, 4L, 7L, null, 13L)
          .build();
          PartitionedTable pt = t.roundRobinPartition(3, 2)) {
        assertTablesAreEqual(expectedTable, pt.getTable());
        int[] parts = pt.getPartitions();
        assertEquals(3, parts.length);
        assertEquals(0, parts[0]);
        assertEquals(7, parts[1]);
        assertEquals(14, parts[2]);
      }
    }
  }
  @Test
  void testSerializationRoundTripConcatHostSide() throws IOException {
    // Serializes the test table in slices of every possible size, reads all
    // slice headers/buffers back on the host, and verifies readAndConcat
    // reassembles the original table for each slice size.
    try (Table t = buildTestTable()) {
      for (int sliceAmount = 1; sliceAmount < t.getRowCount(); sliceAmount ++) {
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        for (int i = 0; i < t.getRowCount(); i += sliceAmount) {
          int len = (int) Math.min(t.getRowCount() - i, sliceAmount);
          JCudfSerialization.writeToStream(t, bout, i, len);
        }
        ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
        DataInputStream din = new DataInputStream(bin);
        ArrayList<JCudfSerialization.SerializedTableHeader> headers = new ArrayList<>();
        List<HostMemoryBuffer> buffers = new ArrayList<>();
        try {
          JCudfSerialization.SerializedTableHeader head;
          long numRows = 0;
          // Read header/buffer pairs until an uninitialized header signals EOF.
          do {
            head = new JCudfSerialization.SerializedTableHeader(din);
            if (head.wasInitialized()) {
              HostMemoryBuffer buff = hostMemoryAllocator.allocate(100 * 1024);
              buffers.add(buff);
              JCudfSerialization.readTableIntoBuffer(din, head, buff);
              assert head.wasDataRead();
              numRows += head.getNumRows();
              assert numRows <= Integer.MAX_VALUE;
              headers.add(head);
            }
          } while (head.wasInitialized());
          assert numRows == t.getRowCount();
          // All slices concatenated must reproduce the full table.
          try (Table found = JCudfSerialization.readAndConcat(
              headers.toArray(new JCudfSerialization.SerializedTableHeader[headers.size()]),
              buffers.toArray(new HostMemoryBuffer[buffers.size()]))) {
            assertPartialTablesAreEqual(t, 0, t.getRowCount(), found, true, false);
          }
        } finally {
          // host buffers are manually managed; always release them
          for (HostMemoryBuffer buff: buffers) {
            buff.close();
          }
        }
      }
    }
  }
  @Test
  void testSerializationRoundTripToHost() throws IOException {
    // Host-side round trip for the full mixed-type test table; the shared helper
    // below performs the serialize/unpack/compare work.
    try (Table t = buildTestTable()) {
      testSerializationRoundTripToHost(t);
    }
  }
  /**
   * Serializes {@code t} to a byte stream, reads it back into a single host
   * buffer, unpacks it into host column vectors, and verifies each column
   * matches the original. Also checks that closing child column views multiple
   * times is tolerated.
   *
   * @param t the table to round trip (not closed by this method)
   */
  private void testSerializationRoundTripToHost(Table t) throws IOException {
    long rowCount = t.getRowCount();
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    JCudfSerialization.writeToStream(t, bout, 0, rowCount);
    ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
    DataInputStream din = new DataInputStream(bin);
    JCudfSerialization.SerializedTableHeader header =
        new JCudfSerialization.SerializedTableHeader(din);
    assertTrue(header.wasInitialized());
    try (HostMemoryBuffer buffer = hostMemoryAllocator.allocate(header.getDataLen())) {
      JCudfSerialization.readTableIntoBuffer(din, header, buffer);
      assertTrue(header.wasDataRead());
      HostColumnVector[] hostColumns =
          JCudfSerialization.unpackHostColumnVectors(header, buffer);
      try {
        assertEquals(t.getNumberOfColumns(), hostColumns.length);
        for (int i = 0; i < hostColumns.length; i++) {
          HostColumnVector actual = hostColumns[i];
          assertEquals(rowCount, actual.getRowCount());
          // compare against a host copy of the original device column
          try (HostColumnVector expected = t.getColumn(i).copyToHost()) {
            assertPartialColumnsAreEqual(expected, 0, rowCount, actual, "COLUMN " + i, true, false);
          }
        }
      } finally {
        for (HostColumnVector c: hostColumns) {
          // close child columns for multiple times should NOT throw exceptions
          for (int i = 0; i < c.getNumChildren(); i++) {
            c.getChildColumnView(i).close();
          }
          c.close();
        }
      }
    }
  }
  @Test
  void testConcatHost() throws IOException {
    // Serializes a slice (starting at row 10) of a 3x-concatenated table, then
    // host-concatenates the SAME header/buffer pair twice; the result must be
    // the slice repeated twice, as spelled out in `expected`.
    try (Table t1 = new Table.TestBuilder()
        .column(
            1, 2, null, 4, 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
            1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null)
        .decimal32Column(-3,
            1, 2, null, 4, 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
            1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null)
        .build();
        Table expected = new Table.TestBuilder()
            .column(
                null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null)
            .decimal32Column(-3,
                null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null,
                1, 2, null, 4 , 5, 6, 7, 8, 9, 10, null, 12, 13, 14, null, null)
            .build();
        Table t2 = t1.concatenate(t1, t1)) {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      JCudfSerialization.writeToStream(t2, out, 10, t2.getRowCount() - 10);
      DataInputStream in = new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
      JCudfSerialization.SerializedTableHeader header = new JCudfSerialization.SerializedTableHeader(in);
      assert header.wasInitialized();
      try (HostMemoryBuffer buff = hostMemoryAllocator.allocate(header.getDataLen())) {
        JCudfSerialization.readTableIntoBuffer(in, header, buff);
        assert header.wasDataRead();
        // reuse the same header/buffer twice to double the data
        try (Table result = JCudfSerialization.readAndConcat(
            new JCudfSerialization.SerializedTableHeader[] {header, header},
            new HostMemoryBuffer[] {buff, buff})) {
          assertPartialTablesAreEqual(expected, 0, expected.getRowCount(), result, true, false);
        }
      }
    }
  }
  @Test
  void testSerializationRoundTripSlicedHostSide() throws IOException {
    // For every slice size: serialize the table slice-by-slice, read all the
    // pieces on the host, re-serialize them with writeConcatedStream, and verify
    // that deserializing the re-serialized stream yields the full table.
    try (Table t = buildTestTable()) {
      for (int sliceAmount = 1; sliceAmount < t.getRowCount(); sliceAmount ++) {
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        for (int i = 0; i < t.getRowCount(); i += sliceAmount) {
          int len = (int) Math.min(t.getRowCount() - i, sliceAmount);
          JCudfSerialization.writeToStream(t, bout, i, len);
        }
        ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
        DataInputStream din = new DataInputStream(bin);
        ArrayList<JCudfSerialization.SerializedTableHeader> headers = new ArrayList<>();
        List<HostMemoryBuffer> buffers = new ArrayList<>();
        try {
          JCudfSerialization.SerializedTableHeader head;
          long numRows = 0;
          // Read header/buffer pairs until an uninitialized header signals EOF.
          do {
            head = new JCudfSerialization.SerializedTableHeader(din);
            if (head.wasInitialized()) {
              HostMemoryBuffer buff = hostMemoryAllocator.allocate(100 * 1024);
              buffers.add(buff);
              JCudfSerialization.readTableIntoBuffer(din, head, buff);
              assert head.wasDataRead();
              numRows += head.getNumRows();
              assert numRows <= Integer.MAX_VALUE;
              headers.add(head);
            }
          } while (head.wasInitialized());
          assert numRows == t.getRowCount();
          // Concatenate on the host into a single serialized stream.
          ByteArrayOutputStream bout2 = new ByteArrayOutputStream();
          JCudfSerialization.writeConcatedStream(
              headers.toArray(new JCudfSerialization.SerializedTableHeader[headers.size()]),
              buffers.toArray(new HostMemoryBuffer[buffers.size()]), bout2);
          ByteArrayInputStream bin2 = new ByteArrayInputStream(bout2.toByteArray());
          try (JCudfSerialization.TableAndRowCountPair found = JCudfSerialization.readTableFrom(bin2)) {
            assertPartialTablesAreEqual(t, 0, t.getRowCount(), found.getTable(), true, false);
            assertEquals(found.getTable(), found.getContiguousTable().getTable());
            assertNotNull(found.getContiguousTable().getBuffer());
          }
          // A second read past the end of the stream must report no table.
          JCudfSerialization.TableAndRowCountPair tp = JCudfSerialization.readTableFrom(bin2);
          assertNull(tp.getTable());
          assertNull(tp.getContiguousTable());
        } finally {
          for (HostMemoryBuffer buff: buffers) {
            buff.close();
          }
        }
      }
    }
  }
  /**
   * Serializes each row slice of the test table into its own stream and verifies that each
   * slice deserializes back to the matching portion of the original table, for every slice size.
   */
  @Test
  void testSerializationRoundTripSliced() throws IOException {
    try (Table t = buildTestTable()) {
      for (int sliceAmount = 1; sliceAmount < t.getRowCount(); sliceAmount ++) {
        for (int i = 0; i < t.getRowCount(); i += sliceAmount) {
          ByteArrayOutputStream bout = new ByteArrayOutputStream();
          int len = (int) Math.min(t.getRowCount() - i, sliceAmount);
          JCudfSerialization.writeToStream(t, bout, i, len);
          ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
          try (JCudfSerialization.TableAndRowCountPair found = JCudfSerialization.readTableFrom(bin)) {
            // Column-for-column comparison only covers the whole table when the slice is the
            // full row range, hence the boolean flag below.
            assertPartialTablesAreEqual(t, i, len, found.getTable(), i == 0 && len == t.getRowCount(), false);
            assertEquals(found.getTable(), found.getContiguousTable().getTable());
            assertNotNull(found.getContiguousTable().getBuffer());
          }
          // A second read from the exhausted stream must yield an empty result.
          JCudfSerialization.TableAndRowCountPair tp = JCudfSerialization.readTableFrom(bin);
          assertNull(tp.getTable());
          assertNull(tp.getContiguousTable());
        }
      }
    }
  }
  /**
   * Verifies a table can be reconstructed from a contiguous table's packed metadata plus a
   * copy of its device buffer via {@code Table.fromPackedTable}.
   */
  @Test
  void testSerializationReconstructFromMetadata() throws IOException {
    try (Table t = buildTestTable()) {
      ByteArrayOutputStream bout = new ByteArrayOutputStream();
      JCudfSerialization.writeToStream(t, bout, 0, t.getRowCount());
      ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
      try (JCudfSerialization.TableAndRowCountPair trcp = JCudfSerialization.readTableFrom(bin)) {
        ContiguousTable contigTable = trcp.getContiguousTable();
        DeviceMemoryBuffer oldbuf = contigTable.getBuffer();
        // Copy the device buffer so the reconstruction does not share memory with the source.
        try (DeviceMemoryBuffer newbuf = oldbuf.sliceWithCopy(0, oldbuf.getLength())) {
          ByteBuffer metadata = contigTable.getMetadataDirectBuffer();
          try (Table newTable = Table.fromPackedTable(metadata, newbuf)) {
            assertTablesAreEqual(t, newTable);
          }
        }
      }
    }
  }
@Test
void testValidityFill() {
byte[] buff = new byte[2];
buff[0] = 0;
int bitsToFill = (buff.length * 8) - 1;
assertEquals(bitsToFill, JCudfSerialization.fillValidity(buff, 1, bitsToFill));
assertEquals(buff[0], 0xFFFFFFFE);
assertEquals(buff[1], 0xFFFFFFFF);
}
  /**
   * Exercises sorted group-by scan aggregations (running sum, count, min, max) plus the
   * rank family (rank, dense rank, percent rank) over pre-sorted two-column keys.
   */
  @Test
  void testGroupByScan() {
    try (Table t1 = new Table.TestBuilder()
        .column( "1", "1", "1", "1", "1", "1", "1", "2", "2", "2", "2") // GBY Key#0
        .column( 0, 1, 3, 3, 5, 5, 5, 5, 5, 5, 5) // GBY Key#1
        .column(12.0, 14.0, 13.0, 17.0, 17.0, 17.0, null, null, 11.0, null, 10.0)
        .column( -9, null, -5, 0, 4, 4, 8, 2, 2, 2, null)
        .build()) {
      try (Table result = t1
          .groupBy(GroupByOptions.builder()
              .withKeysSorted(true)
              .withKeysDescending(false, false)
              .build(), 0, 1)
          .scan(GroupByScanAggregation.sum().onColumn(2),
              GroupByScanAggregation.count(NullPolicy.INCLUDE).onColumn(2),
              GroupByScanAggregation.min().onColumn(2),
              GroupByScanAggregation.max().onColumn(2),
              GroupByScanAggregation.rank().onColumn(3),
              GroupByScanAggregation.denseRank().onColumn(3),
              GroupByScanAggregation.percentRank().onColumn(3));
           Table expected = new Table.TestBuilder()
               .column( "1", "1", "1", "1", "1", "1", "1", "2", "2", "2", "2")
               .column( 0, 1, 3, 3, 5, 5, 5, 5, 5, 5, 5)
               .column(12.0, 14.0, 13.0, 30.0, 17.0, 34.0, null, null, 11.0, null, 21.0)
               .column( 0, 0, 0, 1, 0, 1, 2, 0, 1, 2, 3) // odd why is this not 1 based?
               .column(12.0, 14.0, 13.0, 13.0, 17.0, 17.0, null, null, 11.0, null, 10.0)
               .column(12.0, 14.0, 13.0, 17.0, 17.0, 17.0, null, null, 11.0, null, 11.0)
               .column( 1, 1, 1, 2, 1, 1, 3, 1, 1, 1, 4)
               .column( 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 2)
               .column( 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
               .build()) {
        assertTablesAreEqual(expected, result);
      }
    }
  }
  /**
   * Exercises group-scoped null replacement: PRECEDING forward-fills and FOLLOWING
   * backward-fills nulls, each restricted to values within the same group.
   */
  @Test
  void testGroupByReplaceNulls() {
    try (Table t1 = new Table.TestBuilder()
        .column( "1", "1", "1", "1", "1", "1", "1", "2", "2", "2", "2")
        .column( 0, 1, 3, 3, 5, 5, 5, 5, 5, 5, 5)
        .column(null, 14.0, 13.0, 17.0, 17.0, 17.0, null, null, 11.0, null, null)
        .build()) {
      try (Table result = t1
          .groupBy(GroupByOptions.builder()
              .withKeysSorted(true)
              .withKeysDescending(false, false)
              .build(), 0, 1)
          .replaceNulls(ReplacePolicy.PRECEDING.onColumn(2),
              ReplacePolicy.FOLLOWING.onColumn(2));
           Table expected = new Table.TestBuilder()
               .column( "1", "1", "1", "1", "1", "1", "1", "2", "2", "2", "2")
               .column( 0, 1, 3, 3, 5, 5, 5, 5, 5, 5, 5)
               .column(null, 14.0, 13.0, 17.0, 17.0, 17.0, 17.0, null, 11.0, 11.0, 11.0)
               .column(null, 14.0, 13.0, 17.0, 17.0, 17.0, null, 11.0, 11.0, null, null)
               .build()) {
        assertTablesAreEqual(expected, result);
      }
    }
  }
  /**
   * Regression repro: approx percentiles from per-group t-digests where most groups have a
   * single value, so each percentile collapses to that value.
   */
  @Test
  void testGroupByApproxPercentileReproCase() {
    double[] percentiles = {0.25, 0.50, 0.75};
    try (Table t1 = new Table.TestBuilder()
        .column("a", "a", "b", "c", "d")
        .column(1084.0, 1719.0, 15948.0, 148029.0, 1269761.0)
        .build();
         Table t2 = t1
             .groupBy(0)
             .aggregate(GroupByAggregation.createTDigest(100).onColumn(1));
         // Sort by key so expected rows line up deterministically.
         Table sorted = t2.orderBy(OrderByArg.asc(0));
         ColumnVector actual = sorted.getColumn(1).approxPercentile(percentiles);
         ColumnVector expected = ColumnVector.fromLists(
             new ListType(false, new BasicType(false, DType.FLOAT64)),
             Arrays.asList(1084.0, 1084.0, 1719.0),
             Arrays.asList(15948.0, 15948.0, 15948.0),
             Arrays.asList(148029.0, 148029.0, 148029.0),
             Arrays.asList(1269761.0, 1269761.0, 1269761.0)
         )) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * Builds a t-digest per group and verifies the 25th/50th/75th approx percentiles for
   * small, exactly-representable groups.
   */
  @Test
  void testGroupByApproxPercentile() {
    double[] percentiles = {0.25, 0.50, 0.75};
    try (Table t1 = new Table.TestBuilder()
        .column("a", "a", "a", "b", "b", "b")
        .column(100, 150, 160, 70, 110, 160)
        .build();
         Table t2 = t1
             .groupBy(0)
             .aggregate(GroupByAggregation.createTDigest(1000).onColumn(1));
         Table sorted = t2.orderBy(OrderByArg.asc(0));
         ColumnVector actual = sorted.getColumn(1).approxPercentile(percentiles);
         ColumnVector expected = ColumnVector.fromLists(
             new ListType(false, new BasicType(false, DType.FLOAT64)),
             Arrays.asList(100d, 150d, 160d),
             Arrays.asList(70d, 110d, 160d)
         )) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * Verifies merging two identical sets of per-group t-digests (create, concatenate, merge)
   * yields the same approx percentiles as a single digest of the data.
   */
  @Test
  void testMergeApproxPercentile() {
    double[] percentiles = {0.25, 0.50, 0.75};
    try (Table t1 = new Table.TestBuilder()
        .column("a", "a", "a", "b", "b", "b")
        .column(100, 150, 160, 70, 110, 160)
        .build();
         Table t2 = t1
             .groupBy(0)
             .aggregate(GroupByAggregation.createTDigest(1000).onColumn(1));
         Table t3 = t1
             .groupBy(0)
             .aggregate(GroupByAggregation.createTDigest(1000).onColumn(1));
         // Concatenate two digest tables and merge them back down to one digest per key.
         Table t4 = Table.concatenate(t2, t3);
         Table t5 = t4
             .groupBy(0)
             .aggregate(GroupByAggregation.mergeTDigest(1000).onColumn(1));
         Table sorted = t5.orderBy(OrderByArg.asc(0));
         ColumnVector actual = sorted.getColumn(1).approxPercentile(percentiles);
         ColumnVector expected = ColumnVector.fromLists(
             new ListType(false, new BasicType(false, DType.FLOAT64)),
             Arrays.asList(100d, 150d, 160d),
             Arrays.asList(70d, 110d, 160d)
         )) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * Same as {@code testMergeApproxPercentile} but with the group/value assignment swapped,
   * catching ordering bugs in the merge path.
   */
  @Test
  void testMergeApproxPercentile2() {
    double[] percentiles = {0.25, 0.50, 0.75};
    try (Table t1 = new Table.TestBuilder()
        .column("a", "a", "a", "b", "b", "b")
        .column(70, 110, 160, 100, 150, 160)
        .build();
         Table t2 = t1
             .groupBy(0)
             .aggregate(GroupByAggregation.createTDigest(1000).onColumn(1));
         Table t3 = t1
             .groupBy(0)
             .aggregate(GroupByAggregation.createTDigest(1000).onColumn(1));
         // Concatenate two digest tables and merge them back down to one digest per key.
         Table t4 = Table.concatenate(t2, t3);
         Table t5 = t4
             .groupBy(0)
             .aggregate(GroupByAggregation.mergeTDigest(1000).onColumn(1));
         Table sorted = t5.orderBy(OrderByArg.asc(0));
         ColumnVector actual = sorted.getColumn(1).approxPercentile(percentiles);
         ColumnVector expected = ColumnVector.fromLists(
             new ListType(false, new BasicType(false, DType.FLOAT64)),
             Arrays.asList(70d, 110d, 160d),
             Arrays.asList(100d, 150d, 160d)
         )) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * Reduces a column to a single t-digest scalar and verifies the struct layout:
   * child 0 = centroid list, child 1 = min, child 2 = max.
   */
  @Test
  void testCreateTDigestReduction() {
    try (Table t1 = new Table.TestBuilder()
        .column(100, 150, 160, 70, 110, 160)
        .build();
         Scalar tdigest = t1.getColumn(0)
             .reduce(ReductionAggregation.createTDigest(1000), DType.STRUCT)) {
      assertEquals(DType.STRUCT, tdigest.getType());
      // The struct scalar's children must be closed; CloseableArray handles that.
      try (CloseableArray columns = CloseableArray.wrap(tdigest.getChildrenFromStructScalar())) {
        assertEquals(3, columns.size());
        try (HostColumnVector centroids = ((ColumnView) columns.get(0)).copyToHost();
             HostColumnVector min = ((ColumnView) columns.get(1)).copyToHost();
             HostColumnVector max = ((ColumnView) columns.get(2)).copyToHost()) {
          assertEquals(DType.LIST, centroids.getType());
          assertEquals(DType.FLOAT64, min.getType());
          assertEquals(DType.FLOAT64, max.getType());
          assertEquals(1, min.getRowCount());
          assertEquals(1, max.getRowCount());
          assertEquals(70, min.getDouble(0));
          assertEquals(160, max.getDouble(0));
        }
      }
    }
  }
@Test
void testMergeTDigestReduction() {
StructType centroidStruct = new StructType(false,
new BasicType(false, DType.FLOAT64), // mean
new BasicType(false, DType.FLOAT64)); // weight
ListType centroidList = new ListType(false, centroidStruct);
StructType tdigestType = new StructType(false,
centroidList,
new BasicType(false, DType.FLOAT64), // min
new BasicType(false, DType.FLOAT64)); // max
try (ColumnVector tdigests = ColumnVector.fromStructs(tdigestType,
new StructData(Arrays.asList(
new StructData(1.0, 100.0),
new StructData(2.0, 50.0)),
1.0, // min
2.0), // max
new StructData(Arrays.asList(
new StructData(3.0, 200.0),
new StructData(4.0, 99.0)),
3.0, // min
4.0)); // max
Scalar merged = tdigests.reduce(ReductionAggregation.mergeTDigest(1000), DType.STRUCT)) {
assertEquals(DType.STRUCT, merged.getType());
try (CloseableArray columns = CloseableArray.wrap(merged.getChildrenFromStructScalar())) {
assertEquals(3, columns.size());
try (HostColumnVector centroids = ((ColumnView) columns.get(0)).copyToHost();
HostColumnVector min = ((ColumnView) columns.get(1)).copyToHost();
HostColumnVector max = ((ColumnView) columns.get(2)).copyToHost()) {
assertEquals(3, columns.size());
assertEquals(DType.LIST, centroids.getType());
assertEquals(DType.FLOAT64, min.getType());
assertEquals(DType.FLOAT64, max.getType());
assertEquals(1, min.getRowCount());
assertEquals(1, max.getRowCount());
assertEquals(1.0, min.getDouble(0));
assertEquals(4.0, max.getDouble(0));
assertEquals(1, centroids.rows);
List list = centroids.getList(0);
assertEquals(4, list.size());
StructData data = (StructData) list.get(0);
assertEquals(1.0, data.dataRecord.get(0));
assertEquals(100.0, data.dataRecord.get(1));
data = (StructData) list.get(1);
assertEquals(2.0, data.dataRecord.get(0));
assertEquals(50.0, data.dataRecord.get(1));
data = (StructData) list.get(2);
assertEquals(3.0, data.dataRecord.get(0));
assertEquals(200.0, data.dataRecord.get(1));
data = (StructData) list.get(3);
assertEquals(4.0, data.dataRecord.get(0));
assertEquals(99.0, data.dataRecord.get(1));
}
}
}
}
  /**
   * Group-by histogram aggregation: per key, produces a list of (value, frequency) structs.
   * Results are key-sorted and element-sorted so they compare deterministically.
   */
  @Test
  void testGroupbyHistogram() {
    StructType histogramStruct = new StructType(false,
        new BasicType(false, DType.INT32), // values
        new BasicType(false, DType.INT64)); // frequencies
    ListType histogramList = new ListType(false, histogramStruct);
    // key = 0: values = [2, 2, -3, -2, 2]
    // key = 1: values = [2, 0, 5, 2, 1]
    // key = 2: values = [-3, 1, 1, 2, 2]
    try (Table input = new Table.TestBuilder()
        .column(2, 0, 2, 1, 1, 1, 0, 0, 0, 1, 2, 2, 1, 0, 2)
        .column(-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1, 2, 1, 2, 2)
        .build();
         Table result = input.groupBy(0)
             .aggregate(GroupByAggregation.histogram().onColumn(1));
         Table sortedResult = result.orderBy(OrderByArg.asc(0));
         // Sort each histogram's elements since aggregation output order is unspecified.
         ColumnVector sortedOutHistograms = sortedResult.getColumn(1).listSortRows(false, false);
         ColumnVector expectedKeys = ColumnVector.fromInts(0, 1, 2);
         ColumnVector expectedHistograms = ColumnVector.fromLists(histogramList,
             Arrays.asList(new StructData(-3, 1L), new StructData(-2, 1L), new StructData(2, 3L)),
             Arrays.asList(new StructData(0, 1L), new StructData(1, 1L), new StructData(2, 2L),
                 new StructData(5, 1L)),
             Arrays.asList(new StructData(-3, 1L), new StructData(1, 2L), new StructData(2, 2L)))
    ) {
      assertColumnsAreEqual(expectedKeys, sortedResult.getColumn(0));
      assertColumnsAreEqual(expectedHistograms, sortedOutHistograms);
    }
  }
  /**
   * Group-by merge-histogram aggregation: combines partial (value, frequency) histograms per
   * key, summing frequencies of matching values.
   */
  @Test
  void testGroupbyMergeHistogram() {
    StructType histogramStruct = new StructType(false,
        new BasicType(false, DType.INT32), // values
        new BasicType(false, DType.INT64)); // frequencies
    ListType histogramList = new ListType(false, histogramStruct);
    // key = 0: histograms = [[<-3, 1>, <-2, 1>, <2, 3>], [<0, 1>, <1, 1>], [<-3, 3>, <0, 1>, <1, 2>]]
    // key = 1: histograms = [[<-2, 1>, <1, 3>, <2, 2>], [<0, 2>, <1, 1>, <2, 2>]]
    try (Table input = new Table.TestBuilder()
        .column(0, 1, 0, 1, 0)
        .column(histogramStruct,
            new StructData[]{new StructData(-3, 1L), new StructData(-2, 1L), new StructData(2, 3L)},
            new StructData[]{new StructData(-2, 1L), new StructData(1, 3L), new StructData(2, 2L)},
            new StructData[]{new StructData(0, 1L), new StructData(1, 1L)},
            new StructData[]{new StructData(0, 2L), new StructData(1, 1L), new StructData(2, 2L)},
            new StructData[]{new StructData(-3, 3L), new StructData(0, 1L), new StructData(1, 2L)})
        .build();
         Table result = input.groupBy(0)
             .aggregate(GroupByAggregation.mergeHistogram().onColumn(1));
         Table sortedResult = result.orderBy(OrderByArg.asc(0));
         // Sort each histogram's elements since aggregation output order is unspecified.
         ColumnVector sortedOutHistograms = sortedResult.getColumn(1).listSortRows(false, false);
         ColumnVector expectedKeys = ColumnVector.fromInts(0, 1);
         ColumnVector expectedHistograms = ColumnVector.fromLists(histogramList,
             Arrays.asList(new StructData(-3, 4L), new StructData(-2, 1L), new StructData(0, 2L),
                 new StructData(1, 3L), new StructData(2, 3L)),
             Arrays.asList(new StructData(-2, 1L), new StructData(0, 2L), new StructData(1, 4L),
                 new StructData(2, 4L)))
    ) {
      assertColumnsAreEqual(expectedKeys, sortedResult.getColumn(0));
      assertColumnsAreEqual(expectedHistograms, sortedOutHistograms);
    }
  }
  /**
   * Histogram reduction: reduces an INT32 column to a single LIST scalar of
   * (value, frequency) structs covering the whole column.
   */
  @Test
  void testReductionHistogram() {
    StructType histogramStruct = new StructType(false,
        new BasicType(false, DType.INT32), // values
        new BasicType(false, DType.INT64)); // frequencies
    try (ColumnVector input = ColumnVector.fromInts(-3, 2, 1, 2, 0, 5, 2, -3, -2, 2, 1);
         Scalar result = input.reduce(ReductionAggregation.histogram(), DType.LIST);
         // Materialize the list scalar as a column and sort it for a stable comparison.
         ColumnVector resultCV = result.getListAsColumnView().copyToColumnVector();
         Table resultTable = new Table(resultCV);
         Table sortedResult = resultTable.orderBy(OrderByArg.asc(0));
         ColumnVector expectedHistograms = ColumnVector.fromStructs(histogramStruct,
             new StructData(-3, 2L), new StructData(-2, 1L), new StructData(0, 1L),
             new StructData(1, 2L), new StructData(2, 4L), new StructData(5, 1L))
    ) {
      assertColumnsAreEqual(expectedHistograms, sortedResult.getColumn(0));
    }
  }
  /**
   * Merge-histogram reduction: collapses a column of (value, frequency) structs into a single
   * LIST scalar with frequencies summed per distinct value.
   */
  @Test
  void testReductionMergeHistogram() {
    StructType histogramStruct = new StructType(false,
        new BasicType(false, DType.INT32), // values
        new BasicType(false, DType.INT64)); // frequencies
    try (ColumnVector input = ColumnVector.fromStructs(histogramStruct,
        new StructData(-3, 2L), new StructData(2, 1L), new StructData(1, 1L),
        new StructData(2, 2L), new StructData(0, 4L), new StructData(5, 1L),
        new StructData(2, 2L), new StructData(-3, 3L), new StructData(-2, 5L),
        new StructData(2, 3L), new StructData(1, 4L));
         Scalar result = input.reduce(ReductionAggregation.mergeHistogram(), DType.LIST);
         // Materialize the list scalar as a column and sort it for a stable comparison.
         ColumnVector resultCV = result.getListAsColumnView().copyToColumnVector();
         Table resultTable = new Table(resultCV);
         Table sortedResult = resultTable.orderBy(OrderByArg.asc(0));
         ColumnVector expectedHistograms = ColumnVector.fromStructs(histogramStruct,
             new StructData(-3, 5L), new StructData(-2, 5L), new StructData(0, 4L),
             new StructData(1, 5L), new StructData(2, 8L), new StructData(5, 1L))
    ) {
      assertColumnsAreEqual(expectedHistograms, sortedResult.getColumn(0));
    }
  }
  /**
   * Sorted group-by scan of min/max over a DECIMAL128 column (scale -4), including a null row
   * which propagates through both scans.
   */
  @Test
  void testGroupByMinMaxDecimal() {
    try (Table t1 = new Table.TestBuilder()
        .column( "1", "1", "1", "1", "2")
        .column(0, 1, 3 , 3, 4)
        .decimal128Column(-4, RoundingMode.HALF_UP,
            new BigInteger("123456789123456789"),
            new BigInteger("7979879879879798"),
            new BigInteger("17979879879879798"),
            new BigInteger("2234563472398472398"),
            null)
        .build()) {
      try (Table result = t1
          .groupBy(GroupByOptions.builder()
              .withKeysSorted(true)
              .withKeysDescending(false, false)
              .build(), 0, 1)
          .scan(GroupByScanAggregation.min().onColumn(2),
              GroupByScanAggregation.max().onColumn(2));
           Table expected = new Table.TestBuilder()
               .column( "1", "1", "1", "1", "2")
               .column(0, 1, 3, 3, 4)
               // running min per group
               .decimal128Column(-4, RoundingMode.HALF_UP,
                   new BigInteger("123456789123456789"),
                   new BigInteger("7979879879879798"),
                   new BigInteger("17979879879879798"),
                   new BigInteger("17979879879879798"),
                   null)
               // running max per group
               .decimal128Column(-4, RoundingMode.HALF_UP,
                   new BigInteger("123456789123456789"),
                   new BigInteger("7979879879879798"),
                   new BigInteger("17979879879879798"),
                   new BigInteger("2234563472398472398"),
                   null)
               .build()) {
        assertTablesAreEqual(expected, result);
      }
    }
  }
  /**
   * Group-by max aggregation over a DECIMAL128 column where every key is unique, so the
   * output equals the input.
   */
  @Test
  void testGroupByMinMaxDecimalAgg() {
    try (Table t1 = new Table.TestBuilder()
        .column(-341142443, 48424546)
        .decimal128Column(-2, RoundingMode.HALF_DOWN,
            new BigInteger("2978603952268112009"),
            new BigInteger("571526248386900094"))
        .build()) {
      try (Table result = t1
          .groupBy(GroupByOptions.builder()
              .build(), 0)
          .aggregate(GroupByAggregation.max().onColumn(1));
           Table expected = new Table.TestBuilder()
               .column(-341142443, 48424546)
               .decimal128Column(-2, RoundingMode.HALF_DOWN,
                   new BigInteger("2978603952268112009"),
                   new BigInteger("571526248386900094"))
               .build()) {
        assertTablesAreEqual(expected, result);
      }
    }
  }
  /**
   * Group-by count over a DECIMAL128 column; the default null policy excludes nulls, so the
   * group whose only value is null counts 0.
   */
  @Test
  void testGroupByCountDecimal() {
    try (Table t1 = new Table.TestBuilder()
        .column( "1", "1", "1", "1", "2")
        .column(0, 1, 3 , 3, 4)
        .decimal128Column(-4, RoundingMode.HALF_UP,
            new BigInteger("123456789123456789"),
            new BigInteger("7979879879879798"),
            new BigInteger("17979879879879798"),
            new BigInteger("2234563472398472398"),
            null)
        .build()) {
      try (Table result = t1
          .groupBy(GroupByOptions.builder()
              .withKeysSorted(true)
              .withKeysDescending(false, false)
              .build(), 0, 1)
          .aggregate(GroupByAggregation.count().onColumn(2));
           Table expected = new Table.TestBuilder()
               .column( "1", "1", "1", "2")
               .column(0, 1, 3, 4)
               .column(1, 1, 2, 0)
               .build()) {
        assertTablesAreEqual(expected, result);
      }
    }
  }
  /**
   * Group-by nunique on the key column itself: every group trivially has exactly one
   * distinct key value.
   */
  @Test
  void testGroupByUniqueCount() {
    try (Table t1 = new Table.TestBuilder()
        .column( "1", "1", "1", "1", "1", "1")
        .column( 1, 3, 3, 5, 5, 0)
        .column(12.0, 14.0, 13.0, 17.0, 17.0, 17.0)
        .build()) {
      try (Table t3 = t1
          .groupBy(0, 1)
          .aggregate(GroupByAggregation.nunique().onColumn(0));
           // Sort since group-by output row order is unspecified.
           Table sorted = t3.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table expected = new Table.TestBuilder()
               .column( "1", "1", "1", "1")
               .column( 0, 1, 3, 5)
               .column( 1, 1, 1, 1)
               .build()) {
        assertTablesAreEqual(expected, sorted);
      }
    }
  }
@Test
void testOrderByDecimal() {
try (Table t1 = new Table.TestBuilder()
.column( "1", "1", "1", "1")
.column(0, 1, 3 , 3)
.decimal64Column(4,
123456L,
124567L,
125678L,
126789L)
.build()) {
try (Table sorted = t1.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
Table expected = new Table.TestBuilder()
.column( "1", "1", "1", "1")
.column( 0, 1, 3, 3)
.decimal64Column(4,
123456L,
124567L,
125678L,
126789L)
.build()) {
assertTablesAreEqual(expected, sorted);
}
}
}
  /**
   * Group-by nunique with NullPolicy.INCLUDE on the key column: counting the key column
   * still yields exactly one distinct value per group.
   */
  @Test
  void testGroupByUniqueCountNulls() {
    try (Table t1 = new Table.TestBuilder()
        .column( "1", "1", "1", "1", "1", "1")
        .column( 1, 3, 3, 5, 5, 0)
        .column(null, null, 13.0, null, null, null)
        .build()) {
      try (Table t3 = t1
          .groupBy(0, 1)
          .aggregate(GroupByAggregation.nunique(NullPolicy.INCLUDE).onColumn(0));
           // Sort since group-by output row order is unspecified.
           Table sorted = t3.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table expected = new Table.TestBuilder()
               .column( "1", "1", "1", "1")
               .column( 0, 1, 3, 5)
               .column( 1, 1, 1, 1)
               .build()) {
        assertTablesAreEqual(expected, sorted);
      }
    }
  }
@Test
void testGroupByCount() {
try (Table t1 = new Table.TestBuilder().column( "1", "1", "1", "1", "1", "1")
.column( 1, 3, 3, 5, 5, 0)
.column(12.0, 14.0, 13.0, 17.0, 17.0, 17.0)
.build()) {
try (Table t3 = t1.groupBy(0, 1)
.aggregate(GroupByAggregation.count().onColumn(0));
HostColumnVector aggOut1 = t3.getColumn(2).copyToHost()) {
// verify t3
assertEquals(4, t3.getRowCount());
Map<Object, Integer> expectedAggregateResult = new HashMap() {
{
// value, count
put(1, 2);
put(2, 2);
}
};
for (int i = 0; i < 4; ++i) {
int key = aggOut1.getInt(i);
assertTrue(expectedAggregateResult.containsKey(key));
Integer count = expectedAggregateResult.get(key);
if (count == 1) {
expectedAggregateResult.remove(key);
} else {
expectedAggregateResult.put(key, count - 1);
}
}
}
}
}
  /**
   * Rolling count over a [2 preceding, 1 following] row window within groups, verified both
   * with plain INT32 keys and with decimal group/order keys.
   */
  @Test
  void testWindowingCount() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
        .decimal32Column(-1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // Decimal GBY Key
        .decimal64Column(1, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L) // Decimal OBY Key
        .build()) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table decSorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(4), OrderByArg.asc(5));
           ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           Scalar two = Scalar.fromInt(2);
           Scalar one = Scalar.fromInt(1)) {
        // Sanity check: both sort orders preserve the aggregation column.
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
        ColumnVector decSortedAggColumn = decSorted.getColumn(3);
        assertColumnsAreEqual(expectSortedAggColumn, decSortedAggColumn);
        try (WindowOptions window = WindowOptions.builder()
            .minPeriods(1)
            .window(two, one)
            .build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(RollingAggregation.count().onColumn(3).overWindow(window));
               Table decWindowAggResults = decSorted.groupBy(0, 4)
                   .aggregateWindows(RollingAggregation.count().onColumn(3).overWindow(window));
               ColumnVector expect = ColumnVector.fromBoxedInts(2, 3, 3, 2, 2, 3, 3, 2, 2, 3, 3, 2)) {
            assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            assertColumnsAreEqual(expect, decWindowAggResults.getColumn(0));
          }
        }
      }
    }
  }
  /**
   * Rolling min over a [2 preceding, 1 following] row window within groups, verified on an
   * INT32 aggregation column and on a DECIMAL64 aggregation column.
   */
  @Test
  void testWindowingMin() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
        .decimal32Column(-1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // Decimal GBY Key
        .decimal64Column(1, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L) // Decimal OBY Key
        .decimal64Column(2, 7L, 5L, 1L, 9L, 7L, 9L, 8L, 2L, 8L, 0L, 6L, 6L) // Decimal Agg Column
        .build()) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table decSorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(4), OrderByArg.asc(5));
           ColumnVector expectSortedAggCol = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           ColumnVector expectDecSortedAggCol = ColumnVector.decimalFromLongs(2, 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           Scalar two = Scalar.fromInt(2);
           Scalar one = Scalar.fromInt(1)) {
        // Sanity check: sorting preserves both aggregation columns.
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectSortedAggCol, sortedAggColumn);
        ColumnVector decSortedAggColumn = decSorted.getColumn(6);
        assertColumnsAreEqual(expectDecSortedAggCol, decSortedAggColumn);
        try (WindowOptions window = WindowOptions.builder()
            .minPeriods(1)
            .window(two, one)
            .build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(RollingAggregation.min().onColumn(3).overWindow(window));
               Table decWindowAggResults = decSorted.groupBy(0, 4)
                   .aggregateWindows(RollingAggregation.min().onColumn(6).overWindow(window));
               ColumnVector expect = ColumnVector.fromBoxedInts(5, 1, 1, 1, 7, 7, 2, 2, 0, 0, 0, 6);
               ColumnVector decExpect = ColumnVector.decimalFromLongs(2, 5, 1, 1, 1, 7, 7, 2, 2, 0, 0, 0, 6)) {
            assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            assertColumnsAreEqual(decExpect, decWindowAggResults.getColumn(0));
          }
        }
      }
    }
  }
  /**
   * Rolling max over a [2 preceding, 1 following] row window within groups, verified on an
   * INT32 aggregation column and on a DECIMAL64 aggregation column.
   */
  @Test
  void testWindowingMax() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
        .decimal32Column(-1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // Decimal GBY Key
        .decimal64Column(1, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L) // Decimal OBY Key
        .decimal64Column(2, 7L, 5L, 1L, 9L, 7L, 9L, 8L, 2L, 8L, 0L, 6L, 6L) // Decimal Agg Column
        .build()) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table decSorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(4), OrderByArg.asc(5));
           ColumnVector expectSortedAggCol = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           ColumnVector expectDecSortedAggCol = ColumnVector.decimalFromLongs(2, 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           Scalar two = Scalar.fromInt(2);
           Scalar one = Scalar.fromInt(1)) {
        // Sanity check: sorting preserves both aggregation columns.
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectSortedAggCol, sortedAggColumn);
        ColumnVector decSortedAggColumn = decSorted.getColumn(6);
        assertColumnsAreEqual(expectDecSortedAggCol, decSortedAggColumn);
        try (WindowOptions window = WindowOptions.builder()
            .minPeriods(1)
            .window(two, one)
            .build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(RollingAggregation.max().onColumn(3).overWindow(window));
               Table decWindowAggResults = decSorted.groupBy(0, 4)
                   .aggregateWindows(RollingAggregation.max().onColumn(6).overWindow(window));
               ColumnVector expect = ColumnVector.fromBoxedInts(7, 7, 9, 9, 9, 9, 9, 8, 8, 8, 6, 6);
               ColumnVector decExpect = ColumnVector.decimalFromLongs(2, 7, 7, 9, 9, 9, 9, 9, 8, 8, 8, 6, 6)) {
            assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            assertColumnsAreEqual(decExpect, decWindowAggResults.getColumn(0));
          }
        }
      }
    }
  }
  /**
   * Rolling sum over a [2 preceding, 1 following] row window within groups; INT32 input sums
   * widen to INT64 output.
   */
  @Test
  void testWindowingSum() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
        .build()) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           Scalar two = Scalar.fromInt(2);
           Scalar one = Scalar.fromInt(1)) {
        // Sanity check: sorting preserves the aggregation column.
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
        try (WindowOptions window = WindowOptions.builder()
            .minPeriods(1)
            .window(two, one)
            .build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(RollingAggregation.sum().onColumn(3).overWindow(window));
               ColumnVector expectAggResult = ColumnVector.fromBoxedLongs(12L, 13L, 15L, 10L, 16L, 24L, 19L, 10L, 8L, 14L, 12L, 12L)) {
            assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
          }
        }
      }
    }
  }
  /**
   * Rolling row-number within groups for three window widths ([2,1], [3,2], [4,3]),
   * verified with both plain INT32 keys and decimal group/order keys.
   */
  @Test
  void testWindowingRowNumber() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
        .decimal32Column(-1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // Decimal GBY Key
        .decimal64Column(1, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L) // Decimal OBY Key
        .decimal64Column(2, 7L, 5L, 1L, 9L, 7L, 9L, 8L, 2L, 8L, 0L, 6L, 6L) // Decimal Agg Column
        .build()) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table decSorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(4), OrderByArg.asc(5));
           ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           ColumnVector expectDecSortedAggColumn = ColumnVector.decimalFromLongs(2, 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
        // Sanity check: sorting preserves both aggregation columns.
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
        ColumnVector decSortedAggColumn = decSorted.getColumn(6);
        assertColumnsAreEqual(expectDecSortedAggColumn, decSortedAggColumn);
        WindowOptions.Builder windowBuilder = WindowOptions.builder().minPeriods(1);
        // Window [2 preceding, 1 following]: row number caps at 2.
        try (Scalar two = Scalar.fromInt(2);
             Scalar one = Scalar.fromInt(1);
             WindowOptions options = windowBuilder.window(two, one).build();
             WindowOptions options1 = windowBuilder.window(two, one).build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(RollingAggregation
                  .rowNumber()
                  .onColumn(3)
                  .overWindow(options));
               Table decWindowAggResults = decSorted.groupBy(0, 4)
                   .aggregateWindows(RollingAggregation
                       .rowNumber()
                       .onColumn(6)
                       .overWindow(options1));
               ColumnVector expectAggResult = ColumnVector.fromBoxedInts(1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2)) {
            assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
            assertColumnsAreEqual(expectAggResult, decWindowAggResults.getColumn(0));
          }
        }
        // Window [3 preceding, 2 following]: row number caps at 3.
        try (Scalar three = Scalar.fromInt(3);
             Scalar two = Scalar.fromInt(2);
             WindowOptions options = windowBuilder.window(three, two).build();
             WindowOptions options1 = windowBuilder.window(three, two).build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(RollingAggregation
                  .rowNumber()
                  .onColumn(3)
                  .overWindow(options));
               Table decWindowAggResults = decSorted.groupBy(0, 4)
                   .aggregateWindows(RollingAggregation
                       .rowNumber()
                       .onColumn(6)
                       .overWindow(options1));
               ColumnVector expectAggResult = ColumnVector.fromBoxedInts(1, 2, 3, 3, 1, 2, 3, 3, 1, 2, 3, 3)) {
            assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
            assertColumnsAreEqual(expectAggResult, decWindowAggResults.getColumn(0));
          }
        }
        // Window [4 preceding, 3 following]: covers every 4-row group entirely.
        try (Scalar four = Scalar.fromInt(4);
             Scalar three = Scalar.fromInt(3);
             WindowOptions options = windowBuilder.window(four, three).build();
             WindowOptions options1 = windowBuilder.window(four, three).build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(RollingAggregation
                  .rowNumber()
                  .onColumn(3)
                  .overWindow(options));
               Table decWindowAggResults = decSorted.groupBy(0, 4)
                   .aggregateWindows(RollingAggregation
                       .rowNumber()
                       .onColumn(6)
                       .overWindow(options1));
               ColumnVector expectAggResult = ColumnVector.fromBoxedInts(1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4)) {
            assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
            assertColumnsAreEqual(expectAggResult, decWindowAggResults.getColumn(0));
          }
        }
      }
    }
  }
@Test
void testWindowingCollectList() {
RollingAggregation aggCollectWithNulls = RollingAggregation.collectList(NullPolicy.INCLUDE);
RollingAggregation aggCollect = RollingAggregation.collectList();
try (Scalar two = Scalar.fromInt(2);
Scalar one = Scalar.fromInt(1);
WindowOptions winOpts = WindowOptions.builder()
.minPeriods(1)
.window(two, one)
.build()) {
StructType nestedType = new StructType(false,
new BasicType(false, DType.INT32), new BasicType(false, DType.STRING));
try (Table raw = new Table.TestBuilder()
.column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
.column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
.column(1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8) // OBY Key
.column(7, 5, 1, 9, 7, 9, 8, 2, null, 0, 6, null) // Agg Column of INT32
.column(nestedType, // Agg Column of Struct
new StructData(1, "s1"), new StructData(2, "s2"), new StructData(3, "s3"),
new StructData(4, "s4"), new StructData(11, "s11"), new StructData(22, "s22"),
new StructData(33, "s33"), new StructData(44, "s44"), new StructData(111, "s111"),
new StructData(222, "s222"), new StructData(333, "s333"), new StructData(444, "s444")
).build();
ColumnVector expectSortedAggColumn = ColumnVector
.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, null, 0, 6, null)) {
try (Table sorted = raw.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2))) {
ColumnVector sortedAggColumn = sorted.getColumn(3);
assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
// Primitive type: INT32
// a) including nulls
try (Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(aggCollectWithNulls.onColumn(3).overWindow(winOpts));
ColumnVector expected = ColumnVector.fromLists(
new ListType(false, new BasicType(false, DType.INT32)),
Arrays.asList(7, 5), Arrays.asList(7, 5, 1), Arrays.asList(5, 1, 9), Arrays.asList(1, 9),
Arrays.asList(7, 9), Arrays.asList(7, 9, 8), Arrays.asList(9, 8, 2), Arrays.asList(8, 2),
Arrays.asList(null, 0), Arrays.asList(null, 0, 6), Arrays.asList(0, 6, null), Arrays.asList(6, null))) {
assertColumnsAreEqual(expected, windowAggResults.getColumn(0));
}
// b) excluding nulls
try (Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(aggCollect.onColumn(3).overWindow(winOpts));
ColumnVector expected = ColumnVector.fromLists(
new ListType(false, new BasicType(false, DType.INT32)),
Arrays.asList(7, 5), Arrays.asList(7, 5, 1), Arrays.asList(5, 1, 9), Arrays.asList(1, 9),
Arrays.asList(7, 9), Arrays.asList(7, 9, 8), Arrays.asList(9, 8, 2), Arrays.asList(8, 2),
Arrays.asList(0), Arrays.asList(0, 6), Arrays.asList(0, 6), Arrays.asList(6))) {
assertColumnsAreEqual(expected, windowAggResults.getColumn(0));
}
// Nested type: Struct
List<StructData>[] expectedNestedData = new List[12];
expectedNestedData[0] = Arrays.asList(new StructData(1, "s1"), new StructData(2, "s2"));
expectedNestedData[1] = Arrays.asList(new StructData(1, "s1"), new StructData(2, "s2"), new StructData(3, "s3"));
expectedNestedData[2] = Arrays.asList(new StructData(2, "s2"), new StructData(3, "s3"), new StructData(4, "s4"));
expectedNestedData[3] = Arrays.asList(new StructData(3, "s3"), new StructData(4, "s4"));
expectedNestedData[4] = Arrays.asList(new StructData(11, "s11"), new StructData(22, "s22"));
expectedNestedData[5] = Arrays.asList(new StructData(11, "s11"), new StructData(22, "s22"), new StructData(33, "s33"));
expectedNestedData[6] = Arrays.asList(new StructData(22, "s22"), new StructData(33, "s33"), new StructData(44, "s44"));
expectedNestedData[7] = Arrays.asList(new StructData(33, "s33"), new StructData(44, "s44"));
expectedNestedData[8] = Arrays.asList(new StructData(111, "s111"), new StructData(222, "s222"));
expectedNestedData[9] = Arrays.asList(new StructData(111, "s111"), new StructData(222, "s222"), new StructData(333, "s333"));
expectedNestedData[10] = Arrays.asList(new StructData(222, "s222"), new StructData(333, "s333"), new StructData(444, "s444"));
expectedNestedData[11] = Arrays.asList(new StructData(333, "s333"), new StructData(444, "s444"));
try (Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(aggCollect.onColumn(4).overWindow(winOpts));
ColumnVector expected = ColumnVector.fromLists(
new ListType(false, nestedType), expectedNestedData)) {
assertColumnsAreEqual(expected, windowAggResults.getColumn(0));
}
}
}
}
}
  @Test
  void testWindowingCollectSet() {
    // Exercises COLLECT_SET over grouped rolling windows of [2 preceding, 1 following]
    // rows (minPeriods = 1). The four aggregation variants differ only in null/NaN
    // handling: whether nulls are collected at all, whether equal nulls deduplicate,
    // and whether NaNs compare equal for deduplication.
    RollingAggregation aggCollect = RollingAggregation.collectSet();
    RollingAggregation aggCollectWithEqNulls = RollingAggregation.collectSet(NullPolicy.INCLUDE,
        NullEquality.EQUAL, NaNEquality.UNEQUAL);
    RollingAggregation aggCollectWithUnEqNulls = RollingAggregation.collectSet(NullPolicy.INCLUDE,
        NullEquality.UNEQUAL, NaNEquality.UNEQUAL);
    RollingAggregation aggCollectWithEqNaNs = RollingAggregation.collectSet(NullPolicy.INCLUDE,
        NullEquality.EQUAL, NaNEquality.ALL_EQUAL);
    try (Scalar two = Scalar.fromInt(2);
         Scalar one = Scalar.fromInt(1);
         WindowOptions winOpts = WindowOptions.builder()
             .minPeriods(1)
             .window(two, one)
             .build()) {
      try (Table raw = new Table.TestBuilder()
          .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
          .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
          .column(1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8) // OBY Key
          .column(5, 5, 1, 1, 1, 4, 3, 4, null, null, 6, 7) // Agg Column of INT32
          .column(1.1, 1.1, null, 2.2, -3.0, 1.3e-7, -3.0, Double.NaN, 1e-3, null, Double.NaN, Double.NaN) // Agg Column of FLOAT64
          .build();
           ColumnVector expectSortedAggColumn = ColumnVector
               .fromBoxedInts(5, 5, 1, 1, 1, 4, 3, 4, null, null, 6, 7)) {
        try (Table sorted = raw.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2))) {
          ColumnVector sortedAggColumn = sorted.getColumn(3);
          // Sanity check: sorting by the key columns must leave the INT32 agg column
          // in the expected order before windows are computed.
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          // Primitive type: INT32
          // a) excluding NULLs
          // COLLECT_SET emits result lists in unspecified order, so each list is
          // normalized with listSortRows before comparing with the expected lists.
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(aggCollect.onColumn(3).overWindow(winOpts));
               ColumnVector resultSorted = windowAggResults.getColumn(0).listSortRows(false, false);
               ColumnVector expected = ColumnVector.fromLists(
                   new ListType(false, new BasicType(false, DType.INT32)),
                   Arrays.asList(5), Arrays.asList(1, 5), Arrays.asList(1, 5), Arrays.asList(1),
                   Arrays.asList(1, 4), Arrays.asList(1, 3, 4), Arrays.asList(3, 4), Arrays.asList(3, 4),
                   Arrays.asList(), Arrays.asList(6), Arrays.asList(6, 7), Arrays.asList(6, 7))) {
            assertColumnsAreEqual(expected, resultSorted);
          }
          // b) including NULLs AND NULLs are equal
          // Equal nulls deduplicate, so each result set holds at most one null.
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(aggCollectWithEqNulls.onColumn(3).overWindow(winOpts));
               ColumnVector resultSorted = windowAggResults.getColumn(0).listSortRows(false, false);
               ColumnVector expected = ColumnVector.fromLists(
                   new ListType(false, new BasicType(false, DType.INT32)),
                   Arrays.asList(5), Arrays.asList(1, 5), Arrays.asList(1, 5), Arrays.asList(1),
                   Arrays.asList(1, 4), Arrays.asList(1, 3, 4), Arrays.asList(3, 4), Arrays.asList(3, 4),
                   Arrays.asList((Integer) null), Arrays.asList(6, null), Arrays.asList(6, 7, null), Arrays.asList(6, 7))) {
            assertColumnsAreEqual(expected, resultSorted);
          }
          // c) including NULLs AND NULLs are unequal
          // Unequal nulls never deduplicate, so every null in the window survives.
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(aggCollectWithUnEqNulls.onColumn(3).overWindow(winOpts));
               ColumnVector resultSorted = windowAggResults.getColumn(0).listSortRows(false, false);
               ColumnVector expected = ColumnVector.fromLists(
                   new ListType(false, new BasicType(false, DType.INT32)),
                   Arrays.asList(5), Arrays.asList(1, 5), Arrays.asList(1, 5), Arrays.asList(1),
                   Arrays.asList(1, 4), Arrays.asList(1, 3, 4), Arrays.asList(3, 4), Arrays.asList(3, 4),
                   Arrays.asList(null, null), Arrays.asList(6, null, null), Arrays.asList(6, 7, null), Arrays.asList(6, 7))) {
            assertColumnsAreEqual(expected, resultSorted);
          }
          // Primitive type: FLOAT64
          // a) excluding NULLs
          // With the default NaNEquality.UNEQUAL, repeated NaNs are all kept.
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(aggCollect.onColumn(4).overWindow(winOpts));
               ColumnVector resultSorted = windowAggResults.getColumn(0).listSortRows(false, false);
               ColumnVector expected = ColumnVector.fromLists(
                   new ListType(false, new BasicType(false, DType.FLOAT64)),
                   Arrays.asList(1.1), Arrays.asList(1.1), Arrays.asList(1.1, 2.2), Arrays.asList(2.2),
                   Arrays.asList(-3.0, 1.3e-7), Arrays.asList(-3.0, 1.3e-7),
                   Arrays.asList(-3.0, 1.3e-7, Double.NaN), Arrays.asList(-3.0, Double.NaN),
                   Arrays.asList(1e-3), Arrays.asList(1e-3, Double.NaN),
                   Arrays.asList(Double.NaN, Double.NaN), Arrays.asList(Double.NaN, Double.NaN))) {
            assertColumnsAreEqual(expected, resultSorted);
          }
          // b) including NULLs AND NULLs are equal
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(aggCollectWithEqNulls.onColumn(4).overWindow(winOpts));
               ColumnVector resultSorted = windowAggResults.getColumn(0).listSortRows(false, false);
               ColumnVector expected = ColumnVector.fromLists(
                   new ListType(false, new BasicType(false, DType.FLOAT64)),
                   Arrays.asList(1.1), Arrays.asList(1.1, null), Arrays.asList(1.1, 2.2, null), Arrays.asList(2.2, null),
                   Arrays.asList(-3.0, 1.3e-7), Arrays.asList(-3.0, 1.3e-7),
                   Arrays.asList(-3.0, 1.3e-7, Double.NaN), Arrays.asList(-3.0, Double.NaN),
                   Arrays.asList(1e-3, null), Arrays.asList(1e-3, Double.NaN, null),
                   Arrays.asList(Double.NaN, Double.NaN, null), Arrays.asList(Double.NaN, Double.NaN))) {
            assertColumnsAreEqual(expected, resultSorted);
          }
          // c) including NULLs AND NULLs are equal AND NaNs are equal
          // NaNEquality.ALL_EQUAL collapses repeated NaNs into a single entry.
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(aggCollectWithEqNaNs.onColumn(4).overWindow(winOpts));
               ColumnVector resultSorted = windowAggResults.getColumn(0).listSortRows(false, false);
               ColumnVector expected = ColumnVector.fromLists(
                   new ListType(false, new BasicType(false, DType.FLOAT64)),
                   Arrays.asList(1.1), Arrays.asList(1.1, null), Arrays.asList(1.1, 2.2, null), Arrays.asList(2.2, null),
                   Arrays.asList(-3.0, 1.3e-7), Arrays.asList(-3.0, 1.3e-7),
                   Arrays.asList(-3.0, 1.3e-7, Double.NaN), Arrays.asList(-3.0, Double.NaN),
                   Arrays.asList(1e-3, null), Arrays.asList(1e-3, Double.NaN, null),
                   Arrays.asList(Double.NaN, null), Arrays.asList(Double.NaN))) {
            assertColumnsAreEqual(expected, resultSorted);
          }
        }
      }
    }
  }
  @Test
  void testWindowingLead() {
    // Exercises the LEAD rolling aggregation over grouped windows, for four agg
    // column types: INT32, DECIMAL64, LIST<INT32>, and STRUCT<INT32, STRING>.
    // Covers lead(0) (identity), lead(1) with null fill at group tails, lead(1)
    // with an explicit default-output column, and lead(3) fully out of bounds.
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Int Agg Column
        .decimal32Column(-1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // Decimal GBY Key
        .decimal64Column(1, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L) // Decimal OBY Key
        .decimal64Column(-2, 7L, 5L, 1L, 9L, 7L, 9L, 8L, 2L, 8L, 0L, 6L, 6L) // Decimal Agg Column
        .column(new ListType(false, new BasicType(true, DType.INT32)),
            Arrays.asList(11, 12, null, 13), Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null), Arrays.asList(16),
            Arrays.asList(21, null, null, 22), Arrays.asList(23, 24), Arrays.asList(25, 26, 27), Arrays.asList(28, 29, null),
            Arrays.asList(null, 31), Arrays.asList(32, 33, 34), Arrays.asList(35, 36), Arrays.asList(37, 38, 39)) // List Agg COLUMN
        .column(new StructType(true,
                new BasicType(true, DType.INT32),
                new BasicType(true, DType.STRING)),
            new StructData(1, "s1"), new StructData(null, "s2"), new StructData(2, null), new StructData(3, "s3"),
            new StructData(11, "s11"), null, new StructData(13, "s13"), new StructData(14, "s14"),
            new StructData(111, "s111"), new StructData(null, "s112"), new StructData(2, "s222"), new StructData(3, "s333")) //STRUCT Agg COLUMN
        .build()) {
      // The decimal key columns (4, 5) encode the same logical grouping/ordering
      // as the int key columns (1, 2), so both sorts yield the same row order.
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table decSorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(4), OrderByArg.asc(5));
           ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
           ColumnVector expectDecSortedAggColumn = ColumnVector.decimalFromLongs(-2, 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
        ColumnVector decSortedAggColumn = decSorted.getColumn(6);
        assertColumnsAreEqual(expectDecSortedAggColumn, decSortedAggColumn);
        WindowOptions.Builder windowBuilder = WindowOptions.builder().minPeriods(1);
        // lead(0): the identity — output must equal the input agg column.
        try (Scalar two = Scalar.fromInt(2);
             Scalar one = Scalar.fromInt(1);
             WindowOptions options = windowBuilder.window(two, one).build();
             Table windowAggResults = sorted.groupBy(0, 1)
                 .aggregateWindows(RollingAggregation
                     .lead(0)
                     .onColumn(3) // Int Agg Column
                     .overWindow(options));
             Table decWindowAggResults = decSorted.groupBy(0, 4)
                 .aggregateWindows(RollingAggregation
                     .lead(0)
                     .onColumn(6) // Decimal Agg Column
                     .overWindow(options));
             Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(0)
                     .onColumn(7) // List Agg COLUMN
                     .overWindow(options));
             Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(0)
                     .onColumn(8) //STRUCT Agg COLUMN
                     .overWindow(options));
             ColumnVector expectAggResult = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
             ColumnVector decExpectAggResult = ColumnVector.decimalFromLongs(-2, 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
             ColumnVector listExpectAggResult = ColumnVector.fromLists(
                 new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
                 Arrays.asList(11, 12, null, 13), Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null), Arrays.asList(16),
                 Arrays.asList(21, null, null, 22), Arrays.asList(23, 24), Arrays.asList(25, 26, 27), Arrays.asList(28, 29, null),
                 Arrays.asList(null, 31), Arrays.asList(32, 33, 34), Arrays.asList(35, 36), Arrays.asList(37, 38, 39));
             ColumnVector structExpectAggResult = ColumnVector.fromStructs(
                 new StructType(true,
                     new BasicType(true, DType.INT32),
                     new BasicType(true, DType.STRING)),
                 new StructData(1, "s1"), new StructData(null, "s2"), new StructData(2, null), new StructData(3, "s3"),
                 new StructData(11, "s11"), null, new StructData(13, "s13"), new StructData(14, "s14"),
                 new StructData(111, "s111"), new StructData(null, "s112"), new StructData(2, "s222"), new StructData(3, "s333"))) {
          assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
          assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
          assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
          assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
        }
        // lead(1): shifts forward by one row; the last row of each group has no
        // successor and becomes null.
        try (Scalar zero = Scalar.fromInt(0);
             Scalar one = Scalar.fromInt(1);
             WindowOptions options = windowBuilder.window(zero, one).build();
             Table windowAggResults = sorted.groupBy(0, 1)
                 .aggregateWindows(RollingAggregation
                     .lead(1)
                     .onColumn(3) //Int Agg COLUMN
                     .overWindow(options));
             // NOTE(review): this groups `sorted` by (0, 4) while the lead(0) case
             // above groups `decSorted` — presumably equivalent because both sorts
             // produce the same row order; confirm this was intentional.
             Table decWindowAggResults = sorted.groupBy(0, 4)
                 .aggregateWindows(RollingAggregation
                     .lead(1)
                     .onColumn(6) //Decimal Agg COLUMN
                     .overWindow(options));
             Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(1)
                     .onColumn(7) //LIST Agg COLUMN
                     .overWindow(options));
             Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(1)
                     .onColumn(8) //STRUCT Agg COLUMN
                     .overWindow(options));
             ColumnVector expectAggResult = ColumnVector.fromBoxedInts(5, 1, 9, null, 9, 8, 2, null, 0, 6, 6, null);
             ColumnVector decExpectAggResult = decimalFromBoxedInts(true, -2, 5, 1, 9, null, 9, 8, 2, null, 0, 6, 6, null);
             ColumnVector listExpectAggResult = ColumnVector.fromLists(
                 new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
                 Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null), Arrays.asList(16), null,
                 Arrays.asList(23, 24), Arrays.asList(25, 26, 27), Arrays.asList(28, 29, null), null,
                 Arrays.asList(32, 33, 34), Arrays.asList(35, 36), Arrays.asList(37, 38, 39), null);
             ColumnVector structExpectAggResult = ColumnVector.fromStructs(
                 new StructType(true,
                     new BasicType(true, DType.INT32),
                     new BasicType(true, DType.STRING)),
                 new StructData(null, "s2"), new StructData(2, null), new StructData(3, "s3"), null,
                 null, new StructData(13, "s13"), new StructData(14, "s14"), null,
                 new StructData(null, "s112"), new StructData(2, "s222"), new StructData(3, "s333"), null)) {
          assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
          assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
          assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
          assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
        }
        // lead(1, default): rows past the end of the group take the value from the
        // caller-supplied default-output column at the same row index.
        try (Scalar zero = Scalar.fromInt(0);
             Scalar one = Scalar.fromInt(1);
             WindowOptions options = windowBuilder.window(zero, one).build();
             ColumnVector defaultOutput = ColumnVector.fromBoxedInts(0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
             ColumnVector decDefaultOutput = ColumnVector.decimalFromLongs(-2, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
             ColumnVector listDefaultOutput = ColumnVector.fromLists(
                 new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
                 Arrays.asList(111), Arrays.asList(222), Arrays.asList(333), Arrays.asList(444, null, 555),
                 Arrays.asList(-11), Arrays.asList(-22), Arrays.asList(-33), Arrays.asList(-44),
                 Arrays.asList(6), Arrays.asList(6), Arrays.asList(6), Arrays.asList(6, null, null));
             ColumnVector structDefaultOutput = ColumnVector.fromStructs(
                 new StructType(true,
                     new BasicType(true, DType.INT32),
                     new BasicType(true, DType.STRING)),
                 new StructData(-1, "s1"), new StructData(null, "s2"), new StructData(-2, null), new StructData(-3, "s3"),
                 new StructData(-11, "s11"), null, new StructData(-13, "s13"), new StructData(-14, "s14"),
                 new StructData(-111, "s111"), new StructData(null, "s112"), new StructData(-222, "s222"), new StructData(-333, "s333"));
             Table windowAggResults = sorted.groupBy(0, 1)
                 .aggregateWindows(RollingAggregation
                     .lead(1, defaultOutput)
                     .onColumn(3) //Int Agg COLUMN
                     .overWindow(options));
             Table decWindowAggResults = sorted.groupBy(0, 4)
                 .aggregateWindows(RollingAggregation
                     .lead(1, decDefaultOutput)
                     .onColumn(6) //Decimal Agg COLUMN
                     .overWindow(options));
             Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(1, listDefaultOutput)
                     .onColumn(7) //LIST Agg COLUMN
                     .overWindow(options));
             Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(1, structDefaultOutput)
                     .onColumn(8) //STRUCT Agg COLUMN
                     .overWindow(options));
             ColumnVector expectAggResult = ColumnVector.fromBoxedInts(5, 1, 9, -3, 9, 8, 2, -7, 0, 6, 6, -11);
             ColumnVector decExpectAggResult = ColumnVector.decimalFromLongs(-2, 5, 1, 9, -3, 9, 8, 2, -7, 0, 6, 6, -11);
             ColumnVector listExpectAggResult = ColumnVector.fromLists(
                 new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
                 Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null), Arrays.asList(16), Arrays.asList(444, null, 555),
                 Arrays.asList(23, 24), Arrays.asList(25, 26, 27), Arrays.asList(28, 29, null), Arrays.asList(-44),
                 Arrays.asList(32, 33, 34), Arrays.asList(35, 36), Arrays.asList(37, 38, 39), Arrays.asList(6, null, null));
             ColumnVector structExpectAggResult = ColumnVector.fromStructs(
                 new StructType(true,
                     new BasicType(true, DType.INT32),
                     new BasicType(true, DType.STRING)),
                 new StructData(null, "s2"), new StructData(2, null), new StructData(3, "s3"), new StructData(-3, "s3"),
                 null, new StructData(13, "s13"), new StructData(14, "s14"), new StructData(-14, "s14"),
                 new StructData(null, "s112"), new StructData(2, "s222"), new StructData(3, "s333"), new StructData(-333, "s333"))) {
          assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
          assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
          assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
          assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
        }
        // Outside bounds
        // lead(3) exceeds every 4-row group entirely, so all outputs are null.
        try (Scalar zero = Scalar.fromInt(0);
             Scalar one = Scalar.fromInt(1);
             WindowOptions options = windowBuilder.window(zero, one).build();
             Table windowAggResults = sorted.groupBy(0, 1)
                 .aggregateWindows(RollingAggregation
                     .lead(3)
                     .onColumn(3) //Int Agg COLUMN
                     .overWindow(options));
             Table decWindowAggResults = sorted.groupBy(0, 4)
                 .aggregateWindows(RollingAggregation
                     .lead(3)
                     .onColumn(6) //Decimal Agg COLUMN
                     .overWindow(options));
             Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(3)
                     .onColumn(7) //LIST Agg COLUMN
                     .overWindow(options));
             Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
                 RollingAggregation
                     .lead(3)
                     .onColumn(8) //STRUCT Agg COLUMN
                     .overWindow(options));
             ColumnVector expectAggResult = ColumnVector.fromBoxedInts(null, null, null, null, null, null, null, null, null, null, null, null);
             ColumnVector decExpectAggResult = decimalFromBoxedInts(true, -2, null, null, null, null, null, null, null, null, null, null, null, null);
             ColumnVector listExpectAggResult = ColumnVector.fromLists(
                 new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
                 null, null, null, null, null, null, null, null, null, null, null, null);
             ColumnVector structExpectAggResult = ColumnVector.fromStructs(
                 new StructType(true,
                     new BasicType(true, DType.INT32),
                     new BasicType(true, DType.STRING)),
                 null, null, null, null, null, null, null, null, null, null, null, null)){
          assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
          assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
          assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
          assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
        }
      }
    }
  }
@Test
void testWindowingLag() {
try (Table unsorted = new Table.TestBuilder()
.column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
.column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
.column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
.column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
.decimal32Column(-1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // Decimal GBY Key
.decimal64Column(1, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L) // Decimal OBY Key
.decimal64Column(-2, 7L, 5L, 1L, 9L, 7L, 9L, 8L, 2L, 8L, 0L, 6L, 6L) // Decimal Agg Column
.column(new ListType(false, new BasicType(true, DType.INT32)),
Arrays.asList(11, 12, null, 13), Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null), Arrays.asList(16),
Arrays.asList(21, null, null, 22), Arrays.asList(23, 24), Arrays.asList(25, 26, 27), Arrays.asList(28, 29, null),
Arrays.asList(null, 31), Arrays.asList(32, 33, 34), Arrays.asList(35, 36), Arrays.asList(37, 38, 39)) // List Agg COLUMN
.column(new StructType(true,
new BasicType(true, DType.INT32),
new BasicType(true, DType.STRING)),
new StructData(1, "s1"), new StructData(null, "s2"), new StructData(2, null), new StructData(3, "s3"),
new StructData(11, "s11"), null, new StructData(13, "s13"), new StructData(14, "s14"),
new StructData(111, "s111"), new StructData(null, "s112"), new StructData(2, "s222"), new StructData(3, "s333")) //STRUCT Agg COLUMN
.build()) {
try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
Table decSorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(4), OrderByArg.asc(5));
ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
ColumnVector decExpectSortedAggColumn = ColumnVector.decimalFromLongs(-2, 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
ColumnVector sortedAggColumn = sorted.getColumn(3);
assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
ColumnVector decSortedAggColumn = decSorted.getColumn(6);
assertColumnsAreEqual(decExpectSortedAggColumn, decSortedAggColumn);
WindowOptions.Builder windowBuilder = WindowOptions.builder().minPeriods(1);
try (Scalar two = Scalar.fromInt(2);
Scalar one = Scalar.fromInt(1);
WindowOptions options = windowBuilder.window(two, one).build();
Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(RollingAggregation
.lag(0)
.onColumn(3) //Int Agg COLUMN
.overWindow(options));
Table decWindowAggResults = sorted.groupBy(0, 4)
.aggregateWindows(RollingAggregation
.lag(0)
.onColumn(6) //Decimal Agg COLUMN
.overWindow(options));
Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(0)
.onColumn(7) //LIST Agg COLUMN
.overWindow(options));
Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(0)
.onColumn(8) //STRUCT Agg COLUMN
.overWindow(options));
ColumnVector expectAggResult = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
ColumnVector decExpectAggResult = ColumnVector.decimalFromLongs(-2, 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6);
ColumnVector listExpectAggResult = ColumnVector.fromLists(
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(11, 12, null, 13), Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null), Arrays.asList(16),
Arrays.asList(21, null, null, 22), Arrays.asList(23, 24), Arrays.asList(25, 26, 27), Arrays.asList(28, 29, null),
Arrays.asList(null, 31), Arrays.asList(32, 33, 34), Arrays.asList(35, 36), Arrays.asList(37, 38, 39));
ColumnVector structExpectAggResult = ColumnVector.fromStructs(
new StructType(true,
new BasicType(true, DType.INT32),
new BasicType(true, DType.STRING)),
new StructData(1, "s1"), new StructData(null, "s2"), new StructData(2, null), new StructData(3, "s3"),
new StructData(11, "s11"), null, new StructData(13, "s13"), new StructData(14, "s14"),
new StructData(111, "s111"), new StructData(null, "s112"), new StructData(2, "s222"), new StructData(3, "s333"))) {
assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
}
try (Scalar zero = Scalar.fromInt(0);
Scalar two = Scalar.fromInt(2);
WindowOptions options = windowBuilder.window(two, zero).build();
Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(RollingAggregation
.lag(1)
.onColumn(3) //Int Agg COLUMN
.overWindow(options));
Table decWindowAggResults = sorted.groupBy(0, 4)
.aggregateWindows(RollingAggregation
.lag(1)
.onColumn(6) //Decimal Agg COLUMN
.overWindow(options));
Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(1)
.onColumn(7) //LIST Agg COLUMN
.overWindow(options));
Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(1)
.onColumn(8) //STRUCT Agg COLUMN
.overWindow(options));
ColumnVector expectAggResult = ColumnVector.fromBoxedInts(null, 7, 5, 1, null, 7, 9, 8, null, 8, 0, 6);
ColumnVector decExpectAggResult = decimalFromBoxedInts(true, -2, null, 7, 5, 1, null, 7, 9, 8, null, 8, 0, 6);
ColumnVector listExpectAggResult = ColumnVector.fromLists(
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
null, Arrays.asList(11, 12, null, 13), Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null),
null, Arrays.asList(21, null, null, 22), Arrays.asList(23, 24), Arrays.asList(25, 26, 27),
null, Arrays.asList(null, 31), Arrays.asList(32, 33, 34), Arrays.asList(35, 36));
ColumnVector structExpectAggResult = ColumnVector.fromStructs(
new StructType(true,
new BasicType(true, DType.INT32),
new BasicType(true, DType.STRING)),
null, new StructData(1, "s1"), new StructData(null, "s2"), new StructData(2, null),
null, new StructData(11, "s11"), null, new StructData(13, "s13"),
null, new StructData(111, "s111"), new StructData(null, "s112"), new StructData(2, "s222"))) {
assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
}
try (Scalar zero = Scalar.fromInt(0);
Scalar two = Scalar.fromInt(2);
WindowOptions options = windowBuilder.window(two, zero).build();
ColumnVector defaultOutput = ColumnVector.fromBoxedInts(0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
ColumnVector decDefaultOutput = ColumnVector.decimalFromLongs(-2, 0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11);
ColumnVector listDefaultOutput = ColumnVector.fromLists(
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(111), Arrays.asList(222), Arrays.asList(333), Arrays.asList(444, null, 555),
Arrays.asList(-11), Arrays.asList(-22), Arrays.asList(-33), Arrays.asList(-44),
Arrays.asList(6), Arrays.asList(6), Arrays.asList(6), Arrays.asList(6, null, null));
ColumnVector structDefaultOutput = ColumnVector.fromStructs(
new StructType(true,
new BasicType(true, DType.INT32),
new BasicType(true, DType.STRING)),
new StructData(-1, "s1"), new StructData(null, "s2"), new StructData(-2, null), new StructData(-3, "s3"),
new StructData(-11, "s11"), null, new StructData(-13, "s13"), new StructData(-14, "s14"),
new StructData(-111, "s111"), new StructData(null, "s112"), new StructData(-222, "s222"), new StructData(-333, "s333"));
Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(RollingAggregation
.lag(1, defaultOutput)
.onColumn(3) //Int Agg COLUMN
.overWindow(options));
Table decWindowAggResults = sorted.groupBy(0, 4)
.aggregateWindows(RollingAggregation
.lag(1, decDefaultOutput)
.onColumn(6) //Decimal Agg COLUMN
.overWindow(options));
Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(1, listDefaultOutput)
.onColumn(7) //LIST Agg COLUMN
.overWindow(options));
Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(1, structDefaultOutput)
.onColumn(8) //STRUCT Agg COLUMN
.overWindow(options));
ColumnVector expectAggResult = ColumnVector.fromBoxedInts(0, 7, 5, 1, -4, 7, 9, 8, -8, 8, 0, 6);
ColumnVector decExpectAggResult = ColumnVector.decimalFromLongs(-2, 0, 7, 5, 1, -4, 7, 9, 8, -8, 8, 0, 6);
ColumnVector listExpectAggResult = ColumnVector.fromLists(
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
Arrays.asList(111), Arrays.asList(11, 12, null, 13), Arrays.asList(14, null, 15, null), Arrays.asList((Integer) null),
Arrays.asList(-11), Arrays.asList(21, null, null, 22), Arrays.asList(23, 24), Arrays.asList(25, 26, 27),
Arrays.asList(6), Arrays.asList(null, 31), Arrays.asList(32, 33, 34), Arrays.asList(35, 36));
ColumnVector structExpectAggResult = ColumnVector.fromStructs(
new StructType(true,
new BasicType(true, DType.INT32),
new BasicType(true, DType.STRING)),
new StructData(-1, "s1"), new StructData(1, "s1"), new StructData(null, "s2"), new StructData(2, null),
new StructData(-11, "s11"), new StructData(11, "s11"), null, new StructData(13, "s13"),
new StructData(-111, "s111"), new StructData(111, "s111"), new StructData(null, "s112"), new StructData(2, "s222"))) {
assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
}
// Outside bounds
try (Scalar zero = Scalar.fromInt(0);
Scalar one = Scalar.fromInt(1);
WindowOptions options = windowBuilder.window(one, zero).build();
Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(RollingAggregation
.lag(3)
.onColumn(3) //Int Agg COLUMN
.overWindow(options));
Table decWindowAggResults = sorted.groupBy(0, 4)
.aggregateWindows(RollingAggregation
.lag(3)
.onColumn(6) //Decimal Agg COLUMN
.overWindow(options));
Table listWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(3)
.onColumn(7) //LIST Agg COLUMN
.overWindow(options));
Table structWindowAggResults = sorted.groupBy(0, 1).aggregateWindows(
RollingAggregation
.lag(3)
.onColumn(8) //STRUCT Agg COLUMN
.overWindow(options));
ColumnVector expectAggResult = ColumnVector.fromBoxedInts(null, null, null, null, null, null, null, null, null, null, null, null);
ColumnVector decExpectAggResult = decimalFromBoxedInts(true, -2, null, null, null, null, null, null, null, null, null, null, null, null);
ColumnVector listExpectAggResult = ColumnVector.fromLists(
new HostColumnVector.ListType(true, new HostColumnVector.BasicType(true, DType.INT32)),
null, null, null, null, null, null, null, null, null, null, null, null);
ColumnVector structExpectAggResult = ColumnVector.fromStructs(
new StructType(true,
new BasicType(true, DType.INT32),
new BasicType(true, DType.STRING)),
null, null, null, null, null, null, null, null, null, null, null, null);) {
assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
assertColumnsAreEqual(decExpectAggResult, decWindowAggResults.getColumn(0));
assertColumnsAreEqual(listExpectAggResult, listWindowAggResults.getColumn(0));
assertColumnsAreEqual(structExpectAggResult, structWindowAggResults.getColumn(0));
}
}
}
}
@Test
void testWindowingMean() {
try (Table unsorted = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
.column( 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
.column( 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
.column( 7, 5, 3, 7, 7, 9, 8, 4, 8, 0, 4, 8) // Agg Column
.build()) {
try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
ColumnVector expectedSortedAggCol = ColumnVector.fromBoxedInts(7, 5, 3, 7, 7, 9, 8, 4, 8, 0, 4, 8)) {
ColumnVector sortedAggColumn = sorted.getColumn(3);
assertColumnsAreEqual(expectedSortedAggCol, sortedAggColumn);
try (Scalar one = Scalar.fromInt(1);
Scalar two = Scalar.fromInt(2);
WindowOptions window = WindowOptions.builder()
.minPeriods(1)
.window(two, one)
.build()) {
try (Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(RollingAggregation.mean().onColumn(3).overWindow(window));
ColumnVector expect = ColumnVector.fromBoxedDoubles(6.0d, 5.0d, 5.0d, 5.0d, 8.0d, 8.0d, 7.0d, 6.0d, 4.0d, 4.0d, 4.0d, 6.0d)) {
assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
}
}
}
}
}
  /**
   * Grouped rolling NTH_ELEMENT over a row-based window (2 preceding inclusive,
   * 1 following, minPeriods=1). Checks element indices 0 (first), -1 (last) and 1,
   * each with nulls both included and excluded.
   */
  @Test
  void testWindowingNthElement() {
    final Integer X = null;  // shorthand for a null Integer in the fixtures below
    try (Table unsorted = new Table.TestBuilder()
        .column( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // 0: GBY Key
        .column( 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1) // 1: GBY Key
        .column( 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) // 2: OBY Key
        .column( X, 4, 0, X, 4, X, 9, 7, 7, 3, 5, 7) // 3: Agg Column
        .build()) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           ColumnVector expectedSortedAggCol = ColumnVector.fromBoxedInts(7, 5, 3, 7, 7, 9, X, 4, X, 0, 4, X)) {
        // Sanity check the sorted order of the aggregation column before windowing.
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectedSortedAggCol, sortedAggColumn);
        try (Scalar one = Scalar.fromInt(1);
             Scalar two = Scalar.fromInt(2);
             WindowOptions window = WindowOptions.builder()
                 .minPeriods(1)
                 .window(two, one)
                 .build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(
                  RollingAggregation.nth(0, NullPolicy.INCLUDE).onColumn(3).overWindow(window),
                  RollingAggregation.nth(-1, NullPolicy.INCLUDE).onColumn(3).overWindow(window),
                  RollingAggregation.nth(1, NullPolicy.INCLUDE).onColumn(3).overWindow(window),
                  RollingAggregation.nth(0, NullPolicy.EXCLUDE).onColumn(3).overWindow(window),
                  RollingAggregation.nth(-1, NullPolicy.EXCLUDE).onColumn(3).overWindow(window),
                  RollingAggregation.nth(1, NullPolicy.EXCLUDE).onColumn(3).overWindow(window));
               ColumnVector expect_first = ColumnVector.fromBoxedInts(7, 7, 5, 3, 7, 7, 9, X, X, X, 0, 4);
               ColumnVector expect_last = ColumnVector.fromBoxedInts(5, 3, 7, 7, 9, X, 4, 4, 0, 4, X, X);
               ColumnVector expect_1th = ColumnVector.fromBoxedInts(5, 5, 3, 7, 9, 9, X, 4, 0, 0, 4, X);
               ColumnVector expect_first_skip_null =
                   ColumnVector.fromBoxedInts(7, 7, 5, 3, 7, 7, 9, 4, 0, 0, 0, 4);
               ColumnVector expect_last_skip_null =
                   ColumnVector.fromBoxedInts(5, 3, 7, 7, 9, 9, 4, 4, 0, 4, 4, 4);
               ColumnVector expect_1th_skip_null =
                   ColumnVector.fromBoxedInts(5, 5, 3, 7, 9, 9, 4, X, X, 4, 4, X)) {
            assertColumnsAreEqual(expect_first, windowAggResults.getColumn(0));
            assertColumnsAreEqual(expect_last, windowAggResults.getColumn(1));
            assertColumnsAreEqual(expect_1th, windowAggResults.getColumn(2));
            assertColumnsAreEqual(expect_first_skip_null, windowAggResults.getColumn(3));
            assertColumnsAreEqual(expect_last_skip_null, windowAggResults.getColumn(4));
            assertColumnsAreEqual(expect_1th_skip_null, windowAggResults.getColumn(5));
          }
        }
      }
    }
  }
  /**
   * Exercises several different rolling aggregations (SUM, MAX, MIN), over
   * different window shapes and over different input columns, in a single
   * aggregateWindows() call. Also checks that minPeriods produces nulls where
   * a window holds fewer readings than required (window_3 expectations).
   */
  @Test
  void testWindowingOnMultipleDifferentColumns() {
    try (Table unsorted = new Table.TestBuilder()
        .column( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column( 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column( 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
        .column( 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
        .build()) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           ColumnVector expectedSortedAggCol = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
        // Sanity check the sorted order of the aggregation column before windowing.
        ColumnVector sortedAggColumn = sorted.getColumn(3);
        assertColumnsAreEqual(expectedSortedAggCol, sortedAggColumn);
        try (Scalar one = Scalar.fromInt(1);
             Scalar two = Scalar.fromInt(2);
             Scalar three = Scalar.fromInt(3);
             // Window (1,1), with a minimum of 1 reading.
             WindowOptions window_1 = WindowOptions.builder()
                 .minPeriods(1)
                 .window(two, one)
                 .build();
             // Window (2,2), with a minimum of 2 readings.
             WindowOptions window_2 = WindowOptions.builder()
                 .minPeriods(2)
                 .window(three, two)
                 .build();
             // Window (1,1), with a minimum of 3 readings.
             WindowOptions window_3 = WindowOptions.builder()
                 .minPeriods(3)
                 .window(two, one)
                 .build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindows(
                  RollingAggregation.sum().onColumn(3).overWindow(window_1),
                  RollingAggregation.max().onColumn(3).overWindow(window_1),
                  RollingAggregation.sum().onColumn(3).overWindow(window_2),
                  RollingAggregation.min().onColumn(2).overWindow(window_3)
              );
               // window_3 has minPeriods=3; 4-row groups yield nulls at group edges.
               ColumnVector expect_0 = ColumnVector.fromBoxedLongs(12L, 13L, 15L, 10L, 16L, 24L, 19L, 10L, 8L, 14L, 12L, 12L);
               ColumnVector expect_1 = ColumnVector.fromBoxedInts(7, 7, 9, 9, 9, 9, 9, 8, 8, 8, 6, 6);
               ColumnVector expect_2 = ColumnVector.fromBoxedLongs(13L, 22L, 22L, 15L, 24L, 26L, 26L, 19L, 14L, 20L, 20L, 12L);
               ColumnVector expect_3 = ColumnVector.fromBoxedInts(null, 1, 1, null, null, 3, 3, null, null, 5, 5, null)) {
            assertColumnsAreEqual(expect_0, windowAggResults.getColumn(0));
            assertColumnsAreEqual(expect_1, windowAggResults.getColumn(1));
            assertColumnsAreEqual(expect_2, windowAggResults.getColumn(2));
            assertColumnsAreEqual(expect_3, windowAggResults.getColumn(3));
          }
        }
      }
    }
  }
  /**
   * Rolling SUM over a row-based window (2 preceding inclusive, 1 following,
   * minPeriods=1) with an empty groupBy(): the whole table forms one group.
   */
  @Test
  void testWindowingWithoutGroupByColumns() {
    try (Table unsorted = new Table.TestBuilder().column( 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
                                                 .column( 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
                                                 .build();
         ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0))) {
        // Sanity check the sorted order of the aggregation column before windowing.
        ColumnVector sortedAggColumn = sorted.getColumn(1);
        assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
        try (Scalar one = Scalar.fromInt(1);
             Scalar two = Scalar.fromInt(2);
             WindowOptions window = WindowOptions.builder()
                 .minPeriods(1)
                 .window(two, one)
                 .build()) {
          try (Table windowAggResults = sorted.groupBy().aggregateWindows(
                   RollingAggregation.sum().onColumn(1).overWindow(window));
               ColumnVector expectAggResult = ColumnVector.fromBoxedLongs(12L, 13L, 15L, 17L, 25L, 24L, 19L, 18L, 10L, 14L, 12L, 12L)
          ) {
            assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
          }
        }
      }
    }
  }
@Test
void testWindowWithUnboundedPrecedingUnboundedFollowing() {
try (Table unsorted = new Table.TestBuilder()
.column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
.column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
.column(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6) // OBY Key
.column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6) // Agg Column
.build()) {
try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
ColumnVector sortedAggColumn = sorted.getColumn(3);
assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
try (WindowOptions window = WindowOptions.builder()
.minPeriods(1)
.unboundedPreceding()
.unboundedFollowing()
.build()) {
try (Table windowAggResults = sorted.groupBy(0, 1)
.aggregateWindows(RollingAggregation.sum().onColumn(3).overWindow(window));
ColumnVector expectAggResult = ColumnVector.fromBoxedLongs(22L, 22L, 22L, 22L, 26L, 26L, 26L, 26L, 20L, 20L, 20L, 20L)) {
assertColumnsAreEqual(expectAggResult, windowAggResults.getColumn(0));
}
}
}
}
}
private Scalar getScalar(DType type, long value) {
if (type.equals(DType.INT32)) {
return Scalar.fromInt((int) value);
} else if (type.equals(DType.INT64)) {
return Scalar.fromLong(value);
} else if (type.equals(DType.INT16)) {
return Scalar.fromShort((short) value);
} else if (type.equals(DType.INT8)) {
return Scalar.fromByte((byte) value);
} else if (type.equals(DType.UINT8)) {
return Scalar.fromUnsignedByte((byte) value);
} else if (type.equals(DType.UINT16)) {
return Scalar.fromUnsignedShort((short) value);
} else if (type.equals(DType.UINT32)) {
return Scalar.fromUnsignedInt((int) value);
} else if (type.equals(DType.UINT64)) {
return Scalar.fromUnsignedLong(value);
} else if (type.equals(DType.TIMESTAMP_DAYS)) {
return Scalar.durationFromLong(DType.DURATION_DAYS, value);
} else if (type.equals(DType.TIMESTAMP_SECONDS)) {
return Scalar.durationFromLong(DType.DURATION_SECONDS, value);
} else if (type.equals(DType.TIMESTAMP_MILLISECONDS)) {
return Scalar.durationFromLong(DType.DURATION_MILLISECONDS, value);
} else if (type.equals(DType.TIMESTAMP_MICROSECONDS)) {
return Scalar.durationFromLong(DType.DURATION_MICROSECONDS, value);
} else if (type.equals(DType.TIMESTAMP_NANOSECONDS)) {
return Scalar.durationFromLong(DType.DURATION_NANOSECONDS, value);
} else {
return Scalar.fromNull(type);
}
}
  /**
   * Grouped COUNT over a range-based window (order-by value +/- 1, minPeriods=1),
   * repeated for every supported order-by column type: INT64, INT16, INT32, INT8
   * and each timestamp resolution (columns 3 onward).
   */
  @Test
  void testRangeWindowingCount() {
    try (
        Table unsorted = new Table.TestBuilder()
            .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
            .column(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
            .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
            .column(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L) // orderBy Key
            .column((short) 1, (short)1, (short)2, (short)3, (short)3, (short)3, (short)4, (short)4, (short)5, (short)5, (short)6, (short)6, (short)7) // orderBy Key
            .column(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // orderBy Key
            .column((byte) 1, (byte)1, (byte)2, (byte)3, (byte)3, (byte)3, (byte)4, (byte)4, (byte)5, (byte)5, (byte)6, (byte)6, (byte)7) // orderBy Key
            .timestampDayColumn(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // timestamp orderBy Key
            .timestampSecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
            .timestampMicrosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
            .timestampMillisecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
            .timestampNanosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
            .build()) {
      // Re-run the same scenario once per candidate order-by column.
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(orderIndex));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          // Bounds must match the order-by type (durations for timestamps).
          try (Scalar preceding = getScalar(type, 1L);
               Scalar following = getScalar(type, 1L)) {
            try (WindowOptions window = WindowOptions.builder()
                .minPeriods(1)
                .window(preceding, following)
                .orderByColumnIndex(orderIndex)
                .build()) {
              try (Table windowAggResults = sorted.groupBy(0, 1).aggregateWindowsOverRanges(
                       RollingAggregation.count().onColumn(2).overWindow(window));
                   ColumnVector expect = ColumnVector.fromBoxedInts(3, 3, 4, 2, 4, 4, 4, 4, 4, 4, 5, 5, 3)) {
                assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
              }
            }
          }
        }
      }
    }
  }
  /**
   * Grouped LEAD(1) over a range-based window (order-by value +/- 1, minPeriods=1),
   * repeated for every supported order-by column type. The last row of each group
   * has no successor, hence the nulls in the expected output.
   */
  @Test
  void testRangeWindowingLead() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L) // orderBy Key
        .column((short) 1, (short)1, (short)2, (short)3, (short)3, (short)3, (short)4, (short)4, (short)5, (short)5, (short)6, (short)6, (short)7) // orderBy Key
        .column(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // orderBy Key
        .column((byte) 1, (byte)1, (byte)2, (byte)3, (byte)3, (byte)3, (byte)4, (byte)4, (byte)5, (byte)5, (byte)6, (byte)6, (byte)7) // orderBy Key
        .timestampDayColumn(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // Timestamp orderBy Key
        .timestampSecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMicrosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMillisecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampNanosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .build()) {
      // Re-run the same scenario once per candidate order-by column.
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(orderIndex));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          try (Scalar preceding = getScalar(type, 1L);
               Scalar following = getScalar(type, 1L)) {
            try (WindowOptions window = WindowOptions.builder()
                .minPeriods(1)
                .window(preceding, following)
                .orderByColumnIndex(orderIndex)
                .build()) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(RollingAggregation.lead(1)
                      .onColumn(2)
                      .overWindow(window));
                   ColumnVector expect = ColumnVector.fromBoxedInts(5, 1, 9, null, 9, 8, 2, null, 0, 6, 6, 8, null)) {
                assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
              }
            }
          }
        }
      }
    }
  }
  /**
   * Grouped MAX, twice per order-by column type: first over a range-based window
   * (order-by value +/- 1) via aggregateWindowsOverRanges(), then over a row-based
   * window (2 preceding inclusive, 1 following) via aggregateWindows() for contrast.
   */
  @Test
  void testRangeWindowingMax() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L) // orderBy Key
        .column((short) 1, (short)1, (short)2, (short)3, (short)3, (short)3, (short)4, (short)4, (short)5, (short)5, (short)6, (short)6, (short)7) // orderBy Key
        .column(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // orderBy Key
        .column((byte) 1, (byte)1, (byte)2, (byte)3, (byte)3, (byte)3, (byte)4, (byte)4, (byte)5, (byte)5, (byte)6, (byte)6, (byte)7) // orderBy Key
        .timestampDayColumn(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // Timestamp orderBy Key
        .timestampSecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMicrosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMillisecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampNanosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .build()) {
      // Re-run the same scenario once per candidate order-by column.
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(orderIndex));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          try (Scalar preceding = getScalar(type, 1L);
               Scalar following = getScalar(type, 1L)) {
            // Range-based MAX: window bounds are expressed in order-by values.
            try (WindowOptions window = WindowOptions.builder()
                .minPeriods(1)
                .window(preceding, following)
                .orderByColumnIndex(orderIndex)
                .build()) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(RollingAggregation.max().onColumn(2).overWindow(window));
                   ColumnVector expect = ColumnVector.fromBoxedInts(7, 7, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8)) {
                assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
              }
            }
            // Row-based MAX over the same data, for comparison: bounds count rows.
            try (Scalar one = Scalar.fromInt(1);
                 Scalar two = Scalar.fromInt(2);
                 WindowOptions window = WindowOptions.builder()
                     .minPeriods(1)
                     .window(two, one)
                     .build()) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindows(RollingAggregation.max().onColumn(2).overWindow(window));
                   ColumnVector expect = ColumnVector.fromBoxedInts(7, 7, 9, 9, 9, 9, 9, 8, 8, 8, 6, 8, 8)) {
                assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
              }
            }
          }
        }
      }
    }
  }
  /**
   * Grouped ROW_NUMBER over a range-based window (2 preceding, 0 following,
   * minPeriods=1), repeated for every supported order-by column type. Row numbers
   * restart at 1 within each group.
   */
  @Test
  void testRangeWindowingRowNumber() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L) // orderBy Key
        .column((short) 1, (short)1, (short)2, (short)3, (short)3, (short)3, (short)4, (short)4, (short)5, (short)5, (short)6, (short)6, (short)7) // orderBy Key
        .column(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // orderBy Key
        .column((byte) 1, (byte)1, (byte)2, (byte)3, (byte)3, (byte)3, (byte)4, (byte)4, (byte)5, (byte)5, (byte)6, (byte)6, (byte)7) // orderBy Key
        .timestampDayColumn(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // Timestamp orderBy Key
        .timestampSecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMicrosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMillisecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampNanosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .build()) {
      // Re-run the same scenario once per candidate order-by column.
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(orderIndex));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          try (Scalar preceding = getScalar(type, 2L);
               Scalar following = getScalar(type, 0L)) {
            try (WindowOptions window = WindowOptions.builder()
                .minPeriods(1)
                .window(preceding, following)
                .orderByColumnIndex(orderIndex)
                .build()) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(RollingAggregation.rowNumber().onColumn(2).overWindow(window));
                   ColumnVector expect = ColumnVector.fromBoxedInts(1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 5)) {
                assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
              }
            }
          }
        }
      }
    }
  }
  /**
   * Grouped COUNT and SUM over range-based windows on a DESCENDING order-by
   * column (orderByDescending()), repeated for every supported order-by type.
   * window_0 spans (2 preceding, 1 following); window_1 spans (3 preceding,
   * 0 following) — both measured in order-by values along the descending order.
   */
  @Test
  void testRangeWindowingCountDescendingTimestamps() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column((short)7, (short)6, (short)6, (short)5, (short)5, (short)4, (short)4, (short)3, (short)3, (short)3, (short)2, (short)1, (short)1)
        .column(7L, 6L, 6L, 5L, 5L, 4L, 4L, 3L, 3L, 3L, 2L, 1L, 1L)
        .column(7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 2, 1, 1)
        .column((byte)7, (byte)6, (byte)6, (byte)5, (byte)5, (byte)4, (byte)4, (byte)3, (byte)3, (byte)3, (byte)2, (byte)1, (byte)1)
        .timestampDayColumn(7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 2, 1, 1) // Timestamp Key
        .timestampSecondsColumn(7L, 6L, 6L, 5L, 5L, 4L, 4L, 3L, 3L, 3L, 2L, 1L, 1L)
        .timestampMicrosecondsColumn(7L, 6L, 6L, 5L, 5L, 4L, 4L, 3L, 3L, 3L, 2L, 1L, 1L)
        .timestampMillisecondsColumn(7L, 6L, 6L, 5L, 5L, 4L, 4L, 3L, 3L, 3L, 2L, 1L, 1L)
        .timestampNanosecondsColumn(7L, 6L, 6L, 5L, 5L, 4L, 4L, 3L, 3L, 3L, 2L, 1L, 1L)
        .build()) {
      // Re-run the same scenario once per candidate order-by column,
      // sorting that column descending.
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.desc(orderIndex));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          try (Scalar preceding_0 = getScalar(type, 2L);
               Scalar following_0 = getScalar(type, 1L);
               Scalar preceding_1 = getScalar(type, 3L);
               Scalar following_1 = getScalar(type, 0L)) {
            try (WindowOptions window_0 = WindowOptions.builder()
                .minPeriods(1)
                .window(preceding_0, following_0)
                .orderByColumnIndex(orderIndex)
                .orderByDescending()
                .build();
                 WindowOptions window_1 = WindowOptions.builder()
                     .minPeriods(1)
                     .window(preceding_1, following_1)
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build()) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(
                      RollingAggregation.count().onColumn(2).overWindow(window_0),
                      RollingAggregation.sum().onColumn(2).overWindow(window_1));
                   ColumnVector expect_0 = ColumnVector.fromBoxedInts(3, 4, 4, 4, 3, 4, 4, 4, 3, 3, 5, 5, 5);
                   ColumnVector expect_1 = ColumnVector.fromBoxedLongs(7L, 13L, 13L, 22L, 7L, 24L, 24L, 26L, 8L, 8L, 14L, 28L, 28L)) {
                assertColumnsAreEqual(expect_0, windowAggResults.getColumn(0));
                assertColumnsAreEqual(expect_1, windowAggResults.getColumn(1));
              }
            }
          }
        }
      }
    }
  }
  /**
   * Grouped COUNT over a range-based window (order-by value +/- 1, minPeriods=1)
   * with an empty groupBy(): the whole table forms a single group. Repeated for
   * every supported order-by column type.
   */
  @Test
  void testRangeWindowingWithoutGroupByColumns() {
    try (Table unsorted = new Table.TestBuilder()
        .column( 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L) // orderBy Key
        .column((short) 1, (short)1, (short)2, (short)3, (short)3, (short)3, (short)4, (short)4, (short)5, (short)5, (short)6, (short)6, (short)7) // orderBy Key
        .column(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // orderBy Key
        .column((byte) 1, (byte)1, (byte)2, (byte)3, (byte)3, (byte)3, (byte)4, (byte)4, (byte)5, (byte)5, (byte)6, (byte)6, (byte)7) // orderBy Key
        .timestampDayColumn(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // Timestamp orderBy Key
        .timestampSecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMicrosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMillisecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampNanosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .build()) {
      // Note: order-by candidates start at index 3; columns 1 and 2 also hold
      // order-by data but the loop matches the sibling tests' structure.
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(orderIndex));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(0);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          try (Scalar preceding = getScalar(type, 1L);
               Scalar following = getScalar(type, 1L)) {
            try (WindowOptions window = WindowOptions.builder()
                .minPeriods(1)
                .window(preceding, following)
                .orderByColumnIndex(orderIndex)
                .build();) {
              try (Table windowAggResults = sorted.groupBy()
                  .aggregateWindowsOverRanges(RollingAggregation.count().onColumn(1).overWindow(window));
                   ColumnVector expect = ColumnVector.fromBoxedInts(3, 3, 6, 6, 6, 6, 7, 7, 6, 6, 5, 5, 3)) {
                assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
              }
            }
          }
        }
      }
    }
  }
@Test
void testRangeWindowingOrderByUnsupportedDataTypeExceptions() {
try (Table table = new Table.TestBuilder()
.column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
.column(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
.column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
.column(true, false, true, false, true, false, true, false, false, false, false, false, false) // orderBy Key
.build()) {
try (Scalar one = Scalar.fromInt(1);
WindowOptions rangeBasedWindow = WindowOptions.builder()
.minPeriods(1)
.window(one, one)
.orderByColumnIndex(3)
.build()) {
assertThrows(IllegalArgumentException.class,
() -> table
.groupBy(0, 1)
.aggregateWindowsOverRanges(RollingAggregation.max().onColumn(2).overWindow(rangeBasedWindow)));
}
}
}
  /**
   * Mixing window kinds and APIs must fail: a row-based window passed to
   * aggregateWindowsOverRanges(), or a range-based window passed to
   * aggregateWindows(), each raises IllegalArgumentException.
   */
  @Test
  void testInvalidWindowTypeExceptions() {
    try (Scalar one = Scalar.fromInt(1);
         Table table = new Table.TestBuilder()
             .column( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
             .column( 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
             .timestampDayColumn( 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // Timestamp Key
             .column( 7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
             .build()) {
      // Row-based window (no orderByColumnIndex) must not be usable over ranges.
      try (WindowOptions rowBasedWindow = WindowOptions.builder()
          .minPeriods(1)
          .window(one, one)
          .build()) {
        assertThrows(IllegalArgumentException.class, () -> table.groupBy(0, 1).aggregateWindowsOverRanges(RollingAggregation.max().onColumn(3).overWindow(rowBasedWindow)));
      }
      // Range-based window (with orderByColumnIndex) must not be usable row-wise.
      try (WindowOptions rangeBasedWindow = WindowOptions.builder()
          .minPeriods(1)
          .window(one, one)
          .orderByColumnIndex(2)
          .build()) {
        assertThrows(IllegalArgumentException.class, () -> table.groupBy(0, 1).aggregateWindows(RollingAggregation.max().onColumn(3).overWindow(rangeBasedWindow)));
      }
    }
  }
  /**
   * Grouped COUNT over a range-based window with UNBOUNDED PRECEDING and a
   * following bound of 1 order-by unit (minPeriods=1), repeated for every
   * supported order-by column type.
   */
  @Test
  void testRangeWindowingCountUnboundedPreceding() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L) // orderBy Key
        .column((short) 1, (short)1, (short)2, (short)3, (short)3, (short)3, (short)4, (short)4, (short)5, (short)5, (short)6, (short)6, (short)7) // orderBy Key
        .column(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // orderBy Key
        .column((byte) 1, (byte)1, (byte)2, (byte)3, (byte)3, (byte)3, (byte)4, (byte)4, (byte)5, (byte)5, (byte)6, (byte)6, (byte)7) // orderBy Key
        .timestampDayColumn(1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7) // Timestamp orderBy Key
        .timestampSecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMicrosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampMillisecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .timestampNanosecondsColumn(1L, 1L, 2L, 3L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L)
        .build()) {
      // Re-run the same scenario once per candidate order-by column.
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(orderIndex));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          try (Scalar following = getScalar(type, 1L)) {
            try (WindowOptions window = WindowOptions.builder()
                .minPeriods(1)
                .unboundedPreceding()
                .following(following)
                .orderByColumnIndex(orderIndex)
                .build();) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(RollingAggregation.count().onColumn(2).overWindow(window));
                   ColumnVector expect = ColumnVector.fromBoxedInts(3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5)) {
                assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
              }
            }
          }
        }
      }
    }
  }
  /**
   * Range-based COUNT windows over a STRING order-by column (nulls first).
   * Only unbounded/current-row range bounds are meaningful for strings; checks
   * unbounded-unbounded, unbounded-currentRow and currentRow-unbounded windows.
   */
  @Test
  void testRangeWindowingWithStringOrderByColumn() {
    final String X = null;  // shorthand for a null String in the fixture below
    final int orderIndex = 3; // Index of order-by column.
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column("0", "1", "2", "3", "4", "5", X, X, "1", "2", "4", "5", "7") // String orderBy Key
        .build()) {
      // Sort with nulls before values (areNullsSmallest=true) to match the
      // windows' null-handling below.
      try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(3, true));
           ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
        ColumnVector sortedAggColumn = sorted.getColumn(2);
        assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
        try (WindowOptions unboundedPrecedingAndFollowing = WindowOptions.builder()
            .minPeriods(1)
            .unboundedPreceding()
            .unboundedFollowing()
            .orderByColumnIndex(orderIndex)
            .build();
             WindowOptions unboundedPrecedingAndCurrentRow = WindowOptions.builder()
                 .minPeriods(1)
                 .unboundedPreceding()
                 .currentRowFollowing()
                 .orderByColumnIndex(orderIndex)
                 .build();
             WindowOptions currentRowAndUnboundedFollowing = WindowOptions.builder()
                 .minPeriods(1)
                 .currentRowPreceding()
                 .unboundedFollowing()
                 .orderByColumnIndex(orderIndex)
                 .build()) {
          try (Table windowAggResults = sorted.groupBy(0, 1)
              .aggregateWindowsOverRanges(
                  RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndFollowing),
                  RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndCurrentRow),
                  RollingAggregation.count().onColumn(2).overWindow(currentRowAndUnboundedFollowing));
               ColumnVector expect_0 = ColumnVector.fromBoxedInts(6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7);
               ColumnVector expect_1 = ColumnVector.fromBoxedInts(1, 2, 3, 4, 5, 6, 2, 2, 3, 4, 5, 6, 7);
               ColumnVector expect_2 = ColumnVector.fromBoxedInts(6, 5, 4, 3, 2, 1, 7, 7, 5, 4, 3, 2, 1)) {
            assertColumnsAreEqual(expect_0, windowAggResults.getColumn(0));
            assertColumnsAreEqual(expect_1, windowAggResults.getColumn(1));
            assertColumnsAreEqual(expect_2, windowAggResults.getColumn(2));
          }
        }
      }
    }
  }
  /**
   * Grouped COUNT over range-based windows on ASCENDING order-by columns that
   * contain nulls, sorted nulls-first. Exercises five bound combinations:
   * unbounded/+1, -1/unbounded, unbounded/unbounded, unbounded/current-row and
   * current-row/unbounded, for every supported order-by column type. Null
   * order-by rows form their own peer group at the start of each partition.
   */
  @Test
  void testRangeWindowingCountUnboundedASCWithNullsFirst() {
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column( null, null, null, 2, 3, 5, null, null, 1, 2, 4, 5, 7) // orderBy Key
        .column( null, null, null, 2L, 3L, 5L, null, null, 1L, 2L, 4L, 5L, 7L) // orderBy Key
        .column( null, null, null, (short)2, (short)3, (short)5, null, null, (short)1, (short)2, (short)4, (short)5, (short)7) // orderBy Key
        .column( null, null, null, (byte)2, (byte)3, (byte)5, null, null, (byte)1, (byte)2, (byte)4, (byte)5, (byte)7) // orderBy Key
        .timestampDayColumn( null, null, null, 2, 3, 5, null, null, 1, 2, 4, 5, 7) // Timestamp orderBy Key
        .timestampSecondsColumn( null, null, null, 2L, 3L, 5L, null, null, 1L, 2L, 4L, 5L, 7L)
        .timestampMicrosecondsColumn( null, null, null, 2L, 3L, 5L, null, null, 1L, 2L, 4L, 5L, 7L)
        .timestampMillisecondsColumn( null, null, null, 2L, 3L, 5L, null, null, 1L, 2L, 4L, 5L, 7L)
        .timestampNanosecondsColumn( null, null, null, 2L, 3L, 5L, null, null, 1L, 2L, 4L, 5L, 7L)
        .build()) {
      // Re-run the same scenario once per candidate order-by column,
      // sorting with nulls first (areNullsSmallest=true).
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(orderIndex, true));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          try (Scalar following1 = getScalar(type, 1L);
               Scalar preceding1 = getScalar(type, 1L);
               Scalar following0 = getScalar(type, 0L);
               Scalar preceding0 = getScalar(type, 0L);) {
            try (WindowOptions unboundedPrecedingOneFollowing = WindowOptions.builder()
                .minPeriods(1)
                .unboundedPreceding()
                .following(following1)
                .orderByColumnIndex(orderIndex)
                .build();
                 WindowOptions onePrecedingUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding1)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .build();
                 WindowOptions unboundedPrecedingAndFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .build();
                 WindowOptions unboundedPrecedingAndCurrentRow = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .following(following0)
                     .orderByColumnIndex(orderIndex)
                     .build();
                 WindowOptions currentRowAndUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding0)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .build();) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingOneFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(onePrecedingUnboundedFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndCurrentRow),
                      RollingAggregation.count().onColumn(2).overWindow(currentRowAndUnboundedFollowing));
                   ColumnVector expect_0 = ColumnVector.fromBoxedInts(3, 3, 3, 5, 5, 6, 2, 2, 4, 4, 6, 6, 7);
                   ColumnVector expect_1 = ColumnVector.fromBoxedInts(6, 6, 6, 3, 3, 1, 7, 7, 5, 5, 3, 3, 1);
                   ColumnVector expect_2 = ColumnVector.fromBoxedInts(6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7);
                   ColumnVector expect_3 = ColumnVector.fromBoxedInts(3, 3, 3, 4, 5, 6, 2, 2, 3, 4, 5, 6, 7);
                   ColumnVector expect_4 = ColumnVector.fromBoxedInts(6, 6, 6, 3, 2, 1, 7, 7, 5, 4, 3, 2, 1)) {
                assertColumnsAreEqual(expect_0, windowAggResults.getColumn(0));
                assertColumnsAreEqual(expect_1, windowAggResults.getColumn(1));
                assertColumnsAreEqual(expect_2, windowAggResults.getColumn(2));
                assertColumnsAreEqual(expect_3, windowAggResults.getColumn(3));
                assertColumnsAreEqual(expect_4, windowAggResults.getColumn(4));
              }
            }
          }
        }
      }
    }
  }
  @Test
  void testRangeWindowingCountUnboundedDESCWithNullsFirst() {
    // Range-based (order-by) window COUNT with unbounded bounds, a DESCENDING
    // order-by column, and nulls sorted first. Columns 3..12 carry the same
    // logical ordering in different types (int/long/short/byte plus every
    // timestamp resolution); each is used in turn as the order-by column and
    // must produce identical window extents.
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column(null, null, null, 5, 3, 2, null, null, 7, 5, 4, 2, 1) // Timestamp Key
        .column(null, null, null, 5L, 3L, 2L, null, null, 7L, 5L, 4L, 2L, 1L) // orderby Key
        .column(null, null, null, (short)5, (short)3, (short)2, null, null, (short)7, (short)5, (short)4, (short)2, (short)1) // orderby Key
        .column(null, null, null, (byte)5, (byte)3, (byte)2, null, null, (byte)7, (byte)5, (byte)4, (byte)2, (byte)1) // orderby Key
        .timestampDayColumn(null, null, null, 5, 3, 2, null, null, 7, 5, 4, 2, 1) // Timestamp orderby Key
        .timestampSecondsColumn( null, null, null, 5L, 3L, 2L, null, null, 7L, 5L, 4L, 2L, 1L)
        .timestampMicrosecondsColumn( null, null, null, 5L, 3L, 2L, null, null, 7L, 5L, 4L, 2L, 1L)
        .timestampMillisecondsColumn( null, null, null, 5L, 3L, 2L, null, null, 7L, 5L, 4L, 2L, 1L)
        .timestampNanosecondsColumn( null, null, null, 5L, 3L, 2L, null, null, 7L, 5L, 4L, 2L, 1L)
        .build()) {
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        // Sort by group keys, then by the order-by column descending; the
        // boolean flag selects the null ordering that puts nulls first here
        // (matching the test name) — confirm against OrderByArg.desc docs.
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.desc(orderIndex, false));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          // Range bounds are scalars matching the order-by column's type.
          try (Scalar following1 = getScalar(type, 1L);
               Scalar preceding1 = getScalar(type, 1L);
               Scalar following0 = getScalar(type, 0L);
               Scalar preceding0 = getScalar(type, 0L);) {
            // Five bound combinations; each WindowOptions marks the order-by
            // column as descending to match the sort above.
            try (WindowOptions unboundedPrecedingOneFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .following(following1)
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions onePrecedingUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding1)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions unboundedPrecedingAndFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions unboundedPrecedingAndCurrentRow = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .following(following0)
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions currentRowAndUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding0)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingOneFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(onePrecedingUnboundedFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndCurrentRow),
                      RollingAggregation.count().onColumn(2).overWindow(currentRowAndUnboundedFollowing));
                   // Expected counts per row, in the order the aggregations
                   // were requested above.
                   ColumnVector expect_0 = ColumnVector.fromBoxedInts(3, 3, 3, 4, 6, 6, 2, 2, 3, 5, 5, 7, 7);
                   ColumnVector expect_1 = ColumnVector.fromBoxedInts(6, 6, 6, 3, 2, 2, 7, 7, 5, 4, 4, 2, 2);
                   ColumnVector expect_2 = ColumnVector.fromBoxedInts(6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7);
                   ColumnVector expect_3 = ColumnVector.fromBoxedInts(3, 3, 3, 4, 5, 6, 2, 2, 3, 4, 5, 6, 7);
                   ColumnVector expect_4 = ColumnVector.fromBoxedInts(6, 6, 6, 3, 2, 1, 7, 7, 5, 4, 3, 2, 1)) {
                assertColumnsAreEqual(expect_0, windowAggResults.getColumn(0));
                assertColumnsAreEqual(expect_1, windowAggResults.getColumn(1));
                assertColumnsAreEqual(expect_2, windowAggResults.getColumn(2));
                assertColumnsAreEqual(expect_3, windowAggResults.getColumn(3));
                assertColumnsAreEqual(expect_4, windowAggResults.getColumn(4));
              }
            }
          }
        }
      }
    }
  }
  @Test
  void testRangeWindowingCountUnboundedASCWithNullsLast() {
    // Range-based window COUNT with unbounded bounds, an ASCENDING order-by
    // column, and nulls sorted last. Columns 3..12 hold the same logical
    // ordering in different types; each is used in turn as the order-by column
    // and must produce identical window extents.
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column(2, 3, 5, null, null, null, 1, 2, 4, 5, 7, null, null) // Timestamp Key
        .column(2L, 3L, 5L, null, null, null, 1L, 2L, 4L, 5L, 7L, null, null) // order by Key
        .column((short)2, (short)3, (short)5, null, null, null, (short)1, (short)2, (short)4, (short)5, (short)7, null, null) // order by Key
        .column((byte)2, (byte)3, (byte)5, null, null, null, (byte)1, (byte)2, (byte)4, (byte)5, (byte)7, null, null) // order by Key
        .timestampDayColumn( 2, 3, 5, null, null, null, 1, 2, 4, 5, 7, null, null) // Timestamp order by Key
        .timestampSecondsColumn( 2L, 3L, 5L, null, null, null, 1L, 2L, 4L, 5L, 7L, null, null)
        .timestampMicrosecondsColumn( 2L, 3L, 5L, null, null, null, 1L, 2L, 4L, 5L, 7L, null, null)
        .timestampMillisecondsColumn( 2L, 3L, 5L, null, null, null, 1L, 2L, 4L, 5L, 7L, null, null)
        .timestampNanosecondsColumn( 2L, 3L, 5L, null, null, null, 1L, 2L, 4L, 5L, 7L, null, null)
        .build()) {
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        // Sort by group keys, then ascending on the order-by column; the
        // boolean flag selects the null ordering that puts nulls last here
        // (matching the test name) — confirm against OrderByArg.asc docs.
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(orderIndex, false));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          // Range bounds are scalars matching the order-by column's type.
          try (Scalar following1 = getScalar(type, 1L);
               Scalar preceding1 = getScalar(type, 1L);
               Scalar following0 = getScalar(type, 0L);
               Scalar preceding0 = getScalar(type, 0L);) {
            // Five bound combinations over the ascending order-by column.
            try (WindowOptions unboundedPrecedingOneFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .following(following1)
                     .orderByColumnIndex(orderIndex)
                     .build();
                 WindowOptions onePrecedingUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding1)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .build();
                 WindowOptions unboundedPrecedingAndFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .build();
                 WindowOptions unboundedPrecedingAndCurrentRow = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .following(following0)
                     .orderByColumnIndex(orderIndex)
                     .build();
                 WindowOptions currentRowAndUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding0)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .build();) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingOneFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(onePrecedingUnboundedFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndCurrentRow),
                      RollingAggregation.count().onColumn(2).overWindow(currentRowAndUnboundedFollowing));
                   // Expected counts per row, in the order the aggregations
                   // were requested above.
                   ColumnVector expect_0 = ColumnVector.fromBoxedInts(2, 2, 3, 6, 6, 6, 2, 2, 4, 4, 5, 7, 7);
                   ColumnVector expect_1 = ColumnVector.fromBoxedInts(6, 6, 4, 3, 3, 3, 7, 7, 5, 5, 3, 2, 2);
                   ColumnVector expect_2 = ColumnVector.fromBoxedInts(6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7);
                   ColumnVector expect_3 = ColumnVector.fromBoxedInts(1, 2, 3, 6, 6, 6, 1, 2, 3, 4, 5, 7, 7);
                   ColumnVector expect_4 = ColumnVector.fromBoxedInts(6, 5, 4, 3, 3, 3, 7, 6, 5, 4, 3, 2, 2)) {
                assertColumnsAreEqual(expect_0, windowAggResults.getColumn(0));
                assertColumnsAreEqual(expect_1, windowAggResults.getColumn(1));
                assertColumnsAreEqual(expect_2, windowAggResults.getColumn(2));
                assertColumnsAreEqual(expect_3, windowAggResults.getColumn(3));
                assertColumnsAreEqual(expect_4, windowAggResults.getColumn(4));
              }
            }
          }
        }
      }
    }
  }
  @Test
  void testRangeWindowingCountUnboundedDESCWithNullsLast() {
    // Range-based window COUNT with unbounded bounds, a DESCENDING order-by
    // column, and nulls sorted last. As in the sibling tests, the same logical
    // ordering is repeated across several order-by column types and each must
    // produce identical window extents.
    Integer X = null; // shorthand for a null entry in the day-timestamp column below
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8) // Agg Column
        .column( 5, 3, 2, null, null, null, 7, 5, 4, 2, 1, null, null) // Timestamp Key
        .column(5L, 3L, 2L, null, null, null, 7L, 5L, 4L, 2L, 1L, null, null) // Timestamp Key
        .column((short)5, (short)3, (short)2, null, null, null, (short)7, (short)5, (short)4, (short)2, (short)1, null, null) // Timestamp Key
        .column((byte)5, (byte)3, (byte)2, null, null, null, (byte)7, (byte)5, (byte)4, (byte)2, (byte)1, null, null) // Timestamp Key
        .timestampDayColumn( 5, 3, 2, X, X, X, 7, 5, 4, 2, 1, X, X) // Timestamp Key
        .timestampSecondsColumn( 5L, 3L, 2L, null, null, null, 7L, 5L, 4L, 2L, 1L, null, null)
        .timestampMicrosecondsColumn( 5L, 3L, 2L, null, null, null, 7L, 5L, 4L, 2L, 1L, null, null)
        .timestampMillisecondsColumn( 5L, 3L, 2L, null, null, null, 7L, 5L, 4L, 2L, 1L, null, null)
        .timestampNanosecondsColumn( 5L, 3L, 2L, null, null, null, 7L, 5L, 4L, 2L, 1L, null, null)
        .build()) {
      for (int orderIndex = 3; orderIndex < unsorted.getNumberOfColumns(); orderIndex++) {
        // Sort by group keys, then descending on the order-by column; the
        // boolean flag selects the null ordering that puts nulls last here
        // (matching the test name) — confirm against OrderByArg.desc docs.
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.desc(orderIndex, true));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6, 8)) {
          ColumnVector sortedAggColumn = sorted.getColumn(2);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          DType type = unsorted.getColumn(orderIndex).getType();
          // Range bounds are scalars matching the order-by column's type.
          try (Scalar following1 = getScalar(type, 1L);
               Scalar preceding1 = getScalar(type, 1L);
               Scalar following0 = getScalar(type, 0L);
               Scalar preceding0 = getScalar(type, 0L);) {
            // Five bound combinations; each WindowOptions marks the order-by
            // column as descending to match the sort above.
            try (WindowOptions unboundedPrecedingOneFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .following(following1)
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions onePrecedingUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding1)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions unboundedPrecedingAndFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions unboundedPrecedingAndCurrentRow = WindowOptions.builder()
                     .minPeriods(1)
                     .unboundedPreceding()
                     .following(following0)
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();
                 WindowOptions currentRowAndUnboundedFollowing = WindowOptions.builder()
                     .minPeriods(1)
                     .preceding(preceding0)
                     .unboundedFollowing()
                     .orderByColumnIndex(orderIndex)
                     .orderByDescending()
                     .build();) {
              try (Table windowAggResults = sorted.groupBy(0, 1)
                  .aggregateWindowsOverRanges(
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingOneFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(onePrecedingUnboundedFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndFollowing),
                      RollingAggregation.count().onColumn(2).overWindow(unboundedPrecedingAndCurrentRow),
                      RollingAggregation.count().onColumn(2).overWindow(currentRowAndUnboundedFollowing));
                   // Expected counts per row, in the order the aggregations
                   // were requested above.
                   ColumnVector expect_0 = ColumnVector.fromBoxedInts(1, 3, 3, 6, 6, 6, 1, 3, 3, 5, 5, 7, 7);
                   ColumnVector expect_1 = ColumnVector.fromBoxedInts(6, 5, 5, 3, 3, 3, 7, 6, 6, 4, 4, 2, 2);
                   ColumnVector expect_2 = ColumnVector.fromBoxedInts(6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7);
                   ColumnVector expect_3 = ColumnVector.fromBoxedInts(1, 2, 3, 6, 6, 6, 1, 2, 3, 4, 5, 7, 7);
                   ColumnVector expect_4 = ColumnVector.fromBoxedInts(6, 5, 4, 3, 3, 3, 7, 6, 5, 4, 3, 2, 2)) {
                assertColumnsAreEqual(expect_0, windowAggResults.getColumn(0));
                assertColumnsAreEqual(expect_1, windowAggResults.getColumn(1));
                assertColumnsAreEqual(expect_2, windowAggResults.getColumn(2));
                assertColumnsAreEqual(expect_3, windowAggResults.getColumn(3));
                assertColumnsAreEqual(expect_4, windowAggResults.getColumn(4));
              }
            }
          }
        }
      }
    }
  }
/**
* Helper for constructing BigInteger from int
* @param x Integer value
* @return BigInteger equivalent of x
*/
private static BigInteger big(int x)
{
return new BigInteger("" + x);
}
/**
* Helper to get scalar for preceding == Decimal(value),
* with data width depending upon the order-by
* column index:
* orderby_col_idx = 2 -> Decimal32
* orderby_col_idx = 3 -> Decimal64
* orderby_col_idx = 4 -> Decimal128
*/
private static Scalar getDecimalScalarRangeBounds(int scale, int unscaledValue, int orderby_col_idx)
{
switch(orderby_col_idx)
{
case 2: return Scalar.fromDecimal(scale, unscaledValue);
case 3: return Scalar.fromDecimal(scale, Long.valueOf(unscaledValue));
case 4: return Scalar.fromDecimal(scale, big(unscaledValue));
default:
throw new IllegalStateException("Unexpected order by column index: "
+ orderby_col_idx);
}
}
  @Test
  void testRangeWindowsWithDecimalOrderBy() {
    // Range-window COUNT over DECIMAL32/64/128 order-by columns. Each decimal
    // width is exercised in turn; the range bounds are decimal scalars of the
    // matching width, and all three widths must yield the same counts.
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .decimal32Column(-1, 4000, 3000, 2000, 1000,
            4000, 3000, 2000, 1000,
            4000, 3000, 2000, 1000) // Decimal OBY Key
        .decimal64Column(-1, 4000l, 3000l, 2000l, 1000l,
            4000l, 3000l, 2000l, 1000l,
            4000l, 3000l, 2000l, 1000l) // Decimal OBY Key
        .decimal128Column(-1, RoundingMode.UNNECESSARY,
            big(4000), big(3000), big(2000), big(1000),
            big(4000), big(3000), big(2000), big(1000),
            big(4000), big(3000), big(2000), big(1000))
        .column(9, 1, 5, 7, 2, 8, 9, 7, 6, 6, 0, 8) // Agg Column
        .build()) {
      // Columns 2,3,4 are decimal order-by columns of type DECIMAL32, DECIMAL64,
      // and DECIMAL128 respectively, with similarly ordered values.
      // In the following loop, each decimal type is tested as the order-by column,
      // producing the same results with similar range bounds.
      for (int decimal_oby_col_idx = 2; decimal_oby_col_idx <= 4; ++decimal_oby_col_idx) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0),
            OrderByArg.asc(1),
            OrderByArg.asc(decimal_oby_col_idx));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
          ColumnVector sortedAggColumn = sorted.getColumn(5);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          // Test Window functionality with range window (200 PRECEDING and 100 FOLLOWING)
          // Note: following100 is unscaled 1 at scale 2, i.e. 1 * 10^2 == 100.
          try (Scalar preceding200 = getDecimalScalarRangeBounds(0, 200, decimal_oby_col_idx);
               Scalar following100 = getDecimalScalarRangeBounds(2, 1, decimal_oby_col_idx);
               WindowOptions window = WindowOptions.builder()
                   .minPeriods(1)
                   .window(preceding200, following100)
                   .orderByColumnIndex(decimal_oby_col_idx)
                   .build()) {
            try (Table windowAggResults = sorted.groupBy(0, 1)
                .aggregateWindowsOverRanges(RollingAggregation.count()
                    .onColumn(5)
                    .overWindow(window));
                 ColumnVector expect = ColumnVector.fromBoxedInts(2, 3, 4, 3, 2, 3, 4, 3, 2, 3, 4, 3)) {
              assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            }
          }
          // Test Window functionality with range window (UNBOUNDED PRECEDING and CURRENT ROW)
          try (Scalar current_row = getDecimalScalarRangeBounds(0, 0, decimal_oby_col_idx);
               WindowOptions window = WindowOptions.builder()
                   .minPeriods(1)
                   .unboundedPreceding()
                   .following(current_row)
                   .orderByColumnIndex(decimal_oby_col_idx)
                   .build()) {
            try (Table windowAggResults = sorted.groupBy(0, 1)
                .aggregateWindowsOverRanges(RollingAggregation.count()
                    .onColumn(5)
                    .overWindow(window));
                 ColumnVector expect = ColumnVector.fromBoxedInts(1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4)) {
              assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            }
          }
          // Test Window functionality with range window (UNBOUNDED PRECEDING and UNBOUNDED FOLLOWING)
          try (WindowOptions window = WindowOptions.builder()
              .minPeriods(1)
              .unboundedPreceding()
              .unboundedFollowing()
              .orderByColumnIndex(decimal_oby_col_idx)
              .build()) {
            try (Table windowAggResults = sorted.groupBy(0, 1)
                .aggregateWindowsOverRanges(RollingAggregation.count()
                    .onColumn(5)
                    .overWindow(window));
                 ColumnVector expect = ColumnVector.fromBoxedInts(4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4)) {
              assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            }
          }
        }
      }
    }
  }
/**
* Helper to get scalar for preceding == Decimal(value),
* with data width depending upon the order-by column index:
* orderby_col_idx = 2 -> FLOAT32
* orderby_col_idx = 3 -> FLOAT64
*/
private static Scalar getFloatingPointScalarRangeBounds(float value, int orderby_col_idx)
{
switch(orderby_col_idx)
{
case 2: return Scalar.fromFloat(value);
case 3: return Scalar.fromDouble(Double.valueOf(value));
default:
throw new IllegalStateException("Unexpected order by column index: "
+ orderby_col_idx);
}
}
  @Test
  void testRangeWindowsWithFloatOrderBy() {
    // Range-window COUNT over FLOAT32 and FLOAT64 order-by columns; both
    // widths carry the same ordered values and must yield the same counts.
    try (Table unsorted = new Table.TestBuilder()
        .column(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) // GBY Key
        .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3) // GBY Key
        .column(400f, 300f, 200f, 100f,
            400f, 300f, 200f, 100f,
            400f, 300f, 200f, 100f) // Float OBY Key
        .column(400.0, 300.0, 200.0, 100.0,
            400.0, 300.0, 200.0, 100.0,
            400.0, 300.0, 200.0, 100.0) // Double OBY Key
        .column(9, 1, 5, 7, 2, 8, 9, 7, 6, 6, 0, 8) // Agg Column
        .build()) {
      // Columns 2-3 are order-by columns of type FLOAT32 and FLOAT64 respectively, with similarly ordered values.
      // In the following loop, each float type is tested as the order-by column,
      // producing the same results with similar range bounds.
      for (int float_oby_col_idx = 2; float_oby_col_idx <= 3; ++float_oby_col_idx) {
        try (Table sorted = unsorted.orderBy(OrderByArg.asc(0),
            OrderByArg.asc(1),
            OrderByArg.asc(float_oby_col_idx));
             ColumnVector expectSortedAggColumn = ColumnVector.fromBoxedInts(7, 5, 1, 9, 7, 9, 8, 2, 8, 0, 6, 6)) {
          ColumnVector sortedAggColumn = sorted.getColumn(4);
          assertColumnsAreEqual(expectSortedAggColumn, sortedAggColumn);
          // Test Window functionality with range window (200 PRECEDING and 100 FOLLOWING)
          try (Scalar preceding200 = getFloatingPointScalarRangeBounds(200, float_oby_col_idx);
               Scalar following100 = getFloatingPointScalarRangeBounds(100, float_oby_col_idx);
               WindowOptions window = WindowOptions.builder()
                   .minPeriods(1)
                   .window(preceding200, following100)
                   .orderByColumnIndex(float_oby_col_idx)
                   .build()) {
            try (Table windowAggResults = sorted.groupBy(0, 1)
                .aggregateWindowsOverRanges(RollingAggregation.count()
                    .onColumn(4)
                    .overWindow(window));
                 ColumnVector expect = ColumnVector.fromBoxedInts(2, 3, 4, 3, 2, 3, 4, 3, 2, 3, 4, 3)) {
              assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            }
          }
          // Test Window functionality with range window (UNBOUNDED PRECEDING and CURRENT ROW)
          try (Scalar current_row = getFloatingPointScalarRangeBounds(0, float_oby_col_idx);
               WindowOptions window = WindowOptions.builder()
                   .minPeriods(1)
                   .unboundedPreceding()
                   .following(current_row)
                   .orderByColumnIndex(float_oby_col_idx)
                   .build()) {
            try (Table windowAggResults = sorted.groupBy(0, 1)
                .aggregateWindowsOverRanges(RollingAggregation.count()
                    .onColumn(4)
                    .overWindow(window));
                 ColumnVector expect = ColumnVector.fromBoxedInts(1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4)) {
              assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            }
          }
          // Test Window functionality with range window (UNBOUNDED PRECEDING and UNBOUNDED FOLLOWING)
          try (WindowOptions window = WindowOptions.builder()
              .minPeriods(1)
              .unboundedPreceding()
              .unboundedFollowing()
              .orderByColumnIndex(float_oby_col_idx)
              .build()) {
            try (Table windowAggResults = sorted.groupBy(0, 1)
                .aggregateWindowsOverRanges(RollingAggregation.count()
                    .onColumn(4)
                    .overWindow(window));
                 ColumnVector expect = ColumnVector.fromBoxedInts(4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4)) {
              assertColumnsAreEqual(expect, windowAggResults.getColumn(0));
            }
          }
        }
      }
    }
  }
@Test
void testGroupByCountWithNulls() {
try (Table t1 = new Table.TestBuilder().column(null, null, 1, 1, 1, 1)
.column( 1, 1, 1, 1, 1, 1)
.column( 1, 1, null, null, 1, 1)
.column( 1, 1, 1, null, 1, 1)
.build()) {
try (Table tmp = t1.groupBy(0).aggregate(
GroupByAggregation.count().onColumn(1),
GroupByAggregation.count().onColumn(2),
GroupByAggregation.count().onColumn(3));
Table t3 = tmp.orderBy(OrderByArg.asc(0, true));
HostColumnVector groupCol = t3.getColumn(0).copyToHost();
HostColumnVector countCol = t3.getColumn(1).copyToHost();
HostColumnVector nullCountCol = t3.getColumn(2).copyToHost();
HostColumnVector nullCountCol2 = t3.getColumn(3).copyToHost()) {
// verify t3
assertEquals(2, t3.getRowCount());
// compare the grouping columns
assertTrue(groupCol.isNull(0));
assertEquals(groupCol.getInt(1), 1);
// compare the agg columns
// count(1)
assertEquals(countCol.getInt(0), 2);
assertEquals(countCol.getInt(1), 4);
// count(2)
assertEquals(nullCountCol.getInt(0), 2);
assertEquals(nullCountCol.getInt(1), 2); // counts only the non-nulls
// count(3)
assertEquals(nullCountCol2.getInt(0), 2);
assertEquals(nullCountCol2.getInt(1), 3); // counts only the non-nulls
}
}
}
@Test
void testGroupByCountWithNullsIncluded() {
try (Table t1 = new Table.TestBuilder()
.column(null, null, 1, 1, 1, 1)
.column( 1, 1, 1, 1, 1, 1)
.column( 1, 1, null, null, 1, 1)
.column( 1, 1, 1, null, 1, 1)
.build()) {
try (Table tmp = t1.groupBy(0).aggregate(
GroupByAggregation.count(NullPolicy.INCLUDE).onColumn(1),
GroupByAggregation.count(NullPolicy.INCLUDE).onColumn(2),
GroupByAggregation.count(NullPolicy.INCLUDE).onColumn(3),
GroupByAggregation.count().onColumn(3));
Table t3 = tmp.orderBy(OrderByArg.asc(0, true));
HostColumnVector groupCol = t3.getColumn(0).copyToHost();
HostColumnVector countCol = t3.getColumn(1).copyToHost();
HostColumnVector nullCountCol = t3.getColumn(2).copyToHost();
HostColumnVector nullCountCol2 = t3.getColumn(3).copyToHost();
HostColumnVector nullCountCol3 = t3.getColumn(4).copyToHost()) {
// verify t3
assertEquals(2, t3.getRowCount());
// compare the grouping columns
assertTrue(groupCol.isNull(0));
assertEquals(groupCol.getInt(1), 1);
// compare the agg columns
// count(1, true)
assertEquals(countCol.getInt(0), 2);
assertEquals(countCol.getInt(1), 4);
// count(2, true)
assertEquals(nullCountCol.getInt(0), 2);
assertEquals(nullCountCol.getInt(1), 4); // counts including nulls
// count(3, true)
assertEquals(nullCountCol2.getInt(0), 2);
assertEquals(nullCountCol2.getInt(1), 4); // counts including nulls
// count(3)
assertEquals(nullCountCol3.getInt(0), 2);
assertEquals(nullCountCol3.getInt(1), 3); // counts only the non-nulls
}
}
}
@Test
void testGroupByCountWithCollapsingNulls() {
try (Table t1 = new Table.TestBuilder()
.column(null, null, 1, 1, 1, 1)
.column( 1, 1, 1, 1, 1, 1)
.column( 1, 1, null, null, 1, 1)
.column( 1, 1, 1, null, 1, 1)
.build()) {
GroupByOptions options = GroupByOptions.builder()
.withIgnoreNullKeys(true)
.build();
try (Table tmp = t1.groupBy(options, 0).aggregate(
GroupByAggregation.count().onColumn(1),
GroupByAggregation.count().onColumn(2),
GroupByAggregation.count().onColumn(3));
Table t3 = tmp.orderBy(OrderByArg.asc(0, true));
HostColumnVector groupCol = t3.getColumn(0).copyToHost();
HostColumnVector countCol = t3.getColumn(1).copyToHost();
HostColumnVector nullCountCol = t3.getColumn(2).copyToHost();
HostColumnVector nullCountCol2 = t3.getColumn(3).copyToHost()) {
// (null, 1) => became (1) because we are ignoring nulls
assertEquals(1, t3.getRowCount());
// compare the grouping columns
assertEquals(groupCol.getInt(0), 1);
// compare the agg columns
// count(1)
assertEquals(countCol.getInt(0), 4);
// count(2)
assertEquals(nullCountCol.getInt(0), 2); // counts only the non-nulls
// count(3)
assertEquals(nullCountCol2.getInt(0), 3); // counts only the non-nulls
}
}
}
@Test
void testGroupByMax() {
try (Table t1 = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1)
.column( 1, 3, 3, 5, 5, 0)
.column(12.0, 14.0, 13.0, 17.0, 17.0, 17.0)
.build()) {
try (Table t3 = t1.groupBy(0, 1).aggregate(GroupByAggregation.max().onColumn(2));
HostColumnVector aggOut1 = t3.getColumn(2).copyToHost()) {
// verify t3
assertEquals(4, t3.getRowCount());
Map<Double, Integer> expectedAggregateResult = new HashMap() {
{
// value, count
put(12.0, 1);
put(14.0, 1);
put(17.0, 2);
}
};
for (int i = 0; i < 4; ++i) {
Double key = aggOut1.getDouble(i);
assertTrue(expectedAggregateResult.containsKey(key));
Integer count = expectedAggregateResult.get(key);
if (count == 1) {
expectedAggregateResult.remove(key);
} else {
expectedAggregateResult.put(key, count - 1);
}
}
}
}
}
  @Test
  void testGroupByArgMax() {
    // ArgMax is a sort based aggregation.
    // The expected third column holds, for each group, the row index (in the
    // grouped input) of the group's maximum value — e.g. group (1,3) spans
    // rows 4-5 with max 17.1 at row 4.
    try (Table t1 = new Table.TestBuilder()
        .column(   1,    1,    1,    1,    1,    1)
        .column(   0,    1,    2,    2,    3,    3)
        .column(17.0, 14.0, 14.0, 17.0, 17.1, 17.0)
        .build()) {
      try (Table t3 = t1.groupBy(0, 1)
          .aggregate(GroupByAggregation.argMax().onColumn(2));
           Table sorted = t3
               .orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table expected = new Table.TestBuilder()
               .column(1, 1, 1, 1)
               .column(0, 1, 2, 3)
               .column(0, 1, 3, 4)
               .build()) {
        assertTablesAreEqual(expected, sorted);
      }
    }
  }
  @Test
  void testGroupByArgMin() {
    // ArgMin is a sort based aggregation
    // The expected third column holds, for each group, the row index (in the
    // grouped input) of the group's minimum value — e.g. group (1,3) spans
    // rows 4-5 with min 17.0 at row 5.
    try (Table t1 = new Table.TestBuilder()
        .column(   1,    1,    1,    1,    1,    1)
        .column(   0,    1,    2,    2,    3,    3)
        .column(17.0, 14.0, 14.0, 17.0, 17.1, 17.0)
        .build()) {
      try (Table t3 = t1.groupBy(0, 1)
          .aggregate(GroupByAggregation.argMin().onColumn(2));
           Table sorted = t3
               .orderBy(OrderByArg.asc(0), OrderByArg.asc(1), OrderByArg.asc(2));
           Table expected = new Table.TestBuilder()
               .column(1, 1, 1, 1)
               .column(0, 1, 2, 3)
               .column(0, 1, 2, 5)
               .build()) {
        assertTablesAreEqual(expected, sorted);
      }
    }
  }
  @Test
  void testGroupByMinBool() {
    // MIN over booleans: per the expected table, false < true, nulls are
    // skipped, and a group containing only nulls (key 3) yields null.
    try (Table t1 = new Table.TestBuilder()
        .column(true, null, false, true, null, null)
        .column(   1,    1,     2,    2,    3,    3).build();
         Table other = t1.groupBy(1).aggregate(GroupByAggregation.min().onColumn(0));
         Table ordered = other.orderBy(OrderByArg.asc(0));
         Table expected = new Table.TestBuilder()
             .column(1, 2, 3)
             .column (true, false, null)
             .build()) {
      assertTablesAreEqual(expected, ordered);
    }
  }
  @Test
  void testGroupByMaxBool() {
    // MAX over booleans: per the expected table, true > false, nulls are
    // skipped, and a group containing only nulls (key 3) yields null.
    try (Table t1 = new Table.TestBuilder()
        .column(false, null, false, true, null, null)
        .column(    1,    1,     2,    2,    3,    3).build();
         Table other = t1.groupBy(1).aggregate(GroupByAggregation.max().onColumn(0));
         Table ordered = other.orderBy(OrderByArg.asc(0));
         Table expected = new Table.TestBuilder()
             .column(1, 2, 3)
             .column (false, true, null)
             .build()) {
      assertTablesAreEqual(expected, ordered);
    }
  }
@Test
void testGroupByDuplicateAggregates() {
try (Table t1 = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1)
.column( 1, 3, 3, 5, 5, 0)
.column(12.0, 14.0, 13.0, 15.0, 17.0, 18.0)
.build();
Table expected = new Table.TestBuilder()
.column(1, 1, 1, 1)
.column(1, 3, 5, 0)
.column(12.0, 14.0, 17.0, 18.0)
.column(12.0, 13.0, 15.0, 18.0)
.column(12.0, 13.0, 15.0, 18.0)
.column(12.0, 14.0, 17.0, 18.0)
.column(12.0, 13.0, 15.0, 18.0)
.column( 1, 2, 2, 1).build()) {
try (Table t3 = t1.groupBy(0, 1)
.aggregate(
GroupByAggregation.max().onColumn(2),
GroupByAggregation.min().onColumn(2),
GroupByAggregation.min().onColumn(2),
GroupByAggregation.max().onColumn(2),
GroupByAggregation.min().onColumn(2),
GroupByAggregation.count().onColumn(1));
Table t4 = t3.orderBy(OrderByArg.asc(2))) {
// verify t4
assertEquals(4, t4.getRowCount());
assertTablesAreEqual(t4, expected);
assertEquals(t3.getColumn(0).getRefCount(), 1);
assertEquals(t3.getColumn(1).getRefCount(), 1);
assertEquals(t3.getColumn(2).getRefCount(), 2);
assertEquals(t3.getColumn(3).getRefCount(), 3);
assertEquals(t3.getColumn(4).getRefCount(), 3);
assertEquals(t3.getColumn(5).getRefCount(), 2);
assertEquals(t3.getColumn(6).getRefCount(), 3);
}
}
}
@Test
void testGroupByMin() {
try (Table t1 = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1)
.column( 1, 3, 3, 5, 5, 0)
.column( 12, 14, 13, 17, 17, 17)
.build()) {
try (Table t3 = t1.groupBy(0, 1).aggregate(GroupByAggregation.min().onColumn(2));
HostColumnVector aggOut0 = t3.getColumn(2).copyToHost()) {
// verify t3
assertEquals(4, t3.getRowCount());
Map<Integer, Integer> expectedAggregateResult = new HashMap() {
{
// value, count
put(12, 1);
put(13, 1);
put(17, 2);
}
};
// check to see the aggregate column type depends on the source column
// in this case the source column is Integer, therefore the result should be Integer type
assertEquals(DType.INT32, aggOut0.getType());
for (int i = 0; i < 4; ++i) {
int key = aggOut0.getInt(i);
assertTrue(expectedAggregateResult.containsKey(key));
Integer count = expectedAggregateResult.get(key);
if (count == 1) {
expectedAggregateResult.remove(key);
} else {
expectedAggregateResult.put(key, count - 1);
}
}
}
}
}
@Test
void testGroupBySum() {
try (Table t1 = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1)
.column( 1, 3, 3, 5, 5, 0)
.column(12.0, 14.0, 13.0, 17.0, 17.0, 17.0)
.build()) {
try (Table t3 = t1.groupBy(0, 1).aggregate(GroupByAggregation.sum().onColumn(2));
HostColumnVector aggOut1 = t3.getColumn(2).copyToHost()) {
// verify t3
assertEquals(4, t3.getRowCount());
Map<Double, Integer> expectedAggregateResult = new HashMap() {
{
// value, count
put(12.0, 1);
put(27.0, 1);
put(34.0, 1);
put(17.0, 1);
}
};
for (int i = 0; i < 4; ++i) {
Double key = aggOut1.getDouble(i);
assertTrue(expectedAggregateResult.containsKey(key));
Integer count = expectedAggregateResult.get(key);
if (count == 1) {
expectedAggregateResult.remove(key);
} else {
expectedAggregateResult.put(key, count - 1);
}
}
}
}
}
  @Test
  void testGroupByM2() {
    // M2 is the sum of squared differences from the group mean (the running
    // numerator of variance). Four scenarios: plain values, nulls, NaN, and
    // NaN with +/- infinity.
    // A trivial test:
    try (Table input = new Table.TestBuilder().column(1, 2, 3, 1, 2, 2, 1, 3, 3, 2)
                                              .column(0, 1, -2, 3, -4, -5, -6, 7, -8, 9)
                                              .build();
         Table results = input.groupBy(0).aggregate(GroupByAggregation.M2()
             .onColumn(1));
         Table expected = new Table.TestBuilder().column(1, 2, 3)
                                                 .column(42.0, 122.75, 114.0)
                                                 .build()) {
      assertTablesAreEqual(expected, results);
    }

    // Test with values have nulls (the values associated with key=2 has both nulls and non-nulls,
    // while the values associated with key=5 are all nulls):
    try (Table input = new Table.TestBuilder().column(1, 2, 5, 3, 4, 5, 2, 3, 2, 5)
                                              .column(0, null, null, 2, 3, null, 5, 6, 7, null)
                                              .build();
         Table results = input.groupBy(0).aggregate(GroupByAggregation.M2()
             .onColumn(1));
         Table expected = new Table.TestBuilder().column(1, 2, 3, 4, 5)
                                                 .column(0.0, 2.0, 8.0, 0.0, null)
                                                 .build()) {
      assertTablesAreEqual(expected, results);
    }

    // Test with floating-point values having NaN:
    // Any NaN in a group poisons that group's M2 to NaN.
    try (Table input = new Table.TestBuilder().column(4, 3, 1, 2, 3, 1, 2, 2, 1, null, 3, 2, 4, 4)
                                              .column(null, null, 0.0, 1.0, 2.0, 3.0, 4.0, Double.NaN, 6.0, 7.0, 8.0, 9.0, 10.0, Double.NaN)
                                              .build();
         Table results = input.groupBy(0).aggregate(GroupByAggregation.M2()
             .onColumn(1));
         Table expected = new Table.TestBuilder().column(1, 2, 3, 4, null)
                                                 .column(18.0, Double.NaN, 18.0, Double.NaN, 0.0)
                                                 .build()) {
      assertTablesAreEqual(expected, results);
    }

    // Test with floating-point values having NaN and +/- Inf
    // (The values associated with:
    //  key=1: have only NaN
    //  key=2: have only +Inf
    //  key=3: have only -Inf
    //  key=4: have NaN and +/- Inf,
    //  key=5: have normal numbers):
    try (Table input = new Table.TestBuilder().column(1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4)
                                              .column(Double.NaN,
                                                  Double.POSITIVE_INFINITY,
                                                  Double.NEGATIVE_INFINITY,
                                                  Double.POSITIVE_INFINITY,
                                                  5.0,
                                                  //
                                                  Double.NaN,
                                                  Double.POSITIVE_INFINITY,
                                                  Double.NEGATIVE_INFINITY,
                                                  Double.NEGATIVE_INFINITY,
                                                  10.0,
                                                  //
                                                  Double.NaN,
                                                  Double.POSITIVE_INFINITY,
                                                  Double.NEGATIVE_INFINITY,
                                                  Double.POSITIVE_INFINITY)
                                              .build();
         Table results = input.groupBy(0).aggregate(GroupByAggregation.M2()
             .onColumn(1));
         Table expected = new Table.TestBuilder().column(1, 2, 3, 4, 5)
                                                 .column(Double.NaN, Double.NaN, Double.NaN, Double.NaN, 12.5)
                                                 .build()) {
      assertTablesAreEqual(expected, results);
    }
  }
  @Test
  void testGroupByMergeM2() {
    // mergeM2 combines per-partition partial results into final group values.
    // Each struct is (n, mean, M2) — e.g. key 1 merges (1, 0.0, 0.0),
    // (1, 3.0, 0.0) and (1, 6.0, 0.0) into (3, 3.0, 18.0). A NaN in any
    // partial propagates into the merged result.
    StructType nestedType = new StructType(false,
        new BasicType(true, DType.INT32),
        new BasicType(true, DType.FLOAT64),
        new BasicType(true, DType.FLOAT64));
    try (Table partialResults1 = new Table.TestBuilder()
        .column(1, 2, 3, 4)
        .column(nestedType,
            struct(1, 0.0, 0.0),
            struct(1, 1.0, 0.0),
            struct(0, null, null),
            struct(0, null, null))
        .build();
         Table partialResults2 = new Table.TestBuilder()
             .column(1, 2, 3)
             .column(nestedType,
                 struct(1, 3.0, 0.0),
                 struct(1, 4.0, 0.0),
                 struct(1, 2.0, 0.0))
             .build();
         Table partialResults3 = new Table.TestBuilder()
             .column(1, 2)
             .column(nestedType,
                 struct(1, 6.0, 0.0),
                 struct(1, Double.NaN, Double.NaN))
             .build();
         Table partialResults4 = new Table.TestBuilder()
             .column(2, 3, 4)
             .column(nestedType,
                 struct(1, 9.0, 0.0),
                 struct(1, 8.0, 0.0),
                 struct(2, Double.NaN, Double.NaN))
             .build();
         Table expected = new Table.TestBuilder()
             .column(1, 2, 3, 4)
             .column(nestedType,
                 struct(3, 3.0, 18.0),
                 struct(4, Double.NaN, Double.NaN),
                 struct(2, 5.0, 18.0),
                 struct(2, Double.NaN, Double.NaN))
             .build()) {
      try (Table concatenatedResults = Table.concatenate(
          partialResults1,
          partialResults2,
          partialResults3,
          partialResults4);
           Table finalResults = concatenatedResults.groupBy(0).aggregate(
               GroupByAggregation.mergeM2().onColumn(1))
      ) {
        assertTablesAreEqual(expected, finalResults);
      }
    }
  }
@Test
void testGroupByFirstExcludeNulls() {
try (Table input = new Table.TestBuilder()
.column( 1, 1, 1, 1, 2, 2, 2, 2)
.column(null, 13, null, 12, 14, null, 15, null)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2)
.column(13, 14)
.build();
Table found = input.groupBy(0).aggregate(
GroupByAggregation.nth(0, NullPolicy.EXCLUDE).onColumn(1))) {
assertTablesAreEqual(expected, found);
}
}
@Test
void testGroupByLastExcludeNulls() {
try (Table input = new Table.TestBuilder()
.column( 1, 1, 1, 1, 2, 2, 2, 2)
.column(null, 13, null, 12, 14, null, 15, null)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2)
.column(12, 15)
.build();
Table found = input.groupBy(0).aggregate(
GroupByAggregation.nth(-1, NullPolicy.EXCLUDE).onColumn(1))) {
assertTablesAreEqual(expected, found);
}
}
@Test
void testGroupByFirstIncludeNulls() {
try (Table input = new Table.TestBuilder()
.column( 1, 1, 1, 1, 2, 2, 2, 2)
.column(null, 13, null, 12, 14, null, 15, null)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2)
.column(null, 14)
.build();
Table found = input.groupBy(0).aggregate(
GroupByAggregation.nth(0, NullPolicy.INCLUDE).onColumn(1))) {
assertTablesAreEqual(expected, found);
}
}
@Test
void testGroupByLastIncludeNulls() {
try (Table input = new Table.TestBuilder()
.column( 1, 1, 1, 1, 2, 2, 2, 2)
.column(null, 13, null, 12, 14, null, 15, null)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2)
.column(12, null)
.build();
Table found = input.groupBy(0).aggregate(
GroupByAggregation.nth(-1, NullPolicy.INCLUDE).onColumn(1))) {
assertTablesAreEqual(expected, found);
}
}
@Test
void testGroupByAvg() {
try (Table t1 = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1)
.column( 1, 3, 3, 5, 5, 0)
.column(12, 14, 13, 1, 17, 17)
.build()) {
try (Table t3 = t1.groupBy(0, 1).aggregate(GroupByAggregation.mean().onColumn(2));
HostColumnVector aggOut1 = t3.getColumn(2).copyToHost()) {
// verify t3
assertEquals(4, t3.getRowCount());
Map<Double, Integer> expectedAggregateResult = new HashMap() {
{
// value, count
put(12.0, 1);
put(13.5, 1);
put(17.0, 1);
put(9.0, 1);
}
};
for (int i = 0; i < 4; ++i) {
Double key = aggOut1.getDouble(i);
assertTrue(expectedAggregateResult.containsKey(key));
Integer count = expectedAggregateResult.get(key);
if (count == 1) {
expectedAggregateResult.remove(key);
} else {
expectedAggregateResult.put(key, count - 1);
}
}
}
}
}
  @Test
  void testMultiAgg() {
    // Runs count/max/min/mean/sum in one aggregate() call over two key
    // columns. Group row order is unspecified, so per-group results are
    // sorted before being compared.
    try (Table t1 = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1)
                                           .column( 2, 2, 2, 3, 3, 3)
                                           .column(5.0, 2.3, 3.4, 2.3, 1.3, 12.2)
                                           .column( 3, 1, 7, -1, 9, 0)
                                           .build()) {
      try (Table t2 = t1.groupBy(0, 1).aggregate(
          GroupByAggregation.count().onColumn(0),
          GroupByAggregation.max().onColumn(3),
          GroupByAggregation.min().onColumn(2),
          GroupByAggregation.mean().onColumn(2),
          GroupByAggregation.sum().onColumn(2));
           // Aggregate outputs follow the two key columns, in request order.
           HostColumnVector countOut = t2.getColumn(2).copyToHost();
           HostColumnVector maxOut = t2.getColumn(3).copyToHost();
           HostColumnVector minOut = t2.getColumn(4).copyToHost();
           HostColumnVector avgOut = t2.getColumn(5).copyToHost();
           HostColumnVector sumOut = t2.getColumn(6).copyToHost()) {
        assertEquals(2, t2.getRowCount());

        // verify count: both groups have 3 rows
        assertEquals(3, countOut.getInt(0));
        assertEquals(3, countOut.getInt(1));

        // verify mean
        List<Double> sortedMean = new ArrayList<>();
        sortedMean.add(avgOut.getDouble(0));
        sortedMean.add(avgOut.getDouble(1));
        sortedMean = sortedMean.stream()
            .sorted(Comparator.naturalOrder())
            .collect(Collectors.toList());
        assertEqualsWithinPercentage(3.5666f, sortedMean.get(0), 0.0001);
        assertEqualsWithinPercentage(5.2666f, sortedMean.get(1), 0.0001);

        // verify sum
        List<Double> sortedSum = new ArrayList<>();
        sortedSum.add(sumOut.getDouble(0));
        sortedSum.add(sumOut.getDouble(1));
        sortedSum = sortedSum.stream()
            .sorted(Comparator.naturalOrder())
            .collect(Collectors.toList());
        assertEqualsWithinPercentage(10.7f, sortedSum.get(0), 0.0001);
        assertEqualsWithinPercentage(15.8f, sortedSum.get(1), 0.0001);

        // verify min
        List<Double> sortedMin = new ArrayList<>();
        sortedMin.add(minOut.getDouble(0));
        sortedMin.add(minOut.getDouble(1));
        sortedMin = sortedMin.stream()
            .sorted(Comparator.naturalOrder())
            .collect(Collectors.toList());
        assertEqualsWithinPercentage(1.3f, sortedMin.get(0), 0.0001);
        assertEqualsWithinPercentage(2.3f, sortedMin.get(1), 0.0001);

        // verify max
        List<Integer> sortedMax = new ArrayList<>();
        sortedMax.add(maxOut.getInt(0));
        sortedMax.add(maxOut.getInt(1));
        sortedMax = sortedMax.stream()
            .sorted(Comparator.naturalOrder())
            .collect(Collectors.toList());
        assertEquals(7, sortedMax.get(0));
        assertEquals(9, sortedMax.get(1));
      }
    }
  }
@Test
void testSumWithStrings() {
try (Table t = new Table.TestBuilder()
.column("1-URGENT", "3-MEDIUM", "1-URGENT", "3-MEDIUM")
.column(5289L, 5203L, 5303L, 5206L)
.build();
Table result = t.groupBy(0).aggregate(
GroupByAggregation.sum().onColumn(1));
Table expected = new Table.TestBuilder()
.column("1-URGENT", "3-MEDIUM")
.column(5289L + 5303L, 5203L + 5206L)
.build()) {
assertTablesAreEqual(expected, result);
}
}
@Test
void testGroupByNoAggs() {
try (Table t1 = new Table.TestBuilder().column( 1, 1, 1, 1, 1, 1)
.column( 1, 3, 3, 5, 5, 0)
.column( 12, 14, 13, 17, 17, 17)
.decimal32Column(-3, 12, 14, 13, 111, 222, 333)
.decimal64Column(-3, 12L, 14L, 13L, 111L, 222L, 333L)
.build()) {
try (Table t3 = t1.groupBy(0, 1).aggregate()) {
// verify t3
assertEquals(4, t3.getRowCount());
}
}
}
/**
* A wrapper for ContiguousTable[] to implement AutoCloseable
*/
static class ContiguousSplitRes implements AutoCloseable {
// to be closed
private ContiguousTable[] splits;
public ContiguousSplitRes(ContiguousTable[] splits) {
this.splits = splits;
}
public ContiguousTable[] getSplits() {
return splits;
}
@Override
public void close() throws Exception {
if (splits != null) {
for (ContiguousTable t : splits) { t.close(); }
}
}
}
  @Test
  void testGroupByContiguousSplitGroups() throws Exception {
    // Exercises contiguousSplitGroups and the variant that also returns a
    // table of unique keys, across three cases: normal keys, no key columns,
    // and a zero-row table.
    try (Table table = new Table.TestBuilder()
            .column( 1, 1, 1, 1, 1, 1)
            .column( 1, 3, 3, 5, 5, 5)
            .column( 12, 14, 13, 17, 16, 18)
            .column("s1", "s2", "s3", "s4", "s5", "s6")
            .build()) {
      // Normal case with primitive types.
      try (Table expected1 = new Table.TestBuilder()
              .column( 1)
              .column( 1)
              .column( 12)
              .column("s1").build();
           Table expected2 = new Table.TestBuilder()
               .column( 1, 1)
               .column( 3, 3)
               .column( 14, 13)
               .column("s2", "s3").build();
           Table expected3 = new Table.TestBuilder()
               .column( 1, 1, 1)
               .column( 5, 5, 5)
               .column( 17, 16, 18)
               .column("s4", "s5", "s6").build();
           Table expected4 = new Table.TestBuilder()
               .column( 1, 1, 1)
               .column( 1, 3, 5).build();
           ContiguousSplitRes splitsRes = new ContiguousSplitRes(
               table.groupBy(0, 1).contiguousSplitGroups());
           ContigSplitGroupByResult r =
               table.groupBy(0, 1).contiguousSplitGroupsAndGenUniqKeys()) {
        ContiguousTable[] splits = splitsRes.getSplits();
        ContiguousTable[] splits2 = r.getGroups();
        Table uniqKeys = r.getUniqKeyTable();
        // Split order is unspecified, so identify each group by its row count
        // (the three groups have 1, 2 and 3 rows respectively).
        for (ContiguousTable[] currSplits : Arrays.asList(splits, splits2)) {
          assertEquals(3, currSplits.length);
          for (ContiguousTable ct : currSplits) {
            if (ct.getRowCount() == 1) {
              assertTablesAreEqual(expected1, ct.getTable());
            } else if (ct.getRowCount() == 2) {
              assertTablesAreEqual(expected2, ct.getTable());
            } else if (ct.getRowCount() == 3) {
              assertTablesAreEqual(expected3, ct.getTable());
            } else {
              throw new RuntimeException("unexpected behavior: contiguousSplitGroups");
            }
          }
        }
        // verify uniq keys table
        assertTablesAreEqual(expected4, uniqKeys);
      }

      // Empty key columns, the whole table is a group.
      try (ContiguousSplitRes splitsRes = new ContiguousSplitRes(
              table.groupBy().contiguousSplitGroups());
           ContigSplitGroupByResult r = table.groupBy().contiguousSplitGroupsAndGenUniqKeys()) {
        ContiguousTable[] splits = splitsRes.getSplits();
        ContiguousTable[] splits2 = r.getGroups();
        Table uniqKeys = r.getUniqKeyTable();
        assertEquals(1, splits.length);
        assertTablesAreEqual(table, splits[0].getTable());
        assertEquals(1, splits2.length);
        assertTablesAreEqual(table, splits2[0].getTable());
        // Table should contain 1 or more columns,
        // If group by empty, keys table should be null;
        assertNull(uniqKeys);
      }

      // Row count is 0: still produces a single empty split and an empty key table.
      try (
          Table emptyTable = new Table.TestBuilder()
              .column(new Integer[0])
              .column(new Integer[0])
              .column(new Integer[0])
              .column(new String[0]).build();
          ContiguousSplitRes splitsRes = new ContiguousSplitRes(
              emptyTable.groupBy(0, 1).contiguousSplitGroups());
          ContigSplitGroupByResult r =
              emptyTable.groupBy(0, 1).contiguousSplitGroupsAndGenUniqKeys()) {
        ContiguousTable[] splits = splitsRes.getSplits();
        ContiguousTable[] splits2 = r.getGroups();
        Table uniqKeys = r.getUniqKeyTable();
        // the first of tmpSplits is empty split
        assertEquals(0, emptyTable.getRowCount());
        assertEquals(1, splits.length);
        assertEquals(0, splits[0].getTable().getRowCount());
        assertEquals(1, splits2.length);
        assertEquals(0, splits2[0].getTable().getRowCount());
        assertEquals(0, uniqKeys.getRowCount());
      }
    }
  }
@Test
void testGroupByCollectListIncludeNulls() {
try (Table input = new Table.TestBuilder()
.column(1, 1, 1, 1, 2, 2, 2, 2, 3, 4)
.column(null, 13, null, 12, 14, null, 15, null, null, 0)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2, 3, 4)
.column(new ListType(false, new BasicType(true, DType.INT32)),
Arrays.asList(null, 13, null, 12),
Arrays.asList(14, null, 15, null),
Arrays.asList((Integer) null),
Arrays.asList(0))
.build();
Table found = input.groupBy(0).aggregate(
GroupByAggregation.collectList(NullPolicy.INCLUDE).onColumn(1))) {
assertTablesAreEqual(expected, found);
}
}
  @Test
  void testGroupByMergeLists() {
    // mergeLists concatenates per-key lists (as produced by a previous
    // collectList pass) preserving element order and duplicates, for both
    // primitive and struct element types.
    ListType listOfInts = new ListType(false, new BasicType(false, DType.INT32));
    ListType listOfStructs = new ListType(false, new StructType(false,
        new BasicType(false, DType.INT32), new BasicType(false, DType.STRING)));
    try (Table input = new Table.TestBuilder()
            .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 4)
            .column(listOfInts,
                Arrays.asList(1, 2), Arrays.asList(3), Arrays.asList(7, 8), Arrays.asList(4, 5, 6),
                Arrays.asList(8, 9), Arrays.asList(8, 9, 10), Arrays.asList(10, 11), Arrays.asList(11, 12),
                Arrays.asList(13, 13), Arrays.asList(14, 15, 15))
            .column(listOfStructs,
                Arrays.asList(new StructData(1, "s1"), new StructData(2, "s2")),
                Arrays.asList(new StructData(2, "s3"), new StructData(3, "s4")),
                Arrays.asList(new StructData(2, "s2")),
                Arrays.asList(),
                Arrays.asList(new StructData(11, "s11")),
                Arrays.asList(new StructData(22, "s22"), new StructData(33, "s33")),
                Arrays.asList(),
                Arrays.asList(new StructData(22, "s22"), new StructData(33, "s33"), new StructData(44, "s44")),
                Arrays.asList(new StructData(333, "s333"), new StructData(222, "s222"), new StructData(111, "s111")),
                Arrays.asList(new StructData(222, "s222"), new StructData(444, "s444")))
            .build();
         Table expectedListOfInts = new Table.TestBuilder()
             .column(1, 2, 3, 4)
             .column(listOfInts,
                 Arrays.asList(1, 2, 3, 7, 8, 4, 5, 6),
                 Arrays.asList(8, 9, 8, 9, 10, 10, 11, 11, 12),
                 Arrays.asList(13, 13),
                 Arrays.asList(14, 15, 15))
             .build();
         Table expectedListOfStructs = new Table.TestBuilder()
             .column(1, 2, 3, 4)
             .column(listOfStructs,
                 Arrays.asList(new StructData(1, "s1"), new StructData(2, "s2"),
                     new StructData(2, "s3"), new StructData(3, "s4"), new StructData(2, "s2")),
                 Arrays.asList(new StructData(11, "s11"), new StructData(22, "s22"), new StructData(33, "s33"),
                     new StructData(22, "s22"), new StructData(33, "s33"), new StructData(44, "s44")),
                 Arrays.asList(new StructData(333, "s333"), new StructData(222, "s222"), new StructData(111, "s111")),
                 Arrays.asList(new StructData(222, "s222"), new StructData(444, "s444")))
             .build();
         Table retListOfInts = input.groupBy(0).aggregate(GroupByAggregation.mergeLists().onColumn(1));
         Table retListOfStructs = input.groupBy(0).aggregate(GroupByAggregation.mergeLists().onColumn(2))) {
      assertTablesAreEqual(expectedListOfInts, retListOfInts);
      assertTablesAreEqual(expectedListOfStructs, retListOfStructs);
    }
  }
@Test
void testGroupByCollectSetIncludeNulls() {
// test with null unequal and nan unequal
GroupByAggregation collectSet = GroupByAggregation.collectSet(NullPolicy.INCLUDE,
NullEquality.UNEQUAL, NaNEquality.UNEQUAL);
try (Table input = new Table.TestBuilder()
.column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4)
.column(null, 13, null, 13, 14, null, 15, null, 4, 1, 1, 4, 0, 0, 0, 0)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2, 3, 4)
.column(new ListType(false, new BasicType(true, DType.INT32)),
Arrays.asList(13, null, null), Arrays.asList(14, 15, null, null),
Arrays.asList(1, 4), Arrays.asList(0))
.build();
Table found = input.groupBy(0).aggregate(collectSet.onColumn(1));
ColumnVector listsSorted = found.getColumn(1).listSortRows(false, false)) {
assertColumnsAreEqual(expected.getColumn(0), found.getColumn(0));
assertColumnsAreEqual(expected.getColumn(1), listsSorted);
}
// test with null equal and nan unequal
collectSet = GroupByAggregation.collectSet(NullPolicy.INCLUDE,
NullEquality.EQUAL, NaNEquality.UNEQUAL);
try (Table input = new Table.TestBuilder()
.column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4)
.column(null, 13.0, null, 13.0,
14.1, Double.NaN, 13.9, Double.NaN,
Double.NaN, null, 1.0, null,
null, null, null, null)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2, 3, 4)
.column(new ListType(false, new BasicType(true, DType.FLOAT64)),
Arrays.asList(13.0, null),
Arrays.asList(13.9, 14.1, Double.NaN, Double.NaN),
Arrays.asList(1.0, Double.NaN, null),
Arrays.asList((Integer) null))
.build();
Table found = input.groupBy(0).aggregate(collectSet.onColumn(1));
ColumnVector listsSorted = found.getColumn(1).listSortRows(false, false)) {
assertColumnsAreEqual(expected.getColumn(0), found.getColumn(0));
assertColumnsAreEqual(expected.getColumn(1), listsSorted);
}
// test with null equal and nan equal
collectSet = GroupByAggregation.collectSet(NullPolicy.INCLUDE,
NullEquality.EQUAL, NaNEquality.ALL_EQUAL);
try (Table input = new Table.TestBuilder()
.column(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4)
.column(null, 13.0, null, 13.0,
14.1, Double.NaN, 13.9, Double.NaN,
0.0, 0.0, 0.00, 0.0,
Double.NaN, Double.NaN, null, null)
.build();
Table expected = new Table.TestBuilder()
.column(1, 2, 3, 4)
.column(new ListType(false, new BasicType(true, DType.FLOAT64)),
Arrays.asList(13.0, null),
Arrays.asList(13.9, 14.1, Double.NaN),
Arrays.asList(0.0),
Arrays.asList(Double.NaN, (Integer) null))
.build();
Table found = input.groupBy(0).aggregate(collectSet.onColumn(1));
ColumnVector listsSorted = found.getColumn(1).listSortRows(false, false)) {
assertColumnsAreEqual(expected.getColumn(0), found.getColumn(0));
assertColumnsAreEqual(expected.getColumn(1), listsSorted);
}
}
  @Test
  void testGroupByMergeSets() {
    // mergeSets unions per-key lists, dropping duplicates. With the default
    // NaN inequality, repeated NaNs all survive; with NaNEquality.ALL_EQUAL
    // they collapse to one. Output lists are sorted before comparison since
    // set ordering is unspecified.
    ListType listOfInts = new ListType(false, new BasicType(false, DType.INT32));
    ListType listOfDoubles = new ListType(false, new BasicType(false, DType.FLOAT64));
    try (Table input = new Table.TestBuilder()
            .column(1, 1, 1, 1, 2, 2, 2, 2, 3, 4)
            .column(listOfInts,
                Arrays.asList(1, 2), Arrays.asList(3), Arrays.asList(7, 8), Arrays.asList(4, 5, 6),
                Arrays.asList(8, 9), Arrays.asList(8, 9, 10), Arrays.asList(10, 11), Arrays.asList(11, 12),
                Arrays.asList(13, 13), Arrays.asList(14, 15, 15))
            .column(listOfDoubles,
                Arrays.asList(Double.NaN, 1.2), Arrays.asList(), Arrays.asList(Double.NaN), Arrays.asList(-3e10),
                Arrays.asList(1.1, 2.2, 3.3), Arrays.asList(3.3, 2.2), Arrays.asList(), Arrays.asList(),
                Arrays.asList(1e3, Double.NaN, 1e-3, Double.NaN), Arrays.asList())
            .build();
         Table expectedListOfInts = new Table.TestBuilder()
             .column(1, 2, 3, 4)
             .column(listOfInts,
                 Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8),
                 Arrays.asList(8, 9, 10, 11, 12),
                 Arrays.asList(13),
                 Arrays.asList(14, 15))
             .build();
         Table expectedListOfDoubles = new Table.TestBuilder()
             .column(1, 2, 3, 4)
             .column(listOfDoubles,
                 // NaN != NaN by default, so both NaNs remain
                 Arrays.asList(-3e10, 1.2, Double.NaN, Double.NaN),
                 Arrays.asList(1.1, 2.2, 3.3),
                 Arrays.asList(1e-3, 1e3, Double.NaN, Double.NaN),
                 Arrays.asList())
             .build();
         Table expectedListOfDoublesNaNEq = new Table.TestBuilder()
             .column(1, 2, 3, 4)
             .column(listOfDoubles,
                 // ALL_EQUAL dedups the NaNs
                 Arrays.asList(-3e10, 1.2, Double.NaN),
                 Arrays.asList(1.1, 2.2, 3.3),
                 Arrays.asList(1e-3, 1e3, Double.NaN),
                 Arrays.asList())
             .build();
         Table retListOfInts = input.groupBy(0).aggregate(GroupByAggregation.mergeSets().onColumn(1));
         Table retListOfDoubles = input.groupBy(0).aggregate(GroupByAggregation.mergeSets().onColumn(2));
         Table retListOfDoublesNaNEq = input.groupBy(0).aggregate(
             GroupByAggregation.mergeSets(NullEquality.UNEQUAL, NaNEquality.ALL_EQUAL).onColumn(2));
         ColumnVector listsIntsSorted = retListOfInts.getColumn(1).listSortRows(false, false);
         ColumnVector listsDoublesSorted = retListOfDoubles.getColumn(1).listSortRows(false, false);
         ColumnVector listsDoublesNaNEqSorted = retListOfDoublesNaNEq.getColumn(1).listSortRows(false, false)) {
      assertColumnsAreEqual(expectedListOfInts.getColumn(0), retListOfInts.getColumn(0));
      assertColumnsAreEqual(expectedListOfDoubles.getColumn(0), retListOfDoubles.getColumn(0));
      assertColumnsAreEqual(expectedListOfDoublesNaNEq.getColumn(0), retListOfDoublesNaNEq.getColumn(0));
      assertColumnsAreEqual(expectedListOfInts.getColumn(1), listsIntsSorted);
      assertColumnsAreEqual(expectedListOfDoubles.getColumn(1), listsDoublesSorted);
      assertColumnsAreEqual(expectedListOfDoublesNaNEq.getColumn(1), listsDoublesNaNEqSorted);
    }
  }
@Test
void testRowBitCount() {
try (Table t = new Table.TestBuilder()
.column(0, 1, null, 3) // 33 bits per row (4 bytes + valid bit)
.column(0.0, null, 2.0, 3.0) // 65 bits per row (8 bytes + valid bit)
.column("zero", null, "two", "three") // 33 bits (4 byte offset + valid bit) + char bits
.build();
ColumnVector expected = ColumnVector.fromInts(163, 131, 155, 171);
ColumnVector actual = t.rowBitCount()) {
assertColumnsAreEqual(expected, actual);
}
}
@Test
void testRowBitCountEmpty() {
try (Table t = new Table.TestBuilder()
.column(new Integer[0])
.column(new Double[0])
.column(new String[0])
.build();
ColumnVector c = t.rowBitCount()) {
assertEquals(DType.INT32, c.getType());
assertEquals(0, c.getRowCount());
}
}
@Test
void testSimpleGather() {
try (Table testTable = new Table.TestBuilder()
.column(1, 2, 3, 4, 5)
.column("A", "AA", "AAA", "AAAA", "AAAAA")
.decimal32Column(-3, 1, 2, 3, 4, 5)
.decimal64Column(-8, 100001L, 200002L, 300003L, 400004L, 500005L)
.build();
ColumnVector gatherMap = ColumnVector.fromInts(0, 2, 4, -2);
Table expected = new Table.TestBuilder()
.column(1, 3, 5, 4)
.column("A", "AAA", "AAAAA", "AAAA")
.decimal32Column(-3, 1, 3, 5, 4)
.decimal64Column(-8, 100001L, 300003L, 500005L, 400004L)
.build();
Table found = testTable.gather(gatherMap)) {
assertTablesAreEqual(expected, found);
}
}
@Test
void testBoundsCheckedGather() {
try (Table testTable = new Table.TestBuilder()
.column(1, 2, 3, 4, 5)
.column("A", "AA", "AAA", "AAAA", "AAAAA")
.decimal32Column(-3, 1, 2, 3, 4, 5)
.decimal64Column(-8, 100001L, 200002L, 300003L, 400004L, 500005L)
.build();
ColumnVector gatherMap = ColumnVector.fromInts(0, 100, 4, -2);
Table expected = new Table.TestBuilder()
.column(1, null, 5, 4)
.column("A", null, "AAAAA", "AAAA")
.decimal32Column(-3, 1, null, 5, 4)
.decimal64Column(-8, 100001L, null, 500005L, 400004L)
.build();
Table found = testTable.gather(gatherMap)) {
assertTablesAreEqual(expected, found);
}
}
@Test
void testScatterTable() {
try (Table srcTable = new Table.TestBuilder()
.column(1, 2, 3, 4, 5)
.column("A", "AA", "AAA", "AAAA", "AAAAA")
.decimal32Column(-3, 1, 2, 3, 4, 5)
.decimal64Column(-8, 100001L, 200002L, 300003L, 400004L, 500005L)
.build();
ColumnVector scatterMap = ColumnVector.fromInts(0, 2, 4, -2);
Table targetTable = new Table.TestBuilder()
.column(-1, -2, -3, -4, -5)
.column("B", "BB", "BBB", "BBBB", "BBBBB")
.decimal32Column(-3, -1, -2, -3, -4, -5)
.decimal64Column(-8, -100001L, -200002L, -300003L, -400004L, -500005L)
.build();
Table expected = new Table.TestBuilder()
.column(1, -2, 2, 4, 3)
.column("A", "BB", "AA", "AAAA", "AAA")
.decimal32Column(-3, 1, -2, 2, 4, 3)
.decimal64Column(-8, 100001L, -200002L, 200002L, 400004L, 300003L)
.build();
Table result = srcTable.scatter(scatterMap, targetTable)) {
assertTablesAreEqual(expected, result);
}
}
@Test
void testScatterScalars() {
try (Scalar s1 = Scalar.fromInt(0);
Scalar s2 = Scalar.fromString("A");
ColumnVector scatterMap = ColumnVector.fromInts(0, 2, -1);
Table targetTable = new Table.TestBuilder()
.column(-1, -2, -3, -4, -5)
.column("B", "BB", "BBB", "BBBB", "BBBBB")
.build();
Table expected = new Table.TestBuilder()
.column(0, -2, 0, -4, 0)
.column("A", "BB", "A", "BBBB", "A")
.build();
Table result = Table.scatter(new Scalar[] { s1, s2 }, scatterMap, targetTable)) {
assertTablesAreEqual(expected, result);
}
}
@Test
void testMaskWithoutValidity() {
try (ColumnVector mask = ColumnVector.fromBoxedBooleans(true, false, true, false, true);
ColumnVector fromInts = ColumnVector.fromInts(1, 2, 3, 4, 5);
ColumnVector fromStrings = ColumnVector.fromStrings("1", "2", "3", "4", "5");
ColumnVector fromDecimals = ColumnVector.decimalFromLongs(-3, 123L, -234L, 345L, 1000L, -2000L);
Table input = new Table(fromInts, fromStrings, fromDecimals);
Table filteredTable = input.filter(mask);
ColumnVector expectedInts = ColumnVector.fromInts(1, 3, 5);
ColumnVector expectedStrings = ColumnVector.fromStrings("1", "3", "5");
ColumnVector expectedDecimals = ColumnVector.decimalFromLongs(-3, 123L, 345L, -2000L);
Table expected = new Table(expectedInts, expectedStrings, expectedDecimals)) {
assertTablesAreEqual(expected, filteredTable);
}
}
@Test
void testMaskWithValidity() {
final int numRows = 5;
try (Builder builder = HostColumnVector.builder(DType.BOOL8, numRows)) {
for (int i = 0; i < numRows; ++i) {
builder.append((byte) 1);
if (i % 2 != 0) {
builder.setNullAt(i);
}
}
try (ColumnVector mask = builder.buildAndPutOnDevice();
ColumnVector fromInts = ColumnVector.fromBoxedInts(1, null, 2, 3, null);
Table input = new Table(fromInts);
Table filteredTable = input.filter(mask);
HostColumnVector filtered = filteredTable.getColumn(0).copyToHost()) {
assertEquals(DType.INT32, filtered.getType());
assertEquals(3, filtered.getRowCount());
assertEquals(1, filtered.getInt(0));
assertEquals(2, filtered.getInt(1));
assertTrue(filtered.isNull(2));
}
}
}
@Test
void testMaskDataOnly() {
byte[] maskVals = new byte[]{0, 1, 0, 1, 1};
try (ColumnVector mask = ColumnVector.boolFromBytes(maskVals);
ColumnVector fromBytes = ColumnVector.fromBoxedBytes((byte) 1, null, (byte) 2, (byte) 3, null);
Table input = new Table(fromBytes);
Table filteredTable = input.filter(mask);
HostColumnVector filtered = filteredTable.getColumn(0).copyToHost()) {
assertEquals(DType.INT8, filtered.getType());
assertEquals(3, filtered.getRowCount());
assertTrue(filtered.isNull(0));
assertEquals(3, filtered.getByte(1));
assertTrue(filtered.isNull(2));
}
}
@Test
void testAllFilteredFromData() {
Boolean[] maskVals = new Boolean[5];
Arrays.fill(maskVals, false);
try (ColumnVector mask = ColumnVector.fromBoxedBooleans(maskVals);
ColumnVector fromInts = ColumnVector.fromBoxedInts(1, null, 2, 3, null);
ColumnVector fromDecimal32s = ColumnVector.decimalFromInts(-3, 1, 2, 3, 4, 5);
ColumnVector fromDecimal64s = ColumnVector.decimalFromLongs(-11, 1L, 2L, 3L, 4L, 5L);
Table input = new Table(fromInts, fromDecimal32s, fromDecimal64s);
Table filteredTable = input.filter(mask)) {
ColumnVector filtered = filteredTable.getColumn(0);
assertEquals(DType.INT32, filtered.getType());
assertEquals(0, filtered.getRowCount());
filtered = filteredTable.getColumn(1);
assertEquals(DType.create(DType.DTypeEnum.DECIMAL32, -3), filtered.getType());
assertEquals(0, filtered.getRowCount());
filtered = filteredTable.getColumn(2);
assertEquals(DType.create(DType.DTypeEnum.DECIMAL64, -11), filtered.getType());
assertEquals(0, filtered.getRowCount());
}
}
  @Test
  void testAllFilteredFromValidity() {
    // A mask whose entries are all null must filter out every row, leaving
    // empty columns that still report their original types.
    final int numRows = 5;
    try (Builder builder = HostColumnVector.builder(DType.BOOL8, numRows)) {
      for (int i = 0; i < numRows; ++i) {
        builder.append((byte) 1);
        builder.setNullAt(i);  // every mask entry is null => no row passes
      }
      try (ColumnVector mask = builder.buildAndPutOnDevice();
           ColumnVector fromInts = ColumnVector.fromBoxedInts(1, null, 2, 3, null);
           ColumnVector fromDecimal32s = ColumnVector.decimalFromInts(-3, 1, 2, 3, 4, 5);
           ColumnVector fromDecimal64s = ColumnVector.decimalFromLongs(-11, 1L, 2L, 3L, 4L, 5L);
           Table input = new Table(fromInts, fromDecimal32s, fromDecimal64s);
           Table filteredTable = input.filter(mask)) {
        ColumnVector filtered = filteredTable.getColumn(0);
        assertEquals(DType.INT32, filtered.getType());
        assertEquals(0, filtered.getRowCount());
        filtered = filteredTable.getColumn(1);
        assertEquals(DType.create(DType.DTypeEnum.DECIMAL32, -3), filtered.getType());
        assertEquals(0, filtered.getRowCount());
        filtered = filteredTable.getColumn(2);
        assertEquals(DType.create(DType.DTypeEnum.DECIMAL64, -11), filtered.getType());
        assertEquals(0, filtered.getRowCount());
      }
    }
  }
  /**
   * Builds a new ColumnView identical to {@code cv} except that its validity
   * buffer is replaced with {@code validity} and its null count is set to
   * {@code nullCount}.
   *
   * @param cv        the column view to copy structure and data from
   * @param validity  validity buffer to attach; must be large enough for
   *                  {@code cv.rows} validity bits
   * @param nullCount null count to record on the new view
   * @return a new ColumnView the caller must close
   */
  ColumnView replaceValidity(ColumnView cv, DeviceMemoryBuffer validity, long nullCount) {
    assert (validity.length >= BitVectorHelper.getValidityAllocationSizeInBytes(cv.rows));
    if (cv.type.isNestedType()) {
      // Nested columns carry children instead of a data buffer.
      ColumnView[] children = cv.getChildColumnViews();
      try {
        return new ColumnView(cv.type,
            cv.rows,
            Optional.of(nullCount),
            validity,
            cv.getOffsets(),
            children);
      } finally {
        // NOTE(review): the temporary child views are closed here; the new
        // ColumnView appears to retain what it needs independently — confirm
        // against the ColumnView constructor's ownership contract.
        for (ColumnView v : children) {
          if (v != null) {
            v.close();
          }
        }
      }
    } else {
      return new ColumnView(cv.type, cv.rows, Optional.of(nullCount), cv.getData(), validity, cv.getOffsets());
    }
  }
  @Test
  void testRemoveNullMasksIfNeeded() {
    // Attaches all-valid validity masks (nullCount == 0) to columns —
    // including a struct child nested inside a list — then verifies that
    // removeNullMasksIfNeeded strips them all while leaving the data equal
    // to the originals.
    ListType nestedType = new ListType(true, new StructType(false,
        new BasicType(true, DType.INT32),
        new BasicType(true, DType.INT64)));
    List data1 = Arrays.asList(10, 20L);
    List data2 = Arrays.asList(50, 60L);
    HostColumnVector.StructData structData1 = new HostColumnVector.StructData(data1);
    HostColumnVector.StructData structData2 = new HostColumnVector.StructData(data2);
    //First we create ColumnVectors
    try (ColumnVector nonNullVector0 = ColumnVector.fromBoxedInts(1, 2, 3);
         ColumnVector nonNullVector2 = ColumnVector.fromStrings("1", "2", "3");
         ColumnVector nonNullVector1 = ColumnVector.fromLists(nestedType,
             Arrays.asList(structData1, structData2),
             Arrays.asList(structData1, structData2),
             Arrays.asList(structData1, structData2))) {
      //Then we take the created ColumnVectors and add validity masks even though the nullCount = 0
      long allocSize = BitVectorHelper.getValidityAllocationSizeInBytes(nonNullVector0.rows);
      try (DeviceMemoryBuffer dm0 = DeviceMemoryBuffer.allocate(allocSize);
           DeviceMemoryBuffer dm1 = DeviceMemoryBuffer.allocate(allocSize);
           DeviceMemoryBuffer dm2 = DeviceMemoryBuffer.allocate(allocSize);
           DeviceMemoryBuffer dm3_child =
               DeviceMemoryBuffer.allocate(BitVectorHelper.getValidityAllocationSizeInBytes(2))) {
        // Set every bit so the new masks mark all rows valid.
        Cuda.memset(dm0.address, (byte) 0xFF, allocSize);
        Cuda.memset(dm1.address, (byte) 0xFF, allocSize);
        Cuda.memset(dm2.address, (byte) 0xFF, allocSize);
        Cuda.memset(dm3_child.address, (byte) 0xFF,
            BitVectorHelper.getValidityAllocationSizeInBytes(2));
        // Rebuild each column as a view carrying the extra validity buffer,
        // then materialize it so the table owns real columns. For the list
        // column the struct child's mask is replaced first, then the struct
        // and list levels are reassembled bottom-up.
        try (ColumnView cv0View = replaceValidity(nonNullVector0, dm0, 0);
             ColumnVector cv0 = cv0View.copyToColumnVector();
             ColumnView struct = nonNullVector1.getChildColumnView(0);
             ColumnView structChild0 = struct.getChildColumnView(0);
             ColumnView newStructChild0 = replaceValidity(structChild0, dm3_child, 0);
             ColumnView newStruct = struct.replaceChildrenWithViews(new int[]{0}, new ColumnView[]{newStructChild0});
             ColumnView list = nonNullVector1.replaceChildrenWithViews(new int[]{0}, new ColumnView[]{newStruct});
             ColumnView cv1View = replaceValidity(list, dm1, 0);
             ColumnVector cv1 = cv1View.copyToColumnVector();
             ColumnView cv2View = replaceValidity(nonNullVector2, dm2, 0);
             ColumnVector cv2 = cv2View.copyToColumnVector()) {
          try (Table t = new Table(new ColumnVector[]{cv0, cv1, cv2});
               Table tableWithoutNullMask = removeNullMasksIfNeeded(t);
               ColumnView tableStructChild0 = t.getColumn(1).getChildColumnView(0).getChildColumnView(0);
               ColumnVector tableStructChild0Cv = tableStructChild0.copyToColumnVector();
               Table expected = new Table(new ColumnVector[]{nonNullVector0, nonNullVector1,
                   nonNullVector2})) {
            // Sanity check: the input really does carry validity vectors.
            assertTrue(t.getColumn(0).hasValidityVector());
            assertTrue(t.getColumn(1).hasValidityVector());
            assertTrue(t.getColumn(2).hasValidityVector());
            assertTrue(tableStructChild0Cv.hasValidityVector());
            // NOTE(review): the trailing booleans presumably enable row-count
            // and null-mask comparison — confirm against
            // assertPartialTablesAreEqual's signature.
            assertPartialTablesAreEqual(expected,
                0,
                expected.getRowCount(),
                tableWithoutNullMask,
                true,
                true);
          }
        }
      }
    }
  }
@Test
void testRemoveNullMasksIfNeededWithNulls() {
ListType nestedType = new ListType(true, new StructType(true,
new BasicType(true, DType.INT32),
new BasicType(true, DType.INT64)));
List data1 = Arrays.asList(0, 10L);
List data2 = Arrays.asList(50, null);
HostColumnVector.StructData structData1 = new HostColumnVector.StructData(data1);
HostColumnVector.StructData structData2 = new HostColumnVector.StructData(data2);
//First we create ColumnVectors
try (ColumnVector nonNullVector0 = ColumnVector.fromBoxedInts(1, null, 2, 3);
ColumnVector nonNullVector1 = ColumnVector.fromStrings("1", "2", null, "3");
ColumnVector nonNullVector2 = ColumnVector.fromLists(nestedType,
Arrays.asList(structData1, structData2),
null,
Arrays.asList(structData1, structData2),
Arrays.asList(structData1, structData2))) {
try (Table expected = new Table(new ColumnVector[]{nonNullVector0, nonNullVector1, nonNullVector2});
Table unchangedTable = removeNullMasksIfNeeded(expected)) {
assertTablesAreEqual(expected, unchangedTable);
}
}
}
@Test
void testMismatchedSizesForFilter() {
Boolean[] maskVals = new Boolean[3];
Arrays.fill(maskVals, true);
try (ColumnVector mask = ColumnVector.fromBoxedBooleans(maskVals);
ColumnVector fromInts = ColumnVector.fromBoxedInts(1, null, 2, 3, null);
Table input = new Table(fromInts)) {
assertThrows(AssertionError.class, () -> input.filter(mask).close());
}
}
@Test
void testTableBasedFilter() {
byte[] maskVals = new byte[]{0, 1, 0, 1, 1};
try (ColumnVector mask = ColumnVector.boolFromBytes(maskVals);
ColumnVector fromInts = ColumnVector.fromBoxedInts(1, null, 2, 3, null);
ColumnVector fromStrings = ColumnVector.fromStrings("one", "two", "three", null, "five");
ColumnVector fromDecimals = ColumnVector.fromDecimals(BigDecimal.ZERO, null, BigDecimal.ONE, null, BigDecimal.TEN);
Table input = new Table(fromInts, fromStrings, fromDecimals);
Table filtered = input.filter(mask);
ColumnVector expectedFromInts = ColumnVector.fromBoxedInts(null, 3, null);
ColumnVector expectedFromStrings = ColumnVector.fromStrings("two", null, "five");
ColumnVector expectedFromDecimals = ColumnVector.fromDecimals(null, null, BigDecimal.TEN);
Table expected = new Table(expectedFromInts, expectedFromStrings, expectedFromDecimals)) {
assertTablesAreEqual(expected, filtered);
}
}
  @Test
  void testDropDuplicates() {
    // Deduplicates on column 1 only; column 0 reveals which duplicate row
    // (first or last) was kept. Results are sorted before comparison since
    // dropDuplicates makes no ordering guarantee.
    int[] keyColumns = new int[]{ 1 };
    try (ColumnVector col1 = ColumnVector.fromBoxedInts(5, null, 3, 5, 8, 1);
         ColumnVector col2 = ColumnVector.fromBoxedInts(20, null, null, 19, 21, 19);
         Table input = new Table(col1, col2)) {
      // Keep the first duplicate element.
      // NOTE(review): the trailing `true` presumably makes nulls compare
      // equal as keys — confirm against dropDuplicates' signature.
      try (Table result = input.dropDuplicates(keyColumns, Table.DuplicateKeepOption.KEEP_FIRST, true);
           Table resultSorted = result.orderBy(OrderByArg.asc(1, true));
           ColumnVector expectedCol1 = ColumnVector.fromBoxedInts(null, 5, 5, 8);
           ColumnVector expectedCol2 = ColumnVector.fromBoxedInts(null, 19, 20, 21);
           Table expected = new Table(expectedCol1, expectedCol2)) {
        assertTablesAreEqual(expected, resultSorted);
      }

      // Keep the last duplicate element.
      try (Table result = input.dropDuplicates(keyColumns, Table.DuplicateKeepOption.KEEP_LAST, true);
           Table resultSorted = result.orderBy(OrderByArg.asc(1, true));
           ColumnVector expectedCol1 = ColumnVector.fromBoxedInts(3, 1, 5, 8);
           ColumnVector expectedCol2 = ColumnVector.fromBoxedInts(null, 19, 20, 21);
           Table expected = new Table(expectedCol1, expectedCol2)) {
        assertTablesAreEqual(expected, resultSorted);
      }
    }
  }
/**
 * Names of the canned test columns used by WriteUtils. Each constant's string
 * name matches its identifier; the string is used both as the column name in
 * writer options and as the lookup key when building test tables.
 */
private enum Columns {
  BOOL("BOOL"),
  INT("INT"),
  BYTE("BYTE"),
  LONG("LONG"),
  STRING("STRING"),
  FLOAT("FLOAT"),
  DOUBLE("DOUBLE"),
  DECIMAL64("DECIMAL64"),
  DECIMAL128("DECIMAL128"),
  STRUCT("STRUCT"),
  STRUCT_DEC128("STRUCT_DEC128"),
  LIST("LIST"),
  LIST_STRUCT("LIST_STRUCT"),
  LIST_DEC128("LIST_DEC128");

  // Column name as written to files / writer options.
  final String name;

  Columns(String columnName) {
    this.name = columnName;
  }
}
/**
 * Helpers that build test tables and writer options from a fixed set of named
 * columns, so the various writer round-trip tests share one canned data set.
 */
private static class WriteUtils {
  // Maps each supported column to a function that appends that column's
  // canned five-row test data to a TestBuilder.
  private static final Map<Columns, Function<TestBuilder, TestBuilder>> addColumnFn = Maps.newHashMap();

  static {
    addColumnFn.put(Columns.BOOL, (t) -> t.column(true, false, false, true, false));
    addColumnFn.put(Columns.INT, (t) -> t.column(5, 1, 0, 2, 7));
    addColumnFn.put(Columns.LONG, (t) -> t.column(3l, 9l, 4l, 2l, 20l));
    addColumnFn.put(Columns.BYTE, (t) -> t.column(new Byte[]{2, 3, 4, 5, 9}));
    addColumnFn.put(Columns.STRING, (t) -> t.column("this", "is", "a", "test", "string"));
    addColumnFn.put(Columns.FLOAT, (t) -> t.column(1.0f, 3.5f, 5.9f, 7.1f, 9.8f));
    addColumnFn.put(Columns.DOUBLE, (t) -> t.column(5.0d, 9.5d, 0.9d, 7.23d, 2.8d));
    addColumnFn.put(Columns.DECIMAL64, (t) ->
        t.decimal64Column(-5, 1L, 323L, 12398423L, -231312412L, 239893414231L));
    addColumnFn.put(Columns.DECIMAL128, (t) ->
        t.decimal128Column(-10, RoundingMode.UNNECESSARY, BigInteger.ONE, BigInteger.ZERO,
            BigInteger.TEN, new BigInteger("100000000000000000000000000000"),
            new BigInteger("-1234567890123456789012345678")));
    BasicType dec64Type = new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL64, 0));
    StructType structType = new StructType(true,
        new BasicType(true, DType.INT32), new BasicType(true, DType.STRING), dec64Type);
    addColumnFn.put(Columns.STRUCT, (t) -> t.column(structType,
        struct(1, "k1", BigDecimal.ONE),
        struct(2, "k2", BigDecimal.ZERO),
        struct(3, "k3", BigDecimal.TEN),
        struct(4, "k4", BigDecimal.valueOf(Long.MAX_VALUE)),
        new HostColumnVector.StructData((List) null)));
    BasicType dec128Type = new BasicType(true, DType.create(DType.DTypeEnum.DECIMAL128, -5));
    addColumnFn.put(Columns.STRUCT_DEC128, (t) ->
        t.column(new StructType(false, dec128Type),
            struct(BigDecimal.valueOf(Integer.MAX_VALUE, 5)),
            struct(BigDecimal.valueOf(Long.MAX_VALUE, 5)),
            struct(new BigDecimal("111111111122222222223333333333").setScale(5)),
            struct(new BigDecimal("123456789123456789123456789").setScale(5)),
            struct((BigDecimal) null)));
    addColumnFn.put(Columns.LIST, (t) ->
        t.column(new ListType(false, new BasicType(false, DType.INT32)),
            Arrays.asList(1, 2),
            Arrays.asList(3, 4),
            Arrays.asList(5),
            Arrays.asList(6, 7),
            Arrays.asList(8, 9, 10)));
    addColumnFn.put(Columns.LIST_STRUCT, (t) ->
        t.column(new ListType(true, structType),
            Arrays.asList(struct(1, "k1", BigDecimal.ONE), struct(2, "k2", BigDecimal.ONE),
                struct(3, "k3", BigDecimal.ONE)),
            Arrays.asList(struct(4, "k4", BigDecimal.ONE), struct(5, "k5", BigDecimal.ONE)),
            Arrays.asList(struct(6, "k6", BigDecimal.ONE)),
            Arrays.asList(new HostColumnVector.StructData((List) null)),
            (List) null));
    addColumnFn.put(Columns.LIST_DEC128, (t) ->
        t.column(new ListType(true, new StructType(false, dec128Type)),
            Arrays.asList(struct(BigDecimal.valueOf(Integer.MAX_VALUE, 5)),
                struct(BigDecimal.valueOf(Integer.MIN_VALUE, 5))),
            Arrays.asList(struct(BigDecimal.valueOf(Long.MAX_VALUE, 5)),
                struct(BigDecimal.valueOf(0, 5)), struct(BigDecimal.valueOf(-1, 5))),
            Arrays.asList(struct(new BigDecimal("111111111122222222223333333333").setScale(5))),
            Arrays.asList(struct(new BigDecimal("123456789123456789123456789").setScale(5))),
            Arrays.asList(struct((BigDecimal) null))));
  }

  /** Append the canned data for {@code colName} to {@code tb} and return the builder. */
  static TestBuilder addColumn(TestBuilder tb, String colName) {
    // Bug fix: Columns.valueOf itself throws IllegalArgumentException for an
    // unknown name, so the previous containsKey guard could never fire and the
    // name was parsed twice. Parse once and look the function up once.
    Function<TestBuilder, TestBuilder> fn = addColumnFn.get(Columns.valueOf(colName));
    if (fn == null) {
      // Defensive: would only happen if an enum constant were missing from the map.
      throw new IllegalArgumentException("Unknown column name: " + colName);
    }
    return fn.apply(tb);
  }

  /** All supported column names, optionally including the DECIMAL128-based ones. */
  static String[] getAllColumns(boolean withDecimal128) {
    List<String> columns = Lists.newArrayList(
        Columns.BOOL.name, Columns.INT.name, Columns.BYTE.name, Columns.LONG.name,
        Columns.STRING.name, Columns.FLOAT.name, Columns.DOUBLE.name, Columns.DECIMAL64.name,
        Columns.STRUCT.name, Columns.LIST.name, Columns.LIST_STRUCT.name);
    if (withDecimal128) {
      columns.add(Columns.DECIMAL128.name);
      columns.add(Columns.STRUCT_DEC128.name);
      columns.add(Columns.LIST_DEC128.name);
    }
    // Idiomatic Collection.toArray; sized array is allocated by the collection.
    return columns.toArray(new String[0]);
  }

  /** Only the scalar (non-nested) column names. */
  static String[] getNonNestedColumns(boolean withDecimal128) {
    List<String> columns = Lists.newArrayList(
        Columns.BOOL.name, Columns.INT.name, Columns.BYTE.name, Columns.LONG.name,
        Columns.STRING.name, Columns.FLOAT.name, Columns.DOUBLE.name, Columns.DECIMAL64.name);
    if (withDecimal128) {
      columns.add(Columns.DECIMAL128.name);
    }
    return columns.toArray(new String[0]);
  }

  /** Add writer options for every column in {@code columns}, in order. */
  static void buildWriterOptions(ColumnWriterOptions.NestedBuilder builder, List<String> columns) {
    for (String colName : columns) {
      buildWriterOptions(builder, colName);
    }
  }

  /** Varargs convenience overload of the List-based version above. */
  static void buildWriterOptions(ColumnWriterOptions.NestedBuilder builder, String... columns) {
    for (String colName : columns) {
      buildWriterOptions(builder, colName);
    }
  }

  /**
   * Add the writer options for a single named column, mirroring the schema of
   * the data registered in {@link #addColumnFn}.
   */
  static void buildWriterOptions(ColumnWriterOptions.NestedBuilder builder, String colName) {
    switch (Columns.valueOf(colName)) {
      case BOOL:
      case INT:
      case LONG:
      case FLOAT:
      case DOUBLE:
      case BYTE:
      case STRING:
        builder.withColumns(false, colName);
        break;
      case DECIMAL64:
        builder.withDecimalColumn(colName, DType.DECIMAL64_MAX_PRECISION);
        break;
      case DECIMAL128:
        builder.withDecimalColumn(colName, DType.DECIMAL128_MAX_PRECISION);
        break;
      case STRUCT:
        builder.withStructColumn(structBuilder(colName)
            .withNullableColumns("ch_int")
            .withNullableColumns("ch_str")
            .withDecimalColumn("ch_dec64", DType.DECIMAL64_MAX_PRECISION, true)
            .build());
        break;
      case LIST:
        builder.withListColumn(listBuilder(colName, false)
            .withNonNullableColumns("ch_int")
            .build());
        break;
      case LIST_STRUCT:
        builder.withListColumn(listBuilder(colName)
            .withStructColumn(structBuilder(colName)
                .withNullableColumns("ch_int")
                .withNullableColumns("ch_str")
                .withDecimalColumn("ch_dec64", DType.DECIMAL64_MAX_PRECISION, true)
                .build())
            .build());
        break;
      case STRUCT_DEC128:
        builder.withStructColumn(structBuilder(colName, false)
            .withDecimalColumn("ch_dec128", DType.DECIMAL128_MAX_PRECISION, true)
            .build());
        break;
      case LIST_DEC128:
        builder.withListColumn(listBuilder(colName)
            .withStructColumn(structBuilder(colName, false)
                .withDecimalColumn("ch_dec128", DType.DECIMAL128_MAX_PRECISION, true)
                .build())
            .build());
        break;
      default:
        throw new IllegalArgumentException("should NOT reach here");
    }
  }
}
/** Varargs convenience overload; delegates to the List-based builder. */
private Table getExpectedFileTable(String... selectColumns) {
  List<String> cols = Lists.newArrayList(selectColumns);
  return getExpectedFileTable(cols);
}
/**
 * Build a host-side table containing exactly the requested canned columns,
 * in the requested order, using the data registered in WriteUtils.
 */
private Table getExpectedFileTable(List<String> selectColumns) {
  TestBuilder builder = new TestBuilder();
  selectColumns.forEach(name -> WriteUtils.addColumn(builder, name));
  return builder.build();
}
/** Fixed five-row table covering the primitive types plus 32- and 64-bit decimals. */
private Table getExpectedFileTableWithDecimals() {
  TestBuilder tb = new TestBuilder();
  tb.column(true, false, false, true, false);
  tb.column(5, 1, 0, 2, 7);
  tb.column(new Byte[]{2, 3, 4, 5, 9});
  tb.column(3L, 9L, 4L, 2L, 20L);
  tb.column("this", "is", "a", "test", "string");
  tb.column(1.0f, 3.5f, 5.9f, 7.1f, 9.8f);
  tb.column(5.0d, 9.5d, 0.9d, 7.23d, 2.8d);
  tb.decimal32Column(3, 298, 2473, 2119, 1273, 9879);
  tb.decimal64Column(4, 398L, 1322L, 983237L, 99872L, 21337L);
  return tb.build();
}
/**
 * Collects every buffer handed out by a chunked writer into one large host
 * buffer so the written bytes can be read back later in the test.
 */
private final class MyBufferConsumer implements HostBufferConsumer, AutoCloseable {
  public final HostMemoryBuffer buffer;
  long offset = 0;

  public MyBufferConsumer() {
    // 10 MiB is comfortably larger than anything these tests write.
    buffer = hostMemoryAllocator.allocate(10 * 1024 * 1024);
  }

  @Override
  public void handleBuffer(HostMemoryBuffer src, long len) {
    // This consumer owns src and must release it even if the copy throws.
    try (HostMemoryBuffer toClose = src) {
      buffer.copyFromHostBuffer(offset, src, 0, len);
      offset += len;
    }
  }

  @Override
  public void close() {
    buffer.close();
  }
}
/** Replays the bytes captured by a MyBufferConsumer through the HostBufferProvider API. */
private final class MyBufferProvider implements HostBufferProvider {
  private final MyBufferConsumer wrapped;
  long offset = 0;

  private MyBufferProvider(MyBufferConsumer wrapped) {
    this.wrapped = wrapped;
  }

  @Override
  public long readInto(HostMemoryBuffer buffer, long len) {
    long remaining = wrapped.offset - offset;
    long toCopy = Math.min(len, remaining);
    if (toCopy <= 0) {
      // Everything produced so far has already been consumed.
      return 0;
    }
    buffer.copyFromHostBuffer(0, wrapped.buffer, offset, toCopy);
    offset += toCopy;
    return toCopy;
  }
}
@Test
void testParquetWriteToBufferChunkedInt96() {
  // NOTE(review): despite the name, no INT96 timestamp configuration is set
  // here and the table has no timestamp columns — confirm the "Int96" intent.
  try (Table table0 = getExpectedFileTableWithDecimals();
       MyBufferConsumer consumer = new MyBufferConsumer()) {
    ParquetWriterOptions options = ParquetWriterOptions.builder()
        .withNonNullableColumns("_c0", "_c1", "_c2", "_c3", "_c4", "_c5", "_c6")
        .withDecimalColumn("_c7", 5)
        .withDecimalColumn("_c8", 5)
        .build();
    // Exercise each TableDebug output sink while a table is handy.
    TableDebug.get().debug("default stderr table0", table0);
    TableDebug.builder()
        .withOutput(TableDebug.Output.STDOUT)
        .build().debug("stdout table0", table0);
    TableDebug.builder()
        .withOutput(TableDebug.Output.LOG)
        .build().debug("slf4j default debug table0", table0);
    TableDebug.builder()
        .withOutput(TableDebug.Output.LOG_ERROR)
        .build().debug("slf4j error table0", table0);
    try (TableWriter writer = Table.writeParquetChunked(options, consumer)) {
      writer.write(table0);
      writer.write(table0);
      writer.write(table0);
    }
    // Three chunked writes should read back as the three tables concatenated.
    try (Table table1 = Table.readParquet(ParquetOptions.DEFAULT, consumer.buffer, 0, consumer.offset);
         Table concat = Table.concatenate(table0, table0, table0)) {
      assertTablesAreEqual(concat, table1);
    }
  }
}
@Test
void testParquetWriteMap() throws IOException {
  // Write a single MAP column, verify the parquet schema annotation, then
  // read it back and query the map by key.
  ParquetWriterOptions options = ParquetWriterOptions.builder()
      .withMapColumn(mapColumn("my_map",
          new ColumnWriterOptions("key0", false),
          new ColumnWriterOptions("value0"),
          true)).build();
  File f = File.createTempFile("test-map", ".parquet");
  try {
    List<HostColumnVector.StructData> list1 =
        Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "b")));
    List<HostColumnVector.StructData> list2 =
        Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "c")));
    List<HostColumnVector.StructData> list3 =
        Arrays.asList(new HostColumnVector.StructData(Arrays.asList("e", "d")));
    HostColumnVector.StructType structType = new HostColumnVector.StructType(true,
        Arrays.asList(new HostColumnVector.BasicType(true, DType.STRING),
            new HostColumnVector.BasicType(true, DType.STRING)));
    try (ColumnVector listColumn = ColumnVector.fromLists(new HostColumnVector.ListType(true,
        structType), list1, list2, list3);
         Table t0 = new Table(listColumn)) {
      try (TableWriter writer = Table.writeParquetChunked(options, f)) {
        writer.write(t0);
      }
      // Bug fix: the reader was previously never closed, leaking the file handle.
      try (ParquetFileReader reader =
               ParquetFileReader.open(HadoopInputFile.fromPath(new Path(f.getAbsolutePath()),
                   new Configuration()))) {
        MessageType schema = reader.getFooter().getFileMetaData().getSchema();
        assertEquals(OriginalType.MAP, schema.getType("my_map").getOriginalType());
      }
    }
    // Rows without key "a" yield null; row 3's map has keys "e"/"d" only.
    // NOTE(review): the Table returned by readParquet is never closed here —
    // relies on cv holding the only column; consider closing it explicitly.
    try (ColumnVector cv = Table.readParquet(f).getColumn(0);
         ColumnVector res = cv.getMapValue(Scalar.fromString("a"));
         ColumnVector expected = ColumnVector.fromStrings("b", "c", null)) {
      assertColumnsAreEqual(expected, res);
    }
  } finally {
    // Bug fix: the temp file was previously never deleted.
    f.delete();
  }
}
@Test
void testParquetWriteToBufferChunkedWithNested() {
  // Round-trip all supported (non DECIMAL128) column types, including nested
  // struct/list columns, through a chunked in-memory parquet write.
  ParquetWriterOptions.Builder optBuilder = ParquetWriterOptions.builder();
  WriteUtils.buildWriterOptions(optBuilder, WriteUtils.getAllColumns(false));
  ParquetWriterOptions options = optBuilder.build();
  try (Table table0 = getExpectedFileTable(WriteUtils.getAllColumns(false));
       MyBufferConsumer consumer = new MyBufferConsumer()) {
    try (TableWriter writer = Table.writeParquetChunked(options, consumer)) {
      writer.write(table0);
      writer.write(table0);
      writer.write(table0);
    }
    // Three chunked writes should read back as one concatenated table.
    try (Table table1 = Table.readParquet(ParquetOptions.DEFAULT, consumer.buffer, 0,
        consumer.offset);
         Table concat = Table.concatenate(table0, table0, table0)) {
      assertTablesAreEqual(concat, table1);
    }
  }
}
@Test
void testParquetWriteToBufferChunked() {
  // Write the same table with and without compression and check the write
  // statistics reported in each case.
  ParquetWriterOptions.Builder optBuilder = ParquetWriterOptions.builder();
  List<String> columns = Lists.newArrayList(WriteUtils.getNonNestedColumns(false));
  columns.add(Columns.STRUCT.name);
  WriteUtils.buildWriterOptions(optBuilder, columns);
  ParquetWriterOptions options = optBuilder.build();
  // Note: the same builder instance is mutated here, but `options` was
  // already built above so it keeps its (default) compression setting.
  ParquetWriterOptions optionsNoCompress = optBuilder.withCompressionType(CompressionType.NONE).build();
  try (Table table0 = getExpectedFileTable(columns);
       MyBufferConsumer consumer = new MyBufferConsumer()) {
    try (TableWriter writer = Table.writeParquetChunked(options, consumer)) {
      writer.write(table0);
      writer.write(table0);
      writer.write(table0);
      // Compressed write: some bytes must have been compressed successfully.
      TableWriter.WriteStatistics statistics = writer.getWriteStatistics();
      assertNotEquals(0, statistics.numCompressedBytes);
      assertEquals(0, statistics.numFailedBytes);
      assertEquals(0, statistics.numSkippedBytes);
      assertNotEquals(Double.NaN, statistics.compressionRatio);
    }
    try (Table table1 = Table.readParquet(ParquetOptions.DEFAULT, consumer.buffer, 0, consumer.offset);
         Table concat = Table.concatenate(table0, table0, table0)) {
      assertTablesAreEqual(concat, table1);
    }
    try (TableWriter writer = Table.writeParquetChunked(optionsNoCompress, consumer)) {
      writer.write(table0);
      writer.write(table0);
      writer.write(table0);
      // Uncompressed write: no compression work, so the ratio is NaN.
      TableWriter.WriteStatistics statistics = writer.getWriteStatistics();
      assertEquals(0, statistics.numCompressedBytes);
      assertEquals(0, statistics.numFailedBytes);
      assertEquals(0, statistics.numSkippedBytes);
      assertEquals(Double.NaN, statistics.compressionRatio);
    }
  }
}
@Test
void testParquetWriteToFileWithNames() throws IOException {
  // Round-trip a table to a parquet file with explicit column names.
  File tempFile = File.createTempFile("test-names", ".parquet");
  try (Table table0 = getExpectedFileTableWithDecimals()) {
    ParquetWriterOptions options = ParquetWriterOptions.builder()
        .withNonNullableColumns("first", "second", "third", "fourth", "fifth", "sixth", "seventh")
        .withDecimalColumn("eighth", 5)
        .withDecimalColumn("ninth", 6)
        .withCompressionType(CompressionType.NONE)
        .withStatisticsFrequency(ParquetWriterOptions.StatisticsFrequency.NONE)
        .build();
    try (TableWriter writer = Table.writeParquetChunked(options, tempFile.getAbsoluteFile())) {
      writer.write(table0);
    }
    try (Table table2 = Table.readParquet(tempFile.getAbsoluteFile())) {
      assertTablesAreEqual(table0, table2);
    }
  } finally {
    tempFile.delete();
  }
}
@Test
void testParquetWriteToFileWithNamesAndMetadata() throws IOException {
  // Same as testParquetWriteToFileWithNames but also attaches a key/value
  // metadata entry to the file footer.
  File tempFile = File.createTempFile("test-names-metadata", ".parquet");
  try (Table table0 = getExpectedFileTableWithDecimals()) {
    ParquetWriterOptions options = ParquetWriterOptions.builder()
        .withNonNullableColumns("first", "second", "third", "fourth", "fifth", "sixth", "seventh")
        .withDecimalColumn("eighth", 6)
        .withDecimalColumn("ninth", 8)
        .withMetadata("somekey", "somevalue")
        .withCompressionType(CompressionType.NONE)
        .withStatisticsFrequency(ParquetWriterOptions.StatisticsFrequency.NONE)
        .build();
    try (TableWriter writer = Table.writeParquetChunked(options, tempFile.getAbsoluteFile())) {
      writer.write(table0);
    }
    try (Table table2 = Table.readParquet(tempFile.getAbsoluteFile())) {
      assertTablesAreEqual(table0, table2);
    }
  } finally {
    tempFile.delete();
  }
}
@Test
void testParquetWriteToFileUncompressedNoStats() throws IOException {
  // Round-trip with compression and column statistics both disabled.
  File tempFile = File.createTempFile("test-uncompressed", ".parquet");
  try (Table table0 = getExpectedFileTableWithDecimals()) {
    ParquetWriterOptions options = ParquetWriterOptions.builder()
        .withNonNullableColumns("_c0", "_c1", "_c2", "_c3", "_c4", "_c5", "_c6")
        .withDecimalColumn("_c7", 4)
        .withDecimalColumn("_c8", 6)
        .withCompressionType(CompressionType.NONE)
        .withStatisticsFrequency(ParquetWriterOptions.StatisticsFrequency.NONE)
        .build();
    try (TableWriter writer = Table.writeParquetChunked(options, tempFile.getAbsoluteFile())) {
      writer.write(table0);
    }
    try (Table table2 = Table.readParquet(tempFile.getAbsoluteFile())) {
      assertTablesAreEqual(table0, table2);
    }
  } finally {
    tempFile.delete();
  }
}
@Test
void testParquetWriteWithFieldId() throws IOException {
  // Verify parquet field IDs propagate to the written schema.
  // field IDs are:
  // c1: -1, c2: 2, c3: 3, c31: 31, c32: 32, c4: -4, c5: not specified
  ColumnWriterOptions.StructBuilder sBuilder =
      structBuilder("c3", true, 3)
          .withColumn(true, "c31", 31)
          .withColumn(true, "c32", 32);
  ParquetWriterOptions options = ParquetWriterOptions.builder()
      .withColumn(true, "c1", -1)
      .withDecimalColumn("c2", 9, true, 2)
      .withStructColumn(sBuilder.build())
      .withTimestampColumn("c4", true, true, -4)
      .withColumns( true, "c5")
      .build();
  File tempFile = File.createTempFile("test-field-id", ".parquet");
  try {
    HostColumnVector.StructType structType = new HostColumnVector.StructType(
        true,
        new HostColumnVector.BasicType(true, DType.STRING),
        new HostColumnVector.BasicType(true, DType.STRING));
    try (Table table0 = new Table.TestBuilder()
        .column(true, false) // c1
        .decimal32Column(0, 298, 2473) // c2
        .column(structType, // c3
            new HostColumnVector.StructData("a", "b"), new HostColumnVector.StructData("a", "b"))
        .timestampMicrosecondsColumn(1000L, 2000L) // c4
        .column("a", "b") // c5
        .build()) {
      try (TableWriter writer = Table.writeParquetChunked(options, tempFile.getAbsoluteFile())) {
        writer.write(table0);
      }
    }
    try (ParquetFileReader reader = ParquetFileReader.open(HadoopInputFile.fromPath(
        new Path(tempFile.getAbsolutePath()),
        new Configuration()))) {
      MessageType schema = reader.getFooter().getFileMetaData().getSchema();
      // Bug fix: use JUnit assertions instead of the bare `assert` keyword,
      // which is silently skipped unless the JVM runs with -ea.
      assertEquals(-1, schema.getFields().get(0).getId().intValue());
      assertEquals(2, schema.getFields().get(1).getId().intValue());
      assertEquals(3, schema.getFields().get(2).getId().intValue());
      assertEquals(31, ((GroupType) schema.getFields().get(2)).getFields().get(0).getId().intValue());
      assertEquals(32, ((GroupType) schema.getFields().get(2)).getFields().get(1).getId().intValue());
      assertEquals(-4, schema.getFields().get(3).getId().intValue());
      // c5 had no field ID configured, so none should be written.
      assertNull(schema.getFields().get(4).getId());
    }
  } finally {
    tempFile.delete();
  }
}
@Test
void testParquetWriteWithFieldIdNestNotSpecified() throws IOException {
  // Verify that columns without a configured field ID get none written,
  // while siblings with IDs keep theirs.
  // field IDs are:
  // c0: no field ID
  // c1: 1
  // c2: no field ID
  // c21: 21
  // c22: no field ID
  // c3: 3
  // c31: 31
  // c32: no field ID
  // c4: 0
  ColumnWriterOptions.StructBuilder c2Builder =
      structBuilder("c2", true)
          .withColumn(true, "c21", 21)
          .withColumns(true, "c22");
  ColumnWriterOptions.StructBuilder c3Builder =
      structBuilder("c3", true, 3)
          .withColumn(true, "c31", 31)
          .withColumns(true, "c32");
  ParquetWriterOptions options = ParquetWriterOptions.builder()
      .withColumns(true, "c0")
      .withDecimalColumn("c1", 9, true, 1)
      .withStructColumn(c2Builder.build())
      .withStructColumn(c3Builder.build())
      .withColumn(true, "c4", 0)
      .build();
  File tempFile = File.createTempFile("test-field-id", ".parquet");
  try {
    HostColumnVector.StructType structType = new HostColumnVector.StructType(
        true,
        new HostColumnVector.BasicType(true, DType.STRING),
        new HostColumnVector.BasicType(true, DType.STRING));
    try (Table table0 = new Table.TestBuilder()
        .column(true, false) // c0
        .decimal32Column(0, 298, 2473) // c1
        .column(structType, // c2
            new HostColumnVector.StructData("a", "b"), new HostColumnVector.StructData("a", "b"))
        .column(structType, // c3
            new HostColumnVector.StructData("a", "b"), new HostColumnVector.StructData("a", "b"))
        .column("a", "b") // c4
        .build()) {
      try (TableWriter writer = Table.writeParquetChunked(options, tempFile.getAbsoluteFile())) {
        writer.write(table0);
      }
    }
    try (ParquetFileReader reader = ParquetFileReader.open(HadoopInputFile.fromPath(
        new Path(tempFile.getAbsolutePath()),
        new Configuration()))) {
      MessageType schema = reader.getFooter().getFileMetaData().getSchema();
      // Bug fix: use JUnit assertions instead of the bare `assert` keyword,
      // which is silently skipped unless the JVM runs with -ea.
      assertNull(schema.getFields().get(0).getId());
      assertEquals(1, schema.getFields().get(1).getId().intValue());
      assertNull(schema.getFields().get(2).getId());
      assertEquals(21, ((GroupType) schema.getFields().get(2)).getFields().get(0).getId().intValue());
      assertNull(((GroupType) schema.getFields().get(2)).getFields().get(1).getId());
      assertEquals(31, ((GroupType) schema.getFields().get(3)).getFields().get(0).getId().intValue());
      assertNull(((GroupType) schema.getFields().get(3)).getFields().get(1).getId());
      assertEquals(0, schema.getFields().get(4).getId().intValue());
    }
  } finally {
    tempFile.delete();
  }
}
/** Return a column where DECIMAL64 has been up-casted to DECIMAL128 */
private ColumnVector castDecimal64To128(ColumnView c) {
  DType dtype = c.getType();
  switch (dtype.getTypeId()) {
    case DECIMAL64:
      // Same scale, wider storage.
      return c.castTo(DType.create(DType.DTypeEnum.DECIMAL128, dtype.getScale()));
    case STRUCT:
    case LIST:
    {
      // Recursively convert the children, then rebuild a view over the
      // original validity/offsets with the converted children and
      // materialize it into an owning column.
      ColumnView[] oldViews = c.getChildColumnViews();
      assert oldViews != null;
      ColumnVector[] newChildren = new ColumnVector[oldViews.length];
      try {
        for (int i = 0; i < oldViews.length; i++) {
          newChildren[i] = castDecimal64To128(oldViews[i]);
        }
        try (ColumnView newView = new ColumnView(dtype, c.getRowCount(),
            Optional.of(c.getNullCount()), c.getValid(), c.getOffsets(), newChildren)) {
          // copyToColumnVector deep-copies, so releasing the children in the
          // finally block below is safe.
          return newView.copyToColumnVector();
        }
      } finally {
        // Always release the child views and the (possibly partial) set of
        // converted children, even if a conversion threw part-way through.
        for (ColumnView v : oldViews) {
          v.close();
        }
        for (ColumnVector v : newChildren) {
          if (v != null) {
            v.close();
          }
        }
      }
    }
    default:
      // Non-decimal leaf: take a reference if we can, otherwise materialize.
      if (c instanceof ColumnVector) {
        return ((ColumnVector) c).incRefCount();
      } else {
        return c.copyToColumnVector();
      }
  }
}
/** Return a new Table with any DECIMAL64 columns up-casted to DECIMAL128 */
private Table castDecimal64To128(Table t) {
  int numCols = t.getNumberOfColumns();
  ColumnVector[] converted = new ColumnVector[numCols];
  try {
    for (int i = 0; i < numCols; i++) {
      converted[i] = castDecimal64To128(t.getColumn(i));
    }
    // Table takes its own references to the columns, so ours are released below.
    return new Table(converted);
  } finally {
    for (ColumnVector cv : converted) {
      if (cv != null) {
        cv.close();
      }
    }
  }
}
@Test
void testArrowIPCWriteToFileWithNamesAndMetadata() throws IOException {
  // Round-trip the non-nested columns through an Arrow IPC file.
  File tempFile = File.createTempFile("test-names-metadata", ".arrow");
  String[] columnNames = WriteUtils.getNonNestedColumns(false);
  try (Table table0 = getExpectedFileTable(columnNames)) {
    ArrowIPCWriterOptions options = ArrowIPCWriterOptions.builder()
        .withColumnNames(columnNames)
        .build();
    try (TableWriter writer = Table.writeArrowIPCChunked(options, tempFile.getAbsoluteFile())) {
      writer.write(table0);
    }
    // Reading from Arrow converts decimals to DECIMAL128
    try (StreamedTableReader reader = Table.readArrowIPCChunked(tempFile);
         Table expected = castDecimal64To128(table0)) {
      boolean done = false;
      int count = 0;
      while (!done) {
        try (Table t = reader.getNextIfAvailable()) {
          if (t == null) {
            done = true;
          } else {
            assertTablesAreEqual(expected, t);
            count++;
          }
        }
      }
      // A single write should come back as exactly one batch.
      assertEquals(1, count);
    }
  } finally {
    tempFile.delete();
  }
}
@Test
void testArrowIPCWriteToBufferChunked() {
  // Round-trip non-nested plus struct/list columns through a chunked
  // in-memory Arrow IPC write; each write becomes one readable batch.
  String[] nonNestedCols = WriteUtils.getNonNestedColumns(false);
  List<String> columns = Lists.newArrayList(nonNestedCols);
  columns.add(Columns.STRUCT.name);
  columns.add(Columns.LIST.name);
  columns.add(Columns.LIST_STRUCT.name);
  try (Table table0 = getExpectedFileTable(columns);
       MyBufferConsumer consumer = new MyBufferConsumer()) {
    // Nested columns also name their children (e.g. struct fields).
    ArrowIPCWriterOptions options = ArrowIPCWriterOptions.builder()
        .withColumnNames(nonNestedCols)
        .withColumnNames(Columns.STRUCT.name, "int", "str", "dec64")
        .withColumnNames(Columns.LIST.name)
        .withColumnNames(Columns.LIST_STRUCT.name, "int", "str", "dec64")
        .build();
    try (TableWriter writer = Table.writeArrowIPCChunked(options, consumer)) {
      writer.write(table0);
      writer.write(table0);
      writer.write(table0);
    }
    // Reading from Arrow converts decimals to DECIMAL128
    try (StreamedTableReader reader = Table.readArrowIPCChunked(new MyBufferProvider(consumer));
         Table expected = castDecimal64To128(table0)) {
      boolean done = false;
      int count = 0;
      while (!done) {
        try (Table t = reader.getNextIfAvailable()) {
          if (t == null) {
            done = true;
          } else {
            assertTablesAreEqual(expected, t);
            count++;
          }
        }
      }
      // Three writes produce three batches.
      assertEquals(3, count);
    }
  }
}
@Test
void testArrowIPCWriteEmptyToBufferChunked() {
  // A zero-row table should still round-trip as a single empty batch.
  try (Table emptyTable = new Table.TestBuilder().timestampDayColumn().build();
       MyBufferConsumer consumer = new MyBufferConsumer()) {
    ArrowIPCWriterOptions options = ArrowIPCWriterOptions.builder()
        .withColumnNames("day")
        .build();
    try (TableWriter writer = Table.writeArrowIPCChunked(options, consumer)) {
      writer.write(emptyTable);
    }
    try (StreamedTableReader reader = Table.readArrowIPCChunked(new MyBufferProvider(consumer))) {
      boolean done = false;
      int count = 0;
      while (!done) {
        try (Table t = reader.getNextIfAvailable()) {
          if (t == null) {
            done = true;
          } else {
            assertTablesAreEqual(emptyTable, t);
            count++;
          }
        }
      }
      // Expect one empty batch for the empty table.
      assertEquals(1, count);
    }
  }
}
@Test
void testORCWriteToBufferChunked() {
  // Write the same table with and without compression to an in-memory ORC
  // stream and check the write statistics reported in each case.
  String[] selectedColumns = WriteUtils.getAllColumns(false);
  try (Table table0 = getExpectedFileTable(selectedColumns);
       MyBufferConsumer consumer = new MyBufferConsumer()) {
    ORCWriterOptions.Builder builder = ORCWriterOptions.builder();
    WriteUtils.buildWriterOptions(builder, selectedColumns);
    ORCWriterOptions opts = builder.build();
    // The same builder is mutated below, but `opts` was already built and
    // keeps its (default) compression setting.
    ORCWriterOptions optsNoCompress = builder.withCompressionType(CompressionType.NONE).build();
    try (TableWriter writer = Table.writeORCChunked(opts, consumer)) {
      writer.write(table0);
      writer.write(table0);
      writer.write(table0);
      // Compressed write: some bytes must have been compressed successfully.
      TableWriter.WriteStatistics statistics = writer.getWriteStatistics();
      assertNotEquals(0, statistics.numCompressedBytes);
      assertEquals(0, statistics.numFailedBytes);
      assertEquals(0, statistics.numSkippedBytes);
      assertNotEquals(Double.NaN, statistics.compressionRatio);
    }
    try (Table table1 = Table.readORC(ORCOptions.DEFAULT, consumer.buffer, 0, consumer.offset);
         Table concat = Table.concatenate(table0, table0, table0)) {
      assertTablesAreEqual(concat, table1);
    }
    try (TableWriter writer = Table.writeORCChunked(optsNoCompress, consumer)) {
      writer.write(table0);
      writer.write(table0);
      writer.write(table0);
      // Uncompressed write: no compression work, so the ratio is NaN.
      TableWriter.WriteStatistics statistics = writer.getWriteStatistics();
      assertEquals(0, statistics.numCompressedBytes);
      assertEquals(0, statistics.numFailedBytes);
      assertEquals(0, statistics.numSkippedBytes);
      assertEquals(Double.NaN, statistics.compressionRatio);
    }
  }
}
@Test
void testORCWriteToFileChunked() throws IOException {
  // Round-trip every supported (non DECIMAL128) column type through a
  // chunked ORC file write.
  File orcFile = File.createTempFile("test", ".orc");
  String[] cols = WriteUtils.getAllColumns(false);
  ORCWriterOptions.Builder optBuilder = ORCWriterOptions.builder();
  WriteUtils.buildWriterOptions(optBuilder, cols);
  ORCWriterOptions writerOpts = optBuilder.build();
  try (Table written = getExpectedFileTable(cols)) {
    try (TableWriter writer = Table.writeORCChunked(writerOpts, orcFile.getAbsoluteFile())) {
      writer.write(written);
    }
    try (Table readBack = Table.readORC(orcFile.getAbsoluteFile())) {
      assertTablesAreEqual(written, readBack);
    }
  } finally {
    orcFile.delete();
  }
}
@Test
void testORCWriteMapChunked() throws IOException {
  // Round-trip a single MAP column through a chunked ORC write.
  ORCWriterOptions options = ORCWriterOptions.builder()
      .withMapColumn(mapColumn("my_map",
          new ColumnWriterOptions("key0", false),
          new ColumnWriterOptions("value0"),
          true)).build();
  // Bug fix: this is an ORC test, so use an ".orc" suffix (was ".parquet"),
  // and delete the temp file when done (it previously leaked).
  File f = File.createTempFile("test-map", ".orc");
  try {
    List<HostColumnVector.StructData> list1 =
        Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "b")));
    List<HostColumnVector.StructData> list2 =
        Arrays.asList(new HostColumnVector.StructData(Arrays.asList("a", "c")));
    List<HostColumnVector.StructData> list3 =
        Arrays.asList(new HostColumnVector.StructData(Arrays.asList("e", "d")));
    HostColumnVector.StructType structType = new HostColumnVector.StructType(true,
        Arrays.asList(new HostColumnVector.BasicType(true, DType.STRING),
            new HostColumnVector.BasicType(true, DType.STRING)));
    try (ColumnVector listColumn = ColumnVector.fromLists(new HostColumnVector.ListType(true,
        structType), list1, list2, list3);
         Table t0 = new Table(listColumn)) {
      try (TableWriter writer = Table.writeORCChunked(options, f)) {
        writer.write(t0);
      }
      try (Table res = Table.readORC(f)) {
        assertTablesAreEqual(t0, res);
      }
    }
  } finally {
    f.delete();
  }
}
@Test
void testORCWriteToFileWithColNames() throws IOException {
  // Write with explicit column names and read back selecting those names.
  File tempFile = File.createTempFile("test", ".orc");
  String[] colNames = WriteUtils.getNonNestedColumns(false);
  try (Table table0 = getExpectedFileTable(colNames)) {
    ORCWriterOptions.Builder optBuilder = ORCWriterOptions.builder();
    WriteUtils.buildWriterOptions(optBuilder, colNames);
    ORCWriterOptions options = optBuilder.build();
    try (TableWriter writer = Table.writeORCChunked(options, tempFile.getAbsoluteFile())) {
      writer.write(table0);
    }
    // Read back only the named columns; order should match the write.
    ORCOptions opts = ORCOptions.builder().includeColumn(colNames).build();
    try (Table table1 = Table.readORC(opts, tempFile.getAbsoluteFile())) {
      assertTablesAreEqual(table0, table1);
    }
  } finally {
    tempFile.delete();
  }
}
// https://github.com/NVIDIA/spark-rapids-jni/issues/1338
// Need to remove this tag if #1338 is fixed.
@Tag("noSanitizer")
@Test
void testORCReadAndWriteForDecimal128() throws IOException {
  // Round-trip DECIMAL128 columns (top-level, in a struct, and in a list of
  // structs). The reader must be told which columns to widen to DECIMAL128.
  File tempFile = File.createTempFile("test", ".orc");
  String[] colNames = new String[]{Columns.DECIMAL64.name,
      Columns.DECIMAL128.name, Columns.STRUCT_DEC128.name, Columns.LIST_DEC128.name};
  try (Table table0 = getExpectedFileTable(colNames)) {
    ORCWriterOptions.Builder optBuilder = ORCWriterOptions.builder();
    WriteUtils.buildWriterOptions(optBuilder, colNames);
    ORCWriterOptions options = optBuilder.build();
    try (TableWriter writer = Table.writeORCChunked(options, tempFile.getAbsoluteFile())) {
      writer.write(table0);
    }
    // Nested decimal128 children are addressed by dotted path; the "1" below
    // selects the list's child element.
    ORCOptions opts = ORCOptions.builder()
        .includeColumn(colNames)
        .decimal128Column(Columns.DECIMAL128.name,
            String.format("%s.%s", Columns.STRUCT_DEC128.name, "ch_dec128"),
            String.format("%s.1.%s", Columns.LIST_DEC128.name, "ch_dec128"))
        .build();
    try (Table table1 = Table.readORC(opts, tempFile.getAbsoluteFile())) {
      assertTablesAreEqual(table0, table1);
    }
  } finally {
    tempFile.delete();
  }
}
@Test
void testORCWriteToFileUncompressed() throws IOException {
  // Round-trip the non-nested columns with compression disabled.
  File tempFileUncompressed = File.createTempFile("test-uncompressed", ".orc");
  try (Table table0 = getExpectedFileTable(WriteUtils.getNonNestedColumns(false))) {
    String[] colNames = WriteUtils.getNonNestedColumns(false);
    ORCWriterOptions.Builder optsBuilder = ORCWriterOptions.builder();
    WriteUtils.buildWriterOptions(optsBuilder, colNames);
    optsBuilder.withCompressionType(CompressionType.NONE);
    ORCWriterOptions opts = optsBuilder.build();
    try (TableWriter writer =
             Table.writeORCChunked(opts,tempFileUncompressed.getAbsoluteFile())) {
      writer.write(table0);
    }
    try (Table table2 = Table.readORC(tempFileUncompressed.getAbsoluteFile())) {
      assertTablesAreEqual(table0, table2);
    }
  } finally {
    tempFileUncompressed.delete();
  }
}
@Test
void testStructColumnFilter() {
  // Filter a struct<int32,int64> column, keeping rows 0 and 2; row 2 holds a
  // struct with a null child and row 3 is a null struct.
  List<HostColumnVector.DataType> fields =
      Arrays.asList(new HostColumnVector.BasicType(true, DType.INT32),
          new HostColumnVector.BasicType(true, DType.INT64));
  HostColumnVector.StructType structType = new HostColumnVector.StructType(true, fields);
  HostColumnVector.StructType expectedType = new HostColumnVector.StructType(true, fields);
  // Raw List locals on purpose: they steer the StructData(List) constructor
  // rather than the Object... overload.
  List row0Data = Arrays.asList(10, 20L);
  List row1Data = Arrays.asList(50, 60L);
  List row2Data = Arrays.asList(null, 80L);
  List row3Data = null;
  HostColumnVector.StructData row0 = new HostColumnVector.StructData(row0Data);
  HostColumnVector.StructData row1 = new HostColumnVector.StructData(row1Data);
  HostColumnVector.StructData row2 = new HostColumnVector.StructData(row2Data);
  HostColumnVector.StructData row3 = new HostColumnVector.StructData(row3Data);
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(true, false, true, false);
       ColumnVector structs = ColumnVector.fromStructs(structType, Arrays.asList(row0, row1, row2, row3));
       Table input = new Table(structs);
       Table filtered = input.filter(mask);
       ColumnVector expectedStructs = ColumnVector.fromStructs(expectedType, Arrays.asList(row0, row2));
       Table expected = new Table(expectedStructs)) {
    assertTablesAreEqual(expected, filtered);
  }
}
@Test
void testStructColumnFilterStrings() {
List<HostColumnVector.DataType> children =
Arrays.asList(new HostColumnVector.BasicType(true, DType.STRING),
new HostColumnVector.BasicType(true, DType.STRING));
HostColumnVector.StructType type = new HostColumnVector.StructType(true, children);
HostColumnVector.StructType expectedType = new HostColumnVector.StructType(true, children);
List data1 = Arrays.asList("10", "aliceAndBob");
List data2 = Arrays.asList("50", "foobar");
List data3 = Arrays.asList(null, "zombies");
List data4 = null;
HostColumnVector.StructData structData1 = new HostColumnVector.StructData(data1);
HostColumnVector.StructData structData2 = new HostColumnVector.StructData(data2);
HostColumnVector.StructData structData3 = new HostColumnVector.StructData(data3);
HostColumnVector.StructData structData4 = new HostColumnVector.StructData(data4);
try (ColumnVector mask = ColumnVector.fromBoxedBooleans(true, false, true, true);
ColumnVector fromStructs = ColumnVector.fromStructs(type, Arrays.asList(structData1, structData2, structData3, structData4));
Table input = new Table(fromStructs);
Table filteredTable = input.filter(mask);
ColumnVector expectedStructs = ColumnVector.fromStructs(expectedType, Arrays.asList(structData1, structData3, structData4));
Table expected = new Table(expectedStructs)) {
assertEquals(expected.getRowCount(), 3L, "Expected column row count is incorrect");
assertTablesAreEqual(expected, filteredTable);
}
}
@Test
void fixedWidthRowsRoundTripWide() {
TestBuilder tb = new TestBuilder();
IntStream.range(0, 10).forEach(i -> tb.column(3l, 9l, 4l, 2l, 20l, null));
IntStream.range(0, 10).forEach(i -> tb.column(5.0d, 9.5d, 0.9d, 7.23d, 2.8d, null));
IntStream.range(0, 10).forEach(i -> tb.column(5, 1, 0, 2, 7, null));
IntStream.range(0, 10).forEach(i -> tb.column(true, false, false, true, false, null));
IntStream.range(0, 10).forEach(i -> tb.column(1.0f, 3.5f, 5.9f, 7.1f, 9.8f, null));
IntStream.range(0, 10).forEach(i -> tb.column(new Byte[]{2, 3, 4, 5, 9, null}));
IntStream.range(0, 10).forEach(i -> tb.decimal32Column(-3, RoundingMode.UNNECESSARY, 5.0d,
9.5d, 0.9d, 7.23d, 2.8d, null));
IntStream.range(0, 10).forEach(i -> tb.decimal64Column(-8, 3L, 9L, 4L, 2L, 20L, null));
try (Table origTable = tb.build()) {
ColumnVector[] rowMajorTable = origTable.convertToRows();
try {
// We didn't overflow
assert rowMajorTable.length == 1;
ColumnVector cv = rowMajorTable[0];
assert cv.getRowCount() == origTable.getRowCount();
DType[] types = new DType[origTable.getNumberOfColumns()];
for (int i = 0; i < origTable.getNumberOfColumns(); i++) {
types[i] = origTable.getColumn(i).getType();
}
try (Table backAgain = Table.convertFromRows(cv, types)) {
assertTablesAreEqual(origTable, backAgain);
}
} finally {
for (ColumnVector cv : rowMajorTable) {
cv.close();
}
}
}
}
@Test
void fixedWidthRowsRoundTrip() {
try (Table origTable = new TestBuilder()
.column(3l, 9l, 4l, 2l, 20l, null)
.column(5.0d, 9.5d, 0.9d, 7.23d, 2.8d, null)
.column(5, 1, 0, 2, 7, null)
.column(true, false, false, true, false, null)
.column(1.0f, 3.5f, 5.9f, 7.1f, 9.8f, null)
.column(new Byte[]{2, 3, 4, 5, 9, null})
.decimal32Column(-3, RoundingMode.UNNECESSARY, 5.0d, 9.5d, 0.9d, 7.23d, 2.8d, null)
.decimal64Column(-8, 3L, 9L, 4L, 2L, 20L, null)
.build()) {
ColumnVector[] rowMajorTable = origTable.convertToRowsFixedWidthOptimized();
try {
// We didn't overflow
assert rowMajorTable.length == 1;
ColumnVector cv = rowMajorTable[0];
assert cv.getRowCount() == origTable.getRowCount();
DType[] types = new DType[origTable.getNumberOfColumns()];
for (int i = 0; i < origTable.getNumberOfColumns(); i++) {
types[i] = origTable.getColumn(i).getType();
}
try (Table backAgain = Table.convertFromRowsFixedWidthOptimized(cv, types)) {
assertTablesAreEqual(origTable, backAgain);
}
} finally {
for (ColumnVector cv : rowMajorTable) {
cv.close();
}
}
}
}
  // Utility methods to reduce typing when building struct test data.

  /** Builds one StructData row from its field values. */
  private static StructData struct(Object... values) {
    return new StructData(values);
  }
  /** Wraps a varargs list of struct rows as an array (one column cell per row). */
  private StructData[] structs(StructData... values) {
    return values;
  }
  /** Wraps a varargs list of strings as an array, for list-of-strings cells. */
  private String[] strings(String... values) {
    return values;
  }
  /**
   * Builds a decimal column from boxed unscaled integer values.
   *
   * @param isDec64 build a DECIMAL64 column when true, DECIMAL32 otherwise
   * @param scale   cudf scale (power-of-ten exponent). BigDecimal's scale has
   *                the opposite sign convention, hence the negation below.
   * @param values  unscaled values; null entries become null rows
   */
  private static ColumnVector decimalFromBoxedInts(boolean isDec64, int scale, Integer... values) {
    BigDecimal[] decimals = new BigDecimal[values.length];
    for (int i = 0; i < values.length; i++) {
      if (values[i] == null) {
        decimals[i] = null;
      } else {
        // BigDecimal.valueOf(unscaled, scale): negate to convert cudf scale.
        decimals[i] = BigDecimal.valueOf(values[i], -scale);
      }
    }
    DType type = isDec64 ? DType.create(DType.DTypeEnum.DECIMAL64, scale) : DType.create(DType.DTypeEnum.DECIMAL32, scale);
    return ColumnVector.build(type, decimals.length, (b) -> b.appendBoxed(decimals));
  }
  /**
   * Builds a 21-row table covering every major column type: fixed-width
   * primitives, timestamps, decimals, strings, lists of strings, lists of
   * structs, and structs — each with nulls sprinkled in. The exact values are
   * relied upon by the tests that consume this table, so do not change them.
   * The caller owns the returned table and must close it.
   */
  private Table buildTestTable() {
    StructType mapStructType = new StructType(true,
        new BasicType(false, DType.STRING),
        new BasicType(false, DType.STRING));
    StructType structType = new StructType(true,
        new BasicType(true, DType.INT32),
        new BasicType(false, DType.FLOAT32));
    return new Table.TestBuilder()
        .column( 100, 202, 3003, 40004, 5, -60, 1, null, 3, null, 5, null, 7, null, 9, null, 11, null, 13, null, 15)
        .column( true, true, false, false, true, null, true, true, null, false, false, null, true, true, null, false, false, null, true, true, null)
        .column( (byte)1, (byte)2, null, (byte)4, (byte)5, (byte)6, (byte)1, (byte)2, (byte)3, null, (byte)5, (byte)6, (byte)7, null, (byte)9, (byte)10, (byte)11, null, (byte)13, (byte)14, (byte)15)
        .column((short)6, (short)5, (short)4, null, (short)2, (short)1, (short)1, (short)2, (short)3, null, (short)5, (short)6, (short)7, null, (short)9, (short)10, null, (short)12, (short)13, (short)14, null)
        .column( 1L, null, 1001L, 50L, -2000L, null, 1L, 2L, 3L, 4L, null, 6L, 7L, 8L, 9L, null, 11L, 12L, 13L, 14L, null)
        .column( 10.1f, 20f, Float.NaN, 3.1415f, -60f, null, 1f, 2f, 3f, 4f, 5f, null, 7f, 8f, 9f, 10f, 11f, null, 13f, 14f, 15f)
        .column( 10.1f, 20f, Float.NaN, 3.1415f, -60f, -50f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f, 13f, 14f, 15f)
        .column( 10.1, 20.0, 33.1, 3.1415, -60.5, null, 1., 2., 3., 4., 5., 6., null, 8., 9., 10., 11., 12., null, 14., 15.)
        .timestampDayColumn(99, 100, 101, 102, 103, 104, 1, 2, 3, 4, 5, 6, 7, null, 9, 10, 11, 12, 13, null, 15)
        .timestampMillisecondsColumn(9L, 1006L, 101L, 5092L, null, 88L, 1L, 2L, 3L, 4L, 5L ,6L, 7L, 8L, null, 10L, 11L, 12L, 13L, 14L, 15L)
        .timestampSecondsColumn(1L, null, 3L, 4L, 5L, 6L, 1L, 2L, 3L, 4L, 5L ,6L, 7L, 8L, 9L, null, 11L, 12L, 13L, 14L, 15L)
        .decimal32Column(-3, 100, 202, 3003, 40004, 5, -60, 1, null, 3, null, 5, null, 7, null, 9, null, 11, null, 13, null, 15)
        .decimal64Column(-8, 1L, null, 1001L, 50L, -2000L, null, 1L, 2L, 3L, 4L, null, 6L, 7L, 8L, 9L, null, 11L, 12L, 13L, 14L, null)
        .column( "A", "B", "C", "D", null, "TESTING", "1", "2", "3", "4", "5", "6", "7", null, "9", "10", "11", "12", "13", null, "15")
        // LIST<STRING> with null lists, null elements, and empty lists/strings.
        .column(
            strings("1", "2", "3"), strings("4"), strings("5"), strings("6, 7"),
            strings("", "9", null), strings("11"), strings(""), strings(null, null),
            strings("15", null), null, null, strings("18", "19", "20"),
            null, strings("22"), strings("23", ""), null,
            null, null, null, strings(),
            strings("the end"))
        // LIST<STRUCT<STRING, STRING>> including null structs inside lists.
        .column(mapStructType,
            structs(struct("1", "2")), structs(struct("3", "4")),
            null, null,
            structs(struct("key", "value"), struct("a", "b")), null,
            null, structs(struct("3", "4"), struct("1", "2")),
            structs(), structs(null, struct("foo", "bar")),
            structs(null, null, null), null,
            null, null,
            null, null,
            null, null,
            null, null,
            structs(struct("the", "end")))
        // STRUCT<INT32, FLOAT32> with extreme values at both ends.
        .column(structType,
            struct(1, 1f), null, struct(2, 3f), null, struct(8, 7f),
            struct(0, 0f), null, null, struct(-1, -1f), struct(-100, -100f),
            struct(Integer.MAX_VALUE, Float.MAX_VALUE), null, null, null, null,
            null, null, null, null, null,
            struct(Integer.MIN_VALUE, Float.MIN_VALUE))
        .column( "A", "A", "C", "C", null, "TESTING", "1", "2", "3", "4", "5", "6", "7", null, "9", "10", "11", "12", "13", null, "15")
        .build();
  }
@Test
void testBuilderWithColumn() {
try (Table t1 = new Table.TestBuilder()
.decimal32Column(-3, 120, -230, null, 340)
.decimal64Column(-8, 1000L, 200L, null, 30L).build()) {
try (Table t2 = new Table.TestBuilder()
.decimal32Column(-3, RoundingMode.UNNECESSARY, 0.12, -0.23, null, 0.34)
.decimal64Column(-8, RoundingMode.UNNECESSARY, 1e-5, 2e-6, null, 3e-7).build()) {
try (Table t3 = new Table.TestBuilder()
.decimal32Column(-3, RoundingMode.UNNECESSARY, "0.12", "-000.23", null, ".34")
.decimal64Column(-8, RoundingMode.UNNECESSARY, "1e-5", "2e-6", null, "3e-7").build()) {
assertTablesAreEqual(t1, t2);
assertTablesAreEqual(t1, t3);
}
}
}
}
private Table[] buildExplodeTestTableWithPrimitiveTypes(boolean pos, boolean outer) {
try (Table input = new Table.TestBuilder()
.column(new ListType(true, new BasicType(true, DType.INT32)),
Arrays.asList(1, 2, 3),
Arrays.asList(4, 5),
Arrays.asList(6),
null,
Arrays.asList())
.column("s1", "s2", "s3", "s4", "s5")
.column(1, 3, 5, 7, 9)
.column(12.0, 14.0, 13.0, 11.0, 15.0)
.build()) {
Table.TestBuilder expectedBuilder = new Table.TestBuilder();
if (pos) {
Integer[] posData = outer ? new Integer[]{0, 1, 2, 0, 1, 0, null, null} : new Integer[]{0, 1, 2, 0, 1, 0};
expectedBuilder.column(posData);
}
List<Object[]> expectedData = new ArrayList<Object[]>(){{
if (!outer) {
this.add(new Integer[]{1, 2, 3, 4, 5, 6});
this.add(new String[]{"s1", "s1", "s1", "s2", "s2", "s3"});
this.add(new Integer[]{1, 1, 1, 3, 3, 5});
this.add(new Double[]{12.0, 12.0, 12.0, 14.0, 14.0, 13.0});
} else {
this.add(new Integer[]{1, 2, 3, 4, 5, 6, null, null});
this.add(new String[]{"s1", "s1", "s1", "s2", "s2", "s3", "s4", "s5"});
this.add(new Integer[]{1, 1, 1, 3, 3, 5, 7, 9});
this.add(new Double[]{12.0, 12.0, 12.0, 14.0, 14.0, 13.0, 11.0, 15.0});
}
}};
try (Table expected = expectedBuilder.column((Integer[]) expectedData.get(0))
.column((String[]) expectedData.get(1))
.column((Integer[]) expectedData.get(2))
.column((Double[]) expectedData.get(3))
.build()) {
return new Table[]{new Table(input.getColumns()), new Table(expected.getColumns())};
}
}
}
private Table[] buildExplodeTestTableWithNestedTypes(boolean pos, boolean outer) {
StructType nestedType = new StructType(true,
new BasicType(false, DType.INT32), new BasicType(false, DType.STRING));
try (Table input = new Table.TestBuilder()
.column(new ListType(false, nestedType),
Arrays.asList(struct(1, "k1"), struct(2, "k2"), struct(3, "k3")),
Arrays.asList(struct(4, "k4"), struct(5, "k5")),
Arrays.asList(struct(6, "k6")),
Arrays.asList(new HostColumnVector.StructData((List) null)),
null)
.column("s1", "s2", "s3", "s4", "s5")
.column(1, 3, 5, 7, 9)
.column(12.0, 14.0, 13.0, 11.0, 15.0)
.build()) {
Table.TestBuilder expectedBuilder = new Table.TestBuilder();
if (pos) {
if (outer) {
expectedBuilder.column(0, 1, 2, 0, 1, 0, 0, null);
} else {
expectedBuilder.column(0, 1, 2, 0, 1, 0, 0);
}
}
List<Object[]> expectedData = new ArrayList<Object[]>(){{
if (!outer) {
this.add(new HostColumnVector.StructData[]{
struct(1, "k1"), struct(2, "k2"), struct(3, "k3"),
struct(4, "k4"), struct(5, "k5"), struct(6, "k6"),
new HostColumnVector.StructData((List) null)});
this.add(new String[]{"s1", "s1", "s1", "s2", "s2", "s3", "s4"});
this.add(new Integer[]{1, 1, 1, 3, 3, 5, 7});
this.add(new Double[]{12.0, 12.0, 12.0, 14.0, 14.0, 13.0, 11.0});
} else {
this.add(new HostColumnVector.StructData[]{
struct(1, "k1"), struct(2, "k2"), struct(3, "k3"),
struct(4, "k4"), struct(5, "k5"), struct(6, "k6"),
new HostColumnVector.StructData((List) null), null});
this.add(new String[]{"s1", "s1", "s1", "s2", "s2", "s3", "s4", "s5"});
this.add(new Integer[]{1, 1, 1, 3, 3, 5, 7, 9});
this.add(new Double[]{12.0, 12.0, 12.0, 14.0, 14.0, 13.0, 11.0, 15.0});
}
}};
try (Table expected = expectedBuilder
.column(nestedType, (HostColumnVector.StructData[]) expectedData.get(0))
.column((String[]) expectedData.get(1))
.column((Integer[]) expectedData.get(2))
.column((Double[]) expectedData.get(3))
.build()) {
return new Table[]{new Table(input.getColumns()), new Table(expected.getColumns())};
}
}
}
@Test
void testExplode() {
// Child is primitive type
Table[] testTables = buildExplodeTestTableWithPrimitiveTypes(false, false);
try (Table input = testTables[0];
Table expected = testTables[1]) {
try (Table exploded = input.explode(0)) {
assertTablesAreEqual(expected, exploded);
}
}
// Child is nested type
Table[] testTables2 = buildExplodeTestTableWithNestedTypes(false, false);
try (Table input = testTables2[0];
Table expected = testTables2[1]) {
try (Table exploded = input.explode(0)) {
assertTablesAreEqual(expected, exploded);
}
}
}
@Test
void testExplodePosition() {
// Child is primitive type
Table[] testTables = buildExplodeTestTableWithPrimitiveTypes(true, false);
try (Table input = testTables[0];
Table expected = testTables[1]) {
try (Table exploded = input.explodePosition(0)) {
assertTablesAreEqual(expected, exploded);
}
}
// Child is nested type
Table[] testTables2 = buildExplodeTestTableWithNestedTypes(true, false);
try (Table input = testTables2[0];
Table expected = testTables2[1]) {
try (Table exploded = input.explodePosition(0)) {
assertTablesAreEqual(expected, exploded);
}
}
}
@Test
void testExplodeOuter() {
// Child is primitive type
Table[] testTables = buildExplodeTestTableWithPrimitiveTypes(false, true);
try (Table input = testTables[0];
Table expected = testTables[1]) {
try (Table exploded = input.explodeOuter(0)) {
assertTablesAreEqual(expected, exploded);
}
}
// Child is nested type
Table[] testTables2 = buildExplodeTestTableWithNestedTypes(false, true);
try (Table input = testTables2[0];
Table expected = testTables2[1]) {
try (Table exploded = input.explodeOuter(0)) {
assertTablesAreEqual(expected, exploded);
}
}
}
@Test
void testExplodeOuterPosition() {
// Child is primitive type
Table[] testTables = buildExplodeTestTableWithPrimitiveTypes(true, true);
try (Table input = testTables[0];
Table expected = testTables[1]) {
try (Table exploded = input.explodeOuterPosition(0)) {
assertTablesAreEqual(expected, exploded);
}
}
// Child is nested type
Table[] testTables2 = buildExplodeTestTableWithNestedTypes(true, true);
try (Table input = testTables2[0];
Table expected = testTables2[1]) {
try (Table exploded = input.explodeOuterPosition(0)) {
assertTablesAreEqual(expected, exploded);
}
}
}
@Test
void testSample() {
try (Table t = new Table.TestBuilder().column("s1", "s2", "s3", "s4", "s5").build()) {
try (Table ret = t.sample(3, false, 0)) {
assertEquals(ret.getRowCount(), 3);
}
try (Table ret = t.sample(5, false, 0)) {
assertEquals(ret.getRowCount(), 5);
}
try (Table ret = t.sample(8, true, 0)) {
assertEquals(ret.getRowCount(), 8);
}
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/UnaryOpTest.java
|
/*
*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.junit.jupiter.api.Test;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
/**
 * Tests for the unary math operations exposed on {@link ColumnVector}. Each
 * GPU result is compared against a CPU reference column computed element-wise
 * with the matching {@link Math} function; null rows are propagated as null.
 */
public class UnaryOpTest extends CudfTestBase {
  // Inputs deliberately include null, NaN, +/-infinity and a non-terminating
  // fraction (1/9.0) to exercise special-value handling.
  private static final Double[] DOUBLES_1 = new Double[]{1.0, 10.0, -100.1, 5.3, 50.0, 100.0, null, Double.NaN, Double.POSITIVE_INFINITY, 1/9.0, Double.NEGATIVE_INFINITY, 500.0, -500.0};
  private static final Integer[] INTS_1 = new Integer[]{1, 10, -100, 5, 50, 100, null};
  private static final Boolean[] BOOLEANS_1 = new Boolean[]{true, false, true, false, true, false, null};

  /** A CPU-side reference op invoked only for non-null rows (see forEach). */
  interface CpuOp {
    void computeNullSafe(Builder ret, HostColumnVector input, int index);
  }

  interface DoubleFun {
    double apply(double val);
  }

  static DoubleCpuOp doubleFun(DoubleFun fun) {
    return new DoubleCpuOp(fun);
  }

  /** Adapts a double->double function to the CpuOp row interface. */
  static class DoubleCpuOp implements CpuOp {
    private final DoubleFun fun;

    DoubleCpuOp(DoubleFun fun) {
      this.fun = fun;
    }

    @Override
    public void computeNullSafe(Builder ret, HostColumnVector input, int index) {
      ret.append(fun.apply(input.getDouble(index)));
    }
  }

  interface IntFun {
    int apply(int val);
  }

  static IntCpuOp intFun(IntFun fun) {
    return new IntCpuOp(fun);
  }

  /** Adapts an int->int function to the CpuOp row interface. */
  static class IntCpuOp implements CpuOp {
    private final IntFun fun;

    IntCpuOp(IntFun fun) {
      this.fun = fun;
    }

    @Override
    public void computeNullSafe(Builder ret, HostColumnVector input, int index) {
      ret.append(fun.apply(input.getInt(index)));
    }
  }

  interface BoolFun {
    boolean apply(boolean val);
  }

  static BoolCpuOp boolFun(BoolFun fun) {
    return new BoolCpuOp(fun);
  }

  /** Adapts a boolean->boolean function to the CpuOp row interface. */
  static class BoolCpuOp implements CpuOp {
    private final BoolFun fun;

    BoolCpuOp(BoolFun fun) {
      this.fun = fun;
    }

    @Override
    public void computeNullSafe(Builder ret, HostColumnVector input, int index) {
      ret.append(fun.apply(input.getBoolean(index)));
    }
  }

  /**
   * Computes the expected column on the CPU: copies {@code input} to the host,
   * applies {@code op} to every non-null row, and keeps null rows null.
   * The caller owns the returned column and must close it.
   */
  public static ColumnVector forEach(ColumnVector input, CpuOp op) {
    int len = (int)input.getRowCount();
    try (HostColumnVector host = input.copyToHost();
         Builder builder = HostColumnVector.builder(input.getType(), len)) {
      for (int i = 0; i < len; i++) {
        if (host.isNull(i)) {
          builder.appendNull();
        } else {
          op.computeNullSafe(builder, host, i);
        }
      }
      return builder.buildAndPutOnDevice();
    }
  }

  // These tests are not for the correctness of the underlying implementation, but really just
  // plumbing
  @Test
  public void testSin() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.sin();
         ColumnVector expected = forEach(dcv, doubleFun(Math::sin))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testCos() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.cos();
         ColumnVector expected = forEach(dcv, doubleFun(Math::cos))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testTan() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.tan();
         ColumnVector expected = forEach(dcv, doubleFun(Math::tan))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testArcsin() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.arcsin();
         ColumnVector expected = forEach(dcv, doubleFun(Math::asin))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testArccos() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.arccos();
         ColumnVector expected = forEach(dcv, doubleFun(Math::acos))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testArctan() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.arctan();
         ColumnVector expected = forEach(dcv, doubleFun(Math::atan))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testSinh() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.sinh();
         ColumnVector expected = forEach(dcv, doubleFun(Math::sinh))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testCosh() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.cosh();
         ColumnVector expected = forEach(dcv, doubleFun(Math::cosh))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testTanh() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.tanh();
         ColumnVector expected = forEach(dcv, doubleFun(Math::tanh))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  /**
   * Reference inverse hyperbolic sine. The JDK has no Math.asinh; the
   * -infinity input is special-cased because the log/sqrt formula would
   * otherwise produce NaN for it.
   */
  public static double asinh(double value) {
    return value == Double.NEGATIVE_INFINITY ? Double.NEGATIVE_INFINITY :
        java.lang.StrictMath.log(value + java.lang.Math.sqrt(value * value + 1.0));
  }

  @Test
  public void testArcsinh() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.arcsinh();
         ColumnVector expected = forEach(dcv, doubleFun(UnaryOpTest::asinh))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  /** Reference inverse hyperbolic cosine (no Math.acosh in the JDK). */
  public static double acosh(double value) {
    return java.lang.StrictMath.log(value + java.lang.Math.sqrt(value * value - 1.0));
  }

  @Test
  public void testArccosh() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.arccosh();
         ColumnVector expected = forEach(dcv, doubleFun(UnaryOpTest::acosh))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  /** Reference inverse hyperbolic tangent, expressed via log1p. */
  public static double atanh(double value) {
    return 0.5 * (java.lang.StrictMath.log1p(value) - java.lang.StrictMath.log1p(- value));
  }

  @Test
  public void testArctanh() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.arctanh();
         ColumnVector expected = forEach(dcv, doubleFun(UnaryOpTest::atanh))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testExp() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.exp();
         ColumnVector expected = forEach(dcv, doubleFun(Math::exp))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testLog() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.log();
         ColumnVector expected = forEach(dcv, doubleFun(Math::log))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testLog2() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.log2();
         // No Math.log2; use the change-of-base identity.
         ColumnVector expected = forEach(dcv, doubleFun(n -> Math.log(n) / Math.log(2)))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testLog10() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.log10();
         ColumnVector expected = forEach(dcv, doubleFun(Math::log10))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testSqrt() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.sqrt();
         ColumnVector expected = forEach(dcv, doubleFun(Math::sqrt))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testCbrt() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.cbrt();
         ColumnVector expected = forEach(dcv, doubleFun(Math::cbrt))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testCeil() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.ceil();
         ColumnVector expected = forEach(dcv, doubleFun(Math::ceil))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testFloor() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.floor();
         ColumnVector expected = forEach(dcv, doubleFun(Math::floor))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testAbs() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.abs();
         ColumnVector expected = forEach(dcv, doubleFun(Math::abs))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testRint() {
    try (ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1);
         ColumnVector answer = dcv.rint();
         ColumnVector expected = forEach(dcv, doubleFun(Math::rint))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testBitInvert() {
    try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1);
         ColumnVector answer = icv.bitInvert();
         ColumnVector expected = forEach(icv, intFun((i) -> ~i))) {
      assertColumnsAreEqual(expected, answer);
    }
  }

  @Test
  public void testNot() {
    try (ColumnVector icv = ColumnVector.fromBoxedBooleans(BOOLEANS_1);
         ColumnVector answer = icv.not();
         ColumnVector expected = forEach(icv, boolFun((i) -> !i))) {
      assertColumnsAreEqual(expected, answer);
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/UnsafeMemoryAccessorTest.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@Tag("noSanitizer")
/**
 * Tests for the off-heap {@link UnsafeMemoryAccessor} helpers. Every test
 * frees what it allocates in a finally block so a failing assertion cannot
 * leak native memory.
 */
public class UnsafeMemoryAccessorTest {
  @Test
  public void testAllocate() {
    long address = UnsafeMemoryAccessor.allocate(3);
    try {
      assertNotEquals(0, address);
    } finally {
      UnsafeMemoryAccessor.free(address);
    }
  }

  @Test
  public void setByteAndGetByte() {
    long address = UnsafeMemoryAccessor.allocate(2);
    try {
      UnsafeMemoryAccessor.setByte(address, (byte) 34);
      UnsafeMemoryAccessor.setByte(address + 1, (byte) 63);
      // Primitive byte: no reason to box the value here.
      byte b = UnsafeMemoryAccessor.getByte(address);
      assertEquals((byte) 34, b);
      b = UnsafeMemoryAccessor.getByte(address + 1);
      assertEquals((byte) 63, b);
    } finally {
      UnsafeMemoryAccessor.free(address);
    }
  }

  @Test
  public void setIntAndGetInt() {
    long address = UnsafeMemoryAccessor.allocate(2 * 4);
    try {
      UnsafeMemoryAccessor.setInt(address, 2);
      UnsafeMemoryAccessor.setInt(address + 4, 4);
      int v = UnsafeMemoryAccessor.getInt(address);
      assertEquals(2, v);
      v = UnsafeMemoryAccessor.getInt(address + 4);
      assertEquals(4, v);
    } finally {
      UnsafeMemoryAccessor.free(address);
    }
  }

  @Test
  public void setMemoryValue() {
    long address = UnsafeMemoryAccessor.allocate(4);
    try {
      UnsafeMemoryAccessor.setMemory(address, 4, (byte) 1);
      // Four 0x01 bytes read back as 0x01010101 == 16843009 regardless of
      // byte order, since every byte is the same.
      int v = UnsafeMemoryAccessor.getInt(address);
      assertEquals(16843009, v);
    } finally {
      UnsafeMemoryAccessor.free(address);
    }
  }

  @Test
  public void testGetLongs() {
    int numLongs = 257;
    long address = UnsafeMemoryAccessor.allocate(numLongs * 8);
    // Fix: wrap in try/finally and free the allocation; this test previously
    // leaked the native buffer (all sibling tests already freed theirs).
    try {
      for (int i = 0; i < numLongs; ++i) {
        UnsafeMemoryAccessor.setLong(address + (i * 8), i);
      }
      long[] result = new long[numLongs];
      UnsafeMemoryAccessor.getLongs(result, 0, address, numLongs);
      for (int i = 0; i < numLongs; ++i) {
        assertEquals(i, result[i]);
      }
      // Copy the last long into destination offset 1 and verify only that
      // slot changed.
      UnsafeMemoryAccessor.getLongs(result, 1,
          address + ((numLongs - 1) * 8), 1);
      for (int i = 0; i < numLongs; ++i) {
        long expected = (i == 1) ? numLongs - 1 : i;
        assertEquals(expected, result[i]);
      }
    } finally {
      UnsafeMemoryAccessor.free(address);
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/IfElseTest.java
|
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.util.stream.Stream;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class IfElseTest extends CudfTestBase {
  /**
   * (predicate, true-values, false-values, expected) cases for ifElse with two
   * boolean column operands: where the predicate is true the result comes from
   * the first operand, otherwise from the second. The second case exercises
   * nulls in both operands.
   */
  private static Stream<Arguments> createBooleanVVParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Boolean[]{false, false, false, true, true},
            new Boolean[]{true, true, true, false, false},
            new Boolean[]{true, true, false, false, true}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Boolean[]{null, false, false, true, null},
            new Boolean[]{true, null, null, false, null},
            new Boolean[]{true, null, false, false, null})
    );
  }
  /**
   * (predicate, column, scalar, expected) cases for ifElse with a boolean
   * column as the true-side and a scalar as the false-side; the second case
   * uses a null scalar.
   */
  private static Stream<Arguments> createBooleanVSParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Boolean[]{false, false, false, true, true},
            Boolean.FALSE,
            new Boolean[]{false, false, false, false, true}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Boolean[]{null, false, false, true, null},
            null,
            new Boolean[]{null, null, false, null, null})
    );
  }
  /**
   * (predicate, scalar, column, expected) cases for ifElse with a boolean
   * scalar as the true-side and a column as the false-side; the second case
   * uses a null scalar.
   */
  private static Stream<Arguments> createBooleanSVParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            Boolean.FALSE,
            new Boolean[]{false, false, false, true, true},
            new Boolean[]{false, false, false, true, false}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            null,
            new Boolean[]{null, false, false, true, null},
            new Boolean[]{null, false, null, true, null})
    );
  }
  /**
   * (predicate, true-scalar, false-scalar, expected) cases for ifElse with
   * two boolean scalar operands, including a null scalar on either side.
   */
  private static Stream<Arguments> createBooleanSSParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            Boolean.FALSE,
            Boolean.TRUE,
            new Boolean[]{true, true, false, true, false}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            null,
            Boolean.FALSE,
            new Boolean[]{false, false, null, false, null}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            Boolean.FALSE,
            null,
            new Boolean[]{null, null, false, null, false})
    );
  }
  /**
   * (predicate, true-values, false-values, expected) cases for ifElse with
   * two byte column operands; the second case exercises nulls in both.
   */
  private static Stream<Arguments> createByteVVParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Byte[]{(byte) 10, (byte) -128, (byte) 127, (byte) -1, (byte) 0},
            new Byte[]{(byte) -2, (byte) 1, (byte) 16, (byte) -63, (byte) 42},
            new Byte[]{(byte) -2, (byte) 1, (byte) 127, (byte) -63, (byte) 0}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Byte[]{null, (byte) -128, (byte) 127, (byte) -1, null},
            new Byte[]{(byte) -2, null, null, (byte) -63, null},
            new Byte[]{(byte) -2, null, (byte) 127, (byte) -63, null})
    );
  }
  /**
   * (predicate, column, scalar, expected) cases for ifElse with a byte column
   * as the true-side and a scalar false-side (including a null scalar).
   */
  private static Stream<Arguments> createByteVSParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Byte[]{(byte) 10, (byte) -128, (byte) 127, (byte) -1, (byte) 0},
            (byte) -2,
            new Byte[]{(byte) -2, (byte) -2, (byte) 127, (byte) -2, (byte) 0}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Byte[]{null, (byte) -128, (byte) 127, (byte) -1, null},
            null,
            new Byte[]{null, null, (byte) 127, null, null})
    );
  }
  /**
   * (predicate, scalar, column, expected) cases for ifElse with a byte scalar
   * as the true-side and a column false-side (including a null scalar).
   */
  private static Stream<Arguments> createByteSVParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            (byte) -128,
            new Byte[]{(byte) -2, (byte) 1, (byte) 16, (byte) -63, (byte) 42},
            new Byte[]{(byte) -2, (byte) 1, (byte) -128, (byte) -63, (byte) -128}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            null,
            new Byte[]{null, (byte) 1, (byte) 16, null, (byte) 42},
            new Byte[]{null, (byte) 1, null, null, null})
    );
  }
  /**
   * (predicate, true-scalar, false-scalar, expected) cases for ifElse with
   * two byte scalar operands, including a null false-side scalar.
   */
  private static Stream<Arguments> createByteSSParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            (byte) -128,
            (byte) 42,
            new Byte[]{(byte) 42, (byte) 42, (byte) -128, (byte) 42, (byte) -128}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            (byte) -128,
            null,
            new Byte[]{null, null, (byte) -128, null, (byte) -128})
    );
  }
  /**
   * (predicate, true-values, false-values, expected) cases for ifElse with
   * two short column operands; the second case exercises nulls in both.
   */
  private static Stream<Arguments> createShortVVParams() {
    return Stream.of(
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Short[]{(short) 1024, (short) -128, (short) 127, (short) -1, (short) 0},
            new Short[]{(short) -2048, (short) 1, (short) 16, (short) -63, (short) 42},
            new Short[]{(short) -2048, (short) 1, (short) 127, (short) -63, (short) 0}),
        Arguments.of(
            new Boolean[]{false, false, true, false, true},
            new Short[]{null, (short) -128, (short) 127, (short) -1, null},
            new Short[]{(short) -2048, null, null, (short) -63, null},
            new Short[]{(short) -2048, null, (short) 127, (short) -63, null})
    );
  }
private static Stream<Arguments> createShortVSParams() {
return Stream.of(
Arguments.of(
new Boolean[]{false, false, true, false, true},
new Short[]{(short) 1024, (short) -128, (short) 127, (short) -1, (short) 0},
(short) -2048,
new Short[]{(short) -2048, (short) -2048, (short) 127, (short) -2048, (short) 0}),
Arguments.of(
new Boolean[]{false, false, true, false, true},
new Short[]{null, (short) -128, (short) 127, (short) -1, null},
null,
new Short[]{null, null, (short) 127, null, null})
);
}
private static Stream<Arguments> createShortSVParams() {
return Stream.of(
Arguments.of(
new Boolean[]{false, false, true, false, true},
(short) -1287,
new Short[]{(short) -2048, (short) 1, (short) 16, (short) -63, (short) 42},
new Short[]{(short) -2048, (short) 1, (short) -1287, (short) -63, (short) -1287}),
Arguments.of(
new Boolean[]{false, false, true, false, true},
null,
new Short[]{null, (short) 1, (short) 16, null, (short) 42},
new Short[]{null, (short) 1, null, null, null})
);
}
private static Stream<Arguments> createShortSSParams() {
return Stream.of(
Arguments.of(
new Boolean[]{false, false, true, false, true},
(short) -1287,
(short) 421,
new Short[]{(short) 421, (short) 421, (short) -1287, (short) 421, (short) -1287}),
Arguments.of(
new Boolean[]{false, false, true, false, true},
(short) -1287,
null,
new Short[]{null, null, (short) -1287, null, (short) -1287})
);
}
// Cases for INT32 ifElse(vector, vector): predicate, true-side column, false-side column, expected.
// Also reused by the TIMESTAMP_DAYS tests, which wrap the same ints in day-timestamp columns.
private static Stream<Arguments> createIntVVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Integer[]{10240, -128, 127, -1, 0},
          new Integer[]{-20480, 1, 16, -63, 42},
          new Integer[]{-20480, 1, 127, -63, 0}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Integer[]{null, -128, 127, -1, null},
          new Integer[]{-20480, null, null, -63, null},
          new Integer[]{-20480, null, 127, -63, null})
  );
}
// Cases for INT32 ifElse(vector, scalar).
private static Stream<Arguments> createIntVSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Integer[]{10240, -128, 127, -1, 0},
          -20480,
          new Integer[]{-20480, -20480, 127, -20480, 0}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Integer[]{null, -128, 127, -1, null},
          null,
          new Integer[]{null, null, 127, null, null})
  );
}
// Cases for INT32 ifElse(scalar, vector).
private static Stream<Arguments> createIntSVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -12875,
          new Integer[]{-2, 1, 16, -63, 42},
          new Integer[]{-2, 1, -12875, -63, -12875}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          null,
          new Integer[]{null, 1, 16, null, 42},
          new Integer[]{null, 1, null, null, null})
  );
}
// Cases for INT32 ifElse(scalar, scalar).
private static Stream<Arguments> createIntSSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -12875,
          42321,
          new Integer[]{42321, 42321, -12875, 42321, -12875}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -12875,
          null,
          new Integer[]{null, null, -12875, null, -12875})
  );
}
// Cases for INT64 ifElse(vector, vector): predicate, true-side column, false-side column, expected.
// Also reused by the second/milli/micro/nano timestamp tests, which wrap the same longs.
private static Stream<Arguments> createLongVVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Long[]{1024056789L, -128L, 127L, -1L, 0L},
          new Long[]{-2048012345L, 1L, 16L, -63L, 42L},
          new Long[]{-2048012345L, 1L, 127L, -63L, 0L}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Long[]{null, -128L, 127L, -1L, null},
          new Long[]{-2048012345L, null, null, -63L, null},
          new Long[]{-2048012345L, null, 127L, -63L, null})
  );
}
// Cases for INT64 ifElse(vector, scalar).
private static Stream<Arguments> createLongVSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Long[]{10240L, -128L, 127L, -1L, 0L},
          -2048012345L,
          new Long[]{-2048012345L, -2048012345L, 127L, -2048012345L, 0L}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Long[]{null, -128L, 127L, -1L, null},
          null,
          new Long[]{null, null, 127L, null, null})
  );
}
// Cases for INT64 ifElse(scalar, vector).
private static Stream<Arguments> createLongSVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -12875L,
          new Long[]{-2L, 1L, 16L, -63L, 42L},
          new Long[]{-2L, 1L, -12875L, -63L, -12875L}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          null,
          new Long[]{null, 1L, 16L, null, 42L},
          new Long[]{null, 1L, null, null, null})
  );
}
// Cases for INT64 ifElse(scalar, scalar).
private static Stream<Arguments> createLongSSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -12875L,
          42321L,
          new Long[]{42321L, 42321L, -12875L, 42321L, -12875L}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -12875L,
          null,
          new Long[]{null, null, -12875L, null, -12875L})
  );
}
// Cases for FLOAT32 ifElse(vector, vector): predicate, true-side column, false-side column, expected.
private static Stream<Arguments> createFloatVVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Float[]{10240.56789f, -128f, 127f, -1f, 0f},
          new Float[]{-20480.12345f, 1f, 16f, -6.3f, 42f},
          new Float[]{-20480.12345f, 1f, 127f, -6.3f, 0f}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Float[]{null, -128f, 127f, -1f, null},
          new Float[]{-20480.12345f, null, null, -6.3f, null},
          new Float[]{-20480.12345f, null, 127f, -6.3f, null})
  );
}
// Cases for FLOAT32 ifElse(vector, scalar).
private static Stream<Arguments> createFloatVSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Float[]{10240f, -128f, 127f, -1f, 0f},
          -20480.12345f,
          new Float[]{-20480.12345f, -20480.12345f, 127f, -20480.12345f, 0f}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Float[]{null, -128f, 127f, -1f, null},
          null,
          new Float[]{null, null, 127f, null, null})
  );
}
// Cases for FLOAT32 ifElse(scalar, vector).
private static Stream<Arguments> createFloatSVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -128.75f,
          new Float[]{-2f, 1f, 16f, -6.3f, 42f},
          new Float[]{-2f, 1f, -128.75f, -6.3f, -128.75f}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          null,
          new Float[]{null, 1f, 16f, null, 42f},
          new Float[]{null, 1f, null, null, null})
  );
}
// Cases for FLOAT32 ifElse(scalar, scalar).
private static Stream<Arguments> createFloatSSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -128.75f,
          4232.1f,
          new Float[]{4232.1f, 4232.1f, -128.75f, 4232.1f, -128.75f}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -128.75f,
          null,
          new Float[]{null, null, -128.75f, null, -128.75f})
  );
}
// Cases for FLOAT64 ifElse(vector, vector): predicate, true-side column, false-side column, expected.
private static Stream<Arguments> createDoubleVVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Double[]{10240.56789, -128., 127., -1., 0.},
          new Double[]{-20480.12345, 1., 16., -6.3, 42.},
          new Double[]{-20480.12345, 1., 127., -6.3, 0.}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Double[]{null, -128., 127., -1., null},
          new Double[]{-20480.12345, null, null, -6.3, null},
          new Double[]{-20480.12345, null, 127., -6.3, null})
  );
}
// Cases for FLOAT64 ifElse(vector, scalar).
private static Stream<Arguments> createDoubleVSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Double[]{10240., -128., 127., -1., 0.},
          -20480.12345,
          new Double[]{-20480.12345, -20480.12345, 127., -20480.12345, 0.}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new Double[]{null, -128., 127., -1., null},
          null,
          new Double[]{null, null, 127., null, null})
  );
}
// Cases for FLOAT64 ifElse(scalar, vector).
private static Stream<Arguments> createDoubleSVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -128.75,
          new Double[]{-2., 1., 16., -6.3, 42.},
          new Double[]{-2., 1., -128.75, -6.3, -128.75}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          null,
          new Double[]{null, 1., 16., null, 42.},
          new Double[]{null, 1., null, null, null})
  );
}
// Cases for FLOAT64 ifElse(scalar, scalar).
private static Stream<Arguments> createDoubleSSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -128.75,
          4232.1,
          new Double[]{4232.1, 4232.1, -128.75, 4232.1, -128.75}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          -128.75,
          null,
          new Double[]{null, null, -128.75, null, -128.75})
  );
}
// Cases for STRING ifElse(vector, vector): predicate, true-side column, false-side column, expected.
private static Stream<Arguments> createStringVVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new String[]{"hello", "world", "how", "are", "you"},
          new String[]{"why", "fine", "thanks", "for", "asking"},
          new String[]{"why", "fine", "how", "for", "you"}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new String[]{null, "world", "how", "are", null},
          new String[]{"why", null, null, "for", null},
          new String[]{"why", null, "how", "for", null})
  );
}
// Cases for STRING ifElse(vector, scalar).
private static Stream<Arguments> createStringVSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new String[]{"hello", "world", "how", "are", "you"},
          "foo",
          new String[]{"foo", "foo", "how", "foo", "you"}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          new String[]{null, "world", "how", "are", null},
          null,
          new String[]{null, null, "how", null, null})
  );
}
// Cases for STRING ifElse(scalar, vector).
private static Stream<Arguments> createStringSVParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          "bar",
          new String[]{"why", "fine", "thanks", "for", "asking"},
          new String[]{"why", "fine", "bar", "for", "bar"}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          null,
          new String[]{null, "world", "how", "are", null},
          new String[]{null, "world", null, "are", null})
  );
}
// Cases for STRING ifElse(scalar, scalar); the extra third case covers a null true-side scalar.
private static Stream<Arguments> createStringSSParams() {
  return Stream.of(
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          "hello",
          "world",
          new String[]{"world", "world", "hello", "world", "hello"}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          null,
          "world",
          new String[]{"world", "world", null, "world", null}),
      Arguments.of(
          new Boolean[]{false, false, true, false, true},
          "hello",
          null,
          new String[]{null, null, "hello", null, "hello"})
  );
}
/** Verifies {@code ifElse} with BOOL8 column operands on both branches. */
@ParameterizedTest
@MethodSource("createBooleanVVParams")
void testBooleanVV(Boolean[] predVals, Boolean[] trueVals, Boolean[] falseVals, Boolean[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedBooleans(trueVals);
       ColumnVector onFalse = ColumnVector.fromBoxedBooleans(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedBooleans(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a BOOL8 column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createBooleanVSParams")
void testBooleanVS(Boolean[] predVals, Boolean[] trueVals, Boolean falseVal, Boolean[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedBooleans(trueVals);
       Scalar onFalse = Scalar.fromBool(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedBooleans(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a BOOL8 scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createBooleanSVParams")
void testBooleanSV(Boolean[] predVals, Boolean trueVal, Boolean[] falseVals, Boolean[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromBool(trueVal);
       ColumnVector onFalse = ColumnVector.fromBoxedBooleans(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedBooleans(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with BOOL8 scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createBooleanSSParams")
void testBooleanSS(Boolean[] predVals, Boolean trueVal, Boolean falseVal, Boolean[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromBool(trueVal);
       Scalar onFalse = Scalar.fromBool(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedBooleans(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT8 column operands on both branches. */
@ParameterizedTest
@MethodSource("createByteVVParams")
void testByteVV(Boolean[] predVals, Byte[] trueVals, Byte[] falseVals, Byte[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedBytes(trueVals);
       ColumnVector onFalse = ColumnVector.fromBoxedBytes(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedBytes(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT8 column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createByteVSParams")
void testByteVS(Boolean[] predVals, Byte[] trueVals, Byte falseVal, Byte[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedBytes(trueVals);
       Scalar onFalse = Scalar.fromByte(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedBytes(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT8 scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createByteSVParams")
void testBytesSV(Boolean[] predVals, Byte trueVal, Byte[] falseVals, Byte[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromByte(trueVal);
       ColumnVector onFalse = ColumnVector.fromBoxedBytes(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedBytes(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT8 scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createByteSSParams")
void testBytesSS(Boolean[] predVals, Byte trueVal, Byte falseVal, Byte[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromByte(trueVal);
       Scalar onFalse = Scalar.fromByte(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedBytes(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT16 column operands on both branches. */
@ParameterizedTest
@MethodSource("createShortVVParams")
void testShortVV(Boolean[] predVals, Short[] trueVals, Short[] falseVals, Short[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedShorts(trueVals);
       ColumnVector onFalse = ColumnVector.fromBoxedShorts(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedShorts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT16 column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createShortVSParams")
void testShortVS(Boolean[] predVals, Short[] trueVals, Short falseVal, Short[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedShorts(trueVals);
       Scalar onFalse = Scalar.fromShort(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedShorts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT16 scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createShortSVParams")
void testShortsSV(Boolean[] predVals, Short trueVal, Short[] falseVals, Short[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromShort(trueVal);
       ColumnVector onFalse = ColumnVector.fromBoxedShorts(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedShorts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT16 scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createShortSSParams")
void testShortsSS(Boolean[] predVals, Short trueVal, Short falseVal, Short[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromShort(trueVal);
       Scalar onFalse = Scalar.fromShort(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedShorts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT32 column operands on both branches. */
@ParameterizedTest
@MethodSource("createIntVVParams")
void testIntVV(Boolean[] predVals, Integer[] trueVals, Integer[] falseVals, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedInts(trueVals);
       ColumnVector onFalse = ColumnVector.fromBoxedInts(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT32 column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createIntVSParams")
void testIntVS(Boolean[] predVals, Integer[] trueVals, Integer falseVal, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedInts(trueVals);
       Scalar onFalse = Scalar.fromInt(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT32 scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createIntSVParams")
void testIntsSV(Boolean[] predVals, Integer trueVal, Integer[] falseVals, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromInt(trueVal);
       ColumnVector onFalse = ColumnVector.fromBoxedInts(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT32 scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createIntSSParams")
void testIntsSS(Boolean[] predVals, Integer trueVal, Integer falseVal, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromInt(trueVal);
       Scalar onFalse = Scalar.fromInt(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT64 column operands on both branches. */
@ParameterizedTest
@MethodSource("createLongVVParams")
void testLongVV(Boolean[] predVals, Long[] trueVals, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedLongs(trueVals);
       ColumnVector onFalse = ColumnVector.fromBoxedLongs(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT64 column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createLongVSParams")
void testLongVS(Boolean[] predVals, Long[] trueVals, Long falseVal, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedLongs(trueVals);
       Scalar onFalse = Scalar.fromLong(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with an INT64 scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createLongSVParams")
void testLongsSV(Boolean[] predVals, Long trueVal, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromLong(trueVal);
       ColumnVector onFalse = ColumnVector.fromBoxedLongs(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with INT64 scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createLongSSParams")
void testLongsSS(Boolean[] predVals, Long trueVal, Long falseVal, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromLong(trueVal);
       Scalar onFalse = Scalar.fromLong(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with FLOAT32 column operands on both branches. */
@ParameterizedTest
@MethodSource("createFloatVVParams")
void testFloatVV(Boolean[] predVals, Float[] trueVals, Float[] falseVals, Float[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedFloats(trueVals);
       ColumnVector onFalse = ColumnVector.fromBoxedFloats(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedFloats(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a FLOAT32 column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createFloatVSParams")
void testFloatVS(Boolean[] predVals, Float[] trueVals, Float falseVal, Float[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedFloats(trueVals);
       Scalar onFalse = Scalar.fromFloat(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedFloats(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a FLOAT32 scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createFloatSVParams")
void testFloatsSV(Boolean[] predVals, Float trueVal, Float[] falseVals, Float[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromFloat(trueVal);
       ColumnVector onFalse = ColumnVector.fromBoxedFloats(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedFloats(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with FLOAT32 scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createFloatSSParams")
void testFloatsSS(Boolean[] predVals, Float trueVal, Float falseVal, Float[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromFloat(trueVal);
       Scalar onFalse = Scalar.fromFloat(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedFloats(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with FLOAT64 column operands on both branches. */
@ParameterizedTest
@MethodSource("createDoubleVVParams")
void testDoubleVV(Boolean[] predVals, Double[] trueVals, Double[] falseVals, Double[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedDoubles(trueVals);
       ColumnVector onFalse = ColumnVector.fromBoxedDoubles(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedDoubles(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a FLOAT64 column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createDoubleVSParams")
void testDoubleVS(Boolean[] predVals, Double[] trueVals, Double falseVal, Double[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromBoxedDoubles(trueVals);
       Scalar onFalse = Scalar.fromDouble(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedDoubles(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a FLOAT64 scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createDoubleSVParams")
void testDoublesSV(Boolean[] predVals, Double trueVal, Double[] falseVals, Double[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromDouble(trueVal);
       ColumnVector onFalse = ColumnVector.fromBoxedDoubles(falseVals);
       ColumnVector expected = ColumnVector.fromBoxedDoubles(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with FLOAT64 scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createDoubleSSParams")
void testDoublesSS(Boolean[] predVals, Double trueVal, Double falseVal, Double[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromDouble(trueVal);
       Scalar onFalse = Scalar.fromDouble(falseVal);
       ColumnVector expected = ColumnVector.fromBoxedDoubles(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with TIMESTAMP_DAYS column operands on both branches (reuses INT32 data). */
@ParameterizedTest
@MethodSource("createIntVVParams")
void testTimestampDaysVV(Boolean[] predVals, Integer[] trueVals, Integer[] falseVals, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampDaysFromBoxedInts(trueVals);
       ColumnVector onFalse = ColumnVector.timestampDaysFromBoxedInts(falseVals);
       ColumnVector expected = ColumnVector.timestampDaysFromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a TIMESTAMP_DAYS column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createIntVSParams")
void testTimestampDaysVS(Boolean[] predVals, Integer[] trueVals, Integer falseVal, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampDaysFromBoxedInts(trueVals);
       Scalar onFalse = Scalar.timestampDaysFromInt(falseVal);
       ColumnVector expected = ColumnVector.timestampDaysFromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a TIMESTAMP_DAYS scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createIntSVParams")
void testTimestampDaysSV(Boolean[] predVals, Integer trueVal, Integer[] falseVals, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampDaysFromInt(trueVal);
       ColumnVector onFalse = ColumnVector.timestampDaysFromBoxedInts(falseVals);
       ColumnVector expected = ColumnVector.timestampDaysFromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with TIMESTAMP_DAYS scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createIntSSParams")
void testTimestampDaysSS(Boolean[] predVals, Integer trueVal, Integer falseVal, Integer[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampDaysFromInt(trueVal);
       Scalar onFalse = Scalar.timestampDaysFromInt(falseVal);
       ColumnVector expected = ColumnVector.timestampDaysFromBoxedInts(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with TIMESTAMP_SECONDS column operands on both branches (reuses INT64 data). */
@ParameterizedTest
@MethodSource("createLongVVParams")
void testTimestampSecondsVV(Boolean[] predVals, Long[] trueVals, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampSecondsFromBoxedLongs(trueVals);
       ColumnVector onFalse = ColumnVector.timestampSecondsFromBoxedLongs(falseVals);
       ColumnVector expected = ColumnVector.timestampSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a TIMESTAMP_SECONDS column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createLongVSParams")
void testTimestampSecondsVS(Boolean[] predVals, Long[] trueVals, Long falseVal, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampSecondsFromBoxedLongs(trueVals);
       Scalar onFalse = Scalar.timestampFromLong(DType.TIMESTAMP_SECONDS, falseVal);
       ColumnVector expected = ColumnVector.timestampSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a TIMESTAMP_SECONDS scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createLongSVParams")
void testTimestampSecondsSV(Boolean[] predVals, Long trueVal, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_SECONDS, trueVal);
       ColumnVector onFalse = ColumnVector.timestampSecondsFromBoxedLongs(falseVals);
       ColumnVector expected = ColumnVector.timestampSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with TIMESTAMP_SECONDS scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createLongSSParams")
void testTimestampSecondsSS(Boolean[] predVals, Long trueVal, Long falseVal, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_SECONDS, trueVal);
       Scalar onFalse = Scalar.timestampFromLong(DType.TIMESTAMP_SECONDS, falseVal);
       ColumnVector expected = ColumnVector.timestampSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with TIMESTAMP_MILLISECONDS column operands on both branches (reuses INT64 data). */
@ParameterizedTest
@MethodSource("createLongVVParams")
void testTimestampMilliSecondsVV(Boolean[] predVals, Long[] trueVals, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampMilliSecondsFromBoxedLongs(trueVals);
       ColumnVector onFalse = ColumnVector.timestampMilliSecondsFromBoxedLongs(falseVals);
       ColumnVector expected = ColumnVector.timestampMilliSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a TIMESTAMP_MILLISECONDS column true operand and scalar false operand. */
@ParameterizedTest
@MethodSource("createLongVSParams")
void testTimestampMilliSecondsVS(Boolean[] predVals, Long[] trueVals, Long falseVal, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampMilliSecondsFromBoxedLongs(trueVals);
       Scalar onFalse = Scalar.timestampFromLong(DType.TIMESTAMP_MILLISECONDS, falseVal);
       ColumnVector expected = ColumnVector.timestampMilliSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with a TIMESTAMP_MILLISECONDS scalar true operand and column false operand. */
@ParameterizedTest
@MethodSource("createLongSVParams")
void testTimestampMilliSecondsSV(Boolean[] predVals, Long trueVal, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_MILLISECONDS, trueVal);
       ColumnVector onFalse = ColumnVector.timestampMilliSecondsFromBoxedLongs(falseVals);
       ColumnVector expected = ColumnVector.timestampMilliSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
/** Verifies {@code ifElse} with TIMESTAMP_MILLISECONDS scalar operands on both branches. */
@ParameterizedTest
@MethodSource("createLongSSParams")
void testTimestampMilliSecondsSS(Boolean[] predVals, Long trueVal, Long falseVal, Long[] expectVals) {
  try (ColumnVector predicate = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_MILLISECONDS, trueVal);
       Scalar onFalse = Scalar.timestampFromLong(DType.TIMESTAMP_MILLISECONDS, falseVal);
       ColumnVector expected = ColumnVector.timestampMilliSecondsFromBoxedLongs(expectVals);
       ColumnVector actual = predicate.ifElse(onTrue, onFalse)) {
    assertColumnsAreEqual(expected, actual);
  }
}
@ParameterizedTest
@MethodSource("createLongVVParams")
void testTimestampMicroSecondsVV(Boolean[] predVals, Long[] trueVals, Long[] falseVals, Long[] expectVals) {
try (ColumnVector pred = ColumnVector.fromBoxedBooleans(predVals);
ColumnVector trueVec = ColumnVector.timestampMicroSecondsFromBoxedLongs(trueVals);
ColumnVector falseVec = ColumnVector.timestampMicroSecondsFromBoxedLongs(falseVals);
ColumnVector result = pred.ifElse(trueVec, falseVec);
ColumnVector expected = ColumnVector.timestampMicroSecondsFromBoxedLongs(expectVals)) {
assertColumnsAreEqual(expected, result);
}
}
@ParameterizedTest
@MethodSource("createLongVSParams")
void testTimestampMicroSecondsVS(Boolean[] predVals, Long[] trueVals, Long falseVal, Long[] expectVals) {
try (ColumnVector pred = ColumnVector.fromBoxedBooleans(predVals);
ColumnVector trueVec = ColumnVector.timestampMicroSecondsFromBoxedLongs(trueVals);
Scalar falseScalar = Scalar.timestampFromLong(DType.TIMESTAMP_MICROSECONDS, falseVal);
ColumnVector result = pred.ifElse(trueVec, falseScalar);
ColumnVector expected = ColumnVector.timestampMicroSecondsFromBoxedLongs(expectVals)) {
assertColumnsAreEqual(expected, result);
}
}
/** ifElse with a microsecond-timestamp scalar (true) and column (false). */
@ParameterizedTest
@MethodSource("createLongSVParams")
void testTimestampMicroSecondsSV(Boolean[] predVals, Long trueVal, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_MICROSECONDS, trueVal);
       ColumnVector onFalse = ColumnVector.timestampMicroSecondsFromBoxedLongs(falseVals);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.timestampMicroSecondsFromBoxedLongs(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with microsecond-timestamp scalars on both branches. */
@ParameterizedTest
@MethodSource("createLongSSParams")
void testTimestampMicroSecondsSS(Boolean[] predVals, Long trueVal, Long falseVal, Long[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_MICROSECONDS, trueVal);
       Scalar onFalse = Scalar.timestampFromLong(DType.TIMESTAMP_MICROSECONDS, falseVal);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.timestampMicroSecondsFromBoxedLongs(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with nanosecond-timestamp columns on both branches. */
@ParameterizedTest
@MethodSource("createLongVVParams")
void testTimestampNanoSecondsVV(Boolean[] predVals, Long[] trueVals, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampNanoSecondsFromBoxedLongs(trueVals);
       ColumnVector onFalse = ColumnVector.timestampNanoSecondsFromBoxedLongs(falseVals);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.timestampNanoSecondsFromBoxedLongs(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with a nanosecond-timestamp column (true) and scalar (false). */
@ParameterizedTest
@MethodSource("createLongVSParams")
void testTimestampNanoSecondsVS(Boolean[] predVals, Long[] trueVals, Long falseVal, Long[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.timestampNanoSecondsFromBoxedLongs(trueVals);
       Scalar onFalse = Scalar.timestampFromLong(DType.TIMESTAMP_NANOSECONDS, falseVal);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.timestampNanoSecondsFromBoxedLongs(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with a nanosecond-timestamp scalar (true) and column (false). */
@ParameterizedTest
@MethodSource("createLongSVParams")
void testTimestampNanoSecondsSV(Boolean[] predVals, Long trueVal, Long[] falseVals, Long[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_NANOSECONDS, trueVal);
       ColumnVector onFalse = ColumnVector.timestampNanoSecondsFromBoxedLongs(falseVals);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.timestampNanoSecondsFromBoxedLongs(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with nanosecond-timestamp scalars on both branches. */
@ParameterizedTest
@MethodSource("createLongSSParams")
void testTimestampNanoSecondsSS(Boolean[] predVals, Long trueVal, Long falseVal, Long[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.timestampFromLong(DType.TIMESTAMP_NANOSECONDS, trueVal);
       Scalar onFalse = Scalar.timestampFromLong(DType.TIMESTAMP_NANOSECONDS, falseVal);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.timestampNanoSecondsFromBoxedLongs(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with string columns on both branches. */
@ParameterizedTest
@MethodSource("createStringVVParams")
void testStringVV(Boolean[] predVals, String[] trueVals, String[] falseVals, String[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromStrings(trueVals);
       ColumnVector onFalse = ColumnVector.fromStrings(falseVals);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.fromStrings(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with a string column (true) and string scalar (false). */
@ParameterizedTest
@MethodSource("createStringVSParams")
void testStringVS(Boolean[] predVals, String[] trueVals, String falseVal, String[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       ColumnVector onTrue = ColumnVector.fromStrings(trueVals);
       Scalar onFalse = Scalar.fromString(falseVal);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.fromStrings(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with a string scalar (true) and string column (false). */
@ParameterizedTest
@MethodSource("createStringSVParams")
void testStringSV(Boolean[] predVals, String trueVal, String[] falseVals, String[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromString(trueVal);
       ColumnVector onFalse = ColumnVector.fromStrings(falseVals);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.fromStrings(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse with string scalars on both branches. */
@ParameterizedTest
@MethodSource("createStringSSParams")
void testStringSS(Boolean[] predVals, String trueVal, String falseVal, String[] expectVals) {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(predVals);
       Scalar onTrue = Scalar.fromString(trueVal);
       Scalar onFalse = Scalar.fromString(falseVal);
       ColumnVector actual = mask.ifElse(onTrue, onFalse);
       ColumnVector wanted = ColumnVector.fromStrings(expectVals)) {
    assertColumnsAreEqual(wanted, actual);
  }
}
/** ifElse must reject branch columns of differing types (INT32 vs INT64). */
@Test
void testMismatchedTypesVV() {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(true, false, false, true);
       ColumnVector ints = ColumnVector.fromBoxedInts(1, 2, 3, 4);
       ColumnVector longs = ColumnVector.fromBoxedLongs(5L, 6L, 7L, 8L)) {
    assertThrows(CudfException.class, () -> mask.ifElse(ints, longs));
  }
}
/** ifElse must reject an INT64 column paired with a STRING scalar. */
@Test
void testMismatchedTypesVS() {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(true, false, false, true);
       ColumnVector longs = ColumnVector.fromBoxedLongs(1L, 2L, 3L, 4L);
       Scalar str = Scalar.fromString("hey")) {
    assertThrows(CudfException.class, () -> mask.ifElse(longs, str));
  }
}
/** ifElse must reject an INT8 scalar paired with an INT32 column. */
@Test
void testMismatchedTypesSV() {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(true, false, false, true);
       Scalar byteScalar = Scalar.fromByte((byte) 1);
       ColumnVector ints = ColumnVector.fromBoxedInts(0, 2, 4, 6)) {
    assertThrows(CudfException.class, () -> mask.ifElse(byteScalar, ints));
  }
}
/** ifElse must reject an INT8 scalar paired with a STRING scalar. */
@Test
void testMismatchedTypesSS() {
  try (ColumnVector mask = ColumnVector.fromBoxedBooleans(true, false, false, true);
       Scalar byteScalar = Scalar.fromByte((byte) 1);
       Scalar str = Scalar.fromString("hey")) {
    assertThrows(CudfException.class, () -> mask.ifElse(byteScalar, str));
  }
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/NvtxTest.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Smoke tests for NVTX profiling ranges: push/pop ({@link NvtxRange}) and
 * start/end ({@link NvtxUniqueRange}) in nested and out-of-order combinations.
 */
public class NvtxTest {
  @Test
  public void testNvtxStartEndEnclosed() {
    // Properly nested start/end ranges: inner closes before outer.
    NvtxUniqueRange outer = new NvtxUniqueRange("start/end", NvtxColor.RED);
    NvtxUniqueRange inner = new NvtxUniqueRange("enclosed start/end", NvtxColor.BLUE);
    inner.close();
    outer.close();
  }

  @Test
  public void testNvtxStartEndCloseOutOfOrder() {
    // Unique ranges may legally close in a different order than they opened.
    NvtxUniqueRange first = new NvtxUniqueRange("start/end closes first", NvtxColor.RED);
    NvtxUniqueRange second = new NvtxUniqueRange("start/end closes later", NvtxColor.BLUE);
    first.close();
    second.close();
  }

  @Test
  public void testNvtxPushPop() {
    // Nested push/pop ranges via try-with-resources.
    try (NvtxRange outer = new NvtxRange("push/pop", NvtxColor.RED)) {
      try (NvtxRange inner = new NvtxRange("enclosed push/pop", NvtxColor.BLUE)) {
      }
    }
  }

  @Test
  public void testNvtxPushPopEnclosingStartEnd() {
    // A start/end range fully contained inside a push/pop range.
    try (NvtxRange outer = new NvtxRange("push/pop", NvtxColor.RED)) {
      NvtxUniqueRange inner = new NvtxUniqueRange("enclosed start/end", NvtxColor.BLUE);
      inner.close();
    }
  }

  @Test
  public void testNvtxPushPopAndStartEndCloseOutOfOrder() {
    // The push/pop range ends while the start/end range is still open.
    NvtxUniqueRange lingering;
    try (NvtxRange scoped = new NvtxRange("push/pop closes first", NvtxColor.RED)) {
      lingering = new NvtxUniqueRange("start/end closes later", NvtxColor.BLUE);
    }
    lingering.close();
  }

  @Test
  public void testNvtxUniqueRangeCloseMultipleTimes() {
    // Closing a unique range twice is a programming error.
    NvtxUniqueRange range = new NvtxUniqueRange("range", NvtxColor.RED);
    range.close();
    assertThrows(IllegalStateException.class, () -> {
      range.close();
    });
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/RmmTest.java
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
public class RmmTest {
private static final long TOO_MUCH_MEMORY = 3L * 1024 * 1024 * 1024 * 1024 * 1024 * 1024;
@BeforeEach
public void setup() {
  // Each test must start from an uninitialized RMM state.
  if (!Rmm.isInitialized()) {
    return;
  }
  Rmm.shutdown();
}
@AfterEach
public void teardown() {
  // Shut RMM down so the next test (or suite) starts clean.
  if (!Rmm.isInitialized()) {
    return;
  }
  Rmm.shutdown();
}
/** Constructs each memory-resource wrapper and checks it yields a non-zero native handle. */
@Test
public void testCreateAdaptors() {
  final long poolSize = 32 * 1024 * 1024; // 32 MiB
  try (RmmCudaMemoryResource resource = new RmmCudaMemoryResource()) {
    assert(resource.getHandle() != 0);
  }
  try (RmmCudaAsyncMemoryResource resource = new RmmCudaAsyncMemoryResource(poolSize, poolSize)) {
    assert(resource.getHandle() != 0);
  }
  try (RmmManagedMemoryResource resource = new RmmManagedMemoryResource()) {
    assert(resource.getHandle() != 0);
  }
  try (RmmArenaMemoryResource<RmmCudaMemoryResource> resource =
      new RmmArenaMemoryResource<>(new RmmCudaMemoryResource(), poolSize, false)) {
    assert(resource.getHandle() != 0);
  }
  try (RmmPoolMemoryResource<RmmCudaMemoryResource> resource =
      new RmmPoolMemoryResource<>(new RmmCudaMemoryResource(), poolSize, poolSize)) {
    assert(resource.getHandle() != 0);
  }
  try (RmmLimitingResourceAdaptor<RmmCudaMemoryResource> resource =
      new RmmLimitingResourceAdaptor<>(new RmmCudaMemoryResource(), poolSize, 64)) {
    assert(resource.getHandle() != 0);
  }
  try (RmmLoggingResourceAdaptor<RmmCudaMemoryResource> resource =
      new RmmLoggingResourceAdaptor<>(new RmmCudaMemoryResource(), Rmm.logToStderr(), true)) {
    assert(resource.getHandle() != 0);
  }
  try (RmmTrackingResourceAdaptor<RmmCudaMemoryResource> resource =
      new RmmTrackingResourceAdaptor<>(new RmmCudaMemoryResource(), 64)) {
    // A fresh tracking adaptor reports zero for every counter.
    assert(resource.getHandle() != 0);
    assert(resource.getTotalBytesAllocated() == 0);
    assert(resource.getMaxTotalBytesAllocated() == 0);
    assert(resource.getScopedMaxTotalBytesAllocated() == 0);
    // Resetting with a floor establishes a new scoped maximum.
    resource.resetScopedMaxTotalBytesAllocated(1024);
    assert(resource.getScopedMaxTotalBytesAllocated() == 1024);
  }
}
/** Total-bytes counter rises with a live allocation and returns to zero on free. */
@ParameterizedTest
@ValueSource(ints = {
    RmmAllocationMode.CUDA_DEFAULT,
    RmmAllocationMode.POOL,
    RmmAllocationMode.ARENA})
public void testTotalAllocated(int rmmAllocMode) {
  Rmm.initialize(rmmAllocMode, Rmm.logToStderr(), 512 * 1024 * 1024);
  assertEquals(0, Rmm.getTotalBytesAllocated());
  try (DeviceMemoryBuffer buffer = Rmm.alloc(1024)) {
    assertEquals(1024, Rmm.getTotalBytesAllocated());
  }
  assertEquals(0, Rmm.getTotalBytesAllocated());
}
/** The lifetime maximum sticks at its peak even after everything is freed. */
@ParameterizedTest
@ValueSource(ints = {
    RmmAllocationMode.CUDA_DEFAULT,
    RmmAllocationMode.POOL,
    RmmAllocationMode.ARENA})
public void testMaxOutstanding(int rmmAllocMode) {
  Rmm.initialize(rmmAllocMode, Rmm.logToStderr(), 512 * 1024 * 1024);
  assertEquals(0, Rmm.getMaximumTotalBytesAllocated());
  try (DeviceMemoryBuffer buffer = Rmm.alloc(1024)) {
    assertEquals(1024, Rmm.getMaximumTotalBytesAllocated());
  }
  // The buffer was freed, yet the historical peak remains.
  assertEquals(0, Rmm.getTotalBytesAllocated());
  assertEquals(1024, Rmm.getMaximumTotalBytesAllocated());
}
// Exercises the scoped-maximum counter: it tracks the peak bytes allocated
// since the most recent reset, independently of the lifetime maximum
// reported by getMaximumTotalBytesAllocated().
@ParameterizedTest
@ValueSource(ints = {
    RmmAllocationMode.CUDA_DEFAULT,
    RmmAllocationMode.POOL,
    RmmAllocationMode.ARENA})
public void testScopedMaxOutstanding(int rmmAllocMode) {
  Rmm.initialize(rmmAllocMode, Rmm.logToStderr(), 512 * 1024 * 1024);
  assertEquals(0, Rmm.getMaximumTotalBytesAllocated());
  // Two concurrent 1 KiB buffers push the scoped peak to 2 KiB.
  try (DeviceMemoryBuffer ignored = Rmm.alloc(1024);
       DeviceMemoryBuffer ignored2 = Rmm.alloc(1024)) {
    assertEquals(2048, Rmm.getScopedMaximumBytesAllocated());
  }
  assertEquals(0, Rmm.getTotalBytesAllocated());
  // The scoped peak survives the frees until it is explicitly reset.
  assertEquals(2048, Rmm.getScopedMaximumBytesAllocated());
  Rmm.resetScopedMaximumBytesAllocated();
  assertEquals(0, Rmm.getScopedMaximumBytesAllocated());
  // The lifetime maximum is unaffected by the scoped reset.
  assertEquals(2048, Rmm.getMaximumTotalBytesAllocated());
  DeviceMemoryBuffer ignored = Rmm.alloc(1024);
  ignored.close();
  // A new allocation after the reset establishes a fresh scoped peak.
  assertEquals(1024, Rmm.getScopedMaximumBytesAllocated());
  assertEquals(2048, Rmm.getMaximumTotalBytesAllocated());
  assertEquals(0, Rmm.getTotalBytesAllocated());
  // a non-zero value is the new minimum
  DeviceMemoryBuffer ignored2 = Rmm.alloc(1024);
  ignored2.close();
  Rmm.resetScopedMaximumBytesAllocated(10000);
  assertEquals(10000, Rmm.getScopedMaximumBytesAllocated());
  assertEquals(2048, Rmm.getMaximumTotalBytesAllocated());
  // Resetting with a floor below current usage: subsequent allocations still
  // raise both the scoped and lifetime peaks past the floor.
  try(DeviceMemoryBuffer ignored3 = Rmm.alloc(1024)) {
    Rmm.resetScopedMaximumBytesAllocated(1024);
    try (DeviceMemoryBuffer ignored4 = Rmm.alloc(20480)) {
      assertEquals(21504, Rmm.getScopedMaximumBytesAllocated());
      assertEquals(21504, Rmm.getMaximumTotalBytesAllocated());
    }
  }
}
// Verifies the scoped maximum when the reset happens while memory is still
// outstanding: frees after the reset drive the net allocation negative, so
// the scoped peak stays at zero until net usage turns positive again.
@ParameterizedTest
@ValueSource(ints = {
    RmmAllocationMode.CUDA_DEFAULT,
    RmmAllocationMode.POOL,
    RmmAllocationMode.ARENA})
public void testScopedMaxOutstandingNegative(int rmmAllocMode) {
  Rmm.initialize(rmmAllocMode, Rmm.logToStderr(), 512 * 1024 * 1024);
  assertEquals(0, Rmm.getMaximumTotalBytesAllocated());
  try (DeviceMemoryBuffer ignored = Rmm.alloc(1024);
       DeviceMemoryBuffer ignored2 = Rmm.alloc(1024)) {
    assertEquals(2048, Rmm.getScopedMaximumBytesAllocated());
    // Reset while 2 KiB is still live.
    Rmm.resetScopedMaximumBytesAllocated();
    assertEquals(0, Rmm.getScopedMaximumBytesAllocated());
  }
  // because we allocated a net -2048 Bytes since reset
  assertEquals(0, Rmm.getScopedMaximumBytesAllocated());
  DeviceMemoryBuffer ignored = Rmm.alloc(1024);
  ignored.close();
  // 1 KiB alloc never overcomes the -2 KiB deficit, so the peak stays zero.
  assertEquals(0, Rmm.getScopedMaximumBytesAllocated());
  // if we allocate 2KB and then 256B we start seeing a positive local maximum
  try (DeviceMemoryBuffer ignored2 = Rmm.alloc(2048);
       DeviceMemoryBuffer ignored3 = Rmm.alloc(256)) {
    assertEquals(256, Rmm.getScopedMaximumBytesAllocated());
  }
}
// Exercises the RmmEventHandler callback contract: onAllocFailure is retried
// until the handler returns false, and onAllocated/onDeallocated fire only
// when the handler is installed with debug enabled.
@Tag("noSanitizer")
@ParameterizedTest
@ValueSource(ints = {
    RmmAllocationMode.CUDA_DEFAULT,
    RmmAllocationMode.POOL,
    RmmAllocationMode.ARENA})
public void testEventHandler(int rmmAllocMode) {
  AtomicInteger invokedCount = new AtomicInteger();
  AtomicLong amountRequested = new AtomicLong();
  AtomicInteger timesRetried = new AtomicInteger();
  AtomicLong totalAllocated = new AtomicLong();
  AtomicLong totalDeallocated = new AtomicLong();
  RmmEventHandler handler = new BaseRmmEventHandler() {
    @Override
    public boolean onAllocFailure(long sizeRequested, int retryCount) {
      // Record the failure details; allow two retries, give up on the third.
      int count = invokedCount.incrementAndGet();
      timesRetried.set(retryCount);
      amountRequested.set(sizeRequested);
      return count != 3;
    }
    @Override
    public void onAllocated(long sizeAllocated) {
      totalAllocated.addAndGet(sizeAllocated);
    }
    @Override
    public void onDeallocated(long sizeDeallocated) {
      totalDeallocated.addAndGet(sizeDeallocated);
    }
  };
  Rmm.initialize(rmmAllocMode, Rmm.logToStderr(), 512 * 1024 * 1024);
  Rmm.setEventHandler(handler);
  // A successful allocation must not trigger the failure callback.
  DeviceMemoryBuffer addr = Rmm.alloc(1024);
  addr.close();
  assertTrue(addr.address != 0);
  assertEquals(0, invokedCount.get());
  // by default, we don't get callbacks on allocated or deallocated
  assertEquals(0, totalAllocated.get());
  assertEquals(0, totalDeallocated.get());
  // Try to allocate too much
  long requested = TOO_MUCH_MEMORY;
  try {
    addr = Rmm.alloc(requested);
    addr.close();
    fail("should have failed to allocate");
  } catch (OutOfMemoryError | RmmException ignored) {
  }
  // The handler was invoked three times: initial attempt plus two retries.
  assertEquals(3, invokedCount.get());
  assertEquals(2, timesRetried.get());
  assertEquals(requested, amountRequested.get());
  // verify after a failure we can still allocate something more reasonable
  requested = 8192;
  addr = Rmm.alloc(requested);
  addr.close();
  // test the debug event handler
  Rmm.clearEventHandler();
  Rmm.setEventHandler(handler, /*enableDebug*/ true);
  addr = Rmm.alloc(1024);
  addr.close();
  // With debug enabled, the alloc/dealloc callbacks now fire.
  assertEquals(1024, totalAllocated.get());
  assertEquals(1024, totalDeallocated.get());
}
/** Installing a second event handler without clearing the first is rejected. */
@Tag("noSanitizer")
@Test
public void testSetEventHandlerTwice() {
  Rmm.initialize(RmmAllocationMode.CUDA_DEFAULT, Rmm.logToStderr(), 0L);
  // installing an event handler the first time should not be an error
  RmmEventHandler firstHandler = new BaseRmmEventHandler() {
    @Override
    public boolean onAllocFailure(long sizeRequested, int retryCount) {
      return false;
    }
  };
  Rmm.setEventHandler(firstHandler);
  // installing a second event handler is an error
  RmmEventHandler otherHandler = new BaseRmmEventHandler() {
    @Override
    public boolean onAllocFailure(long sizeRequested, int retryCount) {
      return true;
    }
  };
  assertThrows(RmmException.class, () -> Rmm.setEventHandler(otherHandler));
}
/** Clearing a handler is a no-op when none is set, and really uninstalls one. */
@Tag("noSanitizer")
@Test
public void testClearEventHandler() {
  Rmm.initialize(RmmAllocationMode.CUDA_DEFAULT, Rmm.logToStderr(), 0L);
  // clearing the event handler when it isn't set is not an error
  Rmm.clearEventHandler();
  // Install a handler that would retry forever, then remove it again.
  RmmEventHandler alwaysRetry = new BaseRmmEventHandler() {
    @Override
    public boolean onAllocFailure(long sizeRequested, int retryCount) {
      return true;
    }
  };
  Rmm.setEventHandler(alwaysRetry);
  Rmm.clearEventHandler();
  // With the handler gone, an oversized allocation must fail outright
  // instead of retrying forever.
  try {
    DeviceMemoryBuffer addr = Rmm.alloc(TOO_MUCH_MEMORY);
    addr.close();
    fail("should have failed to allocate");
  } catch (OutOfMemoryError | RmmException ignored) {
  }
}
// Registers only allocation thresholds (32 KiB and 8 KiB) and verifies the
// alloc callback fires exactly when total usage crosses a threshold upward,
// while the dealloc callback (no thresholds registered) never fires.
@Tag("noSanitizer")
@Test
public void testAllocOnlyThresholds() {
  final AtomicInteger allocInvocations = new AtomicInteger(0);
  final AtomicInteger deallocInvocations = new AtomicInteger(0);
  final AtomicLong allocated = new AtomicLong(0);
  Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), 1024 * 1024L);
  RmmEventHandler handler = new RmmEventHandler() {
    @Override
    public boolean onAllocFailure(long sizeRequested, int retryCount) {
      return false;
    }
    @Override
    public long[] getAllocThresholds() {
      return new long[] { 32 * 1024, 8 * 1024 };
    }
    @Override
    public long[] getDeallocThresholds() {
      // No deallocation thresholds registered.
      return null;
    }
    @Override
    public void onAllocThreshold(long totalAllocSize) {
      allocInvocations.getAndIncrement();
      allocated.set(totalAllocSize);
    }
    @Override
    public void onDeallocThreshold(long totalAllocSize) {
      deallocInvocations.getAndIncrement();
    }
  };
  Rmm.setEventHandler(handler);
  DeviceMemoryBuffer[] addrs = new DeviceMemoryBuffer[5];
  try {
    // 6 KiB total: below both thresholds, no callback.
    addrs[0] = Rmm.alloc(6 * 1024);
    assertEquals(0, allocInvocations.get());
    // 8 KiB total: crosses the 8 KiB threshold.
    addrs[1] = Rmm.alloc(2 * 1024);
    assertEquals(1, allocInvocations.get());
    assertEquals(8 * 1024, allocated.get());
    // 29 KiB total: between thresholds, no new callback.
    addrs[2] = Rmm.alloc(21 * 1024);
    assertEquals(1, allocInvocations.get());
    // 37 KiB total: crosses the 32 KiB threshold.
    addrs[3] = Rmm.alloc(8 * 1024);
    assertEquals(2, allocInvocations.get());
    assertEquals(37 * 1024, allocated.get());
    // 45 KiB total: already above the highest threshold, no new callback.
    addrs[4] = Rmm.alloc(8 * 1024);
    assertEquals(2, allocInvocations.get());
  } finally {
    for (DeviceMemoryBuffer addr : addrs) {
      if (addr != null) {
        addr.close();
      }
    }
  }
  assertEquals(2, allocInvocations.get());
  assertEquals(0, deallocInvocations.get());
}
// Registers one allocation threshold (8 KiB, fired when usage crosses it
// upward) and one deallocation threshold (6 KiB, fired when usage crosses it
// downward), then walks a sequence of allocs/frees checking exactly when each
// callback fires and the total-usage value it observes.
@Tag("noSanitizer")
@Test
public void testThresholds() {
  final AtomicInteger allocInvocations = new AtomicInteger(0);
  final AtomicInteger deallocInvocations = new AtomicInteger(0);
  final AtomicLong allocated = new AtomicLong(0);
  Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), 1024 * 1024L);
  RmmEventHandler handler = new RmmEventHandler() {
    @Override
    public boolean onAllocFailure(long sizeRequested, int retryCount) {
      return false;
    }
    @Override
    public long[] getAllocThresholds() {
      return new long[] { 8 * 1024 };
    }
    @Override
    public long[] getDeallocThresholds() {
      return new long[] { 6 * 1024 };
    }
    @Override
    public void onAllocThreshold(long totalAllocSize) {
      allocInvocations.getAndIncrement();
      allocated.set(totalAllocSize);
    }
    @Override
    public void onDeallocThreshold(long totalAllocSize) {
      deallocInvocations.getAndIncrement();
      allocated.set(totalAllocSize);
    }
  };
  Rmm.setEventHandler(handler);
  DeviceMemoryBuffer[] addrs = new DeviceMemoryBuffer[5];
  try {
    // 6 KiB live: below the 8 KiB alloc threshold, no callbacks yet.
    addrs[0] = Rmm.alloc(6 * 1024);
    assertEquals(0, allocInvocations.get());
    assertEquals(0, deallocInvocations.get());
    // Free to 0: crosses the 6 KiB dealloc threshold downward.
    addrs[0].close();
    addrs[0] = null;
    assertEquals(0, allocInvocations.get());
    assertEquals(1, deallocInvocations.get());
    assertEquals(0, allocated.get());
    // 12 KiB live: crosses the 8 KiB alloc threshold upward.
    addrs[0] = Rmm.alloc(12 * 1024);
    assertEquals(1, allocInvocations.get());
    assertEquals(1, deallocInvocations.get());
    assertEquals(12 * 1024, allocated.get());
    // 18 KiB live: already above the alloc threshold, no new callback.
    addrs[1] = Rmm.alloc(6 * 1024);
    assertEquals(1, allocInvocations.get());
    assertEquals(1, deallocInvocations.get());
    // Down to 6 KiB live: does not cross below the 6 KiB dealloc threshold.
    addrs[0].close();
    addrs[0] = null;
    assertEquals(1, allocInvocations.get());
    assertEquals(1, deallocInvocations.get());
    // 10 KiB live: crosses the 8 KiB alloc threshold upward again.
    addrs[0] = Rmm.alloc(4 * 1024);
    assertEquals(2, allocInvocations.get());
    assertEquals(1, deallocInvocations.get());
    assertEquals(10 * 1024, allocated.get());
    // Down to 4 KiB live: crosses the 6 KiB dealloc threshold downward.
    addrs[1].close();
    addrs[1] = null;
    assertEquals(2, allocInvocations.get());
    assertEquals(2, deallocInvocations.get());
    assertEquals(4 * 1024, allocated.get());
    // Down to 0: already below the dealloc threshold, no new callback.
    addrs[0].close();
    addrs[0] = null;
    assertEquals(2, allocInvocations.get());
    assertEquals(2, deallocInvocations.get());
  } finally {
    for (DeviceMemoryBuffer addr : addrs) {
      if (addr != null) {
        addr.close();
      }
    }
  }
  assertEquals(2, allocInvocations.get());
  assertEquals(2, deallocInvocations.get());
}
// Verifies that Java exceptions thrown from each event-handler callback
// propagate out of the corresponding RMM entry point (alloc/close) instead
// of being swallowed by the native layer.
@Tag("noSanitizer")
@Test
public void testExceptionHandling() {
  Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), 1024 * 1024L);
  RmmEventHandler handler = new RmmEventHandler() {
    @Override
    public boolean onAllocFailure(long sizeRequested, int retryCount) {
      throw new AllocFailException();
    }
    @Override
    public long[] getAllocThresholds() {
      return new long[] { 8 * 1024 };
    }
    @Override
    public long[] getDeallocThresholds() {
      return new long[] { 6 * 1024 };
    }
    @Override
    public void onAllocThreshold(long totalAllocSize) {
      throw new AllocThresholdException();
    }
    @Override
    public void onDeallocThreshold(long totalAllocSize) {
      throw new DeallocThresholdException();
    }
  };
  Rmm.setEventHandler(handler);
  // Freeing below 6 KiB fires the dealloc-threshold callback, whose
  // exception surfaces through close().
  DeviceMemoryBuffer addr = Rmm.alloc(6 * 1024);
  assertThrows(DeallocThresholdException.class, addr::close);
  // Crossing the 8 KiB alloc threshold surfaces the alloc-threshold exception.
  assertThrows(AllocThresholdException.class, () -> Rmm.alloc(12 * 1024));
  // An impossible allocation surfaces the alloc-failure exception.
  assertThrows(AllocFailException.class, () -> Rmm.alloc(TOO_MUCH_MEMORY));
}
/**
 * A smoke-test for automatic CUDA device setup for threads calling
 * into cudf. Hard to fully test without requiring multiple CUDA devices.
 *
 * Fix: the original only called {@code executor.shutdown()} on the success
 * path, leaking the executor's thread whenever the submitted task or
 * {@code future.get()} threw; shutdown now happens in a {@code finally}.
 */
@Test
public void testThreadAutoDeviceSetup() throws Exception {
  Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), 1024 * 1024L);
  DeviceMemoryBuffer buff = Rmm.alloc(1024);
  ExecutorService executor = Executors.newSingleThreadExecutor();
  try {
    // The worker thread allocates/frees without having selected a device
    // explicitly; on success it also closes the outer buffer.
    Future<Boolean> future = executor.submit(() -> {
      DeviceMemoryBuffer localBuffer = Rmm.alloc(2048);
      localBuffer.close();
      buff.close();
      return true;
    });
    assertTrue(future.get());
  } catch (Exception t) {
    // The task may not have closed the buffer; release it to avoid a leak.
    buff.close();
    throw t;
  } finally {
    // Always stop the worker thread, even when the test fails.
    executor.shutdown();
  }
}
/** Once RMM is initialized, explicitly switching CUDA devices must fail. */
@Tag("noSanitizer")
@ParameterizedTest
@ValueSource(ints = {
    RmmAllocationMode.CUDA_DEFAULT,
    RmmAllocationMode.POOL,
    RmmAllocationMode.ARENA})
public void testSetDeviceThrowsAfterRmmInit(int rmmAllocMode) {
  Rmm.initialize(rmmAllocMode, Rmm.logToStderr(), 1024 * 1024);
  int nextDevice = Cuda.getDevice() + 1;
  assertThrows(CudfException.class, () -> Cuda.setDevice(nextDevice));
  // Verify that auto set device does not
  Cuda.autoSetDevice();
}
/** A 1 KiB pool serves one 1 KiB buffer; a second allocation must fail. */
@Tag("noSanitizer")
@Test
public void testPoolSize() {
  Rmm.initialize(RmmAllocationMode.POOL, Rmm.logToStderr(), 1024);
  try (DeviceMemoryBuffer first = Rmm.alloc(1024)) {
    assertThrows(OutOfMemoryError.class,
        () -> {
          DeviceMemoryBuffer second = Rmm.alloc(1024);
          second.close();
        });
  }
}
/** The async resource honors its 1 KiB limit: a second 1 KiB alloc fails. */
@Tag("noSanitizer")
@Test
public void testCudaAsyncMemoryResourceSize() {
  try {
    Rmm.initialize(RmmAllocationMode.CUDA_ASYNC, Rmm.logToStderr(), 1024);
  } catch (CudfException e) {
    // CUDA 11.2 introduced cudaMallocAsync, older CUDA Toolkit will skip this test.
    assumeFalse(e.getMessage().contains("cudaMallocAsync not supported"));
    throw e;
  }
  try (DeviceMemoryBuffer first = Rmm.alloc(1024)) {
    assertThrows(OutOfMemoryError.class,
        () -> {
          DeviceMemoryBuffer second = Rmm.alloc(1024);
          second.close();
        });
  }
}
/** Combining CUDA_ASYNC with CUDA_MANAGED_MEMORY must be rejected. */
@Tag("noSanitizer")
@Test
public void testCudaAsyncIsIncompatibleWithManaged() {
  int combinedMode = RmmAllocationMode.CUDA_ASYNC | RmmAllocationMode.CUDA_MANAGED_MEMORY;
  assertThrows(IllegalArgumentException.class,
      () -> Rmm.initialize(combinedMode, Rmm.logToStderr(), 1024));
}
/** CudaMemoryBuffer allocations bypass RMM's byte accounting. */
@Test
public void testCudaMemoryBuffer() {
  Rmm.initialize(RmmAllocationMode.ARENA, Rmm.logToStderr(), 8 * 1024 * 1024);
  try (CudaMemoryBuffer small = CudaMemoryBuffer.allocate(512);
       CudaMemoryBuffer large = CudaMemoryBuffer.allocate(1024)) {
    assertEquals(512, small.length);
    assertEquals(1024, large.length);
    // These buffers are not tracked by the RMM allocation counter.
    assertEquals(0, Rmm.getTotalBytesAllocated());
  }
}
/** Marker exception thrown from the test handler's onAllocFailure callback. */
private static class AllocFailException extends RuntimeException {
}
/** Marker exception thrown from the test handler's onAllocThreshold callback. */
private static class AllocThresholdException extends RuntimeException {
}
/** Marker exception thrown from the test handler's onDeallocThreshold callback. */
private static class DeallocThresholdException extends RuntimeException {
}
/**
 * Convenience base: registers no alloc/dealloc thresholds and provides no-op
 * threshold callbacks, so subclasses only need to override onAllocFailure.
 */
private static abstract class BaseRmmEventHandler implements RmmEventHandler {
  @Override
  public long[] getAllocThresholds() {
    // null means no allocation thresholds are registered.
    return null;
  }
  @Override
  public long[] getDeallocThresholds() {
    // null means no deallocation thresholds are registered.
    return null;
  }
  @Override
  public void onAllocThreshold(long totalAllocSize) {
    // No-op: never invoked because no thresholds are registered.
  }
  @Override
  public void onDeallocThreshold(long totalAllocSize) {
    // No-op: never invoked because no thresholds are registered.
  }
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ReductionTest.java
|
/*
*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import com.google.common.collect.Lists;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.util.EnumSet;
import java.util.List;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
class ReductionTest extends CudfTestBase {
// Tolerances for comparing double/float reduction results.
public static final double DELTAD = 0.00001;
public static final float DELTAF = 0.001f;
// reduction operations that produce a floating point value
private static final EnumSet<Aggregation.Kind> FLOAT_REDUCTIONS = EnumSet.of(
    Aggregation.Kind.MEAN,
    Aggregation.Kind.STD,
    Aggregation.Kind.VARIANCE,
    Aggregation.Kind.QUANTILE);
// reduction operations that produce a boolean value
private static final EnumSet<Aggregation.Kind> BOOL_REDUCTIONS = EnumSet.of(
    Aggregation.Kind.ANY,
    Aggregation.Kind.ALL);
/**
 * Builds the expected {@link Scalar} for a reduction test case.
 *
 * Dispatch order: a null expected value yields a null scalar of the result
 * type; float-producing reductions (MEAN/STD/VARIANCE/QUANTILE) and
 * bool-producing reductions (ANY/ALL) ignore the input column type; otherwise
 * the scalar is built from the column's own type, including LIST where the
 * expected value is a Java array matching the list's element type.
 *
 * @param op             the reduction being tested (its kind drives dispatch)
 * @param dataType       the type of the input column / expected result
 * @param expectedObject boxed expected value, or an array for LIST results
 * @return a Scalar owning its own resources; LIST scalars copy from a
 *         temporary column that is closed before returning
 */
private static Scalar buildExpectedScalar(ReductionAggregation op,
    HostColumnVector.DataType dataType, Object expectedObject) {
  if (expectedObject == null) {
    return Scalar.fromNull(dataType.getType());
  }
  if (FLOAT_REDUCTIONS.contains(op.getWrapped().kind)) {
    // Float-producing reductions return FLOAT32 only for FLOAT32 input,
    // otherwise FLOAT64.
    if (dataType.getType().equals(DType.FLOAT32)) {
      return Scalar.fromFloat((Float) expectedObject);
    }
    return Scalar.fromDouble((Double) expectedObject);
  }
  if (BOOL_REDUCTIONS.contains(op.getWrapped().kind)) {
    return Scalar.fromBool((Boolean) expectedObject);
  }
  switch (dataType.getType().typeId) {
    case BOOL8:
      return Scalar.fromBool((Boolean) expectedObject);
    case INT8:
      return Scalar.fromByte((Byte) expectedObject);
    case INT16:
      return Scalar.fromShort((Short) expectedObject);
    case INT32:
      return Scalar.fromInt((Integer) expectedObject);
    case INT64:
      return Scalar.fromLong((Long) expectedObject);
    case FLOAT32:
      return Scalar.fromFloat((Float) expectedObject);
    case FLOAT64:
      return Scalar.fromDouble((Double) expectedObject);
    case TIMESTAMP_DAYS:
      return Scalar.timestampDaysFromInt((Integer) expectedObject);
    case TIMESTAMP_SECONDS:
    case TIMESTAMP_MILLISECONDS:
    case TIMESTAMP_MICROSECONDS:
    case TIMESTAMP_NANOSECONDS:
      return Scalar.timestampFromLong(dataType.getType(), (Long) expectedObject);
    case STRING:
      return Scalar.fromString((String) expectedObject);
    case LIST:
      // Build a temporary column of the list's element type, wrap it in a
      // list scalar, then close the column (the scalar copies the data).
      HostColumnVector.DataType et = dataType.getChild(0);
      ColumnVector col = null;
      try {
        switch (et.getType().typeId) {
          case BOOL8:
            // Nullable element types take boxed arrays, non-nullable take
            // primitive arrays (same pattern for each numeric case below).
            col = et.isNullable() ? ColumnVector.fromBoxedBooleans((Boolean[]) expectedObject) :
                ColumnVector.fromBooleans((boolean[]) expectedObject);
            return Scalar.listFromColumnView(col);
          case INT8:
            col = et.isNullable() ? ColumnVector.fromBoxedBytes((Byte[]) expectedObject) :
                ColumnVector.fromBytes((byte[]) expectedObject);
            return Scalar.listFromColumnView(col);
          case INT16:
            col = et.isNullable() ? ColumnVector.fromBoxedShorts((Short[]) expectedObject) :
                ColumnVector.fromShorts((short[]) expectedObject);
            return Scalar.listFromColumnView(col);
          case INT32:
            col = et.isNullable() ? ColumnVector.fromBoxedInts((Integer[]) expectedObject) :
                ColumnVector.fromInts((int[]) expectedObject);
            return Scalar.listFromColumnView(col);
          case INT64:
            col = et.isNullable() ? ColumnVector.fromBoxedLongs((Long[]) expectedObject) :
                ColumnVector.fromLongs((long[]) expectedObject);
            return Scalar.listFromColumnView(col);
          case FLOAT32:
            col = et.isNullable() ? ColumnVector.fromBoxedFloats((Float[]) expectedObject) :
                ColumnVector.fromFloats((float[]) expectedObject);
            return Scalar.listFromColumnView(col);
          case FLOAT64:
            col = et.isNullable() ? ColumnVector.fromBoxedDoubles((Double[]) expectedObject) :
                ColumnVector.fromDoubles((double[]) expectedObject);
            return Scalar.listFromColumnView(col);
          case STRING:
            col = ColumnVector.fromStrings((String[]) expectedObject);
            return Scalar.listFromColumnView(col);
          default:
            throw new IllegalArgumentException("Unexpected element type of List: " + et);
        }
      } finally {
        // Always release the temporary column, even on the return paths.
        if (col != null) {
          col.close();
        }
      }
    default:
      throw new IllegalArgumentException("Unexpected type: " + dataType);
  }
}
/**
 * Reduction test cases over a nullable BOOL8 column. Each case supplies the
 * aggregation, the input values, the expected result type, the expected scalar
 * value, and a tolerance used for floating-point comparisons.
 */
private static Stream<Arguments> createBooleanParams() {
  Boolean[] vals = new Boolean[]{true, true, null, false, true, false, null};
  HostColumnVector.DataType bool = new HostColumnVector.BasicType(true, DType.BOOL8);
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.sum(), new Boolean[0], bool, null, 0.),
      Arguments.of(ReductionAggregation.sum(), new Boolean[]{null, null, null}, bool, null, 0.),
      Arguments.of(ReductionAggregation.sum(), vals, bool, true, 0.),
      Arguments.of(ReductionAggregation.min(), vals, bool, false, 0.),
      Arguments.of(ReductionAggregation.max(), vals, bool, true, 0.),
      Arguments.of(ReductionAggregation.product(), vals, bool, false, 0.),
      Arguments.of(ReductionAggregation.sumOfSquares(), vals, bool, true, 0.),
      // Statistical reductions treat the non-null booleans as 0/1 and are
      // compared within the DELTAD percentage tolerance.
      Arguments.of(ReductionAggregation.mean(), vals, bool, 0.6, DELTAD),
      Arguments.of(ReductionAggregation.standardDeviation(), vals, bool, 0.5477225575051662, DELTAD),
      Arguments.of(ReductionAggregation.variance(), vals, bool, 0.3, DELTAD),
      Arguments.of(ReductionAggregation.any(), vals, bool, true, 0.),
      Arguments.of(ReductionAggregation.all(), vals, bool, false, 0.)
  );
}
/**
 * Reduction test cases over a nullable INT8 column. Narrow integer results
 * such as product and sum-of-squares wrap around in 8-bit arithmetic, hence
 * the truncated expected values.
 */
private static Stream<Arguments> createByteParams() {
  Byte[] vals = new Byte[]{-1, 7, 123, null, 50, 60, 100};
  HostColumnVector.DataType int8 = new HostColumnVector.BasicType(true, DType.INT8);
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.sum(), new Byte[0], int8, null, 0.),
      Arguments.of(ReductionAggregation.sum(), new Byte[]{null, null, null}, int8, null, 0.),
      Arguments.of(ReductionAggregation.sum(), vals, int8, (byte) 83, 0.), // 339 truncated to 8 bits
      Arguments.of(ReductionAggregation.min(), vals, int8, (byte) -1, 0.),
      Arguments.of(ReductionAggregation.max(), vals, int8, (byte) 123, 0.),
      Arguments.of(ReductionAggregation.product(), vals, int8, (byte) 160, 0.), // -258300000 mod 2^8
      Arguments.of(ReductionAggregation.sumOfSquares(), vals, int8, (byte) 47, 0.), // 31279 mod 2^8
      Arguments.of(ReductionAggregation.mean(), vals, int8, 56.5, DELTAD),
      Arguments.of(ReductionAggregation.standardDeviation(), vals, int8, 49.24530434467839, DELTAD),
      Arguments.of(ReductionAggregation.variance(), vals, int8, 2425.1, DELTAD),
      Arguments.of(ReductionAggregation.any(), vals, int8, true, 0.),
      Arguments.of(ReductionAggregation.all(), vals, int8, true, 0.)
  );
}
/**
 * Reduction test cases over a nullable INT16 column. Product and
 * sum-of-squares wrap around in 16-bit arithmetic.
 */
private static Stream<Arguments> createShortParams() {
  Short[] vals = new Short[]{-1, 7, 123, null, 50, 60, 100};
  HostColumnVector.DataType int16 = new HostColumnVector.BasicType(true, DType.INT16);
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.sum(), new Short[0], int16, null, 0.),
      Arguments.of(ReductionAggregation.sum(), new Short[]{null, null, null}, int16, null, 0.),
      Arguments.of(ReductionAggregation.sum(), vals, int16, (short) 339, 0.),
      Arguments.of(ReductionAggregation.min(), vals, int16, (short) -1, 0.),
      Arguments.of(ReductionAggregation.max(), vals, int16, (short) 123, 0.),
      Arguments.of(ReductionAggregation.product(), vals, int16, (short) -22624, 0.), // -258300000 mod 2^16
      Arguments.of(ReductionAggregation.sumOfSquares(), vals, int16, (short) 31279, 0.),
      Arguments.of(ReductionAggregation.mean(), vals, int16, 56.5, DELTAD),
      Arguments.of(ReductionAggregation.standardDeviation(), vals, int16, 49.24530434467839, DELTAD),
      Arguments.of(ReductionAggregation.variance(), vals, int16, 2425.1, DELTAD),
      Arguments.of(ReductionAggregation.any(), vals, int16, true, 0.),
      Arguments.of(ReductionAggregation.all(), vals, int16, true, 0.)
  );
}
/**
 * Reduction test cases over a nullable INT32 column. The product still wraps
 * in 32-bit arithmetic; the other integer results fit without truncation.
 */
private static Stream<Arguments> createIntParams() {
  Integer[] vals = new Integer[]{-1, 7, 123, null, 50, 60, 100};
  HostColumnVector.BasicType int32 = new HostColumnVector.BasicType(true, DType.INT32);
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.sum(), new Integer[0], int32, null, 0.),
      Arguments.of(ReductionAggregation.sum(), new Integer[]{null, null, null}, int32, null, 0.),
      Arguments.of(ReductionAggregation.sum(), vals, int32, 339, 0.),
      Arguments.of(ReductionAggregation.min(), vals, int32, -1, 0.),
      Arguments.of(ReductionAggregation.max(), vals, int32, 123, 0.),
      Arguments.of(ReductionAggregation.product(), vals, int32, -258300000, 0.),
      Arguments.of(ReductionAggregation.sumOfSquares(), vals, int32, 31279, 0.),
      Arguments.of(ReductionAggregation.mean(), vals, int32, 56.5, DELTAD),
      Arguments.of(ReductionAggregation.standardDeviation(), vals, int32, 49.24530434467839, DELTAD),
      Arguments.of(ReductionAggregation.variance(), vals, int32, 2425.1, DELTAD),
      Arguments.of(ReductionAggregation.any(), vals, int32, true, 0.),
      Arguments.of(ReductionAggregation.all(), vals, int32, true, 0.)
  );
}
/**
 * Reduction test cases over a nullable INT64 column. Also exercises quantile
 * reductions; expected values match linear interpolation between the sorted
 * non-null inputs (e.g. the median of {-1, 7, 50, 60, 100, 123} is 55.0).
 */
private static Stream<Arguments> createLongParams() {
  Long[] vals = new Long[]{-1L, 7L, 123L, null, 50L, 60L, 100L};
  HostColumnVector.BasicType int64 = new HostColumnVector.BasicType(true, DType.INT64);
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.sum(), new Long[0], int64, null, 0.),
      Arguments.of(ReductionAggregation.sum(), new Long[]{null, null, null}, int64, null, 0.),
      Arguments.of(ReductionAggregation.sum(), vals, int64, 339L, 0.),
      Arguments.of(ReductionAggregation.min(), vals, int64, -1L, 0.),
      Arguments.of(ReductionAggregation.max(), vals, int64, 123L, 0.),
      Arguments.of(ReductionAggregation.product(), vals, int64, -258300000L, 0.),
      Arguments.of(ReductionAggregation.sumOfSquares(), vals, int64, 31279L, 0.),
      Arguments.of(ReductionAggregation.mean(), vals, int64, 56.5, DELTAD),
      Arguments.of(ReductionAggregation.standardDeviation(), vals, int64, 49.24530434467839, DELTAD),
      Arguments.of(ReductionAggregation.variance(), vals, int64, 2425.1, DELTAD),
      Arguments.of(ReductionAggregation.any(), vals, int64, true, 0.),
      Arguments.of(ReductionAggregation.all(), vals, int64, true, 0.),
      Arguments.of(ReductionAggregation.quantile(0.5), vals, int64, 55.0, DELTAD),
      Arguments.of(ReductionAggregation.quantile(0.9), vals, int64, 111.5, DELTAD)
  );
}
/**
 * Reduction test cases over a nullable FLOAT32 column, including collect-list
 * and collect-set cases that cover each combination of null and NaN equality
 * semantics.
 */
private static Stream<Arguments> createFloatParams() {
  Float[] vals = new Float[]{-1f, 7f, 123f, null, 50f, 60f, 100f};
  Float[] notNulls = new Float[]{-1f, 7f, 123f, 50f, 60f, 100f};
  // Duplicates, nulls, and NaNs to exercise the set-deduplication options.
  Float[] repeats = new Float[]{Float.MIN_VALUE, 7f, 7f, null, null, Float.NaN, Float.NaN, 50f, 50f, 100f};
  HostColumnVector.BasicType fp32 = new HostColumnVector.BasicType(true, DType.FLOAT32);
  HostColumnVector.DataType listOfFloat = new HostColumnVector.ListType(
      true, new HostColumnVector.BasicType(true, DType.FLOAT32));
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.sum(), new Float[0], fp32, null, 0f),
      Arguments.of(ReductionAggregation.sum(), new Float[]{null, null, null}, fp32, null, 0f),
      Arguments.of(ReductionAggregation.sum(), vals, fp32, 339f, 0f),
      Arguments.of(ReductionAggregation.min(), vals, fp32, -1f, 0f),
      Arguments.of(ReductionAggregation.max(), vals, fp32, 123f, 0f),
      Arguments.of(ReductionAggregation.product(), vals, fp32, -258300000f, 0f),
      Arguments.of(ReductionAggregation.sumOfSquares(), vals, fp32, 31279f, 0f),
      Arguments.of(ReductionAggregation.mean(), vals, fp32, 56.5f, DELTAF),
      Arguments.of(ReductionAggregation.standardDeviation(), vals, fp32, 49.24530434467839f, DELTAF),
      Arguments.of(ReductionAggregation.variance(), vals, fp32, 2425.1f, DELTAF),
      Arguments.of(ReductionAggregation.any(), vals, fp32, true, 0f),
      Arguments.of(ReductionAggregation.all(), vals, fp32, true, 0f),
      // collectList with INCLUDE keeps nulls; the no-arg overload drops them here.
      Arguments.of(ReductionAggregation.collectList(NullPolicy.INCLUDE), vals, listOfFloat, vals, 0f),
      Arguments.of(ReductionAggregation.collectList(), vals, listOfFloat, notNulls, 0f),
      // collectSet: the expected contents vary with whether nulls and NaNs are
      // each treated as equal to one another.
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.EXCLUDE, NullEquality.EQUAL, NaNEquality.ALL_EQUAL),
          repeats, listOfFloat,
          new Float[]{Float.MIN_VALUE, 7f, 50f, 100f, Float.NaN}, 0f),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.EQUAL, NaNEquality.ALL_EQUAL),
          repeats, listOfFloat,
          new Float[]{Float.MIN_VALUE, 7f, 50f, 100f, Float.NaN, null}, 0f),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.UNEQUAL, NaNEquality.ALL_EQUAL),
          repeats, listOfFloat,
          new Float[]{Float.MIN_VALUE, 7f, 50f, 100f, Float.NaN, null, null}, 0f),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.EQUAL, NaNEquality.UNEQUAL),
          repeats, listOfFloat,
          new Float[]{Float.MIN_VALUE, 7f, 50f, 100f, Float.NaN, Float.NaN, null}, 0f),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.UNEQUAL, NaNEquality.UNEQUAL),
          repeats, listOfFloat,
          new Float[]{Float.MIN_VALUE, 7f, 50f, 100f, Float.NaN, Float.NaN, null, null}, 0f),
      Arguments.of(ReductionAggregation.collectSet(),
          repeats, listOfFloat,
          new Float[]{Float.MIN_VALUE, 7f, 50f, 100f, Float.NaN, Float.NaN}, 0f)
  );
}
/**
 * Reduction test cases over a nullable FLOAT64 column. Mirrors the FLOAT32
 * cases and additionally exercises quantile reductions.
 */
private static Stream<Arguments> createDoubleParams() {
  Double[] vals = new Double[]{-1., 7., 123., null, 50., 60., 100.};
  Double[] notNulls = new Double[]{-1., 7., 123., 50., 60., 100.};
  // Duplicates, nulls, and NaNs to exercise the set-deduplication options.
  Double[] repeats = new Double[]{Double.MIN_VALUE, 7., 7., null, null, Double.NaN, Double.NaN, 50., 50., 100.};
  HostColumnVector.BasicType fp64 = new HostColumnVector.BasicType(true, DType.FLOAT64);
  HostColumnVector.DataType listOfDouble = new HostColumnVector.ListType(
      true, new HostColumnVector.BasicType(true, DType.FLOAT64));
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.sum(), new Double[0], fp64, null, 0.),
      Arguments.of(ReductionAggregation.sum(), new Double[]{null, null, null}, fp64, null, 0.),
      Arguments.of(ReductionAggregation.sum(), vals, fp64, 339., 0.),
      Arguments.of(ReductionAggregation.min(), vals, fp64, -1., 0.),
      Arguments.of(ReductionAggregation.max(), vals, fp64, 123., 0.),
      Arguments.of(ReductionAggregation.product(), vals, fp64, -258300000., 0.),
      Arguments.of(ReductionAggregation.sumOfSquares(), vals, fp64, 31279., 0.),
      Arguments.of(ReductionAggregation.mean(), vals, fp64, 56.5, DELTAD),
      Arguments.of(ReductionAggregation.standardDeviation(), vals, fp64, 49.24530434467839, DELTAD),
      Arguments.of(ReductionAggregation.variance(), vals, fp64, 2425.1, DELTAD),
      Arguments.of(ReductionAggregation.any(), vals, fp64, true, 0.),
      Arguments.of(ReductionAggregation.all(), vals, fp64, true, 0.),
      // Quantiles interpolate between the sorted non-null values.
      Arguments.of(ReductionAggregation.quantile(0.5), vals, fp64, 55.0, DELTAD),
      Arguments.of(ReductionAggregation.quantile(0.9), vals, fp64, 111.5, DELTAD),
      Arguments.of(ReductionAggregation.collectList(NullPolicy.INCLUDE), vals, listOfDouble, vals, 0.),
      Arguments.of(ReductionAggregation.collectList(NullPolicy.EXCLUDE), vals, listOfDouble, notNulls, 0.),
      // collectSet: expected contents vary with null/NaN equality semantics.
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.EXCLUDE, NullEquality.EQUAL, NaNEquality.ALL_EQUAL),
          repeats, listOfDouble,
          new Double[]{Double.MIN_VALUE, 7., 50., 100., Double.NaN}, 0.),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.EQUAL, NaNEquality.ALL_EQUAL),
          repeats, listOfDouble,
          new Double[]{Double.MIN_VALUE, 7., 50., 100., Double.NaN, null}, 0.),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.UNEQUAL, NaNEquality.ALL_EQUAL),
          repeats, listOfDouble,
          new Double[]{Double.MIN_VALUE, 7., 50., 100., Double.NaN, null, null}, 0.),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.EQUAL, NaNEquality.UNEQUAL),
          repeats, listOfDouble,
          new Double[]{Double.MIN_VALUE, 7., 50., 100., Double.NaN, Double.NaN, null}, 0.),
      Arguments.of(ReductionAggregation.collectSet(
          NullPolicy.INCLUDE, NullEquality.UNEQUAL, NaNEquality.UNEQUAL),
          repeats, listOfDouble,
          new Double[]{Double.MIN_VALUE, 7., 50., 100., Double.NaN, Double.NaN, null, null}, 0.),
      Arguments.of(ReductionAggregation.collectSet(),
          repeats, listOfDouble,
          new Double[]{Double.MIN_VALUE, 7., 50., 100., Double.NaN, Double.NaN}, 0.)
  );
}
/**
 * Min/max reduction cases over a nullable TIMESTAMP_DAYS column. Day-resolution
 * timestamps are 32-bit, so the raw values are Integers rather than Longs.
 */
private static Stream<Arguments> createTimestampDaysParams() {
  Integer[] vals = new Integer[]{-1, 7, 123, null, 50, 60, 100};
  HostColumnVector.BasicType tsDay = new HostColumnVector.BasicType(true, DType.TIMESTAMP_DAYS);
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.max(), new Integer[0], tsDay, null),
      Arguments.of(ReductionAggregation.max(), new Integer[]{null, null, null}, tsDay, null),
      Arguments.of(ReductionAggregation.max(), vals, tsDay, 123),
      Arguments.of(ReductionAggregation.min(), vals, tsDay, -1)
  );
}
/**
 * Builds the min/max reduction cases shared by every 64-bit timestamp
 * resolution (seconds through nanoseconds).
 *
 * @param type the timestamp column type the cases should be built for
 */
private static Stream<Arguments> createTimestampResolutionParams(HostColumnVector.BasicType type) {
  Long[] timestamps = new Long[]{-1L, 7L, 123L, null, 50L, 60L, 100L};
  Long[] allNulls = new Long[]{null, null, null};
  return Stream.of(
      // Empty and all-null inputs reduce to a null scalar.
      Arguments.of(ReductionAggregation.max(), new Long[0], type, null),
      Arguments.of(ReductionAggregation.max(), allNulls, type, null),
      Arguments.of(ReductionAggregation.min(), timestamps, type, -1L),
      Arguments.of(ReductionAggregation.max(), timestamps, type, 123L));
}
/** Shared timestamp cases specialized to second resolution. */
private static Stream<Arguments> createTimestampSecondsParams() {
  HostColumnVector.BasicType type =
      new HostColumnVector.BasicType(true, DType.TIMESTAMP_SECONDS);
  return createTimestampResolutionParams(type);
}
/** Shared timestamp cases specialized to millisecond resolution. */
private static Stream<Arguments> createTimestampMilliSecondsParams() {
  HostColumnVector.BasicType type =
      new HostColumnVector.BasicType(true, DType.TIMESTAMP_MILLISECONDS);
  return createTimestampResolutionParams(type);
}
/** Shared timestamp cases specialized to microsecond resolution. */
private static Stream<Arguments> createTimestampMicroSecondsParams() {
  HostColumnVector.BasicType type =
      new HostColumnVector.BasicType(true, DType.TIMESTAMP_MICROSECONDS);
  return createTimestampResolutionParams(type);
}
/** Shared timestamp cases specialized to nanosecond resolution. */
private static Stream<Arguments> createTimestampNanoSecondsParams() {
  HostColumnVector.BasicType type =
      new HostColumnVector.BasicType(true, DType.TIMESTAMP_NANOSECONDS);
  return createTimestampResolutionParams(type);
}
/**
 * Cases where each input row is a list of floats and the reduction merges the
 * rows into a single list scalar: mergeLists concatenates all rows keeping
 * duplicates, while mergeSets deduplicates according to the null/NaN equality
 * options.
 */
private static Stream<Arguments> createFloatArrayParams() {
  List<Float>[] inputs = new List[]{
      Lists.newArrayList(-1f, 7f, null),
      Lists.newArrayList(7f, 50f, 60f, Float.NaN),
      Lists.newArrayList(),
      Lists.newArrayList(60f, 100f, Float.NaN, null)
  };
  HostColumnVector.DataType fpList = new HostColumnVector.ListType(
      true, new HostColumnVector.BasicType(true, DType.FLOAT32));
  return Stream.of(
      // Plain concatenation of all rows, duplicates and nulls preserved.
      Arguments.of(ReductionAggregation.mergeLists(), inputs, fpList,
          new Float[]{-1f, 7f, null,
              7f, 50f, 60f, Float.NaN,
              60f, 100f, Float.NaN, null}, 0f),
      // Set merges: expected contents vary with null/NaN equality semantics.
      Arguments.of(ReductionAggregation.mergeSets(NullEquality.EQUAL, NaNEquality.ALL_EQUAL),
          inputs, fpList,
          new Float[]{-1f, 7f, 50f, 60f, 100f, Float.NaN, null}, 0f),
      Arguments.of(ReductionAggregation.mergeSets(NullEquality.UNEQUAL, NaNEquality.ALL_EQUAL),
          inputs, fpList,
          new Float[]{-1f, 7f, 50f, 60f, 100f, Float.NaN, null, null}, 0f),
      Arguments.of(ReductionAggregation.mergeSets(NullEquality.EQUAL, NaNEquality.UNEQUAL),
          inputs, fpList,
          new Float[]{-1f, 7f, 50f, 60f, 100f, Float.NaN, Float.NaN, null}, 0f),
      Arguments.of(ReductionAggregation.mergeSets(),
          inputs, fpList,
          new Float[]{-1f, 7f, 50f, 60f, 100f, Float.NaN, Float.NaN, null, null}, 0f)
  );
}
/**
 * Compares an expected and actual reduction result. Floating-point reductions
 * are compared within a percentage tolerance, list results are compared after
 * sorting (ordering is not asserted), and everything else must match exactly.
 */
private static void assertEqualsDelta(ReductionAggregation op, Scalar expected, Scalar result,
    Double percentage) {
  if (FLOAT_REDUCTIONS.contains(op.getWrapped().kind)) {
    assertEqualsWithinPercentage(expected.getDouble(), result.getDouble(), percentage);
    return;
  }
  if (expected.getType().typeId == DType.DTypeEnum.LIST) {
    // Sort both list scalars so the comparison ignores element order.
    try (ColumnVector lhs = ColumnVector.fromScalar(expected, 1);
         ColumnVector rhs = ColumnVector.fromScalar(result, 1);
         ColumnVector lhsSorted = lhs.listSortRows(false, false);
         ColumnVector rhsSorted = rhs.listSortRows(false, false)) {
      AssertUtils.assertColumnsAreEqual(lhsSorted, rhsSorted);
    }
    return;
  }
  assertEquals(expected, result);
}
/**
 * Float overload of the delta comparison: floating-point reductions compare
 * within a percentage tolerance, list results compare after sorting, and all
 * other results must match exactly.
 */
private static void assertEqualsDelta(ReductionAggregation op, Scalar expected, Scalar result,
    Float percentage) {
  if (FLOAT_REDUCTIONS.contains(op.getWrapped().kind)) {
    assertEqualsWithinPercentage(expected.getFloat(), result.getFloat(), percentage);
    return;
  }
  if (expected.getType().typeId == DType.DTypeEnum.LIST) {
    // Sort both list scalars so the comparison ignores element order.
    try (ColumnVector lhs = ColumnVector.fromScalar(expected, 1);
         ColumnVector rhs = ColumnVector.fromScalar(result, 1);
         ColumnVector lhsSorted = lhs.listSortRows(false, false);
         ColumnVector rhsSorted = rhs.listSortRows(false, false)) {
      AssertUtils.assertColumnsAreEqual(lhsSorted, rhsSorted);
    }
    return;
  }
  assertEquals(expected, result);
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createBooleanParams")
void testBoolean(ReductionAggregation op, Boolean[] values,
    HostColumnVector.DataType type, Object expectedObject, Double delta) {
  // Reduce a boolean column and compare against the pre-built expected scalar.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromBoxedBooleans(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createByteParams")
void testByte(ReductionAggregation op, Byte[] values,
    HostColumnVector.DataType type, Object expectedObject, Double delta) {
  // Reduce an INT8 column and compare against the pre-built expected scalar.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromBoxedBytes(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createShortParams")
void testShort(ReductionAggregation op, Short[] values,
    HostColumnVector.DataType type, Object expectedObject, Double delta) {
  // Reduce an INT16 column and compare against the pre-built expected scalar.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromBoxedShorts(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@ParameterizedTest
@MethodSource("createIntParams")
void testInt(ReductionAggregation op, Integer[] values,
    HostColumnVector.DataType type, Object expectedObject, Double delta) {
  // Reduce an INT32 column and compare against the pre-built expected scalar.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromBoxedInts(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createLongParams")
void testLong(ReductionAggregation op, Long[] values,
    HostColumnVector.DataType type, Object expectedObject, Double delta) {
  // Reduce an INT64 column and compare against the pre-built expected scalar.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromBoxedLongs(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@ParameterizedTest
@MethodSource("createFloatParams")
void testFloat(ReductionAggregation op, Float[] values,
    HostColumnVector.DataType type, Object expectedObject, Float delta) {
  // Reduce a FLOAT32 column and compare against the pre-built expected scalar.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromBoxedFloats(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createDoubleParams")
void testDouble(ReductionAggregation op, Double[] values,
    HostColumnVector.DataType type, Object expectedObject, Double delta) {
  // Reduce a FLOAT64 column and compare against the pre-built expected scalar.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromBoxedDoubles(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createTimestampDaysParams")
void testTimestampDays(ReductionAggregation op, Integer[] values,
    HostColumnVector.DataType type, Object expectedObject) {
  // Day-resolution timestamp reductions must match the expected scalar exactly.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.timestampDaysFromBoxedInts(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEquals(expected, actual);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createTimestampSecondsParams")
void testTimestampSeconds(ReductionAggregation op, Long[] values,
    HostColumnVector.DataType type, Object expectedObject) {
  // Second-resolution timestamp reductions must match the expected scalar exactly.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.timestampSecondsFromBoxedLongs(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEquals(expected, actual);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createTimestampMilliSecondsParams")
void testTimestampMilliseconds(ReductionAggregation op, Long[] values,
    HostColumnVector.DataType type, Object expectedObject) {
  // Millisecond-resolution timestamp reductions must match exactly.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.timestampMilliSecondsFromBoxedLongs(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEquals(expected, actual);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createTimestampMicroSecondsParams")
void testTimestampMicroseconds(ReductionAggregation op, Long[] values,
    HostColumnVector.DataType type, Object expectedObject) {
  // Microsecond-resolution timestamp reductions must match exactly.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.timestampMicroSecondsFromBoxedLongs(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEquals(expected, actual);
  }
}
@Tag("noSanitizer")
@ParameterizedTest
@MethodSource("createTimestampNanoSecondsParams")
void testTimestampNanoseconds(ReductionAggregation op, Long[] values,
    HostColumnVector.DataType type, Object expectedObject) {
  // Nanosecond-resolution timestamp reductions must match exactly.
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.timestampNanoSecondsFromBoxedLongs(values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEquals(expected, actual);
  }
}
@ParameterizedTest
@MethodSource("createFloatArrayParams")
void testFloatArray(ReductionAggregation op, List<Float>[] values,
    HostColumnVector.DataType type, Object expectedObject, Float delta) {
  // Each row is a list of floats; the reduction merges them into a list scalar.
  HostColumnVector.DataType rowType = new HostColumnVector.ListType(
      true, new HostColumnVector.BasicType(true, DType.FLOAT32));
  try (Scalar expected = buildExpectedScalar(op, type, expectedObject);
       ColumnVector input = ColumnVector.fromLists(rowType, values);
       Scalar actual = input.reduce(op, expected.getType())) {
    assertEqualsDelta(op, expected, actual, delta);
  }
}
@Test
void testWithSetOutputType() {
  // Each reduction below widens the INT8 input {1, 2, 3, 4} to an explicitly
  // requested wider output type.
  try (Scalar expected = Scalar.fromLong(24L); // 1 * 2 * 3 * 4
       ColumnVector input = ColumnVector.fromBytes(new byte[]{1, 2, 3, 4});
       Scalar actual = input.product(DType.INT64)) {
    assertEquals(expected, actual);
  }
  try (Scalar expected = Scalar.fromLong(10L); // 1 + 2 + 3 + 4
       ColumnVector input = ColumnVector.fromBytes(new byte[]{1, 2, 3, 4});
       Scalar actual = input.sum(DType.INT64)) {
    assertEquals(expected, actual);
  }
  try (Scalar expected = Scalar.fromLong(30L); // 1 + 4 + 9 + 16
       ColumnVector input = ColumnVector.fromBytes(new byte[]{1, 2, 3, 4});
       Scalar actual = input.sumOfSquares(DType.INT64)) {
    assertEquals(expected, actual);
  }
  try (Scalar expected = Scalar.fromFloat(2.5f); // (1 + 2 + 3 + 4) / 4
       ColumnVector input = ColumnVector.fromBytes(new byte[]{1, 2, 3, 4});
       Scalar actual = input.mean(DType.FLOAT32)) {
    assertEquals(expected, actual);
  }
  // Sample variance and standard deviation of {1, 2, 3, 4}.
  try (Scalar expected = Scalar.fromFloat(1.666667f);
       ColumnVector input = ColumnVector.fromBytes(new byte[]{1, 2, 3, 4});
       Scalar actual = input.variance(DType.FLOAT32)) {
    assertEquals(expected, actual);
  }
  try (Scalar expected = Scalar.fromFloat(1.2909945f);
       ColumnVector input = ColumnVector.fromBytes(new byte[]{1, 2, 3, 4});
       Scalar actual = input.standardDeviation(DType.FLOAT32)) {
    assertEquals(expected, actual);
  }
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/Date64ColumnVectorTest.java
|
/*
*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Tests extracting calendar and clock components (year, month, day, hour,
 * minute, second) from millisecond-resolution timestamp columns.
 */
public class Date64ColumnVectorTest extends CudfTestBase {
  // Millisecond timestamps spanning pre- and post-epoch dates.
  private static final long[] DATES = {-131968727238L, //'1965-10-26 14:01:12.762'
      1530705600000L, //'2018-07-04 12:00:00.000'
      1674631932929L}; //'2023-01-25 07:32:12.929'
  /**
   * Copies an extracted component column to the host and checks each row.
   * Takes ownership of {@code component} and closes it when done.
   */
  private static void assertComponentEquals(ColumnVector component, int... expected) {
    try (ColumnVector extracted = component;
         HostColumnVector host = extracted.copyToHost()) {
      for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], host.getShort(i));
      }
    }
  }
  @Test
  public void getYear() {
    try (ColumnVector timestamps = ColumnVector.timestampMilliSecondsFromLongs(DATES)) {
      assertComponentEquals(timestamps.year(), 1965, 2018, 2023);
    }
  }
  @Test
  public void getMonth() {
    try (ColumnVector timestamps = ColumnVector.timestampMilliSecondsFromLongs(DATES)) {
      assertComponentEquals(timestamps.month(), 10, 7, 1);
    }
  }
  @Test
  public void getDay() {
    try (ColumnVector timestamps = ColumnVector.timestampMilliSecondsFromLongs(DATES)) {
      assertComponentEquals(timestamps.day(), 26, 4, 25);
    }
  }
  @Test
  public void getHour() {
    try (ColumnVector timestamps = ColumnVector.timestampMilliSecondsFromLongs(DATES)) {
      assertComponentEquals(timestamps.hour(), 14, 12, 7);
    }
  }
  @Test
  public void getMinute() {
    try (ColumnVector timestamps = ColumnVector.timestampMilliSecondsFromLongs(DATES)) {
      assertComponentEquals(timestamps.minute(), 1, 0, 32);
    }
  }
  @Test
  public void getSecond() {
    try (ColumnVector timestamps = ColumnVector.timestampMilliSecondsFromLongs(DATES)) {
      assertComponentEquals(timestamps.second(), 12, 0, 12);
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/Aggregation128UtilsTest.java
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.junit.jupiter.api.Test;
import java.math.BigInteger;
/**
 * Tests for Aggregation128Utils: extracting 32-bit chunks from a DECIMAL128
 * column and recombining per-chunk 64-bit sums back into a DECIMAL128 result
 * with an overflow indicator.
 */
public class Aggregation128UtilsTest extends CudfTestBase {
  @Test
  public void testExtractInt32Chunks() {
    // Three copies of the same 128-bit value bracketed by nulls, to verify
    // nulls propagate into every extracted chunk column.
    BigInteger[] intvals = new BigInteger[] {
        null,
        new BigInteger("123456789abcdef0f0debc9a78563412", 16),
        new BigInteger("123456789abcdef0f0debc9a78563412", 16),
        new BigInteger("123456789abcdef0f0debc9a78563412", 16),
        null
    };
    // Chunk index 0 is the least-significant 32 bits (0x78563412) and index 3
    // the most significant (0x12345678), which is extracted as signed INT32.
    try (ColumnVector cv = ColumnVector.decimalFromBigInt(-38, intvals);
         ColumnVector chunk1 = Aggregation128Utils.extractInt32Chunk(cv, DType.UINT32, 0);
         ColumnVector chunk2 = Aggregation128Utils.extractInt32Chunk(cv, DType.UINT32, 1);
         ColumnVector chunk3 = Aggregation128Utils.extractInt32Chunk(cv, DType.UINT32, 2);
         ColumnVector chunk4 = Aggregation128Utils.extractInt32Chunk(cv, DType.INT32, 3);
         Table actualChunks = new Table(chunk1, chunk2, chunk3, chunk4);
         // Expected values are the corresponding 32-bit slices; negative
         // literals below are the signed reinterpretation of 0xf0debc9a etc.
         ColumnVector expectedChunk1 = ColumnVector.fromBoxedUnsignedInts(
             null, 0x78563412, 0x78563412, 0x78563412, null);
         ColumnVector expectedChunk2 = ColumnVector.fromBoxedUnsignedInts(
             null, -0x0f214366, -0x0f214366, -0x0f214366, null);
         ColumnVector expectedChunk3 = ColumnVector.fromBoxedUnsignedInts(
             null, -0x65432110, -0x65432110, -0x65432110, null);
         ColumnVector expectedChunk4 = ColumnVector.fromBoxedInts(
             null, 0x12345678, 0x12345678, 0x12345678, null);
         Table expectedChunks = new Table(expectedChunk1, expectedChunk2, expectedChunk3, expectedChunk4)) {
      AssertUtils.assertTablesAreEqual(expectedChunks, actualChunks);
    }
  }
  @Test
  public void testCombineInt64SumChunks() {
    // Four 64-bit chunk-sum columns (chunks 0-2 unsigned, chunk 3 signed) are
    // combined into a DECIMAL128 column plus a boolean overflow column.
    try (ColumnVector chunks0 = ColumnVector.fromBoxedUnsignedLongs(
        null, 0L, 1L, 0L, 0L, 0x12345678L, 0x123456789L, 0x1234567812345678L, 0xfedcba9876543210L);
        ColumnVector chunks1 = ColumnVector.fromBoxedUnsignedLongs(
        null, 0L, 2L, 0L, 0L, 0x9abcdef0L, 0x9abcdef01L, 0x1122334455667788L, 0xaceaceaceaceaceaL);
        ColumnVector chunks2 = ColumnVector.fromBoxedUnsignedLongs(
        null, 0L, 3L, 0L, 0L, 0x11223344L, 0x556677889L, 0x99aabbccddeeff00L, 0xbdfbdfbdfbdfbdfbL);
        ColumnVector chunks3 = ColumnVector.fromBoxedLongs(
        null, 0L, -1L, 0x100000000L, 0x80000000L, 0x55667788L, 0x01234567L, 0x66554434L, -0x42042043L);
        Table chunksTable = new Table(chunks0, chunks1, chunks2, chunks3);
        Table actual = Aggregation128Utils.combineInt64SumChunks(chunksTable, DType.create(DType.DTypeEnum.DECIMAL128, -20));
        // Rows whose recombined value cannot fit in 128 bits flag overflow.
        ColumnVector expectedOverflows = ColumnVector.fromBoxedBooleans(
        null, false, false, true, true, false, false, true, false);
        ColumnVector expectedValues = ColumnVector.decimalFromBigInt(-20,
        null,
        new BigInteger("0", 16),
        new BigInteger("-fffffffcfffffffdffffffff", 16),
        new BigInteger("0", 16),
        new BigInteger("-80000000000000000000000000000000", 16),
        new BigInteger("55667788112233449abcdef012345678", 16),
        new BigInteger("123456c56677892abcdef0223456789", 16),
        new BigInteger("ef113244679ace0012345678", 16),
        new BigInteger("7bf7bf7ba8ca8ca8e9ab678276543210", 16));
        Table expected = new Table(expectedOverflows, expectedValues)) {
      AssertUtils.assertTablesAreEqual(expected, actual);
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/nvcomp/NvcompTest.java
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.nvcomp;
import ai.rapids.cudf.*;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.Optional;
public class NvcompTest {
private static final HostMemoryAllocator hostMemoryAllocator = DefaultHostMemoryAllocator.get();
private static final Logger log = LoggerFactory.getLogger(ColumnVector.class);
/**
 * Round-trips a batch of device buffers through batched LZ4 compression and
 * asynchronous decompression, then verifies the decompressed bytes match the
 * originals byte-for-byte. The compress/decompress APIs take ownership of
 * their input buffers, so reference counts are managed explicitly below.
 */
@Test
void testBatchedLZ4RoundTripAsync() {
  final Cuda.Stream stream = Cuda.DEFAULT_STREAM;
  final long chunkSize = 64 * 1024;
  // Effectively no cap on the compressor's intermediate buffer size.
  final long targetIntermediteSize = Long.MAX_VALUE;
  final int maxElements = 1024 * 1024 + 1;
  final int numBuffers = 200;
  long[] data = new long[maxElements];
  for (int i = 0; i < maxElements; ++i) {
    data[i] = i;
  }
  try (CloseableArray<DeviceMemoryBuffer> originalBuffers =
      CloseableArray.wrap(new DeviceMemoryBuffer[numBuffers])) {
    // create the batched buffers to compress
    for (int i = 0; i < originalBuffers.size(); i++) {
      originalBuffers.set(i, initBatchBuffer(data, i));
      // Increment the refcount since compression will try to close it
      originalBuffers.get(i).incRefCount();
    }
    // compress and decompress the buffers
    BatchedLZ4Compressor compressor = new BatchedLZ4Compressor(chunkSize, targetIntermediteSize);
    try (CloseableArray<DeviceMemoryBuffer> compressedBuffers =
        CloseableArray.wrap(compressor.compress(originalBuffers.getArray(), stream));
        CloseableArray<DeviceMemoryBuffer> uncompressedBuffers =
        CloseableArray.wrap(new DeviceMemoryBuffer[numBuffers])) {
      // Pre-allocate the outputs at the known uncompressed sizes.
      for (int i = 0; i < numBuffers; i++) {
        uncompressedBuffers.set(i,
            DeviceMemoryBuffer.allocate(originalBuffers.get(i).getLength()));
      }
      // decompress takes ownership of the compressed buffers and will close them
      BatchedLZ4Decompressor.decompressAsync(chunkSize, compressedBuffers.release(),
          uncompressedBuffers.getArray(), stream);
      // check the decompressed results against the original
      for (int i = 0; i < numBuffers; ++i) {
        try (HostMemoryBuffer expected =
            hostMemoryAllocator.allocate(originalBuffers.get(i).getLength());
            HostMemoryBuffer actual =
            hostMemoryAllocator.allocate(uncompressedBuffers.get(i).getLength())) {
          // Byte-array copies below require lengths to fit in an int.
          Assertions.assertTrue(expected.getLength() <= Integer.MAX_VALUE);
          Assertions.assertTrue(actual.getLength() <= Integer.MAX_VALUE);
          Assertions.assertEquals(expected.getLength(), actual.getLength(),
              "uncompressed size mismatch at buffer " + i);
          expected.copyFromDeviceBuffer(originalBuffers.get(i));
          actual.copyFromDeviceBuffer(uncompressedBuffers.get(i));
          byte[] expectedBytes = new byte[(int) expected.getLength()];
          expected.getBytes(expectedBytes, 0, 0, expected.getLength());
          byte[] actualBytes = new byte[(int) actual.getLength()];
          actual.getBytes(actualBytes, 0, 0, actual.getLength());
          Assertions.assertArrayEquals(expectedBytes, actualBytes,
              "mismatch in batch buffer " + i);
        }
      }
    }
  }
}
private void closeBuffer(MemoryBuffer buffer) {
if (buffer != null) {
buffer.close();
}
}
private DeviceMemoryBuffer initBatchBuffer(long[] data, int bufferId) {
// grab a subsection of the data based on buffer ID
int dataStart = 0;
int dataLength = data.length / (bufferId + 1);
switch (bufferId % 3) {
case 0:
// take a portion of the first half
dataLength /= 2;
break;
case 1:
// take a portion of the last half
dataStart = data.length / 2;
dataLength /= 2;
break;
default:
break;
}
long[] bufferData = Arrays.copyOfRange(data, dataStart, dataStart + dataLength + 1);
DeviceMemoryBuffer devBuffer = null;
try (HostMemoryBuffer hmb = hostMemoryAllocator.allocate(bufferData.length * 8)) {
hmb.setLongs(0, bufferData, 0, bufferData.length);
devBuffer = DeviceMemoryBuffer.allocate(hmb.getLength());
devBuffer.copyFromHostBuffer(hmb);
return devBuffer;
} catch (Throwable t) {
closeBuffer(devBuffer);
throw new RuntimeException(t);
}
}
}
| 0 |
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf
|
rapidsai_public_repos/cudf/java/src/test/java/ai/rapids/cudf/ast/CompiledExpressionTest.java
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf.ast;
import ai.rapids.cudf.ColumnVector;
import ai.rapids.cudf.CudfException;
import ai.rapids.cudf.CudfTestBase;
import ai.rapids.cudf.DType;
import ai.rapids.cudf.Table;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
import org.junit.jupiter.params.provider.NullSource;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Stream;
import static ai.rapids.cudf.AssertUtils.assertColumnsAreEqual;
public class CompiledExpressionTest extends CudfTestBase {
  @Test
  public void testColumnReferenceTransform() {
    // A bare column reference expression should evaluate to a copy of the referenced column.
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build()) {
      // use an implicit table reference
      ColumnReference expr = new ColumnReference(1);
      try (CompiledExpression compiledExpr = expr.compile();
           ColumnVector actual = compiledExpr.computeColumn(t)) {
        assertColumnsAreEqual(t.getColumn(1), actual);
      }

      // use an explicit table reference
      expr = new ColumnReference(1, TableReference.LEFT);
      try (CompiledExpression compiledExpr = expr.compile();
           ColumnVector actual = compiledExpr.computeColumn(t)) {
        assertColumnsAreEqual(t.getColumn(1), actual);
      }
    }
  }

  @Test
  public void testInvalidColumnReferenceTransform() {
    // Verify that computeColumn throws when passed an expression operating on TableReference.RIGHT.
    ColumnReference expr = new ColumnReference(1, TableReference.RIGHT);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile()) {
      Assertions.assertThrows(CudfException.class, () -> compiledExpr.computeColumn(t).close());
    }
  }
  @Test
  public void testBooleanLiteralTransform() {
    try (Table t = new Table.TestBuilder().column(true, false, null).build()) {
      // A literal expression ignores the input rows; its value is replicated once per row.
      Literal expr = Literal.ofBoolean(true);
      try (CompiledExpression trueCompiledExpr = expr.compile();
           ColumnVector trueExprActual = trueCompiledExpr.computeColumn(t);
           ColumnVector trueExprExpected = ColumnVector.fromBoxedBooleans(true, true, true)) {
        assertColumnsAreEqual(trueExprExpected, trueExprActual);
      }

      // IDENTITY applied to a null literal should yield an all-null column.
      Literal nullLiteral = Literal.ofBoolean(null);
      UnaryOperation nullExpr = new UnaryOperation(UnaryOperator.IDENTITY, nullLiteral);
      try (CompiledExpression nullCompiledExpr = nullExpr.compile();
           ColumnVector nullExprActual = nullCompiledExpr.computeColumn(t);
           ColumnVector nullExprExpected = ColumnVector.fromBoxedBooleans(null, null, null)) {
        assertColumnsAreEqual(nullExprExpected, nullExprActual);
      }
    }
  }
  // Each literal test below compiles a typed literal expression, evaluates it against
  // a 5-row table (whose contents are irrelevant), and expects the literal's value
  // replicated per row. @NullSource also checks a null literal yields an all-null column.
  @ParameterizedTest
  @NullSource
  @ValueSource(bytes = 0x12)
  public void testByteLiteralTransform(Byte value) {
    Literal expr = Literal.ofByte(value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedBytes(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(shorts = 0x1234)
  public void testShortLiteralTransform(Short value) {
    Literal expr = Literal.ofShort(value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedShorts(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(ints = 0x12345678)
  public void testIntLiteralTransform(Integer value) {
    Literal expr = Literal.ofInt(value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedInts(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testLongLiteralTransform(Long value) {
    Literal expr = Literal.ofLong(value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  // Non-finite values are included to check NaN/infinity survive the literal round trip.
  @ParameterizedTest
  @NullSource
  @ValueSource(floats = { 123456.789f, Float.NaN, Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY} )
  public void testFloatLiteralTransform(Float value) {
    Literal expr = Literal.ofFloat(value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedFloats(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }
@ParameterizedTest
@NullSource
@ValueSource(doubles = { 123456.789f, Double.NaN, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY} )
public void testDoubleLiteralTransform(Double value) {
Literal expr = Literal.ofDouble(value);
try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
CompiledExpression compiledExpr = expr.compile();
ColumnVector actual = compiledExpr.computeColumn(t);
ColumnVector expected = ColumnVector.fromBoxedDoubles(value, value, value, value, value)) {
assertColumnsAreEqual(expected, actual);
}
}
  // Timestamp/duration literal tests: same shape as the numeric literal tests above,
  // but each exercises a distinct chrono-typed literal factory and the matching
  // expected-column builder at every supported time resolution.
  @ParameterizedTest
  @NullSource
  @ValueSource(ints = 0x12345678)
  public void testTimestampDaysLiteralTransform(Integer value) {
    Literal expr = Literal.ofTimestampDaysFromInt(value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.timestampDaysFromBoxedInts(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testTimestampSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofTimestampFromLong(DType.TIMESTAMP_SECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.timestampSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testTimestampMilliSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofTimestampFromLong(DType.TIMESTAMP_MILLISECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.timestampMilliSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testTimestampMicroSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofTimestampFromLong(DType.TIMESTAMP_MICROSECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.timestampMicroSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testTimestampNanoSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofTimestampFromLong(DType.TIMESTAMP_NANOSECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.timestampNanoSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(ints = 0x12345678)
  public void testDurationDaysLiteralTransform(Integer value) {
    Literal expr = Literal.ofDurationDaysFromInt(value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.durationDaysFromBoxedInts(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testDurationSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofDurationFromLong(DType.DURATION_SECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.durationSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testDurationMilliSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofDurationFromLong(DType.DURATION_MILLISECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.durationMilliSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testDurationMicroSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofDurationFromLong(DType.DURATION_MICROSECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.durationMicroSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }

  @ParameterizedTest
  @NullSource
  @ValueSource(longs = 0x1234567890abcdefL)
  public void testDurationNanoSecondsLiteralTransform(Long value) {
    Literal expr = Literal.ofDurationFromLong(DType.DURATION_NANOSECONDS, value);
    try (Table t = new Table.TestBuilder().column(5, 4, 3, 2, 1).column(6, 7, 8, null, 10).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected =
             ColumnVector.durationNanoSecondsFromBoxedLongs(value, value, value, value, value)) {
      assertColumnsAreEqual(expected, actual);
    }
  }
private static <T, R> ArrayList<R> mapArray(T[] input, Function<T, R> func) {
ArrayList<R> result = new ArrayList<>(input.length);
for (T t : input) {
result.add(t == null ? null : func.apply(t));
}
return result;
}
private static <T, U, R> ArrayList<R> mapArray(T[] in1, U[] in2, BiFunction<T, U, R> func) {
assert in1.length == in2.length;
ArrayList<R> result = new ArrayList<>(in1.length);
for (int i = 0; i < in1.length; i++) {
result.add(in1[i] == null || in2[i] == null ? null : func.apply(in1[i], in2[i]));
}
return result;
}
  /**
   * Parameters for {@link #testUnaryDoubleOperationTransform}: each row pairs a
   * unary AST operator with the expected per-element result computed via java.lang.Math.
   * The null input element checks null propagation through every operator.
   */
  private static Stream<Arguments> createUnaryDoubleOperationParams() {
    Double[] input = new Double[] { -5., 4.5, null, 2.7, 1.5 };
    return Stream.of(
        Arguments.of(UnaryOperator.IDENTITY, input, Arrays.asList(input)),
        Arguments.of(UnaryOperator.SIN, input, mapArray(input, Math::sin)),
        Arguments.of(UnaryOperator.COS, input, mapArray(input, Math::cos)),
        Arguments.of(UnaryOperator.TAN, input, mapArray(input, Math::tan)),
        Arguments.of(UnaryOperator.ARCSIN, input, mapArray(input, Math::asin)),
        Arguments.of(UnaryOperator.ARCCOS, input, mapArray(input, Math::acos)),
        Arguments.of(UnaryOperator.ARCTAN, input, mapArray(input, Math::atan)),
        Arguments.of(UnaryOperator.SINH, input, mapArray(input, Math::sinh)),
        Arguments.of(UnaryOperator.COSH, input, mapArray(input, Math::cosh)),
        Arguments.of(UnaryOperator.TANH, input, mapArray(input, Math::tanh)),
        Arguments.of(UnaryOperator.EXP, input, mapArray(input, Math::exp)),
        Arguments.of(UnaryOperator.LOG, input, mapArray(input, Math::log)),
        Arguments.of(UnaryOperator.SQRT, input, mapArray(input, Math::sqrt)),
        Arguments.of(UnaryOperator.CBRT, input, mapArray(input, Math::cbrt)),
        Arguments.of(UnaryOperator.CEIL, input, mapArray(input, Math::ceil)),
        Arguments.of(UnaryOperator.FLOOR, input, mapArray(input, Math::floor)),
        Arguments.of(UnaryOperator.ABS, input, mapArray(input, Math::abs)),
        Arguments.of(UnaryOperator.RINT, input, mapArray(input, Math::rint)));
  }
  // Applies each unary double operator to column 0 and compares against the
  // Math-computed expectation from createUnaryDoubleOperationParams.
  @ParameterizedTest
  @MethodSource("createUnaryDoubleOperationParams")
  void testUnaryDoubleOperationTransform(UnaryOperator op, Double[] input,
                                         List<Double> expectedValues) {
    UnaryOperation expr = new UnaryOperation(op, new ColumnReference(0));
    try (Table t = new Table.TestBuilder().column(input).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedDoubles(
             expectedValues.toArray(new Double[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  @Test
  void testUnaryShortOperationTransform() {
    Short[] input = new Short[] { -5, 4, null, 2, 1 };
    try (Table t = new Table.TestBuilder().column(input).build()) {
      // A plain column reference should reproduce the short column unchanged.
      ColumnReference expr = new ColumnReference(0);
      try (CompiledExpression compiledExpr = expr.compile();
           ColumnVector actual = compiledExpr.computeColumn(t)) {
        assertColumnsAreEqual(t.getColumn(0), actual);
      }

      // BIT_INVERT promotes shorts to ints (expected column is INT32: ~(-5)==4, etc.).
      UnaryOperation expr2 = new UnaryOperation(UnaryOperator.BIT_INVERT, new ColumnReference(0));
      try (CompiledExpression compiledExpr = expr2.compile();
           ColumnVector actual = compiledExpr.computeColumn(t);
           ColumnVector expected = ColumnVector.fromBoxedInts(4, -5, null, -3, -2)) {
        assertColumnsAreEqual(expected, actual);
      }
    }
  }
  /** Operators producing booleans from longs: NOT (truthiness) and IS_NULL (validity). */
  private static Stream<Arguments> createUnaryLogicalOperationParams() {
    Long[] input = new Long[] { -5L, 0L, null, 2L, 1L };
    return Stream.of(
        // NOT propagates the null; IS_NULL is the one operator that maps null to a value.
        Arguments.of(UnaryOperator.NOT, input, Arrays.asList(false, true, null, false, false)),
        Arguments.of(UnaryOperator.IS_NULL, input, Arrays.asList(false, false, true, false, false)));
  }

  @ParameterizedTest
  @MethodSource("createUnaryLogicalOperationParams")
  void testUnaryLogicalOperationTransform(UnaryOperator op, Long[] input,
                                          List<Boolean> expectedValues) {
    UnaryOperation expr = new UnaryOperation(op, new ColumnReference(0));
    try (Table t = new Table.TestBuilder().column(input).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedBooleans(
             expectedValues.toArray(new Boolean[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * Float binary arithmetic cases; expected values are computed pairwise in Java,
   * with nulls propagated by mapArray. PYMOD is the Python-style modulo whose
   * result takes the sign of the divisor.
   */
  private static Stream<Arguments> createBinaryFloatOperationParams() {
    Float[] in1 = new Float[] { -5f, 4.5f, null, 2.7f };
    Float[] in2 = new Float[] { 123f, -456f, null, 0f };
    return Stream.of(
        Arguments.of(BinaryOperator.ADD, in1, in2, mapArray(in1, in2, Float::sum)),
        Arguments.of(BinaryOperator.SUB, in1, in2, mapArray(in1, in2, (a, b) -> a - b)),
        Arguments.of(BinaryOperator.MUL, in1, in2, mapArray(in1, in2, (a, b) -> a * b)),
        Arguments.of(BinaryOperator.DIV, in1, in2, mapArray(in1, in2, (a, b) -> a / b)),
        Arguments.of(BinaryOperator.MOD, in1, in2, mapArray(in1, in2, (a, b) -> a % b)),
        Arguments.of(BinaryOperator.PYMOD, in1, in2, mapArray(in1, in2,
            (a, b) -> ((a % b) + b) % b)),
        Arguments.of(BinaryOperator.POW, in1, in2, mapArray(in1, in2,
            (a, b) -> (float) Math.pow(a, b))));
  }

  @ParameterizedTest
  @MethodSource("createBinaryFloatOperationParams")
  void testBinaryFloatOperationTransform(BinaryOperator op, Float[] in1, Float[] in2,
                                         List<Float> expectedValues) {
    BinaryOperation expr = new BinaryOperation(op,
        new ColumnReference(0),
        new ColumnReference(1));
    try (Table t = new Table.TestBuilder().column(in1).column(in2).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedFloats(
             expectedValues.toArray(new Float[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * Operators that take float inputs but produce double outputs:
   * TRUE_DIV and FLOOR_DIV promote to double before dividing.
   */
  private static Stream<Arguments> createBinaryDoublePromotedOperationParams() {
    Float[] in1 = new Float[] { -5f, 4.5f, null, 2.7f };
    Float[] in2 = new Float[] { 123f, -456f, null, 0f };
    return Stream.of(
        Arguments.of(BinaryOperator.TRUE_DIV, in1, in2, mapArray(in1, in2,
            (a, b) -> (double) a / b)),
        Arguments.of(BinaryOperator.FLOOR_DIV, in1, in2, mapArray(in1, in2,
            (a, b) -> Math.floor(a / b))));
  }

  @ParameterizedTest
  @MethodSource("createBinaryDoublePromotedOperationParams")
  void testBinaryDoublePromotedOperationTransform(BinaryOperator op, Float[] in1, Float[] in2,
                                                  List<Double> expectedValues) {
    BinaryOperation expr = new BinaryOperation(op,
        new ColumnReference(0),
        new ColumnReference(1));
    try (Table t = new Table.TestBuilder().column(in1).column(in2).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         // Note the expected column is double even though the inputs are floats.
         ColumnVector expected = ColumnVector.fromBoxedDoubles(
             expectedValues.toArray(new Double[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /** Integer comparison cases; NULL_EQUAL treats two nulls as equal (row 2 -> true). */
  private static Stream<Arguments> createBinaryComparisonOperationParams() {
    Integer[] in1 = new Integer[] { -5, 4, null, 2, -3 };
    Integer[] in2 = new Integer[] { 123, -456, null, 0, -3 };
    return Stream.of(
        // nulls compare as equal by default
        Arguments.of(BinaryOperator.NULL_EQUAL, in1, in2, Arrays.asList(false, false, true, false, true)),
        Arguments.of(BinaryOperator.NOT_EQUAL, in1, in2, mapArray(in1, in2, (a, b) -> !a.equals(b))),
        Arguments.of(BinaryOperator.LESS, in1, in2, mapArray(in1, in2, (a, b) -> a < b)),
        Arguments.of(BinaryOperator.GREATER, in1, in2, mapArray(in1, in2, (a, b) -> a > b)),
        Arguments.of(BinaryOperator.LESS_EQUAL, in1, in2, mapArray(in1, in2, (a, b) -> a <= b)),
        Arguments.of(BinaryOperator.GREATER_EQUAL, in1, in2, mapArray(in1, in2, (a, b) -> a >= b)));
  }

  @ParameterizedTest
  @MethodSource("createBinaryComparisonOperationParams")
  void testBinaryComparisonOperationTransform(BinaryOperator op, Integer[] in1, Integer[] in2,
                                              List<Boolean> expectedValues) {
    BinaryOperation expr = new BinaryOperation(op,
        new ColumnReference(0),
        new ColumnReference(1));
    try (Table t = new Table.TestBuilder().column(in1).column(in2).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedBooleans(
             expectedValues.toArray(new Boolean[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * String-column-vs-string-literal comparison cases, including a null literal:
   * NULL_EQUAL of null-literal matches only the null row, while ordinary
   * comparisons against a null literal yield all-null results.
   */
  private static Stream<Arguments> createStringLiteralComparisonParams() {
    String[] in1 = new String[] {"a", "bb", null, "ccc", "dddd"};
    String in2 = "ccc";
    return Stream.of(
        // nulls compare as equal by default
        Arguments.of(BinaryOperator.NULL_EQUAL, in1, in2, Arrays.asList(false, false, false, true, false)),
        Arguments.of(BinaryOperator.NOT_EQUAL, in1, in2, mapArray(in1, (a) -> !a.equals(in2))),
        Arguments.of(BinaryOperator.LESS, in1, in2, mapArray(in1, (a) -> a.compareTo(in2) < 0)),
        Arguments.of(BinaryOperator.GREATER, in1, in2, mapArray(in1, (a) -> a.compareTo(in2) > 0)),
        Arguments.of(BinaryOperator.LESS_EQUAL, in1, in2, mapArray(in1, (a) -> a.compareTo(in2) <= 0)),
        Arguments.of(BinaryOperator.GREATER_EQUAL, in1, in2, mapArray(in1, (a) -> a.compareTo(in2) >= 0)),
        // null literal
        Arguments.of(BinaryOperator.NULL_EQUAL, in1, null, Arrays.asList(false, false, true, false, false)),
        Arguments.of(BinaryOperator.NOT_EQUAL, in1, null, Arrays.asList(null, null, null, null, null)),
        Arguments.of(BinaryOperator.LESS, in1, null, Arrays.asList(null, null, null, null, null)));
  }

  @ParameterizedTest
  @MethodSource("createStringLiteralComparisonParams")
  void testStringLiteralComparison(BinaryOperator op, String[] in1, String in2,
                                   List<Boolean> expectedValues) {
    Literal lit = Literal.ofString(in2);
    BinaryOperation expr = new BinaryOperation(op,
        new ColumnReference(0),
        lit);
    try (Table t = new Table.TestBuilder().column(in1).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedBooleans(
             expectedValues.toArray(new Boolean[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /** String column-vs-column comparison cases; null rows follow NULL_EQUAL semantics. */
  private static Stream<Arguments> createBinaryComparisonOperationStringParams() {
    String[] in1 = new String[] {"a", "bb", null, "ccc", "dddd"};
    String[] in2 = new String[] {"aa", "b", null, "ccc", "ddd"};
    return Stream.of(
        // nulls compare as equal by default
        Arguments.of(BinaryOperator.NULL_EQUAL, in1, in2, Arrays.asList(false, false, true, true, false)),
        Arguments.of(BinaryOperator.NOT_EQUAL, in1, in2, mapArray(in1, in2, (a, b) -> !a.equals(b))),
        Arguments.of(BinaryOperator.LESS, in1, in2, mapArray(in1, in2, (a, b) -> a.compareTo(b) < 0)),
        Arguments.of(BinaryOperator.GREATER, in1, in2, mapArray(in1, in2, (a, b) -> a.compareTo(b) > 0)),
        Arguments.of(BinaryOperator.LESS_EQUAL, in1, in2, mapArray(in1, in2, (a, b) -> a.compareTo(b) <= 0)),
        Arguments.of(BinaryOperator.GREATER_EQUAL, in1, in2, mapArray(in1, in2, (a, b) -> a.compareTo(b) >= 0)));
  }

  @ParameterizedTest
  @MethodSource("createBinaryComparisonOperationStringParams")
  void testBinaryComparisonOperationStringTransform(BinaryOperator op, String[] in1, String[] in2,
                                                    List<Boolean> expectedValues) {
    BinaryOperation expr = new BinaryOperation(op,
        new ColumnReference(0),
        new ColumnReference(1));
    try (Table t = new Table.TestBuilder().column(in1).column(in2).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedBooleans(
             expectedValues.toArray(new Boolean[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /** Integer bitwise AND/OR/XOR cases with pairwise-computed Java expectations. */
  private static Stream<Arguments> createBinaryBitwiseOperationParams() {
    Integer[] in1 = new Integer[] { -5, 4, null, 2, -3 };
    Integer[] in2 = new Integer[] { 123, -456, null, 0, -3 };
    return Stream.of(
        Arguments.of(BinaryOperator.BITWISE_AND, in1, in2, mapArray(in1, in2, (a, b) -> a & b)),
        Arguments.of(BinaryOperator.BITWISE_OR, in1, in2, mapArray(in1, in2, (a, b) -> a | b)),
        Arguments.of(BinaryOperator.BITWISE_XOR, in1, in2, mapArray(in1, in2, (a, b) -> a ^ b)));
  }

  @ParameterizedTest
  @MethodSource("createBinaryBitwiseOperationParams")
  void testBinaryBitwiseOperationTransform(BinaryOperator op, Integer[] in1, Integer[] in2,
                                           List<Integer> expectedValues) {
    BinaryOperation expr = new BinaryOperation(op,
        new ColumnReference(0),
        new ColumnReference(1));
    try (Table t = new Table.TestBuilder().column(in1).column(in2).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedInts(
             expectedValues.toArray(new Integer[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  /**
   * Boolean logical cases. LOGICAL_AND/OR propagate nulls via mapArray; the
   * NULL_* variants use Kleene logic (e.g. false AND null == false,
   * true OR null == true), hence the hand-written expectations.
   */
  private static Stream<Arguments> createBinaryBooleanOperationParams() {
    Boolean[] in1 = new Boolean[] { false, true, false, null, true, false };
    Boolean[] in2 = new Boolean[] { true, null, null, null, true, false };
    return Stream.of(
        Arguments.of(BinaryOperator.LOGICAL_AND, in1, in2, mapArray(in1, in2, (a, b) -> a && b)),
        Arguments.of(BinaryOperator.LOGICAL_OR, in1, in2, mapArray(in1, in2, (a, b) -> a || b)),
        Arguments.of(BinaryOperator.NULL_LOGICAL_AND, in1, in2, Arrays.asList(false, null, false, null, true, false)),
        Arguments.of(BinaryOperator.NULL_LOGICAL_OR, in1, in2, Arrays.asList(true, true, null, null, true, false)));
  }

  @ParameterizedTest
  @MethodSource("createBinaryBooleanOperationParams")
  void testBinaryBooleanOperationTransform(BinaryOperator op, Boolean[] in1, Boolean[] in2,
                                           List<Boolean> expectedValues) {
    BinaryOperation expr = new BinaryOperation(op,
        new ColumnReference(0),
        new ColumnReference(1));
    try (Table t = new Table.TestBuilder().column(in1).column(in2).build();
         CompiledExpression compiledExpr = expr.compile();
         ColumnVector actual = compiledExpr.computeColumn(t);
         ColumnVector expected = ColumnVector.fromBoxedBooleans(
             expectedValues.toArray(new Boolean[0]))) {
      assertColumnsAreEqual(expected, actual);
    }
  }
  @Test
  void testMismatchedBinaryOperationTypes() {
    // verify expression fails to transform if operands are not the same type
    BinaryOperation expr = new BinaryOperation(BinaryOperator.ADD,
        new ColumnReference(0),
        new ColumnReference(1));
    // Column 0 is INT32, column 1 is INT64; libcudf's AST does no implicit promotion.
    try (Table t = new Table.TestBuilder().column(1, 2, 3).column(1L, 2L, 3L).build();
         CompiledExpression compiledExpr = expr.compile()) {
      Assertions.assertThrows(CudfException.class, () -> compiledExpr.computeColumn(t).close());
    }
  }
}
| 0 |
rapidsai_public_repos/cudf/java/src/test
|
rapidsai_public_repos/cudf/java/src/test/resources/people.json
|
{"name":"Michael"}
{"name":"Andy", "age":30}
{"name":"Justin", "age":19}
| 0 |
rapidsai_public_repos/cudf/java/src/test
|
rapidsai_public_repos/cudf/java/src/test/resources/people_with_invalid_lines.json
|
{"name":"Michael"}
{"name":"Andy", "age":30}
this_line_is_not_valid
{"name":"Justin", "age":19}
| 0 |
rapidsai_public_repos/cudf/java/src/test
|
rapidsai_public_repos/cudf/java/src/test/resources/simple.csv
|
0,110.0,120,one
1,111.0,121,two
2,112.0,122,three
3,113.0,123,four
4,114.0,124,five
5,115.0,125,six
6,116.0,126,seven𐖸
7,117.0,127,eight뽨
8,118.2,128,nineϨ
9,119.8,129,ten
| 0 |
rapidsai_public_repos/cudf/java
|
rapidsai_public_repos/cudf/java/buildscripts/build-info
|
#!/usr/bin/env bash
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script generates the build info.
# Arguments:
# version - The current version of cudf java code
# Emit build metadata as key=value lines: version, user, git revision/branch,
# and the UTC build timestamp.
# Fix: quote all expansions — unquoted $1/$USER/$(...) undergo word splitting
# and glob expansion, so e.g. a version or branch name containing whitespace
# would be mangled in the output.
echo_build_properties() {
  echo "version=$1"
  echo "user=$USER"
  echo "revision=$(git rev-parse HEAD)"
  echo "branch=$(git rev-parse --abbrev-ref HEAD)"
  echo "date=$(date -u +%Y-%m-%dT%H:%M:%SZ)"
}

echo_build_properties "$1"
| 0 |
rapidsai_public_repos/cudf/conda
|
rapidsai_public_repos/cudf/conda/environments/all_cuda-120_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- pytorch
- conda-forge
- nvidia
dependencies:
- aiobotocore>=2.2.0
- benchmark==1.8.0
- boto3>=1.21.21
- botocore>=1.24.21
- c-compiler
- cachetools
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cramjam
- cuda-cudart-dev
- cuda-nvcc
- cuda-nvrtc-dev
- cuda-nvtx-dev
- cuda-python>=12.0,<13.0a0
- cuda-sanitizer-api
- cuda-version=12.0
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.3
- dask-cuda==24.2.*
- dlpack>=0.5,<0.6.0a0
- doxygen=1.9.1
- fastavro>=0.22.9
- fmt>=9.1.0,<10
- fsspec>=0.6.0
- gcc_linux-64=11.*
- gmock>=1.13.0
- gtest>=1.13.0
- hypothesis
- identify>=2.5.20
- ipython
- libarrow-all==14.0.1.*
- libcufile-dev
- libcurand-dev
- libkvikio==24.2.*
- librdkafka>=1.9.0,<1.10.0a0
- librmm==24.2.*
- make
- mimesis>=4.1.0
- moto>=4.0.8
- msgpack-python
- myst-nb
- nbsphinx
- ninja
- notebook
- numba>=0.57,<0.58
- numpy>=1.21,<1.25
- numpydoc
- nvcomp==3.0.4
- nvtx>=0.2.1
- packaging
- pandas>=1.3,<1.6.0dev0
- pandoc
- pip
- pre-commit
- protobuf>=4.21,<5
- pyarrow==14.0.1.*
- pydata-sphinx-theme!=0.14.2
- pytest
- pytest-benchmark
- pytest-cases
- pytest-cov
- pytest-xdist
- python-confluent-kafka>=1.9.0,<1.10.0a0
- python-snappy>=0.6.0
- python>=3.9,<3.11
- pytorch<1.12.0
- rapids-dask-dependency==24.2.*
- rich
- rmm==24.2.*
- s3fs>=2022.3.0
- scikit-build>=0.13.1
- scipy
- spdlog>=1.11.0,<1.12
- sphinx
- sphinx-autobuild
- sphinx-copybutton
- sphinx-markdown-tables
- sphinxcontrib-websupport
- streamz
- sysroot_linux-64==2.17
- tokenizers==0.13.1
- transformers==4.24.0
- typing_extensions>=4.0.0
- zlib>=1.2.13
- pip:
- git+https://github.com/python-streamz/streamz.git@master
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/cudf/conda
|
rapidsai_public_repos/cudf/conda/environments/all_cuda-118_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- rapidsai-nightly
- dask/label/dev
- pytorch
- conda-forge
- nvidia
dependencies:
- aiobotocore>=2.2.0
- benchmark==1.8.0
- boto3>=1.21.21
- botocore>=1.24.21
- c-compiler
- cachetools
- clang-tools=16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cramjam
- cubinlinker
- cuda-nvtx=11.8
- cuda-python>=11.7.1,<12.0a0
- cuda-sanitizer-api=11.8.86
- cuda-version=11.8
- cudatoolkit
- cupy>=12.0.0
- cxx-compiler
- cython>=3.0.3
- dask-cuda==24.2.*
- dlpack>=0.5,<0.6.0a0
- doxygen=1.9.1
- fastavro>=0.22.9
- fmt>=9.1.0,<10
- fsspec>=0.6.0
- gcc_linux-64=11.*
- gmock>=1.13.0
- gtest>=1.13.0
- hypothesis
- identify>=2.5.20
- ipython
- libarrow-all==14.0.1.*
- libcufile-dev=1.4.0.31
- libcufile=1.4.0.31
- libcurand-dev=10.3.0.86
- libcurand=10.3.0.86
- libkvikio==24.2.*
- librdkafka>=1.9.0,<1.10.0a0
- librmm==24.2.*
- make
- mimesis>=4.1.0
- moto>=4.0.8
- msgpack-python
- myst-nb
- nbsphinx
- ninja
- notebook
- numba>=0.57,<0.58
- numpy>=1.21,<1.25
- numpydoc
- nvcc_linux-64=11.8
- nvcomp==3.0.4
- nvtx>=0.2.1
- packaging
- pandas>=1.3,<1.6.0dev0
- pandoc
- pip
- pre-commit
- protobuf>=4.21,<5
- ptxcompiler
- pyarrow==14.0.1.*
- pydata-sphinx-theme!=0.14.2
- pytest
- pytest-benchmark
- pytest-cases
- pytest-cov
- pytest-xdist
- python-confluent-kafka>=1.9.0,<1.10.0a0
- python-snappy>=0.6.0
- python>=3.9,<3.11
- pytorch<1.12.0
- rapids-dask-dependency==24.2.*
- rich
- rmm==24.2.*
- s3fs>=2022.3.0
- scikit-build>=0.13.1
- scipy
- spdlog>=1.11.0,<1.12
- sphinx
- sphinx-autobuild
- sphinx-copybutton
- sphinx-markdown-tables
- sphinxcontrib-websupport
- streamz
- sysroot_linux-64==2.17
- tokenizers==0.13.1
- transformers==4.24.0
- typing_extensions>=4.0.0
- zlib>=1.2.13
- pip:
- git+https://github.com/python-streamz/streamz.git@master
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/cudf_kafka/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/cudf_kafka/build.sh
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
./build.sh -v cudf_kafka
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/cudf_kafka/meta.yaml
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: cudf_kafka
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=cudf-kafka-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=cudf-kafka-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
- ninja
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- python
- cython >=3.0.3
- cuda-version ={{ cuda_version }}
- cudf ={{ version }}
- libcudf_kafka ={{ version }}
- scikit-build >=0.13.1
- setuptools
{% if cuda_major == "12" %}
- cuda-cudart-dev
{% endif %}
run:
- python
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- libcudf_kafka ={{ version }}
- cudf ={{ version }}
test:
requires:
- cuda-version ={{ cuda_version }}
imports:
- cudf_kafka
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: libcudf_kafka library
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/cudf/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/cudf/build.sh
|
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
# This assumes the script is executed from the root of the repo directory
./build.sh cudf
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/cudf/meta.yaml
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: cudf
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=cudf-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=cudf-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
ignore_run_exports:
# libcudf's run_exports pinning is looser than we would like
- libcudf
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
- ninja
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- protobuf ==4.24.*
- python
- cython >=3.0.3
- scikit-build >=0.13.1
- setuptools
- dlpack >=0.5,<0.6.0a0
- pyarrow ==14.0.1.*
- libcudf ={{ version }}
- rmm ={{ minor_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% else %}
- cuda-cudart-dev
- cuda-nvrtc
- libcufile-dev # [linux64]
{% endif %}
- cuda-version ={{ cuda_version }}
run:
- {{ pin_compatible('protobuf', min_pin='x.x', max_pin='x') }}
- python
- typing_extensions >=4.0.0
- pandas >=1.3,<1.6.0dev0
- cupy >=12.0.0
# TODO: Pin to numba<0.58 until #14160 is resolved
- numba >=0.57,<0.58
# TODO: Pin to numpy<1.25 until cudf requires pandas 2
- numpy >=1.21,<1.25
- {{ pin_compatible('pyarrow', max_pin='x') }}
- libcudf ={{ version }}
- {{ pin_compatible('rmm', max_pin='x.x') }}
- fsspec >=0.6.0
{% if cuda_major == "11" %}
- cudatoolkit
- ptxcompiler >=0.7.0
- cubinlinker # CUDA enhanced compatibility.
- cuda-python >=11.7.1,<12.0a0
{% else %}
# Needed by Numba for CUDA support
- cuda-nvcc-impl
# TODO: Add nvjitlink here
# xref: https://github.com/rapidsai/cudf/issues/12822
- cuda-nvrtc
- cuda-python >=12.0,<13.0a0
{% endif %}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- nvtx >=0.2.1
- packaging
- cachetools
- rich
test:
requires:
- cuda-version ={{ cuda_version }}
imports:
- cudf
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: cuDF GPU DataFrame core library
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/custreamz/build.sh
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
# This assumes the script is executed from the root of the repo directory
./build.sh -v custreamz
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/custreamz/meta.yaml
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: custreamz
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=custreamz-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=custreamz-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
requirements:
host:
- python
- python-confluent-kafka >=1.9.0,<1.10.0a0
- cudf_kafka ={{ version }}
- cuda-version ={{ cuda_version }}
run:
- python
- streamz
- cudf ={{ version }}
- cudf_kafka ={{ version }}
- rapids-dask-dependency ={{ minor_version }}
- python-confluent-kafka >=1.9.0,<1.10.0a0
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
test:
requires:
- cuda-version ={{ cuda_version }}
imports:
- custreamz
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: cuStreamz library
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/libcudf/install_libcudf_tests.sh
|
#!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
cmake --install cpp/build --component testing
cmake --install cpp/libcudf_kafka/build --component testing
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/libcudf/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
gbench_version:
- "==1.8.0"
gtest_version:
- ">=1.13.0"
libarrow_version:
- "==14.0.1"
dlpack_version:
- ">=0.5,<0.6.0a0"
librdkafka_version:
- ">=1.9.0,<1.10.0a0"
fmt_version:
- ">=9.1.0,<10"
spdlog_version:
- ">=1.11.0,<1.12"
nvcomp_version:
- "=3.0.4"
zlib_version:
- ">=1.2.13"
# The CTK libraries below are missing from the conda-forge::cudatoolkit package
# for CUDA 11. The "*_host_*" version specifiers correspond to `11.8` packages
# and the "*_run_*" version specifiers correspond to `11.x` packages.
cuda11_libcufile_host_version:
- "1.4.0.31"
cuda11_libcufile_run_version:
- ">=1.0.0.82,<=1.4.0.31"
cuda11_libcurand_host_version:
- "=10.3.0.86"
cuda11_libcurand_run_version:
- ">=10.2.5.43,<10.3.1"
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/libcudf/build.sh
|
#!/bin/bash
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
export cudf_ROOT="$(realpath ./cpp/build)"
./build.sh -n -v \
libcudf libcudf_kafka benchmarks tests \
--build_metrics --incl_cache_stats \
--cmake-args=\"-DCMAKE_INSTALL_LIBDIR=lib -DCUDF_ENABLE_ARROW_S3=ON -DNVBench_ENABLE_CUPTI=OFF\"
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/libcudf/install_libcudf_kafka.sh
|
#!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
cmake --install cpp/libcudf_kafka/build
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/libcudf/meta.yaml
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: libcudf-split
source:
path: ../../..
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- RAPIDS_ARTIFACTS_DIR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=libcudf-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=libcudf-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
requirements:
build:
- cmake {{ cmake_version }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- librmm ={{ minor_version }}
- libkvikio ={{ minor_version }}
{% if cuda_major == "11" %}
- cudatoolkit
- libcufile {{ cuda11_libcufile_host_version }} # [linux64]
- libcufile-dev {{ cuda11_libcufile_host_version }} # [linux64]
- libcurand {{ cuda11_libcurand_host_version }}
- libcurand-dev {{ cuda11_libcurand_host_version }}
- cuda-nvrtc ={{ cuda_version }}
- cuda-nvrtc-dev ={{ cuda_version }}
- cuda-nvtx ={{ cuda_version }}
{% else %}
- cuda-nvrtc-dev
- cuda-nvtx-dev
- libcufile-dev # [linux64]
- libcurand-dev
{% endif %}
- cuda-version ={{ cuda_version }}
- nvcomp {{ nvcomp_version }}
- libarrow {{ libarrow_version }}
- dlpack {{ dlpack_version }}
- librdkafka {{ librdkafka_version }}
- fmt {{ fmt_version }}
- spdlog {{ spdlog_version }}
- benchmark {{ gbench_version }}
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
- zlib {{ zlib_version }}
outputs:
- name: libcudf
version: {{ version }}
script: install_libcudf.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
run_exports:
- {{ pin_subpackage("libcudf", max_pin="x.x") }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- libarrow {{ libarrow_version }}
run:
{% if cuda_major == "11" %}
- cudatoolkit
- libcufile {{ cuda11_libcufile_run_version }} # [linux64]
{% else %}
- cuda-nvrtc
- libcufile # [linux64]
{% endif %}
- cuda-version {{ cuda_spec }}
- nvcomp {{ nvcomp_version }}
- librmm ={{ minor_version }}
- libkvikio ={{ minor_version }}
- dlpack {{ dlpack_version }}
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
test:
commands:
- test -f $PREFIX/lib/libcudf.so
- test -f $PREFIX/include/cudf/column/column.hpp
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: libcudf library
- name: libcudf_kafka
version: {{ version }}
script: install_libcudf_kafka.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- librdkafka {{ librdkafka_version }}
- {{ pin_subpackage('libcudf', exact=True) }}
run:
- librdkafka {{ librdkafka_version }}
- {{ pin_subpackage('libcudf', exact=True) }}
test:
commands:
- test -f $PREFIX/lib/libcudf_kafka.so
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: libcudf_kafka library
- name: libcudf-example
version: {{ version }}
script: install_libcudf_example.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} ={{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- ninja
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- {{ pin_subpackage('libcudf', exact=True) }}
{% if cuda_major == "11" %}
- cuda-nvtx ={{ cuda_version }}
{% else %}
- cuda-nvtx-dev
{% endif %}
- cuda-version ={{ cuda_version }}
run:
- {{ pin_subpackage('libcudf', exact=True) }}
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: libcudf_example library
- name: libcudf-tests
version: {{ version }}
script: install_libcudf_tests.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- {{ pin_subpackage('libcudf', exact=True) }}
- {{ pin_subpackage('libcudf_kafka', exact=True) }}
- cuda-version {{ cuda_spec }}
{% if cuda_major == "11" %}
- libcurand {{ cuda11_libcurand_run_version }}
{% else %}
- libcurand-dev
{% endif %}
- benchmark {{ gbench_version }}
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
run:
- {{ pin_subpackage('libcudf', exact=True) }}
- {{ pin_subpackage('libcudf_kafka', exact=True) }}
- cuda-version {{ cuda_spec }}
{% if cuda_major == "11" %}
- libcurand {{ cuda11_libcurand_run_version }}
{% endif %}
- benchmark {{ gbench_version }}
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: libcudf test & benchmark executables
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/libcudf/install_libcudf.sh
|
#!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
cmake --install cpp/build
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/libcudf/install_libcudf_example.sh
|
#!/bin/bash
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
./cpp/examples/build.sh
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/dask-cudf/build.sh
|
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
# This assumes the script is executed from the root of the repo directory
./build.sh dask_cudf
| 0 |
rapidsai_public_repos/cudf/conda/recipes
|
rapidsai_public_repos/cudf/conda/recipes/dask-cudf/meta.yaml
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
{% set py_version = environ['CONDA_PY'] %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: dask-cudf
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=dask-cudf-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=dask-cudf-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
requirements:
host:
- python
- cuda-version ={{ cuda_version }}
run:
- python
- cudf ={{ version }}
- rapids-dask-dependency ={{ minor_version }}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
test:
requires:
- cuda-version ={{ cuda_version }}
imports:
- dask_cudf
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: APACHE
license_file: LICENSE
summary: dask-cudf library
| 0 |
rapidsai_public_repos/cudf
|
rapidsai_public_repos/cudf/cpp/.clangd
|
# https://clangd.llvm.org/config
# Apply a config conditionally to all C files
If:
PathMatch: .*\.(c|h)$
---
# Apply a config conditionally to all C++ files
If:
PathMatch: .*\.(c|h)pp
---
# Apply a config conditionally to all CUDA files
If:
PathMatch: .*\.cuh?
CompileFlags:
Add:
- "-x"
- "cuda"
# No error on unknown CUDA versions
- "-Wno-unknown-cuda-version"
# Allow variadic CUDA functions
- "-Xclang=-fcuda-allow-variadic-functions"
Diagnostics:
Suppress:
- "variadic_device_fn"
- "attributes_not_allowed"
---
# Tweak the clangd parse settings for all files
CompileFlags:
Add:
# report all errors
- "-ferror-limit=0"
- "-fmacro-backtrace-limit=0"
- "-ftemplate-backtrace-limit=0"
# Skip the CUDA version check
- "--no-cuda-version-check"
Remove:
# remove gcc's -fcoroutines
- -fcoroutines
# remove nvc++ flags unknown to clang
- "-gpu=*"
- "-stdpar*"
# remove nvcc flags unknown to clang
- "-arch*"
- "-gencode*"
- "--generate-code*"
- "-ccbin*"
- "-t=*"
- "--threads*"
- "-Xptxas*"
- "-Xcudafe*"
- "-Xfatbin*"
- "-Xcompiler*"
- "--diag-suppress*"
- "--diag_suppress*"
- "--compiler-options*"
- "--expt-extended-lambda"
- "--expt-relaxed-constexpr"
- "-forward-unknown-to-host-compiler"
- "-Werror=cross-execution-space-call"
| 0 |
rapidsai_public_repos/cudf
|
rapidsai_public_repos/cudf/cpp/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
cmake_minimum_required(VERSION 3.26.4 FATAL_ERROR)
include(../fetch_rapids.cmake)
include(rapids-cmake)
include(rapids-cpm)
include(rapids-cuda)
include(rapids-export)
include(rapids-find)
rapids_cuda_init_architectures(CUDF)
project(
CUDF
VERSION 24.02.00
LANGUAGES C CXX CUDA
)
if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 11.5)
message(
FATAL_ERROR
"libcudf requires CUDA Toolkit 11.5+ to compile (nvcc ${CMAKE_CUDA_COMPILER_VERSION} provided)"
)
endif()
# Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to
# have different values for the `Threads::Threads` target. Setting this flag ensures
# `Threads::Threads` is the same value in first run and subsequent runs.
set(THREADS_PREFER_PTHREAD_FLAG ON)
# ##################################################################################################
# * build options ---------------------------------------------------------------------------------
option(USE_NVTX "Build with NVTX support" ON)
option(BUILD_TESTS "Configure CMake to build tests" ON)
option(BUILD_BENCHMARKS "Configure CMake to build (google & nvbench) benchmarks" OFF)
option(BUILD_SHARED_LIBS "Build cuDF shared libraries" ON)
option(JITIFY_USE_CACHE "Use a file cache for JIT compiled kernels" ON)
option(CUDF_BUILD_TESTUTIL "Whether to build the test utilities contained in libcudf" ON)
mark_as_advanced(CUDF_BUILD_TESTUTIL)
option(CUDF_USE_PROPRIETARY_NVCOMP "Download and use NVCOMP with proprietary extensions" ON)
option(CUDF_USE_ARROW_STATIC "Build and statically link Arrow libraries" OFF)
option(CUDF_ENABLE_ARROW_ORC "Build the Arrow ORC adapter" OFF)
option(CUDF_ENABLE_ARROW_PYTHON "Find (or build) Arrow with Python support" OFF)
option(CUDF_ENABLE_ARROW_PARQUET "Find (or build) Arrow with Parquet support" OFF)
option(CUDF_ENABLE_ARROW_S3 "Build/Enable AWS S3 Arrow filesystem support" OFF)
option(
CUDF_USE_PER_THREAD_DEFAULT_STREAM
"Build cuDF with per-thread default stream, including passing the per-thread default
stream to external libraries."
OFF
)
# Option to add all symbols to the dynamic symbol table in the library file, allowing to retrieve
# human-readable stacktrace for debugging.
option(
CUDF_BUILD_STACKTRACE_DEBUG
"Replace the current optimization flags by the options '-rdynamic -Og -NDEBUG', useful for debugging with stacktrace retrieval"
OFF
)
option(DISABLE_DEPRECATION_WARNINGS "Disable warnings generated from deprecated declarations." OFF)
# Option to enable line info in CUDA device compilation to allow introspection when profiling /
# memchecking
option(CUDA_ENABLE_LINEINFO
"Enable the -lineinfo option for nvcc (useful for cuda-memcheck / profiler)" OFF
)
option(CUDA_WARNINGS_AS_ERRORS "Enable -Werror=all-warnings for all CUDA compilation" ON)
# cudart can be statically linked or dynamically linked. The python ecosystem wants dynamic linking
option(CUDA_STATIC_RUNTIME "Statically link the CUDA runtime" OFF)
set(DEFAULT_CUDF_BUILD_STREAMS_TEST_UTIL ON)
if(CUDA_STATIC_RUNTIME OR NOT BUILD_SHARED_LIBS)
set(DEFAULT_CUDF_BUILD_STREAMS_TEST_UTIL OFF)
endif()
option(
CUDF_BUILD_STREAMS_TEST_UTIL
"Whether to build the utilities for stream testing contained in libcudf"
${DEFAULT_CUDF_BUILD_STREAMS_TEST_UTIL}
)
mark_as_advanced(CUDF_BUILD_STREAMS_TEST_UTIL)
option(USE_LIBARROW_FROM_PYARROW "Use the libarrow contained within pyarrow." OFF)
mark_as_advanced(USE_LIBARROW_FROM_PYARROW)
message(VERBOSE "CUDF: Build with NVTX support: ${USE_NVTX}")
message(VERBOSE "CUDF: Configure CMake to build tests: ${BUILD_TESTS}")
message(VERBOSE "CUDF: Configure CMake to build (google & nvbench) benchmarks: ${BUILD_BENCHMARKS}")
message(VERBOSE "CUDF: Build cuDF shared libraries: ${BUILD_SHARED_LIBS}")
message(VERBOSE "CUDF: Use a file cache for JIT compiled kernels: ${JITIFY_USE_CACHE}")
message(VERBOSE "CUDF: Build and statically link Arrow libraries: ${CUDF_USE_ARROW_STATIC}")
message(VERBOSE "CUDF: Build and enable S3 filesystem support for Arrow: ${CUDF_ENABLE_ARROW_S3}")
message(VERBOSE "CUDF: Build with per-thread default stream: ${CUDF_USE_PER_THREAD_DEFAULT_STREAM}")
message(
VERBOSE
"CUDF: Replace the current optimization flags by the options '-rdynamic -Og' (useful for debugging with stacktrace retrieval): ${CUDF_BUILD_STACKTRACE_DEBUG}"
)
message(
VERBOSE
"CUDF: Disable warnings generated from deprecated declarations: ${DISABLE_DEPRECATION_WARNINGS}"
)
message(
VERBOSE
"CUDF: Enable the -lineinfo option for nvcc (useful for cuda-memcheck / profiler): ${CUDA_ENABLE_LINEINFO}"
)
message(VERBOSE "CUDF: Statically link the CUDA runtime: ${CUDA_STATIC_RUNTIME}")
# Set a default build type if none was specified
rapids_cmake_build_type("Release")
set(CUDF_BUILD_TESTS ${BUILD_TESTS})
set(CUDF_BUILD_BENCHMARKS ${BUILD_BENCHMARKS})
if(BUILD_TESTS AND NOT CUDF_BUILD_TESTUTIL)
message(
FATAL_ERROR
"Tests cannot be built without building cudf test utils. Please set CUDF_BUILD_TESTUTIL=ON or BUILD_TESTS=OFF"
)
endif()
if(CUDF_BUILD_STACKTRACE_DEBUG AND NOT CMAKE_COMPILER_IS_GNUCXX)
message(FATAL_ERROR "CUDF_BUILD_STACKTRACE_DEBUG is only supported with GCC compiler")
endif()
set(CUDF_CXX_FLAGS "")
set(CUDF_CUDA_FLAGS "")
set(CUDF_CXX_DEFINITIONS "")
set(CUDF_CUDA_DEFINITIONS "")
# Set logging level
set(LIBCUDF_LOGGING_LEVEL
"INFO"
CACHE STRING "Choose the logging level."
)
set_property(
CACHE LIBCUDF_LOGGING_LEVEL PROPERTY STRINGS "TRACE" "DEBUG" "INFO" "WARN" "ERROR" "CRITICAL"
"OFF"
)
message(VERBOSE "CUDF: LIBCUDF_LOGGING_LEVEL = '${LIBCUDF_LOGGING_LEVEL}'.")
if(NOT CUDF_GENERATED_INCLUDE_DIR)
set(CUDF_GENERATED_INCLUDE_DIR ${CUDF_BINARY_DIR})
endif()
# ##################################################################################################
# * conda environment -----------------------------------------------------------------------------
rapids_cmake_support_conda_env(conda_env MODIFY_PREFIX_PATH)
# ##################################################################################################
# * compiler options ------------------------------------------------------------------------------
rapids_find_package(
CUDAToolkit REQUIRED
BUILD_EXPORT_SET cudf-exports
INSTALL_EXPORT_SET cudf-exports
)
include(cmake/Modules/ConfigureCUDA.cmake) # set other CUDA compilation flags
# ##################################################################################################
# * dependencies ----------------------------------------------------------------------------------
# find zlib
rapids_find_package(ZLIB REQUIRED)
if(CUDF_BUILD_TESTUTIL)
# find Threads (needed by cudftestutil)
rapids_find_package(
Threads REQUIRED
BUILD_EXPORT_SET cudf-exports
INSTALL_EXPORT_SET cudf-exports
)
endif()
# add third party dependencies using CPM
rapids_cpm_init()
# find jitify
include(cmake/thirdparty/get_jitify.cmake)
# find nvCOMP
include(cmake/thirdparty/get_nvcomp.cmake)
# find thrust/cub
include(cmake/thirdparty/get_thrust.cmake)
# find rmm
include(cmake/thirdparty/get_rmm.cmake)
# find arrow
include(cmake/thirdparty/get_arrow.cmake)
# find dlpack
include(cmake/thirdparty/get_dlpack.cmake)
# find libcu++
include(cmake/thirdparty/get_libcudacxx.cmake)
# find cuCollections Should come after including thrust and libcudacxx
include(cmake/thirdparty/get_cucollections.cmake)
# find or install GoogleTest
if(CUDF_BUILD_TESTUTIL)
include(cmake/thirdparty/get_gtest.cmake)
endif()
# preprocess jitify-able kernels
include(cmake/Modules/JitifyPreprocessKernels.cmake)
# find cuFile
include(cmake/thirdparty/get_cufile.cmake)
# find KvikIO
include(cmake/thirdparty/get_kvikio.cmake)
# find fmt
include(cmake/thirdparty/get_fmt.cmake)
# find spdlog
include(cmake/thirdparty/get_spdlog.cmake)
# Workaround until https://github.com/rapidsai/rapids-cmake/issues/176 is resolved
if(NOT BUILD_SHARED_LIBS)
include("${rapids-cmake-dir}/export/find_package_file.cmake")
list(APPEND METADATA_KINDS BUILD INSTALL)
list(APPEND dependencies KvikIO ZLIB nvcomp)
if(TARGET cufile::cuFile_interface)
list(APPEND dependencies cuFile)
endif()
foreach(METADATA_KIND IN LISTS METADATA_KINDS)
foreach(dep IN LISTS dependencies)
rapids_export_package(${METADATA_KIND} ${dep} cudf-exports)
endforeach()
endforeach()
if(TARGET conda_env)
install(TARGETS conda_env EXPORT cudf-exports)
endif()
endif()
# ##################################################################################################
# * library targets -------------------------------------------------------------------------------
# The complete list of libcudf translation units, grouped by module directory.
add_library(
  cudf
  src/aggregation/aggregation.cpp
  src/aggregation/aggregation.cu
  src/aggregation/result_cache.cpp
  src/ast/expression_parser.cpp
  src/ast/expressions.cpp
  src/binaryop/binaryop.cpp
  src/binaryop/compiled/ATan2.cu
  src/binaryop/compiled/Add.cu
  src/binaryop/compiled/BitwiseAnd.cu
  src/binaryop/compiled/BitwiseOr.cu
  src/binaryop/compiled/BitwiseXor.cu
  src/binaryop/compiled/Div.cu
  src/binaryop/compiled/FloorDiv.cu
  src/binaryop/compiled/Greater.cu
  src/binaryop/compiled/GreaterEqual.cu
  src/binaryop/compiled/IntPow.cu
  src/binaryop/compiled/Less.cu
  src/binaryop/compiled/LessEqual.cu
  src/binaryop/compiled/LogBase.cu
  src/binaryop/compiled/LogicalAnd.cu
  src/binaryop/compiled/LogicalOr.cu
  src/binaryop/compiled/Mod.cu
  src/binaryop/compiled/Mul.cu
  src/binaryop/compiled/NullEquals.cu
  src/binaryop/compiled/NullLogicalAnd.cu
  src/binaryop/compiled/NullLogicalOr.cu
  src/binaryop/compiled/NullMax.cu
  src/binaryop/compiled/NullMin.cu
  src/binaryop/compiled/PMod.cu
  src/binaryop/compiled/Pow.cu
  src/binaryop/compiled/PyMod.cu
  src/binaryop/compiled/ShiftLeft.cu
  src/binaryop/compiled/ShiftRight.cu
  src/binaryop/compiled/ShiftRightUnsigned.cu
  src/binaryop/compiled/Sub.cu
  src/binaryop/compiled/TrueDiv.cu
  src/binaryop/compiled/binary_ops.cu
  src/binaryop/compiled/equality_ops.cu
  src/binaryop/compiled/util.cpp
  src/labeling/label_bins.cu
  src/bitmask/null_mask.cu
  src/bitmask/is_element_valid.cpp
  src/column/column.cu
  src/column/column_device_view.cu
  src/column/column_factories.cpp
  src/column/column_factories.cu
  src/column/column_view.cpp
  src/copying/concatenate.cu
  src/copying/contiguous_split.cu
  src/copying/copy.cpp
  src/copying/copy.cu
  src/copying/copy_range.cu
  src/copying/gather.cu
  src/copying/get_element.cu
  src/copying/pack.cpp
  src/copying/purge_nonempty_nulls.cu
  src/copying/reverse.cu
  src/copying/sample.cu
  src/copying/scatter.cu
  src/copying/shift.cu
  src/copying/slice.cu
  src/copying/split.cpp
  src/copying/segmented_shift.cu
  src/datetime/datetime_ops.cu
  src/dictionary/add_keys.cu
  src/dictionary/decode.cu
  src/dictionary/detail/concatenate.cu
  src/dictionary/detail/merge.cu
  src/dictionary/dictionary_column_view.cpp
  src/dictionary/dictionary_factories.cu
  src/dictionary/encode.cu
  src/dictionary/remove_keys.cu
  src/dictionary/replace.cu
  src/dictionary/search.cu
  src/dictionary/set_keys.cu
  src/filling/calendrical_month_sequence.cu
  src/filling/fill.cu
  src/filling/repeat.cu
  src/filling/sequence.cu
  src/groupby/groupby.cu
  src/groupby/hash/groupby.cu
  src/groupby/sort/aggregate.cpp
  src/groupby/sort/group_argmax.cu
  src/groupby/sort/group_argmin.cu
  src/groupby/sort/group_collect.cu
  src/groupby/sort/group_correlation.cu
  src/groupby/sort/group_count.cu
  src/groupby/sort/group_histogram.cu
  src/groupby/sort/group_m2.cu
  src/groupby/sort/group_max.cu
  src/groupby/sort/group_min.cu
  src/groupby/sort/group_merge_lists.cu
  src/groupby/sort/group_merge_m2.cu
  src/groupby/sort/group_nth_element.cu
  src/groupby/sort/group_nunique.cu
  src/groupby/sort/group_product.cu
  src/groupby/sort/group_quantiles.cu
  src/groupby/sort/group_std.cu
  src/groupby/sort/group_sum.cu
  src/groupby/sort/scan.cpp
  src/groupby/sort/group_count_scan.cu
  src/groupby/sort/group_max_scan.cu
  src/groupby/sort/group_min_scan.cu
  src/groupby/sort/group_rank_scan.cu
  src/groupby/sort/group_replace_nulls.cu
  src/groupby/sort/group_sum_scan.cu
  src/groupby/sort/sort_helper.cu
  src/hash/hashing.cu
  src/hash/md5_hash.cu
  src/hash/murmurhash3_x86_32.cu
  src/hash/murmurhash3_x64_128.cu
  src/hash/spark_murmurhash3_x86_32.cu
  src/hash/xxhash_64.cu
  src/interop/dlpack.cpp
  src/interop/from_arrow.cu
  src/interop/to_arrow.cu
  src/interop/detail/arrow_allocator.cpp
  src/io/avro/avro.cpp
  src/io/avro/avro_gpu.cu
  src/io/avro/reader_impl.cu
  src/io/comp/brotli_dict.cpp
  src/io/comp/cpu_unbz2.cpp
  src/io/comp/debrotli.cu
  src/io/comp/gpuinflate.cu
  src/io/comp/nvcomp_adapter.cpp
  src/io/comp/nvcomp_adapter.cu
  src/io/comp/snap.cu
  src/io/comp/statistics.cu
  src/io/comp/uncomp.cpp
  src/io/comp/unsnap.cu
  src/io/csv/csv_gpu.cu
  src/io/csv/durations.cu
  src/io/csv/reader_impl.cu
  src/io/csv/writer_impl.cu
  src/io/functions.cpp
  src/io/json/byte_range_info.cu
  src/io/json/json_column.cu
  src/io/json/json_tree.cu
  src/io/json/nested_json_gpu.cu
  src/io/json/read_json.cu
  src/io/json/legacy/json_gpu.cu
  src/io/json/legacy/reader_impl.cu
  src/io/json/write_json.cu
  src/io/orc/aggregate_orc_metadata.cpp
  src/io/orc/dict_enc.cu
  src/io/orc/orc.cpp
  src/io/orc/reader_impl.cu
  src/io/orc/stats_enc.cu
  src/io/orc/stripe_data.cu
  src/io/orc/stripe_enc.cu
  src/io/orc/stripe_init.cu
  # NOTE(review): timezone.cpp is listed among the ORC sources rather than with
  # src/datetime/datetime_ops.cu above — presumably intentional (ORC uses it); verify.
  src/datetime/timezone.cpp
  src/io/orc/writer_impl.cu
  src/io/parquet/compact_protocol_reader.cpp
  src/io/parquet/compact_protocol_writer.cpp
  src/io/parquet/decode_preprocess.cu
  src/io/parquet/page_data.cu
  src/io/parquet/chunk_dict.cu
  src/io/parquet/page_enc.cu
  src/io/parquet/page_hdr.cu
  src/io/parquet/page_delta_decode.cu
  src/io/parquet/page_string_decode.cu
  src/io/parquet/predicate_pushdown.cpp
  src/io/parquet/reader.cpp
  src/io/parquet/reader_impl.cpp
  src/io/parquet/reader_impl_chunking.cu
  src/io/parquet/reader_impl_helpers.cpp
  src/io/parquet/reader_impl_preprocess.cu
  src/io/parquet/writer_impl.cu
  src/io/statistics/orc_column_statistics.cu
  src/io/statistics/parquet_column_statistics.cu
  src/io/text/byte_range_info.cpp
  src/io/text/data_chunk_source_factories.cpp
  src/io/text/bgzip_data_chunk_source.cu
  src/io/text/bgzip_utils.cpp
  src/io/text/multibyte_split.cu
  src/io/utilities/arrow_io_source.cpp
  src/io/utilities/column_buffer.cpp
  src/io/utilities/config_utils.cpp
  src/io/utilities/data_casting.cu
  src/io/utilities/data_sink.cpp
  src/io/utilities/datasource.cpp
  src/io/utilities/file_io_utilities.cpp
  src/io/utilities/parsing_utils.cu
  src/io/utilities/row_selection.cpp
  src/io/utilities/type_inference.cu
  src/io/utilities/trie.cu
  src/jit/cache.cpp
  src/jit/parser.cpp
  src/jit/util.cpp
  src/join/conditional_join.cu
  src/join/cross_join.cu
  src/join/hash_join.cu
  src/join/join.cu
  src/join/join_utils.cu
  src/join/mixed_join.cu
  src/join/mixed_join_kernel.cu
  src/join/mixed_join_kernel_nulls.cu
  src/join/mixed_join_kernels_semi.cu
  src/join/mixed_join_semi.cu
  src/join/mixed_join_size_kernel.cu
  src/join/mixed_join_size_kernel_nulls.cu
  src/join/mixed_join_size_kernels_semi.cu
  src/join/semi_join.cu
  src/json/json_path.cu
  src/lists/contains.cu
  src/lists/combine/concatenate_list_elements.cu
  src/lists/combine/concatenate_rows.cu
  src/lists/copying/concatenate.cu
  src/lists/copying/copying.cu
  src/lists/copying/gather.cu
  src/lists/copying/segmented_gather.cu
  src/lists/copying/scatter_helper.cu
  src/lists/count_elements.cu
  src/lists/dremel.cu
  src/lists/explode.cu
  src/lists/extract.cu
  src/lists/interleave_columns.cu
  src/lists/lists_column_factories.cu
  src/lists/lists_column_view.cu
  src/lists/reverse.cu
  src/lists/segmented_sort.cu
  src/lists/sequences.cu
  src/lists/set_operations.cu
  src/lists/stream_compaction/apply_boolean_mask.cu
  src/lists/stream_compaction/distinct.cu
  src/lists/utilities.cu
  src/merge/merge.cu
  src/partitioning/partitioning.cu
  src/partitioning/round_robin.cu
  src/quantiles/tdigest/tdigest.cu
  src/quantiles/tdigest/tdigest_aggregation.cu
  src/quantiles/tdigest/tdigest_column_view.cpp
  src/quantiles/quantile.cu
  src/quantiles/quantiles.cu
  src/reductions/all.cu
  src/reductions/any.cu
  src/reductions/collect_ops.cu
  src/reductions/histogram.cu
  src/reductions/max.cu
  src/reductions/mean.cu
  src/reductions/min.cu
  src/reductions/minmax.cu
  src/reductions/nth_element.cu
  src/reductions/product.cu
  src/reductions/reductions.cpp
  src/reductions/scan/rank_scan.cu
  src/reductions/scan/scan.cpp
  src/reductions/scan/scan_exclusive.cu
  src/reductions/scan/scan_inclusive.cu
  src/reductions/segmented/all.cu
  src/reductions/segmented/any.cu
  src/reductions/segmented/counts.cu
  src/reductions/segmented/max.cu
  src/reductions/segmented/mean.cu
  src/reductions/segmented/min.cu
  src/reductions/segmented/nunique.cu
  src/reductions/segmented/product.cu
  src/reductions/segmented/reductions.cpp
  src/reductions/segmented/std.cu
  src/reductions/segmented/sum.cu
  src/reductions/segmented/sum_of_squares.cu
  src/reductions/segmented/update_validity.cu
  src/reductions/segmented/var.cu
  src/reductions/std.cu
  src/reductions/sum.cu
  src/reductions/sum_of_squares.cu
  src/reductions/var.cu
  src/replace/clamp.cu
  src/replace/nans.cu
  src/replace/nulls.cu
  src/replace/replace.cu
  src/reshape/byte_cast.cu
  src/reshape/interleave_columns.cu
  src/reshape/tile.cu
  src/rolling/detail/optimized_unbounded_window.cpp
  src/rolling/detail/rolling_collect_list.cu
  src/rolling/detail/rolling_fixed_window.cu
  src/rolling/detail/rolling_variable_window.cu
  src/rolling/grouped_rolling.cu
  src/rolling/range_window_bounds.cpp
  src/rolling/rolling.cu
  src/round/round.cu
  src/scalar/scalar.cpp
  src/scalar/scalar_factories.cpp
  src/search/contains_column.cu
  src/search/contains_scalar.cu
  src/search/contains_table.cu
  src/search/search_ordered.cu
  src/sort/is_sorted.cu
  src/sort/rank.cu
  src/sort/segmented_sort.cu
  src/sort/sort_column.cu
  src/sort/sort.cu
  src/sort/stable_segmented_sort.cu
  src/sort/stable_sort_column.cu
  src/sort/stable_sort.cu
  src/stream_compaction/apply_boolean_mask.cu
  src/stream_compaction/distinct.cu
  src/stream_compaction/distinct_count.cu
  src/stream_compaction/distinct_helpers.cu
  src/stream_compaction/drop_nans.cu
  src/stream_compaction/drop_nulls.cu
  src/stream_compaction/stable_distinct.cu
  src/stream_compaction/unique.cu
  src/stream_compaction/unique_count.cu
  src/stream_compaction/unique_count_column.cu
  src/strings/attributes.cu
  src/strings/capitalize.cu
  src/strings/case.cu
  src/strings/char_types/char_cases.cu
  src/strings/char_types/char_types.cu
  src/strings/combine/concatenate.cu
  src/strings/combine/join.cu
  src/strings/combine/join_list_elements.cu
  src/strings/contains.cu
  src/strings/convert/convert_booleans.cu
  src/strings/convert/convert_datetime.cu
  src/strings/convert/convert_durations.cu
  src/strings/convert/convert_fixed_point.cu
  src/strings/convert/convert_floats.cu
  src/strings/convert/convert_hex.cu
  src/strings/convert/convert_integers.cu
  src/strings/convert/convert_ipv4.cu
  src/strings/convert/convert_urls.cu
  src/strings/convert/convert_lists.cu
  src/strings/copying/concatenate.cu
  src/strings/copying/copying.cu
  src/strings/copying/shift.cu
  src/strings/count_matches.cu
  src/strings/extract/extract.cu
  src/strings/extract/extract_all.cu
  src/strings/filling/fill.cu
  src/strings/filter_chars.cu
  src/strings/like.cu
  src/strings/padding.cu
  src/strings/regex/regcomp.cpp
  src/strings/regex/regexec.cpp
  src/strings/regex/regex_program.cpp
  src/strings/repeat_strings.cu
  src/strings/replace/backref_re.cu
  src/strings/replace/multi.cu
  src/strings/replace/multi_re.cu
  src/strings/replace/replace.cu
  src/strings/replace/replace_re.cu
  src/strings/reverse.cu
  src/strings/scan/scan_inclusive.cu
  src/strings/search/findall.cu
  src/strings/search/find.cu
  src/strings/search/find_multiple.cu
  src/strings/slice.cu
  src/strings/split/partition.cu
  src/strings/split/split.cu
  src/strings/split/split_re.cu
  src/strings/split/split_record.cu
  src/strings/strings_column_factories.cu
  src/strings/strings_column_view.cpp
  src/strings/strings_scalar_factories.cpp
  src/strings/strip.cu
  src/strings/translate.cu
  src/strings/utilities.cu
  src/strings/wrap.cu
  src/structs/copying/concatenate.cu
  src/structs/scan/scan_inclusive.cu
  src/structs/structs_column_factories.cu
  src/structs/structs_column_view.cpp
  src/structs/utilities.cpp
  src/table/row_operators.cu
  src/table/table.cpp
  src/table/table_device_view.cu
  src/table/table_view.cpp
  src/text/detokenize.cu
  src/text/edit_distance.cu
  src/text/generate_ngrams.cu
  src/text/jaccard.cu
  src/text/minhash.cu
  src/text/ngrams_tokenize.cu
  src/text/normalize.cu
  src/text/replace.cu
  src/text/stemmer.cu
  src/text/bpe/byte_pair_encoding.cu
  src/text/bpe/load_merge_pairs.cu
  src/text/subword/data_normalizer.cu
  src/text/subword/load_hash_file.cu
  src/text/subword/subword_tokenize.cu
  src/text/subword/wordpiece_tokenizer.cu
  src/text/tokenize.cu
  src/text/vocabulary_tokenize.cu
  src/transform/bools_to_mask.cu
  src/transform/compute_column.cu
  src/transform/encode.cu
  src/transform/mask_to_bools.cu
  src/transform/nans_to_nulls.cu
  src/transform/one_hot_encode.cu
  src/transform/row_bit_count.cu
  src/transform/transform.cpp
  src/transpose/transpose.cu
  src/unary/cast_ops.cu
  src/unary/math_ops.cu
  src/unary/nan_ops.cu
  src/unary/null_ops.cu
  src/utilities/default_stream.cpp
  src/utilities/linked_column.cpp
  src/utilities/logger.cpp
  src/utilities/stacktrace.cpp
  src/utilities/stream_pool.cpp
  src/utilities/traits.cpp
  src/utilities/type_checks.cpp
  src/utilities/type_dispatcher.cpp
)
# Anything that includes jitify needs to be compiled with _FILE_OFFSET_BITS=64 due to a limitation
# in how conda builds glibc
set_source_files_properties(
  src/binaryop/binaryop.cpp
  src/jit/cache.cpp
  src/rolling/detail/rolling_fixed_window.cu
  src/rolling/detail/rolling_variable_window.cu
  src/rolling/grouped_rolling.cu
  src/rolling/rolling.cu
  src/transform/transform.cpp
  PROPERTIES COMPILE_DEFINITIONS "_FILE_OFFSET_BITS=64"
)
# Core target properties: relocatable rpaths, C++17/CUDA17, PIC for linking into shared libs.
set_target_properties(
  cudf
  PROPERTIES BUILD_RPATH "\$ORIGIN"
             INSTALL_RPATH "\$ORIGIN"
             # set target compile options
             CXX_STANDARD 17
             CXX_STANDARD_REQUIRED ON
             # For std:: support of __int128_t. Can be removed once using cuda::std
             CXX_EXTENSIONS ON
             CUDA_STANDARD 17
             CUDA_STANDARD_REQUIRED ON
             POSITION_INDEPENDENT_CODE ON
             INTERFACE_POSITION_INDEPENDENT_CODE ON
)
# Apply the per-language flag lists computed earlier in the file.
target_compile_options(
  cudf PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${CUDF_CXX_FLAGS}>"
               "$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_FLAGS}>"
)
# Optional debug mode: compile at -Og with -rdynamic so stack traces resolve symbols.
if(CUDF_BUILD_STACKTRACE_DEBUG)
  # Remove any optimization level to avoid nvcc warning "incompatible redefinition for option
  # 'optimize'".
  string(REGEX REPLACE "(\-O[0123])" "" CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS}")
  string(REGEX REPLACE "(\-O[0123])" "" CMAKE_CUDA_FLAGS_RELEASE "${CMAKE_CUDA_FLAGS_RELEASE}")
  string(REGEX REPLACE "(\-O[0123])" "" CMAKE_CUDA_FLAGS_MINSIZEREL
                       "${CMAKE_CUDA_FLAGS_MINSIZEREL}"
  )
  string(REGEX REPLACE "(\-O[0123])" "" CMAKE_CUDA_FLAGS_RELWITHDEBINFO
                       "${CMAKE_CUDA_FLAGS_RELWITHDEBINFO}"
  )
  # Interface target that carries the stacktrace compile/link settings; linked privately below.
  add_library(cudf_backtrace INTERFACE)
  target_compile_definitions(cudf_backtrace INTERFACE CUDF_BUILD_STACKTRACE_DEBUG)
  target_compile_options(
    cudf_backtrace INTERFACE "$<$<COMPILE_LANGUAGE:CXX>:-Og>"
                             "$<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-Og>"
  )
  target_link_options(
    cudf_backtrace INTERFACE "$<$<LINK_LANGUAGE:CXX>:-rdynamic>"
                             "$<$<LINK_LANGUAGE:CUDA>:-Xlinker=-rdynamic>"
  )
  target_link_libraries(cudf PRIVATE cudf_backtrace)
endif()
# Specify include paths for the current target and dependents
target_include_directories(
  cudf
  PUBLIC "$<BUILD_INTERFACE:${DLPACK_INCLUDE_DIR}>"
         "$<BUILD_INTERFACE:${JITIFY_INCLUDE_DIR}>"
         "$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}/include>"
         "$<BUILD_INTERFACE:${CUDF_GENERATED_INCLUDE_DIR}/include>"
  PRIVATE "$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}/src>"
  INTERFACE "$<INSTALL_INTERFACE:include>"
)
target_compile_definitions(
  cudf PUBLIC "$<$<COMPILE_LANGUAGE:CXX>:${CUDF_CXX_DEFINITIONS}>"
              "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_DEFINITIONS}>>"
)
# Disable Jitify log printing. See https://github.com/NVIDIA/jitify/issues/79
target_compile_definitions(cudf PRIVATE "JITIFY_PRINT_LOG=0")
if(JITIFY_USE_CACHE)
  # Instruct src/jit/cache what version of cudf we are building so it can compute a cal-ver cache
  # directory. We isolate this definition to the single source so it doesn't affect compilation
  # caching for all of libcudf
  set_property(
    SOURCE src/jit/cache.cpp
    APPEND
    PROPERTY COMPILE_DEFINITIONS "JITIFY_USE_CACHE" "CUDF_VERSION=${PROJECT_VERSION}"
  )
endif()
# Per-thread default stream
if(CUDF_USE_PER_THREAD_DEFAULT_STREAM)
  target_compile_definitions(
    cudf PUBLIC CUDA_API_PER_THREAD_DEFAULT_STREAM CUDF_USE_PER_THREAD_DEFAULT_STREAM
  )
endif()
# Disable NVTX if necessary
if(NOT USE_NVTX)
  target_compile_definitions(cudf PUBLIC NVTX_DISABLE)
endif()
# Define RMM logging level
target_compile_definitions(cudf PRIVATE "RMM_LOGGING_LEVEL=LIBCUDF_LOGGING_LEVEL")
# Define spdlog level
target_compile_definitions(cudf PUBLIC "SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${LIBCUDF_LOGGING_LEVEL}")
# Compile stringified JIT sources first
add_dependencies(cudf jitify_preprocess_run)
# Specify the target module library dependencies
target_link_libraries(
  cudf
  PUBLIC ${ARROW_LIBRARIES} libcudacxx::libcudacxx cudf::Thrust rmm::rmm
  PRIVATE cuco::cuco ZLIB::ZLIB nvcomp::nvcomp kvikio::kvikio
          $<TARGET_NAME_IF_EXISTS:cuFile_interface>
)
# Add Conda library, and include paths if specified
if(TARGET conda_env)
  target_link_libraries(cudf PRIVATE conda_env)
endif()
if(CUDA_STATIC_RUNTIME)
  # Tell CMake what CUDA language runtime to use
  set_target_properties(cudf PROPERTIES CUDA_RUNTIME_LIBRARY Static)
  # Make sure to export to consumers what runtime we used
  target_link_libraries(cudf PUBLIC CUDA::cudart_static)
else()
  # Tell CMake what CUDA language runtime to use
  set_target_properties(cudf PROPERTIES CUDA_RUNTIME_LIBRARY Shared)
  # Make sure to export to consumers what runtime we used
  target_link_libraries(cudf PUBLIC CUDA::cudart)
endif()
# Linker script keeping fatbin sections grouped; applied only when the host linker is used.
file(
  WRITE "${CUDF_BINARY_DIR}/fatbin.ld"
  [=[
SECTIONS
{
  .nvFatBinSegment : { *(.nvFatBinSegment) }
  .nv_fatbin : { *(.nv_fatbin) }
}
]=]
)
target_link_options(cudf PRIVATE "$<HOST_LINK:${CUDF_BINARY_DIR}/fatbin.ld>")
# Namespaced alias so in-tree consumers can link the same name as installed consumers.
add_library(cudf::cudf ALIAS cudf)
# ##################################################################################################
# * tests and benchmarks --------------------------------------------------------------------------
# ##################################################################################################

# ##################################################################################################
# * build cudftestutil ----------------------------------------------------------------------------
if(CUDF_BUILD_TESTUTIL)
  add_library(
    cudftest_default_stream
    # When compiled as a dynamic library allows us to use LD_PRELOAD injection of symbols. We
    # currently leverage this for stream-related library validation and may make use of it for
    # other similar features in the future.
    tests/utilities/default_stream.cpp
  )
  set_target_properties(
    cudftest_default_stream
    PROPERTIES BUILD_RPATH "\$ORIGIN"
               INSTALL_RPATH "\$ORIGIN"
               # set target compile options
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
  )
  target_link_libraries(
    cudftest_default_stream
    PUBLIC cudf
    PRIVATE $<TARGET_NAME_IF_EXISTS:conda_env>
  )
  add_library(cudf::cudftest_default_stream ALIAS cudftest_default_stream)

  # Needs to be static so that we support usage of static builds of gtest which doesn't compile with
  # fPIC enabled and therefore can't be embedded into shared libraries.
  add_library(
    cudftestutil STATIC
    tests/io/metadata_utilities.cpp
    tests/utilities/base_fixture.cpp
    tests/utilities/column_utilities.cu
    tests/utilities/debug_utilities.cu
    tests/utilities/table_utilities.cu
    tests/utilities/tdigest_utilities.cu
  )
  set_target_properties(
    cudftestutil
    PROPERTIES BUILD_RPATH "\$ORIGIN"
               INSTALL_RPATH "\$ORIGIN"
               # set target compile options
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
  )
  target_compile_options(
    cudftestutil PUBLIC "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${CUDF_CXX_FLAGS}>>"
                        "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CUDA>:${CUDF_CUDA_FLAGS}>>"
  )
  target_link_libraries(
    cudftestutil
    PUBLIC GTest::gmock GTest::gtest Threads::Threads cudf cudftest_default_stream
    PRIVATE $<TARGET_NAME_IF_EXISTS:conda_env>
  )
  target_include_directories(
    cudftestutil PUBLIC "$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}>"
                        "$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}/src>"
  )
  add_library(cudf::cudftestutil ALIAS cudftestutil)
endif()

# * build cudf_identify_stream_usage --------------------------------------------------------------
if(CUDF_BUILD_STREAMS_TEST_UTIL)
  # LD_PRELOAD interception requires the shared cudart; fail fast otherwise.
  if(CUDA_STATIC_RUNTIME)
    message(
      FATAL_ERROR
        "Stream identification cannot be used with a static CUDA runtime. Please set CUDA_STATIC_RUNTIME=OFF or CUDF_BUILD_STREAMS_TEST_UTIL=OFF."
    )
  endif()

  # Libraries for stream-related testing. We build the library twice, one with STREAM_MODE_TESTING
  # on and one with it set to off. Each test will then be configured to use the appropriate library
  # depending via ctest and whether it has been updated to expose public stream APIs.
  foreach(_mode cudf testing)
    set(_tgt "cudf_identify_stream_usage_mode_${_mode}")
    add_library(
      ${_tgt} SHARED src/utilities/stacktrace.cpp tests/utilities/identify_stream_usage.cpp
    )
    set_target_properties(
      ${_tgt}
      PROPERTIES # set target compile options
                 CXX_STANDARD 17
                 CXX_STANDARD_REQUIRED ON
                 POSITION_INDEPENDENT_CODE ON
    )
    target_compile_options(
      ${_tgt} PRIVATE "$<BUILD_INTERFACE:$<$<COMPILE_LANGUAGE:CXX>:${CUDF_CXX_FLAGS}>>"
    )
    target_include_directories(${_tgt} PRIVATE "$<BUILD_INTERFACE:${CUDF_SOURCE_DIR}/include>")
    target_link_libraries(${_tgt} PUBLIC CUDA::cudart rmm::rmm)
    if(CUDF_BUILD_STACKTRACE_DEBUG)
      target_link_libraries(${_tgt} PRIVATE cudf_backtrace)
    endif()
    add_library(cudf::${_tgt} ALIAS ${_tgt})
    if("${_mode}" STREQUAL "testing")
      target_compile_definitions(${_tgt} PUBLIC STREAM_MODE_TESTING)
    endif()
  endforeach()
endif()
# ##################################################################################################
# * add tests -------------------------------------------------------------------------------------
if(CUDF_BUILD_TESTS)
  # include CTest module -- automatically calls enable_testing()
  include(CTest)

  # ctest cuda memcheck: configure CTest's MemCheck mode to run compute-sanitizer.
  find_program(CUDA_SANITIZER compute-sanitizer)
  set(MEMORYCHECK_COMMAND ${CUDA_SANITIZER})
  set(MEMORYCHECK_TYPE CudaSanitizer)
  set(CUDA_SANITIZER_COMMAND_OPTIONS "--tool memcheck")

  # Always print verbose output when tests fail if run using `make test`.
  list(APPEND CMAKE_CTEST_ARGUMENTS "--output-on-failure")
  add_subdirectory(tests)
endif()

# ##################################################################################################
# * add benchmarks --------------------------------------------------------------------------------
if(CUDF_BUILD_BENCHMARKS)
  # Find or install GoogleBench
  include(${rapids-cmake-dir}/cpm/gbench.cmake)
  rapids_cpm_gbench()

  # Find or install nvbench
  include(cmake/thirdparty/get_nvbench.cmake)
  add_subdirectory(benchmarks)
endif()
# ##################################################################################################
# * install targets -------------------------------------------------------------------------------
rapids_cmake_install_lib_dir(lib_dir)
include(CPack)
include(GNUInstallDirs)

set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME cudf)

# install target for cudf_base and the proxy libcudf.so
install(
  TARGETS cudf
  DESTINATION ${lib_dir}
  EXPORT cudf-exports
)
# Testing targets are exported under a separate "testing" component when built.
set(_components_export_string)
if(TARGET cudftestutil)
  install(
    TARGETS cudftest_default_stream cudftestutil
    DESTINATION ${lib_dir}
    EXPORT cudf-testing-exports
  )
  set(_components_export_string COMPONENTS testing COMPONENTS_EXPORT_SET cudf-testing-exports)
endif()

install(DIRECTORY ${CUDF_SOURCE_DIR}/include/cudf ${CUDF_SOURCE_DIR}/include/cudf_test
                  ${CUDF_SOURCE_DIR}/include/nvtext DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
)

if(CUDF_BUILD_STREAMS_TEST_UTIL)
  install(TARGETS cudf_identify_stream_usage_mode_cudf DESTINATION ${lib_dir})
  install(TARGETS cudf_identify_stream_usage_mode_testing DESTINATION ${lib_dir})
endif()

# Documentation string embedded in the generated cudf-config.cmake.
set(doc_string
    [=[
Provide targets for the cudf library.

Built based on the Apache Arrow columnar memory format, cuDF is a GPU DataFrame
library for loading, joining, aggregating, filtering, and otherwise
manipulating data.

cuDF provides a pandas-like API that will be familiar to data engineers &
data scientists, so they can use it to easily accelerate their workflows
without going into the details of CUDA programming.

Imported Targets
^^^^^^^^^^^^^^^^

If cudf is found, this module defines the following IMPORTED GLOBAL
targets:

 cudf::cudf             - The main cudf library.

This module offers an optional testing component which defines the
following IMPORTED GLOBAL targets:

 cudf::cudftestutil     - The main cudf testing library
    ]=]
)

# Code appended to both the build-tree and install-tree config files.
set(common_code_string
    [=[
if(NOT TARGET cudf::Thrust)
  thrust_create_target(cudf::Thrust FROM_OPTIONS)
endif()
]=]
)

if(CUDF_ENABLE_ARROW_PARQUET)
  string(
    APPEND
    install_code_string
    [=[
  if(NOT Parquet_DIR)
    set(Parquet_DIR "${Arrow_DIR}")
  endif()
  set(ArrowDataset_DIR "${Arrow_DIR}")
  find_dependency(ArrowDataset)
  ]=]
  )
endif()

string(
  APPEND
  install_code_string
  [=[
if(testing IN_LIST cudf_FIND_COMPONENTS)
  enable_language(CUDA)
endif()
]=]
)
string(APPEND install_code_string "${common_code_string}")

rapids_export(
  INSTALL cudf
  EXPORT_SET cudf-exports ${_components_export_string}
  GLOBAL_TARGETS cudf cudftestutil
  NAMESPACE cudf::
  DOCUMENTATION doc_string
  FINAL_CODE_BLOCK install_code_string
)

# ##################################################################################################
# * build export -------------------------------------------------------------------------------
set(build_code_string
    [=[
if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/cudf-testing-dependencies.cmake")
  include("${CMAKE_CURRENT_LIST_DIR}/cudf-testing-dependencies.cmake")
endif()
if(EXISTS "${CMAKE_CURRENT_LIST_DIR}/cudf-testing-targets.cmake")
  include("${CMAKE_CURRENT_LIST_DIR}/cudf-testing-targets.cmake")
endif()
]=]
)

string(APPEND build_code_string "${common_code_string}")

rapids_export(
  BUILD cudf
  EXPORT_SET cudf-exports ${_components_export_string}
  GLOBAL_TARGETS cudf cudftestutil
  NAMESPACE cudf::
  DOCUMENTATION doc_string
  FINAL_CODE_BLOCK build_code_string
)

# ##################################################################################################
# * make documentation ----------------------------------------------------------------------------

# doc targets for cuDF
add_custom_command(
  OUTPUT CUDF_DOXYGEN
  WORKING_DIRECTORY ${CUDF_SOURCE_DIR}/doxygen
  COMMAND doxygen Doxyfile
  VERBATIM
  COMMENT "Custom command for building cudf doxygen docs."
)

add_custom_target(
  docs_cudf
  DEPENDS CUDF_DOXYGEN
  COMMENT "Custom command for building cudf doxygen docs."
)

# ##################################################################################################
# * make gdb helper scripts ------------------------------------------------------------------------

# build pretty-printer load script; requires source dirs of Thrust and rmm to locate printers.
if(Thrust_SOURCE_DIR AND rmm_SOURCE_DIR)
  configure_file(scripts/load-pretty-printers.in load-pretty-printers @ONLY)
endif()
| 0 |
rapidsai_public_repos/cudf
|
rapidsai_public_repos/cudf/cpp/.clang-tidy
|
# clang-tidy configuration for libcudf: enable the modernize-* checks except the
# three excluded below (reasons noted inline).
---
Checks:
  'modernize-*,
   -modernize-use-equals-default,
   -modernize-concat-nested-namespaces,
   -modernize-use-trailing-return-type'

  # -modernize-use-equals-default # auto-fix is broken (doesn't insert =default correctly)
  # -modernize-concat-nested-namespaces # auto-fix is broken (can delete code)
  # -modernize-use-trailing-return-type # just a preference

WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
FormatStyle: none
CheckOptions:
  - key: modernize-loop-convert.MaxCopySize
    value: '16'
  - key: modernize-loop-convert.MinConfidence
    value: reasonable
  - key: modernize-pass-by-value.IncludeStyle
    value: llvm
  - key: modernize-replace-auto-ptr.IncludeStyle
    value: llvm
  - key: modernize-use-nullptr.NullMacros
    value: 'NULL'
...
| 0 |
rapidsai_public_repos/cudf/cpp
|
rapidsai_public_repos/cudf/cpp/include/doxygen_groups.h
|
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief Doxygen group definitions
*/
// This header is only processed by doxygen and does
// not need to be included in any source file.
// Below are the main groups that doxygen uses to build
// the Modules page in the specified order.
//
// To add a new API to an existing group, just use the
// @ingroup tag to the API's doxygen comment.
// Add a new group by first specifying in the hierarchy below.
/**
* @defgroup cudf_classes Classes
* @{
* @defgroup column_classes Column
* @{
* @defgroup column_factories Factories
* @defgroup strings_classes Strings
* @defgroup dictionary_classes Dictionary
* @defgroup timestamp_classes Timestamp
* @defgroup lists_classes Lists
* @defgroup structs_classes Structs
* @}
* @defgroup table_classes Table
* @defgroup scalar_classes Scalar
* @{
* @defgroup scalar_factories Factories
* @}
* @defgroup fixed_point_classes Fixed Point
* @}
* @defgroup column_apis Column and Table
* @{
* @defgroup column_copy Copying
* @{
* @defgroup copy_concatenate Concatenating
* @defgroup copy_gather Gathering
* @{
* @file cudf/copying.hpp
* @}
* @defgroup copy_scatter Scattering
* @{
* @file cudf/copying.hpp
* @}
* @defgroup copy_slice Slicing
* @{
* @file cudf/copying.hpp
* @}
* @defgroup copy_split Splitting
* @{
* @file cudf/contiguous_split.hpp
* @file cudf/copying.hpp
* @}
* @defgroup copy_shift Shifting
* @{
* @file cudf/copying.hpp
* @}
* @}
* @defgroup column_nullmask Bitmask Operations
* @defgroup column_sort Sorting
* @defgroup column_search Searching
* @defgroup column_hash Hashing
* @defgroup column_merge Merging
* @defgroup column_join Joining
* @defgroup column_quantiles Quantiles
* @defgroup column_aggregation Aggregation
* @{
* @defgroup aggregation_factories Aggregation Factories
* @defgroup aggregation_reduction Reduction
* @defgroup aggregation_groupby GroupBy
* @defgroup aggregation_rolling Rolling Window
* @}
* @defgroup column_transformation Transformation
* @{
* @defgroup transformation_unaryops Unary Operations
* @defgroup transformation_binaryops Binary Operations
* @defgroup transformation_transform Transform
* @defgroup transformation_replace Replacing
* @defgroup transformation_fill Filling
* @}
* @defgroup column_reshape Reshaping
* @{
* @defgroup reshape_transpose Transpose
* @}
* @defgroup column_reorder Reordering
* @{
* @defgroup reorder_partition Partitioning
* @defgroup reorder_compact Stream Compaction
* @}
* @defgroup column_interop Interop
* @{
* @defgroup interop_dlpack DLPack
* @defgroup interop_arrow Arrow
* @}
* @}
* @defgroup datetime_apis DateTime
* @{
* @defgroup datetime_extract Extracting
* @defgroup datetime_compute Compute Day
* @}
* @defgroup strings_apis Strings
* @{
* @defgroup strings_case Case
* @defgroup strings_types Character Types
* @defgroup strings_combine Combining
* @defgroup strings_contains Searching
* @defgroup strings_convert Converting
* @defgroup strings_copy Copying
* @defgroup strings_slice Slicing
* @defgroup strings_find Finding
* @defgroup strings_modify Modifying
* @defgroup strings_replace Replacing
* @defgroup strings_split Splitting
* @defgroup strings_extract Extracting
* @defgroup strings_regex Regex
* @}
* @defgroup dictionary_apis Dictionary
* @{
* @defgroup dictionary_encode Encoding
* @defgroup dictionary_search Searching
* @defgroup dictionary_update Updating Keys
* @}
* @defgroup io_apis IO
* @{
* @defgroup io_readers Readers
* @defgroup io_writers Writers
* @defgroup io_datasources Data Sources
* @defgroup io_datasinks Data Sinks
* @}
* @defgroup json_apis JSON
* @{
* @defgroup json_object JSON Path
* @}
* @defgroup lists_apis Lists
* @{
* @defgroup lists_combine Combining
* @defgroup lists_modify Modifying
* @defgroup lists_extract Extracting
* @defgroup lists_filling Filling
* @defgroup lists_contains Searching
* @defgroup lists_gather Gathering
* @defgroup lists_elements Counting
* @defgroup lists_filtering Filtering
* @defgroup lists_sort Sorting
* @defgroup set_operations Set Operations
* @}
* @defgroup nvtext_apis NVText
* @{
* @defgroup nvtext_ngrams NGrams
* @defgroup nvtext_normalize Normalizing
* @defgroup nvtext_stemmer Stemming
* @defgroup nvtext_edit_distance Edit Distance
* @defgroup nvtext_tokenize Tokenizing
* @defgroup nvtext_replace Replacing
* @defgroup nvtext_minhash MinHashing
* @defgroup nvtext_jaccard Jaccard Index
* @}
* @defgroup utility_apis Utilities
* @{
* @defgroup utility_types Types
* @defgroup utility_dispatcher Type Dispatcher
* @defgroup utility_bitmask Bitmask
* @defgroup utility_error Exception
* @}
* @defgroup labeling_apis Labeling
* @{
* @defgroup label_bins Bin Labeling
* @}
*/
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/groupby.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/replace.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <utility>
#include <vector>
namespace cudf {
//! `groupby` APIs
namespace groupby {
namespace detail {
namespace sort {
class sort_groupby_helper;
} // namespace sort
} // namespace detail
/**
* @addtogroup aggregation_groupby
* @{
* @file
*/
/**
 * @brief Request for groupby aggregation(s) to perform on a column.
 *
 * The group membership of each `value[i]` is determined by the corresponding
 * row `i` in the original order of `keys` used to construct the
 * `groupby`. I.e., for each `aggregation`, `values[i]` is aggregated with all
 * other `values[j]` where rows `i` and `j` in `keys` are equivalent.
 *
 * `values.size()` must equal `keys.num_rows()`.
 */
struct aggregation_request {
  column_view values;  ///< The elements to aggregate
  std::vector<std::unique_ptr<groupby_aggregation>> aggregations;  ///< Desired aggregations
};
/**
 * @brief Request for groupby aggregation(s) for scanning a column.
 *
 * The group membership of each `value[i]` is determined by the corresponding
 * row `i` in the original order of `keys` used to construct the
 * `groupby`. I.e., for each `aggregation`, `values[i]` is aggregated with all
 * other `values[j]` where rows `i` and `j` in `keys` are equivalent.
 *
 * `values.size()` must equal `keys.num_rows()`.
 */
struct scan_request {
  column_view values;  ///< The elements to aggregate
  std::vector<std::unique_ptr<groupby_scan_aggregation>> aggregations;  ///< Desired aggregations
};
/**
 * @brief The result(s) of an `aggregation_request`
 *
 * For every `aggregation_request` given to `groupby::aggregate` an
 * `aggregation_result` will be returned. The `aggregation_result` holds the
 * resulting column(s) for each requested aggregation on the `request`s values.
 */
struct aggregation_result {
  /// Columns of results from an `aggregation_request`
  std::vector<std::unique_ptr<column>> results{};
};
/**
 * @brief Groups values by keys and computes aggregations on those groups.
 */
class groupby {
 public:
  // Not default-constructible, copyable, or movable: a groupby is always
  // constructed from a `keys` table and owns per-instance helper state.
  groupby() = delete;
  ~groupby();
  groupby(groupby const&) = delete;
  groupby(groupby&&) = delete;
  groupby& operator=(groupby const&) = delete;
  groupby& operator=(groupby&&) = delete;
  /**
   * @brief Construct a groupby object with the specified `keys`
   *
   * If the `keys` are already sorted, better performance may be achieved by
   * passing `keys_are_sorted == true` and indicating the ascending/descending
   * order of each column and null order in `column_order` and
   * `null_precedence`, respectively.
   *
   * @note This object does *not* maintain the lifetime of `keys`. It is the
   * user's responsibility to ensure the `groupby` object does not outlive the
   * data viewed by the `keys` `table_view`.
   *
   * @param keys Table whose rows act as the groupby keys
   * @param null_handling Indicates whether rows in `keys` that contain
   * NULL values should be included
   * @param keys_are_sorted Indicates whether rows in `keys` are already sorted
   * @param column_order If `keys_are_sorted == YES`, indicates whether each
   * column is ascending/descending. If empty, assumes all columns are
   * ascending. Ignored if `keys_are_sorted == false`.
   * @param null_precedence If `keys_are_sorted == YES`, indicates the ordering
   * of null values in each column. Else, ignored. If empty, assumes all columns
   * use `null_order::AFTER`. Ignored if `keys_are_sorted == false`.
   */
  explicit groupby(table_view const& keys,
                   null_policy null_handling = null_policy::EXCLUDE,
                   sorted keys_are_sorted = sorted::NO,
                   std::vector<order> const& column_order = {},
                   std::vector<null_order> const& null_precedence = {});
  /**
   * @brief Performs grouped aggregations on the specified values.
   *
   * The values to aggregate and the aggregations to perform are specified in an
   * `aggregation_request`. Each request contains a `column_view` of values to
   * aggregate and a set of `aggregation`s to perform on those elements.
   *
   * For each `aggregation` in a request, `values[i]` is aggregated with
   * all other `values[j]` where rows `i` and `j` in `keys` are equivalent.
   *
   * The `size()` of the request column must equal `keys.num_rows()`.
   *
   * For every `aggregation_request` an `aggregation_result` will be returned.
   * The `aggregation_result` holds the resulting column(s) for each requested
   * aggregation on the `request`s values. The order of the columns in each
   * result is the same order as was specified in the request.
   *
   * The returned `table` contains the group labels for each group, i.e., the
   * unique rows from `keys`. Element `i` across all aggregation results
   * belongs to the group at row `i` in the group labels table.
   *
   * The order of the rows in the group labels is arbitrary. Furthermore,
   * successive `groupby::aggregate` calls may return results in different
   * orders.
   *
   * @throws cudf::logic_error If `requests[i].values.size() !=
   * keys.num_rows()`.
   *
   * Example:
   * ```
   * Input:
   * keys:     {1 2 1 3 1}
   *           {1 2 1 4 1}
   * request:
   *   values: {3 1 4 9 2}
   *   aggregations: {{SUM}, {MIN}}
   *
   * result:
   *
   * keys:  {3 1 2}
   *        {4 1 2}
   * values:
   *   SUM: {9 9 1}
   *   MIN: {9 2 1}
   * ```
   *
   * @param requests The set of columns to aggregate and the aggregations to
   * perform
   * @param mr Device memory resource used to allocate the returned table and columns' device memory
   * @return Pair containing the table with each group's unique key and
   * a vector of aggregation_results for each request in the same order as
   * specified in `requests`.
   */
  std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> aggregate(
    host_span<aggregation_request const> requests,
    rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
  /**
   * @copydoc aggregate(host_span<aggregation_request const>, rmm::mr::device_memory_resource*)
   *
   * @param stream CUDA stream used for device memory operations and kernel launches.
   */
  std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> aggregate(
    host_span<aggregation_request const> requests,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
  /**
   * @brief Performs grouped scans on the specified values.
   *
   * The values to aggregate and the aggregations to perform are specified in an
   * `aggregation_request`. Each request contains a `column_view` of values to
   * aggregate and a set of `aggregation`s to perform on those elements.
   *
   * For each `aggregation` in a request, `values[i]` is scan aggregated with
   * all previous `values[j]` where rows `i` and `j` in `keys` are equivalent.
   *
   * The `size()` of the request column must equal `keys.num_rows()`.
   *
   * For every `aggregation_request` an `aggregation_result` will be returned.
   * The `aggregation_result` holds the resulting column(s) for each requested
   * aggregation on the `request`s values. The order of the columns in each
   * result is the same order as was specified in the request.
   *
   * The returned `table` contains the group labels for each row, i.e., the
   * `keys` given to groupby object. Element `i` across all aggregation results
   * belongs to the group at row `i` in the group labels table.
   *
   * The order of the rows in the group labels is arbitrary. Furthermore,
   * successive `groupby::scan` calls may return results in different orders.
   *
   * @throws cudf::logic_error If `requests[i].values.size() !=
   * keys.num_rows()`.
   *
   * Example:
   * ```
   * Input:
   * keys:     {1 2 1 3 1}
   *           {1 2 1 4 1}
   * request:
   *   values: {3 1 4 9 2}
   *   aggregations: {{SUM}, {MIN}}
   *
   * result:
   *
   * keys:  {3 1 1 1 2}
   *        {4 1 1 1 2}
   * values:
   *   SUM: {9 3 7 9 1}
   *   MIN: {9 3 3 2 1}
   * ```
   *
   * @param requests The set of columns to scan and the scans to perform
   * @param mr Device memory resource used to allocate the returned table and columns' device memory
   * @return Pair containing the table with each group's key and
   * a vector of aggregation_results for each request in the same order as
   * specified in `requests`.
   */
  std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> scan(
    host_span<scan_request const> requests,
    rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
  /**
   * @brief Performs grouped shifts for specified values.
   *
   * In `j`th column, for each group, `i`th element is determined by the `i - offsets[j]`th
   * element of the group. If `i - offsets[j] < 0 or >= group_size`, the value is determined by
   * @p fill_values[j].
   *
   * @note The first returned table stores the keys passed to the groupby object. Row `i` of the key
   * table corresponds to the group labels of row `i` in the shifted columns. The key order in
   * each group matches the input order. The order of each group is arbitrary. The group order
   * in successive calls to `groupby::shift` may be different.
   *
   * Example:
   * @code{.pseudo}
   * keys:       {1 4 1 3 4 4 1}
   *             {1 2 1 3 2 2 1}
   * values:     {3 9 1 4 2 5 7}
   *             {"a" "c" "bb" "ee" "z" "x" "d"}
   * offset:     {2, -1}
   * fill_value: {@, @}
   * result (group order maybe different):
   *   keys:     {3 1 1 1 4 4 4}
   *             {3 1 1 1 2 2 2}
   *   values:   {@ @ @ 3 @ @ 9}
   *             {@ "bb" "d" @ "z" "x" @}
   *
   * -------------------------------------------------
   * keys:       {1 4 1 3 4 4 1}
   *             {1 2 1 3 2 2 1}
   * values:     {3 9 1 4 2 5 7}
   *             {"a" "c" "bb" "ee" "z" "x" "d"}
   * offset:     {-2, 1}
   * fill_value: {-1, "42"}
   * result (group order maybe different):
   *   keys:     {3 1 1 1 4 4 4}
   *             {3 1 1 1 2 2 2}
   *   values:   {-1 7 -1 -1 5 -1 -1}
   *             {"42" "42" "a" "bb" "42" "c" "z"}
   *
   * @endcode
   *
   * @param values Table whose columns to be shifted
   * @param offsets The offsets by which to shift the input
   * @param fill_values Fill values for indeterminable outputs
   * @param mr Device memory resource used to allocate the returned table and columns' device memory
   * @return Pair containing the tables with each group's key and the columns shifted
   *
   * @throws cudf::logic_error if @p fill_value[i] dtype does not match @p values[i] dtype for
   * `i`th column
   */
  std::pair<std::unique_ptr<table>, std::unique_ptr<table>> shift(
    table_view const& values,
    host_span<size_type const> offsets,
    std::vector<std::reference_wrapper<scalar const>> const& fill_values,
    rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
  /**
   * @brief The grouped data corresponding to a groupby operation on a set of values.
   *
   * A `groups` object holds two tables of identical number of rows:
   * a table of grouped keys and a table of grouped values. In addition, it holds
   * a vector of integer offsets into the rows of the tables, such that
   * `offsets[i+1] - offsets[i]` gives the size of group `i`.
   */
  struct groups {
    std::unique_ptr<table> keys;     ///< Table of grouped keys
    std::vector<size_type> offsets;  ///< Group Offsets
    std::unique_ptr<table> values;   ///< Table of grouped values
  };
  /**
   * @brief Get the grouped keys and values corresponding to a groupby operation on a set of values
   *
   * Returns a `groups` object representing the grouped keys and values.
   * If values is not provided, only a grouping of the keys is performed,
   * and the `values` of the `groups` object will be `nullptr`.
   *
   * @param values Table representing values on which a groupby operation is to be performed
   * @param mr Device memory resource used to allocate the returned tables' device memory in the
   * returned groups
   * @return A `groups` object representing grouped keys and values
   */
  groups get_groups(cudf::table_view values = {},
                    rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
  /**
   * @brief Performs grouped replace nulls on @p value
   *
   * For each `value[i] == NULL` in group `j`, `value[i]` is replaced with the first non-null value
   * in group `j` that precedes or follows `value[i]`. If a non-null value is not found in the
   * specified direction, `value[i]` is left NULL.
   *
   * The returned pair contains a column of the sorted keys and the result column. In result column,
   * values of the same group are in contiguous memory. In each group, the order of values maintain
   * their original order. The order of groups are not guaranteed.
   *
   * Example:
   * @code{.pseudo}
   *
   * //Inputs:
   * keys:             {3 3 1 3 1 3 4}
   *                   {2 2 1 2 1 2 5}
   * values:           {3 4 7 @ @ @ @}
   *                   {@ @ @ "x" "tt" @ @}
   * replace_policies: {FORWARD, BACKWARD}
   *
   * //Outputs (group orders may be different):
   * keys:             {3 3 3 3 1 1 4}
   *                   {2 2 2 2 1 1 5}
   * result:           {3 4 4 4 7 7 @}
   *                   {"x" "x" "x" @ "tt" "tt" @}
   * @endcode
   *
   * @param[in] values A table whose column null values will be replaced
   * @param[in] replace_policies Specify the position of replacement values relative to null values,
   * one for each column
   * @param[in] mr Device memory resource used to allocate device memory of the returned column
   *
   * @return Pair that contains a table with the sorted keys and the result column
   */
  std::pair<std::unique_ptr<table>, std::unique_ptr<table>> replace_nulls(
    table_view const& values,
    host_span<cudf::replace_policy const> replace_policies,
    rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
 private:
  table_view _keys;                                      ///< Keys that determine grouping
  null_policy _include_null_keys{null_policy::EXCLUDE};  ///< Include rows in keys
                                                         ///< with NULLs
  sorted _keys_are_sorted{sorted::NO};  ///< Whether or not the keys are sorted
  std::vector<order> _column_order{};   ///< If keys are sorted, indicates
                                        ///< the order of each column
  std::vector<null_order> _null_precedence{};  ///< If keys are sorted,
                                               ///< indicates null order
                                               ///< of each column
  std::unique_ptr<detail::sort::sort_groupby_helper>
    _helper;  ///< Helper object
              ///< used by sort based implementation
  /**
   * @brief Get the sort helper object
   *
   * The object is constructed on first invocation and subsequent invocations
   * of this function return the memoized object.
   */
  detail::sort::sort_groupby_helper& helper();
  /**
   * @brief Dispatches to the appropriate implementation to satisfy the
   * aggregation requests.
   */
  std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> dispatch_aggregation(
    host_span<aggregation_request const> requests,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr);
  // Sort-based groupby
  std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> sort_aggregate(
    host_span<aggregation_request const> requests,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr);
  std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> sort_scan(
    host_span<scan_request const> requests,
    rmm::cuda_stream_view stream,
    rmm::mr::device_memory_resource* mr);
};
/** @} */
} // namespace groupby
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/filling.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
* @addtogroup transformation_fill
* @{
* @file
* @brief Column APIs for fill, repeat, and sequence
*/
/**
 * @brief Fills a range of elements in-place in a column with a scalar value.
 *
 * Fills N elements of @p destination starting at @p begin with @p value, where
 * N = (@p end - @p begin).
 *
 * Overwrites the range of elements in @p destination indicated by the indices
 * [@p begin, @p end) with @p value. Use the out-of-place fill function
 * returning std::unique_ptr<column> for use cases requiring memory
 * reallocation.
 *
 * @throws cudf::logic_error if memory reallocation is required (e.g. for
 * variable width types).
 * @throws cudf::logic_error for invalid range (if @p begin < 0,
 * @p begin > @p end, or @p end > @p destination.size()).
 * @throws cudf::logic_error if @p destination and @p value have different
 * types.
 * @throws cudf::logic_error if @p value is invalid but @p destination is not
 * nullable.
 *
 * @param destination The preallocated column to fill into
 * @param begin The starting index of the fill range (inclusive)
 * @param end The index of the last element in the fill range (exclusive)
 * @param value The scalar value to fill
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
void fill_in_place(mutable_column_view& destination,
                   size_type begin,
                   size_type end,
                   scalar const& value,
                   rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
 * @brief Fills a range of elements in a column out-of-place with a scalar
 * value.
 *
 * Creates a new column as-if an in-place fill was performed into @p input;
 * i.e. it is as if a copy of @p input was created first and then the elements
 * indicated by the indices [@p begin, @p end) were overwritten by @p value.
 *
 * @throws cudf::logic_error for invalid range (if @p begin < 0,
 * @p begin > @p end, or @p end > @p input.size()).
 * @throws cudf::logic_error if @p input and @p value have different
 * types.
 *
 * @param input The input column used to create a new column. The new column
 * is created by replacing the values of @p input in the specified range with
 * @p value.
 * @param begin The starting index of the fill range (inclusive)
 * @param end The index of the last element in the fill range (exclusive)
 * @param value The scalar value to fill
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return The result output column
 */
std::unique_ptr<column> fill(
  column_view const& input,
  size_type begin,
  size_type end,
  scalar const& value,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Repeat rows of a Table.
 *
 * Creates a new table by repeating the rows of @p input_table. The number of
 * repetitions of each element is defined by the value at the corresponding
 * index of @p count
 * Example:
 * ```
 * in = [4,5,6]
 * count = [1,2,3]
 * return = [4,5,5,6,6,6]
 * ```
 * @p count should not have null values; should not contain negative values;
 * and the sum of count elements should not overflow the size_type's limit.
 * The behavior of this function is undefined if @p count has negative values
 * or the sum overflows.
 *
 * @throws cudf::logic_error if the data type of @p count is not size_type.
 * @throws cudf::logic_error if @p input_table and @p count have different
 * number of rows.
 * @throws cudf::logic_error if @p count has null values.
 *
 * @param input_table Input table
 * @param count Non-nullable column of type size_type giving the repetition count per row
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return The result table containing the repetitions
 */
std::unique_ptr<table> repeat(
  table_view const& input_table,
  column_view const& count,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Repeat rows of a Table.
 *
 * Creates a new table by repeating @p count times the rows of @p input_table.
 * Example:
 * ```
 * in = [4,5,6]
 * count = 2
 * return = [4,4,5,5,6,6]
 * ```
 * @throws cudf::logic_error if @p count is negative.
 * @throws std::overflow_error if @p input_table.num_rows() * @p count overflows size_type.
 *
 * @param input_table Input table
 * @param count Number of repetitions
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return The result table containing the repetitions
 */
std::unique_ptr<table> repeat(
  table_view const& input_table,
  size_type count,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Fills a column with a sequence of value specified by an initial value and a step.
 *
 * Creates a new column and fills with @p size values starting at @p init and
 * incrementing by @p step, generating the sequence
 * [ init, init+step, init+2*step, ... init + (size - 1)*step]
 *
 * ```
 * size = 3
 * init = 0
 * step = 2
 * return = [0, 2, 4]
 * ```
 * @throws cudf::logic_error if @p init and @p step are not the same type.
 * @throws cudf::logic_error if scalar types are not numeric.
 * @throws cudf::logic_error if @p size is < 0.
 *
 * @param size Size of the output column
 * @param init First value in the sequence
 * @param step Increment value
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return The result column containing the generated sequence
 */
std::unique_ptr<column> sequence(
  size_type size,
  scalar const& init,
  scalar const& step,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Fills a column with a sequence of value specified by an initial value and a step of 1.
 *
 * Creates a new column and fills with @p size values starting at @p init and
 * incrementing by 1, generating the sequence
 * [ init, init+1, init+2, ... init + (size - 1)]
 *
 * ```
 * size = 3
 * init = 0
 * return = [0, 1, 2]
 * ```
 * @throws cudf::logic_error if @p init is not numeric.
 * @throws cudf::logic_error if @p size is < 0.
 *
 * @param size Size of the output column
 * @param init First value in the sequence
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return The result column containing the generated sequence
 */
std::unique_ptr<column> sequence(
  size_type size,
  scalar const& init,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Generate a sequence of timestamps beginning at `init` and incrementing by `months` for
 * each successive element, i.e., `output[i] = init + i * months` for `i` in `[0, size)`.
 *
 * If a given date is invalid, the date is scaled back to the last available day of that month.
 *
 * Example:
 * ```
 * size = 3
 * init = 2020-01-31 08:00:00
 * months = 1
 * return = [2020-01-31 08:00:00, 2020-02-29 08:00:00, 2020-03-31 08:00:00]
 * ```
 *
 * @throw cudf::logic_error if input datatype is not a TIMESTAMP
 *
 * @param size Number of timestamps to generate
 * @param init The initial timestamp
 * @param months Months to increment
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @return Timestamps column with sequences of months
 */
std::unique_ptr<cudf::column> calendrical_month_sequence(
  size_type size,
  scalar const& init,
  size_type months,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/transpose.hpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
/**
* @addtogroup reshape_transpose
* @{
* @file
*/
/**
 * @brief Transposes a table.
 *
 * Stores output in a contiguous column, exposing the transposed table as
 * a `table_view`.
 *
 * @note The returned `table_view` is composed of views into the single
 * owning `column`; it is only valid while that column is alive.
 *
 * @throw cudf::logic_error if column types are non-homogeneous
 * @throw cudf::logic_error if column types are non-fixed-width
 *
 * @param[in] input A table (M cols x N rows) to be transposed
 * @param[in] mr Device memory resource used to allocate the device memory of returned value
 * @return The transposed input (N cols x M rows) as a `column` and
 * `table_view`, representing the owner and transposed table,
 * respectively.
 */
std::unique_ptr<std::pair<std::unique_ptr<column>, table_view>>::element_type transpose(
  table_view const& input,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/hashing.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
/**
* @addtogroup column_hash
* @{
* @file
*/
/**
 * @brief Type of hash value
 *
 */
using hash_value_type = uint32_t;
/**
 * @brief Identifies the hash function to be used
 *
 */
enum class hash_id {
  HASH_IDENTITY = 0,   ///< Identity hash function that simply returns the key to be hashed
  HASH_MURMUR3,        ///< Murmur3 hash function
  HASH_SPARK_MURMUR3,  ///< Spark Murmur3 hash function
  HASH_MD5             ///< MD5 hash function
};
/**
 * @brief The default seed value for hash functions
 */
static constexpr uint32_t DEFAULT_HASH_SEED = 0;
/**
 * @brief Computes the hash value of each row in the input set of columns.
 *
 * @deprecated Since 23.08
 *
 * @param input The table of columns to hash
 * @param hash_function The hash function enum to use
 * @param seed Optional seed value to use for the hash function
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns A column where each row is the hash of a row from the input
 */
std::unique_ptr<column> hash(
  table_view const& input,
  hash_id hash_function = hash_id::HASH_MURMUR3,
  uint32_t seed = DEFAULT_HASH_SEED,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
//! Hash APIs
namespace hashing {
/**
 * @brief Computes the MurmurHash3 32-bit hash value of each row in the given table
 *
 * This function computes the hash of each column using the `seed` for the first column
 * and the resulting hash as a seed for the next column and so on.
 * The result is a uint32 value for each row.
 *
 * @param input The table of columns to hash
 * @param seed Optional seed value to use for the hash function
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns A column where each row is the hash of a row from the input
 */
std::unique_ptr<column> murmurhash3_x86_32(
  table_view const& input,
  uint32_t seed = DEFAULT_HASH_SEED,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the MurmurHash3 64-bit hash value of each row in the given table
 *
 * This function takes a 64-bit seed value and returns hash values using the
 * MurmurHash3_x64_128 algorithm. The hash produces two uint64 values per row.
 *
 * @param input The table of columns to hash
 * @param seed Optional seed value to use for the hash function
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns A table of two UINT64 columns
 */
std::unique_ptr<table> murmurhash3_x64_128(
  table_view const& input,
  uint64_t seed = DEFAULT_HASH_SEED,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the MurmurHash3 32-bit hash value of each row in the given table
 *
 * This function computes the hash similar to MurmurHash3_x86_32 with special processing
 * to match Spark's implementation results.
 *
 * @param input The table of columns to hash
 * @param seed Optional seed value to use for the hash function
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns A column where each row is the hash of a row from the input
 */
std::unique_ptr<column> spark_murmurhash3_x86_32(
  table_view const& input,
  uint32_t seed = DEFAULT_HASH_SEED,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the MD5 hash value of each row in the given table
 *
 * @param input The table of columns to hash
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns A column where each row is the hash of a row from the input
 */
std::unique_ptr<column> md5(
  table_view const& input,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the XXHash_64 hash value of each row in the given table
 *
 * This function takes a 64-bit seed value and returns a column of type UINT64.
 *
 * @param input The table of columns to hash
 * @param seed Optional seed value to use for the hash function
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns A column where each row is the hash of a row from the input
 */
std::unique_ptr<column> xxhash_64(
  table_view const& input,
  uint64_t seed = DEFAULT_HASH_SEED,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
}  // namespace hashing
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/quantiles.hpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/tdigest/tdigest_column_view.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
/**
* @addtogroup column_quantiles
* @{
* @file
*/
/**
 * @brief Computes quantiles with interpolation.
 *
 * Computes the specified quantiles by interpolating values between which they
 * lie, using the interpolation strategy specified in `interp`.
 *
 * @param[in] input Column from which to compute quantile values
 * @param[in] q Specified quantiles in range [0, 1]
 * @param[in] interp Strategy used to select between values adjacent to
 * a specified quantile.
 * @param[in] ordered_indices Column containing the sorted order of `input`.
 * If the column is empty, all `input` values are
 * used in existing order. Indices must be in range
 * [0, `input.size()`), but are not required to be
 * unique. Values not indexed by this column will be
 * ignored.
 * @param[in] exact If true, returns doubles.
 * If false, returns same type as input.
 * @param[in] mr Device memory resource used to allocate the returned column's
 * device memory
 * @returns Column of specified quantiles, with nulls for indeterminable values
 */
std::unique_ptr<column> quantile(
  column_view const& input,
  std::vector<double> const& q,
  interpolation interp = interpolation::LINEAR,
  column_view const& ordered_indices = {},
  bool exact = true,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns the rows of the input corresponding to the requested quantiles.
 *
 * Quantiles are cut points that divide the range of a dataset into continuous
 * intervals. e.g: quartiles are the three cut points that divide a dataset into
 * four equal-sized groups. See https://en.wikipedia.org/wiki/Quantile
 *
 * The indices used to gather rows are computed by interpolating between the
 * index on either side of the desired quantile. Since some columns may be
 * non-arithmetic, interpolation between rows is limited to non-arithmetic
 * strategies.
 *
 * Non-arithmetic interpolation strategies include HIGHER, LOWER, and NEAREST.
 *
 * quantiles `<= 0` correspond to row `0`. (first)
 * quantiles `>= 1` correspond to row `input.size() - 1`. (last)
 *
 * @param input Table used to compute quantile rows
 * @param q Desired quantiles in range [0, 1]
 * @param interp Strategy used to select between the two rows on either
 * side of the desired quantile.
 * @param is_input_sorted Indicates if the input has been pre-sorted
 * @param column_order The desired sort order for each column
 * @param null_precedence The desired order of null compared to other elements
 * @param mr Device memory resource used to allocate the returned table's device memory
 *
 * @returns Table of specified quantiles, with nulls for indeterminable values
 * @throws cudf::logic_error if `interp` is an arithmetic interpolation strategy
 * @throws cudf::logic_error if `input` is empty
 */
std::unique_ptr<table> quantiles(
  table_view const& input,
  std::vector<double> const& q,
  interpolation interp = interpolation::NEAREST,
  cudf::sorted is_input_sorted = sorted::NO,
  std::vector<order> const& column_order = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Calculate approximate percentiles on an input tdigest column.
 *
 * tdigest (https://arxiv.org/pdf/1902.04023.pdf) columns are produced specifically
 * by the TDIGEST and MERGE_TDIGEST aggregations. These columns represent
 * compressed representations of a very large input data set that can be
 * queried for quantile information.
 *
 * Produces a LIST column where each row `i` represents output from querying the
 * corresponding tdigest from `input` row `i`. The length of each output list
 * is the number of percentiles specified in `percentiles`.
 *
 * @param input tdigest input data. One tdigest per row
 * @param percentiles Desired percentiles in range [0, 1]
 * @param mr Device memory resource used to allocate the returned column's device
 * memory
 *
 * @throws cudf::logic_error if `input` is not a valid tdigest column.
 * @throws cudf::logic_error if `percentiles` is not a FLOAT64 column.
 *
 * @returns LIST Column containing requested percentile values as FLOAT64
 */
std::unique_ptr<column> percentile_approx(
  tdigest::tdigest_column_view const& input,
  column_view const& percentiles,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/unary.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
* @addtogroup transformation_unaryops
* @{
* @file
* @brief Column APIs for unary ops
*/
/**
 * @brief Types of unary operations that can be performed on data.
 */
enum class unary_operator : int32_t {
  SIN,         ///< Trigonometric sine
  COS,         ///< Trigonometric cosine
  TAN,         ///< Trigonometric tangent
  ARCSIN,      ///< Trigonometric sine inverse
  ARCCOS,      ///< Trigonometric cosine inverse
  ARCTAN,      ///< Trigonometric tangent inverse
  SINH,        ///< Hyperbolic sine
  COSH,        ///< Hyperbolic cosine
  TANH,        ///< Hyperbolic tangent
  ARCSINH,     ///< Hyperbolic sine inverse
  ARCCOSH,     ///< Hyperbolic cosine inverse
  ARCTANH,     ///< Hyperbolic tangent inverse
  EXP,         ///< Exponential (base e, Euler number)
  LOG,         ///< Natural Logarithm (base e)
  SQRT,        ///< Square-root (x^0.5)
  CBRT,        ///< Cube-root (x^(1.0/3))
  CEIL,        ///< Smallest integer value not less than arg
  FLOOR,       ///< Largest integer value not greater than arg
  ABS,         ///< Absolute value
  RINT,        ///< Rounds the floating-point argument arg to an integer value
  BIT_INVERT,  ///< Bitwise Not (~)
  NOT,         ///< Logical Not (!)
};
/**
* @brief Performs unary op on all values in column
*
* Note: For `decimal32` and `decimal64`, only `ABS`, `CEIL` and `FLOOR` are supported.
*
* @param input A `column_view` as input
* @param op operation to perform
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns Column of same size as `input` containing result of the operation
*/
std::unique_ptr<cudf::column> unary_operation(
cudf::column_view const& input,
cudf::unary_operator op,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a column of `type_id::BOOL8` elements where for every element in `input` `true`
* indicates the value is null and `false` indicates the value is valid.
*
* @param input A `column_view` as input
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A non-nullable column of `type_id::BOOL8` elements with `true`
* representing `null` values.
*/
std::unique_ptr<cudf::column> is_null(
cudf::column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a column of `type_id::BOOL8` elements where for every element in `input` `true`
* indicates the value is valid and `false` indicates the value is null.
*
* @param input A `column_view` as input
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A non-nullable column of `type_id::BOOL8` elements with `false`
* representing `null` values.
*/
std::unique_ptr<cudf::column> is_valid(
cudf::column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Casts data from dtype specified in input to dtype specified in output.
*
* Supports only fixed-width types.
*
* @param input Input column
* @param out_type Desired datatype of output column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns Column of same size as `input` containing result of the cast operation
* @throw cudf::logic_error if `out_type` is not a fixed-width type
*/
std::unique_ptr<column> cast(
column_view const& input,
data_type out_type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a column of `type_id::BOOL8` elements indicating the presence of `NaN` values
* in a column of floating point values.
* The output element at row `i` is `true` if the element in `input` at row i is `NAN`, else `false`
*
* @throws cudf::logic_error if `input` is a non-floating point type
*
* @param input A column of floating-point elements
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A non-nullable column of `type_id::BOOL8` elements with `true` representing `NAN` values
*/
std::unique_ptr<column> is_nan(
cudf::column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a column of `type_id::BOOL8` elements indicating the absence of `NaN` values
* in a column of floating point values.
* The output element at row `i` is `false` if the element in `input` at row i is `NAN`, else `true`
*
* @throws cudf::logic_error if `input` is a non-floating point type
*
* @param input A column of floating-point elements
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A non-nullable column of `type_id::BOOL8` elements with `false` representing `NAN`
* values
*/
std::unique_ptr<column> is_not_nan(
cudf::column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/copying.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>
namespace cudf {
/**
* @addtogroup column_copy
* @{
* @file
* @brief Column APIs for gather, scatter, split, slice, etc.
*/
/**
* @brief Policy to account for possible out-of-bounds indices
*
* `NULLIFY` means to nullify output values corresponding to out-of-bounds gather_map values.
* `DONT_CHECK` means do not check whether the indices are out-of-bounds, for better performance.
*/
enum class out_of_bounds_policy : bool {
NULLIFY, ///< Output values corresponding to out-of-bounds indices are null
DONT_CHECK ///< No bounds checking is performed, better performance
};
/**
* @brief Gathers the specified rows (including null values) of a set of columns.
*
* @ingroup copy_gather
*
* Gathers the rows of the source columns according to `gather_map` such that row "i"
* in the resulting table's columns will contain row "gather_map[i]" from the source columns.
* The number of rows in the result table will be equal to the number of elements in
* `gather_map`.
*
* A negative value `i` in the `gather_map` is interpreted as `i+n`, where
* `n` is the number of rows in the `source_table`.
*
* For dictionary columns, the keys column component is copied and not trimmed
* if the gather results in abandoned key elements.
*
* @throws cudf::logic_error if gather_map contains null values.
*
* @param source_table The input columns whose rows will be gathered
* @param gather_map View into a non-nullable column of integral indices that maps the
* rows in the source columns to rows in the destination columns.
* @param bounds_policy Policy to apply to account for possible out-of-bounds indices
* `DONT_CHECK` skips all bounds checking for gather map values. `NULLIFY` coerces rows that
* corresponds to out-of-bounds indices in the gather map to be null elements. Callers should
* use `DONT_CHECK` when they are certain that the gather_map contains only valid indices for
* better performance. If `policy` is set to `DONT_CHECK` and there are out-of-bounds indices
* in the gather map, the behavior is undefined. Defaults to `DONT_CHECK`.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Result of the gather
*/
std::unique_ptr<table> gather(
table_view const& source_table,
column_view const& gather_map,
out_of_bounds_policy bounds_policy = out_of_bounds_policy::DONT_CHECK,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Reverses the rows within a table.
*
* Creates a new table that is the reverse of @p source_table.
* Example:
* ```
* source = [[4,5,6], [7,8,9], [10,11,12]]
* return = [[6,5,4], [9,8,7], [12,11,10]]
* ```
*
* @param source_table Table that will be reversed
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Reversed table
*/
std::unique_ptr<table> reverse(
table_view const& source_table,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Reverses the elements of a column
*
* Creates a new column that is the reverse of @p source_column.
* Example:
* ```
* source = [4,5,6]
* return = [6,5,4]
* ```
*
* @param source_column Column that will be reversed
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Reversed column
*/
std::unique_ptr<column> reverse(
column_view const& source_column,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Scatters the rows of the source table into a copy of the target table
* according to a scatter map.
*
* @ingroup copy_scatter
*
* Scatters values from the source table into the target table out-of-place,
* returning a "destination table". The scatter is performed according to a
* scatter map such that row `scatter_map[i]` of the destination table gets row
* `i` of the source table. All other rows of the destination table equal
* corresponding rows of the target table.
*
* The number of columns in source must match the number of columns in target
* and their corresponding datatypes must be the same.
*
* If the same index appears more than once in the scatter map, the result is
* undefined.
*
* If any values in `scatter_map` are outside of the interval [-n, n) where `n`
* is the number of rows in the `target` table, behavior is undefined.
*
* A negative value `i` in the `scatter_map` is interpreted as `i+n`, where `n`
* is the number of rows in the `target` table.
*
* @param source The input columns containing values to be scattered into the
* target columns
* @param scatter_map A non-nullable column of integral indices that maps the
* rows in the source table to rows in the target table. The size must be equal
* to or less than the number of elements in the source columns.
* @param target The set of columns into which values from the source_table
* are to be scattered
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Result of scattering values from source to target
*/
std::unique_ptr<table> scatter(
table_view const& source,
column_view const& scatter_map,
table_view const& target,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Scatters a row of scalar values into a copy of the target table
* according to a scatter map.
*
* @ingroup copy_scatter
*
* Scatters values from the source row into the target table out-of-place,
* returning a "destination table". The scatter is performed according to a
* scatter map such that row `scatter_map[i]` of the destination table is
* replaced by the source row. All other rows of the destination table equal
* corresponding rows of the target table.
*
* The number of elements in source must match the number of columns in target
* and their corresponding datatypes must be the same.
*
* If the same index appears more than once in the scatter map, the result is
* undefined.
*
* If any values in `scatter_map` are outside of the interval [-n, n) where `n`
* is the number of rows in the `target` table, behavior is undefined.
*
* @param source The input scalars containing values to be scattered into the
* target columns
* @param indices A non-nullable column of integral indices that indicate
* the rows in the target table to be replaced by source.
* @param target The set of columns into which values from the source_table
* are to be scattered
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table's device memory
* @return Result of scattering values from source to target
*/
std::unique_ptr<table> scatter(
std::vector<std::reference_wrapper<scalar const>> const& source,
column_view const& indices,
table_view const& target,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Indicates when to allocate a mask, based on an existing mask.
*/
enum class mask_allocation_policy {
NEVER, ///< Do not allocate a null mask, regardless of input
RETAIN, ///< Allocate a null mask if the input contains one
ALWAYS ///< Allocate a null mask, regardless of input
};
/**
* @brief Initializes and returns an empty column of the same type as the `input`.
*
* @param[in] input Immutable view of input column to emulate
* @return An empty column of same type as `input`
*/
std::unique_ptr<column> empty_like(column_view const& input);
/**
* @brief Initializes and returns an empty column of the same type as the `input`.
*
* @param[in] input Scalar to emulate
* @return An empty column of same type as `input`
*/
std::unique_ptr<column> empty_like(scalar const& input);
/**
 * @brief Creates an uninitialized new column of the same size and type as the `input`.
 *
 * Supports only fixed-width types.
 *
 * If the `mask_alloc` allocates a validity mask that mask is also uninitialized
 * and the validity bits and the null count should be set by the caller.
 *
 * @param input Immutable view of input column to emulate
 * @param mask_alloc Optional, Policy for allocating null mask. Defaults to RETAIN
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A column with sufficient uninitialized capacity to hold the same
 * number of elements as `input` of the same type as `input.type()`
 */
std::unique_ptr<column> allocate_like(
  column_view const& input,
  mask_allocation_policy mask_alloc = mask_allocation_policy::RETAIN,
  rmm::cuda_stream_view stream = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates an uninitialized new column of the specified size and same type as the `input`.
*
* Supports only fixed-width types.
*
* If the `mask_alloc` allocates a validity mask that mask is also uninitialized
* and the validity bits and the null count should be set by the caller.
*
* @param input Immutable view of input column to emulate
* @param size The desired number of elements that the new column should have capacity for
* @param mask_alloc Optional, Policy for allocating null mask. Defaults to RETAIN
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return A column with sufficient uninitialized capacity to hold the specified number of elements
* as `input` of the same type as `input.type()`
*/
std::unique_ptr<column> allocate_like(
column_view const& input,
size_type size,
mask_allocation_policy mask_alloc = mask_allocation_policy::RETAIN,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a table of empty columns with the same types as the `input_table`
*
* Creates the `cudf::column` objects, but does not allocate any underlying device
* memory for the column's data or bitmask.
*
* @param[in] input_table Immutable view of input table to emulate
* @return A table of empty columns with the same types as the columns in
* `input_table`
*/
std::unique_ptr<table> empty_like(table_view const& input_table);
/**
* @brief Copies a range of elements in-place from one column to another.
*
* Overwrites the range of elements in @p target indicated by the indices
* [@p target_begin, @p target_begin + N) with the elements from @p source
* indicated by the indices [@p source_begin, @p source_end) (where N =
* (@p source_end - @p source_begin)). Use the out-of-place copy function
* returning std::unique_ptr<column> for uses cases requiring memory
* reallocation. For example for strings columns and other variable-width types.
*
* If @p source and @p target refer to the same elements and the ranges overlap,
* the behavior is undefined.
*
* @throws cudf::logic_error if memory reallocation is required (e.g. for
* variable width types).
* @throws cudf::logic_error for invalid range (if
* @p source_begin > @p source_end, @p source_begin < 0,
* @p source_begin >= @p source.size(), @p source_end > @p source.size(),
* @p target_begin < 0, target_begin >= @p target.size(), or
* @p target_begin + (@p source_end - @p source_begin) > @p target.size()).
* @throws cudf::logic_error if @p target and @p source have different types.
* @throws cudf::logic_error if @p source has null values and @p target is not
* nullable.
*
* @param source The column to copy from
* @param target The preallocated column to copy into
* @param source_begin The starting index of the source range (inclusive)
* @param source_end The index of the last element in the source range
* (exclusive)
* @param target_begin The starting index of the target range (inclusive)
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void copy_range_in_place(column_view const& source,
mutable_column_view& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Copies a range of elements out-of-place from one column to another.
*
* Creates a new column as if an in-place copy was performed into @p target.
* A copy of @p target is created first and then the elements indicated by the
* indices [@p target_begin, @p target_begin + N) were copied from the elements
* indicated by the indices [@p source_begin, @p source_end) of @p source
* (where N = (@p source_end - @p source_begin)). Elements outside the range are
* copied from @p target into the returned new column target.
*
* If @p source and @p target refer to the same elements and the ranges overlap,
* the behavior is undefined.
*
* @throws cudf::logic_error for invalid range (if
* @p source_begin > @p source_end, @p source_begin < 0,
* @p source_begin >= @p source.size(), @p source_end > @p source.size(),
* @p target_begin < 0, target_begin >= @p target.size(), or
* @p target_begin + (@p source_end - @p source_begin) > @p target.size()).
* @throws cudf::logic_error if @p target and @p source have different types.
*
* @param source The column to copy from inside the range
* @param target The column to copy from outside the range
* @param source_begin The starting index of the source range (inclusive)
* @param source_end The index of the last element in the source range
* (exclusive)
* @param target_begin The starting index of the target range (inclusive)
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return The result target column
*/
std::unique_ptr<column> copy_range(
column_view const& source,
column_view const& target,
size_type source_begin,
size_type source_end,
size_type target_begin,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a new column by shifting all values by an offset.
*
* @ingroup copy_shift
*
* Elements will be determined by `output[idx] = input[idx - offset]`.
* Some elements in the output may be indeterminable from the input. For those
* elements, the value will be determined by `fill_values`.
*
* @code{.pseudo}
* Examples
* -------------------------------------------------
* input = [0, 1, 2, 3, 4]
* offset = 3
* fill_values = @
* return = [@, @, @, 0, 1]
* -------------------------------------------------
* input = [5, 4, 3, 2, 1]
* offset = -2
* fill_values = 7
* return = [3, 2, 1, 7, 7]
* @endcode
*
* @note if the input is nullable, the output will be nullable.
* @note if the fill value is null, the output will be nullable.
*
* @param input Column to be shifted
* @param offset The offset by which to shift the input
* @param fill_value Fill value for indeterminable outputs
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned result's device memory
*
* @throw cudf::logic_error if @p input dtype is neither fixed-width nor string type
* @throw cudf::logic_error if @p fill_value dtype does not match @p input dtype.
*
* @return The shifted column
*/
std::unique_ptr<column> shift(
column_view const& input,
size_type offset,
scalar const& fill_value,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Slices a `column_view` into a set of `column_view`s according to a set of indices.
*
* @ingroup copy_slice
*
* The returned views of `input` are constructed from an even number indices where
* the `i`th returned `column_view` views the elements in `input` indicated by the range
* `[indices[2*i], indices[(2*i)+1])`.
*
* For all `i` it is expected `indices[i] <= input.size()`
* For all `i%2==0`, it is expected that `indices[i] <= indices[i+1]`
*
* @note It is the caller's responsibility to ensure that the returned views
* do not outlive the viewed device memory.
*
* @code{.pseudo}
* input: {10, 12, 14, 16, 18, 20, 22, 24, 26, 28}
* indices: {1, 3, 5, 9, 2, 4, 8, 8}
* output: {{12, 14}, {20, 22, 24, 26}, {14, 16}, {}}
* @endcode
*
* @throws cudf::logic_error if `indices` size is not even.
* @throws cudf::logic_error When the values in the pair are strictly decreasing.
* @throws cudf::logic_error When any of the values in the pair don't belong to
* the range [0, input.size()).
*
* @param input View of column to slice
* @param indices Indices used to take slices of `input`
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Vector of views of `input` indicated by the ranges in `indices`
*/
std::vector<column_view> slice(column_view const& input,
host_span<size_type const> indices,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @ingroup copy_slice
* @copydoc cudf::slice(column_view const&, host_span<size_type const>, rmm::cuda_stream_view)
*/
std::vector<column_view> slice(column_view const& input,
std::initializer_list<size_type> indices,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
 * @brief Slices a `table_view` into a set of `table_view`s according to a set of indices.
 *
 * @ingroup copy_slice
 *
 * The returned views of `input` are constructed from an even number indices where
 * the `i`th returned `table_view` views the elements in `input` indicated by the range
 * `[indices[2*i], indices[(2*i)+1])`.
 *
 * For all `i` it is expected `indices[i] <= input.size()`
 * For all `i%2==0`, it is expected that `indices[i] <= indices[i+1]`
 *
 * @note It is the caller's responsibility to ensure that the returned views
 * do not outlive the viewed device memory.
 *
 * @code{.pseudo}
 * input: [{10, 12, 14, 16, 18, 20, 22, 24, 26, 28},
 * {50, 52, 54, 56, 58, 60, 62, 64, 66, 68}]
 * indices: {1, 3, 5, 9, 2, 4, 8, 8}
 * output: [{{12, 14}, {20, 22, 24, 26}, {14, 16}, {}},
 * {{52, 54}, {60, 62, 64, 66}, {54, 56}, {}}]
 * @endcode
 *
 * @throws cudf::logic_error if `indices` size is not even.
 * @throws cudf::logic_error When the values in the pair are strictly decreasing.
 * @throws cudf::logic_error When any of the values in the pair don't belong to
 * the range [0, input.size()).
 *
 * @param input View of table to slice
 * @param indices Indices used to take slices of `input`
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return Vector of views of `input` indicated by the ranges in `indices`
 */
std::vector<table_view> slice(table_view const& input,
                              host_span<size_type const> indices,
                              rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @ingroup copy_slice
* @copydoc cudf::slice(table_view const&, host_span<size_type const>, rmm::cuda_stream_view stream)
*/
std::vector<table_view> slice(table_view const& input,
std::initializer_list<size_type> indices,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Splits a `column_view` into a set of `column_view`s according to a set of indices
* derived from expected splits.
*
* @ingroup copy_split
*
 * The returned views of `input` are constructed from the vector of splits, which indicates
 * where each split should occur. The `i`th returned `column_view` is sliced as
 * `[0, splits[i])` if `i`=0, else `[splits[i-1], input.size())` if `i` is the last view and
 * `[splits[i-1], splits[i])` otherwise.
*
* For all `i` it is expected `splits[i] <= splits[i+1] <= input.size()`
 * For a `splits` size N, there will always be N+1 views in the output
*
* @note It is the caller's responsibility to ensure that the returned views
* do not outlive the viewed device memory.
*
* @code{.pseudo}
* Example:
* input: {10, 12, 14, 16, 18, 20, 22, 24, 26, 28}
* splits: {2, 5, 9}
* output: {{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}}
* @endcode
*
* @throws cudf::logic_error if `splits` has end index > size of `input`.
* @throws cudf::logic_error When the value in `splits` is not in the range [0, input.size()).
* @throws cudf::logic_error When the values in the `splits` are 'strictly decreasing'.
*
* @param input View of column to split
* @param splits Indices where the view will be split
* @param stream CUDA stream used for device memory operations and kernel launches
* @return The set of requested views of `input` indicated by the `splits`
*/
std::vector<column_view> split(column_view const& input,
host_span<size_type const> splits,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @ingroup copy_split
* @copydoc cudf::split(column_view const&, host_span<size_type const>, rmm::cuda_stream_view)
*/
std::vector<column_view> split(column_view const& input,
std::initializer_list<size_type> splits,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Splits a `table_view` into a set of `table_view`s according to a set of indices
* derived from expected splits.
*
* @ingroup copy_split
*
 * The returned views of `input` are constructed from the vector of splits, which indicates
 * where each split should occur. The `i`th returned `table_view` is sliced as
 * `[0, splits[i])` if `i`=0, else `[splits[i-1], input.size())` if `i` is the last view and
 * `[splits[i-1], splits[i])` otherwise.
*
* For all `i` it is expected `splits[i] <= splits[i+1] <= input.size()`
 * For a `splits` size N, there will always be N+1 views in the output
*
* @note It is the caller's responsibility to ensure that the returned views
* do not outlive the viewed device memory.
*
* @code{.pseudo}
* Example:
* input: [{10, 12, 14, 16, 18, 20, 22, 24, 26, 28},
* {50, 52, 54, 56, 58, 60, 62, 64, 66, 68}]
* splits: {2, 5, 9}
* output: [{{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}},
* {{50, 52}, {54, 56, 58}, {60, 62, 64, 66}, {68}}]
* @endcode
*
* @throws cudf::logic_error if `splits` has end index > size of `input`.
* @throws cudf::logic_error When the value in `splits` is not in the range [0, input.size()).
* @throws cudf::logic_error When the values in the `splits` are 'strictly decreasing'.
*
* @param input View of a table to split
* @param splits Indices where the view will be split
* @param stream CUDA stream used for device memory operations and kernel launches
* @return The set of requested views of `input` indicated by the `splits`
*/
std::vector<table_view> split(table_view const& input,
host_span<size_type const> splits,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @ingroup copy_split
* @copydoc cudf::split(table_view const&, host_span<size_type const>, rmm::cuda_stream_view)
*/
std::vector<table_view> split(table_view const& input,
std::initializer_list<size_type> splits,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Returns a new column, where each element is selected from either @p lhs or
* @p rhs based on the value of the corresponding element in @p boolean_mask
*
* Selects each element i in the output column from either @p rhs or @p lhs using the following
* rule: `output[i] = (boolean_mask.valid(i) and boolean_mask[i]) ? lhs[i] : rhs[i]`
*
* @throws cudf::logic_error if lhs and rhs are not of the same type
* @throws cudf::logic_error if lhs and rhs are not of the same length
* @throws cudf::logic_error if boolean mask is not of type bool
* @throws cudf::logic_error if boolean mask is not of the same length as lhs and rhs
* @param lhs left-hand column_view
* @param rhs right-hand column_view
* @param boolean_mask column of `type_id::BOOL8` representing "left (true) / right (false)"
* boolean for each element. Null element represents false.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns new column with the selected elements
*/
std::unique_ptr<column> copy_if_else(
column_view const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a new column, where each element is selected from either @p lhs or
* @p rhs based on the value of the corresponding element in @p boolean_mask
*
* Selects each element i in the output column from either @p rhs or @p lhs using the following
* rule: `output[i] = (boolean_mask.valid(i) and boolean_mask[i]) ? lhs : rhs[i]`
*
* @throws cudf::logic_error if lhs and rhs are not of the same type
* @throws cudf::logic_error if boolean mask is not of type bool
* @throws cudf::logic_error if boolean mask is not of the same length as rhs
* @param lhs left-hand scalar
* @param rhs right-hand column_view
* @param boolean_mask column of `type_id::BOOL8` representing "left (true) / right (false)"
* boolean for each element. Null element represents false.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns new column with the selected elements
*/
std::unique_ptr<column> copy_if_else(
scalar const& lhs,
column_view const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a new column, where each element is selected from either @p lhs or
* @p rhs based on the value of the corresponding element in @p boolean_mask
*
* Selects each element i in the output column from either @p rhs or @p lhs using the following
* rule: `output[i] = (boolean_mask.valid(i) and boolean_mask[i]) ? lhs[i] : rhs`
*
* @throws cudf::logic_error if lhs and rhs are not of the same type
* @throws cudf::logic_error if boolean mask is not of type bool
* @throws cudf::logic_error if boolean mask is not of the same length as lhs
* @param lhs left-hand column_view
* @param rhs right-hand scalar
* @param boolean_mask column of `type_id::BOOL8` representing "left (true) / right (false)"
* boolean for each element. Null element represents false.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns new column with the selected elements
*/
std::unique_ptr<column> copy_if_else(
column_view const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a new column, where each element is selected from either @p lhs or
* @p rhs based on the value of the corresponding element in @p boolean_mask
*
* Selects each element i in the output column from either @p rhs or @p lhs using the following
* rule: `output[i] = (boolean_mask.valid(i) and boolean_mask[i]) ? lhs : rhs`
*
* @throws cudf::logic_error if boolean mask is not of type bool
* @param lhs left-hand scalar
* @param rhs right-hand scalar
* @param boolean_mask column of `type_id::BOOL8` representing "left (true) / right (false)"
* boolean for each element. null element represents false.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns new column with the selected elements
*/
std::unique_ptr<column> copy_if_else(
scalar const& lhs,
scalar const& rhs,
column_view const& boolean_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Scatters rows from the input table to rows of the output corresponding
* to true values in a boolean mask.
*
* @ingroup copy_scatter
*
* The `i`th row of `input` will be written to the output table at the location
* of the `i`th true value in `boolean_mask`. All other rows in the output will
* equal the same row in `target`.
*
* `boolean_mask` should have number of `true`s <= number of rows in `input`.
* If boolean mask is `true`, corresponding value in target is updated with
* value from corresponding `input` column, else it is left untouched.
*
* @code{.pseudo}
* Example:
* input: {{1, 5, 6, 8, 9}}
* boolean_mask: {true, false, false, false, true, true, false, true, true, false}
* target: {{ 2, 2, 3, 4, 4, 7, 7, 7, 8, 10}}
*
* output: {{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}
* @endcode
*
* @throw cudf::logic_error if input.num_columns() != target.num_columns()
* @throws cudf::logic_error if any `i`th input_column type != `i`th target_column type
* @throws cudf::logic_error if boolean_mask.type() != bool
* @throws cudf::logic_error if boolean_mask.size() != target.num_rows()
* @throws cudf::logic_error if number of `true` in `boolean_mask` > input.num_rows()
*
* @param input table_view (set of dense columns) to scatter
* @param target table_view to modify with scattered values from `input`
* @param boolean_mask column_view which acts as boolean mask
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate device memory of the returned table
*
* @returns Returns a table by scattering `input` into `target` as per `boolean_mask`
*/
std::unique_ptr<table> boolean_mask_scatter(
table_view const& input,
table_view const& target,
column_view const& boolean_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Scatters scalar values to rows of the output corresponding
* to true values in a boolean mask.
*
* @ingroup copy_scatter
*
* The `i`th scalar in `input` will be written to all columns of the output
* table at the location of the `i`th true value in `boolean_mask`.
* All other rows in the output will equal the same row in `target`.
*
* @code{.pseudo}
* Example:
* input: {11}
* boolean_mask: {true, false, false, false, true, true, false, true, true, false}
* target: {{ 2, 2, 3, 4, 4, 7, 7, 7, 8, 10}}
*
* output: {{ 11, 2, 3, 4, 11, 11, 7, 11, 11, 10}}
* @endcode
*
* @throw cudf::logic_error if input.size() != target.num_columns()
* @throws cudf::logic_error if any `i`th input_scalar type != `i`th target_column type
* @throws cudf::logic_error if boolean_mask.type() != bool
* @throws cudf::logic_error if boolean_mask.size() != target.size()
*
* @param input scalars to scatter
* @param target table_view to modify with scattered values from `input`
* @param boolean_mask column_view which acts as boolean mask
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate device memory of the returned table
*
* @returns Returns a table by scattering `input` into `target` as per `boolean_mask`
*/
std::unique_ptr<table> boolean_mask_scatter(
std::vector<std::reference_wrapper<scalar const>> const& input,
table_view const& target,
column_view const& boolean_mask,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Get the element at specified index from a column
*
* @warning This function is expensive (invokes a kernel launch). So, it is not
* recommended to be used in performance sensitive code or inside a loop.
*
* @throws cudf::logic_error if `index` is not within the range `[0, input.size())`
*
* @param input Column view to get the element from
* @param index Index into `input` to get the element at
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned scalar's device memory
* @return Scalar containing the single value
*/
std::unique_ptr<scalar> get_element(
column_view const& input,
size_type index,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Indicates whether a row can be sampled more than once.
*/
enum class sample_with_replacement : bool {
FALSE, ///< A row can be sampled only once
TRUE ///< A row can be sampled more than once
};
/**
* @brief Gather `n` samples from given `input` randomly
*
* @code{.pseudo}
* Example:
* input: {col1: {1, 2, 3, 4, 5}, col2: {6, 7, 8, 9, 10}}
* n: 3
* replacement: false
*
* output: {col1: {3, 1, 4}, col2: {8, 6, 9}}
*
* replacement: true
*
* output: {col1: {3, 1, 1}, col2: {8, 6, 6}}
* @endcode
*
* @throws cudf::logic_error if `n` > `input.num_rows()` and `replacement` == FALSE.
* @throws cudf::logic_error if `n` < 0.
*
* @param input View of a table to sample
* @param n non-negative number of samples expected from `input`
* @param replacement Allow or disallow sampling of the same row more than once
* @param seed Seed value to initiate random number generator
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table's device memory
*
* @return Table containing samples from `input`
*/
std::unique_ptr<table> sample(
table_view const& input,
size_type const n,
sample_with_replacement replacement = sample_with_replacement::FALSE,
int64_t const seed = 0,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Checks if a column or its descendants have non-empty null rows
*
* @note This function is exact. If it returns `true`, there exists one or more
* non-empty null elements.
*
* A LIST or STRING column might have non-empty rows that are marked as null.
* A STRUCT OR LIST column might have child columns that have non-empty null rows.
* Other types of columns are deemed incapable of having non-empty null rows.
* E.g. Fixed width columns have no concept of an "empty" row.
*
* @param input The column which is (and whose descendants are) to be checked for
* non-empty null rows.
* @param stream CUDA stream used for device memory operations and kernel launches
* @return true If either the column or its descendants have non-empty null rows
* @return false If neither the column or its descendants have non-empty null rows
*/
bool has_nonempty_nulls(column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Approximates if a column or its descendants *may* have non-empty null elements
*
* @note This function is approximate.
* - `true`: Non-empty null elements could exist
* - `false`: Non-empty null elements definitely do not exist
*
* False positives are possible, but false negatives are not.
*
* Compared to the exact `has_nonempty_nulls()` function, this function is typically
* more efficient.
*
* Complexity:
* - Best case: `O(count_descendants(input))`
* - Worst case: `O(count_descendants(input)) * m`, where `m` is the number of rows in the largest
* descendant
*
* @param input The column which is (and whose descendants are) to be checked for
* non-empty null rows
* @return true If either the column or its descendants have null rows
* @return false If neither the column nor its descendants have null rows
*/
bool may_have_nonempty_nulls(column_view const& input);
/**
* @brief Copy `input` into output while purging any non-empty null rows in the column or its
* descendants.
*
* If the input column is not of compound type (LIST/STRING/STRUCT/DICTIONARY), the output will be
* the same as input.
*
* The purge operation only applies directly to LIST and STRING columns, but it applies indirectly
* to STRUCT/DICTIONARY columns as well, since these columns may have child columns that
* are LIST or STRING.
*
* Examples:
*
* @code{.pseudo}
* auto const lists = lists_column_wrapper<int32_t>{ {0,1}, {2,3}, {4,5} }.release();
* cudf::detail::set_null_mask(lists->null_mask(), 1, 2, false);
*
* lists[1] is now null, but the lists child column still stores `{2,3}`.
* The lists column contents will be:
* Validity: 101
* Offsets: [0, 2, 4, 6]
* Child: [0, 1, 2, 3, 4, 5]
*
* After purging the contents of the list's null rows, the column's contents will be:
* Validity: 101
* Offsets: [0, 2, 2, 4]
* Child: [0, 1, 4, 5]
* @endcode
*
* @code{.pseudo}
* auto const strings = strings_column_wrapper{ "AB", "CD", "EF" }.release();
* cudf::detail::set_null_mask(strings->null_mask(), 1, 2, false);
*
* strings[1] is now null, but the strings column still stores `"CD"`.
* The lists column contents will be:
* Validity: 101
* Offsets: [0, 2, 4, 6]
* Child: [A, B, C, D, E, F]
*
* After purging the contents of the list's null rows, the column's contents
* will be:
* Validity: 101
* Offsets: [0, 2, 2, 4]
* Child: [A, B, E, F]
* @endcode
*
* @code{.pseudo}
* auto const lists = lists_column_wrapper<int32_t>{ {0,1}, {2,3}, {4,5} };
* auto const structs = structs_column_wrapper{ {lists}, null_at(1) };
*
* structs[1].child is now null, but the lists column still stores `{2,3}`.
* The lists column contents will be:
* Validity: 101
* Offsets: [0, 2, 4, 6]
* Child: [0, 1, 2, 3, 4, 5]
*
* After purging the contents of the list's null rows, the column's contents
* will be:
* Validity: 101
* Offsets: [0, 2, 2, 4]
* Child: [0, 1, 4, 5]
* @endcode
*
* @param input The column whose null rows are to be checked and purged
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return A new column with equivalent contents to `input`, but with null rows purged
*/
std::unique_ptr<column> purge_nonempty_nulls(
column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/types.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifdef __CUDACC__
#define CUDF_HOST_DEVICE __host__ __device__
#else
#define CUDF_HOST_DEVICE
#endif
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
/**
* @file
* @brief Type declarations for libcudf.
*/
// Forward declarations
/// @cond
namespace rmm {
class device_buffer;
/// @endcond
} // namespace rmm
namespace cudf {
// Forward declaration
class column;
class column_view;
class mutable_column_view;
class string_view;
class list_view;
class struct_view;
class scalar;
// clang-format off
class list_scalar;
class struct_scalar;
class string_scalar;
template <typename T> class numeric_scalar;
template <typename T> class fixed_point_scalar;
template <typename T> class timestamp_scalar;
template <typename T> class duration_scalar;
class string_scalar_device_view;
template <typename T> class numeric_scalar_device_view;
template <typename T> class fixed_point_scalar_device_view;
template <typename T> class timestamp_scalar_device_view;
template <typename T> class duration_scalar_device_view;
// clang-format on
class table;
class table_view;
class mutable_table_view;
/**
* @addtogroup utility_types
* @{
* @file
*/
using size_type = int32_t; ///< Row index type for columns and tables
using bitmask_type = uint32_t; ///< Bitmask type stored as 32-bit unsigned integer
using valid_type = uint8_t; ///< Valid type in host memory
using thread_index_type = int64_t; ///< Thread index type in kernels
/**
 * @brief Similar to `std::distance` but returns `cudf::size_type` and performs `static_cast`
 *
 * @tparam T Iterator type
 * @param f "first" iterator
 * @param l "last" iterator
 * @return The distance between first and last
 */
template <typename T>
size_type distance(T f, T l)
{
  // Narrow the iterator difference type down to the library-wide row index type.
  auto const d = std::distance(f, l);
  return static_cast<size_type>(d);
}
/**
* @brief Indicates the order in which elements should be sorted.
*/
enum class order : bool {
ASCENDING, ///< Elements ordered from small to large
DESCENDING ///< Elements ordered from large to small
};
/**
* @brief Enum to specify whether to include nulls or exclude nulls
*/
enum class null_policy : bool {
EXCLUDE, ///< exclude null elements
INCLUDE ///< include null elements
};
/**
* @brief Enum to treat NaN floating point value as null or non-null element
*/
enum class nan_policy : bool {
NAN_IS_NULL, ///< treat nans as null elements
NAN_IS_VALID ///< treat nans as valid elements (non-null)
};
/**
* @brief Enum to consider different elements (of floating point types) holding NaN value as equal
* or unequal
*/
enum class nan_equality /*unspecified*/ {
ALL_EQUAL, ///< All NaNs compare equal, regardless of sign
UNEQUAL ///< All NaNs compare unequal (IEEE754 behavior)
};
/**
* @brief Enum to consider two nulls as equal or unequal
*/
enum class null_equality : bool {
EQUAL, ///< nulls compare equal
UNEQUAL ///< nulls compare unequal
};
/**
* @brief Indicates how null values compare against all other values.
*/
enum class null_order : bool {
AFTER, ///< NULL values ordered *after* all other values
BEFORE ///< NULL values ordered *before* all other values
};
/**
 * @brief Indicates whether a collection of values is known to be sorted.
 */
enum class sorted : bool {
  NO,  ///< The collection is not known to be sorted
  YES  ///< The collection is known to be sorted
};
/**
* @brief Indicates how a collection of values has been ordered.
*/
struct order_info {
sorted is_sorted; ///< Indicates whether the collection is sorted
order ordering; ///< Indicates the order in which the values are sorted
null_order null_ordering; ///< Indicates how null values compare against all other values
};
/**
* @brief Controls the allocation/initialization of a null mask.
*/
enum class mask_state : int32_t {
UNALLOCATED, ///< Null mask not allocated, (all elements are valid)
UNINITIALIZED, ///< Null mask allocated, but not initialized
ALL_VALID, ///< Null mask allocated, initialized to all elements valid
ALL_NULL ///< Null mask allocated, initialized to all elements NULL
};
/**
* @brief Interpolation method to use when the desired quantile lies between
* two data points i and j
*/
enum class interpolation : int32_t {
LINEAR, ///< Linear interpolation between i and j
LOWER, ///< Lower data point (i)
HIGHER, ///< Higher data point (j)
MIDPOINT, ///< (i + j)/2
NEAREST ///< i or j, whichever is nearest
};
/**
* @brief Identifies a column's logical element type
*/
enum class type_id : int32_t {
EMPTY, ///< Always null with no underlying data
INT8, ///< 1 byte signed integer
INT16, ///< 2 byte signed integer
INT32, ///< 4 byte signed integer
INT64, ///< 8 byte signed integer
UINT8, ///< 1 byte unsigned integer
UINT16, ///< 2 byte unsigned integer
UINT32, ///< 4 byte unsigned integer
UINT64, ///< 8 byte unsigned integer
FLOAT32, ///< 4 byte floating point
FLOAT64, ///< 8 byte floating point
BOOL8, ///< Boolean using one byte per value, 0 == false, else true
TIMESTAMP_DAYS, ///< point in time in days since Unix Epoch in int32
TIMESTAMP_SECONDS, ///< point in time in seconds since Unix Epoch in int64
TIMESTAMP_MILLISECONDS, ///< point in time in milliseconds since Unix Epoch in int64
TIMESTAMP_MICROSECONDS, ///< point in time in microseconds since Unix Epoch in int64
TIMESTAMP_NANOSECONDS, ///< point in time in nanoseconds since Unix Epoch in int64
DURATION_DAYS, ///< time interval of days in int32
DURATION_SECONDS, ///< time interval of seconds in int64
DURATION_MILLISECONDS, ///< time interval of milliseconds in int64
DURATION_MICROSECONDS, ///< time interval of microseconds in int64
DURATION_NANOSECONDS, ///< time interval of nanoseconds in int64
DICTIONARY32, ///< Dictionary type using int32 indices
STRING, ///< String elements
LIST, ///< List elements
DECIMAL32, ///< Fixed-point type with int32_t
DECIMAL64, ///< Fixed-point type with int64_t
DECIMAL128, ///< Fixed-point type with __int128_t
STRUCT, ///< Struct elements
// `NUM_TYPE_IDS` must be last!
NUM_TYPE_IDS ///< Total number of type ids
};
/**
 * @brief Indicator for the logical data type of an element in a column.
 *
 * Simple types can be entirely described by their `id()`, but some types
 * require additional metadata to fully describe elements of that type.
 */
class data_type {
 public:
  data_type()  = default;
  ~data_type() = default;
  data_type(data_type const&) = default;  ///< Copy constructor
  data_type(data_type&&)      = default;  ///< Move constructor
  /**
   * @brief Copy assignment operator for data_type
   *
   * @return Reference to this object
   */
  data_type& operator=(data_type const&) = default;
  /**
   * @brief Move assignment operator for data_type
   *
   * @return Reference to this object
   */
  data_type& operator=(data_type&&) = default;
  /**
   * @brief Construct a new `data_type` object
   *
   * @param id The type's identifier
   */
  explicit constexpr data_type(type_id id) : _id{id} {}
  /**
   * @brief Construct a new `data_type` object for `numeric::fixed_point`
   *
   * @note `id` must be one of `DECIMAL32`, `DECIMAL64`, or `DECIMAL128`;
   * this precondition is checked via `assert` in debug builds only.
   *
   * @param id The `fixed_point`'s identifier
   * @param scale The `fixed_point`'s scale (see `fixed_point::_scale`)
   */
  explicit data_type(type_id id, int32_t scale) : _id{id}, _fixed_point_scale{scale}
  {
    assert(id == type_id::DECIMAL32 || id == type_id::DECIMAL64 || id == type_id::DECIMAL128);
  }
  /**
   * @brief Returns the type identifier
   *
   * @return The type identifier
   */
  [[nodiscard]] constexpr type_id id() const noexcept { return _id; }
  /**
   * @brief Returns the scale (for fixed_point types)
   *
   * @note Meaningful only for decimal types; defaults to 0 for all others.
   *
   * @return The scale
   */
  [[nodiscard]] constexpr int32_t scale() const noexcept { return _fixed_point_scale; }
 private:
  type_id _id{type_id::EMPTY};  // defaults to the "always null, no data" type
  // Below is additional type specific metadata. Currently, only _fixed_point_scale is stored.
  int32_t _fixed_point_scale{};  // numeric::scale_type not available here, use int32_t
};
/**
 * @brief Compares two `data_type` objects for equality.
 *
 * // TODO Define exactly what it means for two `data_type`s to be equal. e.g.,
 * are two timestamps with different resolutions equal? How about decimals with
 * different scale/precision?
 *
 * @param lhs The first `data_type` to compare
 * @param rhs The second `data_type` to compare
 * @return true `lhs` is equal to `rhs`
 * @return false `lhs` is not equal to `rhs`
 */
constexpr bool operator==(data_type const& lhs, data_type const& rhs)
{
  // std::tie would be tidier here, but it currently breaks JITIFY
  bool const same_id    = lhs.id() == rhs.id();
  bool const same_scale = lhs.scale() == rhs.scale();
  return same_id && same_scale;
}
/**
 * @brief Compares two `data_type` objects for inequality.
 *
 * // TODO Define exactly what it means for two `data_type`s to be equal. e.g.,
 * are two timestamps with different resolutions equal? How about decimals with
 * different scale/precision?
 *
 * @param lhs The first `data_type` to compare
 * @param rhs The second `data_type` to compare
 * @return true `lhs` is not equal to `rhs`
 * @return false `lhs` is equal to `rhs`
 */
inline bool operator!=(data_type const& lhs, data_type const& rhs)
{
  // Defined in terms of operator== so the two can never disagree.
  return not(lhs == rhs);
}
/**
* @brief Returns the size in bytes of elements of the specified `data_type`
*
* @note Only fixed-width types are supported
*
* @throws cudf::logic_error if `is_fixed_width(element_type) == false`
*
* @param t The `data_type` to get the size of
* @return Size in bytes of an element of the specified `data_type`
*/
std::size_t size_of(data_type t);
/** @} */
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/null_mask.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <vector>
namespace cudf {
/**
* @addtogroup column_nullmask
* @{
* @file
* @brief APIs for managing validity bitmasks
*/
/**
* @brief Returns the null count for a null mask of the specified `state`
* representing `size` elements.
*
* @throw std::invalid_argument if state is UNINITIALIZED
*
* @param state The state of the null mask
* @param size The number of elements represented by the mask
* @return The count of null elements
*/
size_type state_null_count(mask_state state, size_type size);
/**
* @brief Computes the required bytes necessary to represent the specified
* number of bits with a given padding boundary.
*
* @note The Arrow specification for the null bitmask requires a 64B padding
* boundary.
*
* @param number_of_bits The number of bits that need to be represented
* @param padding_boundary The value returned will be rounded up to a multiple
* of this value
* @return The necessary number of bytes
*/
std::size_t bitmask_allocation_size_bytes(size_type number_of_bits,
std::size_t padding_boundary = 64);
/**
* @brief Returns the number of `bitmask_type` words required to represent the
* specified number of bits.
*
* Unlike `bitmask_allocation_size_bytes`, which returns the number of *bytes*
* needed for a bitmask allocation (including padding), this function returns
* the *actual* number `bitmask_type` elements necessary to represent
* `number_of_bits`. This is useful when one wishes to process all of the bits
* in a bitmask and ignore the padding/slack bits.
*
* @param number_of_bits The number of bits that need to be represented
* @return The necessary number of `bitmask_type` elements
*/
size_type num_bitmask_words(size_type number_of_bits);
/**
* @brief Creates a `device_buffer` for use as a null value indicator bitmask of
* a `column`.
*
* @param size The number of elements to be represented by the mask
* @param state The desired state of the mask
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned device_buffer
* @return A `device_buffer` for use as a null bitmask
* satisfying the desired size and state
*/
rmm::device_buffer create_null_mask(
size_type size,
mask_state state,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Sets a pre-allocated bitmask buffer to a given state in the range
* `[begin_bit, end_bit)`
*
* Sets `[begin_bit, end_bit)` bits of bitmask to valid if `valid==true`
* or null otherwise.
*
 * @param bitmask Pointer to bitmask (e.g. returned by `column_view::null_mask()`)
* @param begin_bit Index of the first bit to set (inclusive)
* @param end_bit Index of the last bit to set (exclusive)
* @param valid If true set all entries to valid; otherwise, set all to null
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void set_null_mask(bitmask_type* bitmask,
size_type begin_bit,
size_type end_bit,
bool valid,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @brief Creates a `device_buffer` from a slice of bitmask defined by a range
* of indices `[begin_bit, end_bit)`.
*
* Returns empty `device_buffer` if `bitmask == nullptr`.
*
* @throws cudf::logic_error if `begin_bit > end_bit`
* @throws cudf::logic_error if `begin_bit < 0`
*
* @param mask Bitmask residing in device memory whose bits will be copied
* @param begin_bit Index of the first bit to be copied (inclusive)
* @param end_bit Index of the last bit to be copied (exclusive)
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned device_buffer
* @return A `device_buffer` containing the bits
* `[begin_bit, end_bit)` from `mask`.
*/
rmm::device_buffer copy_bitmask(
bitmask_type const* mask,
size_type begin_bit,
size_type end_bit,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Copies `view`'s bitmask from the bits
* `[view.offset(), view.offset() + view.size())` into a `device_buffer`
*
* Returns empty `device_buffer` if the column is not nullable
*
* @param view Column view whose bitmask needs to be copied
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned device_buffer
* @return A `device_buffer` containing the bits
* `[view.offset(), view.offset() + view.size())` from `view`'s bitmask.
*/
rmm::device_buffer copy_bitmask(
column_view const& view,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Performs bitwise AND of the bitmasks of columns of a table. Returns
* a pair of resulting mask and count of unset bits.
*
* If any of the columns isn't nullable, it is considered all valid.
* If no column in the table is nullable, an empty bitmask is returned.
*
* @param view The table of columns
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned device_buffer
* @return A pair of resulting bitmask and count of unset bits
*/
std::pair<rmm::device_buffer, size_type> bitmask_and(
table_view const& view,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Performs bitwise OR of the bitmasks of columns of a table. Returns
* a pair of resulting mask and count of unset bits.
*
* If any of the columns isn't nullable, it is considered all valid.
* If no column in the table is nullable, an empty bitmask is returned.
*
* @param view The table of columns
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned device_buffer
* @return A pair of resulting bitmask and count of unset bits
*/
std::pair<rmm::device_buffer, size_type> bitmask_or(
table_view const& view,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Given a validity bitmask, counts the number of null elements (unset bits)
 * in the range `[start, stop)`.
 *
 * If `bitmask == nullptr`, all elements are assumed to be valid and the
 * function returns `0`.
 *
 * @throws cudf::logic_error if `start > stop`
 * @throws cudf::logic_error if `start < 0`
 *
 * @param bitmask Validity bitmask residing in device memory.
 * @param start Index of the first bit to count (inclusive).
 * @param stop Index of the last bit to count (exclusive).
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @return The number of null elements in the specified range.
 */
cudf::size_type null_count(bitmask_type const* bitmask,
                           size_type start,
                           size_type stop,
                           rmm::cuda_stream_view stream = cudf::get_default_stream());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/search.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <vector>
namespace cudf {
/**
* @addtogroup column_search
* @{
* @file
* @brief Column APIs for lower_bound, upper_bound, and contains
*/
/**
 * @brief Find smallest indices in a sorted table where values should be inserted to maintain order.
 *
 * For each row in `needles`, find the first index in `haystack` where inserting the row still
 * maintains its sort order.
 *
 * @code{.pseudo}
 * Example:
 *
 * Single column:
 *      idx        0   1   2   3   4
 *   haystack = { 10, 20, 20, 30, 50 }
 *   needles  = { 20 }
 *   result   = {  1 }
 *
 * Multi Column:
 *      idx          0    1    2    3    4
 *   haystack = {{  10,  20,  20,  20,  20 },
 *               { 5.0,  .5,  .5,  .7,  .7 },
 *               {  90,  77,  78,  61,  61 }}
 *   needles  = {{ 20 },
 *               { .7 },
 *               { 61 }}
 *   result   = {  3 }
 * @endcode
 *
 * @param haystack The table containing search space
 * @param needles Values for which to find the insert locations in the search space
 * @param column_order Vector of column sort order
 * @param null_precedence Vector of null_order enums describing the ordering of nulls in `needles`
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A non-nullable column of elements containing the insertion points
 */
std::unique_ptr<column> lower_bound(
  table_view const& haystack,
  table_view const& needles,
  std::vector<order> const& column_order,
  std::vector<null_order> const& null_precedence,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Find largest indices in a sorted table where values should be inserted to maintain order.
 *
 * For each row in `needles`, find the last index in `haystack` where inserting the row still
 * maintains its sort order.
 *
 * @code{.pseudo}
 * Example:
 *
 * Single Column:
 *      idx        0   1   2   3   4
 *   haystack = { 10, 20, 20, 30, 50 }
 *   needles  = { 20 }
 *   result   = {  3 }
 *
 * Multi Column:
 *      idx          0    1    2    3    4
 *   haystack = {{  10,  20,  20,  20,  20 },
 *               { 5.0,  .5,  .5,  .7,  .7 },
 *               {  90,  77,  78,  61,  61 }}
 *   needles  = {{ 20 },
 *               { .7 },
 *               { 61 }}
 *   result   = {  5 }
 * @endcode
 *
 * @param haystack The table containing search space
 * @param needles Values for which to find the insert locations in the search space
 * @param column_order Vector of column sort order
 * @param null_precedence Vector of null_order enums describing the ordering of nulls in `needles`
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A non-nullable column of elements containing the insertion points
 */
std::unique_ptr<column> upper_bound(
  table_view const& haystack,
  table_view const& needles,
  std::vector<order> const& column_order,
  std::vector<null_order> const& null_precedence,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Check if the given `needle` value exists in the `haystack` column.
*
* @throws cudf::logic_error If `haystack.type() != needle.type()`.
*
* @code{.pseudo}
* Single Column:
* idx 0 1 2 3 4
* haystack = { 10, 20, 20, 30, 50 }
* needle = { 20 }
* result = true
* @endcode
*
* @param haystack The column containing search space
* @param needle A scalar value to check for existence in the search space
* @param stream CUDA stream used for device memory operations and kernel launches
* @return true if the given `needle` value exists in the `haystack` column
*/
bool contains(column_view const& haystack,
scalar const& needle,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
 * @brief Check if the given `needles` values exists in the `haystack` column.
 *
 * The new column will have type BOOL and have the same size and null mask as the input `needles`
 * column. That is, any null row in the `needles` column will result in a null row in the output
 * column.
 *
 * @throws cudf::logic_error If `haystack.type() != needles.type()`
 *
 * @code{.pseudo}
 * haystack = { 10, 20, 30, 40, 50 }
 * needles  = { 20, 40, 60, 80 }
 * result   = { true, true, false, false }
 * @endcode
 *
 * @param haystack The column containing search space
 * @param needles A column of values to check for existence in the search space
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A BOOL column indicating if each element in `needles` exists in the search space
 */
std::unique_ptr<column> contains(
  column_view const& haystack,
  column_view const& needles,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/stream_compaction.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>
namespace cudf {
/**
* @addtogroup reorder_compact
* @{
* @file
* @brief Column APIs for filtering rows
*/
/**
* @brief Filters a table to remove null elements with threshold count.
*
* Filters the rows of the `input` considering specified columns indicated in
* `keys` for validity / null values.
*
* Given an input table_view, row `i` from the input columns is copied to
* the output if the same row `i` of @p keys has at least @p keep_threshold
* non-null fields.
*
* This operation is stable: the input order is preserved in the output.
*
* Any non-nullable column in the input is treated as all non-null.
*
* @code{.pseudo}
* input {col1: {1, 2, 3, null},
* col2: {4, 5, null, null},
* col3: {7, null, null, null}}
* keys = {0, 1, 2} // All columns
* keep_threshold = 2
*
* output {col1: {1, 2}
* col2: {4, 5}
* col3: {7, null}}
* @endcode
*
* @note if @p input.num_rows() is zero, or @p keys is empty or has no nulls,
* there is no error, and an empty `table` is returned
*
* @param[in] input The input `table_view` to filter
* @param[in] keys vector of indices representing key columns from `input`
* @param[in] keep_threshold The minimum number of non-null fields in a row
* required to keep the row.
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return Table containing all rows of the `input` with at least @p
* keep_threshold non-null fields in @p keys.
*/
std::unique_ptr<table> drop_nulls(
table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Filters a table to remove null elements.
*
* Filters the rows of the `input` considering specified columns indicated in
* `keys` for validity / null values.
*
* @code{.pseudo}
* input {col1: {1, 2, 3, null},
* col2: {4, 5, null, null},
* col3: {7, null, null, null}}
* keys = {0, 1, 2} //All columns
*
* output {col1: {1}
* col2: {4}
* col3: {7}}
* @endcode
*
* Same as drop_nulls but defaults keep_threshold to the number of columns in
* @p keys.
*
* @param[in] input The input `table_view` to filter
* @param[in] keys vector of indices representing key columns from `input`
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return Table containing all rows of the `input` without nulls in the columns
* of @p keys.
*/
std::unique_ptr<table> drop_nulls(
table_view const& input,
std::vector<size_type> const& keys,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Filters a table to remove NANs with threshold count.
*
* Filters the rows of the `input` considering specified columns indicated in
* `keys` for NANs. These key columns must be of floating-point type.
*
* Given an input table_view, row `i` from the input columns is copied to
* the output if the same row `i` of @p keys has at least @p keep_threshold
* non-NAN elements.
*
* This operation is stable: the input order is preserved in the output.
*
* @code{.pseudo}
* input {col1: {1.0, 2.0, 3.0, NAN},
* col2: {4.0, null, NAN, NAN},
* col3: {7.0, NAN, NAN, NAN}}
* keys = {0, 1, 2} // All columns
* keep_threshold = 2
*
* output {col1: {1.0, 2.0}
* col2: {4.0, null}
* col3: {7.0, NAN}}
* @endcode
*
* @note if @p input.num_rows() is zero, or @p keys is empty,
* there is no error, and an empty `table` is returned
*
* @throws cudf::logic_error if The `keys` columns are not floating-point type.
*
* @param[in] input The input `table_view` to filter
* @param[in] keys vector of indices representing key columns from `input`
* @param[in] keep_threshold The minimum number of non-NAN elements in a row
* required to keep the row.
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return Table containing all rows of the `input` with at least @p
* keep_threshold non-NAN elements in @p keys.
*/
std::unique_ptr<table> drop_nans(
table_view const& input,
std::vector<size_type> const& keys,
cudf::size_type keep_threshold,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Filters a table to remove NANs.
 *
 * Filters the rows of the `input` considering specified columns indicated in
 * `keys` for NANs. These key columns must be of floating-point type.
 *
 * @code{.pseudo}
 * input {col1: {1.0, 2.0, 3.0, NAN},
 *        col2: {4.0, null, NAN, NAN},
 *        col3: {null, NAN, NAN, NAN}}
 * keys = {0, 1, 2} // All columns
 *
 * output {col1: {1.0}
 *         col2: {4.0}
 *         col3: {null}}
 * @endcode
 *
 * Same as drop_nans but defaults keep_threshold to the number of columns in
 * @p keys.
 *
 * @param[in] input The input `table_view` to filter
 * @param[in] keys vector of indices representing key columns from `input`
 * @param[in] mr Device memory resource used to allocate the returned table's device memory
 * @return Table containing all rows of the `input` without NANs in the columns
 * of @p keys.
 */
std::unique_ptr<table> drop_nans(
  table_view const& input,
  std::vector<size_type> const& keys,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Filters `input` using `boolean_mask` of boolean values as a mask.
*
* Given an input `table_view` and a mask `column_view`, an element `i` from
* each column_view of the `input` is copied to the corresponding output column
* if the corresponding element `i` in the mask is non-null and `true`.
* This operation is stable: the input order is preserved.
*
* @note if @p input.num_rows() is zero, there is no error, and an empty table
* is returned.
*
* @throws cudf::logic_error if `input.num_rows() != boolean_mask.size()`.
* @throws cudf::logic_error if `boolean_mask` is not `type_id::BOOL8` type.
*
* @param[in] input The input table_view to filter
* @param[in] boolean_mask A nullable column_view of type type_id::BOOL8 used
* as a mask to filter the `input`.
* @param[in] mr Device memory resource used to allocate the returned table's device memory
* @return Table containing copy of all rows of @p input passing
* the filter defined by @p boolean_mask.
*/
std::unique_ptr<table> apply_boolean_mask(
table_view const& input,
column_view const& boolean_mask,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Choices for drop_duplicates API for retainment of duplicate rows
*/
enum class duplicate_keep_option {
KEEP_ANY = 0, ///< Keep an unspecified occurrence
KEEP_FIRST, ///< Keep first occurrence
KEEP_LAST, ///< Keep last occurrence
KEEP_NONE ///< Keep no (remove all) occurrences of duplicates
};
/**
* @brief Create a new table with consecutive duplicate rows removed.
*
* Given an `input` table_view, each row is copied to the output table to create a set of distinct
* rows. If there are duplicate rows, which row is copied depends on the `keep` parameter.
*
* The order of rows in the output table remains the same as in the input.
*
* A row is distinct if there are no equivalent rows in the table. A row is unique if there is no
* adjacent equivalent row. That is, keeping distinct rows removes all duplicates in the
* table/column, while keeping unique rows only removes duplicates from consecutive groupings.
*
* Performance hint: if the input is pre-sorted, `cudf::unique` can produce an equivalent result
* (i.e., same set of output rows) but with less running time than `cudf::distinct`.
*
* @throws cudf::logic_error if the `keys` column indices are out of bounds in the `input` table.
*
* @param[in] input input table_view to copy only unique rows
* @param[in] keys vector of indices representing key columns from `input`
* @param[in] keep keep any, first, last, or none of the found duplicates
* @param[in] nulls_equal flag to denote nulls are equal if null_equality::EQUAL, nulls are not
* equal if null_equality::UNEQUAL
* @param[in] mr Device memory resource used to allocate the returned table's device
* memory
*
* @return Table with unique rows from each sequence of equivalent rows as specified by `keep`
*/
std::unique_ptr<table> unique(
table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep,
null_equality nulls_equal = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Create a new table without duplicate rows.
*
* Given an `input` table_view, each row is copied to the output table to create a set of distinct
* rows. If there are duplicate rows, which row is copied depends on the `keep` parameter.
*
* The order of rows in the output table is not specified.
*
* Performance hint: if the input is pre-sorted, `cudf::unique` can produce an equivalent result
* (i.e., same set of output rows) but with less running time than `cudf::distinct`.
*
* @param input The input table
* @param keys Vector of indices indicating key columns in the `input` table
* @param keep Copy any, first, last, or none of the found duplicates
* @param nulls_equal Flag to specify whether null elements should be considered as equal
* @param nans_equal Flag to specify whether NaN elements should be considered as equal
* @param mr Device memory resource used to allocate the returned table
* @return Table with distinct rows in an unspecified order
*/
std::unique_ptr<table> distinct(
table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep = duplicate_keep_option::KEEP_ANY,
null_equality nulls_equal = null_equality::EQUAL,
nan_equality nans_equal = nan_equality::ALL_EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Create a new table without duplicate rows, preserving input order.
*
* Given an `input` table_view, each row is copied to the output table to create a set of distinct
* rows. The input row order is preserved. If there are duplicate rows, which row is copied depends
* on the `keep` parameter.
*
* This API produces the same output rows as `cudf::distinct`, but with input order preserved.
*
* Note that when `keep` is `KEEP_ANY`, the choice of which duplicate row to keep is arbitrary, but
* the returned table will retain the input order. That is, if the key column contained `1, 2, 1`
* with another values column `3, 4, 5`, the result could contain values `3, 4` or `4, 5` but not
* `4, 3` or `5, 4`.
*
* @param input The input table
* @param keys Vector of indices indicating key columns in the `input` table
* @param keep Copy any, first, last, or none of the found duplicates
* @param nulls_equal Flag to specify whether null elements should be considered as equal
* @param nans_equal Flag to specify whether NaN elements should be considered as equal
* @param mr Device memory resource used to allocate the returned table
* @return Table with distinct rows, preserving input order
*/
std::unique_ptr<table> stable_distinct(
table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep = duplicate_keep_option::KEEP_ANY,
null_equality nulls_equal = null_equality::EQUAL,
nan_equality nans_equal = nan_equality::ALL_EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Count the number of consecutive groups of equivalent rows in a column.
*
* If `null_handling` is null_policy::EXCLUDE and `nan_handling` is nan_policy::NAN_IS_NULL, both
* `NaN` and `null` values are ignored. If `null_handling` is null_policy::EXCLUDE and
* `nan_handling` is nan_policy::NAN_IS_VALID, only `null` is ignored, `NaN` is considered in count.
*
* `null`s are handled as equal.
*
* @param[in] input The column_view whose consecutive groups of equivalent rows will be counted
* @param[in] null_handling flag to include or ignore `null` while counting
* @param[in] nan_handling flag to consider `NaN==null` or not
*
* @return number of consecutive groups of equivalent rows in the column
*/
cudf::size_type unique_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling);
/**
* @brief Count the number of consecutive groups of equivalent rows in a table.
*
* @param[in] input Table whose consecutive groups of equivalent rows will be counted
* @param[in] nulls_equal flag to denote if null elements should be considered equal
* nulls are not equal if null_equality::UNEQUAL.
*
* @return number of consecutive groups of equivalent rows in the column
*/
cudf::size_type unique_count(table_view const& input,
null_equality nulls_equal = null_equality::EQUAL);
/**
* @brief Count the distinct elements in the column_view.
*
* If `nulls_equal == nulls_equal::UNEQUAL`, all `null`s are distinct.
*
* Given an input column_view, number of distinct elements in this column_view is returned.
*
* If `null_handling` is null_policy::EXCLUDE and `nan_handling` is nan_policy::NAN_IS_NULL, both
* `NaN` and `null` values are ignored. If `null_handling` is null_policy::EXCLUDE and
* `nan_handling` is nan_policy::NAN_IS_VALID, only `null` is ignored, `NaN` is considered in
* distinct count.
*
* `null`s are handled as equal.
*
* @param[in] input The column_view whose distinct elements will be counted
* @param[in] null_handling flag to include or ignore `null` while counting
* @param[in] nan_handling flag to consider `NaN==null` or not
*
* @return number of distinct rows in the table
*/
cudf::size_type distinct_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling);
/**
* @brief Count the distinct rows in a table.
*
* @param[in] input Table whose distinct rows will be counted
* @param[in] nulls_equal flag to denote if null elements should be considered equal.
* nulls are not equal if null_equality::UNEQUAL.
*
* @return number of distinct rows in the table
*/
cudf::size_type distinct_count(table_view const& input,
null_equality nulls_equal = null_equality::EQUAL);
/** @} */
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/reshape.hpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
* @addtogroup column_reshape
* @{
* @file
* @brief Column APIs for interleave and tile
*/
/**
* @brief Interleave columns of a table into a single column.
*
* Converts the column major table `input` into a row major column.
* Example:
* ```
* in = [[A1, A2, A3], [B1, B2, B3]]
* return = [A1, B1, A2, B2, A3, B3]
* ```
*
* @throws cudf::logic_error if input contains no columns.
* @throws cudf::logic_error if input columns dtypes are not identical.
*
* @param[in] input Table containing columns to interleave
* @param[in] mr Device memory resource used to allocate the returned column's device memory
*
* @return The interleaved columns as a single column
*/
std::unique_ptr<column> interleave_columns(
table_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Repeats the rows from `input` table `count` times to form a new table.
*
* `output.num_columns() == input.num_columns()`
* `output.num_rows() == input.num_rows() * count`
*
* ```
* input = [[8, 4, 7], [5, 2, 3]]
* count = 2
* return = [[8, 4, 7, 8, 4, 7], [5, 2, 3, 5, 2, 3]]
* ```
*
* @param[in] input Table containing rows to be repeated
* @param[in] count Number of times to tile "rows". Must be non-negative
* @param[in] mr Device memory resource used to allocate the returned table's device memory
*
* @return The table containing the tiled "rows"
*/
std::unique_ptr<table> tile(
table_view const& input,
size_type count,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Configures whether byte casting flips endianness
*/
enum class flip_endianness : bool { NO, YES };
/**
* @brief Converts a column's elements to lists of bytes
*
* ```
* input<int32> = [8675, 309]
* configuration = flip_endianness::YES
* return = [[0x00, 0x00, 0x21, 0xe3], [0x00, 0x00, 0x01, 0x35]]
* ```
*
* @param input_column Column to be converted to lists of bytes
* @param endian_configuration Whether to retain or flip the endianness of the elements
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @return The column containing the lists of bytes
*/
std::unique_ptr<column> byte_cast(
column_view const& input_column,
flip_endianness endian_configuration,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/join.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/ast/expressions.hpp>
#include <cudf/hashing.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <optional>
#include <utility>
#include <vector>
namespace cudf {
// forward declaration
namespace hashing::detail {
template <typename T>
class MurmurHash3_x86_32;
} // namespace hashing::detail
namespace detail {
template <typename T>
class hash_join;
} // namespace detail
/**
* @addtogroup column_join
* @{
* @file
*/
/**
* @brief Returns a pair of row index vectors corresponding to an
* inner join between the specified tables.
*
* The first returned vector contains the row indices from the left
* table that have a match in the right table (in unspecified order).
* The corresponding values in the second returned vector are
* the matched row indices from the right table.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Result: {{1, 2}, {0, 1}}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Result: {{1}, {0}}
* @endcode
*
* @throw cudf::logic_error if number of elements in `left_keys` or `right_keys`
* mismatch.
*
* @param[in] left_keys The left table
* @param[in] right_keys The right table
* @param[in] compare_nulls controls whether null join-key values
* should match or not.
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
* the result of performing an inner join between two tables with `left_keys` and `right_keys`
 * as the join keys.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
inner_join(cudf::table_view const& left_keys,
cudf::table_view const& right_keys,
null_equality compare_nulls = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a pair of row index vectors corresponding to a
* left join between the specified tables.
*
* The first returned vector contains all the row indices from the left
* table (in unspecified order). The corresponding value in the
* second returned vector is either (1) the row index of the matched row
* from the right table, if there is a match or (2) an unspecified
* out-of-bounds value.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Result: {{0, 1, 2}, {None, 0, 1}}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Result: {{0, 1, 2}, {None, 0, None}}
* @endcode
*
* @throw cudf::logic_error if number of elements in `left_keys` or `right_keys`
* mismatch.
*
* @param[in] left_keys The left table
* @param[in] right_keys The right table
* @param[in] compare_nulls controls whether null join-key values
* should match or not.
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
* the result of performing a left join between two tables with `left_keys` and `right_keys`
 * as the join keys.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
left_join(cudf::table_view const& left_keys,
cudf::table_view const& right_keys,
null_equality compare_nulls = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a pair of row index vectors corresponding to a
* full join between the specified tables.
*
* Taken pairwise, the values from the returned vectors are one of:
* (1) row indices corresponding to matching rows from the left and
* right tables, (2) a row index and an unspecified out-of-bounds value,
* representing a row from one table without a match in the other.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Result: {{0, 1, 2, None}, {None, 0, 1, 2}}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Result: {{0, 1, 2, None, None}, {None, 0, None, 1, 2}}
* @endcode
*
* @throw cudf::logic_error if number of elements in `left_keys` or `right_keys`
* mismatch.
*
* @param[in] left_keys The left table
* @param[in] right_keys The right table
* @param[in] compare_nulls controls whether null join-key values
* should match or not.
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
* the result of performing a full join between two tables with `left_keys` and `right_keys`
 * as the join keys.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
full_join(cudf::table_view const& left_keys,
cudf::table_view const& right_keys,
null_equality compare_nulls = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns a vector of row indices corresponding to a left semi-join
 * between the specified tables.
 *
 * The returned vector contains the row indices from the left table
 * for which there is a matching row in the right table.
 *
 * @code{.pseudo}
 * TableA: {{0, 1, 2}}
 * TableB: {{1, 2, 3}}
 * Result: {1, 2}
 * @endcode
 *
 * @param left_keys The left table
 * @param right_keys The right table
 * @param compare_nulls Controls whether null join-key values should match or not
 * @param mr Device memory resource used to allocate the returned table and columns' device memory
 *
 * @return A vector `left_indices` that can be used to construct
 * the result of performing a left semi join between two tables with
 * `left_keys` and `right_keys` as the join keys.
 */
std::unique_ptr<rmm::device_uvector<size_type>> left_semi_join(
  cudf::table_view const& left_keys,
  cudf::table_view const& right_keys,
  null_equality compare_nulls = null_equality::EQUAL,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns a vector of row indices corresponding to a left anti join
 * between the specified tables.
 *
 * The returned vector contains the row indices from the left table
 * for which there is no matching row in the right table.
 *
 * @code{.pseudo}
 * TableA: {{0, 1, 2}}
 * TableB: {{1, 2, 3}}
 * Result: {0}
 * @endcode
 *
 * @throw cudf::logic_error if the number of columns in either `left_keys` or `right_keys` is 0
 *
 * @param[in] left_keys The left table
 * @param[in] right_keys The right table
 * @param[in] compare_nulls controls whether null join-key values
 * should match or not.
 * @param mr Device memory resource used to allocate the returned table and columns' device memory
 *
 * @return A vector `left_indices` that can be used to construct
 * the result of performing a left anti join between two tables with
 * `left_keys` and `right_keys` as the join keys.
 */
std::unique_ptr<rmm::device_uvector<size_type>> left_anti_join(
  cudf::table_view const& left_keys,
  cudf::table_view const& right_keys,
  null_equality compare_nulls = null_equality::EQUAL,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Performs a cross join on two tables (`left`, `right`)
*
* The cross join returns the cartesian product of rows from each table.
*
* @note Warning: This function can easily cause out-of-memory errors. The size of the output is
* equal to `left.num_rows() * right.num_rows()`. Use with caution.
*
* @code{.pseudo}
* Left a: {0, 1, 2}
* Right b: {3, 4, 5}
* Result: { a: {0, 0, 0, 1, 1, 1, 2, 2, 2}, b: {3, 4, 5, 3, 4, 5, 3, 4, 5} }
* @endcode
* @throw cudf::logic_error if the number of columns in either `left` or `right` table is 0
*
* @param left The left table
* @param right The right table
* @param mr Device memory resource used to allocate the returned table's device memory
*
* @return Result of cross joining `left` and `right` tables
*/
std::unique_ptr<cudf::table> cross_join(
cudf::table_view const& left,
cudf::table_view const& right,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief The enum class to specify if any of the input join tables (`build` table and any later
* `probe` table) has nulls.
*
* This is used upon hash_join object construction to specify the existence of nulls in all the
* possible input tables. If such null existence is unknown, `YES` should be used as the default
* option.
*/
enum class nullable_join : bool { YES, NO };
/**
* @brief Hash join that builds hash table in creation and probes results in subsequent `*_join`
* member functions.
*
* This class enables the hash join scheme that builds hash table once, and probes as many times as
* needed (possibly in parallel).
*/
class hash_join {
public:
using impl_type = typename cudf::detail::hash_join<
cudf::hashing::detail::MurmurHash3_x86_32<cudf::hash_value_type>>; ///< Implementation type
hash_join() = delete;
~hash_join();
hash_join(hash_join const&) = delete;
hash_join(hash_join&&) = delete;
hash_join& operator=(hash_join const&) = delete;
hash_join& operator=(hash_join&&) = delete;
/**
* @brief Construct a hash join object for subsequent probe calls.
*
* @note The `hash_join` object must not outlive the table viewed by `build`, else behavior is
* undefined.
*
* @param build The build table, from which the hash table is built
* @param compare_nulls Controls whether null join-key values should match or not
* @param stream CUDA stream used for device memory operations and kernel launches
*/
hash_join(cudf::table_view const& build,
null_equality compare_nulls,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
* @copydoc hash_join(cudf::table_view const&, null_equality, rmm::cuda_stream_view)
*
* @param has_nulls Flag to indicate if there exists any nulls in the `build` table or
* any `probe` table that will be used later for join
*/
hash_join(cudf::table_view const& build,
nullable_join has_nulls,
null_equality compare_nulls,
rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
 * @brief Returns the row indices that can be used to construct the result of performing
* an inner join between two tables. @see cudf::inner_join(). Behavior is undefined if the
* provided `output_size` is smaller than the actual output size.
*
* @param probe The probe table, from which the tuples are probed
* @param output_size Optional value which allows users to specify the exact output size
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table and columns' device
* memory.
*
* @throw cudf::logic_error If the input probe table has nulls while this hash_join object was not
* constructed with null check.
*
* @return A pair of columns [`left_indices`, `right_indices`] that can be used to construct
* the result of performing an inner join between two tables with `build` and `probe`
 * as the join keys.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
inner_join(cudf::table_view const& probe,
std::optional<std::size_t> output_size = {},
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const;
/**
 * @brief Returns the row indices that can be used to construct the result of performing
* a left join between two tables. @see cudf::left_join(). Behavior is undefined if the
* provided `output_size` is smaller than the actual output size.
*
* @param probe The probe table, from which the tuples are probed
* @param output_size Optional value which allows users to specify the exact output size
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table and columns' device
* memory.
*
* @throw cudf::logic_error If the input probe table has nulls while this hash_join object was not
* constructed with null check.
*
* @return A pair of columns [`left_indices`, `right_indices`] that can be used to construct
* the result of performing a left join between two tables with `build` and `probe`
 * as the join keys.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
left_join(cudf::table_view const& probe,
std::optional<std::size_t> output_size = {},
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const;
/**
 * @brief Returns the row indices that can be used to construct the result of performing
* a full join between two tables. @see cudf::full_join(). Behavior is undefined if the
* provided `output_size` is smaller than the actual output size.
*
* @param probe The probe table, from which the tuples are probed
* @param output_size Optional value which allows users to specify the exact output size
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned table and columns' device
* memory.
*
* @throw cudf::logic_error If the input probe table has nulls while this hash_join object was not
* constructed with null check.
*
* @return A pair of columns [`left_indices`, `right_indices`] that can be used to construct
* the result of performing a full join between two tables with `build` and `probe`
 * as the join keys.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
full_join(cudf::table_view const& probe,
std::optional<std::size_t> output_size = {},
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const;
/**
 * @brief Returns the exact number of matches (rows) when performing an inner join with the specified
* probe table.
*
* @param probe The probe table, from which the tuples are probed
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @throw cudf::logic_error If the input probe table has nulls while this hash_join object was not
* constructed with null check.
*
* @return The exact number of output when performing an inner join between two tables with
 * `build` and `probe` as the join keys.
*/
[[nodiscard]] std::size_t inner_join_size(
cudf::table_view const& probe, rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
 * @brief Returns the exact number of matches (rows) when performing a left join with the specified probe
* table.
*
* @param probe The probe table, from which the tuples are probed
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @throw cudf::logic_error If the input probe table has nulls while this hash_join object was not
* constructed with null check.
*
* @return The exact number of output when performing a left join between two tables with `build`
 * and `probe` as the join keys.
*/
[[nodiscard]] std::size_t left_join_size(
cudf::table_view const& probe, rmm::cuda_stream_view stream = cudf::get_default_stream()) const;
/**
 * @brief Returns the exact number of matches (rows) when performing a full join with the specified probe
* table.
*
* @param probe The probe table, from which the tuples are probed
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the intermediate table and columns' device
* memory.
*
* @throw cudf::logic_error If the input probe table has nulls while this hash_join object was not
* constructed with null check.
*
* @return The exact number of output when performing a full join between two tables with `build`
 * and `probe` as the join keys.
*/
std::size_t full_join_size(
cudf::table_view const& probe,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const;
private:
const std::unique_ptr<impl_type const> _impl;
};
/**
* @brief Returns a pair of row index vectors corresponding to all pairs
* of rows between the specified tables where the predicate evaluates to true.
*
* The first returned vector contains the row indices from the left
* table that have a match in the right table (in unspecified order).
* The corresponding values in the second returned vector are
* the matched row indices from the right table.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Expression: Left.Column_0 == Right.Column_0
* Result: {{1, 2}, {0, 1}}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Expression: (Left.Column_0 == Right.Column_0) AND (Left.Column_1 == Right.Column_1)
* Result: {{1}, {0}}
* @endcode
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param output_size Optional value which allows users to specify the exact output size
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
 * the result of performing a conditional inner join between two tables `left` and `right`.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
conditional_inner_join(
table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
std::optional<std::size_t> output_size = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a pair of row index vectors corresponding to all pairs
* of rows between the specified tables where the predicate evaluates to true,
* or null matches for rows in left that have no match in right.
*
* The first returned vector contains all the row indices from the left
* table (in unspecified order). The corresponding value in the
* second returned vector is either (1) the row index of the matched row
* from the right table, if there is a match or (2) an unspecified
* out-of-bounds value.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Expression: Left.Column_0 == Right.Column_0
* Result: {{0, 1, 2}, {None, 0, 1}}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Expression: (Left.Column_0 == Right.Column_0) AND (Left.Column_1 == Right.Column_1)
* Result: {{0, 1, 2}, {None, 0, None}}
* @endcode
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param output_size Optional value which allows users to specify the exact output size
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
 * the result of performing a conditional left join between two tables `left` and `right`.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
conditional_left_join(table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
std::optional<std::size_t> output_size = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a pair of row index vectors corresponding to all pairs
* of rows between the specified tables where the predicate evaluates to true,
* or null matches for rows in either table that have no match in the other.
*
* Taken pairwise, the values from the returned vectors are one of:
* (1) row indices corresponding to matching rows from the left and
* right tables, (2) a row index and an unspecified out-of-bounds value,
* representing a row from one table without a match in the other.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Expression: Left.Column_0 == Right.Column_0
* Result: {{0, 1, 2, None}, {None, 0, 1, 2}}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Expression: (Left.Column_0 == Right.Column_0) AND (Left.Column_1 == Right.Column_1)
* Result: {{0, 1, 2, None, None}, {None, 0, None, 1, 2}}
* @endcode
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
 * the result of performing a conditional full join between two tables `left` and `right`.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
conditional_full_join(table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns an index vector corresponding to all rows in the left table
* for which there exists some row in the right table where the predicate
* evaluates to true.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Expression: Left.Column_0 == Right.Column_0
* Result: {1, 2}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Expression: (Left.Column_0 == Right.Column_0) AND (Left.Column_1 == Right.Column_1)
* Result: {1}
* @endcode
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param output_size Optional value which allows users to specify the exact output size
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A vector `left_indices` that can be used to construct the result of
* performing a conditional left semi join between two tables `left` and
 * `right`.
*/
std::unique_ptr<rmm::device_uvector<size_type>> conditional_left_semi_join(
table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
std::optional<std::size_t> output_size = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns an index vector corresponding to all rows in the left table
* for which there does not exist any row in the right table where the
* predicate evaluates to true.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @code{.pseudo}
* Left: {{0, 1, 2}}
* Right: {{1, 2, 3}}
* Expression: Left.Column_0 == Right.Column_0
* Result: {0}
*
* Left: {{0, 1, 2}, {3, 4, 5}}
* Right: {{1, 2, 3}, {4, 6, 7}}
* Expression: (Left.Column_0 == Right.Column_0) AND (Left.Column_1 == Right.Column_1)
* Result: {0, 2}
* @endcode
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param output_size Optional value which allows users to specify the exact output size
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A vector `left_indices` that can be used to construct the result of
* performing a conditional left anti join between two tables `left` and
* `right` .
*/
std::unique_ptr<rmm::device_uvector<size_type>> conditional_left_anti_join(
table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
std::optional<std::size_t> output_size = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a pair of row index vectors corresponding to all pairs of
* rows between the specified tables where the columns of the equality table
* are equal and the predicate evaluates to true on the conditional tables.
*
* The first returned vector contains the row indices from the left
* table that have a match in the right table (in unspecified order).
* The corresponding values in the second returned vector are
* the matched row indices from the right table.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output. It is the user's
* responsibility to choose a suitable compare_nulls value AND use appropriate
* null-safe operators in the expression.
*
* If the provided output size or per-row counts are incorrect, behavior is undefined.
*
* @code{.pseudo}
* left_equality: {{0, 1, 2}}
* right_equality: {{1, 2, 3}}
* left_conditional: {{4, 4, 4}}
* right_conditional: {{3, 4, 5}}
* Expression: Left.Column_0 > Right.Column_0
* Result: {{1}, {0}}
* @endcode
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
* @param output_size_data An optional pair of values indicating the exact output size and the
* number of matches for each row in the larger of the two input tables, left or right (may be
* precomputed using the corresponding mixed_inner_join_size API).
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
* the result of performing a mixed inner join between the four input tables.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_inner_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
std::optional<std::pair<std::size_t, device_span<size_type const>>> output_size_data = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a pair of row index vectors corresponding to all pairs of
* rows between the specified tables where the columns of the equality table
* are equal and the predicate evaluates to true on the conditional tables,
* or null matches for rows in left that have no match in right.
*
* The first returned vector contains the row indices from the left
* tables that have a match in the right tables (in unspecified order).
* The corresponding value in the second returned vector is either (1)
* the row index of the matched row from the right tables, or (2) an
* unspecified out-of-bounds value.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output. It is the user's
* responsibility to choose a suitable compare_nulls value AND use appropriate
* null-safe operators in the expression.
*
* If the provided output size or per-row counts are incorrect, behavior is undefined.
*
* @code{.pseudo}
* left_equality: {{0, 1, 2}}
* right_equality: {{1, 2, 3}}
* left_conditional: {{4, 4, 4}}
* right_conditional: {{3, 4, 5}}
* Expression: Left.Column_0 > Right.Column_0
* Result: {{0, 1, 2}, {None, 0, None}}
* @endcode
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
* @param output_size_data An optional pair of values indicating the exact output size and the
* number of matches for each row in the larger of the two input tables, left or right (may be
* precomputed using the corresponding mixed_left_join_size API).
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
* the result of performing a mixed left join between the four input tables.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_left_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
std::optional<std::pair<std::size_t, device_span<size_type const>>> output_size_data = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a pair of row index vectors corresponding to all pairs of
* rows between the specified tables where the columns of the equality table
* are equal and the predicate evaluates to true on the conditional tables,
* or null matches for rows in either pair of tables that have no matches in
* the other pair.
*
* Taken pairwise, the values from the returned vectors are one of:
* (1) row indices corresponding to matching rows from the left and
* right tables, (2) a row index and an unspecified out-of-bounds value,
* representing a row from one table without a match in the other.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output. It is the user's
* responsibility to choose a suitable compare_nulls value AND use appropriate
* null-safe operators in the expression.
*
* If the provided output size or per-row counts are incorrect, behavior is undefined.
*
* @code{.pseudo}
* left_equality: {{0, 1, 2}}
* right_equality: {{1, 2, 3}}
* left_conditional: {{4, 4, 4}}
* right_conditional: {{3, 4, 5}}
* Expression: Left.Column_0 > Right.Column_0
* Result: {{0, 1, 2, None, None}, {None, 0, None, 1, 2}}
* @endcode
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
* @param output_size_data An optional pair of values indicating the exact output size and the
* number of matches for each row in the larger of the two input tables, left or right (may be
* precomputed using the corresponding mixed_full_join_size API).
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair of vectors [`left_indices`, `right_indices`] that can be used to construct
* the result of performing a mixed full join between the four input tables.
*/
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_full_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
std::optional<std::pair<std::size_t, device_span<size_type const>>> output_size_data = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns an index vector corresponding to all rows in the left tables
* where the columns of the equality table are equal and the predicate
* evaluates to true on the conditional tables.
*
* If the provided predicate returns NULL for a pair of rows (left, right), the
* left row is not included in the output. It is the user's responsibility to
* choose a suitable compare_nulls value AND use appropriate null-safe
* operators in the expression.
*
* If the provided output size or per-row counts are incorrect, behavior is undefined.
*
* @code{.pseudo}
* left_equality: {{0, 1, 2}}
* right_equality: {{1, 2, 3}}
* left_conditional: {{4, 4, 4}}
* right_conditional: {{3, 4, 5}}
* Expression: Left.Column_0 > Right.Column_0
* Result: {1}
* @endcode
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
 * @param output_size_data An optional pair of values indicating the exact output size and the
 * number of matches for each row in the larger of the two input tables, left or right (may be
 * precomputed using the corresponding mixed_left_semi_join_size API).
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
 * @return A vector `left_indices` of row indices from the left tables that can be used to
 * construct the result of performing a mixed left semi join between the four input tables.
*/
std::unique_ptr<rmm::device_uvector<size_type>> mixed_left_semi_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
std::optional<std::pair<std::size_t, device_span<size_type const>>> output_size_data = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns an index vector corresponding to all rows in the left tables
* for which there is no row in the right tables where the columns of the
* equality table are equal and the predicate evaluates to true on the
* conditional tables.
*
* If the provided predicate returns NULL for a pair of rows (left, right), the
* left row is not included in the output. It is the user's responsibility to
* choose a suitable compare_nulls value AND use appropriate null-safe
* operators in the expression.
*
* If the provided output size or per-row counts are incorrect, behavior is undefined.
*
* @code{.pseudo}
* left_equality: {{0, 1, 2}}
* right_equality: {{1, 2, 3}}
* left_conditional: {{4, 4, 4}}
* right_conditional: {{3, 4, 5}}
* Expression: Left.Column_0 > Right.Column_0
* Result: {0, 2}
* @endcode
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
 * @param output_size_data An optional pair of values indicating the exact output size and the
 * number of matches for each row in the larger of the two input tables, left or right (may be
 * precomputed using the corresponding mixed_left_anti_join_size API).
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
 * @return A vector `left_indices` of row indices from the left tables that can be used to
 * construct the result of performing a mixed left anti join between the four input tables.
*/
std::unique_ptr<rmm::device_uvector<size_type>> mixed_left_anti_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
std::optional<std::pair<std::size_t, device_span<size_type const>>> output_size_data = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a
* mixed inner join between the specified tables where the columns of the
* equality table are equal and the predicate evaluates to true on the
* conditional tables.
*
* If the provided predicate returns NULL for a pair of rows (left, right),
* that pair is not included in the output. It is the user's responsibility to
* choose a suitable compare_nulls value AND use appropriate null-safe
* operators in the expression.
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair containing the size that would result from performing the
* requested join and the number of matches for each row in one of the two
* tables. Which of the two tables is an implementation detail and should not
* be relied upon, simply passed to the corresponding `mixed_inner_join` API as
* is.
*/
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_inner_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a
* mixed left join between the specified tables where the columns of the
* equality table are equal and the predicate evaluates to true on the
* conditional tables.
*
* If the provided predicate returns NULL for a pair of rows (left, right),
* that pair is not included in the output. It is the user's responsibility to
* choose a suitable compare_nulls value AND use appropriate null-safe
* operators in the expression.
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair containing the size that would result from performing the
* requested join and the number of matches for each row in one of the two
* tables. Which of the two tables is an implementation detail and should not
* be relied upon, simply passed to the corresponding `mixed_left_join` API as
* is.
*/
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_left_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a mixed
* left semi join between the specified tables where the columns of the
* equality table are equal and the predicate evaluates to true on the
* conditional tables.
*
* If the provided predicate returns NULL for a pair of rows (left, right),
* that pair is not included in the output. It is the user's responsibility to
* choose a suitable compare_nulls value AND use appropriate null-safe
* operators in the expression.
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair containing the size that would result from performing the
* requested join and the number of matches for each row in one of the two
* tables. Which of the two tables is an implementation detail and should not
 * be relied upon, simply passed to the corresponding `mixed_left_semi_join` API as
 * is.
*/
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_left_semi_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a mixed
* left anti join between the specified tables.
*
* If the provided predicate returns NULL for a pair of rows (left, right),
* that pair is not included in the output. It is the user's responsibility to
* choose a suitable compare_nulls value AND use appropriate null-safe
* operators in the expression.
*
* @throw cudf::logic_error If the binary predicate outputs a non-boolean result.
* @throw cudf::logic_error If the number of rows in left_equality and left_conditional do not
* match.
* @throw cudf::logic_error If the number of rows in right_equality and right_conditional do not
* match.
*
* @param left_equality The left table used for the equality join
* @param right_equality The right table used for the equality join
* @param left_conditional The left table used for the conditional join
* @param right_conditional The right table used for the conditional join
* @param binary_predicate The condition on which to join
* @param compare_nulls Whether or not null values join to each other or not
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return A pair containing the size that would result from performing the
* requested join and the number of matches for each row in one of the two
* tables. Which of the two tables is an implementation detail and should not
 * be relied upon, simply passed to the corresponding `mixed_left_anti_join` API as
 * is.
*/
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_left_anti_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls = null_equality::EQUAL,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a
* conditional inner join between the specified tables where the predicate
* evaluates to true.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return The size that would result from performing the requested join
*/
std::size_t conditional_inner_join_size(
table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a
* conditional left join between the specified tables where the predicate
* evaluates to true.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return The size that would result from performing the requested join
*/
std::size_t conditional_left_join_size(
table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a
* conditional left semi join between the specified tables where the predicate
* evaluates to true.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return The size that would result from performing the requested join
*/
std::size_t conditional_left_semi_join_size(
table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the exact number of matches (rows) when performing a
* conditional left anti join between the specified tables where the predicate
* evaluates to true.
*
* If the provided predicate returns NULL for a pair of rows
* (left, right), that pair is not included in the output.
*
* @throw cudf::logic_error if the binary predicate outputs a non-boolean result.
*
* @param left The left table
* @param right The right table
* @param binary_predicate The condition on which to join
* @param mr Device memory resource used to allocate the returned table and columns' device memory
*
* @return The size that would result from performing the requested join
*/
std::size_t conditional_left_anti_join_size(
table_view const& left,
table_view const& right,
ast::expression const& binary_predicate,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/transform.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/ast/expressions.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
* @addtogroup transformation_transform
* @{
* @file
* @brief Column APIs for transforming rows
*/
/**
* @brief Creates a new column by applying a unary function against every
* element of an input column.
*
* Computes:
* `out[i] = F(in[i])`
*
 * The output null mask is the same as the input null mask, so if input[i] is
 * null then output[i] is also null.
*
* @param input An immutable view of the input column to transform
* @param unary_udf The PTX/CUDA string of the unary function to apply
* @param output_type The output type that is compatible with the output type in the UDF
* @param is_ptx true: the UDF is treated as PTX code; false: the UDF is treated as CUDA code
* @param mr Device memory resource used to allocate the returned column's device memory
* @return The column resulting from applying the unary function to
* every element of the input
*/
std::unique_ptr<column> transform(
column_view const& input,
std::string const& unary_udf,
data_type output_type,
bool is_ptx,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a null_mask from `input` by converting `NaN` to null and
* preserving existing null values and also returns new null_count.
*
* @throws cudf::logic_error if `input.type()` is a non-floating type
*
* @param input An immutable view of the input column of floating-point type
* @param mr Device memory resource used to allocate the returned bitmask
 * @return A pair containing a `device_buffer` with the new bitmask and its
 * null count obtained by replacing `NaN` in `input` with null.
*/
std::pair<std::unique_ptr<rmm::device_buffer>, size_type> nans_to_nulls(
column_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Compute a new column by evaluating an expression tree on a table.
*
* This evaluates an expression over a table to produce a new column. Also called an n-ary
* transform.
*
* @throws cudf::logic_error if passed an expression operating on table_reference::RIGHT.
*
* @param table The table used for expression evaluation
* @param expr The root of the expression tree
* @param mr Device memory resource
* @return Output column
*/
std::unique_ptr<column> compute_column(
table_view const& table,
ast::expression const& expr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a bitmask from a column of boolean elements.
*
* If element `i` in `input` is `true`, bit `i` in the resulting mask is set (`1`). Else,
* if element `i` is `false` or null, bit `i` is unset (`0`).
*
*
* @throws cudf::logic_error if `input.type()` is a non-boolean type
*
* @param input Boolean elements to convert to a bitmask
* @param mr Device memory resource used to allocate the returned bitmask
 * @return A pair containing a `device_buffer` with the new bitmask and its
 * null count obtained from input considering `true` represents `valid`/`1` and
 * `false` represents `invalid`/`0`.
*/
std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(
column_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Encode the rows of the given table as integers
*
* The encoded values are integers in the range [0, n), where `n`
* is the number of distinct rows in the input table.
* The result table is such that `keys[result[i]] == input[i]`,
* where `keys` is a table containing the distinct rows in `input` in
* sorted ascending order. Nulls, if any, are sorted to the end of
* the `keys` table.
*
* Examples:
* @code{.pseudo}
* input: [{'a', 'b', 'b', 'a'}]
* output: [{'a', 'b'}], {0, 1, 1, 0}
*
* input: [{1, 3, 1, 2, 9}, {1, 2, 1, 3, 5}]
* output: [{1, 2, 3, 9}, {1, 3, 2, 5}], {0, 2, 0, 1, 3}
* @endcode
*
* @param input Table containing values to be encoded
* @param mr Device memory resource used to allocate the returned table's device memory
 * @return A pair containing the distinct rows of the input table in sorted order,
 * and a column of integer indices representing the encoded rows.
*/
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::column>> encode(
cudf::table_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Encodes `input` by generating a new column for each value in `categories` indicating the
* presence of that value in `input`.
*
* The resulting per-category columns are returned concatenated as a single column viewed by a
* `table_view`.
*
 * The `i`th row of the `j`th column in the output table equals 1
 * if `input[i] == categories[j]`, and 0 otherwise.
*
* Examples:
* @code{.pseudo}
* input: [{'a', 'c', null, 'c', 'b'}]
* categories: ['c', null]
* output: [{0, 1, 0, 1, 0}, {0, 0, 1, 0, 0}]
* @endcode
*
* @throws cudf::logic_error if input and categories are of different types.
*
* @param input Column containing values to be encoded
* @param categories Column containing categories
* @param mr Device memory resource used to allocate the returned table's device memory
* @return A pair containing the owner to all encoded data and a table view into the data
*/
std::pair<std::unique_ptr<column>, table_view> one_hot_encode(
column_view const& input,
column_view const& categories,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a boolean column from given bitmask.
*
* Returns a `bool` for each bit in `[begin_bit, end_bit)`. If bit `i` in least-significant bit
* numbering is set (1), then element `i` in the output is `true`, otherwise `false`.
*
* @throws cudf::logic_error if `bitmask` is null and end_bit-begin_bit > 0
* @throws cudf::logic_error if begin_bit > end_bit
*
* Examples:
* @code{.pseudo}
* input: {0b10101010}
* output: [{false, true, false, true, false, true, false, true}]
* @endcode
*
* @param bitmask A device pointer to the bitmask which needs to be converted
* @param begin_bit position of the bit from which the conversion should start
* @param end_bit position of the bit before which the conversion should stop
* @param mr Device memory resource used to allocate the returned columns' device memory
* @return A boolean column representing the given mask from [begin_bit, end_bit)
*/
std::unique_ptr<column> mask_to_bools(
bitmask_type const* bitmask,
size_type begin_bit,
size_type end_bit,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns an approximate cumulative size in bits of all columns in the `table_view` for
* each row.
*
* This function counts bits instead of bytes to account for the null mask which only has one
* bit per row.
*
* Each row in the returned column is the sum of the per-row size for each column in
* the table.
*
* In some cases, this is an inexact approximation. Specifically, columns of lists and strings
* require N+1 offsets to represent N rows. It is up to the caller to calculate the small
* additional overhead of the terminating offset for any group of rows being considered.
*
* This function returns the per-row sizes as the columns are currently formed. This can
* end up being larger than the number you would get by gathering the rows. Specifically,
* the push-down of struct column validity masks can nullify rows that contain data for
* string or list columns. In these cases, the size returned is conservative:
*
* row_bit_count(column(x)) >= row_bit_count(gather(column(x)))
*
* @param t The table view to perform the computation on
* @param mr Device memory resource used to allocate the returned columns' device memory
* @return A 32-bit integer column containing the per-row bit counts
*/
std::unique_ptr<column> row_bit_count(
table_view const& t,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/merge.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>
namespace cudf {
/**
* @addtogroup column_merge
* @{
* @file
*/
/**
* @brief Merge a set of sorted tables.
*
* Merges sorted tables into one sorted table
* containing data from all tables. The key columns
* of each table must be sorted according to the
* parameters (cudf::column_order and cudf::null_order)
* specified for that column.
*
* ```
* Example 1:
* input:
* table 1 => col 1 {0, 1, 2, 3}
* col 2 {4, 5, 6, 7}
* table 2 => col 1 {1, 2}
* col 2 {8, 9}
* table 3 => col 1 {2, 4}
* col 2 {8, 9}
* output:
* table => col 1 {0, 1, 1, 2, 2, 2, 3, 4}
* col 2 {4, 5, 8, 6, 8, 9, 7, 9}
* ```
* ```
* Example 2:
* input:
* table 1 => col 0 {1, 0}
* col 1 {'c', 'b'}
* col 2 {RED, GREEN}
*
*
* table 2 => col 0 {1}
* col 1 {'a'}
* col 2 {NULL}
*
* with key_cols[] = {0,1}
* and asc_desc[] = {ASC, ASC};
*
* Lex-sorting is on columns {0,1}; hence, lex-sorting of ((L0 x L1) V (R0 x R1)) is:
* (0,'b', GREEN), (1,'a', NULL), (1,'c', RED)
*
* (third column, the "color", just "goes along for the ride";
* meaning it is permuted according to the data movements dictated
* by lexicographic ordering of columns 0 and 1)
*
* with result columns:
*
* Res0 = {0,1,1}
* Res1 = {'b', 'a', 'c'}
* Res2 = {GREEN, NULL, RED}
* ```
*
* @throws cudf::logic_error if tables in `tables_to_merge` have different
* number of columns
* @throws cudf::logic_error if tables in `tables_to_merge` have columns with
* mismatched types
* @throws cudf::logic_error if `key_cols` is empty
* @throws cudf::logic_error if `key_cols` size is larger than the number of
* columns in `tables_to_merge` tables
* @throws cudf::logic_error if `key_cols` size and `column_order` size mismatches
*
* @param[in] tables_to_merge Non-empty list of tables to be merged
* @param[in] key_cols Indices of left_cols and right_cols to be used
* for comparison criteria
* @param[in] column_order Sort order types of columns indexed by key_cols
* @param[in] null_precedence Array indicating the order of nulls with respect
* to non-nulls for the indexing columns (key_cols)
* @param mr Device memory resource used to allocate the returned table's device memory
*
* @returns A table containing sorted data from all input tables
*/
std::unique_ptr<cudf::table> merge(
std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence = {},
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/rolling.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/rolling/range_window_bounds.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
* @addtogroup aggregation_rolling
* @{
* @file
*/
/**
* @brief Applies a fixed-size rolling window function to the values in a column.
*
* This function aggregates values in a window around each element i of the input column, and
* invalidates the bit mask for element i if there are not enough observations. The window size is
* static (the same for each element). This matches Pandas' API for DataFrame.rolling with a few
* notable differences:
* - instead of the center flag it uses a two-part window to allow for more flexible windows.
* The total window size = `preceding_window + following_window`. Element `i` uses elements
* `[i-preceding_window+1, i+following_window]` to do the window computation.
* - instead of storing NA/NaN for output rows that do not meet the minimum number of observations
* this function updates the valid bitmask of the column to indicate which elements are valid.
*
* Notes on return column types:
* - The returned column for count aggregation always has `INT32` type.
* - The returned column for VARIANCE/STD aggregations always has `FLOAT64` type.
* - All other operators return a column of the same type as the input. Therefore
* it is suggested to convert integer column types (especially low-precision integers)
* to `FLOAT32` or `FLOAT64` before doing a rolling `MEAN`.
*
* @param[in] input The input column
* @param[in] preceding_window The static rolling window size in the backward direction
* @param[in] following_window The static rolling window size in the forward direction
* @param[in] min_periods Minimum number of observations in window required to have a value,
* otherwise element `i` is null.
* @param[in] agg The rolling window aggregation type (SUM, MAX, MIN, etc.)
* @param[in] mr Device memory resource used to allocate the returned column's device memory
*
* @returns A nullable output column containing the rolling window results
*/
std::unique_ptr<column> rolling_window(
column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief @copybrief rolling_window
* @details @copydetails rolling_window(
* column_view const& input,
* size_type preceding_window,
* size_type following_window,
* size_type min_periods,
* rolling_aggregation const& agg,
* rmm::mr::device_memory_resource* mr)
*
* @param default_outputs A column of per-row default values to be returned instead
* of nulls. Used for LEAD()/LAG(), if the row offset crosses
* the boundaries of the column.
*/
std::unique_ptr<column> rolling_window(
column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& agg,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Abstraction for window boundary sizes
 */
struct window_bounds {
 public:
  /**
   * @brief Construct bounded window boundary.
   *
   * @param value Finite window boundary (in days or rows)
   * @returns A window boundary
   */
  static window_bounds get(size_type value) { return window_bounds(false, value); }
  /**
   * @brief Construct unbounded window boundary.
   *
   * @return window_bounds
   */
  static window_bounds unbounded()
  {
    return window_bounds(true, std::numeric_limits<cudf::size_type>::max());
  }
  /**
   * @brief Whether the window_bounds is unbounded.
   *
   * @return true if the window bounds is unbounded.
   * @return false if the window bounds has a finite row boundary.
   */
  [[nodiscard]] bool is_unbounded() const { return _is_unbounded; }
  /**
   * @brief Gets the row-boundary for this window_bounds.
   *
   * @return the row boundary value (in days or rows)
   */
  [[nodiscard]] size_type value() const { return _value; }
 private:
  // Private: instances are created only through the get()/unbounded() factories.
  // `value_` defaults to 0 for the unbounded case, where it is not meaningful.
  explicit window_bounds(bool is_unbounded_, size_type value_ = 0)
    : _is_unbounded{is_unbounded_}, _value{value_}
  {
  }
  bool const _is_unbounded;  ///< Whether the window boundary is unbounded
  size_type const _value;    ///< Finite window boundary value (in days or rows)
};
/**
* @brief Applies a grouping-aware, fixed-size rolling window function to the values in a column.
*
* Like `rolling_window()`, this function aggregates values in a window around each
* element of a specified `input` column. It differs from `rolling_window()` in that elements of the
* `input` column are grouped into distinct groups (e.g. the result of a groupby). The window
* aggregation cannot cross the group boundaries. For a row `i` of `input`, the group is determined
* from the corresponding (i.e. i-th) values of the columns under `group_keys`.
*
* Note: This method requires that the rows are presorted by the `group_key` values.
*
* @code{.pseudo}
* Example: Consider a user-sales dataset, where the rows look as follows:
* { "user_id", sales_amt, day }
*
* The `grouped_rolling_window()` method enables windowing queries such as grouping a dataset by
* `user_id`, and summing up the `sales_amt` column over a window of 3 rows (2 preceding (including
* current row), 1 row following).
*
* In this example,
* 1. `group_keys == [ user_id ]`
* 2. `input == sales_amt`
* The data are grouped by `user_id`, and ordered by `day`-string. The aggregation
* (SUM) is then calculated for a window of 3 values around (and including) each row.
*
* For the following input:
*
* [ // user, sales_amt
* { "user1", 10 },
* { "user2", 20 },
* { "user1", 20 },
* { "user1", 10 },
* { "user2", 30 },
* { "user2", 80 },
* { "user1", 50 },
* { "user1", 60 },
* { "user2", 40 }
* ]
*
* Partitioning (grouping) by `user_id` yields the following `sales_amt` vector
* (with 2 groups, one for each distinct `user_id`):
*
* [ 10, 20, 10, 50, 60, 20, 30, 80, 40 ]
* <-------user1-------->|<------user2------->
*
* The SUM aggregation is applied with 1 preceding and 1 following
* row, with a minimum of 1 period. The aggregation window is thus 3 rows wide,
* yielding the following column:
*
* [ 30, 40, 80, 120, 110, 50, 130, 150, 120 ]
*
* Note: The SUMs calculated at the group boundaries (i.e. indices 0, 4, 5, and 8)
* consider only 2 values each, in spite of the window-size being 3.
* Each aggregation operation cannot cross group boundaries.
* @endcode
*
* The returned column for `op == COUNT` always has `INT32` type. All other operators return a
* column of the same type as the input. Therefore it is suggested to convert integer column types
* (especially low-precision integers) to `FLOAT32` or `FLOAT64` before doing a rolling `MEAN`.
*
* Note: `preceding_window` and `following_window` could well have negative values. This yields
* windows where the current row might not be included at all. For instance, consider a window
* defined as (preceding=3, following=-1). This produces a window from 2 (i.e. 3-1) rows preceding
* the current row, and 1 row *preceding* the current row. For the example above, the window for
* row#3 is:
*
* [ 10, 20, 10, 50, 60, 20, 30, 80, 40 ]
* <--window--> ^
* |
* current_row
*
* Similarly, `preceding` could have a negative value, indicating that the window begins at a
* position after the current row. It differs slightly from the semantics for `following`, because
* `preceding` includes the current row. Therefore:
* 1. preceding=1 => Window starts at the current row.
* 2. preceding=0 => Window starts at 1 past the current row.
* 3. preceding=-1 => Window starts at 2 past the current row. Etc.
*
* @param[in] group_keys The (pre-sorted) grouping columns
* @param[in] input The input column (to be aggregated)
* @param[in] preceding_window The static rolling window size in the backward direction (for
* positive values), or forward direction (for negative values)
* @param[in] following_window The static rolling window size in the forward direction (for positive
* values), or backward direction (for negative values)
* @param[in] min_periods Minimum number of observations in window required to have a value,
* otherwise element `i` is null.
* @param[in] aggr The rolling window aggregation type (SUM, MAX, MIN, etc.)
* @param[in] mr Device memory resource used to allocate the returned column's device memory
*
* @returns A nullable output column containing the rolling window results
*/
std::unique_ptr<column> grouped_rolling_window(
table_view const& group_keys,
column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief @copybrief grouped_rolling_window
* @details @copydetails grouped_rolling_window(
* table_view const& group_keys,
* column_view const& input,
* size_type preceding_window,
* size_type following_window,
* size_type min_periods,
* rolling_aggregation const& aggr,
* rmm::mr::device_memory_resource* mr)
*/
std::unique_ptr<column> grouped_rolling_window(
table_view const& group_keys,
column_view const& input,
window_bounds preceding_window,
window_bounds following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief @copybrief grouped_rolling_window
* @details @copydetails grouped_rolling_window(
* table_view const& group_keys,
* column_view const& input,
* size_type preceding_window,
* size_type following_window,
* size_type min_periods,
* rolling_aggregation const& aggr,
* rmm::mr::device_memory_resource* mr)
*
* @param default_outputs A column of per-row default values to be returned instead
* of nulls. Used for LEAD()/LAG(), if the row offset crosses
* the boundaries of the column or group.
*/
std::unique_ptr<column> grouped_rolling_window(
table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
size_type preceding_window,
size_type following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief @copybrief grouped_rolling_window
* @details @copydetails grouped_rolling_window(
* table_view const& group_keys,
* column_view const& input,
* column_view const& default_outputs,
* size_type preceding_window,
* size_type following_window,
* size_type min_periods,
* rolling_aggregation const& aggr,
* rmm::mr::device_memory_resource* mr)
*/
std::unique_ptr<column> grouped_rolling_window(
table_view const& group_keys,
column_view const& input,
column_view const& default_outputs,
window_bounds preceding_window,
window_bounds following_window,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Applies a grouping-aware, timestamp-based rolling window function to the values in a
* column.
*
* Like `rolling_window()`, this function aggregates values in a window around each
* element of a specified `input` column. It differs from `rolling_window()` in two respects:
* 1. The elements of the `input` column are grouped into distinct groups (e.g. the result of a
* groupby), determined by the corresponding values of the columns under `group_keys`. The
* window-aggregation cannot cross the group boundaries.
* 2. Within a group, the aggregation window is calculated based on a time interval (e.g. number
* of days preceding/following the current row). The timestamps for the input data are
* specified by the `timestamp_column` argument.
*
* Note: This method requires that the rows are presorted by the group keys and timestamp values.
*
* @code{.pseudo}
* Example: Consider a user-sales dataset, where the rows look as follows:
* { "user_id", sales_amt, date }
*
* This method enables windowing queries such as grouping a dataset by `user_id`, sorting by
 * increasing `date`, and summing up the `sales_amt` column over a window of 3 days (1 preceding
 * day, the current day, and 1 following day).
*
* In this example,
* 1. `group_keys == [ user_id ]`
* 2. `timestamp_column == date`
* 3. `input == sales_amt`
* The data are grouped by `user_id`, and ordered by `date`. The aggregation
* (SUM) is then calculated for a window of 3 days around (and including) each row.
*
* For the following input:
*
* [ // user, sales_amt, YYYYMMDD (date)
* { "user1", 10, 20200101 },
* { "user2", 20, 20200101 },
* { "user1", 20, 20200102 },
* { "user1", 10, 20200103 },
* { "user2", 30, 20200101 },
* { "user2", 80, 20200102 },
* { "user1", 50, 20200107 },
* { "user1", 60, 20200107 },
* { "user2", 40, 20200104 }
* ]
*
* Partitioning (grouping) by `user_id`, and ordering by `date` yields the following `sales_amt`
* vector (with 2 groups, one for each distinct `user_id`):
*
* Date :(202001-) [ 01, 02, 03, 07, 07, 01, 01, 02, 04 ]
* Input: [ 10, 20, 10, 50, 60, 20, 30, 80, 40 ]
* <-------user1-------->|<---------user2--------->
*
* The SUM aggregation is applied, with 1 day preceding, and 1 day following, with a minimum of 1
* period. The aggregation window is thus 3 *days* wide, yielding the following output column:
*
* Results: [ 30, 40, 30, 110, 110, 130, 130, 130, 40 ]
*
* @endcode
*
* Note: The number of rows participating in each window might vary, based on the index within the
* group, datestamp, and `min_periods`. Apropos:
* 1. results[0] considers 2 values, because it is at the beginning of its group, and has no
* preceding values.
* 2. results[5] considers 3 values, despite being at the beginning of its group. It must include 2
* following values, based on its datestamp.
*
* Each aggregation operation cannot cross group boundaries.
*
* The returned column for `op == COUNT` always has `INT32` type. All other operators return a
* column of the same type as the input. Therefore it is suggested to convert integer column types
* (especially low-precision integers) to `FLOAT32` or `FLOAT64` before doing a rolling `MEAN`.
*
* @param[in] group_keys The (pre-sorted) grouping columns
* @param[in] timestamp_column The (pre-sorted) timestamps for each row
* @param[in] timestamp_order The order (ASCENDING/DESCENDING) in which the timestamps are sorted
* @param[in] input The input column (to be aggregated)
* @param[in] preceding_window_in_days The rolling window time-interval in the backward direction
* @param[in] following_window_in_days The rolling window time-interval in the forward direction
* @param[in] min_periods Minimum number of observations in window required to have a value,
* otherwise element `i` is null.
* @param[in] aggr The rolling window aggregation type (SUM, MAX, MIN, etc.)
* @param[in] mr Device memory resource used to allocate the returned column's device memory
*
* @returns A nullable output column containing the rolling window results
*/
std::unique_ptr<column> grouped_time_range_rolling_window(
table_view const& group_keys,
column_view const& timestamp_column,
cudf::order const& timestamp_order,
column_view const& input,
size_type preceding_window_in_days,
size_type following_window_in_days,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Applies a grouping-aware, timestamp-based rolling window function to the values in a
 * column.
 *
 * @details @copydetails grouped_time_range_rolling_window(
 *            table_view const& group_keys,
 *            column_view const& timestamp_column,
 *            cudf::order const& timestamp_order,
 *            column_view const& input,
 *            size_type preceding_window_in_days,
 *            size_type following_window_in_days,
 *            size_type min_periods,
 *            rolling_aggregation const& aggr,
 *            rmm::mr::device_memory_resource* mr)
 *
 * The `preceding_window_in_days` and `following_window_in_days` are specified as `window_bounds`
 * and support "unbounded" windows, if set to `window_bounds::unbounded()`.
 */
std::unique_ptr<column> grouped_time_range_rolling_window(
  table_view const& group_keys,
  column_view const& timestamp_column,
  cudf::order const& timestamp_order,
  column_view const& input,
  window_bounds preceding_window_in_days,
  window_bounds following_window_in_days,
  size_type min_periods,
  rolling_aggregation const& aggr,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Applies a grouping-aware, value range-based rolling window function to the values in a
* column.
*
* This function aggregates rows in a window around each element of a specified `input` column.
* The window is determined based on the values of an ordered `orderby` column, and on the values
* of a `preceding` and `following` scalar representing an inclusive range of orderby column values.
*
* 1. The elements of the `input` column are grouped into distinct groups (e.g. the result of a
* groupby), determined by the corresponding values of the columns under `group_keys`. The
* window-aggregation cannot cross the group boundaries.
* 2. Within a group, with all rows sorted by the `orderby` column, the aggregation window
* for a row at index `i` is determined as follows:
* a) If `orderby` is ASCENDING, aggregation window for row `i` includes all `input` rows at
* index `j` such that:
* @code{.pseudo}
* (orderby[i] - preceding) <= orderby[j] <= orderby[i] + following
* @endcode
* b) If `orderby` is DESCENDING, aggregation window for row `i` includes all `input` rows at
* index `j` such that:
* @code{.pseudo}
* (orderby[i] + preceding) >= orderby[j] >= orderby[i] - following
* @endcode
*
* Note: This method requires that the rows are presorted by the group keys and orderby column
* values.
*
* The window intervals are specified as scalar values appropriate for the orderby column.
* Currently, only the following combinations of `orderby` column type and range types
* are supported:
* 1. If `orderby` column is a TIMESTAMP, the `preceding`/`following` windows are specified
* in terms of `DURATION` scalars of the same resolution.
* E.g. For `orderby` column of type `TIMESTAMP_SECONDS`, the intervals may only be
* `DURATION_SECONDS`. Durations of higher resolution (e.g. `DURATION_NANOSECONDS`)
* or lower (e.g. `DURATION_DAYS`) cannot be used.
* 2. If the `orderby` column is an integral type (e.g. `INT32`), the `preceding`/`following`
* should be the exact same type (`INT32`).
*
* @code{.pseudo}
* Example: Consider a motor-racing statistics dataset, containing the following columns:
* 1. driver_name: (STRING) Name of the car driver
* 2. num_overtakes: (INT32) Number of times the driver overtook another car in a lap
* 3. lap_number: (INT32) The number of the lap
*
 * The `grouped_range_rolling_window()` function allows one to calculate the total number of overtakes
* each driver made within any 3 lap window of each entry:
* 1. Group/partition the dataset by `driver_id` (This is the group_keys argument.)
* 2. Sort each group by the `lap_number` (i.e. This is the orderby_column.)
* 3. Calculate the SUM(num_overtakes) over a window (preceding=1, following=1)
*
* For the following input:
*
* [ // driver_name, num_overtakes, lap_number
* { "bottas", 1, 1 },
* { "hamilton", 2, 1 },
* { "bottas", 2, 2 },
* { "bottas", 1, 3 },
* { "hamilton", 3, 1 },
* { "hamilton", 8, 2 },
* { "bottas", 5, 7 },
* { "bottas", 6, 8 },
* { "hamilton", 4, 4 }
* ]
*
* Partitioning (grouping) by `driver_name`, and ordering by `lap_number` yields the following
* `num_overtakes` vector (with 2 groups, one for each distinct `driver_name`):
*
* lap_number: [ 1, 2, 3, 7, 8, 1, 1, 2, 4 ]
* num_overtakes: [ 1, 2, 1, 5, 6, 2, 3, 8, 4 ]
* <-----bottas------>|<----hamilton--->
*
* The SUM aggregation is applied, with 1 preceding, and 1 following, with a minimum of 1
* period. The aggregation window is thus 3 (laps) wide, yielding the following output column:
*
* Results: [ 3, 4, 3, 11, 11, 13, 13, 13, 4 ]
*
* @endcode
*
* Note: The number of rows participating in each window might vary, based on the index within the
* group, datestamp, and `min_periods`. Apropos:
* 1. results[0] considers 2 values, because it is at the beginning of its group, and has no
* preceding values.
* 2. results[5] considers 3 values, despite being at the beginning of its group. It must include 2
* following values, based on its orderby_column value.
*
* Each aggregation operation cannot cross group boundaries.
*
* The type of the returned column depends on the input column type `T`, and the aggregation:
* 1. COUNT returns `INT32` columns
* 2. MIN/MAX returns `T` columns
* 3. SUM returns the promoted type for T. Sum on `INT32` yields `INT64`.
* 4. MEAN returns FLOAT64 columns
* 5. COLLECT returns columns of type `LIST<T>`.
*
* LEAD/LAG/ROW_NUMBER are undefined for range queries.
*
* @param[in] group_keys The (pre-sorted) grouping columns
* @param[in] orderby_column The (pre-sorted) order-by column, for range comparisons
* @param[in] order The order (ASCENDING/DESCENDING) in which the order-by column is sorted
* @param[in] input The input column (to be aggregated)
* @param[in] preceding The interval value in the backward direction
* @param[in] following The interval value in the forward direction
* @param[in] min_periods Minimum number of observations in window required to have a value,
* otherwise element `i` is null.
* @param[in] aggr The rolling window aggregation type (SUM, MAX, MIN, etc.)
* @param[in] mr Device memory resource used to allocate the returned column's device memory
*
* @returns A nullable output column containing the rolling window results
*/
std::unique_ptr<column> grouped_range_rolling_window(
table_view const& group_keys,
column_view const& orderby_column,
cudf::order const& order,
column_view const& input,
range_window_bounds const& preceding,
range_window_bounds const& following,
size_type min_periods,
rolling_aggregation const& aggr,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Applies a variable-size rolling window function to the values in a column.
 *
 * This function aggregates values in a window around each element i of the input column, and
 * invalidates the bit mask for element i if there are not enough observations. The window size is
 * dynamic (varying for each element). This matches Pandas' API for DataFrame.rolling with a few
 * notable differences:
 * - instead of the center flag it uses a two-part window to allow for more flexible windows.
 *   The total window size = `preceding_window + following_window`. Element `i` uses elements
 *   `[i-preceding_window+1, i+following_window]` to do the window computation.
 * - instead of storing NA/NaN for output rows that do not meet the minimum number of observations
 *   this function updates the valid bitmask of the column to indicate which elements are valid.
 * - support for dynamic rolling windows, i.e. window size can be specified for each element using
 *   an additional array.
 *
 * The returned column for count aggregation always has INT32 type. All other operators return a
 * column of the same type as the input. Therefore it is suggested to convert integer column types
 * (especially low-precision integers) to `FLOAT32` or `FLOAT64` before doing a rolling `MEAN`.
 *
 * @throws cudf::logic_error if window column type is not INT32
 *
 * @param[in] input The input column
 * @param[in] preceding_window A non-nullable column of INT32 window sizes in the backward
 *                             direction. `preceding_window[i]` specifies preceding window size
 *                             for element `i`.
 * @param[in] following_window A non-nullable column of INT32 window sizes in the forward
 *                             direction. `following_window[i]` specifies following window size
 *                             for element `i`.
 * @param[in] min_periods Minimum number of observations in window required to have a value,
 *                        otherwise element `i` is null.
 * @param[in] agg The rolling window aggregation type (sum, max, min, etc.)
 * @param[in] mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns A nullable output column containing the rolling window results
 */
std::unique_ptr<column> rolling_window(
  column_view const& input,
  column_view const& preceding_window,
  column_view const& following_window,
  size_type min_periods,
  rolling_aggregation const& agg,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/contiguous_split.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/table/table.hpp>
#include <cudf/types.hpp>
#include <memory>
#include <vector>
namespace cudf {
/**
* @addtogroup column_copy
* @{
* @file
* @brief Table APIs for contiguous_split, pack, unpack, and metadata
*/
/**
 * @brief Column data in a serialized format
 *
 * @ingroup copy_split
 *
 * Contains data from an array of columns in two contiguous buffers: one on host, which contains
 * table metadata and one on device which contains the table data.
 */
struct packed_columns {
  /**
   * @brief Construct an empty packed_columns object (empty metadata buffer, empty device buffer).
   */
  packed_columns()
    : metadata(std::make_unique<std::vector<uint8_t>>()),
      gpu_data(std::make_unique<rmm::device_buffer>())
  {
  }
  /**
   * @brief Construct a new packed columns object
   *
   * @param md Host-side metadata buffer
   * @param gd Device-side data buffer
   */
  packed_columns(std::unique_ptr<std::vector<uint8_t>>&& md,
                 std::unique_ptr<rmm::device_buffer>&& gd)
    : metadata(std::move(md)), gpu_data(std::move(gd))
  {
  }
  std::unique_ptr<std::vector<uint8_t>> metadata;  ///< Host-side metadata buffer
  std::unique_ptr<rmm::device_buffer> gpu_data;    ///< Device-side data buffer
};
/**
 * @brief The result(s) of a cudf::contiguous_split
 *
 * @ingroup copy_split
 *
 * Each table_view resulting from a split operation performed by contiguous_split,
 * will be returned wrapped in a `packed_table`. The table_view and internal
 * column_views in this struct are not owned by a top level cudf::table or cudf::column.
 * The backing memory and metadata is instead owned by the `data` field and is in one
 * contiguous block.
 *
 * The user is responsible for assuring that the `table` or any derived table_views do
 * not outlive the memory owned by `data`.
 */
struct packed_table {
  cudf::table_view table;  ///< Result table_view of a cudf::contiguous_split
  packed_columns data;     ///< Column data owned by this packed_table; backs `table`
};
/**
 * @brief Performs a deep-copy split of a `table_view` into a vector of `packed_table` where each
 * `packed_table` is using a single contiguous block of memory for all of the split's column data.
 *
 * @ingroup copy_split
 *
 * The memory for the output views is allocated in a single contiguous `rmm::device_buffer` returned
 * in the `packed_table`. There is no top-level owning table.
 *
 * The returned views of `input` are constructed from a vector of indices, that indicate
 * where each split should occur. The `i`th returned `table_view` is sliced as
 * `[0, splits[i])` if `i`=0, else `[splits[i], input.size())` if `i` is the last view and
 * `[splits[i-1], splits[i])` otherwise.
 *
 * For all `i` it is expected `splits[i] <= splits[i+1] <= input.size()`.
 * For a `splits` size N, there will always be N+1 splits in the output.
 *
 * @note It is the caller's responsibility to ensure that the returned views
 * do not outlive the viewed device memory contained in the `data` field of the
 * returned packed_table.
 *
 * @code{.pseudo}
 * Example:
 * input:   [{10, 12, 14, 16, 18, 20, 22, 24, 26, 28},
 *           {50, 52, 54, 56, 58, 60, 62, 64, 66, 68}]
 * splits:  {2, 5, 9}
 * output:  [{{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}},
 *           {{50, 52}, {54, 56, 58}, {60, 62, 64, 66}, {68}}]
 * @endcode
 *
 *
 * @throws cudf::logic_error if `splits` has end index > size of `input`.
 * @throws cudf::logic_error When the value in `splits` is not in the range [0, input.size()).
 * @throws cudf::logic_error When the values in the `splits` are 'strictly decreasing'.
 *
 * @param input View of a table to split
 * @param splits A vector of indices where the view will be split
 * @param mr An optional memory resource to use for all returned device allocations
 * @return The set of requested views of `input` indicated by the `splits` and the viewed memory
 * buffer
 */
std::vector<packed_table> contiguous_split(
  cudf::table_view const& input,
  std::vector<size_type> const& splits,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
namespace detail {
struct contiguous_split_state;
};
/**
* @brief Perform a chunked "pack" operation of the input `table_view` using a user provided
* buffer of size `user_buffer_size`.
*
* The intent of this operation is to be used in a streamed fashion at times of GPU
* out-of-memory, where we want to minimize the number of small cudaMemcpy calls and
* tracking of all the metadata associated with cudf tables. Because of the memory constraints,
* all thrust and scratch memory allocations are using the passed-in memory resource exclusively,
* not a per-device memory resource.
*
* This class defines two methods that must be used in concert to carry out the chunked_pack:
* has_next and next. Here is an example:
*
* @code{.pseudo}
* // Create a table_view
* cudf::table_view tv = ...;
*
* // Choose a memory resource (optional). This memory resource is used for scratch/thrust temporary
* // data. In memory constrained cases, this can be used to set aside scratch memory
* // for `chunked_pack` at the beginning of a program.
* auto mr = rmm::mr::get_current_device_resource();
*
* // Define a buffer size for each chunk: the larger the buffer is, the more SMs can be
* // occupied by this algorithm.
* //
* // Internally, the GPU unit of work is a 1MB batch. When we instantiate `cudf::chunked_pack`,
* // all the 1MB batches for the source table_view are computed up front. Additionally,
* // chunked_pack calculates the number of iterations that are required to go through all those
* // batches given a `user_buffer_size` buffer. The number of 1MB batches in each iteration (chunk)
* // equals the number of CUDA blocks that will be used for the main kernel launch.
* //
* std::size_t user_buffer_size = 128*1024*1024;
*
* auto chunked_packer = cudf::chunked_pack::create(tv, user_buffer_size, mr);
*
* std::size_t host_offset = 0;
* auto host_buffer = ...; // obtain a host buffer you would like to copy to
*
* while (chunked_packer->has_next()) {
* // get a user buffer of size `user_buffer_size`
* cudf::device_span<uint8_t> user_buffer = ...;
* std::size_t bytes_copied = chunked_packer->next(user_buffer);
*
* // buffer will hold the contents of at most `user_buffer_size` bytes
* // of the contiguously packed input `table_view`. You are now free to copy
* // this memory somewhere else, for example, to host.
* cudaMemcpyAsync(
* host_buffer.data() + host_offset,
* user_buffer.data(),
* bytes_copied,
* cudaMemcpyDefault,
* stream);
*
* host_offset += bytes_copied;
* }
* @endcode
*/
class chunked_pack {
 public:
  /**
   * @brief Construct a `chunked_pack` class.
   *
   * @param input source `table_view` to pack
   * @param user_buffer_size buffer size (in bytes) that will be passed on `next`. Must be
   *                        at least 1MB
   * @param temp_mr An optional memory resource to be used for temporary and scratch allocations
   *                only
   */
  explicit chunked_pack(
    cudf::table_view const& input,
    std::size_t user_buffer_size,
    rmm::mr::device_memory_resource* temp_mr = rmm::mr::get_current_device_resource());
  /**
   * @brief Destructor that will be defaulted in the implementation file.
   *
   * Only declared here (rather than defaulted inline) because
   * `detail::contiguous_split_state` is an incomplete type at this point.
   */
  ~chunked_pack();
  /**
   * @brief Obtain the total size of the contiguously packed `table_view`.
   *
   * @return total size (in bytes) of all the chunks
   */
  [[nodiscard]] std::size_t get_total_contiguous_size() const;
  /**
   * @brief Function to check if there are chunks left to be copied.
   *
   * @return true if there are chunks left to be copied, and false otherwise
   */
  [[nodiscard]] bool has_next() const;
  /**
   * @brief Packs the next chunk into `user_buffer`. This should be called as long as
   * `has_next` returns true. If `next` is called when `has_next` is false, an exception
   * is thrown.
   *
   * @throws cudf::logic_error If the size of `user_buffer` is different than `user_buffer_size`
   * @throws cudf::logic_error If called after all chunks have been copied
   *
   * @param user_buffer device span target for the chunk. The size of this span must equal
   *                    the `user_buffer_size` parameter passed at construction
   * @return The number of bytes that were written to `user_buffer` (at most
   *         `user_buffer_size`)
   */
  [[nodiscard]] std::size_t next(cudf::device_span<uint8_t> const& user_buffer);
  /**
   * @brief Build the opaque metadata for all added columns.
   *
   * @return A vector containing the serialized column metadata
   */
  [[nodiscard]] std::unique_ptr<std::vector<uint8_t>> build_metadata() const;
  /**
   * @brief Creates a `chunked_pack` instance to perform a "pack" of the `table_view`
   * "input", where a buffer of `user_buffer_size` is filled with chunks of the
   * overall operation. This operation can be used in cases where GPU memory is constrained.
   *
   * The memory resource (`temp_mr`) could be a special memory resource to be used in
   * situations when GPU memory is low and we want scratch and temporary allocations to
   * happen from a small reserved pool of memory. Note that it defaults to the regular cuDF
   * per-device resource.
   *
   * @throws cudf::logic_error When user_buffer_size is less than 1MB
   *
   * @param input source `table_view` to pack
   * @param user_buffer_size buffer size (in bytes) that will be passed on `next`. Must be
   *                         at least 1MB
   * @param temp_mr RMM memory resource to be used for temporary and scratch allocations only
   * @return a unique_ptr of chunked_pack
   */
  [[nodiscard]] static std::unique_ptr<chunked_pack> create(
    cudf::table_view const& input,
    std::size_t user_buffer_size,
    rmm::mr::device_memory_resource* temp_mr = rmm::mr::get_current_device_resource());
 private:
  // Internal state of the contiguous split (batch bookkeeping, scratch buffers);
  // opaque here because the type is only forward-declared in this header.
  std::unique_ptr<detail::contiguous_split_state> state;
};
/**
 * @brief Deep-copy a `table_view` into a serialized contiguous memory format.
 *
 * The metadata from the `table_view` is copied into a host vector of bytes and the data from the
 * `table_view` is copied into a `device_buffer`. Pass the output of this function into
 * `cudf::unpack` to deserialize.
 *
 * @param input View of the table to pack
 * @param mr Optional device memory resource used for all returned device allocations
 * @return packed_columns A struct containing the serialized metadata and data in contiguous host
 * and device memory respectively
 */
packed_columns pack(cudf::table_view const& input,
                    rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Produce the metadata used for packing a table stored in a contiguous buffer.
 *
 * The metadata from the `table_view` is copied into a host vector of bytes which can be used to
 * construct a `packed_columns` or `packed_table` structure. The caller is responsible for
 * guaranteeing that all of the columns in the table point into `contiguous_buffer`.
 *
 * @param table View of the table to pack
 * @param contiguous_buffer A contiguous buffer of device memory which contains the data referenced
 * by the columns in `table`
 * @param buffer_size The size of `contiguous_buffer` in bytes
 * @return Vector of bytes representing the metadata used to `unpack` a packed_columns struct
 */
std::vector<uint8_t> pack_metadata(table_view const& table,
                                   uint8_t const* contiguous_buffer,
                                   size_t buffer_size);
/**
 * @brief Deserialize the result of `cudf::pack`.
 *
 * Converts the result of a serialized table into a `table_view` that points to the data stored in
 * the contiguous device buffer contained in `input`.
 *
 * It is the caller's responsibility to ensure that the `table_view` in the output does not outlive
 * the data in the input.
 *
 * No new device memory is allocated in this function.
 *
 * @param input The packed columns to unpack
 * @return The unpacked `table_view`
 */
table_view unpack(packed_columns const& input);
/**
 * @brief Deserialize the result of `cudf::pack`.
 *
 * Converts the result of a serialized table into a `table_view` that points to the data stored in
 * the contiguous device buffer contained in `gpu_data` using the metadata contained in the host
 * buffer `metadata`.
 *
 * It is the caller's responsibility to ensure that the `table_view` in the output does not outlive
 * the data in the input.
 *
 * No new device memory is allocated in this function.
 *
 * @param metadata The host-side metadata buffer resulting from the initial pack() call
 * @param gpu_data The device-side contiguous buffer storing the data that will be referenced by
 * the resulting `table_view`
 * @return The unpacked `table_view`
 */
table_view unpack(uint8_t const* metadata, uint8_t const* gpu_data);
/** @} */
}  // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/round.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
/**
 * @addtogroup transformation_unaryops
 * @{
 * @file
 * @brief Column APIs for round
 */
/**
 * @brief Different rounding methods for `cudf::round`
 *
 * Info on HALF_UP rounding: https://en.wikipedia.org/wiki/Rounding#Round_half_up
 * Info on HALF_EVEN rounding: https://en.wikipedia.org/wiki/Rounding#Round_half_to_even
 */
enum class rounding_method : int32_t { HALF_UP, HALF_EVEN };
/**
 * @brief Rounds all the values in a column to the specified number of decimal places.
 *
 * `cudf::round` currently supports HALF_UP and HALF_EVEN rounding for integer, floating point and
 * `decimal32` and `decimal64` numbers. For `decimal32` and `decimal64` numbers, negated
 * `numeric::scale` is equivalent to `decimal_places`.
 *
 * Example:
 * ```
 * using namespace cudf;
 *
 * column_view a; // contains { 1.729, 17.29, 172.9, 1729 };
 *
 * auto result1 = round(a);     // { 2, 17, 173, 1729 }
 * auto result2 = round(a, 1);  // { 1.7, 17.3, 172.9, 1729 }
 * auto result3 = round(a, -1); // { 0, 20, 170, 1730 }
 *
 * column_view b; // contains { 1.5, 2.5, 1.35, 1.45, 15, 25 };
 *
 * auto result4 = round(b, 0, rounding_method::HALF_EVEN);  // { 2, 2, 1, 1, 15, 25};
 * auto result5 = round(b, 1, rounding_method::HALF_EVEN);  // { 1.5, 2.5, 1.4, 1.4, 15, 25};
 * auto result6 = round(b, -1, rounding_method::HALF_EVEN); // { 0, 0, 0, 0, 20, 20};
 * ```
 *
 * @param input Column of values to be rounded
 * @param decimal_places Number of decimal places to round to (default 0). If negative, this
 * specifies the number of positions to the left of the decimal point.
 * @param method Rounding method (HALF_UP or HALF_EVEN; see `cudf::rounding_method`)
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @return Column with each of the values rounded
 */
std::unique_ptr<column> round(
  column_view const& input,
  int32_t decimal_places = 0,
  rounding_method method = rounding_method::HALF_UP,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */  // end of group
}  // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/concatenate.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
 * @addtogroup copy_concatenate
 * @{
 * @file
 * @brief Concatenate columns APIs
 */
/**
 * @brief Concatenates `views[i]`'s bitmask from the bits
 * `[views[i].offset(), views[i].offset() + views[i].size())` for all elements
 * `views` into an `rmm::device_buffer`
 *
 * Returns an empty buffer if the column is not nullable.
 *
 * @param views Column views whose bitmasks will be concatenated
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used for allocating the returned memory
 * @return Bitmasks of all the column views in the views vector
 */
rmm::device_buffer concatenate_masks(
  host_span<column_view const> views,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Concatenates multiple columns into a single column
 *
 * @throws cudf::logic_error If types of the input columns mismatch
 * @throws std::overflow_error If the total number of output rows exceeds cudf::size_type
 *
 * @param columns_to_concat Column views to be concatenated into a single column
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A single column having all the rows from the elements of `columns_to_concat` respectively
 * in the same order.
 */
std::unique_ptr<column> concatenate(
  host_span<column_view const> columns_to_concat,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Columns of `tables_to_concat` are concatenated vertically to return a
 * single table
 *
 * @code{.pseudo}
 * column_view c0 is {0,1,2,3}
 * column_view c1 is {4,5,6,7}
 * table_view t0{{c0, c0}};
 * table_view t1{{c1, c1}};
 * ...
 * auto t = concatenate({t0.view(), t1.view()});
 * column_view tc0 = (t->view()).column(0) is {0,1,2,3,4,5,6,7}
 * column_view tc1 = (t->view()).column(1) is {0,1,2,3,4,5,6,7}
 * @endcode
 *
 * @throws cudf::logic_error If number of columns mismatch
 * @throws std::overflow_error If the total number of output rows exceeds cudf::size_type
 *
 * @param tables_to_concat Table views to be concatenated into a single table
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return A single table having all the rows from the elements of
 * `tables_to_concat` respectively in the same order.
 */
std::unique_ptr<table> concatenate(
  host_span<table_view const> tables_to_concat,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */  // end of group
}  // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/timezone.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <optional>
#include <string>
namespace cudf {
class table;
/// Cycle in which the time offsets repeat in the Gregorian calendar
static constexpr int32_t solar_cycle_years = 400;
/// Number of future entries in the timezone transition table:
/// two entries per year, over the length of the Gregorian calendar's solar cycle
static constexpr uint32_t solar_cycle_entry_count = 2 * solar_cycle_years;
/**
 * @brief Creates a transition table to convert ORC timestamps to UTC.
 *
 * Uses system's TZif files. Assumes little-endian platform when parsing these files.
 * The transition table starts with the entries from the TZif file. For timestamps after the file's
 * last transition, the table includes entries that form a `solar_cycle_years`-year cycle (future
 * entries). This portion of the table has `solar_cycle_entry_count` elements, as it assumes two
 * transitions per year from Daylight Saving Time. If the timezone does not have DST, the table will
 * still include the future entries, which will all have the same offset.
 *
 * @param tzif_dir The directory where the TZif files are located
 * @param timezone_name Standard timezone name (for example, "America/Los_Angeles")
 * @param mr Device memory resource used to allocate the returned table's device memory.
 *
 * @return The transition table for the given timezone
 */
std::unique_ptr<table> make_timezone_transition_table(
  std::optional<std::string_view> tzif_dir,
  std::string_view timezone_name,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
}  // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/reduction.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/scalar/scalar.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <optional>
namespace cudf {
/**
 * @addtogroup aggregation_reduction
 * @{
 * @file
 */
/**
 * @brief Enum to describe scan operation type
 */
enum class scan_type : bool { INCLUSIVE, EXCLUSIVE };
/**
 * @brief Computes the reduction of the values in all rows of a column.
 *
 * This function does not detect overflows in reductions. When `output_dtype`
 * does not match the `col.type()`, their values may be promoted to
 * `int64_t` or `double` for computing aggregations and then cast to
 * `output_dtype` before returning.
 *
 * Only `min` and `max` ops are supported for reduction of non-arithmetic
 * types (e.g. timestamp or string).
 *
 * Any null values are skipped for the operation.
 *
 * If the column is empty or contains all null entries `col.size()==col.null_count()`,
 * the output scalar value will be `false` for reduction type `any` and `true`
 * for reduction type `all`. For all other reductions, the output scalar
 * returns with `is_valid()==false`.
 *
 * If the input column is an arithmetic type, the `output_dtype` can be any arithmetic
 * type. If the input column is a non-arithmetic type (e.g. timestamp or string)
 * the `output_dtype` must match the `col.type()`. If the reduction type is `any` or
 * `all`, the `output_dtype` must be type BOOL8.
 *
 * If the reduction fails, the output scalar returns with `is_valid()==false`.
 *
 * @throw cudf::logic_error if reduction is called for non-arithmetic output
 * type and operator other than `min` and `max`.
 * @throw cudf::logic_error if input column data type is not convertible to
 * `output_dtype`.
 * @throw cudf::logic_error if `min` or `max` reduction is called and the
 * output type does not match the input column data type.
 * @throw cudf::logic_error if `any` or `all` reduction is called and the
 * output type is not BOOL8.
 * @throw cudf::logic_error if `mean`, `var`, or `std` reduction is called and
 * the `output_dtype` is not floating point.
 *
 * @param col Input column view
 * @param agg Aggregation operator applied by the reduction
 * @param output_dtype The output scalar type
 * @param mr Device memory resource used to allocate the returned scalar's device memory
 * @returns Output scalar with reduce result
 */
std::unique_ptr<scalar> reduce(
  column_view const& col,
  reduce_aggregation const& agg,
  data_type output_dtype,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the reduction of the values in all rows of a column with an initial value
 *
 * Only `sum`, `product`, `min`, `max`, `any`, and `all` reductions are supported.
 *
 * @throw cudf::logic_error if reduction is not `sum`, `product`, `min`, `max`, `any`, or `all`
 * and `init` is specified.
 *
 * @param col Input column view
 * @param agg Aggregation operator applied by the reduction
 * @param output_dtype The output scalar type
 * @param init The initial value of the reduction
 * @param mr Device memory resource used to allocate the returned scalar's device memory
 * @returns Output scalar with reduce result
 */
std::unique_ptr<scalar> reduce(
  column_view const& col,
  reduce_aggregation const& agg,
  data_type output_dtype,
  std::optional<std::reference_wrapper<scalar const>> init,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Compute reduction of each segment in the input column
 *
 * This function does not detect overflows in reductions. When `output_dtype`
 * does not match the `segmented_values.type()`, their values may be promoted to
 * `int64_t` or `double` for computing aggregations and then cast to
 * `output_dtype` before returning.
 *
 * Null values are treated as identities during reduction.
 *
 * If the segment is empty, the row corresponding to the result of the
 * segment is null.
 *
 * If any index in `offsets` is out of bound of `segmented_values`, the behavior
 * is undefined.
 *
 * If the input column has arithmetic type, `output_dtype` can be any arithmetic
 * type. If the input column has non-arithmetic type, e.g. timestamp, the same
 * output type must be specified.
 *
 * If input is not empty, the result is always nullable.
 *
 * @throw cudf::logic_error if reduction is called for non-arithmetic output
 * type and operator other than `min` and `max`.
 * @throw cudf::logic_error if input column data type is not convertible to
 * `output_dtype` type.
 * @throw cudf::logic_error if `min` or `max` reduction is called and the
 * `output_dtype` does not match the input column data type.
 * @throw cudf::logic_error if `any` or `all` reduction is called and the
 * `output_dtype` is not BOOL8.
 *
 * @param segmented_values Column view of segmented inputs
 * @param offsets Each segment's offset of `segmented_values`. A list of offsets with size
 * `num_segments + 1`. The size of `i`th segment is `offsets[i+1] - offsets[i]`.
 * @param agg Aggregation operator applied by the reduction
 * @param output_dtype The output column type
 * @param null_handling If `INCLUDE`, the reduction is valid if all elements in a segment are valid,
 * otherwise null. If `EXCLUDE`, the reduction is valid if any element in the segment is valid,
 * otherwise null.
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @returns Output column with results of segmented reduction
 */
std::unique_ptr<column> segmented_reduce(
  column_view const& segmented_values,
  device_span<size_type const> offsets,
  segmented_reduce_aggregation const& agg,
  data_type output_dtype,
  null_policy null_handling,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Compute reduction of each segment in the input column with an initial value. Only SUM,
 * PRODUCT, MIN, MAX, ANY, and ALL aggregations are supported.
 *
 * @param segmented_values Column view of segmented inputs
 * @param offsets Each segment's offset of `segmented_values`. A list of offsets with size
 * `num_segments + 1`. The size of `i`th segment is `offsets[i+1] - offsets[i]`.
 * @param agg Aggregation operator applied by the reduction
 * @param output_dtype The output column type
 * @param null_handling If `INCLUDE`, the reduction is valid if all elements in a segment are valid,
 * otherwise null. If `EXCLUDE`, the reduction is valid if any element in the segment is valid,
 * otherwise null.
 * @param init The initial value of the reduction
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @returns Output column with results of segmented reduction.
 */
std::unique_ptr<column> segmented_reduce(
  column_view const& segmented_values,
  device_span<size_type const> offsets,
  segmented_reduce_aggregation const& agg,
  data_type output_dtype,
  null_policy null_handling,
  std::optional<std::reference_wrapper<scalar const>> init,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the scan of a column.
 *
 * The null values are skipped for the operation, and if an input element
 * at `i` is null, then the output element at `i` will also be null.
 *
 * @throws cudf::logic_error if column datatype is not numeric type.
 *
 * @param[in] input The input column view for the scan
 * @param[in] agg Aggregation operator applied by the scan
 * @param[in] inclusive The flag for applying an inclusive scan if scan_type::INCLUSIVE, an
 * exclusive scan if scan_type::EXCLUSIVE.
 * @param[in] null_handling Exclude null values when computing the result if null_policy::EXCLUDE.
 * Include nulls if null_policy::INCLUDE. Any operation with a null results in a null.
 * @param[in] mr Device memory resource used to allocate the returned column's device memory
 * @returns Scanned output column
 */
std::unique_ptr<column> scan(
  column_view const& input,
  scan_aggregation const& agg,
  scan_type inclusive,
  null_policy null_handling           = null_policy::EXCLUDE,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Determines the minimum and maximum values of a column.
 *
 * @param col column to compute minmax
 * @param mr Device memory resource used to allocate the returned scalars' device memory
 * @return A std::pair of scalars with the first scalar being the minimum value and the second
 * scalar being the maximum value of the input column.
 */
std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax(
  column_view const& col,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */  // end of group
}  // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/binaryop.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
* @addtogroup transformation_binaryops
* @{
* @file
* @brief Column APIs for binary ops
*/
/**
* @brief Types of binary operations that can be performed on data.
*/
enum class binary_operator : int32_t {
ADD, ///< operator +
SUB, ///< operator -
MUL, ///< operator *
DIV, ///< operator / using common type of lhs and rhs
TRUE_DIV, ///< operator / after promoting type to floating point
FLOOR_DIV, ///< operator //
///< integer division rounding towards negative
///< infinity if both arguments are integral;
///< floor division for floating types (using C++ type
///< promotion for mixed integral/floating arguments)
///< If different promotion semantics are required, it
///< is the responsibility of the caller to promote
///< manually before calling in to this function.
MOD, ///< operator %
PMOD, ///< positive modulo operator
///< If remainder is negative, this returns (remainder + divisor) % divisor
///< else, it returns (dividend % divisor)
PYMOD, ///< operator % but following Python's sign rules for negatives
POW, ///< lhs ^ rhs
INT_POW, ///< int ^ int, used to avoid floating point precision loss. Returns 0 for negative
///< exponents.
LOG_BASE, ///< logarithm to the base
ATAN2, ///< 2-argument arctangent
SHIFT_LEFT, ///< operator <<
SHIFT_RIGHT, ///< operator >>
SHIFT_RIGHT_UNSIGNED, ///< operator >>> (from Java)
///< Logical right shift. Casts to an unsigned value before shifting.
BITWISE_AND, ///< operator &
BITWISE_OR, ///< operator |
BITWISE_XOR, ///< operator ^
LOGICAL_AND, ///< operator &&
LOGICAL_OR, ///< operator ||
EQUAL, ///< operator ==
NOT_EQUAL, ///< operator !=
LESS, ///< operator <
GREATER, ///< operator >
LESS_EQUAL, ///< operator <=
GREATER_EQUAL, ///< operator >=
NULL_EQUALS, ///< Returns true when both operands are null; false when one is null; the
///< result of equality when both are non-null
NULL_MAX, ///< Returns max of operands when both are non-null; returns the non-null
///< operand when one is null; or invalid when both are null
NULL_MIN, ///< Returns min of operands when both are non-null; returns the non-null
///< operand when one is null; or invalid when both are null
GENERIC_BINARY, ///< generic binary operator to be generated with input
///< ptx code
NULL_LOGICAL_AND, ///< operator && with Spark rules: (null, null) is null, (null, true) is null,
///< (null, false) is false, and (valid, valid) == LOGICAL_AND(valid, valid)
NULL_LOGICAL_OR, ///< operator || with Spark rules: (null, null) is null, (null, true) is true,
///< (null, false) is null, and (valid, valid) == LOGICAL_OR(valid, valid)
INVALID_BINARY ///< invalid operation
};
/**
* @brief Performs a binary operation between a scalar and a column.
*
* The output contains the result of `op(lhs, rhs[i])` for all `0 <= i < rhs.size()`
* The scalar is the left operand and the column elements are the right operand.
* This distinction is significant in case of non-commutative binary operations
*
* Regardless of the operator, the validity of the output value is the logical
* AND of the validity of the two operands except NullMin and NullMax (logical OR).
*
* @param lhs The left operand scalar
* @param rhs The right operand column
* @param op The binary operator
* @param output_type The desired data type of the output column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column of `output_type` type containing the result of
* the binary operation
* @throw cudf::logic_error if @p output_type dtype isn't fixed-width
* @throw cudf::logic_error if @p output_type dtype isn't boolean for comparison and logical
* operations.
* @throw cudf::data_type_error if the operation is not supported for the types of @p lhs and @p rhs
*/
std::unique_ptr<column> binary_operation(
scalar const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Performs a binary operation between a column and a scalar.
*
* The output contains the result of `op(lhs[i], rhs)` for all `0 <= i < lhs.size()`
* The column elements are the left operand and the scalar is the right operand.
* This distinction is significant in case of non-commutative binary operations
*
* Regardless of the operator, the validity of the output value is the logical
* AND of the validity of the two operands except NullMin and NullMax (logical OR).
*
* @param lhs The left operand column
* @param rhs The right operand scalar
* @param op The binary operator
* @param output_type The desired data type of the output column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column of `output_type` type containing the result of
* the binary operation
* @throw cudf::logic_error if @p output_type dtype isn't fixed-width
* @throw cudf::logic_error if @p output_type dtype isn't boolean for comparison and logical
* operations.
* @throw cudf::data_type_error if the operation is not supported for the types of @p lhs and @p rhs
*/
std::unique_ptr<column> binary_operation(
column_view const& lhs,
scalar const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Performs a binary operation between two columns.
*
* The output contains the result of `op(lhs[i], rhs[i])` for all `0 <= i < lhs.size()`
*
* Regardless of the operator, the validity of the output value is the logical
* AND of the validity of the two operands except NullMin and NullMax (logical OR).
*
* @param lhs The left operand column
* @param rhs The right operand column
* @param op The binary operator
* @param output_type The desired data type of the output column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column of `output_type` type containing the result of
* the binary operation
* @throw cudf::logic_error if @p lhs and @p rhs are different sizes
* @throw cudf::logic_error if @p output_type dtype isn't boolean for comparison and logical
* operations.
* @throw cudf::logic_error if @p output_type dtype isn't fixed-width
* @throw cudf::data_type_error if the operation is not supported for the types of @p lhs and @p rhs
*/
std::unique_ptr<column> binary_operation(
column_view const& lhs,
column_view const& rhs,
binary_operator op,
data_type output_type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Performs a binary operation between two columns using a
* user-defined PTX function.
*
* The output contains the result of `op(lhs[i], rhs[i])` for all `0 <= i < lhs.size()`
*
* Regardless of the operator, the validity of the output value is the logical
* AND of the validity of the two operands
*
* @param lhs The left operand column
* @param rhs The right operand column
* @param ptx String containing the PTX of a binary function
* @param output_type The desired data type of the output column. It is assumed
* that output_type is compatible with the output data type
* of the function in the PTX code
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Output column of `output_type` type containing the result of
* the binary operation
* @throw cudf::logic_error if @p lhs and @p rhs are different sizes
* @throw cudf::logic_error if @p lhs and @p rhs dtypes aren't numeric
* @throw cudf::logic_error if @p output_type dtype isn't numeric
*/
std::unique_ptr<column> binary_operation(
column_view const& lhs,
column_view const& rhs,
std::string const& ptx,
data_type output_type,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Computes the `scale` for a `fixed_point` number based on given binary operator `op`
*
* @param op The binary_operator used for two `fixed_point` numbers
* @param left_scale Scale of left `fixed_point` number
* @param right_scale Scale of right `fixed_point` number
* @return The resulting `scale` of the computed `fixed_point` number
*/
int32_t binary_operation_fixed_point_scale(binary_operator op,
int32_t left_scale,
int32_t right_scale);
/**
* @brief Computes the `data_type` for a `fixed_point` number based on given binary operator `op`
*
* @param op The binary_operator used for two `fixed_point` numbers
* @param lhs `cudf::data_type` of left `fixed_point` number
* @param rhs `cudf::data_type` of right `fixed_point` number
* @return The resulting `cudf::data_type` of the computed `fixed_point` number
*/
cudf::data_type binary_operation_fixed_point_output_type(binary_operator op,
cudf::data_type const& lhs,
cudf::data_type const& rhs);
namespace binops {
/**
* @brief Computes output valid mask for op between a column and a scalar
*
* @param col Column to compute the valid mask from
* @param s Scalar to compute the valid mask from
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned valid mask
* @return Computed validity mask
*/
std::pair<rmm::device_buffer, size_type> scalar_col_valid_mask_and(
column_view const& col,
scalar const& s,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
namespace compiled {
namespace detail {
/**
 * @brief Struct binary operation using `NaN`-aware sorting physical element comparators
 *
 * @param out mutable view of output column
 * @param lhs view of left operand column
 * @param rhs view of right operand column
 * @param is_lhs_scalar true if @p lhs is a single element column representing a scalar
 * @param is_rhs_scalar true if @p rhs is a single element column representing a scalar
 * @param op binary operator identifier
 * @param stream CUDA stream used for device memory operations
 */
void apply_sorting_struct_binary_op(mutable_column_view& out,
                                    column_view const& lhs,
                                    column_view const& rhs,
                                    bool is_lhs_scalar,
                                    bool is_rhs_scalar,
                                    binary_operator op,
                                    rmm::cuda_stream_view stream);
} // namespace detail
} // namespace compiled
} // namespace binops
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/sorting.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/aggregation.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>
namespace cudf {
/**
* @addtogroup column_sort
* @{
* @file
* @brief Column APIs for sort and rank
*/
/**
 * @brief Computes the row indices that would produce `input` in a lexicographical sorted order.
 *
 * @note The result is a permutation of the row indices `[0, input.num_rows())` and may be
 * used as a gather map to materialize the sorted table.
 *
 * @param input The table to sort
 * @param column_order The desired sort order for each column. Size must be
 * equal to `input.num_columns()` or empty. If empty, all columns will be sorted
 * in ascending order.
 * @param null_precedence The desired order of null compared to other elements
 * for each column. Size must be equal to `input.num_columns()` or empty.
 * If empty, all columns will be sorted in `null_order::BEFORE`.
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A non-nullable column of elements containing the permuted row indices of
 * `input` if it were sorted
 */
std::unique_ptr<column> sorted_order(
  table_view const& input,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the row indices that would produce `input` in a stable
 * lexicographical sorted order.
 *
 * The order of equivalent elements is guaranteed to be preserved.
 *
 * @copydoc cudf::sorted_order
 */
std::unique_ptr<column> stable_sorted_order(
  table_view const& input,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Checks whether the rows of a `table` are sorted in a lexicographical
 * order.
 *
 * @param table Table whose rows need to be compared for ordering
 * @param column_order The expected sort order for each column. Size
 * must be equal to `table.num_columns()` or empty. If
 * empty, it is expected all columns are in
 * ascending order.
 * @param null_precedence The desired order of null compared to other
 * elements for each column. Size must be equal to
 * `table.num_columns()` or empty. If empty,
 * `null_order::BEFORE` is assumed for all columns.
 *
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @returns true if sorted as expected, false if not
 */
bool is_sorted(cudf::table_view const& table,
               std::vector<order> const& column_order,
               std::vector<null_order> const& null_precedence,
               rmm::cuda_stream_view stream = cudf::get_default_stream());
/**
 * @brief Performs a lexicographic sort of the rows of a table
 *
 * @note NOTE(review): presumably not a stable sort (see the `stable_*` variants for
 * stability guarantees) — confirm before relying on the order of equivalent rows.
 *
 * @param input The table to sort
 * @param column_order The desired order for each column. Size must be
 * equal to `input.num_columns()` or empty. If empty, all columns are sorted in
 * ascending order.
 * @param null_precedence The desired order of a null element compared to other
 * elements for each column in `input`. Size must be equal to
 * `input.num_columns()` or empty. If empty, all columns will be sorted with
 * `null_order::BEFORE`.
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return New table containing the desired sorted order of `input`
 */
std::unique_ptr<table> sort(
  table_view const& input,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Performs a key-value sort.
 *
 * Creates a new table that reorders the rows of `values` according to the
 * lexicographic ordering of the rows of `keys`.
 *
 * @note Conceptually equivalent to gathering `values` with the result of
 * `sorted_order(keys, column_order, null_precedence)`.
 *
 * @throws cudf::logic_error if `values.num_rows() != keys.num_rows()`.
 *
 * @param values The table to reorder
 * @param keys The table that determines the ordering
 * @param column_order The desired order for each column in `keys`. Size must be
 * equal to `keys.num_columns()` or empty. If empty, all columns are sorted in
 * ascending order.
 * @param null_precedence The desired order of a null element compared to other
 * elements for each column in `keys`. Size must be equal to
 * `keys.num_columns()` or empty. If empty, all columns will be sorted with
 * `null_order::BEFORE`.
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return The reordering of `values` determined by the lexicographic order of
 * the rows of `keys`.
 */
std::unique_ptr<table> sort_by_key(
  table_view const& values,
  table_view const& keys,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Performs a key-value stable sort.
 *
 * Creates a new table that reorders the rows of `values` according to the
 * lexicographic ordering of the rows of `keys`.
 *
 * The order of equivalent elements is guaranteed to be preserved.
 *
 * @throws cudf::logic_error if `values.num_rows() != keys.num_rows()`.
 *
 * @param values The table to reorder
 * @param keys The table that determines the ordering
 * @param column_order The desired order for each column in `keys`. Size must be
 * equal to `keys.num_columns()` or empty. If empty, all columns are sorted in
 * ascending order.
 * @param null_precedence The desired order of a null element compared to other
 * elements for each column in `keys`. Size must be equal to
 * `keys.num_columns()` or empty. If empty, all columns will be sorted with
 * `null_order::BEFORE`.
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return The reordering of `values` determined by the lexicographic order of
 * the rows of `keys`.
 */
std::unique_ptr<table> stable_sort_by_key(
  table_view const& values,
  table_view const& keys,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Computes the ranks of input column in sorted order.
 *
 * Ranks indicate the position of each element in the sorted column; rank
 * values start from 1.
 *
 * @code{.pseudo}
 * input = { 3, 4, 5, 4, 1, 2}
 * Result for different rank_method are
 * FIRST = {3, 4, 6, 5, 1, 2}
 * AVERAGE = {3, 4.5, 6, 4.5, 1, 2}
 * MIN = {3, 4, 6, 4, 1, 2}
 * MAX = {3, 5, 6, 5, 1, 2}
 * DENSE = {3, 4, 5, 4, 1, 2}
 * @endcode
 *
 * @param input The column to rank
 * @param method The ranking method used for tie breaking (same values)
 * @param column_order The desired sort order for ranking
 * @param null_handling flag to include nulls during ranking. If nulls are not
 * included, corresponding rank will be null.
 * @param null_precedence The desired order of null compared to other elements
 * for column
 * @param percentage flag to convert ranks to percentage in range (0,1]
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A column containing the rank of each element of `input`. The output
 * column type will be `size_type` by default, or `double` when
 * `method=rank_method::AVERAGE` or `percentage=true`
 */
std::unique_ptr<column> rank(
  column_view const& input,
  rank_method method,
  order column_order,
  null_policy null_handling,
  null_order null_precedence,
  bool percentage,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns sorted order after sorting each segment in the table.
 *
 * A `segment_offsets` column of `S` values defines `S - 1` contiguous segments;
 * segment `i` covers rows `[segment_offsets[i], segment_offsets[i+1])`.
 *
 * If segment_offsets contains values larger than the number of rows, the behavior is undefined.
 * @throws cudf::logic_error if `segment_offsets` is not `size_type` column.
 *
 * @code{.pseudo}
 * Example:
 * keys = { {9, 8, 7, 6, 5, 4, 3, 2, 1, 0} }
 * offsets = {0, 3, 7, 10}
 * result = cudf::segmented_sorted_order(keys, offsets);
 * result is { 2,1,0, 6,5,4,3, 9,8,7 }
 * @endcode
 *
 * If segment_offsets is empty or contains a single index, no values are sorted
 * and the result is a sequence of integers from 0 to keys.size()-1.
 *
 * The segment_offsets are not required to include all indices. Any indices
 * outside the specified segments will not be sorted.
 *
 * @code{.pseudo}
 * Example: (offsets do not cover all indices)
 * keys = { {9, 8, 7, 6, 5, 4, 3, 2, 1, 0} }
 * offsets = {3, 7}
 * result = cudf::segmented_sorted_order(keys, offsets);
 * result is { 0,1,2, 6,5,4,3, 7,8,9 }
 * @endcode
 *
 * @param keys The table that determines the ordering of elements in each segment
 * @param segment_offsets The column of `size_type` type containing start offset index for each
 * contiguous segment.
 * @param column_order The desired order for each column in `keys`. Size must be
 * equal to `keys.num_columns()` or empty. If empty, all columns are sorted in
 * ascending order.
 * @param null_precedence The desired order of a null element compared to other
 * elements for each column in `keys`. Size must be equal to
 * `keys.num_columns()` or empty. If empty, all columns will be sorted with
 * `null_order::BEFORE`.
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource to allocate any returned objects
 * @return sorted order of the segment sorted table
 *
 */
std::unique_ptr<column> segmented_sorted_order(
  table_view const& keys,
  column_view const& segment_offsets,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns sorted order after stably sorting each segment in the table.
 *
 * @copydoc cudf::segmented_sorted_order
 */
std::unique_ptr<column> stable_segmented_sorted_order(
  table_view const& keys,
  column_view const& segment_offsets,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Performs a lexicographic segmented sort of a table
 *
 * A `segment_offsets` column of `S` values defines `S - 1` contiguous segments;
 * segment `i` covers rows `[segment_offsets[i], segment_offsets[i+1])`.
 *
 * If segment_offsets contains values larger than the number of rows, the behavior is undefined.
 * @throws cudf::logic_error if `values.num_rows() != keys.num_rows()`.
 * @throws cudf::logic_error if `segment_offsets` is not `size_type` column.
 *
 * @code{.pseudo}
 * Example:
 * keys = { {9, 8, 7, 6, 5, 4, 3, 2, 1, 0} }
 * values = { {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'} }
 * offsets = {0, 3, 7, 10}
 * result = cudf::segmented_sort_by_key(keys, values, offsets);
 * result is { 'c','b','a', 'g','f','e','d', 'j','i','h' }
 * @endcode
 *
 * If segment_offsets is empty or contains a single index, no values are sorted
 * and the result is a copy of the values.
 *
 * The segment_offsets are not required to include all indices. Any indices
 * outside the specified segments will not be sorted.
 *
 * @code{.pseudo}
 * Example: (offsets do not cover all indices)
 * keys = { {9, 8, 7, 6, 5, 4, 3, 2, 1, 0} }
 * values = { {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'} }
 * offsets = {3, 7}
 * result = cudf::segmented_sort_by_key(keys, values, offsets);
 * result is { 'a','b','c', 'g','f','e','d', 'h','i','j' }
 * @endcode
 *
 * @param values The table to reorder
 * @param keys The table that determines the ordering of elements in each segment
 * @param segment_offsets The column of `size_type` type containing start offset index for each
 * contiguous segment.
 * @param column_order The desired order for each column in `keys`. Size must be
 * equal to `keys.num_columns()` or empty. If empty, all columns are sorted in
 * ascending order.
 * @param null_precedence The desired order of a null element compared to other
 * elements for each column in `keys`. Size must be equal to
 * `keys.num_columns()` or empty. If empty, all columns will be sorted with
 * `null_order::BEFORE`.
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource to allocate any returned objects
 * @return table with elements in each segment sorted
 *
 */
std::unique_ptr<table> segmented_sort_by_key(
  table_view const& values,
  table_view const& keys,
  column_view const& segment_offsets,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Performs a stable lexicographic segmented sort of a table
 *
 * @copydoc cudf::segmented_sort_by_key
 */
std::unique_ptr<table> stable_segmented_sort_by_key(
  table_view const& values,
  table_view const& keys,
  column_view const& segment_offsets,
  std::vector<order> const& column_order         = {},
  std::vector<null_order> const& null_precedence = {},
  rmm::cuda_stream_view stream                   = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/replace.hpp
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
namespace cudf {
/**
* @addtogroup transformation_replace
* @{
* @file
*/
/**
 * @brief Policy to specify the position of replacement values relative to null rows
 *
 * `PRECEDING` means the replacement value is the first non-null value preceding the null row.
 * `FOLLOWING` means the replacement value is the first non-null value following the null row.
 */
enum class replace_policy : bool { PRECEDING, FOLLOWING };
/**
 * @brief Replaces all null values in a column with corresponding values of another column
 *
 * If `input[i]` is NULL, then `output[i]` will contain `replacement[i]`.
 * `input` and `replacement` must be of the same type and size.
 *
 * @param[in] input A column whose null values will be replaced
 * @param[in] replacement A cudf::column whose values will replace null values in input
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr Device memory resource used to allocate device memory of the returned column
 *
 * @return A copy of `input` with the null values replaced with corresponding values from
 * `replacement`.
 */
std::unique_ptr<column> replace_nulls(
  column_view const& input,
  column_view const& replacement,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Replaces all null values in a column with a scalar.
 *
 * If `input[i]` is NULL, then `output[i]` will contain `replacement`.
 * `input` and `replacement` must have the same type.
 *
 * @param[in] input A column whose null values will be replaced
 * @param[in] replacement Scalar used to replace null values in `input`
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr Device memory resource used to allocate device memory of the returned column
 *
 * @return Copy of `input` with null values replaced by `replacement`
 */
std::unique_ptr<column> replace_nulls(
  column_view const& input,
  scalar const& replacement,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Replaces all null values in a column with the first non-null value that precedes/follows.
 *
 * If `input[i]` is NULL, then `output[i]` will contain the first non-null value that precedes or
 * follows the null value, based on `replace_policy`.
 *
 * @param[in] input A column whose null values will be replaced
 * @param[in] replace_policy Specify the position of replacement values relative to null values
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr Device memory resource used to allocate device memory of the returned column
 *
 * @return Copy of `input` with null values replaced based on `replace_policy`
 */
std::unique_ptr<column> replace_nulls(
  column_view const& input,
  replace_policy const& replace_policy,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Replaces all NaN values in a column with corresponding values from another column
 *
 * If `input[i]` is NaN, then `output[i]` will contain `replacement[i]`.
 * @code{.pseudo}
 * input = {1.0, NaN, 4.0}
 * replacement = {3.0, 9.0, 7.0}
 * output = {1.0, 9.0, 4.0}
 * @endcode
 *
 * @note Nulls are not considered to be NaN
 *
 * @throws cudf::logic_error If `input` and `replacement` are of different type or size.
 * @throws cudf::logic_error If `input` or `replacement` are not of floating-point dtype.
 *
 * @param input A column whose NaN values will be replaced
 * @param replacement A cudf::column whose values will replace NaN values in input
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A copy of `input` with the NaN values replaced with corresponding values from
 * `replacement`.
 */
std::unique_ptr<column> replace_nans(
  column_view const& input,
  column_view const& replacement,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Replaces all NaN values in a column with a scalar
 *
 * If `input[i]` is NaN, then `output[i]` will contain `replacement`.
 * @code{.pseudo}
 * input = {1.0, NaN, 4.0}
 * replacement = 7.0
 * output = {1.0, 7.0, 4.0}
 * @endcode
 *
 * @note Nulls are not considered to be NaN
 *
 * @throws cudf::logic_error If `input` and `replacement` are of different type.
 * @throws cudf::logic_error If `input` or `replacement` are not of floating-point dtype.
 *
 * @param input A column whose NaN values will be replaced
 * @param replacement A cudf::scalar whose value will replace NaN values in input
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return A copy of `input` with the NaN values replaced by `replacement`
 */
std::unique_ptr<column> replace_nans(
  column_view const& input,
  scalar const& replacement,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Return a copy of `input_col` replacing any `values_to_replace[i]`
 * found with `replacement_values[i]`.
 *
 * @note NOTE(review): `values_to_replace` and `replacement_values` presumably must have equal
 * sizes and the same type as `input_col` — confirm preconditions at the definition site.
 *
 * @param input_col The column to find and replace values in
 * @param values_to_replace The values to replace
 * @param replacement_values The values to replace with
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 *
 * @returns Copy of `input_col` with specified values replaced
 */
std::unique_ptr<column> find_and_replace_all(
  column_view const& input_col,
  column_view const& values_to_replace,
  column_view const& replacement_values,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Replaces values less than `lo` in `input` with `lo_replace`,
 * and values greater than `hi` with `hi_replace`.
 *
 * if `lo` is invalid, then lo will not be considered while
 * evaluating the input (Essentially considered minimum value of that type).
 * if `hi` is invalid, then hi will not be considered while
 * evaluating the input (Essentially considered maximum value of that type).
 *
 * @note If `lo` is valid then `lo_replace` should be valid
 *       If `hi` is valid then `hi_replace` should be valid
 *
 * ```
 * Example:
 * input: {1, 2, 3, NULL, 5, 6, 7}
 *
 * valid lo and hi
 * lo: 3, hi: 5, lo_replace : 0, hi_replace : 16
 * output:{0, 0, 3, NULL, 5, 16, 16}
 *
 * invalid lo
 * lo: NULL, hi: 5, lo_replace : 0, hi_replace : 16
 * output:{1, 2, 3, NULL, 5, 16, 16}
 *
 * invalid hi
 * lo: 3, hi: NULL, lo_replace : 0, hi_replace : 16
 * output:{0, 0, 3, NULL, 5, 6, 7}
 * ```
 *
 * @throws cudf::logic_error if `lo.type() != hi.type()`
 * @throws cudf::logic_error if `lo_replace.type() != hi_replace.type()`
 * @throws cudf::logic_error if `lo.type() != lo_replace.type()`
 * @throws cudf::logic_error if `lo.type() != input.type()`
 *
 * @param[in] input Column whose elements will be clamped
 * @param[in] lo Minimum clamp value. All elements less than `lo` will be replaced by `lo_replace`
 * Ignored if null.
 * @param[in] lo_replace All elements less than `lo` will be replaced by `lo_replace`
 * @param[in] hi Maximum clamp value. All elements greater than `hi` will be replaced by
 * `hi_replace`. Ignored if null.
 * @param[in] hi_replace All elements greater than `hi` will be replaced by `hi_replace`
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr Device memory resource used to allocate device memory of the returned column
 *
 * @return Returns a clamped column as per `lo` and `hi` boundaries
 */
std::unique_ptr<column> clamp(
  column_view const& input,
  scalar const& lo,
  scalar const& lo_replace,
  scalar const& hi,
  scalar const& hi_replace,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Replaces values less than `lo` in `input` with `lo`,
 * and values greater than `hi` with `hi`.
 *
 * Effectively equivalent to the five-argument overload with `lo_replace == lo` and
 * `hi_replace == hi`.
 *
 * if `lo` is invalid, then lo will not be considered while
 * evaluating the input (Essentially considered minimum value of that type).
 * if `hi` is invalid, then hi will not be considered while
 * evaluating the input (Essentially considered maximum value of that type).
 *
 * ```
 * Example:
 * input: {1, 2, 3, NULL, 5, 6, 7}
 *
 * valid lo and hi
 * lo: 3, hi: 5
 * output:{3, 3, 3, NULL, 5, 5, 5}
 *
 * invalid lo
 * lo: NULL, hi:5
 * output:{1, 2, 3, NULL, 5, 5, 5}
 *
 * invalid hi
 * lo: 3, hi:NULL
 * output:{3, 3, 3, NULL, 5, 6, 7}
 * ```
 *
 * @throws cudf::logic_error if `lo.type() != hi.type()`
 * @throws cudf::logic_error if `lo.type() != input.type()`
 *
 * @param[in] input Column whose elements will be clamped
 * @param[in] lo Minimum clamp value. All elements less than `lo` will be replaced by `lo` Ignored
 * if null.
 * @param[in] hi Maximum clamp value. All elements greater than `hi` will be replaced by `hi`
 * Ignored if null.
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr Device memory resource used to allocate device memory of the returned column
 *
 * @return Returns a clamped column as per `lo` and `hi` boundaries
 */
std::unique_ptr<column> clamp(
  column_view const& input,
  scalar const& lo,
  scalar const& hi,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Copies from a column of floating-point elements and replaces `-NaN` and `-0.0` with `+NaN`
 * and `+0.0`, respectively.
 *
 * Converts floating point values from @p input using the following rules:
 * Convert -NaN -> NaN
 * Convert -0.0 -> 0.0
 *
 * @throws cudf::logic_error if column does not have floating point data type.
 * @param[in] input column_view of floating-point elements to copy and normalize
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param[in] mr device_memory_resource allocator for allocating output data
 *
 * @returns new column with the modified data
 */
std::unique_ptr<column> normalize_nans_and_zeros(
  column_view const& input,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Modifies a column of floating-point elements to replace all `-NaN` and `-0.0` with `+NaN`
 * and `+0.0`, respectively.
 *
 * Converts floating point values from @p in_out using the following rules:
 * Convert -NaN -> NaN
 * Convert -0.0 -> 0.0
 *
 * @throws cudf::logic_error if column does not have floating point data type.
 * @param[in, out] in_out Mutable view of floating-point elements to normalize in place
 * @param stream CUDA stream used for device memory operations and kernel launches
 */
void normalize_nans_and_zeros(mutable_column_view& in_out,
                              rmm::cuda_stream_view stream = cudf::get_default_stream());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/partitioning.hpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/hashing.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>
namespace cudf {
/**
* @addtogroup reorder_partition
* @{
* @file
* @brief Column partitioning APIs
*/
/**
 * @brief Partitions rows of `t` according to the mapping specified by
 * `partition_map`.
 *
 * For each row at `i` in `t`, `partition_map[i]` indicates which partition row
 * `i` belongs to. `partition` creates a new table by rearranging the rows of
 * `t` such that rows in the same partition are contiguous. The returned table
 * is in ascending partition order from `[0, num_partitions)`. The order within
 * each partition is undefined.
 *
 * Returns a `vector<size_type>` of `num_partitions + 1` values that indicate
 * the starting position of each partition within the returned table, i.e.,
 * partition `i` starts at `offsets[i]` (inclusive) and ends at `offset[i+1]`
 * (exclusive). As a result, if value `j` in `[0, num_partitions)` does not
 * appear in `partition_map`, partition `j` will be empty, i.e.,
 * `offsets[j+1] - offsets[j] == 0`.
 *
 * Values in `partition_map` must be in the range `[0, num_partitions)`,
 * otherwise behavior is undefined.
 *
 * @throw cudf::logic_error when `partition_map` is a non-integer type
 * @throw cudf::logic_error when `partition_map.has_nulls() == true`
 * @throw cudf::logic_error when `partition_map.size() != t.num_rows()`
 *
 * @param t The table to partition
 * @param partition_map Non-nullable column of integer values that map each row
 * in `t` to its partition.
 * @param num_partitions The total number of partitions
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return Pair containing the reordered table and vector of `num_partitions +
 * 1` offsets to each partition such that the size of partition `i` is
 * determined by `offset[i+1] - offset[i]`.
 */
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
  table_view const& t,
  column_view const& partition_map,
  size_type num_partitions,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Partitions rows from the input table into multiple output tables.
 *
 * Partitions rows of `input` into `num_partitions` bins based on the hash
 * value of the columns specified by `columns_to_hash`. Rows partitioned into
 * the same bin are grouped consecutively in the output table. Returns a vector
 * of row offsets to the start of each partition in the output table.
 *
 * @throw std::out_of_range if an index in `columns_to_hash` is invalid
 *
 * @param input The table to partition
 * @param columns_to_hash Indices of input columns to hash
 * @param num_partitions The number of partitions to use
 * @param hash_function Optional hash id that chooses the hash function to use
 * @param seed Optional seed value to the hash function
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 *
 * @returns An output table and a vector of row offsets to each partition
 */
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
  table_view const& input,
  std::vector<size_type> const& columns_to_hash,
  int num_partitions,
  hash_id hash_function               = hash_id::HASH_MURMUR3,
  uint32_t seed                       = DEFAULT_HASH_SEED,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Round-robin partition.
*
* Returns a new table with rows re-arranged into partition groups and
* a vector of row offsets to the start of each partition in the output table.
* Rows are assigned partitions based on their row index in the table,
* in a round robin fashion.
*
* @throws cudf::logic_error if `num_partitions <= 1`
* @throws cudf::logic_error if `start_partition >= num_partitions`
*
* A good analogy for the algorithm is dealing out cards:
*
* 1. The deck of cards is represented as the rows in the table.
* 2. The number of partitions is the number of players being dealt cards.
* 3. the start_partition indicates which player starts getting cards first.
*
* The algorithm has two outcomes:
*
* 1. Another deck of cards formed by stacking each
* player's cards back into a deck again,
* preserving the order of cards dealt to each player,
* starting with player 0.
* 2. A vector into the output deck indicating where a player's cards start.
*
* A player's deck (partition) is the range of cards starting
* at the corresponding offset and ending at the next player's
* starting offset or the last card in the deck if it's the last player.
*
* When num_partitions > nrows, we have more players than cards.
 * We start dealing to the first indicated player and continue
 * around the players until we run out of cards before we run out of players.
* Players that did not get any cards are represented by
* `offset[i] == offset[i+1] or
* offset[i] == table.num_rows() if i == num_partitions-1`
* meaning there are no cards (rows) in their deck (partition).
*
* ```
* Example 1:
* input:
* table => col 1 {0, ..., 12}
* num_partitions = 3
* start_partition = 0
*
* output: pair<table, partition_offsets>
* table => col 1 {0,3,6,9,12,1,4,7,10,2,5,8,11}
* partition_offsets => {0,5,9}
*
* Example 2:
* input:
* table => col 1 {0, ..., 12}
* num_partitions = 3
* start_partition = 1
*
* output: pair<table, partition_offsets>
* table => col 1 {2,5,8,11,0,3,6,9,12,1,4,7,10}
* partition_offsets => {0,4,9}
*
* Example 3:
* input:
* table => col 1 {0, ..., 10}
* num_partitions = 3
* start_partition = 0
*
* output: pair<table, partition_offsets>
* table => col 1 {0,3,6,9,1,4,7,10,2,5,8}
* partition_offsets => {0,4,8}
*
* Example 4:
* input:
* table => col 1 {0, ..., 10}
* num_partitions = 3
* start_partition = 1
*
* output: pair<table, partition_offsets>
* table => col 1 {2,5,8,0,3,6,9,1,4,7,10}
* partition_offsets => {0,3,7}
*
* Example 5:
* input:
* table => col 1 {0, ..., 10}
* num_partitions = 3
* start_partition = 2
*
* output: pair<table, partition_offsets>
* table => col 1 {1,4,7,10,2,5,8,0,3,6,9}
* partition_offsets => {0,4,7}
*
* Example 6:
* input:
* table => col 1 {0, ..., 10}
* num_partitions = 15 > num_rows = 11
* start_partition = 2
*
* output: pair<table, partition_offsets>
* table => col 1 {0,1,2,3,4,5,6,7,8,9,10}
* partition_offsets => {0,0,0,1,2,3,4,5,6,7,8,9,10,11,11}
*
* Example 7:
* input:
* table => col 1 {0, ..., 10}
* num_partitions = 15 > num_rows = 11
* start_partition = 10
*
* output: pair<table, partition_offsets>
* table => col 1 {5,6,7,8,9,10,0,1,2,3,4}
* partition_offsets => {0,1,2,3,4,5,6,6,6,6,6,7,8,9,10}
*
* Example 8:
* input:
* table => col 1 {0, ..., 10}
* num_partitions = 15 > num_rows = 11
* start_partition = 14
*
* output: pair<table, partition_offsets>
* table => col 1 {1,2,3,4,5,6,7,8,9,10,0}
* partition_offsets => {0,1,2,3,4,5,6,7,8,9,10,10,10,10,10}
*
* Example 9:
* input:
* table => col 1 {0, ..., 10}
* num_partitions = 11 == num_rows = 11
* start_partition = 2
*
* output: pair<table, partition_offsets>
* table => col 1 {9,10,0,1,2,3,4,5,6,7,8}
* partition_offsets => {0,1,2,3,4,5,6,7,8,9,10}
* ```
*
 * @param input The input table to be round-robin partitioned
 * @param num_partitions Number of partitions for the table
 * @param start_partition Index of the 1st partition
 * @param mr Device memory resource used to allocate the returned table's device memory
*
* @return A std::pair consisting of a unique_ptr to the partitioned table
* and the partition offsets for each partition within the table.
*/
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> round_robin_partition(
table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/aggregation.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <functional>
#include <memory>
#include <vector>
/**
* @file aggregation.hpp
* @brief Representation for specifying desired aggregations from
* aggregation-based APIs, e.g., groupby, reductions, rolling, etc.
*
* @note Not all aggregation APIs support all aggregation operations. See
* individual function documentation to see what aggregations are supported.
*/
namespace cudf {
/**
* @addtogroup aggregation_factories
* @{
* @file
*/
// forward declaration
namespace detail {
class simple_aggregations_collector;
class aggregation_finalizer;
} // namespace detail
/**
 * @brief Tie-breaker method to use for ranking the column.
 *
 * Determines the rank assigned to rows that compare equal ("ties").
 *
 * @see cudf::make_rank_aggregation for more details.
 * @ingroup column_sort
 */
enum class rank_method : int32_t {
  FIRST,    ///< stable sort order ranking; ties broken by row order, so no repeated ranks
  AVERAGE,  ///< tied values receive the mean of the ranks they span (may produce non-integers)
  MIN,      ///< tied values all receive the lowest rank in the tie group
  MAX,      ///< tied values all receive the highest rank in the tie group
  DENSE     ///< rank always increases by 1 between distinct values; no gaps after ties
};
/**
 * @brief Whether the returned rank should be converted to a percentage, and
 * if so, the type of percentage normalization to apply.
 *
 * Here `count` is the number of rows in the group being ranked.
 */
enum class rank_percentage : int32_t {
  NONE,             ///< raw rank; no conversion to percentage
  ZERO_NORMALIZED,  ///< rank / count
  ONE_NORMALIZED    ///< (rank - 1) / (count - 1)
};
/**
 * @brief Abstract base class for specifying the desired aggregation in an
 * `aggregation_request`.
 *
 * All aggregations must derive from this class to implement the pure virtual
 * functions and potentially encapsulate additional information needed to
 * compute the aggregation.
 */
class aggregation {
 public:
  /**
   * @brief Possible aggregation operations
   */
  enum Kind {
    SUM,             ///< sum reduction
    PRODUCT,         ///< product reduction
    MIN,             ///< min reduction
    MAX,             ///< max reduction
    COUNT_VALID,     ///< count number of valid elements
    COUNT_ALL,       ///< count number of elements
    ANY,             ///< any reduction
    ALL,             ///< all reduction
    SUM_OF_SQUARES,  ///< sum of squares reduction
    MEAN,            ///< arithmetic mean reduction
    M2,              ///< sum of squares of differences from the mean
    VARIANCE,        ///< variance
    STD,             ///< standard deviation
    MEDIAN,          ///< median reduction
    QUANTILE,        ///< compute specified quantile(s)
    ARGMAX,          ///< Index of max element
    ARGMIN,          ///< Index of min element
    NUNIQUE,         ///< count number of unique elements
    NTH_ELEMENT,     ///< get the nth element
    ROW_NUMBER,      ///< get row-number of current index (relative to rolling window)
    RANK,            ///< get rank of current index
    COLLECT_LIST,    ///< collect values into a list
    COLLECT_SET,     ///< collect values into a list without duplicate entries
    LEAD,            ///< window function, accesses row at specified offset following current row
    LAG,             ///< window function, accesses row at specified offset preceding current row
    PTX,             ///< PTX UDF based reduction
    CUDA,            ///< CUDA UDF based reduction
    MERGE_LISTS,     ///< merge multiple lists values into one list
    MERGE_SETS,      ///< merge multiple lists values into one list then drop duplicate entries
    MERGE_M2,        ///< merge partial values of M2 aggregation
    COVARIANCE,      ///< covariance between two sets of elements
    CORRELATION,     ///< correlation between two sets of elements
    TDIGEST,         ///< create a tdigest from a set of input values
    MERGE_TDIGEST,   ///< create a tdigest by merging multiple tdigests together
    HISTOGRAM,       ///< compute frequency of each element
    MERGE_HISTOGRAM  ///< merge partial values of HISTOGRAM aggregation
  };
  aggregation() = delete;
  /**
   * @brief Construct a new aggregation object
   *
   * @param a aggregation::Kind enum value
   */
  aggregation(aggregation::Kind a) : kind{a} {}
  Kind kind;  ///< The aggregation to perform
  virtual ~aggregation() = default;
  /**
   * @brief Compares two aggregation objects for equality
   *
   * @param other The other aggregation to compare with
   * @return True if the two aggregations are equal
   */
  [[nodiscard]] virtual bool is_equal(aggregation const& other) const { return kind == other.kind; }
  /**
   * @brief Computes the hash value of the aggregation
   *
   * @return The hash value of the aggregation
   */
  [[nodiscard]] virtual size_t do_hash() const { return std::hash<int>{}(kind); }
  /**
   * @pure @brief Clones the aggregation object
   *
   * @return A copy of the aggregation object
   */
  [[nodiscard]] virtual std::unique_ptr<aggregation> clone() const = 0;
  // override functions for compound aggregations
  /**
   * @pure @brief Get the simple aggregations that this aggregation requires to compute.
   *
   * @param col_type The type of the column to aggregate
   * @param collector The collector visitor pattern to use to collect the simple aggregations
   * @return Vector of pre-requisite simple aggregations
   */
  virtual std::vector<std::unique_ptr<aggregation>> get_simple_aggregations(
    data_type col_type, cudf::detail::simple_aggregations_collector& collector) const = 0;
  /**
   * @pure @brief Compute the aggregation after pre-requisite simple aggregations have been
   * computed.
   *
   * @param finalizer The finalizer visitor pattern to use to compute the aggregation
   */
  virtual void finalize(cudf::detail::aggregation_finalizer& finalizer) const = 0;
};
/**
 * @brief Derived class intended for rolling_window specific aggregation usage.
 *
 * As an example, rolling_window will only accept rolling_aggregation inputs,
 * and the appropriate derived classes (sum_aggregation, mean_aggregation, etc)
 * derive from this interface to represent these valid options.
 */
class rolling_aggregation : public virtual aggregation {
 public:
  ~rolling_aggregation() override = default;
 protected:
  /// Default constructor; protected so only concrete derived aggregations construct this tag.
  rolling_aggregation() {}
  /// constructor inherited from cudf::aggregation
  using aggregation::aggregation;
};
/**
 * @brief Derived class intended for groupby specific aggregation usage.
 */
class groupby_aggregation : public virtual aggregation {
 public:
  ~groupby_aggregation() override = default;
 protected:
  /// Default constructor; protected so only concrete derived aggregations construct this tag.
  groupby_aggregation() {}
};
/**
 * @brief Derived class intended for groupby specific scan usage.
 */
class groupby_scan_aggregation : public virtual aggregation {
 public:
  ~groupby_scan_aggregation() override = default;
 protected:
  /// Default constructor; protected so only concrete derived aggregations construct this tag.
  groupby_scan_aggregation() {}
};
/**
 * @brief Derived class intended for reduction usage.
 */
class reduce_aggregation : public virtual aggregation {
 public:
  ~reduce_aggregation() override = default;
 protected:
  /// Default constructor; protected so only concrete derived aggregations construct this tag.
  reduce_aggregation() {}
};
/**
 * @brief Derived class intended for scan usage.
 */
class scan_aggregation : public virtual aggregation {
 public:
  ~scan_aggregation() override = default;
 protected:
  /// Default constructor; protected so only concrete derived aggregations construct this tag.
  scan_aggregation() {}
};
/**
 * @brief Derived class intended for segmented reduction usage.
 */
class segmented_reduce_aggregation : public virtual aggregation {
 public:
  ~segmented_reduce_aggregation() override = default;
 protected:
  /// Default constructor; protected so only concrete derived aggregations construct this tag.
  segmented_reduce_aggregation() {}
};
/// Type of code in the user defined function string.
enum class udf_type : bool { CUDA, PTX };
/// Type of correlation method.
enum class correlation_type : int32_t { PEARSON, KENDALL, SPEARMAN };
/// Factory to create a SUM aggregation
/// @return A SUM aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_sum_aggregation();
/// Factory to create a PRODUCT aggregation
/// @return A PRODUCT aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_product_aggregation();
/// Factory to create a MIN aggregation
/// @return A MIN aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_min_aggregation();
/// Factory to create a MAX aggregation
/// @return A MAX aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_max_aggregation();
/**
* @brief Factory to create a COUNT aggregation
*
* @param null_handling Indicates if null values will be counted
* @return A COUNT aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_count_aggregation(null_policy null_handling = null_policy::EXCLUDE);
/// Factory to create an ANY aggregation
/// @return An ANY aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_any_aggregation();
/// Factory to create an ALL aggregation
/// @return An ALL aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_all_aggregation();
/// Factory to create a HISTOGRAM aggregation
/// @return A HISTOGRAM aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_histogram_aggregation();
/// Factory to create a SUM_OF_SQUARES aggregation
/// @return A SUM_OF_SQUARES aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_sum_of_squares_aggregation();
/// Factory to create a MEAN aggregation
/// @return A MEAN aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_mean_aggregation();
/**
* @brief Factory to create a M2 aggregation
*
* A M2 aggregation is sum of squares of differences from the mean. That is:
* `M2 = SUM((x - MEAN) * (x - MEAN))`.
*
* This aggregation produces the intermediate values that are used to compute variance and standard
* deviation across multiple discrete sets. See
* `https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm` for more
* detail.
* @return A M2 aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_m2_aggregation();
/**
* @brief Factory to create a VARIANCE aggregation
*
* @param ddof Delta degrees of freedom. The divisor used in calculation of
* `variance` is `N - ddof`, where `N` is the population size.
*
* @throw cudf::logic_error if input type is chrono or compound types.
* @return A VARIANCE aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_variance_aggregation(size_type ddof = 1);
/**
* @brief Factory to create a STD aggregation
*
* @param ddof Delta degrees of freedom. The divisor used in calculation of
* `std` is `N - ddof`, where `N` is the population size.
*
* @throw cudf::logic_error if input type is chrono or compound types.
* @return A STD aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_std_aggregation(size_type ddof = 1);
/// Factory to create a MEDIAN aggregation
/// @return A MEDIAN aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_median_aggregation();
/**
* @brief Factory to create a QUANTILE aggregation
*
* @param quantiles The desired quantiles
* @param interp The desired interpolation
* @return A QUANTILE aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_quantile_aggregation(std::vector<double> const& quantiles,
interpolation interp = interpolation::LINEAR);
/**
* @brief Factory to create an ARGMAX aggregation
*
* ARGMAX returns the index of the maximum element.
 * @return An ARGMAX aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_argmax_aggregation();
/**
* @brief Factory to create an ARGMIN aggregation
*
* `argmin` returns the index of the minimum element.
 * @return An ARGMIN aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_argmin_aggregation();
/**
* @brief Factory to create a NUNIQUE aggregation
*
* NUNIQUE returns the number of unique elements.
* @param null_handling Indicates if null values will be counted
* @return A NUNIQUE aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_nunique_aggregation(null_policy null_handling = null_policy::EXCLUDE);
/**
* @brief Factory to create a NTH_ELEMENT aggregation
*
* NTH_ELEMENT returns the n'th element of the group/series.
*
* If @p n is not within the range `[-group_size, group_size)`, the result of
* the respective group will be null. Negative indices `[-group_size, -1]`
 * correspond to `[0, group_size-1]` indices respectively where `group_size` is
* the size of each group.
*
* @param n index of nth element in each group
* @param null_handling Indicates to include/exclude nulls during indexing
* @return A NTH_ELEMENT aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_nth_element_aggregation(
size_type n, null_policy null_handling = null_policy::INCLUDE);
/// Factory to create a ROW_NUMBER aggregation
/// @return A ROW_NUMBER aggregation object
template <typename Base = aggregation>
std::unique_ptr<Base> make_row_number_aggregation();
/**
* @brief Factory to create a RANK aggregation
*
* `RANK` returns a column of size_type or double "ranks" (see note 3 below for how the
* data type is determined) for a given rank method and column order.
* If nulls are excluded, the rank will be null for those rows, otherwise a non-nullable column is
* returned. Double precision column is returned only when percentage!=NONE and when rank method is
* average.
*
* This aggregation only works with "scan" algorithms. The input column into the group or
* ungrouped scan is an orderby column that orders the rows that the aggregate function ranks.
* If rows are ordered by more than one column, the orderby input column should be a struct
* column containing the ordering columns.
*
* Note:
* 1. This method could work faster with the rows that are presorted by the group keys and order_by
* columns. Though groupby object does not require order_by column to be sorted, groupby rank
* scan aggregation does require the order_by column to be sorted if the keys are sorted.
* 2. `RANK` aggregations are not compatible with exclusive scans.
* 3. All rank methods except AVERAGE method and percentage!=NONE returns size_type column.
* For AVERAGE method and percentage!=NONE, the return type is double column.
*
* @code{.pseudo}
* Example: Consider a motor-racing statistics dataset, containing the following columns:
* 1. venue: (STRING) Location of the race event
* 2. driver: (STRING) Name of the car driver (abbreviated to 3 characters)
* 3. time: (INT32) Time taken to complete the circuit
*
* For the following presorted data:
*
* [ // venue, driver, time
* { "silverstone", "HAM" ("hamilton"), 15823},
* { "silverstone", "LEC" ("leclerc"), 15827},
* { "silverstone", "BOT" ("bottas"), 15834}, // <-- Tied for 3rd place.
* { "silverstone", "NOR" ("norris"), 15834}, // <-- Tied for 3rd place.
* { "silverstone", "RIC" ("ricciardo"), 15905},
* { "monza", "RIC" ("ricciardo"), 12154},
* { "monza", "NOR" ("norris"), 12156}, // <-- Tied for 2nd place.
* { "monza", "BOT" ("bottas"), 12156}, // <-- Tied for 2nd place.
* { "monza", "LEC" ("leclerc"), 12201},
* { "monza", "PER" ("perez"), 12203}
* ]
*
* A grouped rank aggregation scan with:
* groupby column : venue
* input orderby column: time
* Produces the following rank column for each methods:
* first: { 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}
* average: { 1, 2, 3.5, 3.5, 5, 1, 2.5, 2.5, 4, 5}
* min: { 1, 2, 3, 3, 5, 1, 2, 2, 4, 5}
* max: { 1, 2, 4, 4, 5, 1, 3, 3, 4, 5}
* dense: { 1, 2, 3, 3, 4, 1, 2, 2, 3, 4}
* This corresponds to the following grouping and `driver` rows:
* { "HAM", "LEC", "BOT", "NOR", "RIC", "RIC", "NOR", "BOT", "LEC", "PER" }
* <----------silverstone----------->|<-------------monza-------------->
*
* min rank for each percentage types:
* NONE: { 1, 2, 3, 3, 5, 1, 2, 2, 4, 5 }
* ZERO_NORMALIZED : { 0.16, 0.33, 0.50, 0.50, 0.83, 0.16, 0.33, 0.33, 0.66, 0.83 }
* ONE_NORMALIZED: { 0.00, 0.25, 0.50, 0.50, 1.00, 0.00, 0.25, 0.25, 0.75, 1.00 }
* where count corresponds to the number of rows in the group. @see cudf::rank_percentage
*
* @endcode
*
* @param method The ranking method used for tie breaking (same values)
* @param column_order The desired sort order for ranking
* @param null_handling flag to include nulls during ranking If nulls are not included,
* the corresponding rank will be null.
* @param null_precedence The desired order of null compared to other elements for column
* @param percentage enum to denote the type of conversion of ranks to percentage in range (0,1]
* @return A RANK aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_rank_aggregation(rank_method method,
order column_order = order::ASCENDING,
null_policy null_handling = null_policy::EXCLUDE,
null_order null_precedence = null_order::AFTER,
rank_percentage percentage = rank_percentage::NONE);
/**
* @brief Factory to create a COLLECT_LIST aggregation
*
* `COLLECT_LIST` returns a list column of all included elements in the group/series.
*
* If `null_handling` is set to `EXCLUDE`, null elements are dropped from each
* of the list rows.
*
* @param null_handling Indicates whether to include/exclude nulls in list elements
* @return A COLLECT_LIST aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_collect_list_aggregation(
null_policy null_handling = null_policy::INCLUDE);
/**
* @brief Factory to create a COLLECT_SET aggregation
*
* `COLLECT_SET` returns a lists column of all included elements in the group/series. Within each
* list, the duplicated entries are dropped out such that each entry appears only once.
*
* If `null_handling` is set to `EXCLUDE`, null elements are dropped from each
* of the list rows.
*
* @param null_handling Indicates whether to include/exclude nulls during collection
* @param nulls_equal Flag to specify whether null entries within each list should be considered
* equal.
* @param nans_equal Flag to specify whether NaN values in floating point column should be
* considered equal.
* @return A COLLECT_SET aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_collect_set_aggregation(
null_policy null_handling = null_policy::INCLUDE,
null_equality nulls_equal = null_equality::EQUAL,
nan_equality nans_equal = nan_equality::ALL_EQUAL);
/**
* @brief Factory to create a LAG aggregation
*
* @param offset The number of rows to lag the input
* @return A LAG aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_lag_aggregation(size_type offset);
/**
* @brief Factory to create a LEAD aggregation
*
* @param offset The number of rows to lead the input
* @return A LEAD aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_lead_aggregation(size_type offset);
/**
* @brief Factory to create an aggregation base on UDF for PTX or CUDA
*
 * @param type Either udf_type::PTX or udf_type::CUDA
 * @param user_defined_aggregator A string containing the aggregator code
 * @param output_type Expected output type
*
* @return An aggregation containing a user-defined aggregator string
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_udf_aggregation(udf_type type,
std::string const& user_defined_aggregator,
data_type output_type);
/**
* @brief Factory to create a MERGE_LISTS aggregation.
*
* Given a lists column, this aggregation merges all the lists corresponding to the same key value
* into one list. It is designed specifically to merge the partial results of multiple (distributed)
* groupby `COLLECT_LIST` aggregations into a final `COLLECT_LIST` result. As such, it requires the
* input lists column to be non-nullable (the child column containing list entries is not subjected
* to this requirement).
*
* @return A MERGE_LISTS aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_merge_lists_aggregation();
/**
* @brief Factory to create a MERGE_SETS aggregation.
*
* Given a lists column, this aggregation firstly merges all the lists corresponding to the same key
* value into one list, then it drops all the duplicate entries in each lists, producing a lists
* column containing non-repeated entries.
*
* This aggregation is designed specifically to merge the partial results of multiple (distributed)
* groupby `COLLECT_LIST` or `COLLECT_SET` aggregations into a final `COLLECT_SET` result. As such,
* it requires the input lists column to be non-nullable (the child column containing list entries
* is not subjected to this requirement).
*
* In practice, the input (partial results) to this aggregation should be generated by (distributed)
* `COLLECT_LIST` aggregations, not `COLLECT_SET`, to avoid unnecessarily removing duplicate entries
* for the partial results.
*
* @param nulls_equal Flag to specify whether nulls within each list should be considered equal
* during dropping duplicate list entries.
* @param nans_equal Flag to specify whether NaN values in floating point column should be
* considered equal during dropping duplicate list entries.
* @return A MERGE_SETS aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_merge_sets_aggregation(
null_equality nulls_equal = null_equality::EQUAL,
nan_equality nans_equal = nan_equality::ALL_EQUAL);
/**
* @brief Factory to create a MERGE_M2 aggregation
*
* Merges the results of `M2` aggregations on independent sets into a new `M2` value equivalent to
* if a single `M2` aggregation was done across all of the sets at once. This aggregation is only
* valid on structs whose members are the result of the `COUNT_VALID`, `MEAN`, and `M2` aggregations
* on the same sets. The output of this aggregation is a struct containing the merged `COUNT_VALID`,
* `MEAN`, and `M2` aggregations.
*
* The input `M2` aggregation values are expected to be all non-negative numbers, since they
* were output from `M2` aggregation.
*
* @return A MERGE_M2 aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_merge_m2_aggregation();
/**
* @brief Factory to create a MERGE_HISTOGRAM aggregation
*
* Merges the results of `HISTOGRAM` aggregations on independent sets into a new `HISTOGRAM` value
* equivalent to if a single `HISTOGRAM` aggregation was done across all of the sets at once.
*
* @return A MERGE_HISTOGRAM aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_merge_histogram_aggregation();
/**
* @brief Factory to create a COVARIANCE aggregation
*
* Compute covariance between two columns.
* The input columns are child columns of a non-nullable struct columns.
* @param min_periods Minimum number of non-null observations required to produce a result
* @param ddof Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N is
* the number of non-null observations.
* @return A COVARIANCE aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_covariance_aggregation(size_type min_periods = 1, size_type ddof = 1);
/**
* @brief Factory to create a CORRELATION aggregation
*
* Compute correlation coefficient between two columns.
* The input columns are child columns of a non-nullable struct columns.
*
* @param type correlation_type
* @param min_periods Minimum number of non-null observations required to produce a result
* @return A CORRELATION aggregation object
*/
template <typename Base = aggregation>
std::unique_ptr<Base> make_correlation_aggregation(correlation_type type,
size_type min_periods = 1);
/**
* @brief Factory to create a TDIGEST aggregation
*
* Produces a tdigest (https://arxiv.org/pdf/1902.04023.pdf) column from input values.
* The input aggregation values are expected to be fixed-width numeric types.
*
* The tdigest column produced is of the following structure:
*
* struct {
* // centroids for the digest
* list {
* struct {
* double // mean
* double // weight
* },
* ...
* }
* // these are from the input stream, not the centroids. they are used
* // during the percentile_approx computation near the beginning or
* // end of the quantiles
* double // min
* double // max
* }
*
* Each output row is a single tdigest. The length of the row is the "size" of the
* tdigest, each element of which represents a weighted centroid (mean, weight).
*
* @param max_centroids Parameter controlling compression level and accuracy on subsequent
* queries on the output tdigest data. `max_centroids` places an upper bound on the size of
* the computed tdigests: A value of 1000 will result in a tdigest containing no
* more than 1000 centroids (32 bytes each). Higher result in more accurate tdigest information.
*
* @return A TDIGEST aggregation object
*/
template <typename Base>
std::unique_ptr<Base> make_tdigest_aggregation(int max_centroids = 1000);
/**
* @brief Factory to create a MERGE_TDIGEST aggregation
*
* Merges the results from a previous aggregation resulting from a `make_tdigest_aggregation`
* or `make_merge_tdigest_aggregation` to produce a new a tdigest
* (https://arxiv.org/pdf/1902.04023.pdf) column.
*
* The tdigest column produced is of the following structure:
*
* struct {
* // centroids for the digest
* list {
* struct {
* double // mean
* double // weight
* },
* ...
* }
* // these are from the input stream, not the centroids. they are used
* // during the percentile_approx computation near the beginning or
* // end of the quantiles
* double // min
* double // max
* }
*
* Each output row is a single tdigest. The length of the row is the "size" of the
* tdigest, each element of which represents a weighted centroid (mean, weight).
*
* @param max_centroids Parameter controlling compression level and accuracy on subsequent
* queries on the output tdigest data. `max_centroids` places an upper bound on the size of
* the computed tdigests: A value of 1000 will result in a tdigest containing no
* more than 1000 centroids (32 bytes each). Higher result in more accurate tdigest information.
*
* @return A MERGE_TDIGEST aggregation object
*/
template <typename Base>
std::unique_ptr<Base> make_merge_tdigest_aggregation(int max_centroids = 1000);
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/datetime.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
/**
* @file datetime.hpp
* @brief DateTime column APIs.
*/
namespace cudf {
namespace datetime {
/**
* @addtogroup datetime_extract
* @{
* @file
*/
/**
* @brief Extracts year from any datetime type and returns an int16_t
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t years
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_year(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts month from any datetime type and returns an int16_t
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t months
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_month(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts day from any datetime type and returns an int16_t
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t days
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_day(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts a weekday from any datetime type and returns an int16_t
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t days
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_weekday(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts hour from any datetime type and returns an int16_t
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t hours
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_hour(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts minute from any datetime type and returns an int16_t
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t minutes
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_minute(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts second from any datetime type and returns an int16_t
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t seconds
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_second(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts millisecond fraction from any datetime type and returns an int16_t
* cudf::column.
*
* A millisecond fraction is only the 3 digits that make up the millisecond portion of a duration.
* For example, the millisecond fraction of 1.234567890 seconds is 234.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t milliseconds
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_millisecond_fraction(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts microsecond fraction from any datetime type and returns an int16_t
* cudf::column.
*
* A microsecond fraction is only the 3 digits that make up the microsecond portion of a duration.
* For example, the microsecond fraction of 1.234567890 seconds is 567.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t microseconds
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_microsecond_fraction(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extracts nanosecond fraction from any datetime type and returns an int16_t
* cudf::column.
*
* A nanosecond fraction is only the 3 digits that make up the nanosecond portion of a duration.
* For example, the nanosecond fraction of 1.234567890 seconds is 890.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of the extracted int16_t nanoseconds
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> extract_nanosecond_fraction(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
/**
* @addtogroup datetime_compute
* @{
* @file
*/
/**
* @brief Computes the last day of the month in datetime type and returns a TIMESTAMP_DAYS
* cudf::column.
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column containing last day of the month as TIMESTAMP_DAYS
* @throw cudf::logic_error if input column datatype is not TIMESTAMP
*/
std::unique_ptr<cudf::column> last_day_of_month(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Computes the day number since the start of the year from the datetime and
* returns an int16_t cudf::column. The value is between [1, {365-366}]
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of datatype INT16 containing the day number since the start of the year
* @throw cudf::logic_error if input column datatype is not a TIMESTAMP
*/
std::unique_ptr<cudf::column> day_of_year(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Adds or subtracts a number of months from the datetime type and returns a
* timestamp column that is of the same type as the input `timestamps` column.
*
* For a given row, if the `timestamps` or the `months` column value is null,
* the output for that row is null.
* This method preserves the input time and the day where applicable. The date is rounded
* down to the last day of the month for that year, if the new day is invalid for that month.
*
* @code{.pseudo}
* Example:
* timestamps = [5/31/20 08:00:00, 5/31/20 00:00:00, 5/31/20 13:00:00, 5/31/20 23:00:00,
* 6/30/20 00:00:01, 6/30/20 14:12:13]
* months = [1 , -1 , -3 , -15 ,
* -1 , 1]
* r = add_calendrical_months(timestamp_column, months_column)
* r is [6/30/20 08:00:00, 4/30/20 00:00:00, 2/29/20 13:00:00, 2/28/19 23:00:00,
* 5/30/20 00:00:01, 7/30/20 14:12:13]
* @endcode
* @throw cudf::logic_error if `timestamps` datatype is not a TIMESTAMP or if `months` datatype
* is not INT16 or INT32.
* @throw cudf::logic_error if `timestamps` column size is not equal to `months` column size.
*
* @param timestamps cudf::column_view of timestamp type
* @param months cudf::column_view of integer type containing the number of months to add
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of timestamp type containing the computed timestamps
*/
std::unique_ptr<cudf::column> add_calendrical_months(
cudf::column_view const& timestamps,
cudf::column_view const& months,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Adds or subtracts a number of months from the datetime type and returns a
* timestamp column that is of the same type as the input `timestamps` column.
*
* For a given row, if the `timestamps` value is null, the output for that row is null.
* A null months scalar would result in an all null column.
* This method preserves the input time and the day where applicable. The date is rounded
* down to the last day of the month for that year, if the new day is invalid for that month.
*
* @code{.pseudo}
* Example:
* timestamps = [5/31/20 08:00:00, 6/30/20 00:00:00, 7/31/20 13:00:00]
* months = -3
* output is [2/29/20 08:00:00, 3/30/20 00:00:00, 4/30/20 13:00:00]
*
* timestamps = [4/28/20 04:00:00, 5/30/20 01:00:00, 6/30/20 21:00:00]
* months = 1
* output is [5/28/20 04:00:00, 6/30/20 01:00:00, 7/30/20 21:00:00]
* @endcode
*
* @throw cudf::logic_error if `timestamps` datatype is not a TIMESTAMP or if `months` datatype
* is not INT16 or INT32.
* @throw cudf::logic_error if `timestamps` column size is not equal to `months` column size.
*
* @param timestamps cudf::column_view of timestamp type
* @param months cudf::scalar of integer type containing the number of months to add
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @return cudf::column of timestamp type containing the computed timestamps
*/
std::unique_ptr<cudf::column> add_calendrical_months(
cudf::column_view const& timestamps,
cudf::scalar const& months,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Check if the year of the given date is a leap year
*
* `output[i] == true` if year of `column[i]` is a leap year
* `output[i] == false` if year of `column[i]` is not a leap year
* `output[i] is null` if `column[i]` is null
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @returns cudf::column of datatype BOOL8 truth value of the corresponding date
* @throw cudf::logic_error if input column datatype is not a TIMESTAMP
*/
std::unique_ptr<cudf::column> is_leap_year(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Extract the number of days in the month
*
* output[i] contains the number of days in the month of date `column[i]`
* output[i] is null if `column[i]` is null
*
* @throw cudf::logic_error if input column datatype is not a TIMESTAMP
*
* @param column cudf::column_view of the input datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
* @return cudf::column of datatype INT16 of days in month of the corresponding date
*/
std::unique_ptr<cudf::column> days_in_month(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the quarter of the date
*
* `output[i]` will be a value from {1, 2, 3, 4} corresponding to the quarter of month given by
* `column[i]`. It will be null if the input row at `column[i]` is null.
*
* @throw cudf::logic_error if input column datatype is not a TIMESTAMP
*
* @param column The input column containing datetime values
* @param mr Device memory resource used to allocate device memory of the returned column
* @return A column of INT16 type indicating which quarter the date is in
*/
std::unique_ptr<cudf::column> extract_quarter(
cudf::column_view const& column,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Fixed frequencies supported by datetime rounding functions ceil, floor, round.
*
*/
enum class rounding_frequency : int32_t {
DAY,
HOUR,
MINUTE,
SECOND,
MILLISECOND,
MICROSECOND,
NANOSECOND
};
/**
* @brief Round datetimes up to the nearest multiple of the given frequency.
*
* @param column cudf::column_view of the input datetime values
* @param freq rounding_frequency indicating the frequency to round up to
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @throw cudf::logic_error if input column datatype is not TIMESTAMP.
* @return cudf::column of the same datetime resolution as the input column
*/
std::unique_ptr<cudf::column> ceil_datetimes(
cudf::column_view const& column,
rounding_frequency freq,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Round datetimes down to the nearest multiple of the given frequency.
*
* @param column cudf::column_view of the input datetime values
* @param freq rounding_frequency indicating the frequency to round down to
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @throw cudf::logic_error if input column datatype is not TIMESTAMP.
* @return cudf::column of the same datetime resolution as the input column
*/
std::unique_ptr<cudf::column> floor_datetimes(
cudf::column_view const& column,
rounding_frequency freq,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Round datetimes to the nearest multiple of the given frequency.
*
* @param column cudf::column_view of the input datetime values
* @param freq rounding_frequency indicating the frequency to round to
* @param mr Device memory resource used to allocate device memory of the returned column
*
* @throw cudf::logic_error if input column datatype is not TIMESTAMP.
* @return cudf::column of the same datetime resolution as the input column
*/
std::unique_ptr<cudf::column> round_datetimes(
cudf::column_view const& column,
rounding_frequency freq,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace datetime
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include
|
rapidsai_public_repos/cudf/cpp/include/cudf/interop.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// We disable warning 611 because the `arrow::TableBatchReader` only partially
// overrides the `ReadNext` method of `arrow::RecordBatchReader::ReadNext`,
// triggering warning 611-D from nvcc.
#ifdef __CUDACC__
#pragma nv_diag_suppress 611
#pragma nv_diag_suppress 2810
#endif
#include <arrow/api.h>
#ifdef __CUDACC__
#pragma nv_diag_default 611
#pragma nv_diag_default 2810
#endif
#include <cudf/column/column.hpp>
#include <cudf/detail/transform.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
struct DLManagedTensor;
namespace cudf {
/**
* @addtogroup interop_dlpack
* @{
* @file
*/
/**
* @brief Convert a DLPack DLTensor into a cudf table
*
 * The `device_type` of the DLTensor must be `kDLCPU`, `kDLCUDA`, or
 * `kDLCUDAHost`, and `device_id` must match the current device. The `ndim`
* must be set to 1 or 2. The `dtype` must have 1 lane and the bitsize must
* match a supported `cudf::data_type`.
*
* @note The managed tensor is not deleted by this function.
*
 * @throw cudf::logic_error if any of the DLTensor fields are unsupported
*
* @param managed_tensor a 1D or 2D column-major (Fortran order) tensor
* @param mr Device memory resource used to allocate the returned table's device memory
*
* @return Table with a copy of the tensor data
*/
std::unique_ptr<table> from_dlpack(
DLManagedTensor const* managed_tensor,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Convert a cudf table into a DLPack DLTensor
*
* All columns must have the same data type and this type must be numeric. The
* columns may be nullable, but the null count must be zero. If the input table
* is empty or has zero rows, the result will be nullptr.
*
* @note The `deleter` method of the returned `DLManagedTensor` must be used to
* free the memory allocated for the tensor.
*
* @throw cudf::logic_error if the data types are not equal or not numeric,
* or if any of columns have non-zero null count
*
* @param input Table to convert to DLPack
* @param mr Device memory resource used to allocate the returned DLPack tensor's device memory
*
* @return 1D or 2D DLPack tensor with a copy of the table data, or nullptr
*/
DLManagedTensor* to_dlpack(
table_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
/**
* @addtogroup interop_arrow
* @{
* @file
*/
/**
* @brief Detailed metadata information for arrow array.
*
* As of now this contains only name in the hierarchy of children of cudf column,
* but in future this can be updated as per requirement.
*/
struct column_metadata {
  std::string name;                            ///< Name of the column
  std::vector<column_metadata> children_meta;  ///< Metadata of children of the column

  /**
   * @brief Construct a new column metadata object with the given column name.
   *
   * Not `explicit`, so a `std::string` (or string literal) converts implicitly
   * to `column_metadata`; callers rely on this when building metadata vectors.
   *
   * @param _name Name of the column
   */
  column_metadata(std::string const& _name) : name(_name) {}

  /// Default-construct metadata with an empty name and no children.
  column_metadata() = default;
};
/**
* @brief Create `arrow::Table` from cudf table `input`
*
* Converts the `cudf::table_view` to `arrow::Table` with the provided
* metadata `column_names`.
*
* @throws cudf::logic_error if `column_names` size doesn't match with number of columns.
*
* @param input table_view that needs to be converted to arrow Table
* @param metadata Contains hierarchy of names of columns and children
* @param stream CUDA stream used for device memory operations and kernel launches
* @param ar_mr arrow memory pool to allocate memory for arrow Table
* @return arrow Table generated from `input`
*
* @note For decimals, since the precision is not stored for them in libcudf,
* it will be converted to an Arrow decimal128 that has the widest-precision the cudf decimal type
* supports. For example, numeric::decimal32 will be converted to Arrow decimal128 of the precision
* 9 which is the maximum precision for 32-bit types. Similarly, numeric::decimal128 will be
* converted to Arrow decimal128 of the precision 38.
*/
std::shared_ptr<arrow::Table> to_arrow(table_view input,
std::vector<column_metadata> const& metadata = {},
rmm::cuda_stream_view stream = cudf::get_default_stream(),
arrow::MemoryPool* ar_mr = arrow::default_memory_pool());
/**
* @brief Create `arrow::Scalar` from cudf scalar `input`
*
* Converts the `cudf::scalar` to `arrow::Scalar`.
*
* @param input scalar that needs to be converted to arrow Scalar
* @param metadata Contains hierarchy of names of columns and children
* @param stream CUDA stream used for device memory operations and kernel launches
* @param ar_mr arrow memory pool to allocate memory for arrow Scalar
* @return arrow Scalar generated from `input`
*
* @note For decimals, since the precision is not stored for them in libcudf,
* it will be converted to an Arrow decimal128 that has the widest-precision the cudf decimal type
* supports. For example, numeric::decimal32 will be converted to Arrow decimal128 of the precision
* 9 which is the maximum precision for 32-bit types. Similarly, numeric::decimal128 will be
* converted to Arrow decimal128 of the precision 38.
*/
std::shared_ptr<arrow::Scalar> to_arrow(cudf::scalar const& input,
column_metadata const& metadata = {},
rmm::cuda_stream_view stream = cudf::get_default_stream(),
arrow::MemoryPool* ar_mr = arrow::default_memory_pool());
/**
* @brief Create `cudf::table` from given arrow Table input
*
* @param input arrow:Table that needs to be converted to `cudf::table`
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate `cudf::table`
* @return cudf table generated from given arrow Table
*/
std::unique_ptr<table> from_arrow(
arrow::Table const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Create `cudf::scalar` from given arrow Scalar input
*
* @param input `arrow::Scalar` that needs to be converted to `cudf::scalar`
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate `cudf::scalar`
* @return cudf scalar generated from given arrow Scalar
*/
std::unique_ptr<cudf::scalar> from_arrow(
arrow::Scalar const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/labeling/label_bins.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/types.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
/**
* @addtogroup label_bins
* @{
* @file
* @brief APIs for labeling values by bin.
*/
/**
* @brief Enum used to define whether or not bins include their boundary points.
*/
enum class inclusive { YES, NO };
/**
* @brief Labels elements based on membership in the specified bins.
*
* A bin `i` is defined by `left_edges[i], right_edges[i]`. Whether the edges are inclusive or
* not is determined by `left_inclusive` and `right_inclusive`, respectively.
*
 * A value `input[j]` belongs to bin `i` if `input[j]` is contained in the range `left_edges[i],
 * right_edges[i]` (with the specified inclusiveness) and `label[j] == i`. If `input[j]` does not
 * belong to any bin, then `label[j]` is NULL.
*
* Notes:
* - If an empty set of edges is provided, all elements in `input` are labeled NULL.
* - NULL elements in `input` belong to no bin and their corresponding label is NULL.
* - NaN elements in `input` belong to no bin and their corresponding label is NULL.
* - Bins must be provided in monotonically increasing order, otherwise behavior is undefined.
* - If two or more bins overlap, behavior is undefined.
*
 * @throws cudf::logic_error if the types of `input`, `left_edges`, and `right_edges` do not
 * all match.
* @throws cudf::logic_error if `left_edges.size() != right_edges.size()`
* @throws cudf::logic_error if `left_edges.has_nulls()` or `right_edges.has_nulls()`
*
* @param input The input elements to label according to the specified bins.
* @param left_edges Values of the left edge of each bin.
* @param left_inclusive Whether or not the left edge is inclusive.
* @param right_edges Value of the right edge of each bin.
* @param right_inclusive Whether or not the right edge is inclusive.
* @param mr Device memory resource used to allocate the returned column's device.
* @return The integer labels of the elements in `input` according to the specified bins.
*/
std::unique_ptr<column> label_bins(
column_view const& input,
column_view const& left_edges,
inclusive left_inclusive,
column_view const& right_edges,
inclusive right_inclusive,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/table/table_view.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
#include <cudf/types.hpp>
#include <algorithm>
#include <vector>
/**
* @file
* @brief Class definitions for `(mutable)_table_view`
*
* A `(mutable_)table_view` is a set of `(mutable_)column_view`s of equal
* size.
*
* A `(mutable_)table_view` is non-owning and trivially copyable and should be
* passed by value.
*/
namespace cudf {
namespace detail {
/**
* @brief Base class for a table of `ColumnView`s
*
* This class should not be used directly. Instead:
* - `table_view` should be used for a table of columns of type `column_view`
* - `mutable_table_view` should be used for a table of columns of type `mutable_column_view`
*
* All public constructors and member functions of `table_view_base` are
* available in both `table_view` and `mutable_table_view`.
*
* @tparam ColumnView The type of column view the table contains:
* expects column_view or mutable_column_view
*/
template <typename ColumnView>
class table_view_base {
  // Fix: the diagnostic previously named "column_view_base", which is not one
  // of the accepted types; the condition accepts column_view or
  // mutable_column_view, so the message now says exactly that.
  static_assert(std::is_same_v<ColumnView, column_view> or
                  std::is_same_v<ColumnView, mutable_column_view>,
                "table_view_base can only be instantiated with column_view or "
                "mutable_column_view.");

 private:
  std::vector<ColumnView> _columns{};  ///< ColumnViews to columns of equal size
  size_type _num_rows{};               ///< The number of elements in every column

 public:
  using iterator       = decltype(std::begin(_columns));   ///< Iterator type for the table
  using const_iterator = decltype(std::cbegin(_columns));  ///< const iterator type for the table

  /**
   * @brief Construct a table from a vector of column views
   *
   * @note Because a `std::vector` is constructible from a
   * `std::initializer_list`, this constructor also supports the following
   * usage:
   * ```
   * column_view c0, c1, c2;
   * ...
   * table_view t{{c0,c1,c2}}; // Creates a `table` from c0, c1, c2
   * ```
   *
   * @throws cudf::logic_error If all views do not have the same size
   *
   * @param cols The vector of columns to construct the table from
   */
  explicit table_view_base(std::vector<ColumnView> const& cols);

  /**
   * @brief Returns an iterator to the first view in the `table`.
   *
   * @return An iterator to the first column_view
   */
  iterator begin() noexcept { return std::begin(_columns); }

  /**
   * @brief Returns an iterator to the first view in the `table`.
   *
   * @return An iterator to the first view in the `table`
   */
  [[nodiscard]] const_iterator begin() const noexcept { return std::begin(_columns); }

  /**
   * @brief Returns an iterator one past the last column view in the `table`.
   *
   * `end()` acts as a place holder. Attempting to dereference it results in
   * undefined behavior.
   *
   * @return An iterator to one past the last column view in the `table`
   */
  iterator end() noexcept { return std::end(_columns); }

  /**
   * @brief Returns an iterator one past the last column view in the `table`.
   *
   * `end()` acts as a place holder. Attempting to dereference it results in
   * undefined behavior.
   *
   * @return An iterator to one past the last column view in the `table`
   */
  [[nodiscard]] const_iterator end() const noexcept { return std::end(_columns); }

  /**
   * @brief Returns a reference to the view of the specified column
   *
   * @throws std::out_of_range
   * If `column_index` is out of the range [0, num_columns)
   *
   * @param column_index The index of the desired column
   * @return A reference to the desired column
   */
  ColumnView const& column(size_type column_index) const;

  /**
   * @brief Returns the number of columns
   *
   * @return The number of columns
   */
  [[nodiscard]] size_type num_columns() const noexcept
  {
    // Explicit cast: vector::size() returns std::size_t; silently narrowing it
    // to size_type (int32_t) draws conversion warnings on some compilers.
    return static_cast<size_type>(_columns.size());
  }

  /**
   * @brief Returns the number of rows
   *
   * @return The number of rows
   */
  [[nodiscard]] size_type num_rows() const noexcept { return _num_rows; }

  /**
   * @brief Returns true if `num_columns()` returns zero, or false otherwise
   *
   * @note The return type is `size_type` (not `bool`) for historical reasons;
   * it is kept unchanged here to preserve the public interface.
   *
   * @return True if `num_columns()` returns zero, or false otherwise
   */
  [[nodiscard]] size_type is_empty() const noexcept { return num_columns() == 0; }

  table_view_base() = default;

  ~table_view_base() = default;

  table_view_base(table_view_base const&) = default;  ///< Copy constructor

  table_view_base(table_view_base&&) = default;  ///< Move constructor

  /**
   * @brief Copy assignment operator
   *
   * @return Reference to this object
   */
  table_view_base& operator=(table_view_base const&) = default;

  /**
   * @brief Move assignment operator
   *
   * @return Reference to this object (after transferring ownership)
   */
  table_view_base& operator=(table_view_base&&) = default;
};
/**
* @brief Determine if any nested columns exist in a given table.
*
* @param table The input table
* @return Whether nested columns exist in the input table
*/
bool has_nested_columns(table_view const& table);
} // namespace detail
/**
* @brief A set of cudf::column_view's of the same size.
*
* @ingroup table_classes
*
* All public member functions and constructors are inherited from
* `table_view_base<column_view>`.
*/
class table_view : public detail::table_view_base<column_view> {
  using detail::table_view_base<column_view>::table_view_base;

 public:
  using ColumnView = column_view;  ///< The type of column view the table contains

  table_view() = default;

  /**
   * @brief Construct a single table view by concatenating the columns of
   * several table views.
   *
   * @note Since a `std::vector` can be built from a `std::initializer_list`,
   * this also allows:
   * ```
   * table_view t0, t1, t2;
   * ...
   * table_view t{{t0,t1,t2}}; // Creates a `table` from the columns of
   *                              t0, t1, t2
   * ```
   *
   * @throws cudf::logic_error
   * If number of rows mismatch
   *
   * @param views The vector of table views to construct the table from
   */
  table_view(std::vector<table_view> const& views);

  /**
   * @brief Builds a table_view from a range of column indices.
   *
   * @throws std::out_of_range
   * If any index is outside [0, num_columns())
   *
   * @param begin Beginning of the range
   * @param end Ending of the range
   * @return A table_view holding the columns of this table selected by the
   * indices in the range
   */
  template <typename InputIterator>
  table_view select(InputIterator begin, InputIterator end) const
  {
    std::vector<column_view> chosen;
    chosen.reserve(std::distance(begin, end));
    for (auto it = begin; it != end; ++it) {
      chosen.push_back(this->column(*it));
    }
    return table_view(chosen);
  }

  /**
   * @brief Builds a table_view from a set of specified column indices.
   *
   * @throws std::out_of_range
   * If any element in `column_indices` is outside [0, num_columns())
   *
   * @param column_indices Indices of columns in the table
   * @return A table_view holding the columns of this table selected by
   * `column_indices`
   */
  [[nodiscard]] table_view select(std::vector<size_type> const& column_indices) const;
};
/**
 * @brief A non-owning set of `mutable_column_view`s of equal row count.
 *
 * @ingroup table_classes
 *
 * All public member functions and constructors are inherited from
 * `table_view_base<mutable_column_view>`.
 */
class mutable_table_view : public detail::table_view_base<mutable_column_view> {
  using detail::table_view_base<mutable_column_view>::table_view_base;

 public:
  using ColumnView = mutable_column_view;  ///< The type of column views in the table

  mutable_table_view() = default;

  /**
   * @brief Returns the column at the specified index
   *
   * @param column_index The index of the desired column
   * @return A mutable column view reference to the desired column
   */
  [[nodiscard]] mutable_column_view& column(size_type column_index) const
  {
    // The base class exposes only a const reference; cast it back to mutable,
    // which is safe because this table was built from mutable column views.
    auto const& base_col = table_view_base::column(column_index);
    return const_cast<mutable_column_view&>(base_col);
  }

  /**
   * @brief Creates an immutable `table_view` of the columns
   */
  operator table_view();

  /**
   * @brief Construct a mutable table view by horizontally concatenating other
   * mutable table views.
   *
   * @note Because a `std::vector` is constructible from a
   * `std::initializer_list`, this constructor also supports the following
   * usage:
   * ```
   * table_view t0, t1, t2;
   * ...
   * table_view t{{t0,t1,t2}}; // Creates a `table` from the columns of
   * t0, t1, t2
   * ```
   *
   * @throws cudf::logic_error
   * If number of rows mismatch
   *
   * @param views The vector of table views to construct the table from
   */
  mutable_table_view(std::vector<mutable_table_view> const& views);
};
/**
 * @brief Returns true if any column in the table is nullable (top level only,
 * not the entire column hierarchy).
 *
 * @param view The table to check for nullability
 * @return True if any of the columns in the table is nullable, false otherwise
 */
inline bool nullable(table_view const& view)
{
  for (auto const& col : view) {
    if (col.nullable()) { return true; }
  }
  return false;
}
/**
 * @brief Returns true if the table has nulls in any of its columns.
 *
 * This checks for nulls in the columns themselves, but not in any of the
 * columns' children.
 *
 * @param view The table to check for nulls
 * @return True if the table has nulls in any of its columns, false otherwise
 */
inline bool has_nulls(table_view const& view)
{
  for (auto const& col : view) {
    if (col.has_nulls()) { return true; }
  }
  return false;
}
/**
 * @brief Returns true if the table has nulls anywhere in its column hierarchy.
 *
 * Each column and, recursively, all of its children are inspected.
 *
 * @param input The table to check for nulls
 * @return True if the table has nulls in any of its columns hierarchy, false otherwise
 */
inline bool has_nested_nulls(table_view const& input)
{
  for (auto const& col : input) {
    if (col.has_nulls()) { return true; }
    for (auto child = col.child_begin(); child != col.child_end(); ++child) {
      // Recurse into each child by wrapping it in a single-column table.
      if (has_nested_nulls(table_view{{*child}})) { return true; }
    }
  }
  return false;
}
/**
 * @brief Returns true if the table has a nullable column at any level of the
 * column hierarchy.
 *
 * @param input The table to check for nullable columns
 * @return True if the table has nullable columns at any level of the column hierarchy, false
 * otherwise
 */
inline bool has_nested_nullable_columns(table_view const& input)
{
  for (auto const& col : input) {
    if (col.nullable()) { return true; }
    for (auto child = col.child_begin(); child != col.child_end(); ++child) {
      // Recurse into each child by wrapping it in a single-column table.
      if (has_nested_nullable_columns(table_view{{*child}})) { return true; }
    }
  }
  return false;
}
/**
* @brief The function to collect all nullable columns at all nested levels in a given table.
*
* @param table The input table
* @return A vector containing all nullable columns in the input table
*/
std::vector<column_view> get_nullable_columns(table_view const& table);
/**
 * @brief Checks whether two `table_view`s have the same number of columns and
 * matching column types, position by position.
 *
 * @param lhs left-side table_view operand
 * @param rhs right-side table_view operand
 * @return boolean comparison result
 */
inline bool have_same_types(table_view const& lhs, table_view const& rhs)
{
  // Tables with different column counts can never match.
  if (lhs.num_columns() != rhs.num_columns()) { return false; }
  return std::equal(
    lhs.begin(), lhs.end(), rhs.begin(), [](column_view const& lcol, column_view const& rcol) {
      return lcol.type() == rcol.type();
    });
}
/**
* @brief Copy column_views from a table_view into another table_view according to
* a column indices map.
*
* The output table view, `out_table` is a copy of the `target` table_view but with
* elements updated according to `out_table[map[i]] = source[i]` where `i` is
* `[0,source.size())`
*
* @param source Table of new columns to scatter into the output table view.
 * @param map The indices where each source column should be copied into the output.
* @param target Table to receive the updated column views.
* @return New table_view.
*/
table_view scatter_columns(table_view const& source,
std::vector<size_type> const& map,
table_view const& target);
namespace detail {
/**
* @brief Indicates whether respective columns in input tables are relationally comparable.
*
* @param lhs The first table
* @param rhs The second table (may be the same table as `lhs`)
 * @return true if all corresponding columns of the `lhs` and `rhs` tables are comparable.
 * @return false if any corresponding columns of the `lhs` and `rhs` tables are not comparable.
*/
template <typename TableView>
bool is_relationally_comparable(TableView const& lhs, TableView const& rhs);
// @cond
extern template bool is_relationally_comparable<table_view>(table_view const& lhs,
table_view const& rhs);
extern template bool is_relationally_comparable<mutable_table_view>(mutable_table_view const& lhs,
mutable_table_view const& rhs);
// @endcond
} // namespace detail
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/table/table_device_view.cuh
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <cassert>
#include <memory>
#include <numeric>
/**
* @file table_device_view.cuh
* @brief Table device view class definitions
*/
namespace cudf {
namespace detail {
/**
* @brief Base class for a device table of `ColumnDeviceView`s
*
* This class should not be used directly. Instead:
* - `table_device_view` should be used for a table of columns of type `column_device_view`
* - `mutable_table_device_view` should be used for a table of columns of type
* `mutable_column_device_view`
*
* All public constructors and member functions of `table_device_view_base` are
* available in both `table_device_view` and `mutable_table_device_view`.
*
* @tparam ColumnDeviceView The type of column device view the table contains:
* expects column_device_view or mutable_column_device_view
* @tparam HostTableView The type of table view used to create the table device view:
* expects table_view or mutable_table_view
*/
template <typename ColumnDeviceView, typename HostTableView>
class table_device_view_base {
 public:
  // Default construction is disallowed: a device view is only meaningful when
  // built from a host table view, which populates the device column array.
  table_device_view_base() = delete;
  ~table_device_view_base() = default;
  table_device_view_base(table_device_view_base const&) = default;  ///< Copy constructor
  table_device_view_base(table_device_view_base&&) = default;       ///< Move constructor
  /**
   * @brief Copy assignment operator
   *
   * @return Reference to this object
   */
  table_device_view_base& operator=(table_device_view_base const&) = default;
  /**
   * @brief Move assignment operator
   *
   * @return Reference to this object (after transferring ownership)
   */
  table_device_view_base& operator=(table_device_view_base&&) = default;
  /**
   * @brief Returns an iterator to the first view in the `table`.
   *
   * @note Device-only: `_columns` points to an array of view objects in device
   * memory, so the returned iterator must only be dereferenced in device code.
   *
   * @return An iterator to the first view in the `table`
   */
  __device__ ColumnDeviceView* begin() const noexcept { return _columns; }
  /**
   * @brief Returns an iterator one past the last column view in the `table`.
   *
   * `end()` acts as a place holder. Attempting to dereference it results in
   * undefined behavior.
   *
   * @return An iterator to one past the last column view in the `table`
   */
  __device__ ColumnDeviceView* end() const noexcept { return _columns + _num_columns; }
  /**
   * @brief Returns a reference to the view of the specified column
   *
   * Bounds are checked only via `assert`, i.e. in debug builds.
   *
   * @param column_index The index of the desired column
   * @return A reference to the desired column
   */
  __device__ ColumnDeviceView const& column(size_type column_index) const noexcept
  {
    assert(column_index >= 0);
    assert(column_index < _num_columns);
    return _columns[column_index];
  }
  /**
   * @brief Returns a reference to the view of the specified column
   *
   * Non-const overload; bounds are checked only via `assert` (debug builds).
   *
   * @param column_index The index of the desired column
   * @return A reference to the desired column
   */
  __device__ ColumnDeviceView& column(size_type column_index) noexcept
  {
    assert(column_index >= 0);
    assert(column_index < _num_columns);
    return _columns[column_index];
  }
  /**
   * @brief Returns the number of columns
   *
   * Callable from both host and device code.
   *
   * @return The number of columns
   */
  [[nodiscard]] __host__ __device__ size_type num_columns() const noexcept { return _num_columns; }
  /**
   * @brief Returns the number of rows
   *
   * Callable from both host and device code.
   *
   * @return The number of rows
   */
  [[nodiscard]] __host__ __device__ size_type num_rows() const noexcept { return _num_rows; }
  /**
   * @brief Destroy the `table_device_view` object.
   *
   * @note Does not free the table data, simply frees the device memory
   * allocated to hold the constituent column views.
   */
  void destroy();

 private:
  ColumnDeviceView* _columns{};  ///< Array of view objects in device memory
  size_type _num_rows{};         ///< Number of rows shared by all columns
  size_type _num_columns{};      ///< Number of entries in `_columns`

 protected:
  /**
   * @brief Construct a new table device view base object from host table_view
   *
   * @param source_view The host table_view to create table device view from
   * @param stream The CUDA stream to use for device memory allocation
   */
  table_device_view_base(HostTableView source_view, rmm::cuda_stream_view stream);
  /// Pointer to device memory holding the descendant storage
  rmm::device_buffer* _descendant_storage{};
};
} // namespace detail
/**
 * @brief Table device view that is usable in device memory
 */
class table_device_view : public detail::table_device_view_base<column_device_view, table_view> {
 public:
  /**
   * @brief Factory that builds a table device view usable in device code.
   *
   * Views of `source_view`'s children are allocated in and copied to device
   * memory so they can be accessed from kernels.
   *
   * The returned `std::unique_ptr<table_device_view>` carries a custom deleter
   * that releases the device memory allocated for the children.
   *
   * @param source_view The table view whose contents will be copied to create a new table
   * @param stream CUDA stream used for device memory operations
   * @return A `unique_ptr` to a `table_device_view` that makes the data from `source_view`
   * available in device memory
   */
  static auto create(table_view source_view,
                     rmm::cuda_stream_view stream = cudf::get_default_stream())
  {
    auto const free_device_views = [](table_device_view* view_ptr) { view_ptr->destroy(); };
    return std::unique_ptr<table_device_view, decltype(free_device_views)>{
      new table_device_view(source_view, stream), free_device_views};
  }

 private:
  // Construction goes through create() so the deleter is always attached.
  table_device_view(table_view source_view, rmm::cuda_stream_view stream)
    : detail::table_device_view_base<column_device_view, table_view>(source_view, stream)
  {
  }
};
/**
 * @brief Mutable table device view that is usable in device memory
 *
 * Elements of the table can be modified in device memory.
 */
class mutable_table_device_view
  : public detail::table_device_view_base<mutable_column_device_view, mutable_table_view> {
 public:
  /**
   * @brief Factory that builds a mutable table device view usable in device code.
   *
   * Views of `source_view`'s children are allocated in and copied to device
   * memory so they can be accessed from kernels.
   *
   * The returned `std::unique_ptr<mutable_table_device_view>` carries a custom
   * deleter that releases the device memory allocated for the children.
   *
   * @param source_view The table view whose contents will be copied to create a new table
   * @param stream CUDA stream used for device memory operations
   * @return A `unique_ptr` to a `mutable_table_device_view` that makes the data from `source_view`
   * available in device memory
   */
  static auto create(mutable_table_view source_view,
                     rmm::cuda_stream_view stream = cudf::get_default_stream())
  {
    auto const free_device_views = [](mutable_table_device_view* view_ptr) { view_ptr->destroy(); };
    return std::unique_ptr<mutable_table_device_view, decltype(free_device_views)>{
      new mutable_table_device_view(source_view, stream), free_device_views};
  }

 private:
  // Construction goes through create() so the deleter is always attached.
  mutable_table_device_view(mutable_table_view source_view, rmm::cuda_stream_view stream)
    : detail::table_device_view_base<mutable_column_device_view, mutable_table_view>(source_view,
                                                                                     stream)
  {
  }
};
/**
 * @brief Copies the contents of a table_view to a column device view in contiguous device memory
 *
 * @tparam ColumnDeviceView The column device view type to copy to
 * @tparam HostTableView The type of the table_view to copy from
 * @param source_view The table_view to copy from
 * @param stream The stream to use for device memory allocation
 * @return tuple of device_buffer and @p ColumnDeviceView device pointer
 */
template <typename ColumnDeviceView, typename HostTableView>
auto contiguous_copy_column_device_views(HostTableView source_view, rmm::cuda_stream_view stream)
{
  // First calculate the size of memory needed to hold the
  // table's ColumnDeviceViews. This is done by calling extent()
  // for each of the table's ColumnViews columns.
  std::size_t views_size_bytes = std::accumulate(
    source_view.begin(), source_view.end(), std::size_t{0}, [](std::size_t init, auto col) {
      return init + ColumnDeviceView::extent(col);
    });
  // pad the allocation for aligning the first pointer
  auto padded_views_size_bytes = views_size_bytes + std::size_t{alignof(ColumnDeviceView) - 1};
  // A buffer of CPU memory is allocated to hold the ColumnDeviceView
  // objects. Once filled, the CPU memory is then copied to device memory
  // and the pointer is set in the d_columns member.
  std::vector<int8_t> h_buffer(padded_views_size_bytes);
  // Each ColumnDeviceView instance may have child objects which may
  // require setting some internal device pointers before being copied
  // from CPU to device.
  // Allocate the device memory to be used in the result.
  // We need this pointer in order to pass it down when creating the
  // ColumnDeviceViews so the column can set the pointer(s) for any
  // of its child objects.
  // align both h_ptr, d_ptr
  auto descendant_storage = std::make_unique<rmm::device_buffer>(padded_views_size_bytes, stream);
  // Align the start of both buffers to alignof(ColumnDeviceView); the padding
  // added above guarantees views_size_bytes still fit after alignment.
  void* h_ptr = detail::align_ptr_for_type<ColumnDeviceView>(h_buffer.data());
  void* d_ptr = detail::align_ptr_for_type<ColumnDeviceView>(descendant_storage->data());
  // Build the views in host memory, wiring child pointers to their eventual
  // device addresses (relative to d_ptr) before the single bulk copy below.
  auto d_columns = detail::child_columns_to_device_array<ColumnDeviceView>(
    source_view.begin(), source_view.end(), h_ptr, d_ptr);
  CUDF_CUDA_TRY(cudaMemcpyAsync(d_ptr, h_ptr, views_size_bytes, cudaMemcpyDefault, stream.value()));
  // Synchronize so h_buffer (a local) can be safely destroyed on return.
  stream.synchronize();
  // The caller takes ownership of the device buffer; d_columns points into it.
  return std::make_tuple(std::move(descendant_storage), d_columns);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/table/table.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>
/**
* @file
* @brief Class definition for cudf::table
*/
namespace cudf {
/**
 * @brief An owning collection of cudf::column's, all of the same size.
 *
 * @ingroup table_classes
 */
class table {
 public:
  table() = default;
  ~table() = default;
  table(table&&) = default;  ///< Move constructor
  table& operator=(table const&) = delete;
  table& operator=(table&&) = delete;

  /**
   * @brief Construct a new table by copying the contents of another table.
   *
   * @param other The table to copy
   */
  table(table const& other);

  /**
   * @brief Construct a new table by taking ownership of a vector of columns.
   *
   * @param columns The vector of `unique_ptr`s to columns whose contents will
   * be moved into the new table.
   */
  table(std::vector<std::unique_ptr<column>>&& columns);

  /**
   * @brief Construct a new `table` by copying the contents of a `table_view`.
   *
   * @param view The view whose contents will be copied to create a new `table`
   * @param stream CUDA stream used for device memory operations.
   * @param mr Device memory resource used for allocating the device memory for the new columns
   */
  table(table_view view,
        rmm::cuda_stream_view stream = cudf::get_default_stream(),
        rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());

  /**
   * @brief Returns the number of columns in the table
   *
   * @return The number of columns in the table
   */
  [[nodiscard]] size_type num_columns() const noexcept { return _columns.size(); }

  /**
   * @brief Returns the number of rows
   *
   * @return The number of rows
   */
  [[nodiscard]] size_type num_rows() const noexcept { return _num_rows; }

  /**
   * @brief Returns an immutable, non-owning `table_view` of the contents of
   * this `table`.
   *
   * @return An immutable, non-owning `table_view` of the contents of this `table`
   */
  [[nodiscard]] table_view view() const;

  /**
   * @brief Conversion operator to an immutable, non-owning `table_view` of the
   * contents of this `table`.
   */
  operator table_view() const { return this->view(); }

  /**
   * @brief Returns a mutable, non-owning `mutable_table_view` of the contents
   * of this `table`.
   *
   * @return A mutable, non-owning `mutable_table_view` of the contents of this `table`
   */
  mutable_table_view mutable_view();

  /**
   * @brief Conversion operator to a mutable, non-owning `mutable_table_view`
   * of the contents of this `table`.
   */
  operator mutable_table_view() { return this->mutable_view(); }

  /**
   * @brief Releases ownership of the `column`s by returning a vector of
   * `unique_ptr`s to the constituent columns.
   *
   * After `release()`, `num_columns() == 0` and `num_rows() == 0`
   *
   * @returns A vector of `unique_ptr`s to the constituent columns
   */
  std::vector<std::unique_ptr<column>> release();

  /**
   * @brief Returns a table_view built from a range of column indices.
   *
   * @throws std::out_of_range
   * If any index is outside [0, num_columns())
   *
   * @param begin Beginning of the index range
   * @param end Ending of the index range
   * @return A table_view holding views of the columns selected by the range
   */
  template <typename InputIterator>
  table_view select(InputIterator begin, InputIterator end) const
  {
    std::vector<column_view> chosen;
    chosen.reserve(static_cast<std::size_t>(std::distance(begin, end)));
    for (auto it = begin; it != end; ++it) {
      // at() provides the documented out-of-range check.
      chosen.push_back(_columns.at(*it)->view());
    }
    return table_view(chosen);
  }

  /**
   * @brief Returns a table_view with the specified set of columns.
   *
   * @throws std::out_of_range
   * If any element in `column_indices` is outside [0, num_columns())
   *
   * @param column_indices Indices of columns in the table
   * @return A table_view holding views of the columns selected by `column_indices`
   */
  [[nodiscard]] table_view select(std::vector<cudf::size_type> const& column_indices) const
  {
    return select(column_indices.begin(), column_indices.end());
  }

  /**
   * @brief Returns a reference to the specified column
   *
   * @throws std::out_of_range
   * If column_index is out of the range [0, num_columns)
   *
   * @param column_index Index of the desired column
   * @return A reference to the desired column
   */
  column& get_column(cudf::size_type column_index) { return *(_columns.at(column_index)); }

  /**
   * @brief Returns a const reference to the specified column
   *
   * @throws std::out_of_range
   * If i is out of the range [0, num_columns)
   *
   * @param i Index of the desired column
   * @return A const reference to the desired column
   */
  [[nodiscard]] column const& get_column(cudf::size_type i) const { return *(_columns.at(i)); }

 private:
  std::vector<std::unique_ptr<column>> _columns{};  ///< Owned columns
  size_type _num_rows{};                            ///< Row count shared by all columns
};
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/table/row_operators.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/hashing/detail/hash_functions.cuh>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/sorting.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/equal.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/swap.h>
#include <thrust/transform_reduce.h>
#include <limits>
namespace cudf {
/**
 * @brief Result type of the `element_relational_comparator` function object.
 *
 * Indicates how two elements `a` and `b` compare with one and another.
 *
 * Equivalence is defined as `not (a<b) and not (b<a)`. Elements that are
 * EQUIVALENT may not necessarily be *equal*.
 */
enum class weak_ordering {
  LESS,        ///< Indicates `a` is less than (ordered before) `b`
  EQUIVALENT,  ///< Indicates `a` is ordered neither before nor after `b`
  GREATER      ///< Indicates `a` is greater than (ordered after) `b`
};
namespace detail {
/**
 * @brief Determines the relative ordering of `lhs` with respect to `rhs`
 * using `operator<` only.
 *
 * @param lhs first element
 * @param rhs second element
 * @return Indicates the relationship between the elements in
 * the `lhs` and `rhs` columns.
 */
template <typename Element>
__device__ weak_ordering compare_elements(Element lhs, Element rhs)
{
  if (lhs < rhs) { return weak_ordering::LESS; }
  if (rhs < lhs) { return weak_ordering::GREATER; }
  // Neither orders before the other: weakly equivalent.
  return weak_ordering::EQUIVALENT;
}
} // namespace detail
/**
 * @brief Relational comparison specialization for floating-point `Element`
 * types, ordering `lhs` with respect to `rhs`.
 *
 * NaN values sort after all other values:
 * `[-Inf, -ve, 0, -0, +ve, +Inf, NaN, NaN, null] (for null_order::AFTER)`
 * `[null, -Inf, -ve, 0, -0, +ve, +Inf, NaN, NaN] (for null_order::BEFORE)`
 *
 */
template <typename Element, std::enable_if_t<std::is_floating_point_v<Element>>* = nullptr>
__device__ weak_ordering relational_compare(Element lhs, Element rhs)
{
  bool const lhs_is_nan = isnan(lhs);
  bool const rhs_is_nan = isnan(rhs);
  if (lhs_is_nan) {
    // Two NaNs are equivalent; a lone NaN on the left orders after rhs.
    return rhs_is_nan ? weak_ordering::EQUIVALENT : weak_ordering::GREATER;
  }
  if (rhs_is_nan) { return weak_ordering::LESS; }
  return detail::compare_elements(lhs, rhs);
}
/**
 * @brief Orders two elements given only their null states and the null order.
 *
 * @param lhs_is_null boolean representing if lhs is null
 * @param rhs_is_null boolean representing if rhs is null
 * @param null_precedence null order
 * @return Indicates the relationship between null in lhs and rhs columns.
 */
inline __device__ auto null_compare(bool lhs_is_null, bool rhs_is_null, null_order null_precedence)
{
  if (lhs_is_null) {
    if (rhs_is_null) { return weak_ordering::EQUIVALENT; }  // null <? null
    // null <? x
    return (null_precedence == null_order::BEFORE) ? weak_ordering::LESS : weak_ordering::GREATER;
  }
  if (rhs_is_null) {  // x <? null
    return (null_precedence == null_order::AFTER) ? weak_ordering::LESS : weak_ordering::GREATER;
  }
  // Neither side is null.
  return weak_ordering::EQUIVALENT;
}
/**
 * @brief Relational comparison specialization for non-floating-point
 * `Element` types, ordering `lhs` with respect to `rhs`.
 *
 * @param lhs The first element
 * @param rhs The second element
 * @return Indicates the relationship between the elements in the `lhs` and `rhs` columns
 */
template <typename Element, std::enable_if_t<not std::is_floating_point_v<Element>>* = nullptr>
__device__ weak_ordering relational_compare(Element lhs, Element rhs)
{
  // No NaN handling is needed; defer directly to operator< based comparison.
  return detail::compare_elements(lhs, rhs);
}
/**
 * @brief Equality specialization for floating-point `Element` types where
 * `nan == nan` is treated as true.
 *
 * @param lhs first element
 * @param rhs second element
 * @return `true` if `lhs` == `rhs` else `false`.
 */
template <typename Element, std::enable_if_t<std::is_floating_point_v<Element>>* = nullptr>
__device__ bool equality_compare(Element lhs, Element rhs)
{
  bool const both_nan = isnan(lhs) and isnan(rhs);
  return both_nan or (lhs == rhs);
}
/**
 * @brief Equality specialization for non-floating-point `Element` types.
 *
 * @param lhs first element
 * @param rhs second element
 * @return `true` if `lhs` == `rhs` else `false`.
 */
template <typename Element, std::enable_if_t<not std::is_floating_point_v<Element>>* = nullptr>
__device__ bool equality_compare(Element const lhs, Element const rhs)
{
  // Plain operator== suffices for non-floating-point types.
  return lhs == rhs;
}
/**
 * @brief Performs an equality comparison between two elements in two columns.
 *
 * @tparam Nullate A cudf::nullate type describing how to check for nulls.
 */
template <typename Nullate>
class element_equality_comparator {
 public:
  /**
   * @brief Construct type-dispatched function object for comparing equality
   * between two elements.
   *
   * @note `lhs` and `rhs` may be the same.
   *
   * @param has_nulls Indicates if either input column contains nulls.
   * @param lhs The column containing the first element
   * @param rhs The column containing the second element (may be the same as lhs)
   * @param nulls_are_equal Indicates if two null elements are treated as equivalent
   */
  __host__ __device__
  element_equality_comparator(Nullate has_nulls,
                              column_device_view lhs,
                              column_device_view rhs,
                              null_equality nulls_are_equal = null_equality::EQUAL)
    : lhs{lhs}, rhs{rhs}, nulls{has_nulls}, nulls_are_equal{nulls_are_equal}
  {
  }

  /**
   * @brief Compares the specified elements for equality.
   *
   * @param lhs_element_index The index of the first element
   * @param rhs_element_index The index of the second element
   * @return True if both lhs and rhs element are both nulls and `nulls_are_equal` is true, or equal
   */
  template <typename Element,
            std::enable_if_t<cudf::is_equality_comparable<Element, Element>()>* = nullptr>
  __device__ bool operator()(size_type lhs_element_index,
                             size_type rhs_element_index) const noexcept
  {
    if (nulls) {
      bool const lhs_null = lhs.is_null(lhs_element_index);
      bool const rhs_null = rhs.is_null(rhs_element_index);
      if (lhs_null or rhs_null) {
        // Equal only if both are null and nulls are considered equal.
        return lhs_null and rhs_null and (nulls_are_equal == null_equality::EQUAL);
      }
    }
    return equality_compare(lhs.element<Element>(lhs_element_index),
                            rhs.element<Element>(rhs_element_index));
  }

  // @cond
  template <typename Element,
            std::enable_if_t<not cudf::is_equality_comparable<Element, Element>()>* = nullptr>
  __device__ bool operator()(size_type lhs_element_index, size_type rhs_element_index)
  {
    CUDF_UNREACHABLE("Attempted to compare elements of uncomparable types.");
  }
  // @endcond

 private:
  column_device_view lhs;        ///< Column holding the left-hand elements
  column_device_view rhs;        ///< Column holding the right-hand elements (may alias lhs)
  Nullate nulls;                 ///< Policy controlling whether null checks are performed
  null_equality nulls_are_equal; ///< Whether two nulls compare equal
};
/**
 * @brief Performs an equality comparison between rows of two tables.
 *
 * @tparam Nullate A cudf::nullate type describing how to check for nulls
 */
template <typename Nullate>
class row_equality_comparator {
 public:
  /**
   * @brief Construct a new row equality comparator object
   *
   * @param has_nulls Indicates if either input column contains nulls
   * @param lhs The column containing the first element
   * @param rhs The column containing the second element (may be the same as lhs)
   * @param nulls_are_equal Indicates if two null elements are treated as equivalent
   */
  row_equality_comparator(Nullate has_nulls,
                          table_device_view lhs,
                          table_device_view rhs,
                          null_equality nulls_are_equal = null_equality::EQUAL)
    : lhs{lhs}, rhs{rhs}, nulls{has_nulls}, nulls_are_equal{nulls_are_equal}
  {
    CUDF_EXPECTS(lhs.num_columns() == rhs.num_columns(), "Mismatched number of columns.");
  }

  /**
   * @brief Compares the specified rows for equality.
   *
   * @param lhs_row_index The index of the first row to compare (in the lhs table)
   * @param rhs_row_index The index of the second row to compare (in the rhs table)
   * @return true if both rows are equal, otherwise false
   */
  __device__ bool operator()(size_type lhs_row_index, size_type rhs_row_index) const noexcept
  {
    // Capture by value: this lambda runs in device code.
    auto compare_columns = [=](column_device_view l, column_device_view r) {
      return cudf::type_dispatcher(l.type(),
                                   element_equality_comparator{nulls, l, r, nulls_are_equal},
                                   lhs_row_index,
                                   rhs_row_index);
    };
    // Rows are equal iff every corresponding pair of column elements is equal.
    return thrust::equal(thrust::seq, lhs.begin(), lhs.end(), rhs.begin(), compare_columns);
  }

 private:
  table_device_view lhs;         ///< Table holding the left-hand rows
  table_device_view rhs;         ///< Table holding the right-hand rows (may alias lhs)
  Nullate nulls;                 ///< Policy controlling whether null checks are performed
  null_equality nulls_are_equal; ///< Whether two nulls compare equal
};
/**
 * @brief Performs a relational comparison between two elements in two columns.
 *
 * @tparam Nullate A cudf::nullate type describing how to check for nulls.
 */
template <typename Nullate>
class element_relational_comparator {
 public:
  /**
   * @brief Construct type-dispatched function object for performing a
   * relational comparison between two elements.
   *
   * @note `lhs` and `rhs` may be the same.
   *
   * @param has_nulls Indicates if either input column contains nulls.
   * @param lhs The column containing the first element
   * @param rhs The column containing the second element (may be the same as lhs)
   * @param null_precedence Indicates how null values are ordered with other values
   */
  __host__ __device__ element_relational_comparator(Nullate has_nulls,
                                                    column_device_view lhs,
                                                    column_device_view rhs,
                                                    null_order null_precedence)
    : lhs{lhs}, rhs{rhs}, nulls{has_nulls}, null_precedence{null_precedence}
  {
  }

  /**
   * @brief Construct type-dispatched function object for performing a relational comparison between
   * two elements in two columns, using the default null precedence.
   *
   * @param has_nulls Indicates if either input column contains nulls
   * @param lhs The column containing the first element
   * @param rhs The column containing the second element (may be the same as lhs)
   */
  __host__ __device__ element_relational_comparator(Nullate has_nulls,
                                                    column_device_view lhs,
                                                    column_device_view rhs)
    : lhs{lhs}, rhs{rhs}, nulls{has_nulls}
  {
  }

  /**
   * @brief Performs a relational comparison between the specified elements
   *
   * @param lhs_element_index The index of the first element
   * @param rhs_element_index The index of the second element
   * @return Indicates the relationship between the elements in
   * the `lhs` and `rhs` columns.
   */
  template <typename Element,
            std::enable_if_t<cudf::is_relationally_comparable<Element, Element>()>* = nullptr>
  __device__ weak_ordering operator()(size_type lhs_element_index,
                                      size_type rhs_element_index) const noexcept
  {
    if (nulls) {
      bool const lhs_null = lhs.is_null(lhs_element_index);
      bool const rhs_null = rhs.is_null(rhs_element_index);
      // If at least one side is null the ordering is decided by null precedence.
      if (lhs_null or rhs_null) { return null_compare(lhs_null, rhs_null, null_precedence); }
    }
    return relational_compare(lhs.element<Element>(lhs_element_index),
                              rhs.element<Element>(rhs_element_index));
  }

  // @cond
  template <typename Element,
            std::enable_if_t<not cudf::is_relationally_comparable<Element, Element>()>* = nullptr>
  __device__ weak_ordering operator()(size_type lhs_element_index, size_type rhs_element_index)
  {
    CUDF_UNREACHABLE("Attempted to compare elements of uncomparable types.");
  }
  // @endcond

 private:
  column_device_view lhs;        ///< Column holding the left-hand elements
  column_device_view rhs;        ///< Column holding the right-hand elements (may alias lhs)
  Nullate nulls;                 ///< Policy controlling whether null checks are performed
  null_order null_precedence{};  ///< Ordering of nulls relative to non-null values
};
/**
 * @brief Computes whether one row is lexicographically *less* than another row.
 *
 * Lexicographic ordering is determined by:
 * - Two rows are compared element by element.
 * - The first mismatching element defines which row is lexicographically less
 * or greater than the other.
 *
 * Lexicographic ordering is exactly equivalent to doing an alphabetical sort of
 * two words, for example, `aac` would be *less* than (or precede) `abb`. The
 * second letter in both words is the first non-equal letter, and `a < b`, thus
 * `aac < abb`.
 *
 * @tparam Nullate A cudf::nullate type describing how to check for nulls.
 */
template <typename Nullate>
class row_lexicographic_comparator {
 public:
  /**
   * @brief Construct a function object for performing a lexicographic
   * comparison between the rows of two tables.
   *
   * Behavior is undefined if called with incomparable column types.
   *
   * @throws cudf::logic_error if `lhs.num_columns() != rhs.num_columns()`
   *
   * @param has_nulls Indicates if either input table contains columns with nulls.
   * @param lhs The first table
   * @param rhs The second table (may be the same table as `lhs`)
   * @param column_order Optional, device array the same length as a row that
   * indicates the desired ascending/descending order of each column in a row.
   * If `nullptr`, it is assumed all columns are sorted in ascending order.
   * @param null_precedence Optional, device array the same length as a row
   * and indicates how null values compare to all other for every column. If
   * it is nullptr, then null precedence would be `null_order::BEFORE` for all
   * columns.
   */
  row_lexicographic_comparator(Nullate has_nulls,
                               table_device_view lhs,
                               table_device_view rhs,
                               order const* column_order         = nullptr,
                               null_order const* null_precedence = nullptr)
    : _lhs{lhs},
      _rhs{rhs},
      _nulls{has_nulls},
      _column_order{column_order},
      _null_precedence{null_precedence}
  {
    CUDF_EXPECTS(_lhs.num_columns() == _rhs.num_columns(), "Mismatched number of columns.");
  }
  /**
   * @brief Checks whether the row at `lhs_index` in the `lhs` table compares
   * lexicographically less than the row at `rhs_index` in the `rhs` table.
   *
   * @param lhs_index The index of the row in the `lhs` table to examine
   * @param rhs_index The index of the row in the `rhs` table to examine
   * @return `true` if row from the `lhs` table compares less than row in the
   * `rhs` table
   */
  __device__ bool operator()(size_type lhs_index, size_type rhs_index) const noexcept
  {
    // Loop-invariant: both tables have the same column count (checked in the constructor).
    auto const num_columns = _lhs.num_columns();
    for (size_type i = 0; i < num_columns; ++i) {
      bool ascending = (_column_order == nullptr) or (_column_order[i] == order::ASCENDING);
      null_order null_precedence =
        _null_precedence == nullptr ? null_order::BEFORE : _null_precedence[i];
      auto comparator =
        element_relational_comparator{_nulls, _lhs.column(i), _rhs.column(i), null_precedence};
      weak_ordering state =
        cudf::type_dispatcher(_lhs.column(i).type(), comparator, lhs_index, rhs_index);
      // Equivalent columns do not decide the ordering; move on to the next column.
      if (state == weak_ordering::EQUIVALENT) { continue; }
      return state == (ascending ? weak_ordering::LESS : weak_ordering::GREATER);
    }
    // All columns equivalent: neither row is less than the other.
    return false;
  }

 private:
  // Members are declared in the same order as the constructor's initializer list;
  // a mismatched order triggers -Wreorder and members would actually be initialized
  // in declaration order regardless of the list's textual order.
  table_device_view _lhs;
  table_device_view _rhs;
  Nullate _nulls{};
  order const* _column_order{};
  null_order const* _null_precedence{};
};  // class row_lexicographic_comparator
/**
 * @brief Computes the hash value of an element in the given column.
 *
 * When null checking is enabled, a null element hashes to
 * `std::numeric_limits<hash_value_type>::max()`.
 *
 * @tparam hash_function Hash functor to use for hashing elements.
 * @tparam Nullate A cudf::nullate type describing how to check for nulls.
 */
template <template <typename> class hash_function, typename Nullate>
class element_hasher {
 public:
  /**
   * @brief Returns the hash value of the element at `row_index` in `col`.
   *
   * @tparam T The type of the element to hash
   * @param col The column to hash
   * @param row_index The index of the row to hash
   * @return The hash value of the given element
   */
  template <typename T, CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
  __device__ hash_value_type operator()(column_device_view col, size_type row_index) const
  {
    return (has_nulls && col.is_null(row_index))
             ? std::numeric_limits<hash_value_type>::max()
             : hash_function<T>{}(col.element<T>(row_index));
  }
  /**
   * @brief Overload selected for types without a device element accessor.
   *
   * @tparam T The type of the element to hash
   * @param col The column to hash
   * @param row_index The index of the row to hash
   * @return Never returns; invoking this overload is a dispatch error
   */
  template <typename T, CUDF_ENABLE_IF(not column_device_view::has_element_accessor<T>())>
  __device__ hash_value_type operator()(column_device_view col, size_type row_index) const
  {
    CUDF_UNREACHABLE("Unsupported type in hash.");
  }
  Nullate has_nulls;  ///< A cudf::nullate type describing how to check for nulls.
};
/**
 * @brief Function object for computing the hash value of a row in a column.
 *
 * Like `element_hasher`, but seeds the hash function and allows the caller to
 * choose the hash value produced for null elements.
 *
 * @tparam hash_function Hash functor to use for hashing elements
 * @tparam Nullate A cudf::nullate type describing how to check for nulls
 */
template <template <typename> class hash_function, typename Nullate>
class element_hasher_with_seed {
 public:
  /**
   * @brief Constructs a function object for hashing an element in the given column
   *
   * Null elements hash to the default null hash,
   * `std::numeric_limits<hash_value_type>::max()`.
   *
   * @param has_nulls Indicates if either input column contains nulls
   * @param seed The seed to use for the hash function
   */
  __device__ element_hasher_with_seed(Nullate has_nulls, uint32_t seed)
    : _seed{seed}, _has_nulls{has_nulls}
  {
  }
  /**
   * @brief Constructs a function object for hashing an element in the given column
   *
   * @param has_nulls Indicates if either input column contains nulls
   * @param seed The seed to use for the hash function
   * @param null_hash The hash value to use for null elements
   */
  __device__ element_hasher_with_seed(Nullate has_nulls, uint32_t seed, hash_value_type null_hash)
    : _seed{seed}, _null_hash{null_hash}, _has_nulls{has_nulls}
  {
  }
  /**
   * @brief Returns the hash value of the element at `row_index` in `col`.
   *
   * @tparam T The type of the element to hash
   * @param col The column to hash
   * @param row_index The index of the row to hash
   * @return The hash value of the given element
   */
  template <typename T, CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
  __device__ hash_value_type operator()(column_device_view col, size_type row_index) const
  {
    return (_has_nulls && col.is_null(row_index))
             ? _null_hash
             : hash_function<T>{_seed}(col.element<T>(row_index));
  }
  /**
   * @brief Overload selected for types without a device element accessor.
   *
   * @tparam T The type of the element to hash
   * @param col The column to hash
   * @param row_index The index of the row to hash
   * @return Never returns; invoking this overload is a dispatch error
   */
  template <typename T, CUDF_ENABLE_IF(not column_device_view::has_element_accessor<T>())>
  __device__ hash_value_type operator()(column_device_view col, size_type row_index) const
  {
    CUDF_UNREACHABLE("Unsupported type in hash.");
  }

 private:
  uint32_t _seed{DEFAULT_HASH_SEED};
  hash_value_type _null_hash{std::numeric_limits<hash_value_type>::max()};
  Nullate _has_nulls;
};
/**
 * @brief Computes the hash value of a row in the given table.
 *
 * The seed is applied only when hashing the first column; the remaining
 * columns are hashed unseeded and the per-column hashes are folded together
 * with `hash_combine`.
 *
 * @tparam hash_function Hash functor to use for hashing elements.
 * @tparam Nullate A cudf::nullate type describing how to check for nulls.
 */
template <template <typename> class hash_function, typename Nullate>
class row_hasher {
 public:
  row_hasher() = delete;
  /**
   * @brief Constructs a row_hasher object.
   *
   * Uses the default seed (`DEFAULT_HASH_SEED`).
   *
   * @param has_nulls Indicates if either input table contains nulls
   * @param t A table_device_view to hash
   */
  CUDF_HOST_DEVICE row_hasher(Nullate has_nulls, table_device_view t)
    : _table{t}, _has_nulls{has_nulls}
  {
  }
  /**
   * @brief Constructs a row_hasher object with a seed value.
   *
   * @param has_nulls Indicates if either input table contains nulls
   * @param t A table_device_view to hash
   * @param seed A seed value to use for hashing
   */
  CUDF_HOST_DEVICE row_hasher(Nullate has_nulls, table_device_view t, uint32_t seed)
    : _table{t}, _seed(seed), _has_nulls{has_nulls}
  {
  }
  /**
   * @brief Computes the hash value of the row at `row_index` in the `table`
   *
   * @param row_index The index of the row in the `table` to hash
   * @return The hash value of the row at `row_index` in the `table`
   */
  __device__ auto operator()(size_type row_index) const
  {
    // Hash the first column w/ the seed
    auto const initial_hash = cudf::hashing::detail::hash_combine(
      hash_value_type{0},
      type_dispatcher<dispatch_storage_type>(
        _table.column(0).type(),
        element_hasher_with_seed<hash_function, Nullate>{_has_nulls, _seed},
        _table.column(0),
        row_index));
    // Hashes an element in a column (unseeded; only column 0 uses the seed above)
    auto hasher = [=](size_type column_index) {
      return cudf::type_dispatcher<dispatch_storage_type>(
        _table.column(column_index).type(),
        element_hasher<hash_function, Nullate>{_has_nulls},
        _table.column(column_index),
        row_index);
    };
    // Hash each element and combine all the hash values together
    return thrust::transform_reduce(
      thrust::seq,
      // note that this starts at 1 and not 0 now since we already hashed the first column
      thrust::make_counting_iterator(1),
      thrust::make_counting_iterator(_table.num_columns()),
      hasher,
      initial_hash,
      // NOTE: hash_combine is order-sensitive, so the fold must stay sequential (thrust::seq)
      [](hash_value_type lhs, hash_value_type rhs) {
        return cudf::hashing::detail::hash_combine(lhs, rhs);
      });
  }

 private:
  table_device_view _table;
  Nullate _has_nulls;
  uint32_t _seed{DEFAULT_HASH_SEED};
};
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf/table
|
rapidsai_public_repos/cudf/cpp/include/cudf/table/experimental/row_operators.cuh
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/algorithm.cuh>
#include <cudf/detail/utilities/assert.cuh>
#include <cudf/hashing/detail/default_hash.cuh>
#include <cudf/hashing/detail/hashing.hpp>
#include <cudf/lists/detail/dremel.hpp>
#include <cudf/lists/list_device_view.cuh>
#include <cudf/lists/lists_column_device_view.cuh>
#include <cudf/sorting.hpp>
#include <cudf/structs/structs_column_device_view.cuh>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/detail/use_default.h>
#include <thrust/equal.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/iterator_adaptor.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/iterator_facade.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/logical.h>
#include <thrust/swap.h>
#include <thrust/transform_reduce.h>
#include <cuda/std/tuple>
#include <cuda/std/utility>
#include <limits>
#include <memory>
#include <optional>
#include <type_traits>
#include <utility>
namespace cudf {
namespace experimental {
/**
 * @brief A map from cudf::type_id to cudf type that excludes LIST and STRUCT types.
 *
 * To be used with type_dispatcher in place of the default map, when it is required that STRUCT and
 * LIST map to void. This is useful when we want to avoid recursion in a functor. For example, in
 * element_comparator, we have a specialization for STRUCT but the type_dispatcher in it is only
 * used to dispatch to the same functor for non-nested types. Even when we're guaranteed to not have
 * nested types at that point, the compiler doesn't know this and would try to create recursive
 * code which is very slow.
 *
 * Usage:
 * @code
 * type_dispatcher<dispatch_void_if_nested>(data_type(), functor{});
 * @endcode
 */
template <cudf::type_id t>
struct dispatch_void_if_nested {
  /// The type to dispatch to: `void` for STRUCT/LIST, otherwise the regular mapped type
  using type = std::conditional_t<t == type_id::STRUCT or t == type_id::LIST, void, id_to_type<t>>;
};
namespace row {
/// @brief Strongly-typed row index bound to the left-hand table of a two-table comparison.
enum class lhs_index_type : size_type {};
/// @brief Strongly-typed row index bound to the right-hand table of a two-table comparison.
enum class rhs_index_type : size_type {};
/**
* @brief A counting iterator that uses strongly typed indices bound to tables.
*
* Performing lexicographic or equality comparisons between values in two
* tables requires the use of strongly typed indices. The strong index types
* `lhs_index_type` and `rhs_index_type` ensure that index values are bound to
* the correct table, regardless of the order in which these indices are
* provided to the call operator. This struct and its type aliases
* `lhs_iterator` and `rhs_iterator` provide an interface similar to a counting
* iterator, with strongly typed values to represent the table indices.
*
* @tparam Index The strong index type
*/
template <typename Index, typename Underlying = std::underlying_type_t<Index>>
struct strong_index_iterator : public thrust::iterator_facade<strong_index_iterator<Index>,
                                                              Index,
                                                              thrust::use_default,
                                                              thrust::random_access_traversal_tag,
                                                              Index,
                                                              Underlying> {
  // NOTE(review): this alias names thrust::iterator_adaptor even though the base class above is
  // thrust::iterator_facade — confirm whether super_t should refer to the facade instead.
  using super_t =
    thrust::iterator_adaptor<strong_index_iterator<Index>, Index>;  ///< The base class
  /**
   * @brief Constructs a strong index iterator
   *
   * @param n The beginning index
   */
  explicit constexpr strong_index_iterator(Underlying n) : begin{n} {}

  friend class thrust::iterator_core_access;  ///< Allow access to the base class

 private:
  // Core operations required by thrust::iterator_facade; all operate on the
  // underlying (weakly-typed) integer and only expose the strong type on dereference.
  __device__ constexpr void increment() { ++begin; }
  __device__ constexpr void decrement() { --begin; }

  __device__ constexpr void advance(Underlying n) { begin += n; }

  __device__ constexpr bool equal(strong_index_iterator<Index> const& other) const noexcept
  {
    return begin == other.begin;
  }

  // Dereference yields the strongly-typed index value, not a reference.
  __device__ constexpr Index dereference() const noexcept { return static_cast<Index>(begin); }

  __device__ constexpr Underlying distance_to(
    strong_index_iterator<Index> const& other) const noexcept
  {
    return other.begin - begin;
  }

  // Current position as the underlying integer type.
  Underlying begin{};
};
/**
* @brief Iterator representing indices into a left-side table.
*/
using lhs_iterator = strong_index_iterator<lhs_index_type>;
/**
* @brief Iterator representing indices into a right-side table.
*/
using rhs_iterator = strong_index_iterator<rhs_index_type>;
namespace lexicographic {
/**
 * @brief Computes a weak ordering of two values with special sorting behavior.
 *
 * This relational comparator functor compares physical values rather than logical
 * elements like lists, strings, or structs. It evaluates `NaN` as not less than, equal to, or
 * greater than other values and is IEEE-754 compliant (i.e. comparisons involving `NaN` are
 * unordered).
 */
struct physical_element_comparator {
  /**
   * @brief Operator for relational comparisons.
   *
   * Forwards directly to `detail::compare_elements` with no special `NaN` handling.
   *
   * @param lhs First element
   * @param rhs Second element
   * @return Relation between elements
   */
  template <typename Element>
  __device__ constexpr weak_ordering operator()(Element const lhs, Element const rhs) const noexcept
  {
    return detail::compare_elements(lhs, rhs);
  }
};
/**
 * @brief Relational comparator functor that compares physical values rather than logical
 * elements like lists, strings, or structs. It evaluates `NaN` as equivalent to other `NaN`s and
 * greater than all other values.
 */
struct sorting_physical_element_comparator {
  /**
   * @brief Operator for relational comparison of non-floating point values.
   *
   * @param lhs First element
   * @param rhs Second element
   * @return Relation between elements
   */
  template <typename Element, CUDF_ENABLE_IF(not std::is_floating_point_v<Element>)>
  __device__ constexpr weak_ordering operator()(Element const lhs, Element const rhs) const noexcept
  {
    return detail::compare_elements(lhs, rhs);
  }
  /**
   * @brief Operator for relational comparison of floating point values.
   *
   * A `NaN` compares equivalent to any other `NaN` and greater than every
   * non-`NaN` value.
   *
   * @param lhs First element
   * @param rhs Second element
   * @return Relation between elements
   */
  template <typename Element, CUDF_ENABLE_IF(std::is_floating_point_v<Element>)>
  __device__ constexpr weak_ordering operator()(Element const lhs, Element const rhs) const noexcept
  {
    auto const lhs_nan = isnan(lhs);
    auto const rhs_nan = isnan(rhs);
    if (lhs_nan or rhs_nan) {
      if (lhs_nan and rhs_nan) { return weak_ordering::EQUIVALENT; }
      return lhs_nan ? weak_ordering::GREATER : weak_ordering::LESS;
    }
    return detail::compare_elements(lhs, rhs);
  }
};
/// Optional Dremel device view; engaged only for LIST columns (see comment below).
using optional_dremel_view = thrust::optional<detail::dremel_device_view const>;
// The has_nested_columns template parameter of the device_row_comparator is
// necessary to help the compiler optimize our code. Without it, the list and
// struct view specializations are present in the code paths used for primitive
// types, and the compiler fails to inline this nearly as well resulting in a
// significant performance drop. As a result, there is some minor tension in
// the current design between the presence of this parameter and the way that
// the Dremel data is passed around, first as a
// std::optional<device_span<dremel_device_view>> in the
// preprocessed_table/device_row_comparator (which is always valid when
// has_nested_columns and is otherwise invalid) that is then unpacked to a
// thrust::optional<dremel_device_view> at the element_comparator level (which
// is always valid for a list column and otherwise invalid). We cannot use an
// additional template parameter for the element_comparator on a per-column
// basis because we cannot conditionally define dremel_device_view member
// variables without jumping through extra hoops with inheritance, so the
// thrust::optional<dremel_device_view> member must be an optional rather than
// a raw dremel_device_view.
/**
* @brief Computes the lexicographic comparison between 2 rows.
*
* Lexicographic ordering is determined by:
* - Two rows are compared element by element.
* - The first mismatching element defines which row is lexicographically less
* or greater than the other.
* - If the rows are compared without mismatched elements, the rows are equivalent
*
*
* Lexicographic ordering is exactly equivalent to doing an alphabetical sort of
* two words, for example, `aac` would be *less* than (or precede) `abb`. The
* second letter in both words is the first non-equal letter, and `a < b`, thus
* `aac < abb`.
*
* @note The operator overloads in sub-class `element_comparator` are templated via the
* `type_dispatcher` to help select an overload instance for each column in a table.
* So, `cudf::is_nested<Element>` will return `true` if the table has nested-type columns,
* but it will be a runtime error if template parameter `has_nested_columns != true`.
*
* @tparam has_nested_columns compile-time optimization for primitive types.
* This template parameter is to be used by the developer by querying
* `cudf::detail::has_nested_columns(input)`. `true` compiles operator
* overloads for nested types, while `false` only compiles operator
* overloads for primitive types.
* @tparam Nullate A cudf::nullate type describing whether to check for nulls.
* @tparam PhysicalElementComparator A relational comparator functor that compares individual values
* rather than logical elements, defaults to `NaN` aware relational comparator that evaluates `NaN`
* as greater than all other values.
*/
template <bool has_nested_columns,
typename Nullate,
typename PhysicalElementComparator = sorting_physical_element_comparator>
class device_row_comparator {
public:
friend class self_comparator; ///< Allow self_comparator to access private members
friend class two_table_comparator; ///< Allow two_table_comparator to access private members
  /**
   * @brief Construct a function object for performing a lexicographic
   * comparison between the rows of two tables.
   *
   * @param check_nulls Indicates if any input column contains nulls.
   * @param lhs The first table
   * @param rhs The second table (may be the same table as `lhs`)
   * @param l_dremel_device_views Dremel device views for the lhs table's list-type columns
   * @param r_dremel_device_views Dremel device views for the rhs table's list-type columns
   * @param depth Optional, device array the same length as a row that contains starting depths of
   * columns if they're nested, and 0 otherwise.
   * @param column_order Optional, device array the same length as a row that indicates the desired
   * ascending/descending order of each column in a row. If `nullopt`, it is assumed all columns are
   * sorted in ascending order.
   * @param null_precedence Optional, device array the same length as a row and indicates how null
   * values compare to all other for every column. If `nullopt`, then null precedence would be
   * `null_order::BEFORE` for all columns.
   * @param comparator Physical element relational comparison functor.
   */
  device_row_comparator(Nullate check_nulls,
                        table_device_view lhs,
                        table_device_view rhs,
                        device_span<detail::dremel_device_view const> l_dremel_device_views,
                        device_span<detail::dremel_device_view const> r_dremel_device_views,
                        std::optional<device_span<int const>> depth = std::nullopt,
                        std::optional<device_span<order const>> column_order = std::nullopt,
                        std::optional<device_span<null_order const>> null_precedence = std::nullopt,
                        PhysicalElementComparator comparator = {}) noexcept
    : _lhs{lhs},
      _rhs{rhs},
      _l_dremel(l_dremel_device_views),
      _r_dremel(r_dremel_device_views),
      _check_nulls{check_nulls},
      _depth{depth},
      _column_order{column_order},
      _null_precedence{null_precedence},
      _comparator{comparator}
  {
  }
  /**
   * @brief Construct a function object for performing a lexicographic
   * comparison between the rows of two tables.
   * This is a special overload to allow device-side construction of the
   * comparator for cases where no preprocessing is needed, i.e. tables with
   * non-nested type columns.
   *
   * @note Only available when `has_nested_columns == false`; the Dremel views
   * and depth spans are left empty since non-nested columns never use them.
   *
   * @param check_nulls Indicates if any input column contains nulls.
   * @param lhs The first table
   * @param rhs The second table (may be the same table as `lhs`)
   * @param column_order Optional, device array the same length as a row that indicates the desired
   * ascending/descending order of each column in a row. If `nullopt`, it is assumed all columns are
   * sorted in ascending order.
   * @param null_precedence Optional, device array the same length as a row and indicates how null
   * values compare to all other for every column. If `nullopt`, then null precedence would be
   * `null_order::BEFORE` for all columns.
   * @param comparator Physical element relational comparison functor.
   */
  template <bool nested_disable = not has_nested_columns, CUDF_ENABLE_IF(nested_disable)>
  __device__ device_row_comparator(
    Nullate check_nulls,
    table_device_view lhs,
    table_device_view rhs,
    std::optional<device_span<order const>> column_order         = std::nullopt,
    std::optional<device_span<null_order const>> null_precedence = std::nullopt,
    PhysicalElementComparator comparator                         = {}) noexcept
    : _lhs{lhs},
      _rhs{rhs},
      _l_dremel{},
      _r_dremel{},
      _check_nulls{check_nulls},
      _depth{},
      _column_order{column_order},
      _null_precedence{null_precedence},
      _comparator{comparator}
  {
  }
/**
* @brief Performs a relational comparison between two elements in two columns.
*/
class element_comparator {
public:
    /**
     * @brief Construct type-dispatched function object for performing a
     * relational comparison between two elements.
     *
     * @note `lhs` and `rhs` may be the same.
     *
     * @param check_nulls Indicates if either input column contains nulls.
     * @param lhs The column containing the first element
     * @param rhs The column containing the second element (may be the same as lhs)
     * @param null_precedence Indicates how null values are ordered with other values
     * @param depth The depth of the column if part of a nested column @see
     * preprocessed_table::depths
     * @param comparator Physical element relational comparison functor.
     * @param l_dremel_device_view Dremel device view of the lhs column; engaged only when the
     * column is a LIST column
     * @param r_dremel_device_view Dremel device view of the rhs column; engaged only when the
     * column is a LIST column
     */
    __device__ element_comparator(Nullate check_nulls,
                                  column_device_view lhs,
                                  column_device_view rhs,
                                  null_order null_precedence           = null_order::BEFORE,
                                  int depth                            = 0,
                                  PhysicalElementComparator comparator = {},
                                  optional_dremel_view l_dremel_device_view = {},
                                  optional_dremel_view r_dremel_device_view = {})
      : _lhs{lhs},
        _rhs{rhs},
        _check_nulls{check_nulls},
        _null_precedence{null_precedence},
        _depth{depth},
        _l_dremel_device_view{l_dremel_device_view},
        _r_dremel_device_view{r_dremel_device_view},
        _comparator{comparator}
    {
    }
    /**
     * @brief Performs a relational comparison between the specified elements
     *
     * @param lhs_element_index The index of the first element
     * @param rhs_element_index The index of the second element
     * @return Indicates the relationship between the elements in the `lhs` and `rhs` columns, along
     * with the depth at which a null value was encountered. When neither element is null, the
     * returned depth is `std::numeric_limits<int>::max()` (i.e. "no null encountered").
     */
    template <typename Element,
              CUDF_ENABLE_IF(cudf::is_relationally_comparable<Element, Element>())>
    __device__ cuda::std::pair<weak_ordering, int> operator()(
      size_type const lhs_element_index, size_type const rhs_element_index) const noexcept
    {
      if (_check_nulls) {
        bool const lhs_is_null{_lhs.is_null(lhs_element_index)};
        bool const rhs_is_null{_rhs.is_null(rhs_element_index)};
        if (lhs_is_null or rhs_is_null) {  // at least one is null
          return cuda::std::pair(null_compare(lhs_is_null, rhs_is_null, _null_precedence), _depth);
        }
      }
      // Both elements are valid: defer to the physical element comparator.
      return cuda::std::pair(_comparator(_lhs.element<Element>(lhs_element_index),
                                         _rhs.element<Element>(rhs_element_index)),
                             std::numeric_limits<int>::max());
    }
    /**
     * @brief Throws run-time error when columns types cannot be compared
     * or if this class is instantiated with `has_nested_columns = false` but
     * passed tables with nested columns
     *
     * @return Ordering (never actually returns; reaching this overload traps)
     */
    template <typename Element,
              CUDF_ENABLE_IF(not cudf::is_relationally_comparable<Element, Element>() and
                             (not has_nested_columns or not cudf::is_nested<Element>()))>
    __device__ cuda::std::pair<weak_ordering, int> operator()(size_type const,
                                                              size_type const) const noexcept
    {
      CUDF_UNREACHABLE("Attempted to compare elements of uncomparable types.");
    }
    /**
     * @brief Compares two struct-type columns
     *
     * Descends through the (single-child) struct hierarchy until a non-struct
     * child is reached, then dispatches the comparison to that child column.
     *
     * @param lhs_element_index The index of the first element
     * @param rhs_element_index The index of the second element
     * @return Indicates the relationship between the elements in the `lhs` and `rhs` columns, along
     * with the depth at which a null value was encountered.
     */
    template <typename Element,
              CUDF_ENABLE_IF(has_nested_columns and std::is_same_v<Element, cudf::struct_view>)>
    __device__ cuda::std::pair<weak_ordering, int> operator()(
      size_type const lhs_element_index, size_type const rhs_element_index) const noexcept
    {
      column_device_view lcol = _lhs;
      column_device_view rcol = _rhs;
      int depth               = _depth;
      while (lcol.type().id() == type_id::STRUCT) {
        bool const lhs_is_null{lcol.is_null(lhs_element_index)};
        bool const rhs_is_null{rcol.is_null(rhs_element_index)};
        if (lhs_is_null or rhs_is_null) {  // at least one is null
          weak_ordering state = null_compare(lhs_is_null, rhs_is_null, _null_precedence);
          return cuda::std::pair(state, depth);
        }
        // Empty (childless) structs compare equivalent with no null encountered.
        if (lcol.num_child_columns() == 0) {
          return cuda::std::pair(weak_ordering::EQUIVALENT, std::numeric_limits<int>::max());
        }
        // Non-empty structs have been modified to only have 1 child when using this.
        lcol = detail::structs_column_device_view(lcol).get_sliced_child(0);
        rcol = detail::structs_column_device_view(rcol).get_sliced_child(0);
        ++depth;
      }
      // Dispatch on the non-struct leaf; dispatch_void_if_nested prevents the compiler
      // from generating recursive instantiations for nested types here.
      return cudf::type_dispatcher<dispatch_void_if_nested>(
        lcol.type(),
        element_comparator{_check_nulls, lcol, rcol, _null_precedence, depth, _comparator},
        lhs_element_index,
        rhs_element_index);
    }
    /**
     * @brief Compares two list-type columns
     *
     * Walks the Dremel encodings (repetition/definition levels) of both list
     * elements in lockstep, comparing structure first and leaf values second.
     *
     * @param lhs_element_index The index of the first element
     * @param rhs_element_index The index of the second element
     * @return Indicates the relationship between the elements in the `lhs` and `rhs` columns, along
     * with the depth at which a null value was encountered.
     */
    template <typename Element,
              CUDF_ENABLE_IF(has_nested_columns and std::is_same_v<Element, cudf::list_view>)>
    __device__ cuda::std::pair<weak_ordering, int> operator()(size_type lhs_element_index,
                                                              size_type rhs_element_index)
    {
      // only order top-NULLs according to null_order
      auto const is_l_row_null = _lhs.is_null(lhs_element_index);
      auto const is_r_row_null = _rhs.is_null(rhs_element_index);
      if (is_l_row_null || is_r_row_null) {
        return cuda::std::pair(null_compare(is_l_row_null, is_r_row_null, _null_precedence),
                               _depth);
      }
      // These are all the values from the Dremel encoding.
      // (The Dremel views are always engaged for list columns; see the note on
      // optional_dremel_view above.)
      auto const l_max_def_level = _l_dremel_device_view->max_def_level;
      auto const r_max_def_level = _r_dremel_device_view->max_def_level;
      auto const l_def_levels    = _l_dremel_device_view->def_levels;
      auto const r_def_levels    = _r_dremel_device_view->def_levels;
      auto const l_rep_levels    = _l_dremel_device_view->rep_levels;
      auto const r_rep_levels    = _r_dremel_device_view->rep_levels;
      // Traverse the nested list hierarchy to get a column device view
      // pointing to the underlying child data.
      column_device_view lcol = _lhs.slice(lhs_element_index, 1);
      column_device_view rcol = _rhs.slice(rhs_element_index, 1);
      while (lcol.type().id() == type_id::LIST) {
        lcol = detail::lists_column_device_view(lcol).get_sliced_child();
        rcol = detail::lists_column_device_view(rcol).get_sliced_child();
      }
      // These start and end values indicate the start and end points of all
      // the elements of the lists in the current list element
      // (`[lhs|rhs]_element_index`) that we are comparing.
      auto const l_offsets = _l_dremel_device_view->offsets;
      auto const r_offsets = _r_dremel_device_view->offsets;
      auto l_start         = l_offsets[lhs_element_index];
      auto l_end           = l_offsets[lhs_element_index + 1];
      auto r_start         = r_offsets[rhs_element_index];
      auto r_end           = r_offsets[rhs_element_index + 1];
      // This comparator will be used to compare leaf (non-nested) data types.
      auto comparator =
        element_comparator{_check_nulls, lcol, rcol, _null_precedence, _depth, _comparator};
      // Loop over each element in the encoding. Note that this includes nulls
      // and empty lists, so not every index corresponds to an actual element
      // in the child column. The element_index is used to keep track of the current
      // child element that we're actually comparing.
      for (int l_dremel_index = l_start, r_dremel_index = r_start, element_index = 0;
           l_dremel_index < l_end and r_dremel_index < r_end;
           ++l_dremel_index, ++r_dremel_index) {
        auto const l_rep_level = l_rep_levels[l_dremel_index];
        auto const r_rep_level = r_rep_levels[r_dremel_index];
        // early exit for smaller sub-list
        if (l_rep_level != r_rep_level) {
          // the lower repetition level is a smaller sub-list
          return l_rep_level < r_rep_level ? cuda::std::pair(weak_ordering::LESS, _depth)
                                           : cuda::std::pair(weak_ordering::GREATER, _depth);
        }
        // only compare if left and right are at same nesting level
        auto const l_def_level = l_def_levels[l_dremel_index];
        auto const r_def_level = r_def_levels[r_dremel_index];
        // either left or right are empty or NULLs of arbitrary nesting
        if (l_def_level < l_max_def_level || r_def_level < r_max_def_level) {
          // in the fully unraveled version of the list column, only the
          // most nested NULLs and leafs are present
          // In this rare condition that we get to the most nested NULL, we increment
          // element_index because either both rows have a deeply nested NULL at the
          // same position, and we'll "continue" in our iteration, or we will early
          // exit if only one of the rows has a deeply nested NULL
          if ((lcol.nullable() and l_def_levels[l_dremel_index] == l_max_def_level - 1) or
              (rcol.nullable() and r_def_levels[r_dremel_index] == r_max_def_level - 1)) {
            ++element_index;
          }
          if (l_def_level == r_def_level) { continue; }
          // We require [] < [NULL] < [leaf] for nested nulls.
          // The null_precedence only affects top level nulls.
          return l_def_level < r_def_level ? cuda::std::pair(weak_ordering::LESS, _depth)
                                           : cuda::std::pair(weak_ordering::GREATER, _depth);
        }
        // finally, compare leaf to leaf
        weak_ordering state{weak_ordering::EQUIVALENT};
        int last_null_depth = _depth;
        cuda::std::tie(state, last_null_depth) = cudf::type_dispatcher<dispatch_void_if_nested>(
          lcol.type(), comparator, element_index, element_index);
        if (state != weak_ordering::EQUIVALENT) { return cuda::std::pair(state, _depth); }
        ++element_index;
      }
      // If we have reached this stage, we know that definition levels,
      // repetition levels, and actual elements are identical in both list
      // columns up to the `min(l_end - l_start, r_end - r_start)` element of
      // the Dremel encoding. However, two lists can only compare equivalent if
      // they are of the same length. Otherwise, the shorter of the two is less
      // than the longer. This final check determines the appropriate resulting
      // ordering by checking how many total elements each list is composed of.
      return cuda::std::pair(detail::compare_elements(l_end - l_start, r_end - r_start), _depth);
    }
private:
column_device_view const _lhs;
column_device_view const _rhs;
Nullate const _check_nulls;
null_order const _null_precedence;
int const _depth;
optional_dremel_view _l_dremel_device_view;
optional_dremel_view _r_dremel_device_view;
PhysicalElementComparator const _comparator;
};
public:
/**
 * @brief Checks whether the row at `lhs_index` in the `lhs` table compares
 * lexicographically less, greater, or equivalent to the row at `rhs_index` in the `rhs` table.
 *
 * Columns are visited left to right; the first column pair that does not compare
 * EQUIVALENT decides the result (adjusted for that column's sort order).
 *
 * @param lhs_index The index of the row in the `lhs` table to examine
 * @param rhs_index The index of the row in the `rhs` table to examine
 * @return weak ordering comparison of the row in the `lhs` table relative to the row in the `rhs`
 * table
 */
__device__ constexpr weak_ordering operator()(size_type const lhs_index,
                                              size_type const rhs_index) const noexcept
{
  // Depth of the shallowest null found so far; columns decomposed from the same struct
  // at a greater depth are skipped below since the null already decided their ordering.
  int last_null_depth = std::numeric_limits<int>::max();
  // Dremel views are stored only for list columns; this tracks which view pair belongs
  // to the current column.
  size_type list_column_index{-1};
  for (size_type i = 0; i < _lhs.num_columns(); ++i) {
    if (_lhs.column(i).type().id() == type_id::LIST) { ++list_column_index; }
    int const depth = _depth.has_value() ? (*_depth)[i] : 0;
    if (depth > last_null_depth) { continue; }
    // Per-column order/precedence default to ASCENDING / null_order::BEFORE when the
    // optional spans were not supplied at preprocessing time.
    bool const ascending =
      _column_order.has_value() ? (*_column_order)[i] == order::ASCENDING : true;
    null_order const null_precedence =
      _null_precedence.has_value() ? (*_null_precedence)[i] : null_order::BEFORE;
    // TODO: At what point do we verify that the columns of lhs and rhs are
    // all of the same types? I assume that it's already happened before
    // here, otherwise the current code would be failing.
    // Non-list columns get empty (disengaged) dremel views.
    auto const [l_dremel_i, r_dremel_i] =
      _lhs.column(i).type().id() == type_id::LIST
        ? std::make_tuple(optional_dremel_view(_l_dremel[list_column_index]),
                          optional_dremel_view(_r_dremel[list_column_index]))
        : std::make_tuple(optional_dremel_view{}, optional_dremel_view{});
    auto element_comp = element_comparator{_check_nulls,
                                           _lhs.column(i),
                                           _rhs.column(i),
                                           null_precedence,
                                           depth,
                                           _comparator,
                                           l_dremel_i,
                                           r_dremel_i};
    weak_ordering state;
    // Dispatch on the column's type; also records the depth at which a null was hit.
    cuda::std::tie(state, last_null_depth) =
      cudf::type_dispatcher(_lhs.column(i).type(), element_comp, lhs_index, rhs_index);
    if (state == weak_ordering::EQUIVALENT) { continue; }
    // Invert LESS/GREATER for descending columns.
    return ascending
             ? state
             : (state == weak_ordering::GREATER ? weak_ordering::LESS : weak_ordering::GREATER);
  }
  return weak_ordering::EQUIVALENT;
}
private:
table_device_view const _lhs;
table_device_view const _rhs;
device_span<detail::dremel_device_view const> const _l_dremel;
device_span<detail::dremel_device_view const> const _r_dremel;
Nullate const _check_nulls;
std::optional<device_span<int const>> const _depth;
std::optional<device_span<order const>> const _column_order;
std::optional<device_span<null_order const>> const _null_precedence;
PhysicalElementComparator const _comparator;
}; // class device_row_comparator
/**
 * @brief Adapts a Comparator returning a weak_ordering into a boolean predicate.
 *
 * The predicate evaluates to true whenever the comparison result matches any of the
 * orderings in the `values` parameter pack.
 *
 * Note that this should never be used with only `weak_ordering::EQUIVALENT`.
 * An equality comparator should be used instead for optimal performance.
 *
 * @tparam Comparator generic comparator that returns a weak_ordering.
 * @tparam values weak_ordering parameter pack of orderings to interpret as true
 */
template <typename Comparator, weak_ordering... values>
struct weak_ordering_comparator_impl {
  static_assert(not((weak_ordering::EQUIVALENT == values) && ...),
                "weak_ordering_comparator should not be used for pure equality comparisons. The "
                "`row_equality_comparator` should be used instead");

  /**
   * @brief Invokes the wrapped comparator and maps its weak_ordering onto a boolean.
   *
   * @param lhs_index Row index forwarded to the wrapped comparator
   * @param rhs_index Row index forwarded to the wrapped comparator
   * @return true if the comparison result matches any ordering in `values`
   */
  template <typename LhsType, typename RhsType>
  __device__ constexpr bool operator()(LhsType const lhs_index,
                                       RhsType const rhs_index) const noexcept
  {
    auto const ordering = comparator(lhs_index, rhs_index);
    // Fold over the accepted orderings: true if any one of them matches.
    return ((ordering == values) || ...);
  }

  Comparator const comparator;  ///< The wrapped comparator returning a weak_ordering
};
/**
 * @brief Wraps and interprets the result of device_row_comparator, true if the result is
 * weak_ordering::LESS meaning one row is lexicographically *less* than another row.
 *
 * @tparam Comparator generic comparator that returns a weak_ordering
 */
template <typename Comparator>
struct less_comparator : weak_ordering_comparator_impl<Comparator, weak_ordering::LESS> {
  /**
   * @brief Constructs a less_comparator
   *
   * @param comparator The weak_ordering-returning comparator to wrap
   */
  less_comparator(Comparator const& comparator)
    : weak_ordering_comparator_impl<Comparator, weak_ordering::LESS>{comparator}
  {
  }
};
/**
 * @brief Wraps and interprets the result of device_row_comparator, true if the result is
 * weak_ordering::LESS or weak_ordering::EQUIVALENT meaning one row is lexicographically *less* than
 * or *equivalent* to another row.
 *
 * @tparam Comparator generic comparator that returns a weak_ordering
 */
template <typename Comparator>
struct less_equivalent_comparator
  : weak_ordering_comparator_impl<Comparator, weak_ordering::LESS, weak_ordering::EQUIVALENT> {
  /**
   * @brief Constructs a less_equivalent_comparator
   *
   * @param comparator The weak_ordering-returning comparator to wrap
   */
  less_equivalent_comparator(Comparator const& comparator)
    : weak_ordering_comparator_impl<Comparator, weak_ordering::LESS, weak_ordering::EQUIVALENT>{
        comparator}
  {
  }
};
/**
 * @brief Preprocessed table for use with lexicographical comparison
 *
 */
struct preprocessed_table {
  /// Type of table device view owner for the preprocessed table.
  using table_device_view_owner =
    std::invoke_result_t<decltype(table_device_view::create), table_view, rmm::cuda_stream_view>;

  /**
   * @brief Preprocess table for use with lexicographical comparison
   *
   * Sets up the table for use with lexicographical comparison. The resulting preprocessed table can
   * be passed to the constructor of `lexicographic::self_comparator` or
   * `lexicographic::two_table_comparator` to avoid preprocessing again.
   *
   * Note that the output of this factory function should not be used in `two_table_comparator` if
   * the input table contains lists-of-structs. In such cases, please use the overload
   * `preprocessed_table::create(table_view const&, table_view const&,...)` to preprocess both input
   * tables at the same time.
   *
   * @param table The table to preprocess
   * @param column_order Optional, host array the same length as a row that indicates the desired
   *        ascending/descending order of each column in a row. If empty, it is assumed all columns
   *        are sorted in ascending order.
   * @param null_precedence Optional, an array having the same length as the number of columns in
   *        the input tables that indicates how null values compare to all other. If it is empty,
   *        the order `null_order::BEFORE` will be used for all columns.
   * @param stream The stream to launch kernels and h->d copies on while preprocessing
   * @return A shared pointer to a preprocessed table
   */
  static std::shared_ptr<preprocessed_table> create(table_view const& table,
                                                    host_span<order const> column_order,
                                                    host_span<null_order const> null_precedence,
                                                    rmm::cuda_stream_view stream);

  /**
   * @brief Preprocess tables for use with lexicographical comparison
   *
   * Sets up the tables for use with lexicographical comparison. The resulting preprocessed tables
   * can be passed to the constructor of `lexicographic::self_comparator` or
   * `lexicographic::two_table_comparator` to avoid preprocessing again.
   *
   * This factory function performs some extra operations to guarantee that its output can be used
   * in `two_table_comparator` for all cases.
   *
   * @param lhs The lhs table to preprocess
   * @param rhs The rhs table to preprocess
   * @param column_order Optional, host array the same length as a row that indicates the desired
   *        ascending/descending order of each column in a row. If empty, it is assumed all columns
   *        are sorted in ascending order.
   * @param null_precedence Optional, an array having the same length as the number of columns in
   *        the input tables that indicates how null values compare to all other. If it is empty,
   *        the order `null_order::BEFORE` will be used for all columns.
   * @param stream The stream to launch kernels and h->d copies on while preprocessing
   * @return A pair of shared pointers to the preprocessed tables
   */
  static std::pair<std::shared_ptr<preprocessed_table>, std::shared_ptr<preprocessed_table>> create(
    table_view const& lhs,
    table_view const& rhs,
    host_span<order const> column_order,
    host_span<null_order const> null_precedence,
    rmm::cuda_stream_view stream);

 private:
  friend class self_comparator;       ///< Allow self_comparator to access private members
  friend class two_table_comparator;  ///< Allow two_table_comparator to access private members

  /**
   * @brief Create the output preprocessed table from intermediate preprocessing results
   *
   * @param preprocessed_input The table resulted from preprocessing
   * @param verticalized_col_depths The depths of each column resulting from decomposing struct
   *        columns in the original input table
   * @param transformed_columns Store the intermediate columns generated from transforming
   *        nested children columns into integers columns using `cudf::rank()`
   * @param column_order Optional, host array the same length as a row that indicates the desired
   *        ascending/descending order of each column in a row. If empty, it is assumed all columns
   *        are sorted in ascending order.
   * @param null_precedence Optional, an array having the same length as the number of columns in
   *        the input tables that indicates how null values compare to all other. If it is empty,
   *        the order `null_order::BEFORE` will be used for all columns.
   * @param has_ranked_children Flag indicating if the input table was preprocessed to transform
   *        any nested child column into an integer column using `cudf::rank`
   * @param stream The stream to launch kernels and h->d copies on while preprocessing
   * @return A shared pointer to a preprocessed table
   */
  static std::shared_ptr<preprocessed_table> create(
    table_view const& preprocessed_input,
    std::vector<int>&& verticalized_col_depths,
    std::vector<std::unique_ptr<column>>&& transformed_columns,
    host_span<order const> column_order,
    host_span<null_order const> null_precedence,
    bool has_ranked_children,
    rmm::cuda_stream_view stream);

  /**
   * @brief Construct a preprocessed table for use with lexicographical comparison
   *
   * Sets up the table for use with lexicographical comparison. The resulting preprocessed table can
   * be passed to the constructor of `lexicographic::self_comparator` to avoid preprocessing again.
   *
   * @param table The table to preprocess
   * @param column_order Optional, device array the same length as a row that indicates the desired
   *        ascending/descending order of each column in a row. If empty, it is assumed all columns
   *        are sorted in ascending order.
   * @param null_precedence Optional, device array the same length as a row and indicates how null
   *        values compare to all other for every column. If it is nullptr, then null precedence
   *        would be `null_order::BEFORE` for all columns.
   * @param depths The depths of each column resulting from decomposing struct columns.
   * @param dremel_data The dremel data for each list column. The length of this object is the
   *        number of list columns in the table.
   * @param dremel_device_views Device views into the dremel_data structs contained in the
   *        `dremel_data` parameter. For columns that are not list columns, this uvector will should
   *        contain an empty `dremel_device_view`. As such, this uvector has as many elements as
   *        there are columns in the table (unlike the `dremel_data` parameter, which is only as
   *        long as the number of list columns).
   * @param transformed_columns Store the intermediate columns generated from transforming
   *        nested children columns into integers columns using `cudf::rank()`
   * @param has_ranked_children Flag indicating if the input table was preprocessed to transform
   *        any lists-of-structs column having floating-point children using `cudf::rank`
   */
  preprocessed_table(table_device_view_owner&& table,
                     rmm::device_uvector<order>&& column_order,
                     rmm::device_uvector<null_order>&& null_precedence,
                     rmm::device_uvector<size_type>&& depths,
                     std::vector<detail::dremel_data>&& dremel_data,
                     rmm::device_uvector<detail::dremel_device_view>&& dremel_device_views,
                     std::vector<std::unique_ptr<column>>&& transformed_columns,
                     bool has_ranked_children);

  /**
   * @brief Construct a preprocessed table without dremel data.
   *
   * Overload of the constructor above for inputs that carry no dremel data
   * (dremel encodings only apply to list columns).
   */
  preprocessed_table(table_device_view_owner&& table,
                     rmm::device_uvector<order>&& column_order,
                     rmm::device_uvector<null_order>&& null_precedence,
                     rmm::device_uvector<size_type>&& depths,
                     std::vector<std::unique_ptr<column>>&& transformed_columns,
                     bool has_ranked_children);

  /**
   * @brief Implicit conversion operator to a `table_device_view` of the preprocessed table.
   *
   * @return table_device_view
   */
  operator table_device_view() { return *_t; }

  /**
   * @brief Get a device array containing the desired order of each column in the preprocessed table
   *
   * @return Device array containing respective column orders. If no explicit column orders were
   *         specified during the creation of this object then this will be `nullopt`.
   */
  [[nodiscard]] std::optional<device_span<order const>> column_order() const
  {
    return _column_order.size() ? std::optional<device_span<order const>>(_column_order)
                                : std::nullopt;
  }

  /**
   * @brief Get a device array containing the desired null precedence of each column in the
   * preprocessed table
   *
   * @return Device array containing respective column null precedence. If no explicit column null
   *         precedences were specified during the creation of this object then this will be
   *         `nullopt`.
   */
  [[nodiscard]] std::optional<device_span<null_order const>> null_precedence() const
  {
    return _null_precedence.size() ? std::optional<device_span<null_order const>>(_null_precedence)
                                   : std::nullopt;
  }

  /**
   * @brief Get a device array containing the depth of each column in the preprocessed table
   *
   * @see struct_linearize()
   *
   * @return std::optional<device_span<int const>> Device array containing respective column depths.
   *         If there are no nested columns in the table then this will be `nullopt`.
   */
  [[nodiscard]] std::optional<device_span<int const>> depths() const
  {
    return _depths.size() ? std::optional<device_span<int const>>(_depths) : std::nullopt;
  }

  /**
   * @brief Get a device span over the dremel device views of this table's list columns.
   *
   * @return Device span of dremel device views, or an empty span if no dremel data was stored
   *         during preprocessing.
   */
  [[nodiscard]] device_span<detail::dremel_device_view const> dremel_device_views() const
  {
    if (_dremel_device_views.has_value()) {
      return device_span<detail::dremel_device_view const>(*_dremel_device_views);
    } else {
      return {};
    }
  }

  /**
   * @brief Verify that the requested physical element comparator is compatible with how this
   * table was preprocessed.
   *
   * If nested children were transformed into rank columns during preprocessing, only
   * `sorting_physical_element_comparator` yields valid comparison results.
   *
   * @tparam PhysicalElementComparator The physical element comparator type the caller will use
   * @throw cudf::logic_error if the table has ranked children and `PhysicalElementComparator` is
   *        not `sorting_physical_element_comparator`
   */
  template <typename PhysicalElementComparator>
  void check_physical_element_comparator()
  {
    if constexpr (!std::is_same_v<PhysicalElementComparator, sorting_physical_element_comparator>) {
      CUDF_EXPECTS(!_has_ranked_children,
                   "The input table has nested type children and they were transformed using a "
                   "different type of physical element comparator.");
    }
  }

 private:
  table_device_view_owner const _t;                      ///< Device view of the preprocessed table
  rmm::device_uvector<order> const _column_order;        ///< Per-column sort order (may be empty)
  rmm::device_uvector<null_order> const _null_precedence;  ///< Per-column null order (may be empty)
  rmm::device_uvector<size_type> const _depths;  ///< Depths from struct decomposition (may be empty)

  // Dremel encoding of list columns used for the comparison algorithm
  std::optional<std::vector<detail::dremel_data>> _dremel_data;
  std::optional<rmm::device_uvector<detail::dremel_device_view>> _dremel_device_views;

  // Intermediate columns generated from transforming nested children columns into
  // integers columns using `cudf::rank()`, need to be kept alive.
  std::vector<std::unique_ptr<column>> _transformed_columns;

  // Flag to record if the input table was preprocessed to transform any nested children column(s)
  // into integer column(s) using `cudf::rank`.
  bool const _has_ranked_children;
};
/**
 * @brief An owning object that can be used to lexicographically compare two rows of the same table
 *
 * This class can take a table_view and preprocess certain columns to allow for lexicographical
 * comparison. The preprocessed table and temporary data required for the comparison are created and
 * owned by this class.
 *
 * Alternatively, `self_comparator` can be constructed from an existing
 * `shared_ptr<preprocessed_table>` when sharing the same table among multiple comparators.
 *
 * This class can then provide a functor object that can be used on the device.
 * The object of this class must outlive the usage of the device functor.
 */
class self_comparator {
 public:
  /**
   * @brief Construct an owning object for performing a lexicographic comparison between two rows of
   * the same table.
   *
   * @param t The table to compare
   * @param column_order Optional, host array the same length as a row that indicates the desired
   *        ascending/descending order of each column in a row. If empty, it is assumed all columns
   *        are sorted in ascending order.
   * @param null_precedence Optional, device array the same length as a row and indicates how null
   *        values compare to all other for every column. If empty, then null precedence would be
   *        `null_order::BEFORE` for all columns.
   * @param stream The stream to construct this object on. Not the stream that will be used for
   *        comparisons using this object.
   */
  self_comparator(table_view const& t,
                  host_span<order const> column_order         = {},
                  host_span<null_order const> null_precedence = {},
                  rmm::cuda_stream_view stream                = cudf::get_default_stream())
    : d_t{preprocessed_table::create(t, column_order, null_precedence, stream)}
  {
  }

  /**
   * @brief Construct an owning object for performing a lexicographic comparison between two rows of
   * the same preprocessed table.
   *
   * This constructor allows independently constructing a `preprocessed_table` and sharing it among
   * multiple comparators.
   *
   * @param t A table preprocessed for lexicographic comparison
   */
  self_comparator(std::shared_ptr<preprocessed_table> t) : d_t{std::move(t)} {}

  /**
   * @brief Return the binary operator for comparing rows in the table.
   *
   * Returns a binary callable, `F`, with signature `bool F(size_type, size_type)`.
   *
   * `F(i,j)` returns true if and only if row `i` compares lexicographically less than row `j`.
   *
   * @note The operator overloads in sub-class `element_comparator` are templated via the
   *       `type_dispatcher` to help select an overload instance for each column in a table.
   *       So, `cudf::is_nested<Element>` will return `true` if the table has nested-type columns,
   *       but it will be a runtime error if template parameter `has_nested_columns != true`.
   *
   * @tparam has_nested_columns compile-time optimization for primitive types.
   *         This template parameter is to be used by the developer by querying
   *         `cudf::detail::has_nested_columns(input)`. `true` compiles operator
   *         overloads for nested types, while `false` only compiles operator
   *         overloads for primitive types.
   * @tparam Nullate A cudf::nullate type describing whether to check for nulls.
   * @tparam PhysicalElementComparator A relational comparator functor that compares individual
   *         values rather than logical elements, defaults to `NaN` aware relational comparator
   *         that evaluates `NaN` as greater than all other values.
   * @throw cudf::logic_error if the input table was preprocessed to transform any nested children
   *        columns into integer columns but `PhysicalElementComparator` is not
   *        `sorting_physical_element_comparator`.
   * @param nullate Indicates if any input column contains nulls.
   * @param comparator Physical element relational comparison functor.
   * @return A binary callable object.
   */
  template <bool has_nested_columns,
            typename Nullate,
            typename PhysicalElementComparator = sorting_physical_element_comparator>
  auto less(Nullate nullate = {}, PhysicalElementComparator comparator = {}) const
  {
    // Reject comparator types incompatible with how the table was preprocessed (ranked children).
    d_t->check_physical_element_comparator<PhysicalElementComparator>();

    // Self comparison: the same preprocessed table is used as both lhs and rhs.
    return less_comparator{
      device_row_comparator<has_nested_columns, Nullate, PhysicalElementComparator>{
        nullate,
        *d_t,
        *d_t,
        d_t->dremel_device_views(),
        d_t->dremel_device_views(),
        d_t->depths(),
        d_t->column_order(),
        d_t->null_precedence(),
        comparator}};
  }

  /// @copydoc less()
  template <bool has_nested_columns,
            typename Nullate,
            typename PhysicalElementComparator = sorting_physical_element_comparator>
  auto less_equivalent(Nullate nullate = {}, PhysicalElementComparator comparator = {}) const
  {
    // Reject comparator types incompatible with how the table was preprocessed (ranked children).
    d_t->check_physical_element_comparator<PhysicalElementComparator>();

    // Same wiring as less(), but EQUIVALENT also maps to true.
    return less_equivalent_comparator{
      device_row_comparator<has_nested_columns, Nullate, PhysicalElementComparator>{
        nullate,
        *d_t,
        *d_t,
        d_t->dremel_device_views(),
        d_t->dremel_device_views(),
        d_t->depths(),
        d_t->column_order(),
        d_t->null_precedence(),
        comparator}};
  }

 private:
  std::shared_ptr<preprocessed_table> d_t;  ///< Shared preprocessed table; must outlive functors
};
// @cond
template <typename Comparator>
struct strong_index_comparator_adapter {
  strong_index_comparator_adapter(Comparator const& comparator) : comparator{comparator} {}

  // Forward comparison: left-table row vs right-table row maps directly onto the
  // underlying size_type-based comparator.
  __device__ constexpr weak_ordering operator()(lhs_index_type const lhs_index,
                                                rhs_index_type const rhs_index) const noexcept
  {
    return comparator(static_cast<cudf::size_type>(lhs_index),
                      static_cast<cudf::size_type>(rhs_index));
  }

  // Reversed argument order: compare left-to-right first, then flip LESS/GREATER to
  // express the right-to-left ordering. EQUIVALENT is symmetric and passes through.
  __device__ constexpr weak_ordering operator()(rhs_index_type const rhs_index,
                                                lhs_index_type const lhs_index) const noexcept
  {
    auto const forward = comparator(static_cast<cudf::size_type>(lhs_index),
                                    static_cast<cudf::size_type>(rhs_index));
    switch (forward) {
      case weak_ordering::LESS: return weak_ordering::GREATER;
      case weak_ordering::GREATER: return weak_ordering::LESS;
      default: return weak_ordering::EQUIVALENT;
    }
  }

  Comparator const comparator;  ///< The wrapped size_type-based row comparator
};
// @endcond
/**
 * @brief An owning object that can be used to lexicographically compare rows of two different
 * tables
 *
 * This class takes two table_views and preprocesses certain columns to allow for lexicographical
 * comparison. The preprocessed table and temporary data required for the comparison are created and
 * owned by this class.
 *
 * Alternatively, `two_table_comparator` can be constructed from two existing
 * `shared_ptr<preprocessed_table>`s when sharing the same tables among multiple comparators.
 *
 * This class can then provide a functor object that can be used on the device.
 * The object of this class must outlive the usage of the device functor.
 */
class two_table_comparator {
 public:
  /**
   * @brief Construct an owning object for performing a lexicographic comparison between rows of
   * two different tables.
   *
   * The left and right table are expected to have the same number of columns
   * and data types for each column.
   *
   * @param left The left table to compare
   * @param right The right table to compare
   * @param column_order Optional, host array the same length as a row that indicates the desired
   *        ascending/descending order of each column in a row. If empty, it is assumed all columns
   *        are sorted in ascending order.
   * @param null_precedence Optional, device array the same length as a row and indicates how null
   *        values compare to all other for every column. If empty, then null precedence would be
   *        `null_order::BEFORE` for all columns.
   * @param stream The stream to construct this object on. Not the stream that will be used for
   *        comparisons using this object.
   */
  two_table_comparator(table_view const& left,
                       table_view const& right,
                       host_span<order const> column_order         = {},
                       host_span<null_order const> null_precedence = {},
                       rmm::cuda_stream_view stream                = cudf::get_default_stream());

  /**
   * @brief Construct an owning object for performing a lexicographic comparison between two rows of
   * the same preprocessed table.
   *
   * This constructor allows independently constructing a `preprocessed_table` and sharing it among
   * multiple comparators.
   *
   * The preprocessed_table(s) should have been pre-generated together using the factory function
   * `preprocessed_table::create(table_view const&, table_view const&)`. Otherwise, the comparison
   * results between two tables may be incorrect.
   *
   * @param left A table preprocessed for lexicographic comparison
   * @param right A table preprocessed for lexicographic comparison
   */
  two_table_comparator(std::shared_ptr<preprocessed_table> left,
                       std::shared_ptr<preprocessed_table> right)
    : d_left_table{std::move(left)}, d_right_table{std::move(right)}
  {
  }

  /**
   * @brief Return the binary operator for comparing rows in the table.
   *
   * Returns a binary callable, `F`, with signatures
   * `bool F(lhs_index_type, rhs_index_type)` and
   * `bool F(rhs_index_type, lhs_index_type)`.
   *
   * `F(lhs_index_type i, rhs_index_type j)` returns true if and only if row
   * `i` of the left table compares lexicographically less than row `j` of the
   * right table.
   *
   * Similarly, `F(rhs_index_type i, lhs_index_type j)` returns true if and
   * only if row `i` of the right table compares lexicographically less than row
   * `j` of the left table.
   *
   * @note The operator overloads in sub-class `element_comparator` are templated via the
   *       `type_dispatcher` to help select an overload instance for each column in a table.
   *       So, `cudf::is_nested<Element>` will return `true` if the table has nested-type columns,
   *       but it will be a runtime error if template parameter `has_nested_columns != true`.
   *
   * @tparam has_nested_columns compile-time optimization for primitive types.
   *         This template parameter is to be used by the developer by querying
   *         `cudf::detail::has_nested_columns(input)`. `true` compiles operator
   *         overloads for nested types, while `false` only compiles operator
   *         overloads for primitive types.
   * @tparam Nullate A cudf::nullate type describing whether to check for nulls.
   * @tparam PhysicalElementComparator A relational comparator functor that compares individual
   *         values rather than logical elements, defaults to `NaN` aware relational comparator
   *         that evaluates `NaN` as greater than all other values.
   * @throw cudf::logic_error if the input tables were preprocessed to transform any nested children
   *        columns into integer columns but `PhysicalElementComparator` is not
   *        `sorting_physical_element_comparator`.
   * @param nullate Indicates if any input column contains nulls.
   * @param comparator Physical element relational comparison functor.
   * @return A binary callable object.
   */
  template <bool has_nested_columns,
            typename Nullate,
            typename PhysicalElementComparator = sorting_physical_element_comparator>
  auto less(Nullate nullate = {}, PhysicalElementComparator comparator = {}) const
  {
    // Both tables must have been preprocessed compatibly with this comparator type.
    d_left_table->check_physical_element_comparator<PhysicalElementComparator>();
    d_right_table->check_physical_element_comparator<PhysicalElementComparator>();

    // The strong index adapter provides both (lhs, rhs) and (rhs, lhs) call signatures.
    return less_comparator{strong_index_comparator_adapter{
      device_row_comparator<has_nested_columns, Nullate, PhysicalElementComparator>{
        nullate,
        *d_left_table,
        *d_right_table,
        d_left_table->dremel_device_views(),
        d_right_table->dremel_device_views(),
        d_left_table->depths(),
        d_left_table->column_order(),
        d_left_table->null_precedence(),
        comparator}}};
  }

  /// @copydoc less()
  template <bool has_nested_columns,
            typename Nullate,
            typename PhysicalElementComparator = sorting_physical_element_comparator>
  auto less_equivalent(Nullate nullate = {}, PhysicalElementComparator comparator = {}) const
  {
    // Both tables must have been preprocessed compatibly with this comparator type.
    d_left_table->check_physical_element_comparator<PhysicalElementComparator>();
    d_right_table->check_physical_element_comparator<PhysicalElementComparator>();

    // Same wiring as less(), but EQUIVALENT also maps to true.
    return less_equivalent_comparator{strong_index_comparator_adapter{
      device_row_comparator<has_nested_columns, Nullate, PhysicalElementComparator>{
        nullate,
        *d_left_table,
        *d_right_table,
        d_left_table->dremel_device_views(),
        d_right_table->dremel_device_views(),
        d_left_table->depths(),
        d_left_table->column_order(),
        d_left_table->null_precedence(),
        comparator}}};
  }

 private:
  std::shared_ptr<preprocessed_table> d_left_table;   ///< Preprocessed left table
  std::shared_ptr<preprocessed_table> d_right_table;  ///< Preprocessed right table
};
} // namespace lexicographic
namespace hash {
class row_hasher;
} // namespace hash
namespace equality {
/**
 * @brief Equality comparator functor that compares physical values rather than logical
 * elements like lists, strings, or structs. It evaluates `NaN` not equal to all other values for
 * IEEE-754 compliance.
 */
struct physical_equality_comparator {
  /**
   * @brief Operator for equality comparisons.
   *
   * Note that `NaN != NaN`, following IEEE-754.
   *
   * @param lhs First element
   * @param rhs Second element
   * @return `true` if `lhs == rhs` else `false`
   */
  template <typename Element>
  __device__ constexpr bool operator()(Element const lhs, Element const rhs) const noexcept
  {
    // Defers entirely to the element type's operator==, so floating-point NaNs compare unequal.
    return lhs == rhs;
  }
};
/**
 * @brief Equality comparator functor that compares physical values rather than logical
 * elements like lists, strings, or structs. It evaluates `NaN` as equal to other `NaN`s.
 */
struct nan_equal_physical_equality_comparator {
  /**
   * @brief Operator for equality comparison of non-floating point values.
   *
   * @param lhs First element
   * @param rhs Second element
   * @return `true` if `lhs == rhs` else `false`
   */
  template <typename Element, CUDF_ENABLE_IF(not std::is_floating_point_v<Element>)>
  __device__ constexpr bool operator()(Element const lhs, Element const rhs) const noexcept
  {
    return lhs == rhs;
  }

  /**
   * @brief Operator for equality comparison of floating point values.
   *
   * Note that `NaN == NaN`.
   *
   * @param lhs First element
   * @param rhs Second element
   * @return `true` if `lhs` == `rhs` else `false`
   */
  template <typename Element, CUDF_ENABLE_IF(std::is_floating_point_v<Element>)>
  __device__ constexpr bool operator()(Element const lhs, Element const rhs) const noexcept
  {
    // Two NaNs compare equal here; otherwise defer to the built-in comparison.
    return (isnan(lhs) and isnan(rhs)) or (lhs == rhs);
  }
};
/**
* @brief Computes the equality comparison between 2 rows.
*
* Equality is determined by comparing rows element by element. The first mismatching element
* returns false, representing unequal rows. If the rows are compared without mismatched elements,
* the rows are equal.
*
* @note The operator overloads in sub-class `element_comparator` are templated via the
* `type_dispatcher` to help select an overload instance for each column in a table.
* So, `cudf::is_nested<Element>` will return `true` if the table has nested-type columns,
* but it will be a runtime error if template parameter `has_nested_columns != true`.
*
* @tparam has_nested_columns compile-time optimization for primitive types.
* This template parameter is to be used by the developer by querying
* `cudf::detail::has_nested_columns(input)`. `true` compiles operator
* overloads for nested types, while `false` only compiles operator
* overloads for primitive types.
* @tparam Nullate A cudf::nullate type describing whether to check for nulls.
* @tparam PhysicalEqualityComparator A equality comparator functor that compares individual values
* rather than logical elements, defaults to a comparator for which `NaN == NaN`.
*/
template <bool has_nested_columns,
          typename Nullate,
          typename PhysicalEqualityComparator = nan_equal_physical_equality_comparator>
class device_row_comparator {
  friend class self_comparator;       ///< Allow self_comparator to access private members
  friend class two_table_comparator;  ///< Allow two_table_comparator to access private members
 public:
  /**
   * @brief Checks whether the row at `lhs_index` in the `lhs` table is equal to the row at
   * `rhs_index` in the `rhs` table.
   *
   * @param lhs_index The index of the row in the `lhs` table to examine
   * @param rhs_index The index of the row in the `rhs` table to examine
   * @return `true` if row from the `lhs` table is equal to the row in the `rhs` table
   */
  __device__ constexpr bool operator()(size_type const lhs_index,
                                       size_type const rhs_index) const noexcept
  {
    // Compare one pair of corresponding columns at a time; the rows are equal
    // only if every column pair compares equal.
    auto equal_elements = [=](column_device_view l, column_device_view r) {
      return cudf::type_dispatcher(
        l.type(),
        element_comparator{check_nulls, l, r, nulls_are_equal, comparator},
        lhs_index,
        rhs_index);
    };
    return thrust::equal(thrust::seq, lhs.begin(), lhs.end(), rhs.begin(), equal_elements);
  }
 private:
  /**
   * @brief Construct a function object for performing equality comparison between the rows of two
   * tables.
   *
   * @param check_nulls Indicates if any input column contains nulls.
   * @param lhs The first table
   * @param rhs The second table (may be the same table as `lhs`)
   * @param nulls_are_equal Indicates if two null elements are treated as equivalent
   * @param comparator Physical element equality comparison functor.
   */
  device_row_comparator(Nullate check_nulls,
                        table_device_view lhs,
                        table_device_view rhs,
                        null_equality nulls_are_equal          = null_equality::EQUAL,
                        PhysicalEqualityComparator comparator = {}) noexcept
    : lhs{lhs},
      rhs{rhs},
      check_nulls{check_nulls},
      nulls_are_equal{nulls_are_equal},
      comparator{comparator}
  {
  }
  /**
   * @brief Performs an equality comparison between two elements in two columns.
   */
  class element_comparator {
   public:
    /**
     * @brief Construct type-dispatched function object for comparing equality
     * between two elements.
     *
     * @note `lhs` and `rhs` may be the same.
     *
     * @param check_nulls Indicates if either input column contains nulls.
     * @param lhs The column containing the first element
     * @param rhs The column containing the second element (may be the same as lhs)
     * @param nulls_are_equal Indicates if two null elements are treated as equivalent
     * @param comparator Physical element equality comparison functor.
     */
    __device__ element_comparator(Nullate check_nulls,
                                  column_device_view lhs,
                                  column_device_view rhs,
                                  null_equality nulls_are_equal          = null_equality::EQUAL,
                                  PhysicalEqualityComparator comparator = {}) noexcept
      : lhs{lhs},
        rhs{rhs},
        check_nulls{check_nulls},
        nulls_are_equal{nulls_are_equal},
        comparator{comparator}
    {
    }
    /**
     * @brief Compares the specified elements for equality.
     *
     * @param lhs_element_index The index of the first element
     * @param rhs_element_index The index of the second element
     * @return True if lhs and rhs are equal or if both lhs and rhs are null and nulls are
     * considered equal (`nulls_are_equal` == `null_equality::EQUAL`)
     */
    template <typename Element, CUDF_ENABLE_IF(cudf::is_equality_comparable<Element, Element>())>
    __device__ bool operator()(size_type const lhs_element_index,
                               size_type const rhs_element_index) const noexcept
    {
      if (check_nulls) {
        bool const lhs_is_null{lhs.is_null(lhs_element_index)};
        bool const rhs_is_null{rhs.is_null(rhs_element_index)};
        if (lhs_is_null and rhs_is_null) {
          return nulls_are_equal == null_equality::EQUAL;
        } else if (lhs_is_null != rhs_is_null) {
          // Exactly one side is null: never equal.
          return false;
        }
      }
      // Both elements valid: delegate to the physical value comparator.
      return comparator(lhs.element<Element>(lhs_element_index),
                        rhs.element<Element>(rhs_element_index));
    }
    // Fallback overload for types that are not equality-comparable and are not
    // handled by the nested-type overload below.
    template <typename Element,
              CUDF_ENABLE_IF(not cudf::is_equality_comparable<Element, Element>() and
                             (not has_nested_columns or not cudf::is_nested<Element>())),
              typename... Args>
    __device__ bool operator()(Args...)
    {
      CUDF_UNREACHABLE("Attempted to compare elements of uncomparable types.");
    }
    // Overload for nested (STRUCT/LIST) elements; only compiled when
    // `has_nested_columns` is true.
    template <typename Element, CUDF_ENABLE_IF(has_nested_columns and cudf::is_nested<Element>())>
    __device__ bool operator()(size_type const lhs_element_index,
                               size_type const rhs_element_index) const noexcept
    {
      // Slice out just the single row being compared, then walk down the nested hierarchy.
      column_device_view lcol = lhs.slice(lhs_element_index, 1);
      column_device_view rcol = rhs.slice(rhs_element_index, 1);
      while (lcol.type().id() == type_id::STRUCT || lcol.type().id() == type_id::LIST) {
        if (check_nulls) {
          auto lvalid = detail::make_validity_iterator<true>(lcol);
          auto rvalid = detail::make_validity_iterator<true>(rcol);
          if (nulls_are_equal == null_equality::UNEQUAL) {
            // Any null at this level makes the rows unequal.
            if (thrust::any_of(
                  thrust::seq, lvalid, lvalid + lcol.size(), thrust::logical_not<bool>()) or
                thrust::any_of(
                  thrust::seq, rvalid, rvalid + rcol.size(), thrust::logical_not<bool>())) {
              return false;
            }
          } else {
            // Null masks must match element-wise when nulls compare equal.
            if (not thrust::equal(thrust::seq, lvalid, lvalid + lcol.size(), rvalid)) {
              return false;
            }
          }
        }
        if (lcol.type().id() == type_id::STRUCT) {
          if (lcol.num_child_columns() == 0) { return true; }
          // Non-empty structs are assumed to be decomposed and contain only one child
          lcol = detail::structs_column_device_view(lcol).get_sliced_child(0);
          rcol = detail::structs_column_device_view(rcol).get_sliced_child(0);
        } else if (lcol.type().id() == type_id::LIST) {
          auto l_list_col = detail::lists_column_device_view(lcol);
          auto r_list_col = detail::lists_column_device_view(rcol);
          auto lsizes     = make_list_size_iterator(l_list_col);
          auto rsizes     = make_list_size_iterator(r_list_col);
          // Lists with different per-row element counts cannot be equal.
          if (not thrust::equal(thrust::seq, lsizes, lsizes + lcol.size(), rsizes)) {
            return false;
          }
          lcol = l_list_col.get_sliced_child();
          rcol = r_list_col.get_sliced_child();
          if (lcol.size() != rcol.size()) { return false; }
        }
      }
      // Reached the leaf (non-nested) level: compare all remaining elements serially.
      auto comp = column_comparator{
        element_comparator{check_nulls, lcol, rcol, nulls_are_equal, comparator}, lcol.size()};
      return type_dispatcher<dispatch_void_if_nested>(lcol.type(), comp);
    }
   private:
    /**
     * @brief Serially compare two columns for equality.
     *
     * When we want to get the equivalence of two columns by serially comparing all elements in
     * one column with the corresponding elements in the other column, this saves us from type
     * dispatching for each individual element in the range
     */
    struct column_comparator {
      element_comparator const comp;  ///< Element comparator for the two leaf columns
      size_type const size;           ///< Number of elements to compare
      /**
       * @brief Serially compare two columns for equality.
       *
       * @return True if ALL elements compare equal, false otherwise
       */
      template <typename Element, CUDF_ENABLE_IF(cudf::is_equality_comparable<Element, Element>())>
      __device__ bool operator()() const noexcept
      {
        return thrust::all_of(thrust::seq,
                              thrust::make_counting_iterator(0),
                              thrust::make_counting_iterator(0) + size,
                              [=](auto i) { return comp.template operator()<Element>(i, i); });
      }
      // Fallback for leaf types that cannot be compared.
      template <typename Element,
                CUDF_ENABLE_IF(not cudf::is_equality_comparable<Element, Element>()),
                typename... Args>
      __device__ bool operator()(Args...) const noexcept
      {
        CUDF_UNREACHABLE("Attempted to compare elements of uncomparable types.");
      }
    };
    column_device_view const lhs;                 ///< Column holding the left-hand elements
    column_device_view const rhs;                 ///< Column holding the right-hand elements
    Nullate const check_nulls;                    ///< Whether to check element validity
    null_equality const nulls_are_equal;          ///< Whether two nulls compare equal
    PhysicalEqualityComparator const comparator;  ///< Value-level equality functor
  };
  table_device_view const lhs;                  ///< The left-hand table
  table_device_view const rhs;                  ///< The right-hand table (may alias `lhs`)
  Nullate const check_nulls;                    ///< Whether any input column may contain nulls
  null_equality const nulls_are_equal;          ///< Whether two nulls compare equal
  PhysicalEqualityComparator const comparator;  ///< Value-level equality functor
};
/**
* @brief Preprocessed table for use with row equality comparison or row hashing
*
*/
struct preprocessed_table {
  /**
   * @brief Factory to construct preprocessed_table for use with
   * row equality comparison or row hashing
   *
   * Sets up the table for use with row equality comparison or row hashing. The resulting
   * preprocessed table can be passed to the constructor of `equality::self_comparator` to
   * avoid preprocessing again.
   *
   * @param table The table to preprocess
   * @param stream The cuda stream to use while preprocessing.
   * @return A preprocessed table as shared pointer
   */
  static std::shared_ptr<preprocessed_table> create(table_view const& table,
                                                    rmm::cuda_stream_view stream);
 private:
  friend class self_comparator;       ///< Allow self_comparator to access private members
  friend class two_table_comparator;  ///< Allow two_table_comparator to access private members
  friend class hash::row_hasher;      ///< Allow row_hasher to access private members
  using table_device_view_owner =
    std::invoke_result_t<decltype(table_device_view::create), table_view, rmm::cuda_stream_view>;
  /**
   * @brief Construct from a preprocessed device table view and the buffers backing it.
   *
   * @param table Owning pointer to the device view of the preprocessed table
   * @param null_buffers Null-mask buffers kept alive for the lifetime of the device view
   * @param tmp_columns Temporary columns kept alive for the lifetime of the device view
   */
  preprocessed_table(table_device_view_owner&& table,
                     std::vector<rmm::device_buffer>&& null_buffers,
                     std::vector<std::unique_ptr<column>>&& tmp_columns)
    : _t(std::move(table)),
      _null_buffers(std::move(null_buffers)),
      _tmp_columns(std::move(tmp_columns))
  {
  }
  /**
   * @brief Implicit conversion operator to a `table_device_view` of the preprocessed table.
   *
   * @return table_device_view
   */
  operator table_device_view() { return *_t; }
  table_device_view_owner _t;                         ///< Device view of the preprocessed table
  std::vector<rmm::device_buffer> _null_buffers;      ///< Owned null masks backing the view
  std::vector<std::unique_ptr<column>> _tmp_columns;  ///< Owned temporary columns backing the view
};
/**
* @brief Comparator for performing equality comparisons between two rows of the same table.
*
*/
class self_comparator {
 public:
  /**
   * @brief Construct an owning object for performing equality comparisons between two rows of the
   * same table.
   *
   * @param t The table to compare
   * @param stream The stream to construct this object on. Not the stream that will be used for
   * comparisons using this object.
   */
  self_comparator(table_view const& t, rmm::cuda_stream_view stream)
    : d_t(preprocessed_table::create(t, stream))
  {
  }
  /**
   * @brief Construct an owning object for performing equality comparisons between two rows of the
   * same table.
   *
   * This constructor allows independently constructing a `preprocessed_table` and sharing it among
   * multiple comparators.
   *
   * @param t A table preprocessed for equality comparison
   */
  self_comparator(std::shared_ptr<preprocessed_table> t) : d_t{std::move(t)} {}
  /**
   * @brief Get the comparison operator to use on the device
   *
   * Returns a binary callable, `F`, with signature `bool F(size_type, size_type)`.
   *
   * `F(i,j)` returns true if and only if row `i` compares equal to row `j`.
   *
   * @note The operator overloads in sub-class `element_comparator` are templated via the
   * `type_dispatcher` to help select an overload instance for each column in a table.
   * So, `cudf::is_nested<Element>` will return `true` if the table has nested-type columns,
   * but it will be a runtime error if template parameter `has_nested_columns != true`.
   *
   * @tparam has_nested_columns compile-time optimization for primitive types.
   * This template parameter is to be used by the developer by querying
   * `cudf::detail::has_nested_columns(input)`. `true` compiles operator
   * overloads for nested types, while `false` only compiles operator
   * overloads for primitive types.
   * @tparam Nullate A cudf::nullate type describing whether to check for nulls.
   * @tparam PhysicalEqualityComparator A equality comparator functor that compares individual
   * values rather than logical elements, defaults to a comparator for which `NaN == NaN`.
   * @param nullate Indicates if any input column contains nulls.
   * @param nulls_are_equal Indicates if nulls are equal.
   * @param comparator Physical element equality comparison functor.
   * @return A binary callable object
   */
  template <bool has_nested_columns,
            typename Nullate,
            typename PhysicalEqualityComparator = nan_equal_physical_equality_comparator>
  auto equal_to(Nullate nullate                        = {},
                null_equality nulls_are_equal          = null_equality::EQUAL,
                PhysicalEqualityComparator comparator = {}) const noexcept
  {
    // Both sides of the comparison are the same preprocessed table.
    return device_row_comparator<has_nested_columns, Nullate, PhysicalEqualityComparator>{
      nullate, *d_t, *d_t, nulls_are_equal, comparator};
  }
 private:
  std::shared_ptr<preprocessed_table> d_t;  ///< Shared preprocessed table being compared
};
// @cond
/**
 * @brief Adapts a row comparator so it accepts strongly-typed lhs/rhs indices in either order.
 */
template <typename Comparator>
struct strong_index_comparator_adapter {
  strong_index_comparator_adapter(Comparator const& comparator) : comparator{comparator} {}
  __device__ constexpr bool operator()(lhs_index_type const lhs_index,
                                       rhs_index_type const rhs_index) const noexcept
  {
    // Strip the strong index types and forward to the wrapped comparator.
    auto const left  = static_cast<cudf::size_type>(lhs_index);
    auto const right = static_cast<cudf::size_type>(rhs_index);
    return comparator(left, right);
  }
  __device__ constexpr bool operator()(rhs_index_type const rhs_index,
                                       lhs_index_type const lhs_index) const noexcept
  {
    // Equality is symmetric: delegate to the (lhs, rhs) overload.
    return (*this)(lhs_index, rhs_index);
  }
  Comparator const comparator;  ///< The wrapped comparator taking raw size_type indices
};
// @endcond
/**
* @brief An owning object that can be used to equality compare rows of two different tables.
*
* This class takes two table_views and preprocesses certain columns to allow for equality
* comparison. The preprocessed table and temporary data required for the comparison are created and
* owned by this class.
*
* Alternatively, `two_table_comparator` can be constructed from two existing
* `shared_ptr<preprocessed_table>`s when sharing the same tables among multiple comparators.
*
* This class can then provide a functor object that can used on the device.
* The object of this class must outlive the usage of the device functor.
*/
class two_table_comparator {
 public:
  /**
   * @brief Construct an owning object for performing equality comparisons between two rows from two
   * tables.
   *
   * The left and right table are expected to have the same number of columns and data types for
   * each column.
   *
   * @param left The left table to compare.
   * @param right The right table to compare.
   * @param stream The stream to construct this object on. Not the stream that will be used for
   * comparisons using this object.
   */
  two_table_comparator(table_view const& left,
                       table_view const& right,
                       rmm::cuda_stream_view stream);
  /**
   * @brief Construct an owning object for performing equality comparisons between two rows from two
   * tables.
   *
   * This constructor allows independently constructing a `preprocessed_table` and sharing it among
   * multiple comparators.
   *
   * @param left The left table preprocessed for equality comparison.
   * @param right The right table preprocessed for equality comparison.
   */
  two_table_comparator(std::shared_ptr<preprocessed_table> left,
                       std::shared_ptr<preprocessed_table> right)
    : d_left_table{std::move(left)}, d_right_table{std::move(right)}
  {
  }
  /**
   * @brief Return the binary operator for comparing rows in the table.
   *
   * Returns a binary callable, `F`, with signatures `bool F(lhs_index_type, rhs_index_type)` and
   * `bool F(rhs_index_type, lhs_index_type)`.
   *
   * `F(lhs_index_type i, rhs_index_type j)` returns true if and only if row `i` of the left table
   * compares equal to row `j` of the right table.
   *
   * Similarly, `F(rhs_index_type i, lhs_index_type j)` returns true if and only if row `i` of the
   * right table compares equal to row `j` of the left table.
   *
   * @note The operator overloads in sub-class `element_comparator` are templated via the
   * `type_dispatcher` to help select an overload instance for each column in a table.
   * So, `cudf::is_nested<Element>` will return `true` if the table has nested-type columns,
   * but it will be a runtime error if template parameter `has_nested_columns != true`.
   *
   * @tparam has_nested_columns compile-time optimization for primitive types.
   * This template parameter is to be used by the developer by querying
   * `cudf::detail::has_nested_columns(input)`. `true` compiles operator
   * overloads for nested types, while `false` only compiles operator
   * overloads for primitive types.
   * @tparam Nullate A cudf::nullate type describing whether to check for nulls.
   * @tparam PhysicalEqualityComparator A equality comparator functor that compares individual
   * values rather than logical elements, defaults to a `NaN == NaN` equality comparator.
   * @param nullate Indicates if any input column contains nulls.
   * @param nulls_are_equal Indicates if nulls are equal.
   * @param comparator Physical element equality comparison functor.
   * @return A binary callable object
   */
  template <bool has_nested_columns,
            typename Nullate,
            typename PhysicalEqualityComparator = nan_equal_physical_equality_comparator>
  auto equal_to(Nullate nullate                        = {},
                null_equality nulls_are_equal          = null_equality::EQUAL,
                PhysicalEqualityComparator comparator = {}) const noexcept
  {
    // Wrap the two-table comparator so callers must use strongly-typed indices.
    return strong_index_comparator_adapter{
      device_row_comparator<has_nested_columns, Nullate, PhysicalEqualityComparator>(
        nullate, *d_left_table, *d_right_table, nulls_are_equal, comparator)};
  }
 private:
  std::shared_ptr<preprocessed_table> d_left_table;   ///< Preprocessed left table
  std::shared_ptr<preprocessed_table> d_right_table;  ///< Preprocessed right table
};
} // namespace equality
namespace hash {
/**
* @brief Computes the hash value of an element in the given column.
*
* @tparam hash_function Hash functor to use for hashing elements.
* @tparam Nullate A cudf::nullate type describing whether to check for nulls.
*/
template <template <typename> class hash_function, typename Nullate>
class element_hasher {
 public:
  /**
   * @brief Constructs an element_hasher object.
   *
   * @param nulls Indicates whether to check for nulls
   * @param seed The seed to use for the hash function
   * @param null_hash The hash value to use for nulls
   */
  __device__ element_hasher(
    Nullate nulls,
    uint32_t seed             = DEFAULT_HASH_SEED,
    hash_value_type null_hash = std::numeric_limits<hash_value_type>::max()) noexcept
    : _check_nulls(nulls), _seed(seed), _null_hash(null_hash)
  {
  }
  /**
   * @brief Returns the hash value of the given element.
   *
   * @tparam T The type of the element to hash
   * @param col The column to hash
   * @param row_index The index of the row to hash
   * @return The hash value of the given element
   */
  template <typename T, CUDF_ENABLE_IF(column_device_view::has_element_accessor<T>())>
  __device__ hash_value_type operator()(column_device_view const& col,
                                        size_type row_index) const noexcept
  {
    // Null elements hash to the fixed sentinel value instead of their contents.
    if (_check_nulls && col.is_null(row_index)) { return _null_hash; }
    return hash_function<T>{_seed}(col.element<T>(row_index));
  }
  /**
   * @brief Returns the hash value of the given element.
   *
   * @tparam T The type of the element to hash
   * @param col The column to hash
   * @param row_index The index of the row to hash
   * @return The hash value of the given element
   */
  template <typename T, CUDF_ENABLE_IF(not column_device_view::has_element_accessor<T>())>
  __device__ hash_value_type operator()(column_device_view const& col,
                                        size_type row_index) const noexcept
  {
    // Types without a device element accessor cannot be hashed.
    CUDF_UNREACHABLE("Unsupported type in hash.");
  }
  Nullate _check_nulls;        ///< Whether to check for nulls
  uint32_t _seed;              ///< The seed to use for hashing
  hash_value_type _null_hash;  ///< Hash value to use for null elements
};
/**
* @brief Computes the hash value of a row in the given table.
*
* @tparam hash_function Hash functor to use for hashing elements.
* @tparam Nullate A cudf::nullate type describing whether to check for nulls.
*/
template <template <typename> class hash_function, typename Nullate>
class device_row_hasher {
  friend class row_hasher;  ///< Allow row_hasher to access private members.
 public:
  /**
   * @brief Return the hash value of a row in the given table.
   *
   * @param row_index The row index to compute the hash value of
   * @return The hash value of the row
   */
  __device__ auto operator()(size_type row_index) const noexcept
  {
    // Dispatch a per-column hasher over every column of the row.
    auto it = thrust::make_transform_iterator(_table.begin(), [=](auto const& column) {
      return cudf::type_dispatcher<dispatch_storage_type>(
        column.type(),
        element_hasher_adapter<hash_function>{_check_nulls, _seed},
        column,
        row_index);
    });
    // Hash each element and combine all the hash values together
    return detail::accumulate(it, it + _table.num_columns(), _seed, [](auto hash, auto h) {
      return cudf::hashing::detail::hash_combine(hash, h);
    });
  }
 private:
  /**
   * @brief Computes the hash value of an element in the given column.
   *
   * When the column is non-nested, this is a simple wrapper around the element_hasher.
   * When the column is nested, this uses the element_hasher to hash the shape and values of the
   * column.
   */
  template <template <typename> class hash_fn>
  class element_hasher_adapter {
    static constexpr hash_value_type NULL_HASH     = std::numeric_limits<hash_value_type>::max();
    static constexpr hash_value_type NON_NULL_HASH = 0;  // marker combined in for valid elements
   public:
    __device__ element_hasher_adapter(Nullate check_nulls, uint32_t seed) noexcept
      : _element_hasher(check_nulls, seed), _check_nulls(check_nulls)
    {
    }
    // Non-nested columns: defer directly to the element hasher.
    template <typename T, CUDF_ENABLE_IF(not cudf::is_nested<T>())>
    __device__ hash_value_type operator()(column_device_view const& col,
                                          size_type row_index) const noexcept
    {
      return _element_hasher.template operator()<T>(col, row_index);
    }
    // Nested (STRUCT/LIST) columns: hash the validity and shape at each level,
    // then hash the leaf-level elements.
    template <typename T, CUDF_ENABLE_IF(cudf::is_nested<T>())>
    __device__ hash_value_type operator()(column_device_view const& col,
                                          size_type row_index) const noexcept
    {
      auto hash                  = hash_value_type{0};
      column_device_view curr_col = col.slice(row_index, 1);
      while (curr_col.type().id() == type_id::STRUCT || curr_col.type().id() == type_id::LIST) {
        if (_check_nulls) {
          // Fold each element's validity into the hash at this nesting level.
          auto validity_it = detail::make_validity_iterator<true>(curr_col);
          hash             = detail::accumulate(
            validity_it, validity_it + curr_col.size(), hash, [](auto hash, auto is_valid) {
              return cudf::hashing::detail::hash_combine(hash,
                                                         is_valid ? NON_NULL_HASH : NULL_HASH);
            });
        }
        if (curr_col.type().id() == type_id::STRUCT) {
          if (curr_col.num_child_columns() == 0) { return hash; }
          // Non-empty structs are assumed to be decomposed and contain only one child
          curr_col = detail::structs_column_device_view(curr_col).get_sliced_child(0);
        } else if (curr_col.type().id() == type_id::LIST) {
          // Fold the per-row list sizes (the list "shape") into the hash.
          auto list_col   = detail::lists_column_device_view(curr_col);
          auto list_sizes = make_list_size_iterator(list_col);
          hash            = detail::accumulate(
            list_sizes, list_sizes + list_col.size(), hash, [](auto hash, auto size) {
              return cudf::hashing::detail::hash_combine(hash, hash_fn<size_type>{}(size));
            });
          curr_col = list_col.get_sliced_child();
        }
      }
      // Leaf level: hash every remaining (non-nested) element into the result.
      for (int i = 0; i < curr_col.size(); ++i) {
        hash = cudf::hashing::detail::hash_combine(
          hash,
          type_dispatcher<dispatch_void_if_nested>(curr_col.type(), _element_hasher, curr_col, i));
      }
      return hash;
    }
    element_hasher<hash_fn, Nullate> const _element_hasher;  ///< Hasher for leaf elements
    Nullate const _check_nulls;                              ///< Whether to hash validity
  };
  CUDF_HOST_DEVICE device_row_hasher(Nullate check_nulls,
                                     table_device_view t,
                                     uint32_t seed = DEFAULT_HASH_SEED) noexcept
    : _check_nulls{check_nulls}, _table{t}, _seed(seed)
  {
  }
  Nullate const _check_nulls;     ///< Whether any input column may contain nulls
  table_device_view const _table; ///< The table whose rows are hashed
  uint32_t const _seed;           ///< Seed folded into every row hash
};
// Inject row::equality::preprocessed_table into the row::hash namespace
// As a result, row::equality::preprocessed_table and row::hash::preprocessed table are the same
// type and are interchangeable.
using preprocessed_table = row::equality::preprocessed_table;
/**
* @brief Computes the hash value of a row in the given table.
*
*/
class row_hasher {
 public:
  /**
   * @brief Construct an owning object for hashing the rows of a table
   *
   * @param t The table containing rows to hash
   * @param stream The stream to construct this object on. Not the stream that will be used for
   * comparisons using this object.
   */
  row_hasher(table_view const& t, rmm::cuda_stream_view stream)
    : d_t(preprocessed_table::create(t, stream))
  {
  }
  /**
   * @brief Construct an owning object for hashing the rows of a table from an existing
   * preprocessed_table
   *
   * This constructor allows independently constructing a `preprocessed_table` and sharing it among
   * multiple `row_hasher` and `equality::self_comparator` objects.
   *
   * @param t A table preprocessed for hashing or equality.
   */
  row_hasher(std::shared_ptr<preprocessed_table> t) : d_t{std::move(t)} {}
  /**
   * @brief Get the hash operator to use on the device
   *
   * Returns a unary callable, `F`, with signature `hash_function::hash_value_type F(size_type)`.
   *
   * `F(i)` returns the hash of row i.
   *
   * @tparam Nullate A cudf::nullate type describing whether to check for nulls
   * @param nullate Indicates if any input column contains nulls
   * @param seed The seed to use for the hash function
   * @return A hash operator to use on the device
   */
  template <template <typename> class hash_function = cudf::hashing::detail::default_hash,
            template <template <typename> class, typename>
            class DeviceRowHasher = device_row_hasher,
            typename Nullate>
  DeviceRowHasher<hash_function, Nullate> device_hasher(Nullate nullate = {},
                                                        uint32_t seed = DEFAULT_HASH_SEED) const
  {
    // The device hasher holds a non-owning view; this row_hasher must outlive it.
    return DeviceRowHasher<hash_function, Nullate>(nullate, *d_t, seed);
  }
 private:
  std::shared_ptr<preprocessed_table> d_t;  ///< Shared preprocessed table being hashed
};
} // namespace hash
} // namespace row
} // namespace experimental
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/string_view.cuh
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/strings/detail/utf8.hpp>
#include <cudf/strings/string_view.hpp>
#ifndef __CUDA_ARCH__
#include <cudf/utilities/error.hpp>
#endif
// This is defined when including this header in a https://github.com/NVIDIA/jitify
// or jitify2 source file. The jitify cannot include thrust headers at this time.
#ifndef CUDF_JIT_UDF
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#endif
#include <algorithm>
// This file should only include device code logic.
// Host-only or host/device code should be defined in the string_view.hpp header file.
namespace cudf {
namespace strings {
namespace detail {
/**
* @brief Return the number of UTF-8 characters in this provided char array.
*
* @param str String with encoded char bytes.
* @param bytes Number of bytes in str.
* @return The number of characters in the array.
*/
__device__ inline size_type characters_in_string(char const* str, size_type bytes)
{
  // A null or empty buffer contains no characters.
  if ((str == nullptr) || (bytes == 0)) return 0;
  auto ptr = reinterpret_cast<uint8_t const*>(str);
#ifndef CUDF_JIT_UDF
  // Count the bytes that begin a UTF-8 character (i.e. skip continuation bytes).
  return thrust::count_if(
    thrust::seq, ptr, ptr + bytes, [](uint8_t chr) { return is_begin_utf8_char(chr); });
#else
  // JIT compilation cannot include thrust headers (see note at top of file);
  // count the UTF-8 lead bytes with a plain loop instead.
  size_type chars = 0;
  auto const end  = ptr + bytes;
  while (ptr < end) {
    chars += is_begin_utf8_char(*ptr++);
  }
  return chars;
#endif
}
/**
* @brief Count the bytes to a specified character position
*
* Returns the number of bytes and any left over position value.
* The returned position is > 0 if the given position would read past
* the end of the input string.
*
* @param d_str Input string to count bytes within
* @param pos Character position to count to
* @return The number of bytes and the left over non-counted position value
*/
__device__ inline std::pair<size_type, size_type> bytes_to_character_position(string_view d_str,
                                                                              size_type pos)
{
  // Walk the encoded bytes, consuming one character from `pos` at every UTF-8 lead byte.
  size_type byte_count = 0;
  auto itr             = d_str.data();
  auto const end       = itr + d_str.size_bytes();
  while ((pos > 0) && (itr < end)) {
    auto const char_width = strings::detail::bytes_in_utf8_byte(static_cast<uint8_t>(*itr++));
    if (char_width > 0) { --pos; }
    byte_count += char_width;
  }
  // Any remainder in `pos` means the requested position was past the end of the string.
  return {byte_count, pos};
}
/**
 * @brief string value for sentinel which is used in min, max reduction
 * operators
 *
 * This sentinel string value is the highest possible valid UTF-8 encoded
 * character. This serves as identity value for maximum operator on string
 * values. Also, this char pointer serves as valid device pointer of identity
 * value for minimum operator on string values.
 */
// Stored in CUDA constant memory: 4 sentinel bytes plus the implicit terminating NUL.
static __constant__ char max_string_sentinel[5]{"\xF7\xBF\xBF\xBF"};
} // namespace detail
} // namespace strings
/**
* @brief Return minimum value associated with the string type
*
* This function is needed to be host callable because it is called by a host
* callable function DeviceMax::identity<string_view>()
*
* @return An empty string
*/
// A default-constructed (empty) string_view serves as the minimum string value.
CUDF_HOST_DEVICE inline string_view string_view::min() { return string_view(); }
/**
* @brief Return maximum value associated with the string type
*
* This function is needed to be host callable because it is called by a host
* callable function DeviceMin::identity<string_view>()
*
* @return A string value which represents the highest possible valid UTF-8 encoded
* character.
*/
CUDF_HOST_DEVICE inline string_view string_view::max()
{
  char const* psentinel{nullptr};
#if defined(__CUDA_ARCH__)
  // Device code can reference the constant-memory symbol directly.
  psentinel = &cudf::strings::detail::max_string_sentinel[0];
#else
  // Host code must resolve the device address of the constant-memory symbol.
  CUDF_CUDA_TRY(
    cudaGetSymbolAddress((void**)&psentinel, cudf::strings::detail::max_string_sentinel));
#endif
  return string_view(psentinel, 4);
}
__device__ inline size_type string_view::length() const
{
  // Lazily compute the character count on first use and cache it in _length.
  if (_length == UNKNOWN_STRING_LENGTH)
    _length = strings::detail::characters_in_string(_data, _bytes);
  return _length;
}
// @cond
// this custom iterator knows about UTF8 encoding
// Construct from a character position; the matching byte offset is computed from the string.
__device__ inline string_view::const_iterator::const_iterator(string_view const& str, size_type pos)
  : p{str.data()}, bytes{str.size_bytes()}, char_pos{pos}, byte_pos{str.byte_offset(pos)}
{
}
// Construct from a character position with the byte offset already known (avoids recounting).
__device__ inline string_view::const_iterator::const_iterator(string_view const& str,
                                                              size_type pos,
                                                              size_type offset)
  : p{str.data()}, bytes{str.size_bytes()}, char_pos{pos}, byte_pos{offset}
{
}
__device__ inline string_view::const_iterator& string_view::const_iterator::operator++()
{
  // Advance past all bytes of the current character; byte_pos stops at the end of the buffer.
  if (byte_pos < bytes)
    byte_pos += strings::detail::bytes_in_utf8_byte(static_cast<uint8_t>(p[byte_pos]));
  ++char_pos;
  return *this;
}
// Post-increment: return the iterator value prior to advancing.
__device__ inline string_view::const_iterator string_view::const_iterator::operator++(int)
{
  string_view::const_iterator tmp(*this);
  operator++();
  return tmp;
}
// Return a new iterator advanced (or retreated, for negative offsets) by `offset` characters.
__device__ inline string_view::const_iterator string_view::const_iterator::operator+(
  string_view::const_iterator::difference_type offset) const
{
  const_iterator result(*this);
  for (size_type remaining = abs(offset); remaining > 0; --remaining) {
    if (offset > 0) {
      ++result;
    } else {
      --result;
    }
  }
  return result;
}
// Move this iterator by `offset` characters; the sign of offset selects the direction.
__device__ inline string_view::const_iterator& string_view::const_iterator::operator+=(
  string_view::const_iterator::difference_type offset)
{
  for (size_type remaining = abs(offset); remaining > 0; --remaining) {
    if (offset > 0) {
      operator++();
    } else {
      operator--();
    }
  }
  return *this;
}
__device__ inline string_view::const_iterator& string_view::const_iterator::operator--()
{
  // Scan backwards past UTF-8 continuation bytes (width 0) to the previous lead byte.
  if (byte_pos > 0)
    while (strings::detail::bytes_in_utf8_byte(static_cast<uint8_t>(p[--byte_pos])) == 0)
      ;
  --char_pos;
  return *this;
}
// Post-decrement: return the iterator value prior to retreating.
__device__ inline string_view::const_iterator string_view::const_iterator::operator--(int)
{
  string_view::const_iterator tmp(*this);
  operator--();
  return tmp;
}
// Move this iterator backwards by `offset` characters (forwards for negative offsets).
__device__ inline string_view::const_iterator& string_view::const_iterator::operator-=(
  string_view::const_iterator::difference_type offset)
{
  for (size_type remaining = abs(offset); remaining > 0; --remaining) {
    if (offset > 0) {
      operator--();
    } else {
      operator++();
    }
  }
  return *this;
}
// Return a new iterator moved backwards by `offset` characters (forwards for negative offsets).
__device__ inline string_view::const_iterator string_view::const_iterator::operator-(
  string_view::const_iterator::difference_type offset) const
{
  const_iterator result(*this);
  for (size_type remaining = abs(offset); remaining > 0; --remaining) {
    if (offset > 0) {
      --result;
    } else {
      ++result;
    }
  }
  return result;
}
// Reposition the iterator to the given character position.
__device__ inline string_view::const_iterator& string_view::const_iterator::move_to(
  size_type new_pos)
{
  *this += (new_pos - char_pos);  // more efficient than recounting from the start
  return *this;
}
// Iterator comparisons. Note the relational operators (<, <=, >, >=) are only true for
// iterators over the same underlying buffer (`p` must match).
__device__ inline bool string_view::const_iterator::operator==(
  string_view::const_iterator const& rhs) const
{
  return (p == rhs.p) && (char_pos == rhs.char_pos);
}
__device__ inline bool string_view::const_iterator::operator!=(
  string_view::const_iterator const& rhs) const
{
  return (p != rhs.p) || (char_pos != rhs.char_pos);
}
__device__ inline bool string_view::const_iterator::operator<(
  string_view::const_iterator const& rhs) const
{
  return (p == rhs.p) && (char_pos < rhs.char_pos);
}
__device__ inline bool string_view::const_iterator::operator<=(
  string_view::const_iterator const& rhs) const
{
  return (p == rhs.p) && (char_pos <= rhs.char_pos);
}
__device__ inline bool string_view::const_iterator::operator>(
  string_view::const_iterator const& rhs) const
{
  return (p == rhs.p) && (char_pos > rhs.char_pos);
}
__device__ inline bool string_view::const_iterator::operator>=(
  string_view::const_iterator const& rhs) const
{
  return (p == rhs.p) && (char_pos >= rhs.char_pos);
}
// Decode and return the UTF-8 character at the iterator's current byte offset.
__device__ inline char_utf8 string_view::const_iterator::operator*() const
{
  char_utf8 chr = 0;
  strings::detail::to_char_utf8(p + byte_offset(), chr);
  return chr;
}
// Current character position within the string.
__device__ inline size_type string_view::const_iterator::position() const { return char_pos; }
// Current byte offset within the string.
__device__ inline size_type string_view::const_iterator::byte_offset() const { return byte_pos; }
__device__ inline string_view::const_iterator string_view::begin() const
{
  // Both character position and byte offset start at zero.
  return const_iterator(*this, 0, 0);
}
__device__ inline string_view::const_iterator string_view::end() const
{
  // The end iterator sits at the total character count and total byte size.
  return const_iterator(*this, length(), size_bytes());
}
// @endcond
// Returns the UTF-8 character at character position `pos`, or 0 if out of range.
__device__ inline char_utf8 string_view::operator[](size_type pos) const
{
  auto const offset = byte_offset(pos);
  // Guard against reading past the end of the buffer for out-of-range positions.
  if (offset >= _bytes) { return 0; }
  char_utf8 result = 0;
  strings::detail::to_char_utf8(data() + offset, result);
  return result;
}
// Converts a character position to its byte offset within this string.
__device__ inline size_type string_view::byte_offset(size_type pos) const
{
  // If every character is a single byte, character position equals byte offset.
  if (length() == size_bytes()) return pos;
  return std::get<0>(strings::detail::bytes_to_character_position(*this, pos));
}
// Lexicographic byte-wise comparison against another string_view.
// Returns <0, 0, or >0 in the style of strcmp.
__device__ inline int string_view::compare(string_view const& in) const
{
  return compare(in.data(), in.size_bytes());
}
// Lexicographic byte-wise comparison of this string against `bytes` bytes at `data`.
// Returns <0, 0, or >0 in the style of strcmp; if one string is a prefix of the
// other, the longer string compares greater.
__device__ inline int string_view::compare(char const* data, size_type bytes) const
{
  auto const* lhs        = reinterpret_cast<unsigned char const*>(this->data());
  auto const* rhs        = reinterpret_cast<unsigned char const*>(data);
  size_type const lhs_sz = size_bytes();
  // Same buffer with the same length is trivially equal.
  if ((lhs == rhs) && (bytes == lhs_sz)) { return 0; }
  // Compare over the common prefix, unsigned byte by unsigned byte.
  size_type const common = lhs_sz < bytes ? lhs_sz : bytes;
  for (size_type idx = 0; idx < common; ++idx) {
    if (lhs[idx] != rhs[idx]) {
      return static_cast<int32_t>(lhs[idx]) - static_cast<int32_t>(rhs[idx]);
    }
  }
  // Common prefix matched: ordering is decided by length.
  if (lhs_sz > bytes) { return 1; }
  if (lhs_sz < bytes) { return -1; }
  return 0;
}
// Equality short-circuits on byte length before doing the byte-wise compare.
__device__ inline bool string_view::operator==(string_view const& rhs) const
{
  return (size_bytes() == rhs.size_bytes()) && (compare(rhs) == 0);
}
__device__ inline bool string_view::operator!=(string_view const& rhs) const
{
  return compare(rhs) != 0;
}
// The relational operators below are thin wrappers over the strcmp-style
// result of compare().
__device__ inline bool string_view::operator<(string_view const& rhs) const
{
  return compare(rhs) < 0;
}
__device__ inline bool string_view::operator>(string_view const& rhs) const
{
  return compare(rhs) > 0;
}
__device__ inline bool string_view::operator<=(string_view const& rhs) const
{
  int rc = compare(rhs);
  return (rc == 0) || (rc < 0);
}
__device__ inline bool string_view::operator>=(string_view const& rhs) const
{
  int rc = compare(rhs);
  return (rc == 0) || (rc > 0);
}
// Finds the first occurrence of `str` within the character window [pos, pos+count).
// Returns the character position of the match or npos if not found.
__device__ inline size_type string_view::find(string_view const& str,
                                              size_type pos,
                                              size_type count) const
{
  return find(str.data(), str.size_bytes(), pos, count);
}
// Common implementation for find() (forward == true) and rfind() (forward == false).
// `str`/`bytes` is the target byte sequence; `pos` and `count` are in characters
// and define the search window [pos, pos+count) within this string.
// Returns the character position of the match, or npos if not found.
template <bool forward>
__device__ inline size_type string_view::find_impl(char const* str,
                                                   size_type bytes,
                                                   size_type pos,
                                                   size_type count) const
{
  auto const nchars = length();
  if (!str || pos < 0 || pos > nchars) return npos;
  if (count < 0) count = nchars;  // negative count means search to the end of the string
  // use iterator to help reduce character/byte counting
  auto itr = begin() + pos;
  auto const spos = itr.byte_offset();
  auto const epos = ((pos + count) < nchars) ? (itr + count).byte_offset() : size_bytes();
  // Number of candidate byte positions to test; when the target is longer than
  // the window this is <= 0 and the loop is skipped (npos is returned).
  auto const find_length = (epos - spos) - bytes + 1;
  // Forward search starts at the window's first byte; reverse search starts at
  // the last byte offset where the target could still fit.
  auto ptr = data() + (forward ? spos : (epos - bytes));
  for (size_type idx = 0; idx < find_length; ++idx) {
    bool match = true;
    for (size_type jdx = 0; match && (jdx < bytes); ++jdx) {
      match = (ptr[jdx] == str[jdx]);
    }
    if (match) { return forward ? pos : character_offset(epos - bytes - idx); }
    // use pos to record the current find position
    pos += strings::detail::is_begin_utf8_char(*ptr);
    forward ? ++ptr : --ptr;
  }
  return npos;
}
// Forward search for the byte sequence `str` of length `bytes` within the
// character window [pos, pos+count).
__device__ inline size_type string_view::find(char const* str,
                                              size_type bytes,
                                              size_type pos,
                                              size_type count) const
{
  return find_impl<true>(str, bytes, pos, count);
}
// Finds the first occurrence of the code point `chr` within [pos, pos+count).
__device__ inline size_type string_view::find(char_utf8 chr, size_type pos, size_type count) const
{
  // Encode the code point into its UTF-8 byte sequence and search for that.
  char encoded[sizeof(char_utf8)];
  auto const width = strings::detail::from_char_utf8(chr, encoded);
  return find(encoded, width, pos, count);
}
// Finds the last occurrence of `str` within the character window [pos, pos+count).
// Returns the character position of the match or npos if not found.
__device__ inline size_type string_view::rfind(string_view const& str,
                                               size_type pos,
                                               size_type count) const
{
  return rfind(str.data(), str.size_bytes(), pos, count);
}
// Reverse search for the byte sequence `str` of length `bytes` within the
// character window [pos, pos+count).
__device__ inline size_type string_view::rfind(char const* str,
                                               size_type bytes,
                                               size_type pos,
                                               size_type count) const
{
  return find_impl<false>(str, bytes, pos, count);
}
// Finds the last occurrence of the code point `chr` within [pos, pos+count).
__device__ inline size_type string_view::rfind(char_utf8 chr, size_type pos, size_type count) const
{
  // Encode the code point into its UTF-8 byte sequence and search for that.
  char encoded[sizeof(char_utf8)];
  auto const width = strings::detail::from_char_utf8(chr, encoded);
  return rfind(encoded, width, pos, count);
}
// parameters are character position values
// Returns a view of the substring [pos, pos+count). An out-of-range `pos`
// yields an empty view; a negative `count` means "to the end of the string".
__device__ inline string_view string_view::substr(size_type pos, size_type count) const
{
  if (pos < 0 || pos >= length()) { return string_view{}; }
  auto const itr = begin() + pos;
  auto const spos = itr.byte_offset();
  auto const epos = count >= 0 ? (itr + count).byte_offset() : size_bytes();
  return string_view(data() + spos, epos - spos);
}
// Converts a byte offset to its character position within this string.
__device__ inline size_type string_view::character_offset(size_type bytepos) const
{
  // If every character is a single byte, byte offset equals character position.
  if (length() == size_bytes()) return bytepos;
  return strings::detail::characters_in_string(data(), bytepos);
}
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/find.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_find
* @{
* @file
*/
/**
* @brief Returns a column of character position values where the target
* string is first found in each string of the provided column.
*
* If `target` is not found, -1 is returned for that row entry in the output column.
*
* The target string is searched within each string in the character
* position range [start,stop). If the stop parameter is -1, then the
* end of each string becomes the final position to include in the search.
*
* Any null string entries return corresponding null output column entries.
*
* @throw cudf::logic_error if start position is greater than stop position.
*
* @param input Strings instance for this operation
* @param target UTF-8 encoded string to search for in each string
* @param start First character position to include in the search
* @param stop Last position (exclusive) to include in the search.
* Default of -1 will search to the end of the string.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New integer column with character position values
*/
std::unique_ptr<column> find(
strings_column_view const& input,
string_scalar const& target,
size_type start = 0,
size_type stop = -1,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of character position values where the target
* string is first found searching from the end of each string.
*
* If `target` is not found, -1 is returned for that entry.
*
* The target string is searched within each string in the character
* position range [start,stop). If the stop parameter is -1, then the
* end of each string becomes the final position to include in the search.
*
* Any null string entries return corresponding null output column entries.
*
* @throw cudf::logic_error if start position is greater than stop position.
*
* @param input Strings instance for this operation
* @param target UTF-8 encoded string to search for in each string
* @param start First position to include in the search
* @param stop Last position (exclusive) to include in the search.
* Default of -1 will search starting at the end of the string.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New integer column with character position values
*/
std::unique_ptr<column> rfind(
strings_column_view const& input,
string_scalar const& target,
size_type start = 0,
size_type stop = -1,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of character position values where the target
* string is first found in the corresponding string of the provided column
*
* The output of row `i` is the character position of the target string for row `i`
* within input string of row `i` starting at the character position `start`.
* If the target is not found within the input string, -1 is returned for that
* row entry in the output column.
*
* Any null input or target entries return corresponding null output column entries.
*
* @throw cudf::logic_error if `input.size() != target.size()`
*
* @param input Strings to search against
* @param target Strings to search for in `input`
* @param start First character position to include in the search
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New integer column with character position values
*/
std::unique_ptr<column> find(
strings_column_view const& input,
strings_column_view const& target,
size_type start = 0,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of boolean values for each string where true indicates
* the target string was found within that string in the provided column.
*
* If the `target` is not found for a string, false is returned for that entry in the output column.
* If `target` is an empty string, true is returned for all non-null entries in the output column.
*
* Any null string entries return corresponding null entries in the output columns.
*
* @param input Strings instance for this operation
* @param target UTF-8 encoded string to search for in each string
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New BOOL8 column
*/
std::unique_ptr<column> contains(
strings_column_view const& input,
string_scalar const& target,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of boolean values for each string where true indicates
* the corresponding target string was found within that string in the provided column.
*
 * The `output[i] = true` if string `targets[i]` is found inside `input[i]`, otherwise
* `output[i] = false`.
* If `target[i]` is an empty string, true is returned for `output[i]`.
* If `target[i]` is null, false is returned for `output[i]`.
*
* Any null string entries return corresponding null entries in the output columns.
*
 * @throw cudf::logic_error if `input.size() != targets.size()`.
*
* @param input Strings instance for this operation
* @param targets Strings column of targets to check row-wise in `strings`
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New BOOL8 column
*/
std::unique_ptr<column> contains(
strings_column_view const& input,
strings_column_view const& targets,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of boolean values for each string where true indicates
* the target string was found at the beginning of that string in the provided column.
*
* If `target` is not found at the beginning of a string, false is set for
* that row entry in the output column.
* If `target` is an empty string, true is returned for all non-null entries in the output column.
*
* Any null string entries return corresponding null entries in the output columns.
*
* @param input Strings instance for this operation
* @param target UTF-8 encoded string to search for in each string
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New type_id::BOOL8 column.
*/
std::unique_ptr<column> starts_with(
strings_column_view const& input,
string_scalar const& target,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of boolean values for each string where true indicates
* corresponding string in target column was found at the beginning of that string in
* the provided column.
*
* If `targets[i]` is not found at the beginning of a string in `strings[i]`, false is set for
* that row entry in the output column.
* If `targets[i]` is an empty string, true is returned for corresponding entry in the
* output column.
*
* Any null string entries in `targets` return corresponding null entries in the output columns.
*
 * @throw cudf::logic_error if `input.size() != targets.size()`.
*
* @param input Strings instance for this operation
* @param targets Strings instance for this operation
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New BOOL8 column
*/
std::unique_ptr<column> starts_with(
strings_column_view const& input,
strings_column_view const& targets,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of boolean values for each string where true indicates
* the target string was found at the end of that string in the provided column.
*
* If `target` is not found at the end of a string, false is set for
* that row entry in the output column.
* If `target` is an empty string, true is returned for all non-null entries in the output column.
*
* Any null string entries return corresponding null entries in the output columns.
*
* @param input Strings instance for this operation
* @param target UTF-8 encoded string to search for in each string
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New BOOL8 column
*/
std::unique_ptr<column> ends_with(
strings_column_view const& input,
string_scalar const& target,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of boolean values for each string where true indicates
* corresponding string in target column was found at the end of that string in
* the provided column.
*
* If `targets[i]` is not found at the end of a string in `strings[i]`, false is set for
* that row entry in the output column.
* If `targets[i]` is an empty string, true is returned for the corresponding entry in the
* output column.
*
* Any null string entries in `targets` return corresponding null entries in the output columns.
*
 * @throw cudf::logic_error if `input.size() != targets.size()`.
*
* @param input Strings instance for this operation
* @param targets Strings instance for this operation
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New BOOL8 column
*/
std::unique_ptr<column> ends_with(
strings_column_view const& input,
strings_column_view const& targets,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/find_multiple.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_find
* @{
* @file
*/
/**
* @brief Returns a lists column with character position values where each
* of the target strings are found in each string.
*
* The size of the output column is `input.size()`.
* Each row of the output column is of size `targets.size()`.
*
* `output[i,j]` contains the position of `targets[j]` in `input[i]`
*
* @code{.pseudo}
* Example:
* s = ["abc", "def"]
* t = ["a", "c", "e"]
* r = find_multiple(s, t)
* r is now {[ 0, 2,-1], // for "abc": "a" at pos 0, "c" at pos 2, "e" not found
 *            [-1,-1, 1 ]} // for "def": "a" and "c" not found, "e" at pos 1
* @endcode
*
* @throw cudf::logic_error if `targets` is empty or contains nulls
*
* @param input Strings instance for this operation
* @param targets Strings to search for in each string
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Lists column with character position values
*/
std::unique_ptr<column> find_multiple(
strings_column_view const& input,
strings_column_view const& targets,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/capitalize.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_case
* @{
* @file
*/
/**
* @brief Returns a column of capitalized strings.
*
* If the `delimiters` is an empty string, then only the first character of each
* row is capitalized. Otherwise, a non-delimiter character is capitalized after
* any delimiter character is found.
*
* @code{.pseudo}
* Example:
* input = ["tesT1", "a Test", "Another Test", "a\tb"];
* output = capitalize(input)
* output is ["Test1", "A test", "Another test", "A\tb"]
* output = capitalize(input, " ")
* output is ["Test1", "A Test", "Another Test", "A\tb"]
* output = capitalize(input, " \t")
* output is ["Test1", "A Test", "Another Test", "A\tB"]
* @endcode
*
* Any null string entries return corresponding null output column entries.
*
 * @throw cudf::logic_error if `delimiters.is_valid()` is `false`.
*
* @param input String column
* @param delimiters Characters for identifying words to capitalize
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Column of strings capitalized from the input column
*/
std::unique_ptr<column> capitalize(
strings_column_view const& input,
string_scalar const& delimiters = string_scalar("", true, cudf::get_default_stream()),
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Modifies first character of each word to upper-case and lower-cases the rest.
*
* A word here is a sequence of characters of `sequence_type` delimited by
* any characters not part of the `sequence_type` character set.
*
* This function returns a column of strings where, for each string row in the input,
* the first character of each word is converted to upper-case,
* while all the remaining characters in a word are converted to lower-case.
*
* @code{.pseudo}
* Example:
* input = [" teST1", "a Test", " Another test ", "n2vidia"];
* output = title(input)
* output is [" Test1", "A Test", " Another Test ", "N2Vidia"]
* output = title(input,ALPHANUM)
* output is [" Test1", "A Test", " Another Test ", "N2vidia"]
* @endcode
*
* Any null string entries return corresponding null output column entries.
*
* @param input String column
* @param sequence_type The character type that is used when identifying words
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Column of titled strings
*/
std::unique_ptr<column> title(
strings_column_view const& input,
string_character_types sequence_type = string_character_types::ALPHA,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Checks if the strings in the input column are title formatted.
*
* The first character of each word should be upper-case while all other
* characters should be lower-case. A word is a sequence of upper-case
* and lower-case characters.
*
* This function returns a column of booleans indicating true if the string in
* the input row is in title format and false if not.
*
* @code{.pseudo}
* Example:
* input = [" Test1", "A Test", " Another test ", "N2Vidia Corp", "!Abc"];
* output = is_title(input)
* output is [true, true, false, true, true]
* @endcode
*
* Any null string entries result in corresponding null output column entries.
*
* @param input String column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return Column of type BOOL8
*/
std::unique_ptr<column> is_title(
strings_column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/case.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_case
* @{
* @file
*/
/**
* @brief Converts a column of strings to lower case.
*
* Only upper case alphabetical characters are converted. All other characters are copied.
* Case conversion may result in strings that are longer or shorter than the
* original string in bytes.
*
* Any null entries create null entries in the output column.
*
* @param strings Strings instance for this operation.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return New column of strings with characters converted.
*/
std::unique_ptr<column> to_lower(
strings_column_view const& strings,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Converts a column of strings to upper case.
*
* Only lower case alphabetical characters are converted. All other characters are copied.
* Case conversion may result in strings that are longer or shorter than the
* original string in bytes.
*
* Any null entries create null entries in the output column.
*
* @param strings Strings instance for this operation.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return New column of strings with characters converted.
*/
std::unique_ptr<column> to_upper(
strings_column_view const& strings,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column of strings converting lower case characters to
* upper case and vice versa.
*
* Only upper or lower case alphabetical characters are converted. All other characters are copied.
* Case conversion may result in strings that are longer or shorter than the
* original string in bytes.
*
* Any null entries create null entries in the output column.
*
* @param strings Strings instance for this operation.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return New column of strings with characters converted.
*/
std::unique_ptr<column> swapcase(
strings_column_view const& strings,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/reverse.hpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_modify
* @{
* @file
*/
/**
* @brief Reverses the characters within each string
*
* Any null string entries return corresponding null output column entries.
*
* @code{.pseudo}
* Example:
* s = ["abcdef", "12345", "", "A"]
* r = reverse(s)
* r is now ["fedcba", "54321", "", "A"]
* @endcode
*
 * @param input Strings column for this operation
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column
*/
std::unique_ptr<column> reverse(
strings_column_view const& input,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/translate.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/string_view.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <vector>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_modify
* @{
* @file
*/
/**
* @brief Translates individual characters within each string.
*
* This can also be used to remove a character by specifying 0 for the corresponding table entry.
*
* Null string entries result in null entries in the output column.
*
* @code{.pseudo}
* Example:
* s = ["aa","bbb","cccc","abcd"]
 * t = [['a','A'],['b',''],['d','Q']]
* r = translate(s,t)
* r is now ["AA", "", "cccc", "AcQ"]
* @endcode
*
* @param input Strings instance for this operation
* @param chars_table Table of UTF-8 character mappings
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
 * @return New column with translated strings
*/
std::unique_ptr<column> translate(
strings_column_view const& input,
std::vector<std::pair<char_utf8, char_utf8>> const& chars_table,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Removes or keeps the specified character ranges in cudf::strings::filter_characters
 *
 * Passed as the `keep_characters` argument of cudf::strings::filter_characters.
 */
enum class filter_type : bool {
  KEEP,   ///< All characters but those specified are removed
  REMOVE  ///< Only the specified characters are removed
};
/**
* @brief Removes ranges of characters from each string in a strings column.
*
* This can also be used to keep only the specified character ranges
* and remove all others from each string.
*
* @code{.pseudo}
* Example:
* s = ["aeiou", "AEIOU", "0123456789", "bcdOPQ5"]
* f = [{'M','Z'}, {'a','l'}, {'4','6'}]
* r1 = filter_characters(s, f)
* r1 is now ["aei", "OU", "456", "bcdOPQ5"]
* r2 = filter_characters(s, f, REMOVE)
* r2 is now ["ou", "AEI", "0123789", ""]
* r3 = filter_characters(s, f, KEEP, "*")
* r3 is now ["aei**", "***OU", "****456***", "bcdOPQ5"]
* @endcode
*
* Null string entries result in null entries in the output column.
*
* @throw cudf::logic_error if `replacement` is invalid
*
* @param input Strings instance for this operation
* @param characters_to_filter Table of character ranges to filter on
 * @param keep_characters If `filter_type::KEEP`, the `characters_to_filter` are retained and all
 * other characters are removed; if `filter_type::REMOVE`, the specified characters are removed
* @param replacement Optional replacement string for each character removed
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New column with filtered strings
*/
std::unique_ptr<column> filter_characters(
strings_column_view const& input,
std::vector<std::pair<cudf::char_utf8, cudf::char_utf8>> characters_to_filter,
filter_type keep_characters = filter_type::KEEP,
string_scalar const& replacement = string_scalar(""),
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/combine.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_combine
* @{
* @file strings/combine.hpp
* @brief Strings APIs for concatenate and join
*/
/**
* @brief Setting for specifying how separators are added with
* null strings elements.
*/
enum class separator_on_nulls {
YES, ///< Always add separators between elements
NO ///< Do not add separators if an element is null
};
/**
* @brief Setting for specifying what will be output from `join_list_elements` when an input list
* is empty.
*/
enum class output_if_empty_list {
EMPTY_STRING, ///< Empty list will result in empty string
NULL_ELEMENT ///< Empty list will result in a null
};
/**
* @brief Concatenates all strings in the column into one new string delimited
* by an optional separator string.
*
* This returns a column with one string. Any null entries are ignored unless
* the @p narep parameter specifies a replacement string.
*
* @code{.pseudo}
* Example:
* s = ['aa', null, '', 'zz' ]
* r = join_strings(s,':','_')
* r is ['aa:_::zz']
* @endcode
*
* @throw cudf::logic_error if separator is not valid.
*
* @param input Strings for this operation
 * @param separator String that should be inserted between each string.
* Default is an empty string.
* @param narep String to replace any null strings found.
* Default of invalid-scalar will ignore any null entries.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return New column containing one string.
*/
std::unique_ptr<column> join_strings(
strings_column_view const& input,
string_scalar const& separator = string_scalar(""),
string_scalar const& narep = string_scalar("", false),
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Concatenates a list of strings columns using separators for each row
* and returns the result as a strings column.
*
* Each new string is created by concatenating the strings from the same
* row delimited by the row separator provided for that row. The following rules
* are applicable:
*
* - If row separator for a given row is null, output column for that row is null, unless
* there is a valid @p separator_narep
* - The separator is applied between two output row values if the @p separate_nulls
* is `YES` or only between valid rows if @p separate_nulls is `NO`.
* - If @p separator_narep and @p col_narep are both valid, the output column is always
* non nullable
*
* @code{.pseudo}
* Example:
* c0 = ['aa', null, '', 'ee', null, 'ff']
* c1 = [null, 'cc', 'dd', null, null, 'gg']
* c2 = ['bb', '', null, null, null, 'hh']
* sep = ['::', '%%', '^^', '!', '*', null]
* out = concatenate({c0, c1, c2}, sep)
* // all rows have at least one null or sep[i]==null
* out is [null, null, null, null, null, null]
*
* sep_rep = '+'
* out = concatenate({c0, c1, c2}, sep, sep_rep)
* // all rows with at least one null output as null
* out is [null, null, null, null, null, 'ff+gg+hh']
*
* col_narep = '-'
* sep_na = non-valid scalar
* out = concatenate({c0, c1, c2}, sep, sep_na, col_narep)
* // only the null entry in the sep column produces a null row
* out is ['aa::-::bb', '-%%cc%%', '^^dd^^-', 'ee!-!-', '-*-*-', null]
*
* col_narep = ''
* out = concatenate({c0, c1, c2}, sep, sep_rep, col_narep, separator_on_nulls:NO)
* // parameter suppresses separator for null rows
* out is ['aa::bb', 'cc%%', '^^dd', 'ee', '', 'ff+gg+hh']
* @endcode
*
* @throw cudf::logic_error if no input columns are specified - table view is empty
* @throw cudf::logic_error if input columns are not all strings columns.
* @throw cudf::logic_error if the number of rows from @p separators and @p strings_columns
* do not match
*
* @param strings_columns List of strings columns to concatenate
* @param separators Strings column that provides the separator for a given row
* @param separator_narep String to replace a null separator for a given row.
* Default of invalid-scalar means no row separator value replacements.
* @param col_narep String that should be used in place of any null strings found in any column.
* Default of invalid-scalar means no null column value replacements.
* @param separate_nulls If YES, then the separator is included for null rows
* if `col_narep` is valid.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Resource for allocating device memory
* @return New column with concatenated results
*/
std::unique_ptr<column> concatenate(
table_view const& strings_columns,
strings_column_view const& separators,
string_scalar const& separator_narep = string_scalar("", false),
string_scalar const& col_narep = string_scalar("", false),
separator_on_nulls separate_nulls = separator_on_nulls::YES,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Row-wise concatenates the given list of strings columns and
* returns a single strings column result.
*
* Each new string is created by concatenating the strings from the same
* row delimited by the separator provided.
*
* Any row with a null entry will result in the corresponding output
* row to be null entry unless a narep string is specified to be used
* in its place.
*
* If @p separate_nulls is set to `NO` and @p narep is valid then
* separators are not added to the output between null elements.
* Otherwise, separators are always added if @p narep is valid.
*
* More than one column must be specified in the input @p strings_columns
* table.
*
* @code{.pseudo}
* Example:
* s1 = ['aa', null, '', 'dd']
* s2 = ['', 'bb', 'cc', null]
* out = concatenate({s1, s2})
* out is ['aa', null, 'cc', null]
*
* out = concatenate({s1, s2}, ':', '_')
* out is ['aa:', '_:bb', ':cc', 'dd:_']
*
* out = concatenate({s1, s2}, ':', '', separator_on_nulls::NO)
* out is ['aa:', 'bb', ':cc', 'dd']
* @endcode
*
* @throw cudf::logic_error if input columns are not all strings columns.
* @throw cudf::logic_error if separator is not valid.
* @throw cudf::logic_error if only one column is specified
*
* @param strings_columns List of string columns to concatenate
 * @param separator String that should be inserted between each string from each row.
* Default is an empty string.
* @param narep String to replace any null strings found in any column.
 *        Default of invalid-scalar means any null entry in any column will
 *        produce a null result for that row.
* @param separate_nulls If YES, then the separator is included for null rows if `narep` is valid
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New column with concatenated results
*/
std::unique_ptr<column> concatenate(
table_view const& strings_columns,
string_scalar const& separator = string_scalar(""),
string_scalar const& narep = string_scalar("", false),
separator_on_nulls separate_nulls = separator_on_nulls::YES,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Given a lists column of strings (each row is a list of strings), concatenates the strings
* within each row and returns a single strings column result.
*
* Each new string is created by concatenating the strings from the same row (same list element)
* delimited by the row separator provided in the @p separators strings column.
*
* A null list row will always result in a null string in the output row. Any non-null list row
* having a null element will result in the corresponding output row to be null unless a valid
* @p string_narep scalar is provided to be used in its place. Any null row in the @p separators
* column will also result in a null output row unless a valid @p separator_narep scalar is provided
* to be used in place of the null separators.
*
* If @p separate_nulls is set to `NO` and @p string_narep is valid then separators are not added to
* the output between null elements. Otherwise, separators are always added if @p string_narep is
* valid.
*
* If @p empty_list_policy is set to `EMPTY_STRING`, any row that is an empty list will result in
* an empty output string. Otherwise, the output will be a null.
*
* In the special case when the input list row contains all null elements, the output will be the
* same as in case of empty input list regardless of @p string_narep and @p separate_nulls values.
*
* @code{.pseudo}
* Example:
* s = [ ['aa', 'bb', 'cc'], null, ['', 'dd'], ['ee', null], ['ff', 'gg'] ]
* sep = ['::', '%%', '!', '*', null]
*
* out = join_list_elements(s, sep)
* out is ['aa::bb::cc', null, '!dd', null, null]
*
* out = join_list_elements(s, sep, ':', '_')
* out is ['aa::bb::cc', null, '!dd', 'ee*_', 'ff:gg']
*
* out = join_list_elements(s, sep, ':', '', separator_on_nulls::NO)
* out is ['aa::bb::cc', null, '!dd', 'ee', 'ff:gg']
* @endcode
*
* @throw cudf::logic_error if input column is not lists of strings column.
* @throw cudf::logic_error if the number of rows from `separators` and `lists_strings_column` do
* not match
*
* @param lists_strings_column Column containing lists of strings to concatenate
* @param separators Strings column that provides separators for concatenation
* @param separator_narep String that should be used to replace a null separator.
* Default is an invalid-scalar denoting that rows containing null separator will result in
* a null string in the corresponding output rows.
* @param string_narep String to replace null strings in any non-null list row.
* Default is an invalid-scalar denoting that list rows containing null strings will result
* in a null string in the corresponding output rows.
 * @param separate_nulls If YES, then the separator is included for null rows if `string_narep` is
 *        valid
* @param empty_list_policy If set to EMPTY_STRING, any input row that is an empty list will
* result in an empty string. Otherwise, it will result in a null.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column with concatenated results
*/
std::unique_ptr<column> join_list_elements(
lists_column_view const& lists_strings_column,
strings_column_view const& separators,
string_scalar const& separator_narep = string_scalar("", false),
string_scalar const& string_narep = string_scalar("", false),
separator_on_nulls separate_nulls = separator_on_nulls::YES,
output_if_empty_list empty_list_policy = output_if_empty_list::EMPTY_STRING,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Given a lists column of strings (each row is a list of strings), concatenates the strings
* within each row and returns a single strings column result.
*
* Each new string is created by concatenating the strings from the same row (same list element)
* delimited by the @p separator provided.
*
* A null list row will always result in a null string in the output row. Any non-null list row
* having a null element will result in the corresponding output row to be null unless a
* @p narep string is specified to be used in its place.
*
* If @p separate_nulls is set to `NO` and @p narep is valid then separators are not added to the
* output between null elements. Otherwise, separators are always added if @p narep is valid.
*
* If @p empty_list_policy is set to `EMPTY_STRING`, any row that is an empty list will result in
* an empty output string. Otherwise, the output will be a null.
*
* In the special case when the input list row contains all null elements, the output will be the
* same as in case of empty input list regardless of @p narep and @p separate_nulls values.
*
* @code{.pseudo}
* Example:
* s = [ ['aa', 'bb', 'cc'], null, ['', 'dd'], ['ee', null], ['ff'] ]
*
* out = join_list_elements(s)
* out is ['aabbcc', null, 'dd', null, 'ff']
*
* out = join_list_elements(s, ':', '_')
* out is ['aa:bb:cc', null, ':dd', 'ee:_', 'ff']
*
* out = join_list_elements(s, ':', '', separator_on_nulls::NO)
* out is ['aa:bb:cc', null, ':dd', 'ee', 'ff']
* @endcode
*
* @throw cudf::logic_error if input column is not lists of strings column.
* @throw cudf::logic_error if separator is not valid.
*
* @param lists_strings_column Column containing lists of strings to concatenate
* @param separator String to insert between strings of each list row.
* Default is an empty string.
* @param narep String to replace null strings in any non-null list row.
* Default is an invalid-scalar denoting that list rows containing null strings will result
* in a null string in the corresponding output rows.
* @param separate_nulls If YES, then the separator is included for null rows if `narep` is valid
* @param empty_list_policy If set to EMPTY_STRING, any input row that is an empty list will result
* in an empty string. Otherwise, it will result in a null.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column with concatenated results
*/
std::unique_ptr<column> join_list_elements(
lists_column_view const& lists_strings_column,
string_scalar const& separator = string_scalar(""),
string_scalar const& narep = string_scalar("", false),
separator_on_nulls separate_nulls = separator_on_nulls::YES,
output_if_empty_list empty_list_policy = output_if_empty_list::EMPTY_STRING,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/attributes.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
//! Strings column APIs
namespace strings {
/**
* @addtogroup strings_apis
* @{
* @file strings/attributes.hpp
* @brief Read attributes of strings column
*/
/**
* @brief Returns a column containing character lengths
* of each string in the given column
*
* The output column will have the same number of rows as the
* specified strings column. Each row value will be the number of
* characters in the corresponding string.
*
* Any null string will result in a null entry for that row in the output column.
*
* @param input Strings instance for this operation
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New column with lengths for each string
*/
std::unique_ptr<column> count_characters(
strings_column_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a column containing byte lengths
* of each string in the given column
*
* The output column will have the same number of rows as the
* specified strings column. Each row value will be the number of
* bytes in the corresponding string.
*
* Any null string will result in a null entry for that row in the output column.
*
* @param input Strings instance for this operation
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New column with the number of bytes for each string
*/
std::unique_ptr<column> count_bytes(
strings_column_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Creates a numeric column with code point values (integers) for each
* character of each string
*
* A code point is the integer value representation of a character.
* For example, the code point value for the character 'A' in UTF-8 is 65.
*
* The size of the output column will be the total number of characters in the
* strings column.
*
* Any null string is ignored. No null entries will appear in the output column.
*
* @param input Strings instance for this operation
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New INT32 column with code point integer values for each character
*/
std::unique_ptr<column> code_points(
strings_column_view const& input,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of strings_apis group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/replace_re.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/regex/flags.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <optional>
namespace cudf {
namespace strings {
struct regex_program;
/**
* @addtogroup strings_replace
* @{
* @file
*/
/**
* @brief For each string, replaces any character sequence matching the given regex
* with the provided replacement string.
*
* Any null string entries return corresponding null output column entries.
*
* See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
*
* @param input Strings instance for this operation
* @param prog Regex program instance
* @param replacement The string used to replace the matched sequence in each string.
* Default is an empty string.
* @param max_replace_count The maximum number of times to replace the matched pattern
* within each string. Default replaces every substring that is matched.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column
*/
std::unique_ptr<column> replace_re(
strings_column_view const& input,
regex_program const& prog,
string_scalar const& replacement = string_scalar(""),
std::optional<size_type> max_replace_count = std::nullopt,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief For each string, replaces any character sequence matching the given patterns
* with the corresponding string in the `replacements` column.
*
* Any null string entries return corresponding null output column entries.
*
* See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
*
* @param input Strings instance for this operation
* @param patterns The regular expression patterns to search within each string
* @param replacements The strings used for replacement
* @param flags Regex flags for interpreting special characters in the patterns
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column
*/
std::unique_ptr<column> replace_re(
strings_column_view const& input,
std::vector<std::string> const& patterns,
strings_column_view const& replacements,
regex_flags const flags = regex_flags::DEFAULT,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief For each string, replaces any character sequence matching the given regex
* using the replacement template for back-references.
*
* Any null string entries return corresponding null output column entries.
*
* See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
*
* @throw cudf::logic_error if capture index values in `replacement` are not in range 0-99, and also
* if the index exceeds the group count specified in the pattern
*
* @param input Strings instance for this operation
* @param prog Regex program instance
* @param replacement The replacement template for creating the output string
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New strings column
*/
std::unique_ptr<column> replace_with_backrefs(
strings_column_view const& input,
regex_program const& prog,
std::string_view replacement,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/contains.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/regex/flags.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
struct regex_program;
/**
* @addtogroup strings_contains
* @{
* @file strings/contains.hpp
* @brief Strings APIs for regex contains, count, matches, like
*/
/**
* @brief Returns a boolean column identifying rows which
* match the given regex_program object
*
* @code{.pseudo}
* Example:
* s = ["abc", "123", "def456"]
* p = regex_program::create("\\d+")
* r = contains_re(s, p)
* r is now [false, true, true]
* @endcode
*
* Any null string entries return corresponding null output column entries.
*
* See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
*
* @param input Strings instance for this operation
* @param prog Regex program instance
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New column of boolean results for each string
*/
std::unique_ptr<column> contains_re(
strings_column_view const& input,
regex_program const& prog,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns a boolean column identifying rows which
 * match the given regex_program object but only at the beginning of the string.
*
* @code{.pseudo}
* Example:
* s = ["abc", "123", "def456"]
* p = regex_program::create("\\d+")
* r = matches_re(s, p)
* r is now [false, true, false]
* @endcode
*
* Any null string entries return corresponding null output column entries.
*
* See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
*
* @param input Strings instance for this operation
* @param prog Regex program instance
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New column of boolean results for each string
*/
std::unique_ptr<column> matches_re(
strings_column_view const& input,
regex_program const& prog,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns the number of times the given regex_program's pattern
* matches in each string
*
* @code{.pseudo}
* Example:
* s = ["abc", "123", "def45"]
* p = regex_program::create("\\d")
* r = count_re(s, p)
* r is now [0, 3, 2]
* @endcode
*
* Any null string entries return corresponding null output column entries.
*
* See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
*
* @param input Strings instance for this operation
* @param prog Regex program instance
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New column of match counts for each string
*/
std::unique_ptr<column> count_re(
strings_column_view const& input,
regex_program const& prog,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a boolean column identifying rows which
* match the given like pattern.
*
* The like pattern expects only 2 wildcard special characters:
* - `%` zero or more of any character
* - `_` any single character
*
* @code{.pseudo}
* Example:
* s = ["azaa", "ababaabba", "aaxa"]
* r = like(s, "%a_aa%")
* r is now [1, 1, 0]
* r = like(s, "a__a")
* r is now [1, 0, 1]
* @endcode
*
* Specify an escape character to include either `%` or `_` in the search.
* The `escape_character` is expected to be either 0 or 1 characters.
* If more than one character is specified only the first character is used.
*
* @code{.pseudo}
* Example:
* s = ["abc_def", "abc1def", "abc_"]
* r = like(s, "abc/_d%", "/")
* r is now [1, 0, 0]
* @endcode
*
* Any null string entries return corresponding null output column entries.
*
* @throw cudf::logic_error if `pattern` or `escape_character` is invalid
*
* @param input Strings instance for this operation
* @param pattern Like pattern to match within each string
* @param escape_character Optional character specifies the escape prefix.
* Default is no escape character.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New boolean column
*/
std::unique_ptr<column> like(
strings_column_view const& input,
string_scalar const& pattern,
string_scalar const& escape_character = string_scalar(""),
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
* @brief Returns a boolean column identifying rows which
* match the corresponding like pattern in the given patterns
*
* The like pattern expects only 2 wildcard special characters:
* - `%` zero or more of any character
* - `_` any single character
*
* @code{.pseudo}
* Example:
* s = ["azaa", "ababaabba", "aaxa"]
* p = ["%a", "b%", "__x_"]
* r = like(s, p)
* r is now [1, 0, 1]
* @endcode
*
* Specify an escape character to include either `%` or `_` in the search.
* The `escape_character` is expected to be either 0 or 1 characters.
* If more than one character is specified only the first character is used.
* The escape character is applied to all patterns.
*
* Any null string entries return corresponding null output column entries.
*
* @throw cudf::logic_error if `patterns` contains nulls or `escape_character` is invalid
* @throw cudf::logic_error if `patterns.size() != input.size()`
*
* @param input Strings instance for this operation
* @param patterns Like patterns to match within each corresponding string
* @param escape_character Optional character specifies the escape prefix.
* Default is no escape character.
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New boolean column
*/
std::unique_ptr<column> like(
strings_column_view const& input,
strings_column_view const& patterns,
string_scalar const& escape_character = string_scalar(""),
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/strip.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/side_type.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_modify
* @{
* @file
*/
/**
* @brief Removes the specified characters from the beginning or end
* (or both) of each string.
*
* The to_strip parameter can contain one or more characters.
* All characters in `to_strip` are removed from the input strings.
*
* If `to_strip` is the empty string, whitespace characters are removed.
* Whitespace is considered the space character plus control characters
* like tab and line feed.
*
* Any null string entries return corresponding null output column entries.
*
* @code{.pseudo}
* Example:
* s = [" aaa ", "_bbbb ", "__cccc ", "ddd", " ee _ff gg_"]
* r = strip(s,both," _")
* r is now ["aaa", "bbbb", "cccc", "ddd", "ee _ff gg"]
* @endcode
*
* @throw cudf::logic_error if `to_strip` is invalid.
*
* @param input Strings column for this operation
* @param side Indicates characters are to be stripped from the beginning, end, or both of each
* string; Default is both
* @param to_strip UTF-8 encoded characters to strip from each string;
* Default is empty string which indicates strip whitespace characters
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory.
* @return New strings column.
*/
std::unique_ptr<column> strip(
strings_column_view const& input,
side_type side = side_type::BOTH,
string_scalar const& to_strip = string_scalar(""),
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/slice.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_slice
* @{
* @file
*/
/**
 * @brief Returns a new strings column that contains substrings of the
 * strings in the provided column.
 *
 * The character positions to retrieve in each string are `[start,stop)`.
 * If the start position is outside a string's length, an empty
 * string is returned for that entry. If the stop position is past the
 * end of a string's length, the end of the string is used for
 * stop position for that string.
 *
 * Null string entries will return null output string entries.
 *
 * @code{.pseudo}
 * Example:
 * s = ["hello", "goodbye"]
 * r = slice_strings(s,2,6)
 * r is now ["llo","odby"]
 * r2 = slice_strings(s,2,5,2)
 * r2 is now ["lo","ob"]
 * @endcode
 *
 * @param input Strings column for this operation
 * @param start First character position to begin the substring
 * @param stop Last character position (exclusive) to end the substring
 * @param step Distance between input characters retrieved
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return New strings column containing the specified substring of each input string
 */
std::unique_ptr<column> slice_strings(
strings_column_view const& input,
numeric_scalar<size_type> const& start = numeric_scalar<size_type>(0, false),
numeric_scalar<size_type> const& stop = numeric_scalar<size_type>(0, false),
numeric_scalar<size_type> const& step = numeric_scalar<size_type>(1),
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns a new strings column that contains substrings of the
 * strings in the provided column using unique ranges for each string.
 *
 * The character positions to retrieve in each string are specified in
 * the `starts` and `stops` integer columns.
 * If a start position is outside a string's length, an empty
 * string is returned for that entry. If a stop position is past the
 * end of a string's length, the end of the string is used for
 * stop position for that string. Any stop position value set to -1 will
 * indicate to use the end of the string as the stop position for that
 * string.
 *
 * Null string entries will return null output string entries.
 *
 * The starts and stops column must both be the same integer type and
 * must be the same size as the strings column.
 *
 * @code{.pseudo}
 * Example:
 * s = ["hello", "goodbye"]
 * starts = [ 1, 2 ]
 * stops = [ 5, 4 ]
 * r = slice_strings(s,starts,stops)
 * r is now ["ello","od"]
 * @endcode
 *
 * @throw cudf::logic_error if starts or stops is a different size than the strings column.
 * @throw cudf::logic_error if starts and stops are not same integer type.
 * @throw cudf::logic_error if starts or stops contains nulls.
 *
 * @param input Strings column for this operation
 * @param starts First character positions to begin the substring
 * @param stops Last character (exclusive) positions to end the substring
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return New strings column containing the specified substring of each input string
 */
std::unique_ptr<column> slice_strings(
strings_column_view const& input,
column_view const& starts,
column_view const& stops,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/side_type.hpp
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace cudf {
namespace strings {
/**
* @addtogroup strings_modify
* @{
* @file
*/
/**
 * @brief Direction identifier for cudf::strings::strip and cudf::strings::pad functions.
 *
 * Specifies which end(s) of each string an operation applies to.
 */
enum class side_type {
LEFT, ///< strip/pad characters from the beginning of the string
RIGHT, ///< strip/pad characters from the end of the string
BOTH ///< strip/pad characters from the beginning and end of the string
};
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/strings_column_view.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column_view.hpp>
/**
* @file
* @brief Class definition for cudf::strings_column_view
*/
namespace cudf {
/**
* @addtogroup strings_classes
* @{
*/
/**
 * @brief Given a column-view of strings type, an instance of this class
 * provides a wrapper on this compound column for strings operations.
 */
class strings_column_view : private column_view {
public:
/**
 * @brief Construct a new strings column view object from a column view.
 *
 * @param strings_column The column view to wrap.
 */
strings_column_view(column_view strings_column);
strings_column_view(strings_column_view&&) = default; ///< Move constructor
strings_column_view(strings_column_view const&) = default; ///< Copy constructor
~strings_column_view() = default;
/**
 * @brief Copy assignment operator
 *
 * @return Reference to this instance
 */
strings_column_view& operator=(strings_column_view const&) = default;
/**
 * @brief Move assignment operator
 *
 * @return Reference to this instance (after transferring ownership)
 */
strings_column_view& operator=(strings_column_view&&) = default;
static constexpr size_type offsets_column_index{0}; ///< Child index of the offsets column
static constexpr size_type chars_column_index{1}; ///< Child index of the characters column
using column_view::has_nulls;
using column_view::is_empty;
using column_view::null_count;
using column_view::null_mask;
using column_view::offset;
using column_view::size;
using offset_iterator = size_type const*; ///< offsets iterator type
using chars_iterator = char const*; ///< character iterator type
/**
 * @brief Returns the parent column.
 *
 * @return The parent column
 */
[[nodiscard]] column_view parent() const;
/**
 * @brief Returns the internal column of offsets
 *
 * @throw cudf::logic_error if this is an empty column
 * @return The offsets column
 */
[[nodiscard]] column_view offsets() const;
/**
 * @brief Return an iterator for the offsets child column.
 *
 * This automatically applies the offset of the parent.
 *
 * @return Iterator pointing to the first offset value.
 */
[[nodiscard]] offset_iterator offsets_begin() const;
/**
 * @brief Return an end iterator for the offsets child column.
 *
 * This automatically applies the offset of the parent.
 *
 * @return Iterator pointing 1 past the last offset value.
 */
[[nodiscard]] offset_iterator offsets_end() const;
/**
 * @brief Returns the internal column of chars
 *
 * @throw cudf::logic_error if this is an empty column
 * @return The chars column
 */
[[nodiscard]] column_view chars() const;
/**
 * @brief Returns the number of bytes in the chars child column.
 *
 * This accounts for empty columns but does not reflect a sliced parent column
 * view (i.e.: non-zero offset or reduced row count).
 *
 * @return Number of bytes in the chars child column
 */
[[nodiscard]] size_type chars_size() const noexcept;
/**
 * @brief Return an iterator for the chars child column.
 *
 * This does not apply the offset of the parent.
 * The offsets child must be used to properly address the char bytes.
 *
 * For example, to access the first character of string `i` (accounting for
 * a sliced column offset) use: `chars_begin()[offsets_begin()[i]]`.
 *
 * @return Iterator pointing to the first char byte.
 */
[[nodiscard]] chars_iterator chars_begin() const;
/**
 * @brief Return an end iterator for the chars child column.
 *
 * This does not apply the offset of the parent.
 * The offsets child must be used to properly address the char bytes.
 *
 * @return Iterator pointing 1 past the last char byte.
 */
[[nodiscard]] chars_iterator chars_end() const;
};
//! Strings column APIs.
namespace strings {
} // namespace strings
/** @} */ // end of group
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/findall.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/strings/regex/flags.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
struct regex_program;
/**
* @addtogroup strings_contains
* @{
* @file
*/
/**
 * @brief Returns a lists column of strings for each matching occurrence using
 * the regex_program pattern within each string
 *
 * Each output row includes all the substrings within the corresponding input row
 * that match the given pattern. If no matches are found, the output row is empty.
 * The output therefore contains one list row per input string row.
 *
 * @code{.pseudo}
 * Example:
 * s = ["bunny", "rabbit", "hare", "dog"]
 * p = regex_program::create("[ab]")
 * r = findall(s, p)
 * r is now a lists column like:
 * [ ["b"]
 * ["a","b","b"]
 * ["a"]
 * [] ]
 * @endcode
 *
 * A null output row occurs if the corresponding input row is null.
 *
 * See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
 *
 * @param input Strings instance for this operation
 * @param prog Regex program instance
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return New lists column of strings
 */
std::unique_ptr<column> findall(
strings_column_view const& input,
regex_program const& prog,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/string_view.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/types.hpp>
#include <cuda_runtime.h>
#include <iterator>
/**
* @file
* @brief Class definition for cudf::string_view.
*/
namespace cudf {
using char_utf8 = uint32_t; ///< UTF-8 characters are 1-4 bytes
/**
 * @brief A non-owning, immutable view of device data that is a variable length
 * char array representing a UTF-8 string.
 *
 * @ingroup strings_classes
 *
 * The caller must maintain the device memory for the lifetime of this instance.
 *
 * This may be used to wrap a device pointer and size but any member function
 * that requires accessing the device memory must be called from a kernel.
 */
class string_view {
public:
/**
 * @brief Return the number of bytes in this string
 *
 * @return The number of bytes in this string
 */
CUDF_HOST_DEVICE [[nodiscard]] inline size_type size_bytes() const { return _bytes; }
/**
 * @brief Return the number of characters in this string
 *
 * @return The number of characters in this string
 */
__device__ [[nodiscard]] inline size_type length() const;
/**
 * @brief Return a pointer to the internal device array
 *
 * @return A pointer to the internal device array
 */
CUDF_HOST_DEVICE [[nodiscard]] inline char const* data() const { return _data; }
/**
 * @brief Return true if string has no characters
 *
 * @return true if string has no characters
 */
CUDF_HOST_DEVICE [[nodiscard]] inline bool empty() const { return size_bytes() == 0; }
/**
 * @brief Handy iterator for navigating through encoded characters.
 */
class const_iterator {
/// @cond
public:
using difference_type = ptrdiff_t;
using value_type = char_utf8;
using reference = char_utf8&;
using pointer = char_utf8*;
using iterator_category = std::input_iterator_tag;
__device__ inline const_iterator(string_view const& str, size_type pos);
const_iterator(const_iterator const& mit) = default;
const_iterator(const_iterator&& mit) = default;
const_iterator& operator=(const_iterator const&) = default;
const_iterator& operator=(const_iterator&&) = default;
__device__ inline const_iterator& operator++();
__device__ inline const_iterator operator++(int);
__device__ inline const_iterator& operator+=(difference_type);
__device__ inline const_iterator operator+(difference_type) const;
__device__ inline const_iterator& operator--();
__device__ inline const_iterator operator--(int);
__device__ inline const_iterator& operator-=(difference_type);
__device__ inline const_iterator operator-(difference_type) const;
__device__ inline const_iterator& move_to(size_type);
__device__ inline bool operator==(const_iterator const&) const;
__device__ inline bool operator!=(const_iterator const&) const;
__device__ inline bool operator<(const_iterator const&) const;
__device__ inline bool operator<=(const_iterator const&) const;
__device__ inline bool operator>(const_iterator const&) const;
__device__ inline bool operator>=(const_iterator const&) const;
__device__ inline char_utf8 operator*() const;
[[nodiscard]] __device__ inline size_type position() const;
[[nodiscard]] __device__ inline size_type byte_offset() const;
private:
friend class string_view;
char const* p{};
size_type bytes{};
size_type char_pos{};
size_type byte_pos{};
__device__ inline const_iterator(string_view const& str, size_type pos, size_type offset);
/// @endcond
};
/**
 * @brief Return new iterator pointing to the beginning of this string
 *
 * @return new iterator pointing to the beginning of this string
 */
__device__ [[nodiscard]] inline const_iterator begin() const;
/**
 * @brief Return new iterator pointing past the end of this string
 *
 * @return new iterator pointing past the end of this string
 */
__device__ [[nodiscard]] inline const_iterator end() const;
/**
 * @brief Return single UTF-8 character at the given character position
 *
 * @param pos Character position
 * @return UTF-8 character at the given character position
 */
__device__ inline char_utf8 operator[](size_type pos) const;
/**
 * @brief Return the byte offset from data() for a given character position
 *
 * @param pos Character position
 * @return Byte offset from data() for a given character position
 */
__device__ [[nodiscard]] inline size_type byte_offset(size_type pos) const;
/**
 * @brief Comparing target string with this string. Each character is compared
 * as a UTF-8 code-point value.
 *
 * @param str Target string to compare with this string.
 * @return 0 If they compare equal.
 * <0 Either the value of the first character of this string that does
 * not match is lower in the arg string, or all compared characters
 * match but the arg string is shorter.
 * >0 Either the value of the first character of this string that does
 * not match is greater in the arg string, or all compared characters
 * match but the arg string is longer.
 */
__device__ [[nodiscard]] inline int compare(string_view const& str) const;
/**
 * @brief Comparing target string with this string. Each character is compared
 * as a UTF-8 code-point value.
 *
 * @param str Target string to compare with this string.
 * @param bytes Number of bytes in str.
 * @return 0 If they compare equal.
 * <0 Either the value of the first character of this string that does
 * not match is lower in the arg string, or all compared characters
 * match but the arg string is shorter.
 * >0 Either the value of the first character of this string that does
 * not match is greater in the arg string, or all compared characters
 * match but the arg string is longer.
 */
__device__ inline int compare(char const* str, size_type bytes) const;
/**
 * @brief Returns true if rhs matches this string exactly.
 *
 * @param rhs Target string to compare with this string.
 * @return true if rhs matches this string exactly
 */
__device__ inline bool operator==(string_view const& rhs) const;
/**
 * @brief Returns true if rhs does not match this string.
 *
 * @param rhs Target string to compare with this string.
 * @return true if rhs does not match this string
 */
__device__ inline bool operator!=(string_view const& rhs) const;
/**
 * @brief Returns true if this string is ordered before rhs.
 *
 * @param rhs Target string to compare with this string.
 * @return true if this string is ordered before rhs
 */
__device__ inline bool operator<(string_view const& rhs) const;
/**
 * @brief Returns true if rhs is ordered before this string.
 *
 * @param rhs Target string to compare with this string.
 * @return true if rhs is ordered before this string
 */
__device__ inline bool operator>(string_view const& rhs) const;
/**
 * @brief Returns true if this string matches or is ordered before rhs.
 *
 * @param rhs Target string to compare with this string.
 * @return true if this string matches or is ordered before rhs
 */
__device__ inline bool operator<=(string_view const& rhs) const;
/**
 * @brief Returns true if rhs matches or is ordered before this string.
 *
 * @param rhs Target string to compare with this string.
 * @return true if rhs matches or is ordered before this string
 */
__device__ inline bool operator>=(string_view const& rhs) const;
/**
 * @brief Returns the character position of the first occurrence where the
 * argument str is found in this string within the character range [pos,pos+n).
 *
 * @param str Target string to search within this string.
 * @param pos Character position to start search within this string.
 * @param count Number of characters from pos to include in the search.
 * Specify -1 to indicate to the end of the string.
 * @return npos if str is not found in this string.
 */
__device__ [[nodiscard]] inline size_type find(string_view const& str,
size_type pos = 0,
size_type count = -1) const;
/**
 * @brief Returns the character position of the first occurrence where the
 * array str is found in this string within the character range [pos,pos+n).
 *
 * @param str Target array to search within this string.
 * @param bytes Number of bytes in str.
 * @param pos Character position to start search within this string.
 * @param count Number of characters from pos to include in the search.
 * Specify -1 to indicate to the end of the string.
 * @return npos if arg string is not found in this string.
 */
__device__ inline size_type find(char const* str,
size_type bytes,
size_type pos = 0,
size_type count = -1) const;
/**
 * @brief Returns the character position of the first occurrence where
 * character is found in this string within the character range [pos,pos+n).
 *
 * @param character Single encoded character.
 * @param pos Character position to start search within this string.
 * @param count Number of characters from pos to include in the search.
 * Specify -1 to indicate to the end of the string.
 * @return npos if arg string is not found in this string.
 */
__device__ [[nodiscard]] inline size_type find(char_utf8 character,
size_type pos = 0,
size_type count = -1) const;
/**
 * @brief Returns the character position of the last occurrence where the
 * argument str is found in this string within the character range [pos,pos+n).
 *
 * @param str Target string to search within this string.
 * @param pos Character position to start search within this string.
 * @param count Number of characters from pos to include in the search.
 * Specify -1 to indicate to the end of the string.
 * @return npos if arg string is not found in this string.
 */
__device__ [[nodiscard]] inline size_type rfind(string_view const& str,
size_type pos = 0,
size_type count = -1) const;
/**
 * @brief Returns the character position of the last occurrence where the
 * array str is found in this string within the character range [pos,pos+n).
 *
 * @param str Target string to search within this string.
 * @param bytes Number of bytes in str.
 * @param pos Character position to start search within this string.
 * @param count Number of characters from pos to include in the search.
 * Specify -1 to indicate to the end of the string.
 * @return npos if arg string is not found in this string.
 */
__device__ inline size_type rfind(char const* str,
size_type bytes,
size_type pos = 0,
size_type count = -1) const;
/**
 * @brief Returns the character position of the last occurrence where
 * character is found in this string within the character range [pos,pos+n).
 *
 * @param character Single encoded character.
 * @param pos Character position to start search within this string.
 * @param count Number of characters from pos to include in the search.
 * Specify -1 to indicate to the end of the string.
 * @return npos if arg string is not found in this string.
 */
__device__ [[nodiscard]] inline size_type rfind(char_utf8 character,
size_type pos = 0,
size_type count = -1) const;
/**
 * @brief Return a sub-string of this string. The original string and device
 * memory must still be maintained for the lifetime of the returned instance.
 *
 * @param start Character position to start the sub-string.
 * @param length Number of characters from start to include in the sub-string.
 * @return New instance pointing to a subset of the characters within this instance.
 */
__device__ [[nodiscard]] inline string_view substr(size_type start, size_type length) const;
/**
 * @brief Return minimum value associated with the string type
 *
 * This function is needed to be host callable because it is called by a host
 * callable function DeviceMax::identity<string_view>()
 *
 * @return An empty string
 */
CUDF_HOST_DEVICE inline static string_view min();
/**
 * @brief Return maximum value associated with the string type
 *
 * This function is needed to be host callable because it is called by a host
 * callable function DeviceMin::identity<string_view>()
 *
 * @return A string value which represents the highest possible valid UTF-8 encoded
 * character.
 */
CUDF_HOST_DEVICE inline static string_view max();
/**
 * @brief Default constructor represents an empty string.
 */
CUDF_HOST_DEVICE inline string_view() : _data("") {}
/**
 * @brief Create instance from existing device char array.
 *
 * @param data Device char array encoded in UTF8.
 * @param bytes Number of bytes in data array.
 */
CUDF_HOST_DEVICE inline string_view(char const* data, size_type bytes)
: _data(data), _bytes(bytes), _length(UNKNOWN_STRING_LENGTH)
{
}
string_view(string_view const&) = default; ///< Copy constructor
string_view(string_view&&) = default; ///< Move constructor
~string_view() = default;
/**
 * @brief Copy assignment operator
 *
 * @return Reference to this instance
 */
string_view& operator=(string_view const&) = default;
/**
 * @brief Move assignment operator
 *
 * @return Reference to this instance (after transferring ownership)
 */
string_view& operator=(string_view&&) = default;
/**
 * @brief No-position value.
 *
 * Used when specifying or returning an invalid or unknown character position value.
 */
static inline cudf::size_type const npos{-1};
private:
char const* _data{}; ///< Pointer to device memory containing the char array for this string
size_type _bytes{}; ///< Number of bytes in _data for this string
mutable size_type _length{}; ///< Number of characters in this string (computed on demand)
/**
 * @brief The string length is initialized to this value as a place-holder
 *
 * The number of characters in a string is computed on-demand.
 */
static inline cudf::size_type const UNKNOWN_STRING_LENGTH{-1};
/**
 * @brief Return the character position of the given byte offset.
 *
 * @param bytepos Byte position from start of _data.
 * @return The character position for the specified byte.
 */
__device__ [[nodiscard]] inline size_type character_offset(size_type bytepos) const;
/**
 * @brief Common internal implementation for string_view::find and string_view::rfind.
 *
 * @tparam forward True for find and false for rfind
 *
 * @param str Target string to search within this string
 * @param bytes Number of bytes in str
 * @param pos Character position to start search within this string
 * @param count Number of characters from pos to include in the search
 * @return npos if str is not found in this string
 */
template <bool forward>
__device__ inline size_type find_impl(char const* str,
size_type bytes,
size_type pos,
size_type count) const;
};
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/wrap.hpp
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/column/column.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
/**
* @addtogroup strings_modify
* @{
* @file
*/
/**
 * @brief Wraps strings onto multiple lines shorter than `width` by replacing appropriate white
 * space with new-line characters (ASCII 0x0A).
 *
 * For each string row in the input column longer than `width`, the corresponding output string row
 * will have newline characters inserted so that each line is no more than `width` characters.
 * Attempts to use existing white space locations to split the strings, but may split
 * non-white-space sequences if necessary.
 *
 * Any null string entries return corresponding null output column entries.
 *
 * Example 1:
 * ```
 * width = 3
 * input_string_tbl = [ "12345", "thesé", nullptr, "ARE THE", "tést strings", "" ];
 *
 * wrapped_string_tbl = wrap(input_string_tbl, width)
 * wrapped_string_tbl = [ "12345", "thesé", nullptr, "ARE\nTHE", "tést\nstrings", "" ]
 * ```
 *
 * Example 2:
 * ```
 * width = 12;
 * input_string_tbl = ["the quick brown fox jumped over the lazy brown dog", "hello, world"]
 *
 * wrapped_string_tbl = wrap(input_string_tbl, width)
 * wrapped_string_tbl = ["the quick\nbrown fox\njumped over\nthe lazy\nbrown dog", "hello, world"]
 * ```
 *
 * @param input String column
 * @param width Maximum character width of a line within each string
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
 * @return Column of wrapped strings
 */
std::unique_ptr<column> wrap(
strings_column_view const& input,
size_type width,
rmm::cuda_stream_view stream = cudf::get_default_stream(),
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
rapidsai_public_repos/cudf/cpp/include/cudf
|
rapidsai_public_repos/cudf/cpp/include/cudf/strings/extract.hpp
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cudf/strings/regex/flags.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
namespace cudf {
namespace strings {
struct regex_program;
/**
* @addtogroup strings_extract
* @{
* @file
*/
/**
 * @brief Returns a table of strings columns where each column corresponds to the matching
 * group specified in the given regex_program object
 *
 * All the strings for the first group will go in the first output column; the strings for the
 * second group will go in the second column, and so on. Null entries are added to a row `i` of
 * every output column if the input string at row `i` does not match the pattern.
 *
 * Any null string entries return corresponding null output column entries.
 *
 * @code{.pseudo}
 * Example:
 *   s = ["a1", "b2", "c3"]
 *   p = regex_program::create("([ab])(\\d)")
 *   r = extract(s, p)
 *   r is now [ ["a", "b", null],
 *              ["1", "2", null] ]
 * @endcode
 *
 * See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
 *
 * @param input Strings instance for this operation
 * @param prog Regex program instance
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned table's device memory
 * @return Columns of strings extracted from the input column
 */
std::unique_ptr<table> extract(
  strings_column_view const& input,
  regex_program const& prog,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/**
 * @brief Returns a lists column of strings where each output row contains the strings for all
 * matching groups specified in the given regex_program object
 *
 * All the matching groups for the first input row will go into the first output row; the
 * matching groups for the second input row will go into the second output row, and so on.
 * Note that, unlike extract(), groups from repeated matches within a single string are
 * flattened into one list row (see "a1 b4" producing ["a", "1", "b", "4"] below).
 *
 * A null output row will result if the corresponding input string row does not match or
 * that input row is null.
 *
 * @code{.pseudo}
 * Example:
 *   s = ["a1 b4", "b2", "c3 a5", "b", null]
 *   p = regex_program::create("([ab])(\\d)")
 *   r = extract_all_record(s, p)
 *   r is now [ ["a", "1", "b", "4"],
 *              ["b", "2"],
 *              ["a", "5"],
 *              null,
 *              null ]
 * @endcode
 *
 * See the @ref md_regex "Regex Features" page for details on patterns supported by this API.
 *
 * @param input Strings instance for this operation
 * @param prog Regex program instance
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate any returned device memory
 * @return Lists column containing strings extracted from the input column
 */
std::unique_ptr<column> extract_all_record(
  strings_column_view const& input,
  regex_program const& prog,
  rmm::cuda_stream_view stream        = cudf::get_default_stream(),
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource());
/** @} */ // end of doxygen group
} // namespace strings
} // namespace cudf
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.