name | code_snippet | score
---|---|---
flink_CustomHeadersDecorator_setCustomHeaders_rdh
|
/**
* Sets the custom headers for the message.
*
* @param customHeaders
* A collection of custom headers.
*/
public void setCustomHeaders(Collection<HttpHeader> customHeaders) {
this.customHeaders = customHeaders;
}
| 3.26 |
flink_RawFormatSerializationSchema_createNotNullConverter_rdh
|
/**
* Creates a runtime converter.
*/
private SerializationRuntimeConverter createNotNullConverter(LogicalType type,
String charsetName, boolean isBigEndian) {
switch (type.getTypeRoot()) {
case CHAR :
case VARCHAR :
return createStringConverter(charsetName);
case VARBINARY :
case BINARY :
return row -> row.getBinary(0);
case RAW :
return createRawValueConverter(((RawType<?>) (type)));
case BOOLEAN :
return row ->
{
byte b = ((byte) ((row.getBoolean(0)) ? 1 : 0));
return new byte[]{ b };
};
case TINYINT :
return row -> new byte[]{ row.getByte(0) };
case SMALLINT :
return new ShortSerializationConverter(isBigEndian);
case INTEGER :
return new IntegerSerializationConverter(isBigEndian);
case BIGINT :
return new LongSerializationConverter(isBigEndian);
case FLOAT :
return new FloatSerializationConverter(isBigEndian);
case DOUBLE :
return new DoubleSerializationConverter(isBigEndian);
default :
throw new UnsupportedOperationException("'raw' format currently doesn't support type: " + type);
}
}
| 3.26 |
flink_RawFormatSerializationSchema_createConverter_rdh
|
/**
* Creates a runtime converter.
*/
private SerializationRuntimeConverter createConverter(LogicalType type, String charsetName, boolean isBigEndian) {
final SerializationRuntimeConverter converter = createNotNullConverter(type, charsetName, isBigEndian);
return new SerializationRuntimeConverter()
{
private static final long serialVersionUID = 1L;
@Override
public void open() {
converter.open();
}
@Override
public byte[] convert(RowData row) throws IOException {
if (row.isNullAt(0)) {
return null;
}
return converter.convert(row);
}
};
}
| 3.26 |
flink_HeapReducingState_mergeState_rdh
|
// ------------------------------------------------------------------------
// state merging
// ------------------------------------------------------------------------
@Override
protected V mergeState(V a, V b) throws Exception {
return reduceTransformation.apply(a, b);
}
| 3.26 |
flink_HeapReducingState_get_rdh
|
// state access
// ------------------------------------------------------------------------
@Override
public V get() {
return getInternal();
}
| 3.26 |
flink_DataStreamAllroundTestProgram_m0_rdh
|
/**
* A general purpose test job for Flink's DataStream API operators and primitives.
*
* <p>The job is constructed of generic components from {@link DataStreamAllroundTestJobFactory}. It
* currently covers the following aspects that are frequently present in Flink DataStream jobs:
*
* <ul>
* <li>A generic Kryo input type.
* <li>A state type for which we register a {@link KryoSerializer}.
* <li>Operators with {@link ValueState}.
* <li>Operators with union state.
* <li>Operators with broadcast state.
* </ul>
*
* <p>The cli job configuration options are described in {@link DataStreamAllroundTestJobFactory}.
*/
public class DataStreamAllroundTestProgram {
public static void m0(String[] args) throws Exception {
final ParameterTool pt = ParameterTool.fromArgs(args);
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
setupEnvironment(env, pt);
// add a keyed stateful map operator, which uses Kryo for state serialization
DataStream<Event> eventStream = env.addSource(createEventSource(pt)).name(EVENT_SOURCE.getName()).uid(EVENT_SOURCE.getUid()).assignTimestampsAndWatermarks(createTimestampExtractor(pt)).keyBy(Event::getKey).map(// map function simply forwards the inputs
// state is verified and updated per event as a wrapped
// ComplexPayload state object
// custom stateful serializer
// KryoSerializer via type extraction
createArtificialKeyedStateMapper(((MapFunction<Event, Event>) (in -> in)), (Event event,ComplexPayload lastState) -> {
if (((lastState != null) && (!lastState.getStrPayload().equals(KEYED_STATE_OPER_WITH_KRYO_AND_CUSTOM_SER.getName()))) && (lastState.getInnerPayLoad().getSequenceNumber() == (event.getSequenceNumber() - 1))) {
throwIncorrectRestoredStateException(event.getSequenceNumber() - 1, KEYED_STATE_OPER_WITH_KRYO_AND_CUSTOM_SER.getName(), lastState.getStrPayload());
}
return new ComplexPayload(event, KEYED_STATE_OPER_WITH_KRYO_AND_CUSTOM_SER.getName());
}, // KryoSerializer
Arrays.asList(new KryoSerializer<>(ComplexPayload.class,
env.getConfig()), new StatefulComplexPayloadSerializer()), Collections.singletonList(ComplexPayload.class))).returns(Event.class).name(KEYED_STATE_OPER_WITH_KRYO_AND_CUSTOM_SER.getName()).uid(KEYED_STATE_OPER_WITH_KRYO_AND_CUSTOM_SER.getUid());
// add a keyed stateful map operator, which uses Avro for state serialization
eventStream = eventStream.keyBy(Event::getKey).map(// map function simply forwards the inputs
// state is verified and updated per event as a wrapped
// ComplexPayloadAvro state object
// custom AvroSerializer
// AvroSerializer via type extraction
createArtificialKeyedStateMapper(((MapFunction<Event, Event>) (in -> in)), (Event event,ComplexPayloadAvro lastState) -> {
if (((lastState != null) && (!lastState.getStrPayload().equals(KEYED_STATE_OPER_WITH_AVRO_SER.getName()))) && (lastState.getInnerPayLoad().getSequenceNumber() == (event.getSequenceNumber() - 1))) {
throwIncorrectRestoredStateException(event.getSequenceNumber() - 1, KEYED_STATE_OPER_WITH_AVRO_SER.getName(), lastState.getStrPayload());
}
ComplexPayloadAvro payload = new ComplexPayloadAvro();
payload.setEventTime(event.getEventTime());
payload.setInnerPayLoad(new InnerPayLoadAvro(event.getSequenceNumber()));
payload.setStrPayload(KEYED_STATE_OPER_WITH_AVRO_SER.getName());
payload.setStringList(Arrays.asList(String.valueOf(event.getKey()), event.getPayload()));
return payload;
}, Collections.singletonList(new AvroSerializer<>(ComplexPayloadAvro.class)), Collections.singletonList(ComplexPayloadAvro.class))).returns(Event.class).name(KEYED_STATE_OPER_WITH_AVRO_SER.getName()).uid(KEYED_STATE_OPER_WITH_AVRO_SER.getUid());
DataStream<Event> eventStream2 = eventStream.map(createArtificialOperatorStateMapper(((MapFunction<Event, Event>) (in -> in)))).returns(Event.class).name(OPERATOR_STATE_OPER.getName()).uid(OPERATOR_STATE_OPER.getUid());
// apply a tumbling window that simply passes forward window elements;
// this allows the job to cover timers state
@SuppressWarnings("Convert2Lambda")
DataStream<Event> v5 = applyTumblingWindows(eventStream2.keyBy(Event::getKey), pt).apply(new WindowFunction<Event, Event, Integer, TimeWindow>() {
@Override
public void apply(Integer integer, TimeWindow window,
Iterable<Event> input, Collector<Event> out) {
for (Event e : input) {
out.collect(e);
}
}
}).name(TIME_WINDOW_OPER.getName()).uid(TIME_WINDOW_OPER.getUid());
v5 = DataStreamAllroundTestJobFactory.verifyCustomStatefulTypeSerializer(v5);
if (isSimulateFailures(pt)) {
v5 = v5.map(createFailureMapper(pt)).setParallelism(1).name(FAILURE_MAPPER_NAME.getName()).uid(FAILURE_MAPPER_NAME.getUid());
}
v5.keyBy(Event::getKey).flatMap(createSemanticsCheckMapper(pt)).name(SEMANTICS_CHECK_MAPPER.getName()).uid(SEMANTICS_CHECK_MAPPER.getUid()).addSink(new PrintSinkFunction<>()).name(SEMANTICS_CHECK_PRINT_SINK.getName()).uid(SEMANTICS_CHECK_PRINT_SINK.getUid());
// Check sliding windows aggregations. Output all elements assigned to a window and later on
// check if each event was emitted slide_factor number of times
DataStream<Tuple2<Integer, List<Event>>> eventStream4 = eventStream2.keyBy(Event::getKey).window(createSlidingWindow(pt)).apply(new WindowFunction<Event, Tuple2<Integer, List<Event>>, Integer, TimeWindow>() {
private static final long serialVersionUID = 3166250579972849440L;
@Override
public void apply(Integer key, TimeWindow window, Iterable<Event> input, Collector<Tuple2<Integer, List<Event>>> out) {
out.collect(Tuple2.of(key, StreamSupport.stream(input.spliterator(), false).collect(Collectors.toList())));
}
}
| 3.26 |
flink_DecimalBigDecimalConverter_create_rdh
|
// --------------------------------------------------------------------------------------------
// Factory method
// --------------------------------------------------------------------------------------------
static DecimalBigDecimalConverter create(DataType dataType) {
final DecimalType decimalType = ((DecimalType) (dataType.getLogicalType()));
return new DecimalBigDecimalConverter(decimalType.getPrecision(), decimalType.getScale());
}
| 3.26 |
flink_PatternProcessFunctionBuilder_fromFlatSelect_rdh
|
/**
* Starts constructing a {@link PatternProcessFunction} from a {@link PatternFlatSelectFunction}
* that emitted elements through {@link org.apache.flink.util.Collector}.
*/
static <IN, OUT> FlatSelectBuilder<IN, OUT> fromFlatSelect(final PatternFlatSelectFunction<IN, OUT> function) {
return new FlatSelectBuilder<>(function);
}
| 3.26 |
flink_PatternProcessFunctionBuilder_fromSelect_rdh
|
/**
* Starts constructing a {@link PatternProcessFunction} from a {@link PatternSelectFunction}
* that emitted elements through return value.
*/
static <IN, OUT> SelectBuilder<IN, OUT> fromSelect(final PatternSelectFunction<IN, OUT> function) {
return new SelectBuilder<>(function);
}
| 3.26 |
flink_ScheduledDropwizardReporter_open_rdh
|
// ------------------------------------------------------------------------
// life cycle
// ------------------------------------------------------------------------
@Override
public void open(MetricConfig config) {
this.reporter = getReporter(config);
}
| 3.26 |
flink_ScheduledDropwizardReporter_getCounters_rdh
|
// ------------------------------------------------------------------------
// Getters
// ------------------------------------------------------------------------
@VisibleForTesting
Map<Counter, String> getCounters() {
return counters;
}
| 3.26 |
flink_ScheduledDropwizardReporter_notifyOfAddedMetric_rdh
|
// ------------------------------------------------------------------------
// adding / removing metrics
// ------------------------------------------------------------------------
@Override
public void notifyOfAddedMetric(Metric metric, String metricName, MetricGroup group) {
final String fullName = group.getMetricIdentifier(metricName, this);
synchronized(this) {
switch (metric.getMetricType()) {
case COUNTER :
counters.put(((Counter) (metric)), fullName);
registry.register(fullName, new FlinkCounterWrapper(((Counter) (metric))));
break;
case GAUGE :
gauges.put(((Gauge<?>) (metric)), fullName);
registry.register(fullName, FlinkGaugeWrapper.fromGauge(((Gauge<?>) (metric))));
break;
case HISTOGRAM :
Histogram histogram = ((Histogram) (metric));
histograms.put(histogram, fullName);
if (histogram instanceof DropwizardHistogramWrapper) {
registry.register(fullName, ((DropwizardHistogramWrapper) (histogram)).getDropwizardHistogram());
} else {
registry.register(fullName, new FlinkHistogramWrapper(histogram));
}
break;
case METER :
Meter meter = ((Meter) (metric));
meters.put(meter, fullName);
if (meter instanceof DropwizardMeterWrapper) {
registry.register(fullName, ((DropwizardMeterWrapper) (meter)).getDropwizardMeter());
} else {
registry.register(fullName, new FlinkMeterWrapper(meter));
}
break;
default :
log.warn("Cannot add metric of type {}. This indicates that the reporter " + "does not support this metric type.", metric.getClass().getName());
}
}
}
| 3.26 |
flink_ScheduledDropwizardReporter_report_rdh
|
// ------------------------------------------------------------------------
// scheduled reporting
// ------------------------------------------------------------------------
@Override
public void report() {
// we do not need to lock here, because the dropwizard registry is
// internally a concurrent map
@SuppressWarnings("rawtypes")
final SortedMap<String, Gauge> gauges = registry.getGauges();
final SortedMap<String, Counter> counters = registry.getCounters();
final SortedMap<String, Histogram> histograms = registry.getHistograms();
final SortedMap<String, Meter> meters = registry.getMeters();
final SortedMap<String, Timer> timers = registry.getTimers();
this.reporter.report(gauges, counters, histograms, meters, timers);
}
| 3.26 |
flink_AsyncLookupFunction_eval_rdh
|
/**
* Invokes {@link #asyncLookup} and chains futures.
*/
public final void eval(CompletableFuture<Collection<RowData>> future, Object... keys) {
GenericRowData keyRow = GenericRowData.of(keys);
asyncLookup(keyRow).whenComplete((result, exception) -> {
if (exception != null) {
future.completeExceptionally(new TableException(String.format("Failed to asynchronously lookup entries with key '%s'", keyRow), exception));
return;
}
future.complete(result);
});
}
| 3.26 |
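Note: the eval() above bridges the future returned by asyncLookup into the caller-provided future, wrapping failures before completing it exceptionally. A minimal, self-contained sketch of the same chaining pattern with plain CompletableFutures; the lookupFromBackend helper is hypothetical and only stands in for asyncLookup:

import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.CompletableFuture;

public class FutureBridgeSketch {
    // hypothetical backend call standing in for asyncLookup(keyRow)
    static CompletableFuture<Collection<String>> lookupFromBackend(String key) {
        return CompletableFuture.supplyAsync(() -> Collections.singletonList("row-for-" + key));
    }

    // same shape as eval(): forward the result, wrap failures before completing exceptionally
    static void eval(CompletableFuture<Collection<String>> resultFuture, String key) {
        lookupFromBackend(key).whenComplete((result, exception) -> {
            if (exception != null) {
                resultFuture.completeExceptionally(new RuntimeException(
                        "Failed to asynchronously lookup entries with key '" + key + "'", exception));
                return;
            }
            resultFuture.complete(result);
        });
    }

    public static void main(String[] args) {
        CompletableFuture<Collection<String>> future = new CompletableFuture<>();
        eval(future, "42");
        System.out.println(future.join()); // [row-for-42]
    }
}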
flink_HiveParserUtils_getGenericUDAFInfo_rdh
|
/**
* Returns the GenericUDAFInfo struct for the aggregation.
*/
public static GenericUDAFInfo getGenericUDAFInfo(GenericUDAFEvaluator evaluator, GenericUDAFEvaluator.Mode emode, ArrayList<ExprNodeDesc> aggParameters) throws SemanticException {
GenericUDAFInfo res = new GenericUDAFInfo();
// set r.genericUDAFEvaluator
res.genericUDAFEvaluator = evaluator;
// set r.returnType
ObjectInspector returnOI;
try {
ArrayList<ObjectInspector> aggOIs = getWritableObjectInspector(aggParameters);
ObjectInspector[] aggOIArray = new ObjectInspector[aggOIs.size()];
for (int i = 0; i < aggOIs.size(); ++i) {
aggOIArray[i] = aggOIs.get(i);
}
returnOI = res.genericUDAFEvaluator.init(emode, aggOIArray);
res.returnType = TypeInfoUtils.getTypeInfoFromObjectInspector(returnOI);
} catch (HiveException e) {
throw new SemanticException(e);
}
// set r.convertedParameters
// TODO: type conversion
res.convertedParameters = aggParameters;
return res;
}
| 3.26 |
flink_HiveParserUtils_toImmutableList_rdh
|
// converts a collection to guava ImmutableList
private static Object toImmutableList(Collection collection) {
try {
Class clz = (useShadedImmutableList) ? shadedImmutableListClz : immutableListClz;
return HiveReflectionUtils.invokeMethod(clz, null, "copyOf", new Class[]{ Collection.class }, new Object[]{ collection });
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create immutable list", e);
}
}
| 3.26 |
flink_HiveParserUtils_toRelDataType_rdh
|
// converts a hive TypeInfo to RelDataType
public static RelDataType toRelDataType(TypeInfo typeInfo, RelDataTypeFactory relTypeFactory) throws SemanticException {
RelDataType res;
switch (typeInfo.getCategory()) {
case PRIMITIVE :
// hive sets NULLABLE for all primitive types, revert that
res = HiveParserTypeConverter.convert(typeInfo, relTypeFactory);
return relTypeFactory.createTypeWithNullability(res, false);
case LIST :
RelDataType elementType = toRelDataType(((ListTypeInfo) (typeInfo)).getListElementTypeInfo(), relTypeFactory);
return relTypeFactory.createArrayType(elementType, -1);
case MAP :
RelDataType keyType = toRelDataType(((MapTypeInfo) (typeInfo)).getMapKeyTypeInfo(), relTypeFactory);
RelDataType valType = toRelDataType(((MapTypeInfo) (typeInfo)).getMapValueTypeInfo(), relTypeFactory);
return relTypeFactory.createMapType(keyType, valType);
case STRUCT :
List<TypeInfo> types = ((StructTypeInfo) (typeInfo)).getAllStructFieldTypeInfos();
List<RelDataType> convertedTypes = new ArrayList<>(types.size());
for (TypeInfo type : types) {
convertedTypes.add(toRelDataType(type, relTypeFactory));
}
return relTypeFactory.createStructType(convertedTypes, ((StructTypeInfo) (typeInfo)).getAllStructFieldNames());
case UNION :
default :
throw new SemanticException(String.format("%s type is not supported yet", typeInfo.getCategory().name()));
}
}
| 3.26 |
flink_HiveParserUtils_isNative_rdh
|
// TODO: we need a way to tell whether a function is built-in, for now just return false so that
// the unparser will quote them
public static boolean isNative(SqlOperator sqlOperator) {
return false;
}
| 3.26 |
flink_HiveParserUtils_getWritableObjectInspector_rdh
|
/**
* Convert exprNodeDesc array to ObjectInspector array.
*/
public static ArrayList<ObjectInspector> getWritableObjectInspector(ArrayList<ExprNodeDesc> exprs) {
ArrayList<ObjectInspector> result = new ArrayList<>();
for (ExprNodeDesc expr : exprs) {
result.add(expr.getWritableObjectInspector());
}
return result;
}
| 3.26 |
flink_HiveParserUtils_isValuesTempTable_rdh
|
/**
* Check if the table is the temporary table created by VALUES() syntax.
*
* @param tableName
* table name
*/
public static boolean isValuesTempTable(String tableName) {
return tableName.toLowerCase().startsWith(HiveParserSemanticAnalyzer.VALUES_TMP_TABLE_NAME_PREFIX.toLowerCase());
}
| 3.26 |
flink_HiveParserUtils_createAggregateCall_rdh
|
/**
* Counterpart of org.apache.calcite.rel.core.AggregateCall#create. It uses
* HiveParserOperatorBinding as SqlOperatorBinding to create AggregateCall instead, which
* enables to get literal value for operand.
*/
private static AggregateCall createAggregateCall(SqlAggFunction aggFunction,
boolean distinct, boolean approximate, boolean ignoreNulls, List<Integer> argList, int filterArg, RelCollation collation, int groupCount, RelNode input, RelDataType type, String name, List<RexNode> operands) {
if (type == null) {
final RelDataTypeFactory v176 = input.getCluster().getTypeFactory();
final List<RelDataType> types = SqlTypeUtil.projectTypes(input.getRowType(), argList);
final HiveParserOperatorBinding callBinding = new HiveParserAggOperatorBinding(v176, aggFunction, types, operands, groupCount, filterArg >= 0);
type = aggFunction.inferReturnType(callBinding);
}
return AggregateCall.create(aggFunction, distinct, approximate, ignoreNulls, argList, filterArg, null, collation, type, name);
}
| 3.26 |
flink_HiveParserUtils_writeAsText_rdh
|
/**
* Convert a string to Text format and write its bytes in the same way TextOutputFormat would
* do. This is needed to properly encode non-ascii characters.
*/
public static void writeAsText(String text, FSDataOutputStream out) throws IOException {
Text to = new Text(text);
out.write(to.getBytes(), 0, to.getLength());
}
| 3.26 |
flink_HiveParserUtils_projectNonColumnEquiConditions_rdh
|
/**
* Push any equi join conditions that are not column references as Projections on top of the
* children.
*/
public static RexNode projectNonColumnEquiConditions(RelFactories.ProjectFactory factory, RelNode[] inputRels, List<RexNode> leftJoinKeys, List<RexNode> rightJoinKeys, int systemColCount, List<Integer> leftKeys, List<Integer> rightKeys) {
RelNode leftRel = inputRels[0];
RelNode rightRel = inputRels[1];
RexBuilder rexBuilder = leftRel.getCluster().getRexBuilder();
RexNode outJoinCond = null;
int origLeftInputSize = leftRel.getRowType().getFieldCount();
int origRightInputSize = rightRel.getRowType().getFieldCount();
List<RexNode> newLeftFields = new ArrayList<>();
List<String> newLeftFieldNames = new ArrayList<>();
List<RexNode> newRightFields = new ArrayList<>();
List<String> newRightFieldNames = new ArrayList<>();
int leftKeyCount = leftJoinKeys.size();
int i;
for (i = 0; i < origLeftInputSize; i++) {
final RelDataTypeField field = leftRel.getRowType().getFieldList().get(i);
newLeftFields.add(rexBuilder.makeInputRef(field.getType(), i));
newLeftFieldNames.add(field.getName());
}
for (i = 0; i < origRightInputSize; i++) {
final RelDataTypeField field = rightRel.getRowType().getFieldList().get(i);
newRightFields.add(rexBuilder.makeInputRef(field.getType(), i));
newRightFieldNames.add(field.getName());
}
ImmutableBitSet.Builder origColEqCondsPosBuilder = ImmutableBitSet.builder();
int newKeyCount = 0;
List<Pair<Integer, Integer>> origColEqConds = new ArrayList<>();
for (i = 0; i < leftKeyCount; i++) {
RexNode leftKey = leftJoinKeys.get(i);
RexNode rightKey = rightJoinKeys.get(i);
if ((leftKey instanceof RexInputRef) && (rightKey instanceof RexInputRef)) {
origColEqConds.add(Pair.of(((RexInputRef) (leftKey)).getIndex(), ((RexInputRef) (rightKey)).getIndex()));
origColEqCondsPosBuilder.set(i);
}
else {
newLeftFields.add(leftKey);
newLeftFieldNames.add(null);
newRightFields.add(rightKey);
newRightFieldNames.add(null);
newKeyCount++;
}
}
ImmutableBitSet origColEqCondsPos = origColEqCondsPosBuilder.build();
for (i = 0; i < origColEqConds.size(); i++) {
Pair<Integer, Integer> p = origColEqConds.get(i);
int condPos = origColEqCondsPos.nth(i);
RexNode leftKey = leftJoinKeys.get(condPos);
RexNode rightKey = rightJoinKeys.get(condPos);
leftKeys.add(p.left);
rightKeys.add(p.right);
RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, rexBuilder.makeInputRef(leftKey.getType(), systemColCount + p.left), rexBuilder.makeInputRef(rightKey.getType(), ((systemColCount + origLeftInputSize) + newKeyCount) + p.right));
if (outJoinCond == null) {
outJoinCond = cond;
} else {
outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond);
}
}
if (newKeyCount == 0) {
return outJoinCond;
}
int newLeftOffset = systemColCount + origLeftInputSize;
int newRightOffset = ((systemColCount + origLeftInputSize) + origRightInputSize) + newKeyCount;
for (i = 0; i < newKeyCount; i++) {
leftKeys.add(origLeftInputSize + i);
rightKeys.add(origRightInputSize + i);
RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, rexBuilder.makeInputRef(newLeftFields.get(origLeftInputSize + i).getType(), newLeftOffset + i), rexBuilder.makeInputRef(newRightFields.get(origRightInputSize + i).getType(), newRightOffset + i));
if (outJoinCond == null) {
outJoinCond = cond;
} else {
outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond);
}
}
// add a project if we need to produce new keys beyond the original input fields
if (newKeyCount > 0) {
leftRel = factory.createProject(leftRel, Collections.emptyList(), newLeftFields, SqlValidatorUtil.uniquify(newLeftFieldNames, false));
rightRel = factory.createProject(rightRel, Collections.emptyList(), newRightFields, SqlValidatorUtil.uniquify(newRightFieldNames, false));
}
inputRels[0] = leftRel;
inputRels[1] = rightRel;
return outJoinCond;
}
| 3.26 |
flink_HiveParserUtils_canHandleQbForCbo_rdh
|
// Overrides CalcitePlanner::canHandleQbForCbo to support SORT BY, CLUSTER BY, etc.
public static String canHandleQbForCbo(QueryProperties queryProperties) {
if (!queryProperties.hasPTF()) {
return null;
}
String msg = "";
if (queryProperties.hasPTF()) {
msg += "has PTF; ";
}
return msg;
}
| 3.26 |
flink_HiveParserUtils_toImmutableSet_rdh
|
// converts a collection to guava ImmutableSet
private static Object toImmutableSet(Collection collection) {
try {
Class clz = (useShadedImmutableSet) ? shadedImmutableSetClz : immutableSetClz;
return HiveReflectionUtils.invokeMethod(clz, null, "copyOf", new Class[]{ Collection.class }, new Object[]{ collection });
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create immutable set", e);
}
}
| 3.26 |
flink_HiveParserUtils_rexSubQueryIn_rdh
|
/**
* Proxy to {@link RexSubQuery#in(RelNode, com.google.common.collect.ImmutableList)}.
*/
public static RexSubQuery rexSubQueryIn(RelNode relNode, Collection<RexNode> rexNodes) {
Class[] argTypes = new Class[]{ RelNode.class, null };
argTypes[1] = (useShadedImmutableList) ? shadedImmutableListClz : immutableListClz;
Method method = HiveReflectionUtils.tryGetMethod(RexSubQuery.class, "in", argTypes);
Preconditions.checkState(method != null, "Cannot get the method to create an IN sub-query");
try {
return ((RexSubQuery) (method.invoke(null, relNode, toImmutableList(rexNodes))));
} catch (IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create RexSubQuery", e);
}
}
| 3.26 |
flink_HiveParserUtils_genFilterRelNode_rdh
|
// creates LogicFilter node
public static RelNode genFilterRelNode(RelNode relNode, RexNode rexNode, Collection<CorrelationId> variables) {
Class[] argTypes = new Class[]{ RelNode.class, RexNode.class, useShadedImmutableSet ? shadedImmutableSetClz : immutableSetClz };
Method method = HiveReflectionUtils.tryGetMethod(LogicalFilter.class, "create", argTypes);
Preconditions.checkState(method != null, "Cannot get the method to create a LogicalFilter");
try {
return ((LogicalFilter) (method.invoke(null, relNode, rexNode, toImmutableSet(variables))));
} catch (IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create LogicalFilter", e);
}
}
| 3.26 |
flink_HiveParserUtils_getFunctionInfo_rdh
|
// Get FunctionInfo and always look for it in metastore when FunctionRegistry returns null.
public static FunctionInfo getFunctionInfo(String funcName) throws SemanticException {
FunctionInfo res = FunctionRegistry.getFunctionInfo(funcName);
if (res == null) {
SessionState sessionState = SessionState.get();
HiveConf hiveConf = (sessionState != null) ? sessionState.getConf() : null;
if (hiveConf != null) {
// TODO: need to support overriding hive version
try (HiveMetastoreClientWrapper hmsClient = new HiveMetastoreClientWrapper(hiveConf, HiveShimLoader.getHiveVersion())) {
String[] parts = FunctionUtils.getQualifiedFunctionNameParts(funcName);
Function v136 = hmsClient.getFunction(parts[0], parts[1]);
getSessionHiveShim().registerTemporaryFunction(FunctionUtils.qualifyFunctionName(parts[1], parts[0]), Thread.currentThread().getContextClassLoader().loadClass(v136.getClassName()));
res = FunctionRegistry.getFunctionInfo(funcName);
} catch (NoSuchObjectException e) {
LOG.warn("Function {} doesn't exist in metastore", funcName);
} catch (Exception e) {
LOG.warn("Failed to look up function in metastore", e);
}
}
}
return res;
}
| 3.26 |
flink_HiveParserUtils_makeOver_rdh
|
/**
* Proxy to {@link RexBuilder#makeOver(RelDataType, SqlAggFunction, List, List,
* com.google.common.collect.ImmutableList, RexWindowBound, RexWindowBound, boolean, boolean,
* boolean, boolean, boolean)}.
*/
public static RexNode makeOver(RexBuilder rexBuilder, RelDataType type, SqlAggFunction operator, List<RexNode> exprs, List<RexNode> partitionKeys, List<RexFieldCollation> orderKeys, RexWindowBound lowerBound, RexWindowBound upperBound, boolean physical, boolean allowPartial, boolean nullWhenCountZero, boolean distinct, boolean ignoreNulls) {
Preconditions.checkState((immutableListClz != null) || (shadedImmutableListClz != null), "Neither original nor shaded guava class can be found");
Method method = null;
final String methodName = "makeOver";
final int orderKeysIndex = 4;
Class[] argTypes = new Class[]{ RelDataType.class, SqlAggFunction.class, List.class, List.class, null, RexWindowBound.class, RexWindowBound.class, boolean.class, boolean.class, boolean.class, boolean.class, boolean.class };
if (immutableListClz != null) {
argTypes[orderKeysIndex] = immutableListClz;
method = HiveReflectionUtils.tryGetMethod(rexBuilder.getClass(), methodName, argTypes);
}
if (method == null) {
Preconditions.checkState(shadedImmutableListClz != null, String.format("Shaded guava class not found, but method %s takes shaded parameter", methodName));
argTypes[orderKeysIndex] = shadedImmutableListClz;
method = HiveReflectionUtils.tryGetMethod(rexBuilder.getClass(), methodName, argTypes);
}
Preconditions.checkState(method != null, "Neither original nor shaded method can be found");
Object orderKeysArg = toImmutableList(orderKeys);
Object[] args = new Object[]{ type, operator, exprs, partitionKeys, orderKeysArg, lowerBound, upperBound, physical, allowPartial, nullWhenCountZero, distinct, ignoreNulls };
try {
return ((RexNode) (method.invoke(rexBuilder, args)));
} catch (InvocationTargetException | IllegalAccessException e) {
throw new RuntimeException("Failed to invoke " + methodName, e);
}
}
| 3.26 |
flink_HiveParserUtils_getGenericUDAFEvaluator_rdh
|
// Returns the GenericUDAFEvaluator for the aggregation. This is called once for each GroupBy
// aggregation.
// TODO: Requiring a GenericUDAFEvaluator means we only support hive UDAFs. Need to avoid this
// to support flink UDAFs.
public static GenericUDAFEvaluator getGenericUDAFEvaluator(String aggName, ArrayList<ExprNodeDesc> aggParameters, HiveParserASTNode aggTree, boolean isDistinct, boolean isAllColumns, SqlOperatorTable opTable) throws SemanticException {
ArrayList<ObjectInspector> originalParameterTypeInfos = getWritableObjectInspector(aggParameters);
GenericUDAFEvaluator result = FunctionRegistry.getGenericUDAFEvaluator(aggName, originalParameterTypeInfos, isDistinct, isAllColumns);
if (result == null) {
// this happens for temp functions
SqlOperator sqlOperator = getSqlOperator(aggName, opTable, SqlFunctionCategory.USER_DEFINED_FUNCTION);
if (isBridgingSqlAggFunction(sqlOperator) && (getBridgingSqlFunctionDefinition(sqlOperator) instanceof HiveGenericUDAF)) {
HiveGenericUDAF hiveGenericUDAF = ((HiveGenericUDAF) (getBridgingSqlFunctionDefinition(sqlOperator)));
result = hiveGenericUDAF.createEvaluator(originalParameterTypeInfos.toArray(new ObjectInspector[0]));
}
}
if (null == result) {
String reason = (("Looking for UDAF Evaluator\"" + aggName) + "\" with parameters ") + originalParameterTypeInfos;
throw new SemanticException(HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_FUNCTION_SIGNATURE, aggTree.getChild(0), reason));
}
return result;
}
| 3.26 |
flink_HiveParserUtils_genValuesRelNode_rdh
|
// creates LogicalValues node
public static RelNode genValuesRelNode(RelOptCluster cluster, RelDataType rowType, List<List<RexLiteral>> rows) {
List<Object> immutableRows = rows.stream().map(HiveParserUtils::toImmutableList).collect(Collectors.toList());
Class[] argTypes = new Class[]{ RelOptCluster.class, RelDataType.class, null };
if (useShadedImmutableList) {
argTypes[2] = HiveParserUtils.shadedImmutableListClz;
} else {
argTypes[2] = HiveParserUtils.immutableListClz;
}
Method method = HiveReflectionUtils.tryGetMethod(LogicalValues.class, "create", argTypes);
Preconditions.checkState(method != null, "Cannot get the method to create LogicalValues");
try {
return ((RelNode) (method.invoke(null, cluster, rowType, HiveParserUtils.toImmutableList(immutableRows))));
} catch (IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create LogicalValues", e);
}
}
| 3.26 |
flink_HiveParserUtils_isRegex_rdh
|
/**
* Returns whether the pattern is a regex expression (instead of a normal string). Normal string
* is a string with all alphabets/digits and "_".
*/
public static boolean isRegex(String pattern, HiveConf conf) {
String qIdSupport = HiveConf.getVar(conf, ConfVars.HIVE_QUOTEDID_SUPPORT);
if ("column".equals(qIdSupport)) {
return false;
}
for (int i = 0; i < pattern.length(); i++) {
if ((!Character.isLetterOrDigit(pattern.charAt(i))) && (pattern.charAt(i) != '_')) {
return true;
}
}
return false;
}
| 3.26 |
flink_SessionManager_create_rdh
|
/**
* Create the {@link SessionManager} with the default configuration.
*/
static SessionManager create(DefaultContext defaultContext) {
return new SessionManagerImpl(defaultContext);
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_combinePostJoinFilters_rdh
|
/**
* Combines the post-join filters from the left and right inputs (if they are MultiJoinRels)
* into a single AND'd filter.
*
* @param joinRel
* the original LogicalJoin
* @param left
* left child of the LogicalJoin
* @param right
* right child of the LogicalJoin
* @return combined post-join filters AND'd together
*/
private List<RexNode> combinePostJoinFilters(Join joinRel, RelNode left, RelNode right) {
final List<RexNode> filters = new ArrayList<>();
if (right instanceof MultiJoin) {
final MultiJoin multiRight = ((MultiJoin) (right));
filters.add(shiftRightFilter(joinRel, left, multiRight, multiRight.getPostJoinFilter()));
}
if (left instanceof MultiJoin) {
filters.add(((MultiJoin) (left)).getPostJoinFilter());
}
return filters;
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_combineJoinFilters_rdh
|
/**
* Combines the join filters from the left and right inputs (if they are MultiJoinRels) with the
* join filter in the joinrel into a single AND'd join filter, unless the inputs correspond to
* null generating inputs in an outer join.
*
* @param join
* Join
* @param left
* Left input of the join
* @param right
* Right input of the join
* @return combined join filters AND-ed together
*/
private List<RexNode> combineJoinFilters(Join join, RelNode left, RelNode right, List<Boolean> inputNullGenFieldList) {
JoinRelType joinType = join.getJoinType();
JoinInfo joinInfo = join.analyzeCondition();
ImmutableIntList leftKeys = joinInfo.leftKeys;
ImmutableIntList rightKeys = joinInfo.rightKeys;
// AND the join condition if this isn't a left or right outer join; In those cases, the
// outer join condition is already tracked separately.
final List<RexNode> filters = new ArrayList<>();
if ((joinType != JoinRelType.LEFT) && (joinType != JoinRelType.RIGHT)) {
filters.add(join.getCondition());
}
if (canCombine(left, leftKeys, joinType, joinType.generatesNullsOnLeft(), true, inputNullGenFieldList, 0)) {
filters.add(((MultiJoin) (left)).getJoinFilter());
}
// Need to adjust the RexInputs of the right child, since those need to shift over to the
// right.
if (canCombine(right, rightKeys, joinType, joinType.generatesNullsOnRight(), false, inputNullGenFieldList, left.getRowType().getFieldCount())) {
MultiJoin multiJoin = ((MultiJoin) (right));
filters.add(shiftRightFilter(join, left, multiJoin, multiJoin.getJoinFilter()));
}
return filters;
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_matches_rdh
|
// ~ Methods ----------------------------------------------------------------
@Override
public boolean matches(RelOptRuleCall call) {
final Join origJoin = call.rel(0);
return origJoin.getJoinType().projectsRight();
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_combineOuterJoins_rdh
|
/**
* Combines the outer join conditions and join types from the left and right join inputs. If the
* join itself is either a left or right outer join, then the join condition corresponding to
* the join is also set in the position corresponding to the null-generating input into the
* join. The join type is also set.
*
* @param joinRel
* join rel
* @param combinedInputs
* the combined inputs to the join
* @param left
* left child of the joinrel
* @param right
* right child of the joinrel
* @param joinSpecs
* the list where the join types and conditions will be copied
*/
private void combineOuterJoins(Join joinRel, List<RelNode> combinedInputs, RelNode left, RelNode right, List<Pair<JoinRelType, RexNode>> joinSpecs, List<Boolean> inputNullGenFieldList) {
JoinRelType joinType = joinRel.getJoinType();
JoinInfo joinInfo = joinRel.analyzeCondition();
ImmutableIntList leftKeys = joinInfo.leftKeys;
ImmutableIntList rightKeys = joinInfo.rightKeys;
boolean leftCombined = canCombine(left, leftKeys, joinType, joinType.generatesNullsOnLeft(), true, inputNullGenFieldList, 0);
boolean rightCombined = canCombine(right, rightKeys, joinType, joinType.generatesNullsOnRight(), false, inputNullGenFieldList, left.getRowType().getFieldCount());
switch (joinType) {
case LEFT :
if (leftCombined) {
copyOuterJoinInfo(((MultiJoin) (left)), joinSpecs, 0, null, null);
}
else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
joinSpecs.add(Pair.of(joinType, joinRel.getCondition()));
break;
case RIGHT :
joinSpecs.add(Pair.of(joinType, joinRel.getCondition()));
if (rightCombined) {
copyOuterJoinInfo(((MultiJoin) (right)), joinSpecs, left.getRowType().getFieldCount(), right.getRowType().getFieldList(), joinRel.getRowType().getFieldList());
} else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
break;
default :
if (leftCombined) {
copyOuterJoinInfo(((MultiJoin) (left)), joinSpecs, 0, null, null);
} else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
if (rightCombined) {
copyOuterJoinInfo(((MultiJoin) (right)), joinSpecs, left.getRowType().getFieldCount(), right.getRowType().getFieldList(), joinRel.getRowType().getFieldList());
} else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
}
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_shiftRightFilter_rdh
|
/**
* Shifts a filter originating from the right child of the LogicalJoin to the right, to reflect
* the filter now being applied on the resulting MultiJoin.
*
* @param joinRel
* the original LogicalJoin
* @param left
* the left child of the LogicalJoin
* @param right
* the right child of the LogicalJoin
* @param rightFilter
* the filter originating from the right child
* @return the adjusted right filter
*/
private RexNode shiftRightFilter(Join joinRel, RelNode left, MultiJoin right, RexNode rightFilter) {
if (rightFilter == null) {
return null;
}
int nFieldsOnLeft = left.getRowType().getFieldList().size();
int nFieldsOnRight = right.getRowType().getFieldList().size();
int[] adjustments = new int[nFieldsOnRight];
for (int i = 0; i < nFieldsOnRight; i++) {
adjustments[i] = nFieldsOnLeft;
}
rightFilter = rightFilter.accept(new RelOptUtil.RexInputConverter(joinRel.getCluster().getRexBuilder(), right.getRowType().getFieldList(), joinRel.getRowType().getFieldList(),
adjustments));
return rightFilter;
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_copyOuterJoinInfo_rdh
|
/**
* Copies outer join data from a source MultiJoin to a new set of arrays. Also adjusts the
* conditions to reflect the new position of an input if that input ends up being shifted to the
* right.
*
* @param multiJoin
* the source MultiJoin
* @param destJoinSpecs
* the list where the join types and conditions will be copied
* @param adjustmentAmount
* if > 0, the amount the RexInputRefs in the join conditions need to
* be adjusted by
* @param srcFields
* the source fields that the original join conditions are referencing
* @param destFields
* the destination fields that the new join conditions
*/
private void copyOuterJoinInfo(MultiJoin multiJoin, List<Pair<JoinRelType, RexNode>> destJoinSpecs, int adjustmentAmount, List<RelDataTypeField> srcFields, List<RelDataTypeField> destFields) {
final List<Pair<JoinRelType, RexNode>> srcJoinSpecs = Pair.zip(multiJoin.getJoinTypes(), multiJoin.getOuterJoinConditions());
if (adjustmentAmount == 0) {
destJoinSpecs.addAll(srcJoinSpecs);
} else {
assert srcFields != null;
assert destFields != null;
int nFields = srcFields.size();
int[] adjustments = new int[nFields];
for (int idx = 0; idx < nFields; idx++) {
adjustments[idx] = adjustmentAmount;
}
for (Pair<JoinRelType, RexNode> src : srcJoinSpecs) {
destJoinSpecs.add(Pair.of(src.left, src.right == null ? null : src.right.accept(new RelOptUtil.RexInputConverter(multiJoin.getCluster().getRexBuilder(), srcFields, destFields, adjustments))));
}
}
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_canCombine_rdh
|
/**
* Returns whether an input can be merged into a given relational expression without changing
* semantics.
*
* @param input
* input into a join
* @param nullGenerating
* true if the input is null generating
* @return true if the input can be combined into a parent MultiJoin
*/
private boolean canCombine(RelNode input, ImmutableIntList joinKeys, JoinRelType joinType, boolean nullGenerating, boolean isLeft, List<Boolean> inputNullGenFieldList, int beginIndex) {
if (input instanceof MultiJoin) {
MultiJoin join = ((MultiJoin) (input));
if (join.isFullOuterJoin() || nullGenerating) {
return false;
}
if (joinType == JoinRelType.LEFT) {
if (!isLeft) {
return false;
} else {
for (int joinKey : joinKeys) {
if (inputNullGenFieldList.get(joinKey + beginIndex)) {
return false;
}
}
}
} else if (joinType == JoinRelType.RIGHT) {
if (isLeft) {
return false;
} else {
for (int joinKey : joinKeys) {
if (inputNullGenFieldList.get(joinKey + beginIndex)) {
return false;
}
}
}
} else if (joinType == JoinRelType.INNER) {
for (int joinKey : joinKeys) {
if (inputNullGenFieldList.get(joinKey + beginIndex)) {
return false;
}
}
} else {
return false;
}
return true;
} else {
return false;
}
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_addOnJoinFieldRefCounts_rdh
|
/**
* Adds on to the existing join condition reference counts the references from the new join
* condition.
*
* @param multiJoinInputs
* inputs into the new MultiJoin
* @param nTotalFields
* total number of fields in the MultiJoin
* @param joinCondition
* the new join condition
* @param origJoinFieldRefCounts
* existing join condition reference counts
* @return Map containing the new join condition
*/
private ImmutableMap<Integer, ImmutableIntList> addOnJoinFieldRefCounts(List<RelNode> multiJoinInputs, int nTotalFields, RexNode joinCondition, List<int[]> origJoinFieldRefCounts) {
// count the input references in the join condition
int[] joinCondRefCounts = new int[nTotalFields];
joinCondition.accept(new InputReferenceCounter(joinCondRefCounts));
// first, make a copy of the ref counters
final Map<Integer, int[]> refCountsMap = new HashMap<>();
int nInputs = multiJoinInputs.size();
int currInput = 0;
for (int[] origRefCounts : origJoinFieldRefCounts) {
refCountsMap.put(currInput, origRefCounts.clone());
currInput++;
}
// add on to the counts for each input into the MultiJoin the
// reference counts computed for the current join condition
currInput = -1;
int startField = 0;
int nFields = 0;
for (int i = 0; i < nTotalFields; i++) {
if (joinCondRefCounts[i] == 0) {
continue;
}
while (i >= (startField + nFields)) {
startField += nFields;
currInput++;
assert currInput < nInputs;
nFields = multiJoinInputs.get(currInput).getRowType().getFieldCount();
}
int[] refCounts = refCountsMap.get(currInput);
refCounts[i - startField] += joinCondRefCounts[i];
}
final ImmutableMap.Builder<Integer, ImmutableIntList> builder = ImmutableMap.builder();
for (Map.Entry<Integer, int[]> entry : refCountsMap.entrySet()) {
builder.put(entry.getKey(), ImmutableIntList.of(entry.getValue()));
}
return builder.build();
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_withOperandFor_rdh
|
/**
* Defines an operand tree for the given classes.
*/
default Config withOperandFor(Class<? extends Join> joinClass) {
return withOperandSupplier(b0 -> b0.operand(joinClass).inputs(b1 -> b1.operand(RelNode.class).anyInputs(), b2 -> b2.operand(RelNode.class).anyInputs())).as(FlinkJoinToMultiJoinRule.Config.class);
}
| 3.26 |
flink_FlinkJoinToMultiJoinRule_combineInputs_rdh
|
/**
* Combines the inputs into a LogicalJoin into an array of inputs.
*
* @param join
* original join
* @param left
* left input into join
* @param right
* right input into join
* @param projFieldsList
* returns a list of the new combined projection fields
* @param joinFieldRefCountsList
* returns a list of the new combined join field reference counts
* @return combined left and right inputs in an array
*/
private List<RelNode> combineInputs(Join join, RelNode left, RelNode right, List<ImmutableBitSet> projFieldsList, List<int[]> joinFieldRefCountsList, List<Boolean> inputNullGenFieldList) {
final List<RelNode> v23 = new ArrayList<>();
// Leave the null generating sides of an outer join intact; don't pull up those children
// inputs into the array we're constructing.
JoinInfo joinInfo = join.analyzeCondition();
ImmutableIntList leftKeys = joinInfo.leftKeys;
ImmutableIntList rightKeys = joinInfo.rightKeys;
if (canCombine(left, leftKeys, join.getJoinType(), join.getJoinType().generatesNullsOnLeft(), true, inputNullGenFieldList, 0)) {
final MultiJoin leftMultiJoin = ((MultiJoin) (left));
for (int i = 0; i < leftMultiJoin.getInputs().size(); i++) {
v23.add(leftMultiJoin.getInput(i));
projFieldsList.add(leftMultiJoin.getProjFields().get(i));
joinFieldRefCountsList.add(leftMultiJoin.getJoinFieldRefCountsMap().get(i).toIntArray());
}
} else {
v23.add(left);
projFieldsList.add(null);
joinFieldRefCountsList.add(new int[left.getRowType().getFieldCount()]);
}
if (canCombine(right, rightKeys, join.getJoinType(), join.getJoinType().generatesNullsOnRight(), false, inputNullGenFieldList, left.getRowType().getFieldCount())) {
final MultiJoin rightMultiJoin = ((MultiJoin) (right));
for (int i = 0; i < rightMultiJoin.getInputs().size(); i++) {
v23.add(rightMultiJoin.getInput(i));
projFieldsList.add(rightMultiJoin.getProjFields().get(i));
joinFieldRefCountsList.add(rightMultiJoin.getJoinFieldRefCountsMap().get(i).toIntArray());
}
} else {
v23.add(right);
projFieldsList.add(null);
joinFieldRefCountsList.add(new int[right.getRowType().getFieldCount()]);
}
return v23;
}
| 3.26 |
flink_OutputTag_equals_rdh
|
// ------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if ((obj == null) || (!(obj instanceof OutputTag))) {
return false;
}
OutputTag other = ((OutputTag) (obj));
return Objects.equals(this.id, other.id);
}
| 3.26 |
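Note: in typical DataStream usage an OutputTag is created as an anonymous subclass so that the element type can be captured, and the equals() above compares only the id. A minimal illustration, assuming the single-argument OutputTag(String id) constructor:

import org.apache.flink.util.OutputTag;

public class OutputTagSketch {
    public static void main(String[] args) {
        // anonymous subclasses let the element type be captured at runtime
        OutputTag<String> lateData = new OutputTag<String>("late-data") {};
        OutputTag<String> sameId = new OutputTag<String>("late-data") {};
        System.out.println(lateData.equals(sameId)); // true: only the id is compared
    }
}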
flink_OutputTag_getId_rdh
|
// ------------------------------------------------------------------------
public String getId() {
return id;
}
| 3.26 |
flink_Runnables_assertNoException_rdh
|
/**
* Utils related to {@link Runnable}.
*/
public class Runnables {
/**
* Asserts that the given {@link Runnable} does not throw exceptions. If the runnable throws
* exceptions, then it will call the {@link FatalExitExceptionHandler}.
*
* @param runnable
* to assert for no exceptions
*/
public static void assertNoException(Runnable runnable) {
withUncaughtExceptionHandler(runnable, FatalExitExceptionHandler.INSTANCE).run();
}
| 3.26 |
flink_Runnables_withUncaughtExceptionHandler_rdh
|
/**
* Guard {@link Runnable} with uncaughtException handler, because {@link java.util.concurrent.ScheduledExecutorService} does not respect the one assigned to executing
* {@link Thread} instance.
*
* @param runnable
* Runnable future to guard.
* @param uncaughtExceptionHandler
* Handler to call in case of uncaught exception.
* @return Future with handler.
*/
public static Runnable withUncaughtExceptionHandler(Runnable runnable, Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
return () -> {
try {
runnable.run();
} catch (Throwable t) {
uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), t);
}
};
}
| 3.26 |
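A short usage sketch, illustrative only: a ScheduledExecutorService swallows task exceptions into the returned future, so the wrapper routes them to an explicit handler instead. The import of the Runnables class shown above is omitted because its package is not part of this snippet.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class GuardedRunnableSketch {
    public static void main(String[] args) {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        Runnable guarded = Runnables.withUncaughtExceptionHandler(
                () -> { throw new IllegalStateException("boom"); },
                (thread, error) -> System.err.println("Uncaught in " + thread.getName() + ": " + error));
        // without the wrapper, the failure would only surface via the ScheduledFuture
        executor.schedule(guarded, 1, TimeUnit.SECONDS);
        executor.shutdown();
    }
}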
flink_DefaultOperatorStateBackend_getBroadcastState_rdh
|
// -------------------------------------------------------------------------------------------
// State access methods
// -------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override
public <K, V> BroadcastState<K, V> getBroadcastState(final MapStateDescriptor<K, V> stateDescriptor) throws StateMigrationException {
Preconditions.checkNotNull(stateDescriptor);
String name = Preconditions.checkNotNull(stateDescriptor.getName());
BackendWritableBroadcastState<K, V> previous = ((BackendWritableBroadcastState<K, V>) (accessedBroadcastStatesByName.get(name)));
if (previous != null) {
checkStateNameAndMode(previous.getStateMetaInfo().getName(), name, previous.getStateMetaInfo().getAssignmentMode(), Mode.BROADCAST);
return previous;
}
stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig());
TypeSerializer<K> broadcastStateKeySerializer = Preconditions.checkNotNull(stateDescriptor.getKeySerializer());
TypeSerializer<V> broadcastStateValueSerializer = Preconditions.checkNotNull(stateDescriptor.getValueSerializer());
BackendWritableBroadcastState<K, V> broadcastState = ((BackendWritableBroadcastState<K, V>) (registeredBroadcastStates.get(name)));
if (broadcastState == null) {
broadcastState = new HeapBroadcastState<>(new RegisteredBroadcastStateBackendMetaInfo<>(name, Mode.BROADCAST, broadcastStateKeySerializer, broadcastStateValueSerializer));
registeredBroadcastStates.put(name, broadcastState);
} else {
// has restored state; check compatibility of new state access
checkStateNameAndMode(broadcastState.getStateMetaInfo().getName(), name, broadcastState.getStateMetaInfo().getAssignmentMode(), Mode.BROADCAST);
RegisteredBroadcastStateBackendMetaInfo<K, V> restoredBroadcastStateMetaInfo = broadcastState.getStateMetaInfo();
// check whether new serializers are incompatible
TypeSerializerSchemaCompatibility<K> keyCompatibility = restoredBroadcastStateMetaInfo.updateKeySerializer(broadcastStateKeySerializer);
if (keyCompatibility.isIncompatible()) {
throw new StateMigrationException("The new key typeSerializer for broadcast state must not be incompatible.");
}
TypeSerializerSchemaCompatibility<V> valueCompatibility = restoredBroadcastStateMetaInfo.updateValueSerializer(broadcastStateValueSerializer);
if (valueCompatibility.isIncompatible()) {
throw new StateMigrationException("The new value typeSerializer for broadcast state must not be incompatible.");
}
broadcastState.setStateMetaInfo(restoredBroadcastStateMetaInfo);
}
accessedBroadcastStatesByName.put(name, broadcastState);
return broadcastState;
}
| 3.26 |
flink_DefaultOperatorStateBackend_snapshot_rdh
|
// -------------------------------------------------------------------------------------------
// Snapshot
// -------------------------------------------------------------------------------------------
@Nonnull
@Override
public RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshot(long checkpointId, long timestamp, @Nonnull CheckpointStreamFactory streamFactory, @Nonnull CheckpointOptions checkpointOptions) throws Exception {
return snapshotStrategyRunner.snapshot(checkpointId, timestamp, streamFactory, checkpointOptions);
}
| 3.26 |
flink_TaskStateManagerImpl_notifyCheckpointAborted_rdh
|
/**
* Tracking when some local state can be disposed.
*/
@Override
public void notifyCheckpointAborted(long checkpointId) {
localStateStore.abortCheckpoint(checkpointId);
}
| 3.26 |
flink_TaskStateManagerImpl_notifyCheckpointComplete_rdh
|
/**
* Tracking when local state can be confirmed and disposed.
*/
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
localStateStore.confirmCheckpoint(checkpointId);
}
| 3.26 |
flink_TopologyGraph_link_rdh
|
/**
* Link an edge from `from` node to `to` node if no loop will occur after adding this edge.
* Returns if this edge is successfully added.
*/
boolean link(ExecNode<?> from, ExecNode<?> to) {
TopologyNode fromNode = getOrCreateTopologyNode(from);
TopologyNode toNode = getOrCreateTopologyNode(to);
if (canReach(toNode, fromNode)) {
// invalid edge, as `to` is the predecessor of `from`
return false;
} else {
// link `from` and `to`
fromNode.outputs.add(toNode);
toNode.inputs.add(fromNode);
return true;
}
}
| 3.26 |
flink_TopologyGraph_makeAsFarAs_rdh
|
/**
* Make the distance of node A at least as far as node B by adding edges from all inputs of node
* B to node A.
*/
void makeAsFarAs(ExecNode<?> a, ExecNode<?> b) {
TopologyNode nodeA = getOrCreateTopologyNode(a);
TopologyNode nodeB = getOrCreateTopologyNode(b);
for (TopologyNode input : nodeB.inputs) {
link(input.execNode, nodeA.execNode);
}
}
| 3.26 |
flink_TopologyGraph_m0_rdh
|
/**
* Remove the edge from `from` node to `to` node. If there is no edge between them then do
* nothing.
*/
void m0(ExecNode<?> from, ExecNode<?> to) {
TopologyNode fromNode = getOrCreateTopologyNode(from);
TopologyNode toNode = getOrCreateTopologyNode(to);
fromNode.outputs.remove(toNode);
toNode.inputs.remove(fromNode);
}
| 3.26 |
flink_TopologyGraph_calculateMaximumDistance_rdh
|
/**
* Calculate the maximum distance of the currently added nodes from the nodes without inputs.
* The smallest distance is 0 (which are exactly the nodes without inputs) and the distances of
* other nodes are the largest distances in their inputs plus 1.
*
* <p>Distance of a node is defined as the number of edges one needs to go through from the
* nodes without inputs to this node.
*/
Map<ExecNode<?>, Integer> calculateMaximumDistance() {
Map<ExecNode<?>, Integer> result = new HashMap<>();
Map<TopologyNode, Integer> inputsVisitedMap = new HashMap<>();
Queue<TopologyNode> queue = new LinkedList<>();
for (TopologyNode node : nodes.values()) {
if (node.inputs.size() == 0) {
queue.offer(node);
}
}
while (!queue.isEmpty()) {
TopologyNode node = queue.poll();
int dist = -1;
for (TopologyNode input : node.inputs) {
dist = Math.max(dist, Preconditions.checkNotNull(result.get(input.execNode), "The distance of an input node is not calculated. This is a bug."));
}
dist++;
result.put(node.execNode, dist);
for (TopologyNode output : node.outputs) {
int inputsVisited = inputsVisitedMap.compute(output, (k, v) -> v == null ? 1 : v + 1);
if (inputsVisited == output.inputs.size()) {
queue.offer(output);
}
}
}
return result;
}
| 3.26 |
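The distance calculation above is a breadth-first pass over a DAG in which a node is processed only after all of its inputs have been processed (a Kahn-style topological traversal). A self-contained sketch of the same idea on plain integer node ids, using adjacency maps instead of ExecNode/TopologyNode; illustrative only:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MaxDistanceSketch {
    static Map<Integer, Integer> calculateMaximumDistance(Map<Integer, List<Integer>> outputs, Map<Integer, List<Integer>> inputs) {
        Map<Integer, Integer> result = new HashMap<>();
        Map<Integer, Integer> inputsVisited = new HashMap<>();
        Deque<Integer> queue = new ArrayDeque<>();
        // nodes without inputs start at distance 0
        for (Integer node : outputs.keySet()) {
            if (inputs.getOrDefault(node, Collections.emptyList()).isEmpty()) {
                queue.offer(node);
            }
        }
        while (!queue.isEmpty()) {
            int node = queue.poll();
            int dist = -1;
            for (int in : inputs.getOrDefault(node, Collections.emptyList())) {
                dist = Math.max(dist, result.get(in)); // all inputs are already resolved
            }
            result.put(node, dist + 1);
            for (int out : outputs.getOrDefault(node, Collections.emptyList())) {
                int visited = inputsVisited.merge(out, 1, Integer::sum);
                if (visited == inputs.getOrDefault(out, Collections.emptyList()).size()) {
                    queue.offer(out);
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // 1 -> 2 -> 4 and 1 -> 3 -> 4: distances are 1=0, 2=1, 3=1, 4=2
        Map<Integer, List<Integer>> outputs = new HashMap<>();
        Map<Integer, List<Integer>> inputs = new HashMap<>();
        outputs.put(1, Arrays.asList(2, 3));
        outputs.put(2, Collections.singletonList(4));
        outputs.put(3, Collections.singletonList(4));
        outputs.put(4, Collections.emptyList());
        inputs.put(1, Collections.emptyList());
        inputs.put(2, Collections.singletonList(1));
        inputs.put(3, Collections.singletonList(1));
        inputs.put(4, Arrays.asList(2, 3));
        System.out.println(calculateMaximumDistance(outputs, inputs)); // {1=0, 2=1, 3=1, 4=2}
    }
}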
flink_PathPattern_tokens_rdh
|
/**
* Returns the pattern given at the constructor, without slashes at both ends, and split by
* {@code '/'}.
*/
public String[] tokens() {
return tokens;
}
| 3.26 |
flink_PathPattern_pattern_rdh
|
/**
* Returns the pattern given at the constructor, without slashes at both ends.
*/
public String pattern() {
return pattern;
}
| 3.26 |
flink_PathPattern_match_rdh
|
// --------------------------------------------------------------------------
/**
* {@code params} will be updated with params embedded in the request path.
*
* <p>This method signature is designed so that {@code requestPathTokens} and {@code params} can
* be created only once then reused, to optimize for performance when a large number of path
* patterns need to be matched.
*
* @return {@code false} if not matched; in this case params should be reset
*/
public boolean match(String[] requestPathTokens, Map<String, String> params) {
if (tokens.length == requestPathTokens.length) {
for (int i = 0; i < tokens.length; i++) {
String key = tokens[i];
String value = requestPathTokens[i];
if ((key.length() > 0) && (key.charAt(0) == ':')) {
// This is a placeholder
params.put(key.substring(1), value);
} else if (!key.equals(value)) {
// This is a constant
return false;
}
}
return true;
}
if (((tokens.length > 0) && tokens[tokens.length - 1].equals(":*")) && (tokens.length <= requestPathTokens.length)) {
// The first part
for (int i = 0; i < (tokens.length - 2); i++) {
String key = tokens[i];
String value = requestPathTokens[i];
if ((key.length() > 0) && (key.charAt(0) == ':')) {
// This is a placeholder
params.put(key.substring(1), value);
} else if (!key.equals(value)) {
// This is a constant
return false;
}
}
// The last :* part
StringBuilder b = new StringBuilder(requestPathTokens[tokens.length - 1]);
for (int i = tokens.length; i < requestPathTokens.length; i++) {
b.append('/');
b.append(requestPathTokens[i]);
}
params.put("*", b.toString());
return true;
}
return false;
}
| 3.26 |
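A small matching sketch, illustrative only; it assumes the PathPattern(String) constructor used elsewhere in this table tokenizes "jobs/:jobid/vertices" into ["jobs", ":jobid", "vertices"] as described by pattern() and tokens() above. The import of PathPattern is omitted because its package is not part of this snippet.

import java.util.HashMap;
import java.util.Map;

public class PathPatternMatchSketch {
    public static void main(String[] args) {
        PathPattern pattern = new PathPattern("jobs/:jobid/vertices");
        Map<String, String> params = new HashMap<>();
        boolean matched = pattern.match(new String[] {"jobs", "42", "vertices"}, params);
        System.out.println(matched + " " + params); // true {jobid=42}
    }
}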
flink_PathPattern_hashCode_rdh
|
// --------------------------------------------------------------------------
// Instances of this class can be conveniently used as Map keys.
@Override
public int hashCode() {
return pattern.hashCode();
}
| 3.26 |
flink_MethodlessRouter_addRoute_rdh
|
/**
* This method does nothing if the path pattern has already been added. A path pattern can only
* point to one target.
*/
public MethodlessRouter<T> addRoute(String pathPattern, T target) {
PathPattern p = new PathPattern(pathPattern);
if (routes.containsKey(p)) {
return this;
}
routes.put(p, target);
return this;
}
| 3.26 |
flink_MethodlessRouter_anyMatched_rdh
|
/**
* Checks if there's any matching route.
*/
public boolean anyMatched(String[] requestPathTokens) {
Map<String, String> pathParams = new HashMap<>();
for (PathPattern pattern : routes.keySet()) {
if (pattern.match(requestPathTokens, pathParams)) {
return true;
}
// Reset for the next loop
pathParams.clear();
}
return false;
}
| 3.26 |
flink_MethodlessRouter_route_rdh
|
// --------------------------------------------------------------------------
/**
*
* @return {@code null} if no match
*/
public RouteResult<T> route(String uri, String decodedPath, Map<String, List<String>> queryParameters, String[] pathTokens) {
// Optimize: reuse requestPathTokens and pathParams in the loop
Map<String, String> pathParams = new HashMap<>();
for (Entry<PathPattern, T> entry : routes.entrySet()) {
PathPattern pattern = entry.getKey();
if (pattern.match(pathTokens, pathParams)) {
T v6 = entry.getValue();
return new RouteResult<T>(uri, decodedPath, pathParams, queryParameters, v6);
}
// Reset for the next try
pathParams.clear();
}
return null;
}
| 3.26 |
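A routing sketch tying addRoute(), anyMatched() and route() together, illustrative only; it assumes the default MethodlessRouter constructor and avoids RouteResult accessors, which are not shown in this table. Imports of MethodlessRouter, RouteResult and PathPattern are omitted because their package is not part of this snippet.

import java.util.Collections;

public class RouterSketch {
    public static void main(String[] args) {
        MethodlessRouter<String> router = new MethodlessRouter<>();
        router.addRoute("jobs/:jobid", "job-detail-handler");
        String[] pathTokens = new String[] {"jobs", "42"};
        System.out.println(router.anyMatched(pathTokens)); // true
        RouteResult<String> result = router.route("/jobs/42", "/jobs/42", Collections.emptyMap(), pathTokens);
        System.out.println(result != null); // true; a non-matching path would yield null
    }
}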
flink_MethodlessRouter_removePathPattern_rdh
|
// --------------------------------------------------------------------------
/**
* Removes the route specified by the path pattern.
*/
public void removePathPattern(String pathPattern) {
PathPattern p = new PathPattern(pathPattern);
T target = routes.remove(p);
if (target == null) {
return;
}
}
| 3.26 |
flink_MethodlessRouter_routes_rdh
|
// --------------------------------------------------------------------------
/**
* Returns all routes in this router, an unmodifiable map of {@code PathPattern -> Target}.
*/
public Map<PathPattern, T> routes() {
return Collections.unmodifiableMap(routes);
}
| 3.26 |
flink_BlockCompressionFactory_createBlockCompressionFactory_rdh
|
/**
* Creates {@link BlockCompressionFactory} according to the configuration.
*
* @param compressionFactoryName
* supported compression codecs or user-defined class name
* inherited from {@link BlockCompressionFactory}.
*/
static BlockCompressionFactory createBlockCompressionFactory(String compressionFactoryName) {
checkNotNull(compressionFactoryName);
CompressionFactoryName compressionName;
try {
compressionName = CompressionFactoryName.valueOf(compressionFactoryName.toUpperCase());
} catch (IllegalArgumentException e) {
compressionName = null;
}
BlockCompressionFactory blockCompressionFactory;
if (compressionName != null) {
switch (compressionName) {
case LZ4 :
    blockCompressionFactory = new Lz4BlockCompressionFactory();
break;
case LZO :
blockCompressionFactory = new AirCompressorFactory(new LzoCompressor(), new LzoDecompressor());
break;
case ZSTD :
blockCompressionFactory = new AirCompressorFactory(new ZstdCompressor(), new ZstdDecompressor());
break;
default :
throw new IllegalStateException("Unknown CompressionMethod " + compressionName);
}
} else {
Object factoryObj;
try {
    factoryObj = Class.forName(compressionFactoryName).newInstance();
} catch (ClassNotFoundException e) {
    throw new IllegalConfigurationException("Cannot load class " + compressionFactoryName, e);
} catch (Exception e) {
    throw new IllegalConfigurationException("Cannot create object for class " + compressionFactoryName, e);
}
if (factoryObj instanceof BlockCompressionFactory) {
blockCompressionFactory = ((BlockCompressionFactory) (factoryObj));
} else {
throw new IllegalArgumentException("CompressionFactoryName should inherit from" + " interface BlockCompressionFactory, or use the default compression codec.");
}
}
checkNotNull(blockCompressionFactory);
return blockCompressionFactory;
}
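
// Hedged usage sketch: the name is either one of the built-in codecs handled by the switch above
// (matched case-insensitively via toUpperCase) or a fully qualified class name implementing
// BlockCompressionFactory. "com.example.MyCompressionFactory" is a made-up class name.
BlockCompressionFactory lz4 = BlockCompressionFactory.createBlockCompressionFactory("lz4");
BlockCompressionFactory custom =
        BlockCompressionFactory.createBlockCompressionFactory("com.example.MyCompressionFactory");
// A name that is neither a known codec nor a loadable class ends in IllegalConfigurationException.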
| 3.26 |
flink_DoubleMinimum_add_rdh
|
// ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(double value) {
this.min = Math.min(this.min, value);
}
| 3.26 |
flink_DoubleMinimum_toString_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "DoubleMinimum " + this.min;
}
| 3.26 |
flink_HiveStatsUtil_getFieldNames_rdh
|
/**
* Get field names from field schemas.
*/
private static Set<String> getFieldNames(List<FieldSchema> fieldSchemas) {
Set<String> names = new HashSet<>();
for (FieldSchema fs : fieldSchemas) {
names.add(fs.getName());
}
return names;
}
| 3.26 |
flink_HiveStatsUtil_getPartialPartitionVals_rdh
|
/**
 * Get the partial partition values whose {@param partitionColIndex} partition column value will
 * be {@param defaultPartitionName} and whose values for the preceding partition columns will be
 * the empty string.
*
* <p>For example, if partitionColIndex = 3, defaultPartitionName = __default_partition__, the
* partial partition values will be ["", "", "", __default_partition__].
*
 * <p>It's useful when we want to list all the Hive partitions of which the value
* for one specific partition column is null.
*/
private static List<String> getPartialPartitionVals(int partitionColIndex, String defaultPartitionName) {
List<String> partitionValues = new ArrayList<>();
for (int i = 0; i < partitionColIndex; i++) {
partitionValues.add(StringUtils.EMPTY);
}
partitionValues.add(defaultPartitionName);
return partitionValues;
}
| 3.26 |
flink_HiveStatsUtil_getPartitionColumnStats_rdh
|
/**
* Get statistics for a specific partition column.
*
* @param logicalType
* the specific partition column's logical type
* @param partitionValue
* the partition value for the specific partition column
* @param partitionColIndex
* the index of the specific partition column
* @param defaultPartitionName
* the default partition name for null value
*/
private static CatalogColumnStatisticsDataBase getPartitionColumnStats(HiveMetastoreClientWrapper client, Table hiveTable, LogicalType logicalType, Object partitionValue, int partitionColIndex, String defaultPartitionName) {
switch (logicalType.getTypeRoot()) {
case CHAR :
case VARCHAR :
{
Long maxLength = null;
Double avgLength = null;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount = getPartitionColumnNullCount(client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
long valLength = ((String) (partitionValue)).length();
maxLength = valLength;
avgLength = ((double) (valLength));
}
return new CatalogColumnStatisticsDataString(maxLength, avgLength, 1L, nullCount);
}
case BOOLEAN :
{
long trueCount = 0L;
long falseCount = 0L;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount = getPartitionColumnNullCount(client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
Boolean boolVal = ((Boolean) (partitionValue));
if (boolVal) {
trueCount = 1L;
} else {
falseCount = 1L;
}
}
return new CatalogColumnStatisticsDataBoolean(trueCount, falseCount, nullCount);
}
case TINYINT :
case SMALLINT :
case INTEGER :
case BIGINT :
{
Long min = null;
Long max = null;
Long nullCount = 0L;
if (partitionValue == null) {
    nullCount = getPartitionColumnNullCount(client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
min = ((Number) (partitionValue)).longValue();
max = min;
}
return new CatalogColumnStatisticsDataLong(min, max, 1L, nullCount);
}
case FLOAT :
case DOUBLE :
case DECIMAL :
{
Double min = null;
Double max = null;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount = getPartitionColumnNullCount(client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
min = ((Number) (partitionValue)).doubleValue();
max = min;
}
return new CatalogColumnStatisticsDataDouble(min, max, 1L, nullCount);
}
case DATE :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
{
Date min = null;
Date max = null;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount = getPartitionColumnNullCount(client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
if (partitionValue instanceof LocalDate) {
min = new Date(((LocalDate) (partitionValue)).toEpochDay());
} else if (partitionValue instanceof LocalDateTime) {
min = new Date(((LocalDateTime) (partitionValue)).toLocalDate().toEpochDay());
}
max = min;
}
return new CatalogColumnStatisticsDataDate(min, max, 1L, nullCount);
}
default :
return null;
}
}
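
// Worked example of what the TINYINT/SMALLINT/INTEGER/BIGINT branch above produces (a sketch,
// not additional Flink API): a single non-null partition value 5 yields min = max = 5, ndv = 1
// and nullCount = 0.
CatalogColumnStatisticsDataLong intPartitionStats = new CatalogColumnStatisticsDataLong(5L, 5L, 1L, 0L);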
| 3.26 |
flink_HiveStatsUtil_updateStats_rdh
|
/**
* Update original table statistics parameters.
*
* @param newTableStats
* new catalog table statistics.
* @param parameters
* original hive table statistics parameters.
*/
public static void updateStats(CatalogTableStatistics newTableStats, Map<String, String> parameters) {
parameters.put(StatsSetupConst.ROW_COUNT, String.valueOf(newTableStats.getRowCount()));
parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(newTableStats.getTotalSize()));
parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(newTableStats.getFileCount()));
parameters.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(newTableStats.getRawDataSize()));
}
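
// Hedged usage sketch: updateStats mutates the given parameter map in place, so the typical
// pattern is to pass the Hive table's own parameter map (hiveTable is a metastore Table here)
// and then persist the table. The CatalogTableStatistics constructor order
// (rowCount, fileCount, totalSize, rawDataSize) is an assumption of this sketch.
Map<String, String> params = hiveTable.getParameters();
HiveStatsUtil.updateStats(new CatalogTableStatistics(1000L, 4, 2048L, 4096L), params);
// params now carries the four values under the corresponding StatsSetupConst keys.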
| 3.26 |
flink_HiveStatsUtil_createTableColumnStats_rdh
|
/**
* Create Flink ColumnStats from Hive ColumnStatisticsData.
*/
private static CatalogColumnStatisticsDataBase createTableColumnStats(DataType colType, ColumnStatisticsData stats, String hiveVersion) {
HiveShim v51 = HiveShimLoader.loadHiveShim(hiveVersion);
if (stats.isSetBinaryStats()) {
BinaryColumnStatsData binaryStats = stats.getBinaryStats();
return new CatalogColumnStatisticsDataBinary(binaryStats.isSetMaxColLen() ? binaryStats.getMaxColLen() : null, binaryStats.isSetAvgColLen() ? binaryStats.getAvgColLen() : null, binaryStats.isSetNumNulls() ? binaryStats.getNumNulls() : null);
} else if (stats.isSetBooleanStats()) {
    BooleanColumnStatsData booleanStats = stats.getBooleanStats();
    return new CatalogColumnStatisticsDataBoolean(booleanStats.isSetNumTrues() ? booleanStats.getNumTrues() : null, booleanStats.isSetNumFalses() ? booleanStats.getNumFalses() : null, booleanStats.isSetNumNulls() ? booleanStats.getNumNulls() : null);
} else if (v51.isDateStats(stats)) {
    return v51.toFlinkDateColStats(stats);
} else if (stats.isSetDoubleStats()) {
    DoubleColumnStatsData doubleStats = stats.getDoubleStats();
    return new CatalogColumnStatisticsDataDouble(doubleStats.isSetLowValue() ? doubleStats.getLowValue() : null, doubleStats.isSetHighValue() ? doubleStats.getHighValue() : null, doubleStats.isSetNumDVs() ? doubleStats.getNumDVs() : null, doubleStats.isSetNumNulls() ? doubleStats.getNumNulls() : null);
} else if (stats.isSetLongStats()) {
    LongColumnStatsData longColStats = stats.getLongStats();
    return new CatalogColumnStatisticsDataLong(longColStats.isSetLowValue() ? longColStats.getLowValue() : null, longColStats.isSetHighValue() ? longColStats.getHighValue() : null, longColStats.isSetNumDVs() ? longColStats.getNumDVs() : null, longColStats.isSetNumNulls() ? longColStats.getNumNulls() : null);
} else if (stats.isSetStringStats()) {
    StringColumnStatsData stringStats = stats.getStringStats();
    return new CatalogColumnStatisticsDataString(stringStats.isSetMaxColLen() ? stringStats.getMaxColLen() : null, stringStats.isSetAvgColLen() ? stringStats.getAvgColLen() : null, stringStats.isSetNumDVs() ? stringStats.getNumDVs() : null, stringStats.isSetNumNulls() ? stringStats.getNumNulls() : null);
} else if (stats.isSetDecimalStats()) {
DecimalColumnStatsData decimalStats = stats.getDecimalStats();
// for now, just return CatalogColumnStatisticsDataDouble for decimal columns
Double max = null;
if (decimalStats.isSetHighValue()) {
max = toHiveDecimal(decimalStats.getHighValue()).doubleValue();
}
Double min = null;
if (decimalStats.isSetLowValue()) {
min = toHiveDecimal(decimalStats.getLowValue()).doubleValue();
}
Long v60 = (decimalStats.isSetNumDVs()) ? decimalStats.getNumDVs() : null;
Long nullCount = (decimalStats.isSetNumNulls()) ? decimalStats.getNumNulls() : null;
return new CatalogColumnStatisticsDataDouble(min, max, v60, nullCount);
} else {
LOG.warn("Flink does not support converting ColumnStatisticsData '{}' for Hive column type '{}' yet.", stats, colType);
return null;
    }
}
| 3.26 |
flink_HiveStatsUtil_createPartitionColumnStats_rdh
|
/**
* Create columnStatistics from the given Hive column stats of a hive partition.
*/
public static ColumnStatistics createPartitionColumnStats(Partition hivePartition, String partName, Map<String, CatalogColumnStatisticsDataBase> colStats, String hiveVersion) {
ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, hivePartition.getDbName(), hivePartition.getTableName());
desc.setPartName(partName);
return createHiveColumnStatistics(colStats, hivePartition.getSd(), desc, hiveVersion);
}
| 3.26 |
flink_HiveStatsUtil_getPartitionColumnNullCount_rdh
|
/**
* Get the null count for the {@param partitionColIndex} partition column in table {@param hiveTable}.
*
 * <p>To get the null count, it will first list all the partitions whose {@param partitionColIndex} partition column is null, and then merge the partitions' statistics to get the
 * total row count, which is exactly the null count for the {@param partitionColIndex} partition column.
*/
private static Long getPartitionColumnNullCount(HiveMetastoreClientWrapper client, Table hiveTable, int partitionColIndex, String defaultPartitionName) {
// get the partial partition values
List<String> partialPartitionVals = getPartialPartitionVals(partitionColIndex, defaultPartitionName);
try {
// list all the partitions that match the partial partition values
List<Partition> partitions = client.listPartitions(hiveTable.getDbName(), hiveTable.getTableName(), partialPartitionVals, ((short) (-1)));
List<TableStats> catalogTableStatistics = partitions.stream().map(p -> new TableStats(HiveStatsUtil.createCatalogTableStatistics(p.getParameters()).getRowCount())).collect(Collectors.toList());
Set<String> partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
TableStats resultTableStats = catalogTableStatistics.stream().reduce((s1, s2) -> s1.merge(s2, partitionKeys)).orElse(TableStats.UNKNOWN);
if ((resultTableStats == TableStats.UNKNOWN) || (resultTableStats.getRowCount() < 0)) {
    return null;
} else {
return resultTableStats.getRowCount();
}
} catch (Exception e) {
LOG.warn("Can't list partition for table `{}.{}`, partition value {}.", hiveTable.getDbName(), hiveTable.getTableName(), partialPartitionVals);
}
return null;
}
| 3.26 |
flink_HiveStatsUtil_createCatalogColumnStats_rdh
|
/**
* Create a map of Flink column stats from the given Hive column stats.
*/
public static Map<String, CatalogColumnStatisticsDataBase> createCatalogColumnStats(@Nonnull List<ColumnStatisticsObj> hiveColStats, String hiveVersion) {
checkNotNull(hiveColStats, "hiveColStats can not be null");
Map<String, CatalogColumnStatisticsDataBase> colStats = new HashMap<>();
for (ColumnStatisticsObj colStatsObj : hiveColStats) {
CatalogColumnStatisticsDataBase columnStats = createTableColumnStats(HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(colStatsObj.getColType())), colStatsObj.getStatsData(), hiveVersion);
colStats.put(colStatsObj.getColName(), columnStats);
}
return colStats;
}
| 3.26 |
flink_HiveStatsUtil_getCatalogPartitionColumnStats_rdh
|
/**
* Get column statistic for partition columns.
*/
public static Map<String, CatalogColumnStatisticsDataBase> getCatalogPartitionColumnStats(HiveMetastoreClientWrapper client, HiveShim hiveShim, Table hiveTable, String partitionName, List<FieldSchema> partitionColsSchema, String defaultPartitionName) {
Map<String, CatalogColumnStatisticsDataBase> partitionColumnStats = new HashMap<>();
List<String> partitionCols = new ArrayList<>(partitionColsSchema.size());
List<LogicalType> partitionColsType = new ArrayList<>(partitionColsSchema.size());
for (FieldSchema fieldSchema : partitionColsSchema) {
partitionCols.add(fieldSchema.getName());
partitionColsType.add(HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(fieldSchema.getType())).getLogicalType());
}
// the partition column and values for the partition column
Map<String, Object> partitionColValues = new HashMap<>();
CatalogPartitionSpec partitionSpec = HivePartitionUtils.createPartitionSpec(partitionName, defaultPartitionName);
for (int i = 0; i < partitionCols.size(); i++) {
String partitionCol = partitionCols.get(i);
String partitionStrVal = partitionSpec.getPartitionSpec().get(partitionCols.get(i));
if (partitionStrVal == null) {
partitionColValues.put(partitionCol, null);
} else {
partitionColValues.put(partitionCol, HivePartitionUtils.restorePartitionValueFromType(hiveShim, partitionStrVal, partitionColsType.get(i), defaultPartitionName));
}
}
// calculate statistic for each partition column
for (int i = 0; i < partitionCols.size(); i++) {
Object partitionValue = partitionColValues.get(partitionCols.get(i));
LogicalType v14 = partitionColsType.get(i);
CatalogColumnStatisticsDataBase catalogColumnStatistics = getPartitionColumnStats(client, hiveTable, v14, partitionValue, i, defaultPartitionName);
if (catalogColumnStatistics != null) {
partitionColumnStats.put(partitionCols.get(i), catalogColumnStatistics);
}
}
return partitionColumnStats;
}
| 3.26 |
flink_HiveStatsUtil_tableStatsChanged_rdh
|
/**
* Determine whether the stats change.
*
* @param newStats
* the new table statistics parameters
* @param oldStats
* the old table statistics parameters
* @return whether the stats change
*/
public static boolean tableStatsChanged(Map<String, String> newStats, Map<String, String> oldStats) {
    return statsChanged(new CatalogTableStatistics(parsePositiveLongStat(newStats, StatsSetupConst.ROW_COUNT), parsePositiveIntStat(newStats, StatsSetupConst.NUM_FILES), parsePositiveLongStat(newStats, StatsSetupConst.TOTAL_SIZE), parsePositiveLongStat(newStats, StatsSetupConst.RAW_DATA_SIZE)), oldStats);
}
| 3.26 |
flink_HiveStatsUtil_getColumnStatisticsData_rdh
|
/**
 * Convert Flink ColumnStats to Hive ColumnStatisticsData according to Hive column type. Note we
 * currently assume that, in Flink, the max and min of ColumnStats will be of the same type as the
 * Flink column type. For example, for SHORT and LONG columns, the max and min of their
 * ColumnStats should be of type SHORT and LONG.
 */
private static ColumnStatisticsData getColumnStatisticsData(DataType colType, CatalogColumnStatisticsDataBase colStat, String hiveVersion) {
    LogicalTypeRoot type = colType.getLogicalType().getTypeRoot();
    if (type.equals(LogicalTypeRoot.CHAR) || type.equals(LogicalTypeRoot.VARCHAR)) {
if (colStat instanceof CatalogColumnStatisticsDataString) {
CatalogColumnStatisticsDataString stringColStat = ((CatalogColumnStatisticsDataString) (colStat));
StringColumnStatsData hiveStringColumnStats = new StringColumnStatsData();
hiveStringColumnStats.clear();
if (null != stringColStat.getMaxLength()) {
hiveStringColumnStats.setMaxColLen(stringColStat.getMaxLength());
}
if (null != stringColStat.getAvgLength()) {
    hiveStringColumnStats.setAvgColLen(stringColStat.getAvgLength());
}
if (null != stringColStat.getNullCount()) {
hiveStringColumnStats.setNumNulls(stringColStat.getNullCount());
}
if (null != stringColStat.getNdv()) {
hiveStringColumnStats.setNumDVs(stringColStat.getNdv());
}
return ColumnStatisticsData.stringStats(hiveStringColumnStats);
}
} else if (type.equals(LogicalTypeRoot.BOOLEAN)) {
if (colStat instanceof CatalogColumnStatisticsDataBoolean) {
CatalogColumnStatisticsDataBoolean booleanColStat = ((CatalogColumnStatisticsDataBoolean) (colStat));
BooleanColumnStatsData hiveBoolStats = new BooleanColumnStatsData();
hiveBoolStats.clear();
if (null != booleanColStat.getTrueCount()) {
hiveBoolStats.setNumTrues(booleanColStat.getTrueCount());
}
if (null != booleanColStat.getFalseCount()) {
hiveBoolStats.setNumFalses(booleanColStat.getFalseCount());
}
if (null != booleanColStat.getNullCount()) {
hiveBoolStats.setNumNulls(booleanColStat.getNullCount());
}
return ColumnStatisticsData.booleanStats(hiveBoolStats);
}
} else if ((((((type.equals(LogicalTypeRoot.TINYINT) || type.equals(LogicalTypeRoot.SMALLINT)) || type.equals(LogicalTypeRoot.INTEGER)) || type.equals(LogicalTypeRoot.BIGINT)) || type.equals(LogicalTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE)) || type.equals(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE)) || type.equals(LogicalTypeRoot.TIMESTAMP_WITH_TIME_ZONE)) {
if (colStat instanceof CatalogColumnStatisticsDataLong) {
CatalogColumnStatisticsDataLong longColStat = ((CatalogColumnStatisticsDataLong) (colStat));
LongColumnStatsData hiveLongColStats = new LongColumnStatsData();
hiveLongColStats.clear();
if (null != longColStat.getMax()) {
hiveLongColStats.setHighValue(longColStat.getMax());
}
if (null != longColStat.getMin()) {
hiveLongColStats.setLowValue(longColStat.getMin());
}
if (null != longColStat.getNdv()) {
hiveLongColStats.setNumDVs(longColStat.getNdv());
}
if (null != longColStat.getNullCount()) {
hiveLongColStats.setNumNulls(longColStat.getNullCount());
}
return ColumnStatisticsData.longStats(hiveLongColStats);
}
} else if (type.equals(LogicalTypeRoot.FLOAT) || type.equals(LogicalTypeRoot.DOUBLE)) {
if (colStat instanceof CatalogColumnStatisticsDataDouble) {
CatalogColumnStatisticsDataDouble doubleColumnStatsData = ((CatalogColumnStatisticsDataDouble) (colStat));
DoubleColumnStatsData v70 = new DoubleColumnStatsData();
v70.clear();
if (null != doubleColumnStatsData.getMax()) {
    v70.setHighValue(doubleColumnStatsData.getMax());
}
if (null != doubleColumnStatsData.getMin()) {
v70.setLowValue(doubleColumnStatsData.getMin());
}
if (null != doubleColumnStatsData.getNullCount()) {
v70.setNumNulls(doubleColumnStatsData.getNullCount());
}
if (null != doubleColumnStatsData.getNdv()) {
v70.setNumDVs(doubleColumnStatsData.getNdv());
}
return ColumnStatisticsData.doubleStats(v70);
}
} else if (type.equals(LogicalTypeRoot.DATE)) {
    if (colStat instanceof CatalogColumnStatisticsDataDate) {
        HiveShim hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
        return hiveShim.toHiveDateColStats(((CatalogColumnStatisticsDataDate) (colStat)));
}
} else if (type.equals(LogicalTypeRoot.VARBINARY) || type.equals(LogicalTypeRoot.BINARY)) {
if (colStat instanceof CatalogColumnStatisticsDataBinary) {
CatalogColumnStatisticsDataBinary binaryColumnStatsData = ((CatalogColumnStatisticsDataBinary) (colStat));
BinaryColumnStatsData hiveBinaryColumnStats = new BinaryColumnStatsData();
hiveBinaryColumnStats.clear();
if (null != binaryColumnStatsData.getMaxLength()) {
hiveBinaryColumnStats.setMaxColLen(binaryColumnStatsData.getMaxLength());
}
if (null != binaryColumnStatsData.getAvgLength()) {
hiveBinaryColumnStats.setAvgColLen(binaryColumnStatsData.getAvgLength());
}
if (null != binaryColumnStatsData.getNullCount()) {
hiveBinaryColumnStats.setNumNulls(binaryColumnStatsData.getNullCount());
}
return ColumnStatisticsData.binaryStats(hiveBinaryColumnStats);
}
} else if (type.equals(LogicalTypeRoot.DECIMAL)) {
if (colStat instanceof CatalogColumnStatisticsDataDouble) {
CatalogColumnStatisticsDataDouble flinkStats = ((CatalogColumnStatisticsDataDouble) (colStat));
DecimalColumnStatsData hiveStats = new DecimalColumnStatsData();
if (flinkStats.getMax() != null) {
// in older versions we cannot create HiveDecimal from Double, so convert Double
// to BigDecimal first
hiveStats.setHighValue(toThriftDecimal(HiveDecimal.create(BigDecimal.valueOf(flinkStats.getMax()))));
}
if (flinkStats.getMin() != null) {
hiveStats.setLowValue(toThriftDecimal(HiveDecimal.create(BigDecimal.valueOf(flinkStats.getMin()))));
}
if (flinkStats.getNdv() != null) {
hiveStats.setNumDVs(flinkStats.getNdv());
}
if (flinkStats.getNullCount() != null) {
hiveStats.setNumNulls(flinkStats.getNullCount());
}
return ColumnStatisticsData.decimalStats(hiveStats);
}
}
throw new CatalogException(String.format("Flink does not support converting ColumnStats '%s' for Hive column " + "type '%s' yet", colStat, colType));
}
| 3.26 |
flink_HiveStatsUtil_statsChanged_rdh
|
/**
* Determine whether the table statistics changes.
*
* @param newTableStats
* new catalog table statistics.
* @param parameters
* original hive table statistics parameters.
* @return whether the table statistics changes
*/
public static boolean statsChanged(CatalogTableStatistics newTableStats, Map<String, String> parameters) {
return (((newTableStats.getRowCount() != parsePositiveLongStat(parameters, StatsSetupConst.ROW_COUNT)) || (newTableStats.getTotalSize() != parsePositiveLongStat(parameters, StatsSetupConst.TOTAL_SIZE))) || (newTableStats.getFileCount() != parsePositiveIntStat(parameters, StatsSetupConst.NUM_FILES))) || (newTableStats.getRawDataSize() != parsePositiveLongStat(parameters, StatsSetupConst.RAW_DATA_SIZE));
}
| 3.26 |
flink_BuiltInSqlOperator_unwrapVersion_rdh
|
// --------------------------------------------------------------------------------------------
static Optional<Integer> unwrapVersion(SqlOperator operator) {
if (operator instanceof BuiltInSqlOperator) {
final BuiltInSqlOperator builtInSqlOperator = ((BuiltInSqlOperator) (operator));
return builtInSqlOperator.isInternal() ? Optional.empty() : builtInSqlOperator.getVersion();
}
return Optional.of(DEFAULT_VERSION);
}
| 3.26 |
flink_RecordCounter_of_rdh
|
/**
 * Creates a {@link RecordCounter} depending on the index of COUNT(*). If the index is less than zero,
* returns {@link AccumulationRecordCounter}, otherwise, {@link RetractionRecordCounter}.
*
* @param indexOfCountStar
* The index of COUNT(*) in the aggregates. -1 when the input doesn't
* contain COUNT(*), i.e. doesn't contain retraction messages. We make sure there is a
* COUNT(*) if input stream contains retraction.
*/
public static RecordCounter of(int indexOfCountStar) {
if (indexOfCountStar >= 0) {
return new RetractionRecordCounter(indexOfCountStar);
} else {
return new AccumulationRecordCounter();
}
}
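
// Hedged usage sketch: -1 means the input has no COUNT(*) (and therefore no retractions), so the
// accumulation-only counter is chosen; a non-negative index selects the retraction-aware counter
// that reads COUNT(*) from that position of the aggregates.
RecordCounter appendOnly = RecordCounter.of(-1); // AccumulationRecordCounter
RecordCounter retracting = RecordCounter.of(2);  // RetractionRecordCounter reading index 2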
| 3.26 |
flink_InPlaceMutableHashTable_reset_rdh
|
/**
* Seeks to the beginning.
*/
public void reset()
{
seekOutput(segments.get(0), 0);
currentSegmentIndex = 0;
}
| 3.26 |
flink_InPlaceMutableHashTable_m1_rdh
|
/**
* Sets appendPosition and the write position to 0, so that appending starts overwriting
* elements from the beginning. (This is used in rebuild.)
*
* <p>Note: if data was written to the area after the current appendPosition before a call
* to resetAppendPosition, it should still be readable. To release the segments after the
* current append position, call freeSegmentsAfterAppendPosition()
*/
public void m1() {
f0 = 0;
// this is just for safety (making sure that we fail immediately
// if a write happens without calling setWritePosition)
outView.currentSegmentIndex = -1;
outView.seekOutput(null, -1);
}
| 3.26 |
flink_InPlaceMutableHashTable_insertOrReplaceRecord_rdh
|
/**
* Searches the hash table for a record with the given key. If it is found, then it is
* overridden with the specified record. Otherwise, the specified record is inserted.
*
* @param record
* The record to insert or to replace with.
* @throws IOException
* (EOFException specifically, if memory ran out)
*/
@Override
public void insertOrReplaceRecord(T record) throws IOException {
if (closed) {
return;
}
T match = prober.getMatchFor(record, reuse);
if (match == null) {
    prober.insertAfterNoMatch(record);
} else {
    prober.updateMatch(record);
}
}
| 3.26 |
flink_InPlaceMutableHashTable_rebuild_rdh
|
/**
* Same as above, but the number of bucket segments of the new table can be specified.
*/
private void rebuild(long newNumBucketSegments) throws IOException {
// Get new bucket segments
releaseBucketSegments();
allocateBucketSegments(((int) (newNumBucketSegments)));
T record = buildSideSerializer.createInstance();
try {
EntryIterator iter = getEntryIterator();
recordArea.m1();
recordArea.setWritePosition(0);
while (((record = iter.next(record)) != null) && (!closed)) {
    final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
    final int bucket = hashCode & numBucketsMask;
    final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits;// which segment contains the bucket
    final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
    final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits;// offset of the bucket in the segment
    final long firstPointer = bucketSegment.getLong(bucketOffset);
    long ptrToAppended = recordArea.noSeekAppendPointerAndRecord(firstPointer, record);
    bucketSegment.putLong(bucketOffset, ptrToAppended);
}
recordArea.freeSegmentsAfterAppendPosition();
holes = 0;
} catch (EOFException ex) {
throw new RuntimeException("Bug in InPlaceMutableHashTable: we shouldn't get out of memory during a rebuild, " + "because we aren't allocating any new memory.");
}
}
| 3.26 |
flink_InPlaceMutableHashTable_emit_rdh
|
/**
* Emits all elements currently held by the table to the collector.
*/
public void emit() throws IOException {
T record = buildSideSerializer.createInstance();
EntryIterator iter = getEntryIterator();
while (((record = iter.next(record)) != null) && (!closed)) {
outputCollector.collect(record);
if (!objectReuseEnabled) {
record = buildSideSerializer.createInstance();
}
}
}
| 3.26 |
flink_InPlaceMutableHashTable_getCapacity_rdh
|
/**
* Gets the total capacity of this hash table, in bytes.
*
* @return The hash table's total capacity.
*/
public long getCapacity()
{
return numAllMemorySegments * ((long) (segmentSize));
}
| 3.26 |
flink_InPlaceMutableHashTable_m0_rdh
|
/**
* If there is wasted space (due to updated records not fitting in their old places), then do a
* compaction. Else, throw EOFException to indicate that memory ran out.
*
* @throws IOException
*/
private void m0() throws IOException {
    if (holes > (((double) (recordArea.getTotalSize())) * 0.05)) {
rebuild();
} else {
throw new EOFException("InPlaceMutableHashTable memory ran out. " + getMemoryConsumptionString());
}
}
| 3.26 |
flink_InPlaceMutableHashTable_noSeekAppendPointerAndRecord_rdh
|
/**
* Appends a pointer and a record. Call this function only if the write position is at the
* end!
*
* @param pointer
* The pointer to write (Note: this is NOT the position to write to!)
* @param record
* The record to write
* @return A pointer to the written data
* @throws IOException
* (EOFException specifically, if memory ran out)
*/
public long noSeekAppendPointerAndRecord(long pointer, T record) throws IOException {
final long oldLastPosition = f0;
final long oldPositionInSegment = outView.getCurrentPositionInSegment();
final long oldSegmentIndex = outView.currentSegmentIndex;
outView.writeLong(pointer);
buildSideSerializer.serialize(record, outView);
f0 += (outView.getCurrentPositionInSegment() - oldPositionInSegment) + (outView.getSegmentSize() * (outView.currentSegmentIndex - oldSegmentIndex));
return oldLastPosition;
}
| 3.26 |
flink_InPlaceMutableHashTable_giveBackSegments_rdh
|
/**
* Moves all its memory segments to freeMemorySegments. Warning: this will leave the
* RecordArea in an unwritable state: you have to call setWritePosition before writing
* again.
*/
public void giveBackSegments() {
freeMemorySegments.addAll(segments);
segments.clear();
m1();
}
| 3.26 |
flink_InPlaceMutableHashTable_readPointer_rdh
|
/**
* Note: this is sometimes a negated length instead of a pointer (see
* HashTableProber.updateMatch).
*/
public long readPointer() throws IOException {
return inView.readLong();
}
| 3.26 |
flink_InPlaceMutableHashTable_insert_rdh
|
/**
* Inserts the given record into the hash table. Note: this method doesn't care about whether a
* record with the same key is already present.
*
* @param record
* The record to insert.
* @throws IOException
* (EOFException specifically, if memory ran out)
*/
@Override
public void insert(T record) throws IOException {
if (closed) {
return;
}
final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
final int bucket = hashCode & numBucketsMask;
final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits;// which segment contains the bucket
final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits;// offset of the bucket in the segment
final long firstPointer = bucketSegment.getLong(bucketOffset);
try {
final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record);
bucketSegment.putLong(bucketOffset, newFirstPointer);
} catch (EOFException ex) {
m0();
insert(record);
return;
}
numElements++;
resizeTableIfNecessary();
}
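
// Self-contained sketch of the bucket addressing used by insert() and rebuild(), with
// illustrative constants that are NOT taken from the real configuration: 32 KiB bucket segments
// and 8-byte bucket slots give 4096 buckets per segment, and 65536 buckets in total are assumed.
int numBucketsPerSegmentBits = 12;                          // 4096 buckets per segment
int numBucketsPerSegmentMask = (1 << numBucketsPerSegmentBits) - 1;
int bucketSizeBits = 3;                                     // each bucket slot is 8 bytes
int numBucketsMask = (1 << 16) - 1;                         // 65536 buckets in total
int hashCode = MathUtils.jenkinsHash(12345);                // same scrambling step as above
int bucket = hashCode & numBucketsMask;                     // which bucket
int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits;             // which bucket segment
int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // byte offset of the slot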
| 3.26 |
flink_InPlaceMutableHashTable_updateMatch_rdh
|
/**
* This method can be called after getMatchFor returned a match. It will overwrite the
* record that was found by getMatchFor. Warning: The new record should have the same key as
* the old! WARNING; Don't do any modifications to the table between getMatchFor and
* updateMatch!
*
* @param newRecord
* The record to override the old record with.
* @throws IOException
* (EOFException specifically, if memory ran out)
*/
@Override
public void updateMatch(T newRecord) throws IOException {
if (closed) {
return;
}
if (curElemPtr == END_OF_LIST) {
throw new RuntimeException("updateMatch was called after getMatchFor returned no match");
}
try {
// determine the new size
stagingSegmentsOutView.reset();
buildSideSerializer.serialize(newRecord, stagingSegmentsOutView);
final int v39 = ((int) (stagingSegmentsOutView.getWritePosition()));
stagingSegmentsInView.setReadPosition(0);
// Determine the size of the place of the old record.
final int oldRecordSize = ((int) (recordEnd - (curElemPtr + RECORD_OFFSET_IN_LINK)));
if (v39 == oldRecordSize) {
// overwrite record at its original place
recordArea.overwriteRecordAt(curElemPtr + RECORD_OFFSET_IN_LINK, stagingSegmentsInView, v39);
} else {
// new record has a different size than the old one, append new at the end of
// the record area.
// Note: we have to do this, even if the new record is smaller, because
// otherwise EntryIterator
// wouldn't know the size of this place, and wouldn't know where does the next
// record start.
final long pointerToAppended = recordArea.appendPointerAndCopyRecord(nextPtr, stagingSegmentsInView, v39);
// modify the pointer in the previous link
if (f1 == INVALID_PREV_POINTER) {
// list had only one element, so prev is in the bucketSegments
bucketSegments[bucketSegmentIndex].putLong(bucketOffset, pointerToAppended);
} else {
recordArea.overwritePointerAt(f1, pointerToAppended);
}
// write the negated size of the hole to the place where the next pointer was,
// so that EntryIterator
// will know the size of the place without reading the old record.
// The negative sign will mean that the record is abandoned, and the
// the -1 is for avoiding trouble in case of a record having 0 size. (though I
// think this should
// never actually happen)
// Note: the last record in the record area can't be abandoned. (EntryIterator
// makes use of this fact.)
recordArea.overwritePointerAt(curElemPtr, (-oldRecordSize) - 1);
holes += oldRecordSize;
}
} catch (EOFException ex) {
m0();
insertOrReplaceRecord(newRecord);
}
}
| 3.26 |
flink_InPlaceMutableHashTable_getOccupancy_rdh
|
/**
* Gets the number of bytes currently occupied in this hash table.
*
* @return The number of bytes occupied.
*/
public long getOccupancy()
{
return (numAllMemorySegments * segmentSize) - (freeMemorySegments.size() * segmentSize);
}
| 3.26 |
flink_InPlaceMutableHashTable_overwritePointerAt_rdh
|
/**
* Overwrites the long value at the specified position.
*
* @param pointer
* Points to the position to overwrite.
* @param value
* The value to write.
* @throws IOException
*/
public void overwritePointerAt(long pointer, long value) throws IOException {
setWritePosition(pointer);
outView.writeLong(value);
}
| 3.26 |
flink_InPlaceMutableHashTable_freeSegmentsAfterAppendPosition_rdh
|
/**
* Releases the memory segments that are after the current append position. Note: The
* situation that there are segments after the current append position can arise from a call
* to resetAppendPosition().
*/
public void freeSegmentsAfterAppendPosition() {
final int appendSegmentIndex = ((int) (f0 >>> segmentSizeBits));
while ((segments.size() > (appendSegmentIndex + 1)) && (!closed)) {
freeMemorySegments.add(segments.get(segments.size() - 1));
segments.remove(segments.size() - 1);
}
}
| 3.26 |
flink_InPlaceMutableHashTable_setWritePosition_rdh
|
// ----------------------- Output -----------------------
private void setWritePosition(long position) throws EOFException {
if (position > f0) {
throw new IndexOutOfBoundsException();
}
final int segmentIndex = ((int) (position >>> segmentSizeBits));
final int offset = ((int) (position & segmentSizeMask));
// If position == appendPosition and the last buffer is full,
// then we will be seeking to the beginning of a new segment
if (segmentIndex == segments.size()) {
addSegment();
}
outView.currentSegmentIndex = segmentIndex;
outView.seekOutput(segments.get(segmentIndex), offset);
}
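
// Worked example of the position decoding above, assuming 32 KiB segments, i.e.
// segmentSizeBits = 15 and segmentSizeMask = 0x7FFF (illustrative values only):
long position = 100000L;
int segmentIndex = (int) (position >>> 15); // = 3, the write lands in the fourth segment
int offset = (int) (position & 0x7FFF);     // = 1696, the byte offset inside that segment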
| 3.26 |
flink_InPlaceMutableHashTable_getMemoryConsumptionString_rdh
|
/**
*
* @return String containing a summary of the memory consumption for error messages
*/
private String getMemoryConsumptionString() {
return "InPlaceMutableHashTable memory stats:\n"
        + "Total memory: " + (numAllMemorySegments * segmentSize) + "\n"
        + "Free memory: " + (freeMemorySegments.size() * segmentSize) + "\n"
        + "Bucket area: " + (numBuckets * 8) + "\n"
        + "Record area: " + recordArea.getTotalSize() + "\n"
        + "Staging area: " + (stagingSegments.size() * segmentSize) + "\n"
        + "Num of elements: " + numElements + "\n"
        + "Holes total size: " + holes;
}
| 3.26 |
flink_InPlaceMutableHashTable_appendPointerAndCopyRecord_rdh
|
/**
* Appends a pointer and a record. The record is read from a DataInputView (this will be the
* staging area).
*
* @param pointer
* The pointer to write (Note: this is NOT the position to write to!)
* @param input
* The DataInputView to read the record from
* @param recordSize
* The size of the record
* @return A pointer to the written data
* @throws IOException
* (EOFException specifically, if memory ran out)
*/
public long appendPointerAndCopyRecord(long pointer, DataInputView input, int recordSize) throws IOException {
setWritePosition(f0);
final long oldLastPosition = f0;
outView.writeLong(pointer);
outView.write(input, recordSize);
f0 += 8 + recordSize;
return oldLastPosition;
}
| 3.26 |
flink_InPlaceMutableHashTable_appendPointerAndRecord_rdh
|
/**
* Appends a pointer and a record.
*
* @param pointer
* The pointer to write (Note: this is NOT the position to write to!)
* @param record
* The record to write
* @return A pointer to the written data
* @throws IOException
* (EOFException specifically, if memory ran out)
*/
public long appendPointerAndRecord(long pointer, T record) throws IOException {
setWritePosition(f0);
return noSeekAppendPointerAndRecord(pointer, record);
}
| 3.26 |