name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
flink_TypeInferenceUtil_inferOutputType_rdh | /**
* Infers an output type using the given {@link TypeStrategy}. It assumes that input arguments
* have been adapted before if necessary.
*/
public static DataType inferOutputType(CallContext callContext, TypeStrategy outputTypeStrategy) {
final Optional<DataType> potentialOutputType = outputTypeStrategy.inferType(callContext);
if (!potentialOutputType.isPresent()) {
throw new ValidationException("Could not infer an output type for the given arguments.");
}
final DataType outputType = potentialOutputType.get();
if (isUnknown(outputType)) {
throw new ValidationException("Could not infer an output type for the given arguments. Untyped NULL received.");
}
return outputType;
} | 3.26 |
flink_TypeInferenceUtil_generateSignature_rdh | /**
* Generates a signature of the given {@link FunctionDefinition}.
*/
public static String generateSignature(TypeInference typeInference, String name, FunctionDefinition definition) {
if (typeInference.getTypedArguments().isPresent()) {
return formatNamedOrTypedArguments(name, typeInference);
}
return typeInference.getInputTypeStrategy().getExpectedSignatures(definition).stream().map(s -> formatSignature(name, s)).collect(Collectors.joining("\n"));
} | 3.26 |
flink_TypeInferenceUtil_runTypeInference_rdh | /**
* Runs the entire type inference process.
*
* @param typeInference
* type inference of the current call
* @param callContext
* call context of the current call
* @param surroundingInfo
* information about the outer wrapping call of a current function call
* for performing input type inference
*/
public static Result runTypeInference(TypeInference typeInference, CallContext callContext, @Nullable SurroundingInfo surroundingInfo) {
try {
return runTypeInferenceInternal(typeInference, callContext, surroundingInfo);
} catch (ValidationException e) {
throw createInvalidCallException(callContext, e);
} catch (Throwable t) {
throw createUnexpectedException(callContext, t);
}
} | 3.26 |
flink_TypeInferenceUtil_validateArgumentCount_rdh | /**
* Validates argument counts.
*
* @param argumentCount
* expected argument count
* @param actualCount
* actual argument count
* @param throwOnFailure
* if true, the function throws a {@link ValidationException} if the
* actual value does not meet the expected argument count
* @return a boolean indicating if expected argument counts match the actual counts
*/
public static boolean validateArgumentCount(ArgumentCount argumentCount, int actualCount, boolean throwOnFailure) {
final int minCount = argumentCount.getMinCount().orElse(0);
if (actualCount < minCount) {
if (throwOnFailure) {
throw new ValidationException(String.format("Invalid number of arguments. At least %d arguments expected but %d passed.", minCount, actualCount));
}
return false;
}
final int maxCount = argumentCount.getMaxCount().orElse(Integer.MAX_VALUE);
if (actualCount > maxCount) {
if (throwOnFailure) {
throw new ValidationException(String.format("Invalid number of arguments. At most %d arguments expected but %d passed.", maxCount, actualCount));
}
return false;
    }
    if (!argumentCount.isValidCount(actualCount)) {
if (throwOnFailure) {
throw new ValidationException(String.format("Invalid number of arguments. %d arguments passed.", actualCount));
}
return false;
}
return true;
} | 3.26 |
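The validation above reduces to a bounds check against the declared minimum and maximum count. A minimal, self-contained sketch (hypothetical class, not Flink code) of the expected outcomes for a strategy that accepts 2 to 3 arguments:

```java
// Hypothetical illustration of the bounds check performed by validateArgumentCount.
public class ArgumentCountCheckSketch {
    static boolean isValidCount(int actual, int min, int max) {
        return actual >= min && actual <= max;
    }

    public static void main(String[] args) {
        System.out.println(isValidCount(1, 2, 3)); // false -> "At least 2 arguments expected but 1 passed."
        System.out.println(isValidCount(2, 2, 3)); // true
        System.out.println(isValidCount(4, 2, 3)); // false -> "At most 3 arguments expected but 4 passed."
    }
}
```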
flink_TypeInferenceUtil_createInvalidCallException_rdh | /**
* Returns an exception for an invalid call to a function.
*/
public static ValidationException createInvalidCallException(CallContext callContext, ValidationException cause) {
return new ValidationException(String.format("Invalid function call:\n%s(%s)", callContext.getName(), callContext.getArgumentDataTypes().stream().map(DataType::toString).collect(Collectors.joining(", "))), cause);
} | 3.26 |
flink_TypeInferenceUtil_adaptArguments_rdh | /**
* Adapts the call's argument if necessary.
*
* <p>This includes casts that need to be inserted, reordering of arguments (*), or insertion of
* default values (*) where (*) is future work.
*/
public static CallContext adaptArguments(TypeInference typeInference, CallContext callContext, @Nullable DataType outputType) {
return adaptArguments(typeInference, callContext, outputType, true);
} | 3.26 |
flink_TypeInferenceUtil_createUnexpectedException_rdh | /**
* Returns an exception for an unexpected error during type inference.
*/
public static TableException createUnexpectedException(CallContext callContext, Throwable cause) {
return new TableException(String.format("Unexpected error in type inference logic of function '%s'. This is a bug.", callContext.getName()), cause);
} | 3.26 |
flink_TypeInferenceUtil_runTypeInferenceInternal_rdh | // --------------------------------------------------------------------------------------------
private static Result runTypeInferenceInternal(TypeInference typeInference, CallContext callContext, @Nullable SurroundingInfo surroundingInfo) {
    try {
        validateArgumentCount(typeInference.getInputTypeStrategy().getArgumentCount(), callContext.getArgumentDataTypes().size(), true);
    } catch (ValidationException e) {
        throw createInvalidInputException(typeInference, callContext, e);
    }
    final CallContext adaptedCallContext;
    try {
        // use information of surrounding call to determine output type of this call
        final DataType outputType;
        if (surroundingInfo != null) {
            outputType = surroundingInfo.inferOutputType(callContext.getDataTypeFactory()).orElse(null);
        } else {
            outputType = null;
        }
        adaptedCallContext = adaptArguments(typeInference, callContext, outputType);
    } catch (ValidationException e) {
        throw createInvalidInputException(typeInference, callContext, e);
    }
// infer output type first for better error message
// (logically an accumulator type should be inferred first)
final DataType outputType = inferOutputType(adaptedCallContext, typeInference.getOutputTypeStrategy());
final DataType accumulatorType =
inferAccumulatorType(adaptedCallContext, outputType, typeInference.getAccumulatorTypeStrategy().orElse(null));
return new Result(adaptedCallContext.getArgumentDataTypes(), accumulatorType, outputType);
} | 3.26 |
flink_SsgNetworkMemoryCalculationUtils_enrichNetworkMemory_rdh | /**
 * Calculates the network memory requirement of an {@link ExecutionJobVertex} and updates the {@link ResourceProfile} of the corresponding slot sharing group.
*/
public static void enrichNetworkMemory(SlotSharingGroup ssg, Function<JobVertexID, ExecutionJobVertex> ejvs, ShuffleMaster<?> shuffleMaster) {
ResourceProfile original = ssg.getResourceProfile();
// Updating network memory for UNKNOWN is also beneficial, but currently it's not
// supported and the enriching logic only works for 'fine-grained resource management'.
if (original.equals(ResourceProfile.UNKNOWN) || (!original.getNetworkMemory().equals(MemorySize.ZERO))) {
        return;
    }
    MemorySize networkMemory = MemorySize.ZERO;
    for (JobVertexID jvId : ssg.getJobVertexIds()) {
        ExecutionJobVertex ejv = ejvs.apply(jvId);
        TaskInputsOutputsDescriptor desc = buildTaskInputsOutputsDescriptor(ejv, ejvs);
        MemorySize requiredNetworkMemory = shuffleMaster.computeShuffleMemorySizeForTask(desc);
networkMemory = networkMemory.add(requiredNetworkMemory);
}
ResourceProfile enriched =
ResourceProfile.newBuilder().setCpuCores(original.getCpuCores()).setTaskHeapMemory(original.getTaskHeapMemory()).setTaskOffHeapMemory(original.getTaskOffHeapMemory()).setManagedMemory(original.getManagedMemory()).setNetworkMemory(networkMemory).setExtendedResources(original.getExtendedResources().values()).build();
ssg.setResourceProfile(enriched);
} | 3.26 |
flink_GenericTypeInfo_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return typeClass.hashCode();
} | 3.26 |
flink_NonSplittingRecursiveEnumerator_enumerateSplits_rdh | // ------------------------------------------------------------------------
@Override
public Collection<FileSourceSplit> enumerateSplits(Path[] paths, int minDesiredSplits) throws IOException {
final ArrayList<FileSourceSplit> splits = new ArrayList<>();
for (Path path : paths) {
final FileSystem fs = path.getFileSystem();
final FileStatus status = fs.getFileStatus(path);
addSplitsForPath(status, fs, splits);
}
return splits;
} | 3.26 |
flink_DiskTierProducerAgent_emitEndOfSegmentEvent_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void emitEndOfSegmentEvent(int subpartitionId) {
try {
diskCacheManager.appendEndOfSegmentEvent(EventSerializer.toSerializedEvent(EndOfSegmentEvent.INSTANCE), subpartitionId);
} catch (IOException e) {
ExceptionUtils.rethrow(e, "Failed to emit end of segment event.");
}
} | 3.26 |
flink_HsSelectiveSpillingStrategy_decideActionWithGlobalInfo_rdh | // Buffers are selected by consumption priority, from high to low.
@Override
public Decision decideActionWithGlobalInfo(HsSpillingInfoProvider spillingInfoProvider) {
    if (spillingInfoProvider.getNumTotalRequestedBuffers() < (spillingInfoProvider.getPoolSize() * spillThreshold)) {
        // In case the situation has changed since onMemoryUsageChanged() returned Optional#empty()
        return Decision.NO_ACTION;
    }
    int spillNum = ((int) (spillingInfoProvider.getPoolSize() * spillBufferRatio));
    TreeMap<Integer, Deque<BufferIndexAndChannel>> subpartitionToBuffers = new TreeMap<>();
    for (int channel = 0; channel < spillingInfoProvider.getNumSubpartitions(); channel++) {
        // the selective spilling strategy does not support multiple consumers
        subpartitionToBuffers.put(channel, spillingInfoProvider.getBuffersInOrder(channel, SpillStatus.NOT_SPILL, ConsumeStatusWithId.fromStatusAndConsumerId(ConsumeStatus.NOT_CONSUMED, HsConsumerId.DEFAULT)));
    }
    // the selective spilling strategy does not support multiple consumers
    TreeMap<Integer, List<BufferIndexAndChannel>> subpartitionToHighPriorityBuffers = getBuffersByConsumptionPriorityInOrder(spillingInfoProvider.getNextBufferIndexToConsume(HsConsumerId.DEFAULT), subpartitionToBuffers, spillNum);
Decision.Builder builder = Decision.builder();
subpartitionToHighPriorityBuffers.forEach((subpartitionId, buffers) -> {
builder.addBufferToSpill(subpartitionId, buffers);
builder.addBufferToRelease(subpartitionId, buffers);
});
return builder.build();
} | 3.26 |
flink_HsSelectiveSpillingStrategy_onBufferConsumed_rdh | // When a buffer has been consumed, it needs to be released; control of the buffer has been taken
// over by the downstream task.
@Override
public Optional<Decision> onBufferConsumed(BufferIndexAndChannel consumedBuffer) {
    return Optional.of(Decision.builder().addBufferToRelease(consumedBuffer).build());
} | 3.26 |
flink_HsSelectiveSpillingStrategy_m0_rdh | // When the amount of memory used exceeds the threshold, decide action based on global
// information. Otherwise, no need to take action.
@Override
public Optional<Decision> m0(int numTotalRequestedBuffers, int currentPoolSize) {
return numTotalRequestedBuffers < (currentPoolSize * spillThreshold) ? Optional.of(Decision.NO_ACTION) : Optional.empty();
} | 3.26 |
flink_KeyGroupRangeAssignment_computeKeyGroupForKeyHash_rdh | /**
* Assigns the given key to a key-group index.
*
* @param keyHash
* the hash of the key to assign
* @param maxParallelism
* the maximum supported parallelism, aka the number of key-groups.
* @return the key-group to which the given key is assigned
*/
public static int computeKeyGroupForKeyHash(int keyHash, int maxParallelism) {
return MathUtils.murmurHash(keyHash) % maxParallelism;
} | 3.26 |
flink_KeyGroupRangeAssignment_computeKeyGroupRangeForOperatorIndex_rdh | /**
* Computes the range of key-groups that are assigned to a given operator under the given
* parallelism and maximum parallelism.
*
* <p>IMPORTANT: maxParallelism must be <= Short.MAX_VALUE + 1 to avoid rounding problems in
* this method. If we ever want to go beyond this boundary, this method must perform arithmetic
* on long values.
*
* @param maxParallelism
* Maximal parallelism that the job was initially created with.
* @param parallelism
* The current parallelism under which the job runs. Must be <=
* maxParallelism.
* @param operatorIndex
 * index of the operator. 0 <= operatorIndex < parallelism.
* @return the computed key-group range for the operator.
*/
public static KeyGroupRange computeKeyGroupRangeForOperatorIndex(int maxParallelism, int parallelism, int operatorIndex) {
checkParallelismPreconditions(parallelism);
checkParallelismPreconditions(maxParallelism);
Preconditions.checkArgument(maxParallelism >= parallelism, "Maximum parallelism must not be smaller than parallelism.");
    int start = (((operatorIndex * maxParallelism) + parallelism) - 1) / parallelism;
int end = (((operatorIndex + 1) * maxParallelism) - 1) / parallelism;
return new KeyGroupRange(start, end);
} | 3.26 |
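To make the integer arithmetic above concrete, here is a minimal standalone sketch that reproduces the start/end formula with assumed values (maxParallelism = 128, parallelism = 4); the resulting ranges are contiguous and cover every key group exactly once:

```java
// Reproduces the start/end formula from computeKeyGroupRangeForOperatorIndex
// for the assumed example values maxParallelism = 128 and parallelism = 4.
public class KeyGroupRangeSketch {
    public static void main(String[] args) {
        int maxParallelism = 128;
        int parallelism = 4;
        for (int operatorIndex = 0; operatorIndex < parallelism; operatorIndex++) {
            int start = ((operatorIndex * maxParallelism) + parallelism - 1) / parallelism;
            int end = (((operatorIndex + 1) * maxParallelism) - 1) / parallelism;
            // Prints 0-31, 32-63, 64-95, 96-127.
            System.out.println("operator " + operatorIndex + ": key groups " + start + " - " + end);
        }
    }
}
```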
flink_KeyGroupRangeAssignment_assignKeyToParallelOperator_rdh | /**
* Assigns the given key to a parallel operator index.
*
* @param key
* the key to assign
* @param maxParallelism
* the maximum supported parallelism, aka the number of key-groups.
* @param parallelism
* the current parallelism of the operator
* @return the index of the parallel operator to which the given key should be routed.
*/
public static int assignKeyToParallelOperator(Object key, int maxParallelism, int parallelism) {
Preconditions.checkNotNull(key, "Assigned key must not be null!");return computeOperatorIndexForKeyGroup(maxParallelism, parallelism, assignToKeyGroup(key, maxParallelism));
} | 3.26 |
flink_KeyGroupRangeAssignment_computeDefaultMaxParallelism_rdh | /**
* Computes a default maximum parallelism from the operator parallelism. This is used in case
* the user has not explicitly configured a maximum parallelism to still allow a certain degree
* of scale-up.
*
* @param operatorParallelism
* the operator parallelism as basis for computation.
* @return the computed default maximum parallelism.
*/
public static int computeDefaultMaxParallelism(int operatorParallelism) {
checkParallelismPreconditions(operatorParallelism);
return Math.min(Math.max(MathUtils.roundUpToPowerOfTwo(operatorParallelism + (operatorParallelism / 2)), DEFAULT_LOWER_BOUND_MAX_PARALLELISM), UPPER_BOUND_MAX_PARALLELISM);
} | 3.26 |
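A worked sketch of this formula. The lower bound 128 and upper bound 32768 are the values commonly used for DEFAULT_LOWER_BOUND_MAX_PARALLELISM and UPPER_BOUND_MAX_PARALLELISM and are stated here as assumptions; roundUpToPowerOfTwo is re-implemented locally rather than taken from MathUtils:

```java
// Sketch of the default-max-parallelism formula: roundUpToPowerOfTwo(p + p/2),
// clamped to an assumed lower bound of 128 and upper bound of 32768.
public class DefaultMaxParallelismSketch {
    static int roundUpToPowerOfTwo(int x) {
        return Integer.highestOneBit(x - 1) << 1; // valid for x >= 2
    }

    public static void main(String[] args) {
        int lowerBound = 128;   // assumed DEFAULT_LOWER_BOUND_MAX_PARALLELISM
        int upperBound = 32768; // assumed UPPER_BOUND_MAX_PARALLELISM
        int[] parallelisms = {10, 100, 200, 1000};
        for (int p : parallelisms) {
            int computed = Math.min(Math.max(roundUpToPowerOfTwo(p + (p / 2)), lowerBound), upperBound);
            // 10 -> 128, 100 -> 256, 200 -> 512, 1000 -> 2048
            System.out.println(p + " -> " + computed);
        }
    }
}
```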
flink_KeyGroupRangeAssignment_assignToKeyGroup_rdh | /**
* Assigns the given key to a key-group index.
*
* @param key
* the key to assign
* @param maxParallelism
* the maximum supported parallelism, aka the number of key-groups.
* @return the key-group to which the given key is assigned
*/
public static int assignToKeyGroup(Object key, int maxParallelism) {
Preconditions.checkNotNull(key, "Assigned key must not be null!");return computeKeyGroupForKeyHash(key.hashCode(), maxParallelism);
} | 3.26 |
flink_CsvReaderFormat_forSchema_rdh | /**
* Builds a new {@code CsvReaderFormat} using a {@code CsvSchema} generator and {@code CsvMapper} factory.
*
* @param mapperFactory
* The factory creating the {@code CsvMapper}.
* @param schemaGenerator
* A generator that creates and configures the Jackson CSV schema for
* parsing specific CSV files, from a mapper created by the mapper factory.
* @param typeInformation
* The Flink type descriptor of the returned elements.
* @param <T>
* The type of the returned elements.
*/
public static <T> CsvReaderFormat<T> forSchema(SerializableSupplier<CsvMapper> mapperFactory, SerializableFunction<CsvMapper, CsvSchema> schemaGenerator, TypeInformation<T> typeInformation) {
return new CsvReaderFormat<>(mapperFactory, schemaGenerator, typeInformation.getTypeClass(), (value, context) -> value, typeInformation, false);
} | 3.26 |
flink_CsvReaderFormat_withIgnoreParseErrors_rdh | /**
 * Returns a new {@code CsvReaderFormat} configured to ignore all parsing errors. All other
 * options are carried over directly from the format on which this method is called.
*/
public CsvReaderFormat<T> withIgnoreParseErrors() {
    return new CsvReaderFormat<>(this.mapperFactory, this.schemaGenerator, this.rootType, this.converter, this.typeInformation, true);
} | 3.26 |
flink_CsvReaderFormat_forPojo_rdh | /**
* Builds a new {@code CsvReaderFormat} for reading CSV files mapped to the provided POJO class
 * definition. The produced reader uses default mapper and schema settings; use {@code forSchema} if
 * you need customizations.
*
* @param pojoType
* The type class of the POJO.
* @param <T>
* The type of the returned elements.
*/
public static <T> CsvReaderFormat<T> forPojo(Class<T> pojoType) {
return forSchema(JacksonMapperFactory::createCsvMapper, mapper ->
mapper.schemaFor(pojoType).withoutQuoteChar(), TypeInformation.of(pojoType));
} | 3.26 |
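A hedged usage sketch: the Order POJO, the file path, and the FileSource wiring are illustrative assumptions rather than part of the snippet above.

```java
import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.csv.CsvReaderFormat;

public class CsvFormatUsageSketch {
    // Hypothetical POJO matching the CSV columns.
    public static class Order {
        public long id;
        public String product;
        public double price;
    }

    public static void main(String[] args) {
        // Default mapper and schema settings, derived from the POJO fields.
        CsvReaderFormat<Order> csvFormat = CsvReaderFormat.forPojo(Order.class);
        // Assumed wiring into a file source; adjust the path for your environment.
        FileSource<Order> source =
                FileSource.forRecordStreamFormat(csvFormat, new Path("/tmp/orders.csv")).build();
        System.out.println("Created source: " + source);
    }
}
```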
flink_CallContext_newValidationError_rdh | /**
* Creates a validation exception for exiting the type inference process with a meaningful
* exception.
*/
default ValidationException newValidationError(String message, Object... args) {
    return new ValidationException(String.format(message, args));
} | 3.26 |
flink_CallContext_fail_rdh | /**
* Helper method for handling failures during the type inference process while considering the
* {@code throwOnFailure} flag.
*
* <p>Shorthand for {@code if (throwOnFailure) throw ValidationException(...) else return
* Optional.empty()}.
*/
default <T> Optional<T> fail(boolean throwOnFailure, String message, Object... args) {
if (throwOnFailure) {
throw newValidationError(message, args);
}
return Optional.empty();
} | 3.26 |
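A hedged sketch of how an input type strategy might use this helper. The surrounding class and its argument handling are hypothetical; only CallContext#fail, #getName, and #getArgumentDataTypes come from the snippets above.

```java
import java.util.List;
import java.util.Optional;

import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.inference.CallContext;

// Hypothetical helper: on an unsupported call it either throws a ValidationException
// or returns Optional.empty(), depending on the throwOnFailure flag passed by the planner.
public class FailUsageSketch {
    static Optional<List<DataType>> inferInputTypes(CallContext callContext, boolean throwOnFailure) {
        final List<DataType> argumentTypes = callContext.getArgumentDataTypes();
        if (argumentTypes.isEmpty()) {
            return callContext.fail(
                    throwOnFailure, "Function '%s' requires at least one argument.", callContext.getName());
        }
        return Optional.of(argumentTypes);
    }
}
```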
flink_FileDataIndexSpilledRegionManagerImpl_readRegionGroup_rdh | /**
* Read region group from index file.
*
* @param offset
* offset of this region group.
* @param numRegions
* number of regions of this region group.
* @return List of all regions and its offset belong to this region group.
*/
private List<Tuple2<T, Long>> readRegionGroup(long offset, int numRegions) throws IOException {
List<Tuple2<T, Long>> regionAndOffsets = new ArrayList<>();
for (int i = 0; i < numRegions; i++) {
        T region = fileDataIndexRegionHelper.readRegionFromFile(channel, offset);
        regionAndOffsets.add(Tuple2.of(region, offset));
offset += region.getSize();
}
return regionAndOffsets;
}
/**
* Metadata of spilled regions region group. When a region group is finished(i.e. no longer
* appended), its corresponding {@link RegionGroup} | 3.26 |
flink_JMXService_startInstance_rdh | /**
 * Start the JVM-wide singleton JMX server.
*
* <p>If JMXServer static instance is already started, it will not be started again. Instead a
* warning will be logged indicating which port the existing JMXServer static instance is
* exposing.
*
* @param portsConfig
* port configuration of the JMX server.
*/
public static synchronized void startInstance(String portsConfig) {
    if (jmxServer == null) {
        if (portsConfig != null) {
Iterator<Integer> ports = NetUtils.getPortRangeFromString(portsConfig);
if (ports.hasNext()) {
jmxServer = startJMXServerWithPortRanges(ports);
}
if (jmxServer == null) {
LOG.error("Could not start JMX server on any configured port(s) in: " + portsConfig);
}
}
} else {
LOG.warn("JVM-wide JMXServer already started at port: " + jmxServer.getPort());
}
} | 3.26 |
flink_JMXService_stopInstance_rdh | /**
* Stop the JMX server.
*/
public static synchronized void stopInstance() throws IOException {
if (jmxServer != null) {
jmxServer.stop();
jmxServer = null;
}
} | 3.26 |
flink_JMXService_getInstance_rdh | /**
* Acquire the global singleton JMXServer instance.
*/
public static Optional<JMXServer> getInstance() {
return Optional.ofNullable(jmxServer);
} | 3.26 |
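A hedged usage sketch of the three JMXService methods above; the port range string is an arbitrary example, and the JMXService class is assumed to be on the classpath (its import is omitted because the package varies by Flink version).

```java
import java.io.IOException;

// Assumes Flink's JMXService / JMXServer utilities shown above are importable.
public class JmxServiceUsageSketch {
    public static void main(String[] args) throws IOException {
        // Start the JVM-wide server on an assumed port range; an error is logged if no port is free.
        JMXService.startInstance("9990-9999");
        // The Optional is empty if the server could not be started.
        JMXService.getInstance().ifPresent(server -> System.out.println("JMX port: " + server.getPort()));
        JMXService.stopInstance();
    }
}
```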
flink_OneInputOperatorTransformation_setMaxParallelism_rdh | /**
* Sets the maximum parallelism of this operator.
*
* <p>The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
* number of key groups used for partitioned state.
*
* @param maxParallelism
* Maximum parallelism
* @return The operator with set maximum parallelism
*/
@PublicEvolving
public OneInputOperatorTransformation<T> setMaxParallelism(int maxParallelism) {
this.operatorMaxParallelism = OptionalInt.of(maxParallelism);
return this;
} | 3.26 |
flink_OneInputOperatorTransformation_m0_rdh | /**
* It creates a new {@link KeyedOperatorTransformation} that uses the provided key with explicit
* type information for partitioning its operator states.
*
* @param keySelector
* The KeySelector to be used for extracting the key for partitioning.
* @param keyType
* The type information describing the key type.
* @return The {@code BootstrapTransformation} with partitioned state.
 */
public <K> KeyedOperatorTransformation<K, T> m0(KeySelector<T, K> keySelector, TypeInformation<K> keyType) {
return new KeyedOperatorTransformation<>(dataSet,
operatorMaxParallelism, timestamper, keySelector, keyType);
}
/**
* Partitions the operator state of a {@link OperatorTransformation} by the given key positions.
*
* @param fields
* The position of the fields on which the {@code OperatorTransformation} will be
* grouped.
* @return The {@code OperatorTransformation} | 3.26 |
flink_OneInputOperatorTransformation_keyBy_rdh | /**
* Partitions the operator state of a {@link OperatorTransformation} using field expressions. A
* field expression is either the name of a public field or a getter method with parentheses of
* the {@code OperatorTransformation}'s underlying type. A dot can be used to drill down into
* objects, as in {@code "field1.getInnerField2()"}.
*
* @param fields
* One or more field expressions on which the state of the {@link OperatorTransformation} operators will be partitioned.
* @return The {@code OperatorTransformation} with partitioned state (i.e. KeyedStream)
*/
public KeyedOperatorTransformation<Tuple, T> keyBy(String... fields) {
return keyBy(new Keys.ExpressionKeys<>(fields, dataSet.getType()));
} | 3.26 |
flink_OneInputOperatorTransformation_assignTimestamps_rdh | /**
* Assigns an event time timestamp to each record. This value will be used when performing event
* time computations such as assigning windows.
*/
public OneInputOperatorTransformation<T> assignTimestamps(TimestampAssigner<T> assigner) {
    this.timestamper = new TimestampAssignerWrapper<>(assigner);
    return this;
} | 3.26 |
flink_OneInputOperatorTransformation_transform_rdh | /**
* Method for passing user defined operators along with the type information that will transform
* the OperatorTransformation.
*
* <p><b>IMPORTANT:</b> Any output from this operator will be discarded.
*
* @param factory
* A factory returning transformation logic type of the return stream
* @return An {@link BootstrapTransformation} that can be added to a {@link Savepoint}.
*/
public BootstrapTransformation<T> transform(SavepointWriterOperatorFactory factory) {
    return new BootstrapTransformation<>(dataSet, operatorMaxParallelism, timestamper, factory);
} | 3.26 |
flink_PbCodegenUtils_getTypeStrFromProto_rdh | /**
* Get java type str from {@link FieldDescriptor} which directly fetched from protobuf object.
*
* @return The returned code phrase will be used as java type str in codegen sections.
* @throws PbCodegenException
*/
public static String getTypeStrFromProto(FieldDescriptor fd, boolean isList) throws PbCodegenException {
String typeStr;
switch (fd.getJavaType()) {
case MESSAGE :
if (fd.isMapField()) {
// map
FieldDescriptor keyFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_KEY_NAME);
FieldDescriptor valueFd = fd.getMessageType().findFieldByName(PbConstant.PB_MAP_VALUE_NAME);
// key and value cannot be repeated
String keyTypeStr = getTypeStrFromProto(keyFd, false);
String valueTypeStr = getTypeStrFromProto(valueFd, false);
typeStr = ((("Map<" + keyTypeStr) + ",") + valueTypeStr) + ">";
} else {
// simple message
typeStr =
PbFormatUtils.getFullJavaName(fd.getMessageType());
}
break;
case INT :
typeStr = "Integer";
break;
case LONG :
typeStr = "Long";
break;
case STRING :
typeStr = "String";
break;
case ENUM :
typeStr = PbFormatUtils.getFullJavaName(fd.getEnumType());
break;
case FLOAT :
typeStr = "Float";
break;
case DOUBLE :
typeStr = "Double";
break;
        case BYTE_STRING :
typeStr = "ByteString";
break;
case BOOLEAN :
typeStr = "Boolean";
break;
default :
            throw new PbCodegenException("do not support field type: " + fd.getJavaType());
}
    if (isList) {
return ("List<" + typeStr) + ">";
} else {
return typeStr;
}
} | 3.26 |
flink_PbCodegenUtils_convertFlinkArrayElementToPbWithDefaultValueCode_rdh | /**
 * This method is called from the serializer of the flink array/map type because flink represents
 * both array and map types in an array format. A protobuf Map/Array cannot contain null values, so we
 * must do a conversion in case of null values in the map/array type.
*
* @param flinkArrDataVar
* code phrase represent arrayData of arr type or keyData/valueData in
* map type.
* @param iVar
* the index in arrDataVar
* @param resultPbVar
* the returned pb variable name in codegen.
* @param elementPbFd
* {@link FieldDescriptor} of element type in proto object
* @param elementDataType
* {@link LogicalType} of element type in flink object
* @return The java code segment which represents field value retrieval.
*/
public static String convertFlinkArrayElementToPbWithDefaultValueCode(String flinkArrDataVar, String iVar, String resultPbVar, FieldDescriptor elementPbFd, LogicalType elementDataType, PbFormatContext pbFormatContext, int indent) throws PbCodegenException {
PbCodegenVarId varUid = PbCodegenVarId.getInstance();
int uid = varUid.getAndIncrement();
String flinkElementVar = "elementVar" + uid;
PbCodegenAppender appender = new PbCodegenAppender(indent);
String protoTypeStr = PbCodegenUtils.getTypeStrFromProto(elementPbFd, false);
String dataTypeStr = PbCodegenUtils.getTypeStrFromLogicType(elementDataType);
appender.appendLine((protoTypeStr + " ") + resultPbVar);
appender.begin(((("if(" + flinkArrDataVar) + ".isNullAt(") + iVar) + ")){");
appender.appendLine((resultPbVar + "=") + PbCodegenUtils.pbDefaultValueCode(elementPbFd, pbFormatContext));
appender.end("}else{");
appender.begin();
appender.appendLine((dataTypeStr + " ") + flinkElementVar);
String flinkContainerElementCode = PbCodegenUtils.flinkContainerElementCode(flinkArrDataVar,
iVar, elementDataType);
appender.appendLine((flinkElementVar + " = ") + flinkContainerElementCode);
PbCodegenSerializer codegenSer = PbCodegenSerializeFactory.getPbCodegenSer(elementPbFd, elementDataType, pbFormatContext);
String code = codegenSer.codegen(resultPbVar, flinkElementVar, appender.currentIndent());
appender.appendSegment(code);
appender.end("}");
return appender.code();
} | 3.26 |
flink_PbCodegenUtils_flinkContainerElementCode_rdh | /**
*
* @param flinkContainerCode
 * code phrase which represents a flink container type like row/array in
* codegen sections
* @param index
* the index number in flink container type
* @param eleType
* the element type
*/
public static String flinkContainerElementCode(String flinkContainerCode, String index, LogicalType eleType) {
    switch (eleType.getTypeRoot()) {
        case INTEGER :
return ((flinkContainerCode + ".getInt(") + index) + ")";
case BIGINT :
return ((flinkContainerCode + ".getLong(") + index) + ")";
case FLOAT :
return ((flinkContainerCode + ".getFloat(") + index) + ")";
case DOUBLE :
return ((flinkContainerCode + ".getDouble(") + index) + ")";
case BOOLEAN :
return ((flinkContainerCode + ".getBoolean(") + index) + ")";
case VARCHAR :
case CHAR :
return ((flinkContainerCode + ".getString(") + index) + ")";
case VARBINARY :
case BINARY :
return ((flinkContainerCode + ".getBinary(") + index) + ")";
case ROW :
int size = eleType.getChildren().size();
return ((((flinkContainerCode + ".getRow(") + index) + ", ") + size) + ")";
case MAP :
return ((flinkContainerCode + ".getMap(") + index) + ")";
case ARRAY :
return ((flinkContainerCode + ".getArray(") + index) + ")";
default :
throw new IllegalArgumentException("Unsupported data type in schema: " + eleType);
}
} | 3.26 |
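For illustration, a small hedged sketch that prints the access phrases this helper generates for two element types. The container variable name "row" and index "i" are placeholders, and the PbCodegenUtils import is omitted because its exact package in the flink-protobuf module is an assumption.

```java
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.VarCharType;

// PbCodegenUtils is the utility shown above; its import is omitted here on purpose.
public class ElementCodeSketch {
    public static void main(String[] args) {
        // Prints "row.getInt(i)" for an INTEGER element.
        System.out.println(PbCodegenUtils.flinkContainerElementCode("row", "i", new IntType()));
        // Prints "row.getString(i)" for a VARCHAR element.
        System.out.println(PbCodegenUtils.flinkContainerElementCode("row", "i", new VarCharType()));
    }
}
```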
flink_PbCodegenUtils_getTypeStrFromLogicType_rdh | /**
 * Gets the java type str from a {@link LogicalType} which is fetched directly from the flink type.
*
* @return The returned code phrase will be used as java type str in codegen sections.
*/
public static String getTypeStrFromLogicType(LogicalType type) {
    switch (type.getTypeRoot()) {
        case INTEGER :
            return "int";
        case BIGINT :
            return "long";
        case FLOAT :
            return "float";
        case DOUBLE :
            return "double";
        case BOOLEAN :
            return "boolean";
        case VARCHAR :
        case CHAR :
            return "StringData";
        case VARBINARY :
        case BINARY :
            return "byte[]";
        case ROW :
            return "RowData";
        case MAP :
            return "MapData";
        case ARRAY :
            return "ArrayData";
        default :
            throw new IllegalArgumentException("Unsupported data type in schema: " + type);
    }
} | 3.26 |
flink_PbCodegenUtils_pbDefaultValueCode_rdh | /**
* Get protobuf default value from {@link FieldDescriptor}.
*
* @return The java code phrase which represents default value calculation.
*/
public static String pbDefaultValueCode(FieldDescriptor fieldDescriptor, PbFormatContext pbFormatContext) throws PbCodegenException {
String nullLiteral = pbFormatContext.getPbFormatConfig().getWriteNullStringLiterals();
switch (fieldDescriptor.getJavaType()) {
case MESSAGE :
return PbFormatUtils.getFullJavaName(fieldDescriptor.getMessageType()) + ".getDefaultInstance()";
case INT :
return "0";
case LONG :
return "0L";
case STRING :
return ("\"" + nullLiteral) + "\"";
case ENUM :
return PbFormatUtils.getFullJavaName(fieldDescriptor.getEnumType()) + ".values()[0]";
case FLOAT :
return "0.0f";
case DOUBLE :
return "0.0d";
case BYTE_STRING :
return "ByteString.EMPTY";
case BOOLEAN :
return "false";
default :
throw new PbCodegenException("do not support field type: " + fieldDescriptor.getJavaType());
}
} | 3.26 |
flink_PythonOperatorUtils_setCurrentKeyForStreaming_rdh | /**
* Set the current key for streaming operator.
*/
public static <K> void setCurrentKeyForStreaming(KeyedStateBackend<K> stateBackend, K currentKey) {
if (!inBatchExecutionMode(stateBackend)) {
stateBackend.setCurrentKey(currentKey);
}
} | 3.26 |
flink_PythonOperatorUtils_setCurrentKeyForTimerService_rdh | /**
* Set the current key for the timer service.
*/
public static <K, N> void setCurrentKeyForTimerService(InternalTimerService<N> internalTimerService, K currentKey) throws Exception {
if (internalTimerService instanceof BatchExecutionInternalTimeService) {
((BatchExecutionInternalTimeService<K, N>) (internalTimerService)).setCurrentKey(currentKey);
}
} | 3.26 |
flink_PanedWindowProcessFunction_isPaneLate_rdh | /**
 * Checks whether the pane is late (i.e. whether it can be or has already been cleaned up).
 */
private boolean isPaneLate(W pane) {
    // whether the pane is late depends on whether the last window the pane belongs to is late
    return windowAssigner.isEventTime() && isWindowLate(windowAssigner.getLastWindow(pane));
} | 3.26 |
flink_ExpressionUtils_extractValue_rdh | /**
* Extracts the value (excluding null) of a given class from an expression assuming it is a
* {@link ValueLiteralExpression}.
*
* @param expression
* literal to extract the value from
* @param targetClass
* expected class to extract from the literal
* @param <V>
* type of extracted value
* @return extracted value or empty if could not extract value of given type
*/
public static <V> Optional<V> extractValue(Expression expression, Class<V> targetClass) {
if (expression instanceof ValueLiteralExpression) {
final ValueLiteralExpression valueLiteral = ((ValueLiteralExpression) (expression));
return valueLiteral.getValueAs(targetClass);
}
return Optional.empty();
} | 3.26 |
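A hedged usage sketch. Constructing the ValueLiteralExpression by hand is only for illustration (in application code the expression usually comes from the planner), and the ExpressionUtils class shown above is assumed to be importable; its import is omitted.

```java
import java.util.Optional;

import org.apache.flink.table.expressions.ValueLiteralExpression;

// Assumes the ExpressionUtils class shown above is on the classpath.
public class ExtractValueSketch {
    public static void main(String[] args) {
        ValueLiteralExpression literal = new ValueLiteralExpression(42);
        // The literal holds an Integer, so extraction with Integer.class succeeds.
        Optional<Integer> asInt = ExpressionUtils.extractValue(literal, Integer.class);
        System.out.println(asInt); // Optional[42]
    }
}
```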
flink_TableFunctionProvider_of_rdh | /**
* Helper method for creating a static provider.
*/
static <T> TableFunctionProvider<T> of(TableFunction<T> tableFunction) {
return () -> tableFunction;
} | 3.26 |
flink_FailedCheckpointStats_getEndToEndDuration_rdh | /**
* Returns the end to end duration until the checkpoint failure.
*/
@Override
public long getEndToEndDuration() {
return Math.max(0, failureTimestamp - triggerTimestamp);
} | 3.26 |
flink_SortedMapSerializer_snapshotConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Serializer configuration snapshot
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<SortedMap<K, V>> snapshotConfiguration() {
return new SortedMapSerializerSnapshot<>(this);
} | 3.26 |
flink_MimeTypes_getMimeTypeForFileName_rdh | /**
* Gets the MIME type for the file with the given name, by extension. This method tries to
* extract the file extension and then use the {@link #getMimeTypeForExtension(String)} to
* determine the MIME type. If the extension cannot be determined, or the extension is
* unrecognized, this method return {@code null}.
*
* @param fileName
* The file name.
* @return The MIME type, or {@code null}, if the file's extension is not recognized.
 */
public static String getMimeTypeForFileName(String fileName) {
    int extensionPos = fileName.lastIndexOf('.');
    if ((extensionPos >= 1) && (extensionPos < (fileName.length() - 1))) {
        String extension = fileName.substring(extensionPos + 1);
        return getMimeTypeForExtension(extension);
    } else {
        return null;
    }
} | 3.26 |
flink_MimeTypes_getMimeTypeForExtension_rdh | /**
* Gets the MIME type for the file with the given extension. If the mime type is not recognized,
* this method returns null.
*
* @param fileExtension
* The file extension.
* @return The MIME type, or {@code null}, if the file extension is not recognized.
*/
public static String getMimeTypeForExtension(String fileExtension) {
return MIME_MAP.get(fileExtension.toLowerCase());
} | 3.26 |
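A hedged usage sketch of the two lookups above; the file names are arbitrary examples, the returned MIME strings depend on the entries registered in MIME_MAP, and the MimeTypes import is omitted because its package is an assumption.

```java
// Assumes the MimeTypes utility shown above is on the classpath.
public class MimeTypesSketch {
    public static void main(String[] args) {
        // Lookup by full file name: the extension "html" is extracted and resolved.
        System.out.println(MimeTypes.getMimeTypeForFileName("index.html"));
        // No extension -> null.
        System.out.println(MimeTypes.getMimeTypeForFileName("README"));
        // Direct lookup by extension, case-insensitive because of toLowerCase().
        System.out.println(MimeTypes.getMimeTypeForExtension("JSON"));
    }
}
```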
flink_RepeatingSequenceInputTypeStrategy_equals_rdh | // ---------------------------------------------------------------------------------------------
@Override
public boolean equals(Object other) {
    if (this == other) {
        return true;
    }
    if ((other == null) || (getClass() != other.getClass())) {
        return false;
    }
    final RepeatingSequenceInputTypeStrategy that = ((RepeatingSequenceInputTypeStrategy) (other));
    return Objects.equals(argumentStrategies, that.argumentStrategies);
} | 3.26 |
flink_DefaultResourceCleaner_withPrioritizedCleanup_rdh | /**
* Prioritized cleanups run before their regular counterparts. This method enables the
* caller to model dependencies between cleanup tasks. The order in which cleanable
* resources are added matters, i.e. if two cleanable resources are added as prioritized
* cleanup tasks, the resource being added first will block the cleanup of the second
* resource. All prioritized cleanup resources will run and finish before any resource that
* is added using {@link #withRegularCleanup(String, Object)} is started.
*
* @param label
* The label being used when logging errors in the given cleanup.
* @param prioritizedCleanup
* The cleanup callback that is going to be prioritized.
*/
public Builder<T> withPrioritizedCleanup(String label, T prioritizedCleanup) {
this.prioritizedCleanup.add(new CleanupWithLabel<>(prioritizedCleanup, label));
return this;
} | 3.26 |
flink_DefaultResourceCleaner_withRegularCleanup_rdh | /**
* Regular cleanups are resources for which the cleanup is triggered after all prioritized
* cleanups succeeded. All added regular cleanups will run concurrently to each other.
*
* @param label
* The label being used when logging errors in the given cleanup.
* @param regularCleanup
* The cleanup callback that is going to run after all prioritized
* cleanups are finished.
* @see #withPrioritizedCleanup(String, Object)
*/
public Builder<T> withRegularCleanup(String label, T regularCleanup) {
this.regularCleanup.add(new CleanupWithLabel<>(regularCleanup, label));
return this;
} | 3.26 |
flink_PartitionRequestQueue_addCreditOrResumeConsumption_rdh | /**
* Adds unannounced credits from the consumer or resumes data consumption after an exactly-once
* checkpoint and enqueues the corresponding reader for this consumer (if not enqueued yet).
*
* @param receiverId
* The input channel id to identify the consumer.
* @param operation
* The operation to be performed (add credit or resume data consumption).
*/
void addCreditOrResumeConsumption(InputChannelID receiverId, Consumer<NetworkSequenceViewReader> operation) throws Exception {
if (fatalError) {
return;
}
NetworkSequenceViewReader reader = obtainReader(receiverId);
operation.accept(reader);
enqueueAvailableReader(reader);
} | 3.26 |
flink_PartitionRequestQueue_enqueueAvailableReader_rdh | /**
* Try to enqueue the reader once receiving credit notification from the consumer or receiving
* non-empty reader notification from the producer.
*
* <p>NOTE: Only one thread would trigger the actual enqueue after checking the reader's
* availability, so there is no race condition here.
*/
private void enqueueAvailableReader(final NetworkSequenceViewReader reader) throws Exception {
if (reader.isRegisteredAsAvailable()) {
return;
}
ResultSubpartitionView.AvailabilityWithBacklog availabilityWithBacklog = reader.getAvailabilityAndBacklog();
if (!availabilityWithBacklog.isAvailable()) {
int backlog = availabilityWithBacklog.getBacklog();
if ((backlog > 0) && reader.needAnnounceBacklog()) {
announceBacklog(reader, backlog);
}
return;
}
// Queue an available reader for consumption. If the queue is empty,
// we try trigger the actual write. Otherwise this will be handled by
// the writeAndFlushNextMessageIfPossible calls.
boolean triggerWrite = availableReaders.isEmpty();
registerAvailableReader(reader);
if (triggerWrite) {
writeAndFlushNextMessageIfPossible(ctx.channel());
}
} | 3.26 |
flink_PartitionRequestQueue_announceBacklog_rdh | /**
* Announces remaining backlog to the consumer after the available data notification or data
* consumption resumption.
*/
private void announceBacklog(NetworkSequenceViewReader reader, int backlog) {
checkArgument(backlog > 0, "Backlog must be positive.");
NettyMessage.BacklogAnnouncement announcement = new NettyMessage.BacklogAnnouncement(backlog, reader.getReceiverId());
ctx.channel().writeAndFlush(announcement).addListener(((ChannelFutureListener) (future -> {
if (!future.isSuccess()) {
onChannelFutureFailure(future);
}
})));
} | 3.26 |
flink_PartitionRequestQueue_getAvailableReaders_rdh | /**
* Accesses internal state to verify reader registration in the unit tests.
*
* <p><strong>Do not use anywhere else!</strong>
*
* @return readers which are enqueued available for transferring data
*/
@VisibleForTesting
ArrayDeque<NetworkSequenceViewReader> getAvailableReaders() {
    return availableReaders;
} | 3.26 |
flink_PartitionRequestQueue_notifyRequiredSegmentId_rdh | /**
* Notify the id of required segment from the consumer.
*
* @param receiverId
* The input channel id to identify the consumer.
* @param segmentId
* The id of required segment.
*/
void notifyRequiredSegmentId(InputChannelID receiverId, int segmentId) {
if (fatalError) {
return;
}
NetworkSequenceViewReader reader = allReaders.get(receiverId);
if (reader != null) {
reader.notifyRequiredSegmentId(segmentId);
}
} | 3.26 |
flink_AbstractStreamOperatorV2_getProcessingTimeService_rdh | /**
* Returns the {@link ProcessingTimeService} responsible for getting the current processing time
* and registering timers.
*/
@VisibleForTesting
public ProcessingTimeService getProcessingTimeService() {
return processingTimeService;
} | 3.26 |
flink_AbstractStreamOperatorV2_getPartitionedState_rdh | /**
* Creates a partitioned state handle, using the state backend configured for this task.
*
* @throws IllegalStateException
* Thrown, if the key/value state was already initialized.
* @throws Exception
* Thrown, if the state backend cannot create the key/value state.
*/
protected <S extends State, N> S getPartitionedState(N namespace, TypeSerializer<N> namespaceSerializer,
StateDescriptor<S, ?> stateDescriptor) throws Exception {
return stateHandler.getPartitionedState(namespace, namespaceSerializer, stateDescriptor);
} | 3.26 |
flink_AbstractStreamOperatorV2_getExecutionConfig_rdh | // ------------------------------------------------------------------------
// Properties and Services
// ------------------------------------------------------------------------
/**
* Gets the execution config defined on the execution environment of the job to which this
* operator belongs.
*
* @return The job's execution config.
 */
public ExecutionConfig getExecutionConfig() {
return executionConfig;
} | 3.26 |
flink_AbstractStreamOperatorV2_getRuntimeContext_rdh | /**
* Returns a context that allows the operator to query information about the execution and also
* to interact with systems such as broadcast variables and managed state. This also allows to
* register timers.
*/
public StreamingRuntimeContext getRuntimeContext() {
return runtimeContext;
} | 3.26 |
flink_AbstractStreamOperatorV2_snapshotState_rdh | /**
* Stream operators with state, which want to participate in a snapshot need to override this
* hook method.
*
* @param context
* context that provides information and means required for taking a snapshot
*/
@Override
public void snapshotState(StateSnapshotContext context) throws Exception {
} | 3.26 |
flink_AbstractStreamOperatorV2_m3_rdh | // ------------------------------------------------------------------------
// Watermark handling
// ------------------------------------------------------------------------
/**
* Returns a {@link InternalTimerService} that can be used to query current processing time and
* event time and to set timers. An operator can have several timer services, where each has its
* own namespace serializer. Timer services are differentiated by the string key that is given
* when requesting them, if you call this method with the same key multiple times you will get
* the same timer service instance in subsequent requests.
*
* <p>Timers are always scoped to a key, the currently active key of a keyed stream operation.
* When a timer fires, this key will also be set as the currently active key.
*
* <p>Each timer has attached metadata, the namespace. Different timer services can have a
* different namespace type. If you don't need namespace differentiation you can use {@link VoidNamespaceSerializer} as the namespace serializer.
*
* @param name
* The name of the requested timer service. If no service exists under the given
* name a new one will be created and returned.
* @param namespaceSerializer
* {@code TypeSerializer} for the timer namespace.
* @param triggerable
* The {@link Triggerable} that should be invoked when timers fire
* @param <N>
* The type of the timer namespace.
*/
@VisibleForTesting
public <K, N> InternalTimerService<N> m3(String name, TypeSerializer<N> namespaceSerializer, Triggerable<K, N> triggerable) {
if (f0 == null) {
throw new RuntimeException("The timer service has not been initialized.");
}
@SuppressWarnings("unchecked")
InternalTimeServiceManager<K> keyedTimeServiceHandler = ((InternalTimeServiceManager<K>) (f0));
KeyedStateBackend<K> keyedStateBackend = getKeyedStateBackend();
checkState(keyedStateBackend != null, "Timers can only be used on keyed operators.");
return keyedTimeServiceHandler.getInternalTimerService(name, keyedStateBackend.getKeySerializer(), namespaceSerializer, triggerable);
} | 3.26 |
flink_AbstractStreamOperatorV2_open_rdh | /**
* This method is called immediately before any elements are processed, it should contain the
* operator's initialization logic, e.g. state initialization.
*
* <p>The default implementation does nothing.
*
* @throws Exception
* An exception in this method causes the operator to fail.
*/
@Override
public void open() throws Exception {
} | 3.26 |
flink_AbstractStreamOperatorV2_initializeState_rdh | /**
* Stream operators with state which can be restored need to override this hook method.
*
* @param context
* context that allows to register different states.
*/
@Override
public void initializeState(StateInitializationContext context) throws Exception {
} | 3.26 |
flink_AbstractStreamOperatorV2_isUsingCustomRawKeyedState_rdh | /**
* Indicates whether or not implementations of this class is writing to the raw keyed state
* streams on snapshots, using {@link #snapshotState(StateSnapshotContext)}. If yes, subclasses
* should override this method to return {@code true}.
*
* <p>Subclasses need to explicitly indicate the use of raw keyed state because, internally, the
* {@link AbstractStreamOperator} may attempt to read from it as well to restore heap-based
* timers and ultimately fail with read errors. By setting this flag to {@code true}, this
* allows the {@link AbstractStreamOperator} to know that the data written in the raw keyed
* states were not written by the timer services, and skips the timer restore attempt.
*
* <p>Please refer to FLINK-19741 for further details.
*
* <p>TODO: this method can be removed once all timers are moved to be managed by state
* backends.
*
* @return flag indicating whether or not this operator is writing to raw keyed state via {@link #snapshotState(StateSnapshotContext)}.
*/
@Internal
protected boolean isUsingCustomRawKeyedState() {
return false;
} | 3.26 |
flink_AbstractKubernetesStepDecorator_decorateFlinkPod_rdh | /**
* Apply transformations on the given FlinkPod in accordance to this feature. Note that we
* should return a FlinkPod that keeps all of the properties of the passed FlinkPod object.
*
* <p>So this is correct:
*
* <pre>{@code Pod decoratedPod = new PodBuilder(pod) // Keeps the original state
* ...
* .build()
*
* Container decoratedContainer = new ContainerBuilder(container) // Keeps the original state
* ...
* .build()
*
* FlinkPod decoratedFlinkPod = new FlinkPodBuilder(flinkPod) // Keeps the original state
* ...
* .build()}</pre>
*
* <p>And this is the incorrect:
*
* <pre>{@code Pod decoratedPod = new PodBuilder() // Loses the original state
* ...
* .build()
*
* Container decoratedContainer = new ContainerBuilder() // Loses the original state
* ...
* .build()
*
* FlinkPod decoratedFlinkPod = new FlinkPodBuilder() // Loses the original state
* ...
* .build()}</pre>
*/
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
return flinkPod;
} | 3.26 |
flink_AbstractKubernetesStepDecorator_buildAccompanyingKubernetesResources_rdh | /**
* Note that the method could have a side effect of modifying the Flink Configuration object,
* such as update the JobManager address.
*/
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
return Collections.emptyList();
} | 3.26 |
flink_CheckpointsCleaner_addSubsumedCheckpoint_rdh | /**
* Add one subsumed checkpoint to CheckpointsCleaner, the subsumed checkpoint would be discarded
* at {@link #cleanSubsumedCheckpoints(long, Set, Runnable, Executor)}.
*
* @param completedCheckpoint
* which is subsumed.
*/
public void addSubsumedCheckpoint(CompletedCheckpoint completedCheckpoint) {
synchronized(lock) {
subsumedCheckpoints.add(completedCheckpoint);
}
} | 3.26 |
flink_CheckpointsCleaner_cleanSubsumedCheckpoints_rdh | /**
* Clean checkpoint that is not in the given {@param stillInUse}.
*
* @param upTo
* lowest CheckpointID which is still valid.
* @param stillInUse
* the state of those checkpoints are still referenced.
* @param postCleanAction
* post action after cleaning.
* @param executor
* is used to perform the cleanup logic.
*/
public void cleanSubsumedCheckpoints(long upTo, Set<Long> stillInUse, Runnable postCleanAction, Executor executor) {
synchronized(lock) {
        Iterator<CompletedCheckpoint> iterator = subsumedCheckpoints.iterator();
        while (iterator.hasNext()) {
CompletedCheckpoint checkpoint = iterator.next();
if ((checkpoint.getCheckpointID() < upTo) && (!stillInUse.contains(checkpoint.getCheckpointID()))) {
try {
LOG.debug("Try to discard checkpoint {}.", checkpoint.getCheckpointID());
cleanCheckpoint(checkpoint, checkpoint.shouldBeDiscardedOnSubsume(), postCleanAction, executor);
iterator.remove();
} catch (Exception e) {
LOG.warn("Fail to discard the old checkpoint {}.", checkpoint);
}
}
}
}
} | 3.26 |
flink_AbstractOrcFileInputFormat_seek_rdh | /**
* The argument of {@link RecordReader#seekToRow(long)} must come from {@link RecordReader#getRowNumber()}. The internal implementation of ORC is very confusing. It
* has special behavior when dealing with Predicate.
*/
public void seek(CheckpointedPosition position) throws IOException {
orcReader.seekToRow(position.getOffset());
recordsToSkip = position.getRecordsAfterOffset();
} | 3.26 |
flink_AbstractOrcFileInputFormat_orcVectorizedRowBatch_rdh | /**
* Gets the ORC VectorizedRowBatch structure from this batch.
*/
public OrcVectorizedBatchWrapper<BatchT> orcVectorizedRowBatch() {
return orcVectorizedRowBatch;
} | 3.26 |
flink_AbstractOrcFileInputFormat_createReader_rdh | // ------------------------------------------------------------------------
@Override
public OrcVectorizedReader<T, BatchT> createReader(final Configuration config, final SplitT split) throws IOException {
final int numBatchesToCirculate = config.getInteger(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY);
final Pool<OrcReaderBatch<T, BatchT>> poolOfBatches = createPoolOfBatches(split, numBatchesToCirculate);
final RecordReader orcReader = shim.createRecordReader(hadoopConfigWrapper.getHadoopConfig(), schema, selectedFields, conjunctPredicates, split.path(), split.offset(), split.length());
return new OrcVectorizedReader<>(shim, orcReader, poolOfBatches);
} | 3.26 |
flink_JobResultDeserializer_assertNextToken_rdh | /**
* Advances the token and asserts that it matches the required {@link JsonToken}.
*/
private static void assertNextToken(final JsonParser p, final JsonToken requiredJsonToken) throws IOException {
final JsonToken jsonToken = p.nextToken();
if (jsonToken != requiredJsonToken) {
throw new JsonMappingException(p, String.format("Expected token %s (was %s)", requiredJsonToken, jsonToken));
}
} | 3.26 |
flink_LookupCacheManager_keepCacheOnRelease_rdh | // ---------------------------- For testing purpose ------------------------------
public static void keepCacheOnRelease(boolean toKeep) {
keepCacheOnRelease = toKeep;
} | 3.26 |
flink_LookupCacheManager_registerCacheIfAbsent_rdh | /**
* Register a cache instance with identifier to the manager.
*
* <p>If the cache with the given identifier is already registered in the manager, this method
* will return the registered one, otherwise this method will register the given cache into the
* manager then return.
*
* @param cacheIdentifier
* identifier of the cache
* @param cache
* instance of cache trying to register
* @return instance of the shared cache
*/
public synchronized LookupCache registerCacheIfAbsent(String cacheIdentifier, LookupCache cache) {
checkNotNull(cache, "Could not register null cache in the manager");
RefCountedCache refCountedCache = managedCaches.computeIfAbsent(cacheIdentifier, identifier -> new RefCountedCache(cache));
refCountedCache.retain();
return refCountedCache.cache;
} | 3.26 |
flink_LookupCacheManager_unregisterCache_rdh | /**
* Release the cache with the given identifier from the manager.
*
* <p>The manager will track a reference count of managed caches, and will close the cache if
* the reference count reaches 0.
 */
public synchronized void unregisterCache(String cacheIdentifier) {
RefCountedCache refCountedCache = checkNotNull(managedCaches.get(cacheIdentifier), "Cache identifier '%s' is not registered", cacheIdentifier);
if (refCountedCache.release()) {
managedCaches.remove(cacheIdentifier);
}
} | 3.26 |
flink_LookupCacheManager_getInstance_rdh | /**
* Get the shared instance of {@link LookupCacheManager}.
*/
public static synchronized LookupCacheManager getInstance() {
if (instance == null) {
instance = new LookupCacheManager();
}
return instance;
} | 3.26 |
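The registration methods above implement a reference-counting scheme: the first registration stores the cache, later registrations reuse it, and the cache is dropped once every registrant has unregistered. A minimal, generic sketch of that pattern (hypothetical class, not Flink code):

```java
import java.util.HashMap;
import java.util.Map;

// Simplified sketch of reference-counted sharing as done by LookupCacheManager.
public class RefCountedRegistrySketch<T> {
    private static class Entry<T> {
        final T resource;
        int refCount;
        Entry(T resource) { this.resource = resource; }
    }

    private final Map<String, Entry<T>> entries = new HashMap<>();

    public synchronized T registerIfAbsent(String id, T resource) {
        // The first caller stores its resource; later callers get the shared one back.
        Entry<T> entry = entries.computeIfAbsent(id, k -> new Entry<>(resource));
        entry.refCount++;
        return entry.resource;
    }

    public synchronized void unregister(String id) {
        Entry<T> entry = entries.get(id);
        if (entry != null && --entry.refCount == 0) {
            entries.remove(id); // a real implementation would also close the resource here
        }
    }
}
```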
flink_ResourceManagerId_fromUuidOrNull_rdh | /**
* If the given uuid is null, this returns null, otherwise a ResourceManagerId that corresponds
* to the UUID, via {@link #ResourceManagerId(UUID)}.
*/
public static ResourceManagerId fromUuidOrNull(@Nullable UUID uuid) {
return uuid == null ? null : new ResourceManagerId(uuid);
} | 3.26 |
flink_ResourceManagerId_toUUID_rdh | /**
* Creates a UUID with the bits from this ResourceManagerId.
*/
public UUID toUUID() {
return new UUID(getUpperPart(), getLowerPart());
} | 3.26 |
flink_ResourceManagerId_fromUuid_rdh | /**
* Creates a ResourceManagerId that corresponds to the given UUID.
*/
public static ResourceManagerId fromUuid(UUID uuid) {
return new ResourceManagerId(uuid);
} | 3.26 |
flink_CachedDataStream_invalidate_rdh | /**
 * Invalidates the cached intermediate result of this DataStream to release the physical
 * resources. Users are not required to invoke this method to release physical resources unless
 * they want to. The cache will be recreated if it is used again after invalidation.
 */
public void invalidate() throws Exception {
final CacheTransformation<T> t = ((CacheTransformation<T>) (this.getTransformation()));
environment.invalidateClusterDataset(t.getDatasetId());
} | 3.26 |
flink_ApiExpressionUtils_objectToExpression_rdh | /**
* Converts a given object to an expression.
*
* <p>It converts:
*
* <ul>
* <li>{@code null} to null literal
* <li>{@link Row} to a call to a row constructor expression
* <li>{@link Map} to a call to a map constructor expression
* <li>{@link List} to a call to an array constructor expression
* <li>arrays to a call to an array constructor expression
* <li>Scala's {@code Seq} to an array constructor via reflection
* <li>Scala's {@code Map} to a map constructor via reflection
* <li>Scala's {@code BigDecimal} to a DECIMAL literal
* <li>if none of the above applies, the function tries to convert the object to a value
* literal with {@link #valueLiteral(Object)}
* </ul>
*
* @param expression
* An object to convert to an expression
*/
public static Expression objectToExpression(Object expression) {
    if (expression == null) {
return valueLiteral(null, DataTypes.NULL());
} else if (expression instanceof ApiExpression) {
return ((ApiExpression) (expression)).toExpr();
} else if (expression instanceof Expression) {
return ((Expression) (expression));
} else if (expression instanceof Row) {
RowKind kind = ((Row) (expression)).getKind();
if (kind != RowKind.INSERT) {
throw new ValidationException(String.format("Unsupported kind '%s' of a row [%s]. Only rows with 'INSERT' kind are supported when" + " converting to an expression.", kind, expression));
}
return convertRow(((Row) (expression)));
} else if (expression instanceof Map) {
return convertJavaMap(((Map<?, ?>) (expression)));
} else if (expression instanceof byte[]) {
// BINARY LITERAL
return valueLiteral(expression);
} else if (expression.getClass().isArray()) {
return convertArray(expression);
} else if (expression instanceof List) {
return convertJavaList(((List<?>) (expression)));
} else {
return convertScala(expression).orElseGet(() -> valueLiteral(expression));
}
} | 3.26 |
flink_ApiExpressionUtils_isFunction_rdh | /**
* Checks if the given expression is a given builtin function.
*
* @param expression
* expression to check
* @param functionDefinition
* expected function definition
* @return true if the given expression is a given function call
 */
public static boolean isFunction(Expression expression, BuiltInFunctionDefinition functionDefinition) {
    if (expression instanceof UnresolvedCallExpression) {
return ((UnresolvedCallExpression) (expression)).getFunctionDefinition() == functionDefinition;
}
if (expression instanceof CallExpression) {
return ((CallExpression) (expression)).getFunctionDefinition() == functionDefinition;
}
return false;
} | 3.26 |
flink_ApiExpressionUtils_isFunctionOfKind_rdh | /**
* Checks if the expression is a function call of given type.
*
* @param expression
* expression to check
* @param kind
* expected type of function
* @return true if the expression is function call of given type, false otherwise
*/
public static boolean isFunctionOfKind(Expression expression, FunctionKind kind) {
if (expression instanceof UnresolvedCallExpression) {
return ((UnresolvedCallExpression) (expression)).getFunctionDefinition().getKind() == kind;
}
    if (expression instanceof CallExpression) {
        return ((CallExpression) (expression)).getFunctionDefinition().getKind() == kind;
}
return false;
} | 3.26 |
flink_RetryingExecutor_execute_rdh | /**
* Execute the given action according to the retry policy.
*
* <p>NOTE: the action must be idempotent because multiple instances of it can be executed
* concurrently (if the policy allows retries).
*/
<T> void execute(RetryPolicy retryPolicy, RetriableAction<T> action) {
LOG.debug("execute with retryPolicy: {}", retryPolicy);
RetriableActionAttempt<T> task = RetriableActionAttempt.initialize(action, retryPolicy, blockingExecutor, attemptsPerTaskHistogram, totalAttemptsPerTaskHistogram, timer);
blockingExecutor.submit(task);
} | 3.26 |
flink_RichAsyncFunction_getDistributedCache_rdh | // -----------------------------------------------------------------------------------
// Unsupported operations
// -----------------------------------------------------------------------------------
@Override
public DistributedCache getDistributedCache() {
throw new UnsupportedOperationException("Distributed cache is not supported in rich async functions.");
} | 3.26 |
flink_RichAsyncFunction_getIterationAggregator_rdh | // -----------------------------------------------------------------------------------
// Unsupported operations
// -----------------------------------------------------------------------------------
@Override
public <T extends Aggregator<?>> T getIterationAggregator(String name) {
throw new UnsupportedOperationException("Iteration aggregators are not supported in rich async functions.");
} | 3.26 |
flink_AsyncSinkWriter_snapshotState_rdh | /**
* All in-flight requests that are relevant for the snapshot have been completed, but there may
* still be request entries in the internal buffers that are yet to be sent to the endpoint.
* These request entries are stored in the snapshot state so that they don't get lost in case of
* a failure/restart of the application.
*/
@Override
public List<BufferedRequestState<RequestEntryT>> snapshotState(long checkpointId) {
return Collections.singletonList(new BufferedRequestState<>(bufferedRequestEntries));
} | 3.26 |
flink_AsyncSinkWriter_createNextAvailableBatch_rdh | /**
* Creates the next batch of request entries while respecting the {@code maxBatchSize} and
* {@code maxBatchSizeInBytes}. Also adds these to the metrics counters.
*/
private List<RequestEntryT> createNextAvailableBatch(RequestInfo requestInfo) {
List<RequestEntryT> batch = new ArrayList<>(requestInfo.getBatchSize());
long batchSizeBytes = 0;
for (int i = 0; i < requestInfo.getBatchSize(); i++) {
long requestEntrySize = bufferedRequestEntries.peek().getSize();
if ((batchSizeBytes + requestEntrySize) > f2) {
break;
}
RequestEntryWrapper<RequestEntryT> elem = bufferedRequestEntries.remove();
batch.add(elem.getRequestEntry());
bufferedRequestEntriesTotalSizeInBytes -= requestEntrySize;
batchSizeBytes += requestEntrySize;
}
numRecordsOutCounter.inc(batch.size());
numBytesOutCounter.inc(batchSizeBytes);
return batch;
} | 3.26 |
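A standalone restatement of the same batching rule (stop at whichever of the element-count or byte-size budget is hit first). It does not use the AsyncSinkWriter types; all names are illustrative.

import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Illustrative only: drain a buffer into one batch, bounded by element count and total bytes.
final class BatchingSketch {
    static List<byte[]> nextBatch(Deque<byte[]> buffer, int maxBatchSize, long maxBatchSizeInBytes) {
        List<byte[]> batch = new ArrayList<>(maxBatchSize);
        long batchBytes = 0;
        while (!buffer.isEmpty() && batch.size() < maxBatchSize) {
            long entrySize = buffer.peek().length;
            if (batchBytes + entrySize > maxBatchSizeInBytes) {
                break; // adding this entry would exceed the byte budget
            }
            batch.add(buffer.remove());
            batchBytes += entrySize;
        }
        return batch;
    }
}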
flink_AsyncSinkWriter_completeRequest_rdh | /**
* Marks an in-flight request as completed and prepends failed requestEntries back to the
* internal requestEntry buffer for later retry.
*
* @param failedRequestEntries
* requestEntries that need to be retried
*/
private void completeRequest(List<RequestEntryT> failedRequestEntries, int batchSize, long requestStartTime) throws InterruptedException {
lastSendTimestamp = requestStartTime;
ackTime = System.currentTimeMillis();
inFlightRequestsCount--;
rateLimitingStrategy.registerCompletedRequest(new BasicResultInfo(failedRequestEntries.size(), batchSize));
ListIterator<RequestEntryT> iterator = failedRequestEntries.listIterator(failedRequestEntries.size());
while (iterator.hasPrevious()) {
addEntryToBuffer(iterator.previous(), true);
}
nonBlockingFlush();
} | 3.26 |
flink_AsyncSinkWriter_nonBlockingFlush_rdh | /**
* Determines if a call to flush will be non-blocking (i.e. {@code inFlightRequestsCount} is
* strictly smaller than {@code maxInFlightRequests}). Also requires one of the following
* conditions to be met:
*
* <ul>
* <li>The number of elements buffered is greater than or equal to the {@code maxBatchSize}
* <li>The sum of the size in bytes of all records in the buffer is greater than or equal to
* {@code maxBatchSizeInBytes}
* </ul>
*/
private void nonBlockingFlush() throws InterruptedException {
while ((!rateLimitingStrategy.shouldBlock(createRequestInfo())) && ((bufferedRequestEntries.size() >= getNextBatchSizeLimit()) || (bufferedRequestEntriesTotalSizeInBytes >= f2))) {
flush();
}
} | 3.26 |
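The trigger can be read as a single boolean condition; the sketch below restates it with illustrative parameter names rather than the writer's actual fields.

// Illustrative restatement of the condition in nonBlockingFlush(): flush only while the
// rate-limiting strategy would not block and the buffer is full enough by count or by bytes.
final class FlushTriggerSketch {
    static boolean shouldFlushNow(
            boolean rateLimiterWouldBlock,
            int bufferedCount,
            long bufferedBytes,
            int maxBatchSize,
            long maxBatchSizeInBytes) {
        boolean bufferFullEnough =
                bufferedCount >= maxBatchSize || bufferedBytes >= maxBatchSizeInBytes;
        return !rateLimiterWouldBlock && bufferFullEnough;
    }
}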
flink_AsyncSinkWriter_flush_rdh | /**
* In-flight requests will be retried if the sink is still healthy. But if in-flight requests
* fail after a checkpoint has been triggered and Flink needs to recover from the checkpoint,
* the (failed) in-flight requests are gone and cannot be retried. Hence, there cannot be any
* outstanding in-flight requests when a commit is initialized.
*
* <p>To this end, all in-flight requests need to be completed before proceeding with the commit.
*/
@Override
public void flush(boolean flush) throws InterruptedException {
while ((inFlightRequestsCount > 0) || ((bufferedRequestEntries.size() > 0) && flush)) {
yieldIfThereExistsInFlightRequests();
if (flush) {
flush();
}
}
} | 3.26 |
flink_LeaderInformationRegister_forComponentId_rdh | /**
* Returns the {@link LeaderInformation} that is stored or an empty {@code Optional} if no entry
* exists for the passed {@code componentId}.
*/
public Optional<LeaderInformation> forComponentId(String componentId) {
return Optional.ofNullable(leaderInformationPerComponentId.get(componentId));
} | 3.26 |
flink_LeaderInformationRegister_of_rdh | /**
* Creates a single-entry instance containing only the passed information.
*/
public static LeaderInformationRegister of(String componentId, LeaderInformation leaderInformation) {
return new LeaderInformationRegister(Collections.singletonMap(componentId, leaderInformation));
} | 3.26 |
flink_LeaderInformationRegister_merge_rdh | /**
* Merges another {@code LeaderInformationRegister} with additional leader information into a
* new {@code LeaderInformationRegister} instance. Any existing {@link LeaderInformation} for
* the passed {@code componentId} will be overwritten.
*
* <p>Empty {@code LeaderInformation} results in the removal of the corresponding entry (if it
* exists).
*/
public static LeaderInformationRegister merge(@Nullable
LeaderInformationRegister leaderInformationRegister, String componentId, LeaderInformation leaderInformation) {
final Map<String, LeaderInformation> existingLeaderInformation = new HashMap<>(leaderInformationRegister == null ? Collections.emptyMap() : leaderInformationRegister.leaderInformationPerComponentId);
if (leaderInformation.isEmpty()) {
existingLeaderInformation.remove(componentId);
} else {
existingLeaderInformation.put(componentId, leaderInformation);
}
return new LeaderInformationRegister(existingLeaderInformation);
} | 3.26 |
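A usage sketch of the copy-on-write semantics described above: merging a non-empty entry adds or overwrites it in a new instance, while merging an empty one removes it. The package locations, the LeaderInformation.known(...) factory, and the component ids are assumptions for illustration.

import java.util.UUID;
import org.apache.flink.runtime.leaderelection.LeaderInformation;
import org.apache.flink.runtime.leaderelection.LeaderInformationRegister;

// Hedged sketch of the merge semantics; component ids and addresses are hypothetical.
final class LeaderRegisterSketch {
    static void example() {
        LeaderInformationRegister register = LeaderInformationRegister.of(
                "resourcemanager", LeaderInformation.known(UUID.randomUUID(), "akka://flink/rm"));

        // Adds a second entry; the original register instance stays unchanged.
        register = LeaderInformationRegister.merge(register, "dispatcher",
                LeaderInformation.known(UUID.randomUUID(), "akka://flink/dispatcher"));

        // Merging an empty LeaderInformation removes the entry again.
        register = LeaderInformationRegister.merge(register, "dispatcher", LeaderInformation.empty());
    }
}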
flink_LeaderInformationRegister_clear_rdh | /**
* Creates a new {@code LeaderInformationRegister} that matches the passed {@code LeaderInformationRegister} except for the entry of {@code componentId} which is removed if it
* existed.
*/
public static LeaderInformationRegister clear(@Nullable
LeaderInformationRegister leaderInformationRegister, String componentId) {
if ((leaderInformationRegister == null) || (!leaderInformationRegister.getRegisteredComponentIds().iterator().hasNext())) {
return LeaderInformationRegister.empty();
}
return merge(leaderInformationRegister, componentId, LeaderInformation.empty());
} | 3.26 |
flink_LeaderInformationRegister_getRegisteredComponentIds_rdh | /**
* Returns the {@code componentId}s for which leader information is stored.
*/
public Iterable<String> getRegisteredComponentIds() {
return leaderInformationPerComponentId.keySet();
} | 3.26 |
flink_LeaderInformationRegister_hasLeaderInformation_rdh | /**
* Checks whether the register holds non-empty {@link LeaderInformation} for the passed {@code componentId}.
*/
public boolean hasLeaderInformation(String componentId) {
return leaderInformationPerComponentId.containsKey(componentId);
}
/**
* Checks that no non-empty {@link LeaderInformation} is stored.
*
* @return {@code true}, if there is no entry that refers to a non-empty {@code LeaderInformation}; otherwise {@code false} (i.e. either no information is stored under
any {@code componentId} or there are entries for certain {@code componentId}s that refer
to an empty {@code LeaderInformation} | 3.26 |
flink_GlobalConfiguration_isSensitive_rdh | /**
* Checks whether the key is a hidden key.
*
* @param key
* the config key
*/
public static boolean isSensitive(String key) {
Preconditions.checkNotNull(key, "key is null");
final String v13 = key.toLowerCase();
for (String hideKey : SENSITIVE_KEYS) {
if ((v13.length() >= hideKey.length()) && v13.contains(hideKey)) {
return true;
}
}
return false;
} | 3.26 |
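A small illustration of the substring-based check; the concrete keys below are examples chosen here, and the expected results assume that SENSITIVE_KEYS contains fragments such as "secret".

import org.apache.flink.configuration.GlobalConfiguration;

// Hedged example: keys whose lower-cased name contains a sensitive fragment are reported as
// sensitive so that their values can be masked when the configuration is logged.
final class SensitiveKeySketch {
    static void example() {
        boolean masked = GlobalConfiguration.isSensitive("s3.access-key.secret");         // expected: true
        boolean plain = GlobalConfiguration.isSensitive("taskmanager.numberOfTaskSlots"); // expected: false
        System.out.println(masked + " " + plain);
    }
}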
flink_GlobalConfiguration_loadConfiguration_rdh | /**
* Loads the configuration files from the specified directory. If the dynamic properties
* configuration is not null, then it is added to the loaded configuration.
*
* @param configDir
* directory to load the configuration from
* @param dynamicProperties
* configuration file containing the dynamic properties. Null if none.
* @return The configuration loaded from the given configuration directory
*/
public static Configuration loadConfiguration(final String configDir, @Nullable
final Configuration dynamicProperties) {
if (configDir == null) {
throw new IllegalArgumentException("Given configuration directory is null, cannot load configuration");
}
final File confDirFile = new File(configDir);
if (!confDirFile.exists()) {
throw new IllegalConfigurationException(((("The given configuration directory name '" + configDir) + "' (") + confDirFile.getAbsolutePath()) + ") does not describe an existing directory.");
}
// get Flink yaml configuration file
final File yamlConfigFile = new File(confDirFile, FLINK_CONF_FILENAME);
if (!yamlConfigFile.exists()) {
throw new IllegalConfigurationException(((("The Flink config file '" + yamlConfigFile) + "' (") + yamlConfigFile.getAbsolutePath()) + ") does not exist.");
}
Configuration configuration = loadYAMLResource(yamlConfigFile);
logConfiguration("Loading", configuration);
if (dynamicProperties != null) {
logConfiguration("Loading dynamic", dynamicProperties);
configuration.addAll(dynamicProperties);
}
return configuration;
} | 3.26 |
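A usage sketch of loading the Flink configuration plus dynamic overrides; the directory path and the override key are placeholders, not values required by the method.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.GlobalConfiguration;

// Hedged example: dynamic properties (e.g. command-line -D overrides) are applied on top of
// the values read from the configuration directory.
final class LoadConfigurationSketch {
    static Configuration load() {
        Configuration dynamicProperties = new Configuration();
        dynamicProperties.setString("rest.port", "8082"); // hypothetical override

        // "/opt/flink/conf" is a placeholder; the directory must contain the Flink config file.
        return GlobalConfiguration.loadConfiguration("/opt/flink/conf", dynamicProperties);
    }
}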