name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_JobManagerCheckpointStorage_getSavepointPath_rdh
|
/**
*
* @return The default location where savepoints will be externalized if set.
*/
@Nullable
public Path getSavepointPath() {
return location.getBaseSavepointPath();
}
| 3.26 |
flink_ReduceNode_getOperator_rdh
|
// ------------------------------------------------------------------------
@Override
public ReduceOperatorBase<?, ?> getOperator() {
return ((ReduceOperatorBase<?, ?>) (super.getOperator()));
}
| 3.26 |
flink_ValueDataTypeConverter_extractDataType_rdh
|
/**
* Returns the clearly identifiable data type if possible. For example, {@code 12L} can be
* expressed as {@code DataTypes.BIGINT().notNull()}. However, for example, {@code null} could
* be any type and is not supported.
*
* <p>All types of the {@link LogicalTypeFamily#PREDEFINED} family, symbols, and arrays are
* supported.
*/
public static Optional<DataType> extractDataType(Object value) {
if (value == null) {
return Optional.empty();
}
DataType convertedDataType = null;
if (value instanceof String) {
convertedDataType = convertToCharType(((String) (value)));
} else if (value instanceof byte[]) {
convertedDataType = convertToBinaryType(((byte[]) (value)));
} else if (value instanceof BigDecimal) {
convertedDataType = convertToDecimalType(((BigDecimal) (value)));
} else if (value instanceof LocalTime) {
convertedDataType = convertToTimeType(((LocalTime) (value)));
} else if (value instanceof LocalDateTime) {
convertedDataType = convertToTimestampType(((LocalDateTime) (value)).getNano());
} else if (value instanceof Timestamp) {
convertedDataType = convertToTimestampType(((Timestamp) (value)).getNanos());
} else if (value instanceof ZonedDateTime) {
convertedDataType = convertToZonedTimestampType(((ZonedDateTime) (value)).getNano());
} else if (value instanceof OffsetDateTime) {
convertedDataType = convertToZonedTimestampType(((OffsetDateTime) (value)).getNano());
} else if (value instanceof Instant) {
convertedDataType = convertToLocalZonedTimestampType(((Instant) (value)).getNano());
} else if (value instanceof Period) {
convertedDataType = convertToYearMonthIntervalType(((Period) (value)).getYears());
} else if (value instanceof Duration) {
final Duration duration = ((Duration) (value));
convertedDataType = convertToDayTimeIntervalType(duration.toDays(), duration.getNano());
} else if (value instanceof Object[]) {
// don't let the class-based extraction kick in if array elements differ
return convertToArrayType(((Object[]) (value))).map(dt -> dt.notNull().bridgedTo(value.getClass()));
}
final Optional<DataType> resultType;
if (convertedDataType != null) {
resultType = Optional.of(convertedDataType);
} else {
// class-based extraction is possible for BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE,
// DATE, TIME with java.sql.Time, and arrays of primitive types
resultType = ClassDataTypeConverter.extractDataType(value.getClass());
}
return resultType.map(dt -> dt.notNull().bridgedTo(value.getClass()));
}
| 3.26 |
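The converter above dispatches on the literal's runtime class and only falls back to class-based extraction when no value-based rule matched. Below is a minimal, self-contained sketch of that dispatch-then-fallback pattern; it is not Flink's actual converter, and the type labels are hypothetical placeholders for Flink's `DataType`.

```java
import java.math.BigDecimal;
import java.util.Optional;

public class LiteralTypeSketch {
    // Value-based rules first, then a class-based fallback, mirroring the structure above.
    static Optional<String> extract(Object value) {
        if (value == null) {
            return Optional.empty(); // null carries no usable type information
        }
        String converted = null;
        if (value instanceof String) {
            converted = "CHAR(" + ((String) value).length() + ")";
        } else if (value instanceof BigDecimal) {
            BigDecimal d = (BigDecimal) value;
            converted = "DECIMAL(" + d.precision() + ", " + d.scale() + ")";
        }
        if (converted != null) {
            return Optional.of(converted);
        }
        // class-based fallback for values whose class alone determines the type
        if (value instanceof Boolean) {
            return Optional.of("BOOLEAN");
        }
        if (value instanceof Integer) {
            return Optional.of("INT");
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(extract(new BigDecimal("12.50"))); // Optional[DECIMAL(4, 2)]
        System.out.println(extract(null));                    // Optional.empty
    }
}
```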
flink_MemorySize_add_rdh
|
// ------------------------------------------------------------------------
// Calculations
// ------------------------------------------------------------------------
public MemorySize add(MemorySize that) {
return new MemorySize(Math.addExact(this.bytes, that.bytes));
}
| 3.26 |
flink_MemorySize_parseBytes_rdh
|
/**
* Parses the given string as bytes. The supported expressions are listed under {@link MemorySize}.
*
* @param text
* The string to parse
* @return The parsed size, in bytes.
* @throws IllegalArgumentException
* Thrown, if the expression cannot be parsed.
*/
public static long parseBytes(String text) throws IllegalArgumentException {
checkNotNull(text, "text");
final String trimmed = text.trim();
checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
final int len = trimmed.length();
int pos = 0;
char current;
while (((pos < len) && ((current = trimmed.charAt(pos)) >= '0')) && (current <= '9')) {
pos++;
}
final String number = trimmed.substring(0, pos);
final String unit = trimmed.substring(pos).trim().toLowerCase(Locale.US);
if (number.isEmpty()) {
throw new NumberFormatException("text does not start with a number");
}
final long value;
try {
value = Long.parseLong(number);// this throws a NumberFormatException on overflow
} catch (NumberFormatException e) {
throw new IllegalArgumentException(("The value '" + number) + "' cannot be represented as a 64-bit number (numeric overflow).");
}
final long multiplier = parseUnit(unit).map(MemoryUnit::getMultiplier).orElse(1L);
final long result = value * multiplier;
// check for overflow
if ((result / multiplier) != value) {
throw new IllegalArgumentException(("The value '" + text) + "' cannot be represented as a 64-bit number of bytes (numeric overflow).");
}
return result;
}
| 3.26 |
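parseBytes scans the leading digits, resolves the trailing unit to a multiplier, and re-checks the multiplication for overflow. The following stripped-down sketch reuses the same scan-and-multiply approach; the unit table and class name are illustrative, not Flink's (Flink's MemoryUnit covers many more aliases such as "kb" and "kibibytes").

```java
import java.util.Locale;
import java.util.Map;

public class ByteSizeParser {
    // Illustrative unit table; an empty unit means plain bytes.
    private static final Map<String, Long> UNITS =
            Map.of("", 1L, "b", 1L, "k", 1L << 10, "m", 1L << 20, "g", 1L << 30, "t", 1L << 40);

    static long parseBytes(String text) {
        final String trimmed = text.trim();
        int pos = 0;
        while (pos < trimmed.length() && Character.isDigit(trimmed.charAt(pos))) {
            pos++; // consume the leading digits
        }
        final String number = trimmed.substring(0, pos);
        final String unit = trimmed.substring(pos).trim().toLowerCase(Locale.US);
        if (number.isEmpty()) {
            throw new IllegalArgumentException("text does not start with a number: " + text);
        }
        final long value = Long.parseLong(number);
        final Long multiplier = UNITS.get(unit);
        if (multiplier == null) {
            throw new IllegalArgumentException("unknown unit: " + unit);
        }
        final long result = value * multiplier;
        if (result / multiplier != value) { // same overflow check as above
            throw new IllegalArgumentException("numeric overflow for: " + text);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(parseBytes("64m"));  // 67108864
        System.out.println(parseBytes("1024")); // 1024 (no unit -> bytes)
    }
}
```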
flink_MemorySize_m0_rdh
|
// ------------------------------------------------------------------------
// Parsing
// ------------------------------------------------------------------------
/**
* Parses the given string as a MemorySize.
*
* @param text
* The string to parse
* @return The parsed MemorySize
* @throws IllegalArgumentException
* Thrown, if the expression cannot be parsed.
*/
public static MemorySize m0(String text) throws IllegalArgumentException {
return new MemorySize(parseBytes(text));
}
| 3.26 |
flink_MemorySize_getMebiBytes_rdh
|
/**
* Gets the memory size in Mebibytes (= 1024 Kibibytes).
*/
public int getMebiBytes() {
return ((int) (bytes >> 20));
}
| 3.26 |
flink_MemorySize_getKibiBytes_rdh
|
/**
* Gets the memory size in Kibibytes (= 1024 bytes).
*/
public long getKibiBytes() {
return bytes >> 10;
}
| 3.26 |
flink_MemorySize_parse_rdh
|
/**
* Parses the given string with a default unit.
*
* @param text
* The string to parse.
* @param defaultUnit
* specify the default unit.
* @return The parsed MemorySize.
* @throws IllegalArgumentException
* Thrown, if the expression cannot be parsed.
*/
public static MemorySize parse(String text, MemoryUnit defaultUnit) throws IllegalArgumentException {
if (!hasUnit(text)) {
return m0(text + defaultUnit.getUnits()[0]);
}
return m0(text);
}
| 3.26 |
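Since the default unit is only appended when the input carries no unit of its own, a bare number and an explicitly suffixed value resolve to the same size. A hedged usage sketch against the two-argument signature shown above (in this dump the single-argument variant appears under the obfuscated name m0; treating "m" as the first unit alias of MemoryUnit.MEGA_BYTES is an assumption):

```java
// Both calls end up parsing "128m" internally, assuming "m" is MEGA_BYTES' first unit alias.
MemorySize a = MemorySize.parse("128", MemoryUnit.MEGA_BYTES);
MemorySize b = MemorySize.parse("128m", MemoryUnit.MEGA_BYTES); // unit already present, left untouched
// a.getBytes() == b.getBytes() == 128L << 20
```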
flink_MemorySize_getTebiBytes_rdh
|
/**
* Gets the memory size in Tebibytes (= 1024 Gibibytes).
*/
public long getTebiBytes() {
return bytes >> 40;
}
| 3.26 |
flink_MemorySize_getBytes_rdh
|
// ------------------------------------------------------------------------
/**
* Gets the memory size in bytes.
*/
public long getBytes()
{
return bytes;
}
| 3.26 |
flink_MemorySize_hashCode_rdh
|
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return (int) (bytes ^ (bytes >>> 32));
}
| 3.26 |
flink_MemorySize_getGibiBytes_rdh
|
/**
* Gets the memory size in Gibibytes (= 1024 Mebibytes).
*/
public long getGibiBytes() {
return bytes >> 30;
}
| 3.26 |
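Each of these accessors converts with a right shift, i.e. truncating integer division by a power of 1024. A quick worked example of the values the getters above would return:

```java
long bytes = (3L << 30) + 512;   // 3 GiB plus a few extra bytes
long kib = bytes >> 10;          // 3145728
long mib = bytes >> 20;          // 3072
long gib = bytes >> 30;          // 3   (the 512-byte remainder is truncated)
```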
flink_SliceAssigners_hopping_rdh
|
/**
* Creates a hopping window {@link SliceAssigner} that assigns elements to slices of hopping
* windows.
*
* @param rowtimeIndex
* the index of rowtime field in the input row, {@code -1} if based on
* processing time.
* @param shiftTimeZone
* The shift timezone of the window, if the proctime or rowtime type is
* TIMESTAMP_LTZ, the shift timezone is the timezone user configured in TableConfig, other
* cases the timezone is UTC which means never shift when assigning windows.
* @param slide
* the slide interval of the generated windows.
*/
public static HoppingSliceAssigner hopping(int rowtimeIndex, ZoneId shiftTimeZone, Duration size, Duration slide) {
return new HoppingSliceAssigner(rowtimeIndex, shiftTimeZone, size.toMillis(), slide.toMillis(), 0);
}
| 3.26 |
flink_SliceAssigners_withOffset_rdh
|
/**
* Creates a new {@link CumulativeSliceAssigner} with a new specified offset.
*/
public CumulativeSliceAssigner withOffset(Duration offset) {
return new CumulativeSliceAssigner(rowtimeIndex, shiftTimeZone, maxSize, step, offset.toMillis());
}
| 3.26 |
flink_SliceAssigners_sliced_rdh
|
/**
* Creates a {@link SliceAssigner} for elements which already have an attached slice end
* timestamp.
*
* @param sliceEndIndex
* the index of slice end field in the input row, mustn't be a negative
* value.
* @param innerAssigner
* the inner assigner which assigns the attached windows
*/
public static SliceAssigner sliced(int sliceEndIndex, SliceAssigner innerAssigner) {
if (innerAssigner instanceof SliceSharedAssigner) {
return new SlicedSharedSliceAssigner(sliceEndIndex, ((SliceSharedAssigner) (innerAssigner)));
} else {
return new SlicedUnsharedSliceAssigner(sliceEndIndex, innerAssigner);
}
}
| 3.26 |
flink_SliceAssigners_windowed_rdh
|
/**
* Creates a {@link SliceAssigner} that assigns elements which already have attached window start
* and window end timestamps to slices. The assigned slice is equal to the given window.
*
* @param windowEndIndex
* the index of window end field in the input row, mustn't be a negative
* value.
* @param innerAssigner
* the inner assigner which assigns the attached windows
*/
public static WindowedSliceAssigner windowed(int windowEndIndex, SliceAssigner innerAssigner) {
return new WindowedSliceAssigner(windowEndIndex, innerAssigner);
}
| 3.26 |
flink_SliceAssigners_tumbling_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Creates a tumbling window {@link SliceAssigner} that assigns elements to slices of tumbling
* windows.
*
* @param rowtimeIndex
* the index of rowtime field in the input row, {@code -1} if based on
* processing time.
* @param shiftTimeZone
* The shift timezone of the window, if the proctime or rowtime type is
* TIMESTAMP_LTZ, the shift timezone is the timezone user configured in TableConfig, other
* cases the timezone is UTC which means never shift when assigning windows.
* @param size
* the size of the generated windows.
*/
public static TumblingSliceAssigner tumbling(int rowtimeIndex, ZoneId shiftTimeZone, Duration size) {
return new TumblingSliceAssigner(rowtimeIndex, shiftTimeZone, size.toMillis(), 0);
}
| 3.26 |
flink_SliceAssigners_cumulative_rdh
|
/**
* Creates a cumulative window {@link SliceAssigner} that assigns elements to slices of
* cumulative windows.
*
* @param rowtimeIndex
* the index of rowtime field in the input row, {@code -1} if based on
* processing time.
* @param shiftTimeZone
* The shift timezone of the window, if the proctime or rowtime type is
* TIMESTAMP_LTZ, the shift timezone is the timezone user configured in TableConfig, other
* cases the timezone is UTC which means never shift when assigning windows.
* @param step
* the step interval of the generated windows.
*/
public static CumulativeSliceAssigner cumulative(int rowtimeIndex, ZoneId shiftTimeZone, Duration maxSize, Duration step) {
return new CumulativeSliceAssigner(rowtimeIndex, shiftTimeZone, maxSize.toMillis(), step.toMillis(), 0);
}
| 3.26 |
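Taken together, the factory methods above cover tumbling, hopping and cumulative slicing. A hedged construction sketch that sticks to the signatures shown (the rowtime index 2 and the UTC zone are arbitrary placeholders, and Flink's table runtime classes are assumed on the classpath):

```java
ZoneId zone = ZoneId.of("UTC");
// rowtimeIndex = 2 points at the rowtime field of the input row; -1 would mean processing time.
SliceAssigner tumble = SliceAssigners.tumbling(2, zone, Duration.ofMinutes(5));
SliceAssigner hop = SliceAssigners.hopping(2, zone, Duration.ofMinutes(10), Duration.ofMinutes(5));
SliceAssigner cumulate = SliceAssigners.cumulative(2, zone, Duration.ofHours(1), Duration.ofMinutes(15));
```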
flink_DataStructureConverter_open_rdh
|
/**
* Converter between internal and external data structure.
*
* <p>Converters are serializable and can be passed to runtime operators. However, converters are
* not thread-safe.
*
* @param <I>
* internal data structure (see {@link RowData})
* @param <E>
* external data structure (see {@link DataType#getConversionClass()})
*/
@Internal
| 3.26 |
flink_DataStructureConverter_toInternalOrNull_rdh
|
/**
* Converts to internal data structure or {@code null}.
*
* <p>The nullability could be derived from the data type. However, this method reduces null
* checks.
*/
default I toInternalOrNull(E external) {
if (external == null) {
return null;
}
return toInternal(external);
}
| 3.26 |
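The null-passthrough lives in a default method so that concrete converters only need to implement the non-null path. A minimal sketch of the same pattern in plain Java (the interface and names here are illustrative, not Flink's DataStructureConverter):

```java
public class NullSafeConverterSketch {
    interface Converter<I, E> {
        I toInternal(E external); // implementors may assume a non-null argument

        default I toInternalOrNull(E external) {
            return external == null ? null : toInternal(external);
        }
    }

    public static void main(String[] args) {
        // Example implementation: external String -> internal length.
        Converter<Integer, String> lengthConverter = String::length;
        System.out.println(lengthConverter.toInternalOrNull("flink")); // 5
        System.out.println(lengthConverter.toInternalOrNull(null));    // null, toInternal never runs
    }
}
```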
flink_StreamingFileWriter_closePartFileForPartitions_rdh
|
/**
* Close in-progress part file when partition is committable.
*/
private void closePartFileForPartitions() throws Exception {
if (partitionCommitPredicate != null) {
final Iterator<Map.Entry<String, Long>> iterator = inProgressPartitions.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Long> entry = iterator.next();
String partition = entry.getKey();
Long creationTime = entry.getValue();
PredicateContext predicateContext = PartitionCommitPredicate.createPredicateContext(partition, creationTime, processingTimeService.getCurrentProcessingTime(), currentWatermark);
if (partitionCommitPredicate.isPartitionCommittable(predicateContext)) {
// if partition is committable, close in-progress part file in this partition
buckets.closePartFileForBucket(partition);
iterator.remove();
}
}
}
}
| 3.26 |
flink_ParserUtils_parsePluginOutput_rdh
|
/**
* Iterates over the given lines, identifying plugin execution blocks with the given pattern and
* parses the plugin output with the given parser.
*
* <p>This method assumes that the given pattern matches at most once for each module.
*
* <p>The given pattern must include a {@code module} group that captures the module that the
* plugin runs on (without the scala suffix!).
*
* @param lines
* maven output lines
* @param executionLinePattern
* pattern that matches plugin executions
* @param blockParser
* parser for the plugin block
* @return map containing the parser result for each module
* @param <D>
* block parser output
*/
public static <D> Map<String, D> parsePluginOutput(Stream<String> lines, Pattern executionLinePattern, Function<Iterator<String>, D> blockParser) {
final Map<String, D> result = new LinkedHashMap<>();
final Iterator<String> iterator = lines.iterator();
while (iterator.hasNext()) {
Matcher moduleMatcher = executionLinePattern.matcher(iterator.next());
while (!moduleMatcher.find()) {
if (iterator.hasNext()) {
moduleMatcher = executionLinePattern.matcher(iterator.next());
} else {
return result;
}
}
final String currentModule = moduleMatcher.group("module");
if (!iterator.hasNext()) {
throw new IllegalStateException("Expected more output from the plugin.");
}
result.put(currentModule, blockParser.apply(iterator));
}
return result;
}
| 3.26 |
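The execution-line pattern must expose a named module group, and the block parser receives the iterator positioned right after the matched line. A hedged usage sketch against the signature shown above (the log lines and the pattern are invented for illustration):

```java
Pattern executionLine = Pattern.compile("--- some-plugin:check \\((?<module>[\\w-]+)\\) ---");
Stream<String> lines = Stream.of(
        "--- some-plugin:check (flink-core) ---",
        "42 findings",
        "--- some-plugin:check (flink-runtime) ---",
        "7 findings");
// The block parser consumes exactly the single line that follows each execution line.
Map<String, Integer> findings = ParserUtils.parsePluginOutput(
        lines, executionLine, it -> Integer.parseInt(it.next().split(" ")[0]));
// findings = {flink-core=42, flink-runtime=7}
```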
flink_Channel_getLocalStrategyComparator_rdh
|
/**
* Gets the local strategy comparator from this Channel.
*
* @return The local strategy comparator.
*/
public TypeComparatorFactory<?> getLocalStrategyComparator() {
return localStrategyComparator;
}
| 3.26 |
flink_Channel_getRelativeTempMemory_rdh
|
/**
* Gets the memory for materializing the channel's result from this Channel.
*
* @return The temp memory.
*/
public double getRelativeTempMemory() {
return this.relativeTempMemory;
}
| 3.26 |
flink_Channel_getRequiredGlobalProps_rdh
|
// --------------------------------------------------------------------------------------------
// Data Property Handling
// --------------------------------------------------------------------------------------------
public RequestedGlobalProperties getRequiredGlobalProps() {
return requiredGlobalProps;
}
| 3.26 |
flink_Channel_getDataExchangeMode_rdh
|
/**
* Gets the data exchange mode (batch / pipelined) to use for the data exchange of this channel.
*
* @return The data exchange mode of this channel.
*/
public DataExchangeMode getDataExchangeMode() {
return dataExchangeMode;
}
| 3.26 |
flink_Channel_getSerializer_rdh
|
/**
* Gets the serializer from this Channel.
*
* @return The serializer.
*/
public TypeSerializerFactory<?> getSerializer() {
return f0;
}
| 3.26 |
flink_Channel_setLocalStrategyComparator_rdh
|
/**
* Sets the local strategy comparator for this Channel.
*
* @param localStrategyComparator
* The local strategy comparator to set.
*/
public void setLocalStrategyComparator(TypeComparatorFactory<?> localStrategyComparator) {
this.localStrategyComparator = localStrategyComparator;
}
| 3.26 |
flink_Channel_toString_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public String toString() {
return "Channel (" + this.source
+ (this.target == null ? ')' : ") -> (" + this.target + ')')
+ '[' + this.shipStrategy + "] [" + this.localStrategy + "] "
+ ((this.tempMode == null) || (this.tempMode == TempMode.NONE) ? "{NO-TEMP}" : this.tempMode);
}
| 3.26 |
flink_Channel_getTarget_rdh
|
/**
* Gets the target of this Channel.
*
* @return The target.
*/
public PlanNode getTarget() {
return this.target;
}
| 3.26 |
flink_Channel_m3_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Utility method used while swapping binary union nodes for n-ary union nodes.
*/
public void m3(PlanNode newUnionNode) {
if (!(this.source instanceof BinaryUnionPlanNode)) {
throw new IllegalStateException();
}
else {
this.source = newUnionNode;
}
}
| 3.26 |
flink_Channel_getSource_rdh
|
// --------------------------------------------------------------------------------------------
// Accessors
// --------------------------------------------------------------------------------------------
/**
* Gets the source of this Channel.
*
* @return The source.
*/
@Override
public PlanNode getSource() {
return this.source;
}
| 3.26 |
flink_Channel_getReplicationFactor_rdh
|
/**
* Returns the replication factor of the connection.
*
* @return The replication factor of the connection.
*/
public int getReplicationFactor() {
return this.replicationFactor;
}
| 3.26 |
flink_Channel_setTarget_rdh
|
/**
* Sets the target of this Channel.
*
* @param target
* The target.
*/
public void setTarget(PlanNode target) {
this.target = target;
}
| 3.26 |
flink_Channel_getEstimatedOutputSize_rdh
|
// --------------------------------------------------------------------------------------------
// Statistic Estimates
// --------------------------------------------------------------------------------------------
@Override
public long getEstimatedOutputSize() {
long estimate = this.source.template.getEstimatedOutputSize();
return estimate < 0 ? estimate : estimate * this.replicationFactor;
}
| 3.26 |
flink_Channel_setTempMode_rdh
|
/**
* Sets the temp mode of the connection.
*
* @param tempMode
* The temp mode of the connection.
*/
public void setTempMode(TempMode tempMode)
{
this.tempMode = tempMode;
}
| 3.26 |
flink_Channel_setSerializer_rdh
|
/**
* Sets the serializer for this Channel.
*
* @param serializer
* The serializer to set.
*/
public void setSerializer(TypeSerializerFactory<?> serializer) {
this.f0 = serializer;
}
| 3.26 |
flink_Channel_setShipStrategyComparator_rdh
|
/**
* Sets the ship strategy comparator for this Channel.
*
* @param shipStrategyComparator
* The ship strategy comparator to set.
*/
public void setShipStrategyComparator(TypeComparatorFactory<?> shipStrategyComparator) {
this.shipStrategyComparator = shipStrategyComparator;
}
| 3.26 |
flink_Channel_m4_rdh
|
// --------------------------------------------------------------------------------------------
public int m4() {
return this.source.getOptimizerNode().getMaxDepth() + 1;
}
| 3.26 |
flink_Channel_setReplicationFactor_rdh
|
/**
* Sets the replication factor of the connection.
*
* @param factor
* The replication factor of the connection.
*/
public void setReplicationFactor(int factor) {
this.replicationFactor = factor;
}
| 3.26 |
flink_Channel_setRelativeTempMemory_rdh
|
/**
* Sets the memory for materializing the channel's result from this Channel.
*
* @param relativeTempMemory
* The memory for materialization.
*/
public void setRelativeTempMemory(double relativeTempMemory) {
this.relativeTempMemory = relativeTempMemory;
}
| 3.26 |
flink_AbstractColumnReader_readToVector_rdh
|
/**
* Reads `total` values from this columnReader into column.
*/
@Override
public final void readToVector(int readNumber, VECTOR vector) throws IOException {
int rowId = 0;
WritableIntVector dictionaryIds = null;
if (dictionary != null) {
dictionaryIds = vector.reserveDictionaryIds(readNumber);
}
while (readNumber > 0) {
// Compute the number of values we want to read in this page.
int leftInPage = ((int) (endOfPageValueCount - valuesRead));
if (leftInPage == 0) {
DataPage page = pageReader.readPage();
if (page instanceof DataPageV1) {
readPageV1(((DataPageV1) (page)));
} else if (page instanceof DataPageV2) {
readPageV2(((DataPageV2) (page)));
} else {
throw new RuntimeException("Unsupported page type: " + page.getClass());
}
leftInPage = ((int) (endOfPageValueCount - valuesRead));
}
int num = Math.min(readNumber, leftInPage);
if (isCurrentPageDictionaryEncoded) {
// Read and decode dictionary ids.
runLenDecoder.readDictionaryIds(num, dictionaryIds, vector, rowId, maxDefLevel, this.dictionaryIdsDecoder);
if (vector.hasDictionary() || ((rowId == 0) && supportLazyDecode())) {
// Column vector supports lazy decoding of dictionary values so just set the dictionary.
// We can't do this if rowId != 0 AND the column doesn't have a dictionary
// (i.e. some non-dictionary encoded values have already been added).
vector.setDictionary(new ParquetDictionary(dictionary));
} else {
readBatchFromDictionaryIds(rowId, num, vector, dictionaryIds);
}
} else {
if (vector.hasDictionary() && (rowId != 0)) {
// This batch already has dictionary encoded values but this new page is not.
// The batch does not support a mix of dictionary and not so we will decode the dictionary.
readBatchFromDictionaryIds(0, rowId, vector, vector.getDictionaryIds());
}
vector.setDictionary(null);
readBatch(rowId, num, vector);
}
valuesRead += num;
rowId += num;
readNumber -= num;
}
}
| 3.26 |
flink_AbstractColumnReader_afterReadPage_rdh
|
/**
* After reading a page, we may need to do some initialization.
*/
protected void afterReadPage() {
}
| 3.26 |
flink_TaskExecutor_tryLoadLocalAllocationSnapshots_rdh
|
/**
* This method tries to repopulate the {@link JobTable} and {@link TaskSlotTable} from the local
* filesystem in a best-effort manner.
*/
private void tryLoadLocalAllocationSnapshots() {
Collection<SlotAllocationSnapshot> slotAllocationSnapshots = slotAllocationSnapshotPersistenceService.loadAllocationSnapshots();
log.debug("Recovered slot allocation snapshots {}.", slotAllocationSnapshots);
final Set<AllocationID> allocatedSlots = new HashSet<>();
for (SlotAllocationSnapshot slotAllocationSnapshot : slotAllocationSnapshots) {
try {
allocateSlotForJob(slotAllocationSnapshot.getJobId(), slotAllocationSnapshot.getSlotID(), slotAllocationSnapshot.getAllocationId(), slotAllocationSnapshot.getResourceProfile(), slotAllocationSnapshot.getJobTargetAddress());
} catch (SlotAllocationException e) {
log.debug("Cannot reallocate restored slot {}.", slotAllocationSnapshot, e);
}
allocatedSlots.add(slotAllocationSnapshot.getAllocationId());
}
localStateStoresManager.retainLocalStateForAllocations(allocatedSlots);
}
| 3.26 |
flink_TaskExecutor_failTask_rdh
|
// ------------------------------------------------------------------------
// Internal task methods
// ------------------------------------------------------------------------
private void failTask(final ExecutionAttemptID executionAttemptID, final Throwable cause) {
final Task task = taskSlotTable.getTask(executionAttemptID);
if (task != null) {
try {
task.failExternally(cause);
} catch (Throwable t) {
log.error("Could not fail task {}.", executionAttemptID, t);
}
} else {
log.info("Cannot find task to fail for execution {} with exception:", executionAttemptID, cause);
}
}
| 3.26 |
flink_TaskExecutor_onStart_rdh
|
// ------------------------------------------------------------------------
// Life cycle
// ------------------------------------------------------------------------
@Override
public void onStart() throws Exception {
try {
startTaskExecutorServices();
} catch (Throwable t) {
final TaskManagerException exception = new TaskManagerException(String.format("Could not start the TaskExecutor %s",
getAddress()), t);
onFatalError(exception);
throw exception;
}
startRegistrationTimeout();
}
| 3.26 |
flink_TaskExecutor_offerSlotsToJobManager_rdh
|
// ------------------------------------------------------------------------
// Internal job manager connection methods
// ------------------------------------------------------------------------
private void offerSlotsToJobManager(final JobID jobId) {
jobTable.getConnection(jobId).ifPresent(this::internalOfferSlotsToJobManager);
}
| 3.26 |
flink_TaskExecutor_submitTask_rdh
|
// ----------------------------------------------------------------------
// Task lifecycle RPCs
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> submitTask(TaskDeploymentDescriptor tdd, JobMasterId jobMasterId, Time timeout)
{
try {
final JobID jobId = tdd.getJobId();
final ExecutionAttemptID executionAttemptID = tdd.getExecutionAttemptId();
final JobTable.Connection jobManagerConnection = jobTable.getConnection(jobId).orElseThrow(() -> {
final String message = (("Could not submit task because there is no JobManager " + "associated for the job ") + jobId) + '.';
log.debug(message);
return new TaskSubmissionException(message);
});
if (!Objects.equals(jobManagerConnection.getJobMasterId(), jobMasterId)) {
final String message = ((("Rejecting the task submission because the job manager leader id " + jobMasterId) + " does not match the expected job manager leader id ") + jobManagerConnection.getJobMasterId()) + '.';
log.debug(message);
throw new TaskSubmissionException(message);
}
if (!taskSlotTable.tryMarkSlotActive(jobId, tdd.getAllocationId())) {
final String v22 = ((("No task slot allocated for job ID " + jobId) + " and allocation ID ") + tdd.getAllocationId()) + '.';
log.debug(v22);
throw new TaskSubmissionException(v22);
}
// re-integrate offloaded data and deserialize shuffle descriptors
try {
tdd.loadBigData(taskExecutorBlobService.getPermanentBlobService(), jobInformationCache, taskInformationCache, shuffleDescriptorsCache);
} catch (IOException | ClassNotFoundException e) {
throw new TaskSubmissionException("Could not re-integrate offloaded TaskDeploymentDescriptor data.", e);
}
// deserialize the pre-serialized information
final JobInformation jobInformation;
final TaskInformation taskInformation;
try {
jobInformation = tdd.getJobInformation();
taskInformation = tdd.getTaskInformation();
} catch (IOException | ClassNotFoundException e) {
throw new TaskSubmissionException("Could not deserialize the job or task information.", e);
}
if (!jobId.equals(jobInformation.getJobId())) {
throw new TaskSubmissionException(((("Inconsistent job ID information inside TaskDeploymentDescriptor (" + tdd.getJobId()) + " vs. ") + jobInformation.getJobId()) + ")");
}
TaskManagerJobMetricGroup jobGroup = taskManagerMetricGroup.addJob(jobInformation.getJobId(), jobInformation.getJobName());
// note that a pre-existing job group can NOT be closed concurrently - this is done by
// the same TM thread in removeJobMetricsGroup
TaskMetricGroup taskMetricGroup = jobGroup.addTask(tdd.getExecutionAttemptId(), taskInformation.getTaskName());
InputSplitProvider v27 = new RpcInputSplitProvider(jobManagerConnection.getJobManagerGateway(), taskInformation.getJobVertexId(), tdd.getExecutionAttemptId(), taskManagerConfiguration.getRpcTimeout());
final TaskOperatorEventGateway v28 = new RpcTaskOperatorEventGateway(jobManagerConnection.getJobManagerGateway(),
executionAttemptID, t -> runAsync(() -> failTask(executionAttemptID, t)));
TaskManagerActions taskManagerActions = jobManagerConnection.getTaskManagerActions();
CheckpointResponder checkpointResponder = jobManagerConnection.getCheckpointResponder();
GlobalAggregateManager aggregateManager = jobManagerConnection.getGlobalAggregateManager();
LibraryCacheManager.ClassLoaderHandle classLoaderHandle = jobManagerConnection.getClassLoaderHandle();
PartitionProducerStateChecker partitionStateChecker = jobManagerConnection.getPartitionStateChecker();
final TaskLocalStateStore localStateStore = localStateStoresManager.localStateStoreForSubtask(jobId, tdd.getAllocationId(), taskInformation.getJobVertexId(), tdd.getSubtaskIndex(), taskManagerConfiguration.getConfiguration(), jobInformation.getJobConfiguration());
final FileMergingSnapshotManager fileMergingSnapshotManager = fileMergingManager.fileMergingSnapshotManagerForJob(jobId);
// TODO: Pass config value from user program and do overriding here.
final StateChangelogStorage<?> changelogStorage;
try {
changelogStorage = changelogStoragesManager.stateChangelogStorageForJob(jobId, taskManagerConfiguration.getConfiguration(), jobGroup, localStateStore.getLocalRecoveryConfig());
} catch (IOException e) {
throw new TaskSubmissionException(e);
}
final JobManagerTaskRestore taskRestore = tdd.getTaskRestore();
final TaskStateManager taskStateManager = new TaskStateManagerImpl(jobId, tdd.getExecutionAttemptId(), localStateStore, fileMergingSnapshotManager, changelogStorage, changelogStoragesManager, taskRestore, checkpointResponder);
MemoryManager memoryManager;
try {
memoryManager = taskSlotTable.getTaskMemoryManager(tdd.getAllocationId());
} catch (SlotNotFoundException e) {
throw new TaskSubmissionException("Could not submit task.", e);
}
Task task = new Task(jobInformation, taskInformation, tdd.getExecutionAttemptId(), tdd.getAllocationId(),
tdd.getProducedPartitions(), tdd.getInputGates(), memoryManager, sharedResources, taskExecutorServices.getIOManager(), taskExecutorServices.getShuffleEnvironment(), taskExecutorServices.getKvStateService(), taskExecutorServices.getBroadcastVariableManager(), taskExecutorServices.getTaskEventDispatcher(), externalResourceInfoProvider, taskStateManager, taskManagerActions, v27, checkpointResponder, v28,
aggregateManager, classLoaderHandle,
fileCache, taskManagerConfiguration, taskMetricGroup, partitionStateChecker,
getRpcService().getScheduledExecutor(), channelStateExecutorFactoryManager.getOrCreateExecutorFactory(jobId));
taskMetricGroup.gauge(MetricNames.IS_BACK_PRESSURED, task::isBackPressured);
log.info("Received task {} ({}), deploy into slot with allocation id {}.", task.getTaskInfo().getTaskNameWithSubtasks(), tdd.getExecutionAttemptId(), tdd.getAllocationId());
boolean taskAdded;
try {
taskAdded = taskSlotTable.addTask(task);
} catch (SlotNotFoundException | SlotNotActiveException e) {
throw new TaskSubmissionException("Could not submit task.", e);
}
if (taskAdded) {
task.startTaskThread();
setupResultPartitionBookkeeping(tdd.getJobId(), tdd.getProducedPartitions(), task.getTerminationFuture());
return CompletableFuture.completedFuture(Acknowledge.get());
} else {
final String message = ("TaskManager already contains a task for id " + task.getExecutionId()) + '.';
log.debug(message);
throw new TaskSubmissionException(message);
}
} catch (TaskSubmissionException e) {
return FutureUtils.completedExceptionally(e);
}
}
| 3.26 |
flink_TaskExecutor_triggerCheckpoint_rdh
|
// ----------------------------------------------------------------------
// Checkpointing RPCs
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> triggerCheckpoint(ExecutionAttemptID executionAttemptID, long checkpointId, long checkpointTimestamp, CheckpointOptions checkpointOptions) {
log.debug("Trigger checkpoint {}@{} for {}.", checkpointId, checkpointTimestamp, executionAttemptID);
final Task task = taskSlotTable.getTask(executionAttemptID);
if (task != null) {
task.triggerCheckpointBarrier(checkpointId, checkpointTimestamp, checkpointOptions);
return CompletableFuture.completedFuture(Acknowledge.get());
} else {
final String message = ("TaskManager received a checkpoint request for unknown task " + executionAttemptID) + '.';
log.debug(message);
return FutureUtils.completedExceptionally(new CheckpointException(message, CheckpointFailureReason.TASK_CHECKPOINT_FAILURE));
}
}
| 3.26 |
flink_TaskExecutor_m1_rdh
|
// ------------------------------------------------------------------------
// Internal resource manager connection methods
// ------------------------------------------------------------------------
private void m1(String newLeaderAddress, ResourceManagerId newResourceManagerId) {
resourceManagerAddress = createResourceManagerAddress(newLeaderAddress, newResourceManagerId);
reconnectToResourceManager(new FlinkException(String.format("ResourceManager leader changed to new address %s", resourceManagerAddress)));
}
| 3.26 |
flink_TaskExecutor_heartbeatFromJobManager_rdh
|
// ----------------------------------------------------------------------
// Heartbeat RPC
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Void> heartbeatFromJobManager(ResourceID resourceID, AllocatedSlotReport allocatedSlotReport) {
return jobManagerHeartbeatManager.requestHeartbeat(resourceID, allocatedSlotReport);
}
| 3.26 |
flink_TaskExecutor_syncSlotsWithSnapshotFromJobMaster_rdh
|
/**
* Syncs the TaskExecutor's view on its allocated slots with the JobMaster's view. Slots which
* are no longer reported by the JobMaster are being freed. Slots which the JobMaster thinks it
* still owns but which are no longer allocated to it will be failed via {@link JobMasterGateway#failSlot}.
*
* @param jobMasterGateway
* jobMasterGateway to talk to the connected job master
* @param allocatedSlotReport
* represents the JobMaster's view on the current slot allocation
* state
*/
private void syncSlotsWithSnapshotFromJobMaster(JobMasterGateway jobMasterGateway, AllocatedSlotReport allocatedSlotReport) {
failNoLongerAllocatedSlots(allocatedSlotReport, jobMasterGateway);
freeNoLongerUsedSlots(allocatedSlotReport);
}
| 3.26 |
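The reconciliation above is a two-way diff between the JobMaster's slot report and the TaskExecutor's local allocation state. A hedged plain-Java sketch of those two steps using simple sets instead of Flink's slot types (the slot names are made up):

```java
import java.util.HashSet;
import java.util.Set;

public class SlotReconciliationSketch {
    public static void main(String[] args) {
        Set<String> taskExecutorView = new HashSet<>(Set.of("slot-1", "slot-2", "slot-3"));
        Set<String> jobMasterView = Set.of("slot-2", "slot-3", "slot-4");

        // Step 1: slots the JobMaster reports but the TaskExecutor no longer holds -> fail them.
        for (String slot : jobMasterView) {
            if (!taskExecutorView.contains(slot)) {
                System.out.println("failSlot(" + slot + ")"); // would go through JobMasterGateway#failSlot
            }
        }
        // Step 2: slots the TaskExecutor holds but the JobMaster no longer reports -> free them locally.
        taskExecutorView.removeIf(slot -> !jobMasterView.contains(slot));
        System.out.println(taskExecutorView); // slot-2 and slot-3 remain allocated
    }
}
```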
flink_TaskExecutor_onFatalError_rdh
|
// ------------------------------------------------------------------------
// Error Handling
// ------------------------------------------------------------------------
/**
* Notifies the TaskExecutor that a fatal error has occurred and it cannot proceed.
*
* @param t
* The exception describing the fatal error
*/
void onFatalError(final Throwable t) {
try {
log.error("Fatal error occurred in TaskExecutor {}.", getAddress(), t);
} catch (Throwable ignored) {
}
// The fatal error handler implementation should make sure that this call is non-blocking
fatalErrorHandler.onFatalError(t);
}
| 3.26 |
flink_TaskExecutor_getResourceManagerConnection_rdh
|
// ------------------------------------------------------------------------
// Access to fields for testing
// ------------------------------------------------------------------------
@VisibleForTesting
TaskExecutorToResourceManagerConnection getResourceManagerConnection() {
return resourceManagerConnection;
}
| 3.26 |
flink_TaskExecutor_updatePartitions_rdh
|
// ----------------------------------------------------------------------
// Partition lifecycle RPCs
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> updatePartitions(final ExecutionAttemptID executionAttemptID, Iterable<PartitionInfo> partitionInfos, Time timeout) {
final Task task = taskSlotTable.getTask(executionAttemptID);
if (task != null) {
for (final PartitionInfo partitionInfo : partitionInfos) {
// Run asynchronously because it might be blocking
FutureUtils.assertNoException(CompletableFuture.runAsync(() -> {
try {
if (!shuffleEnvironment.updatePartitionInfo(executionAttemptID, partitionInfo)) {
log.debug("Discard update for input gate partition {} of result {} in task {}. " + "The partition is no longer available.",
partitionInfo.getShuffleDescriptor().getResultPartitionID(), partitionInfo.getIntermediateDataSetID(), executionAttemptID);
}
} catch (IOException | InterruptedException e) {
log.error("Could not update input data location for task {}. Trying to fail task.", task.getTaskInfo().getTaskName(), e);
task.failExternally(e);
}
}, getRpcService().getScheduledExecutor()));
}
return CompletableFuture.completedFuture(Acknowledge.get());
} else {
log.debug("Discard update for input partitions of task {}. Task is no longer running.", executionAttemptID);
return CompletableFuture.completedFuture(Acknowledge.get());
}
}
| 3.26 |
flink_TaskExecutor_sendOperatorEventToTask_rdh
|
// ----------------------------------------------------------------------
// Other RPCs
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> sendOperatorEventToTask(ExecutionAttemptID executionAttemptID, OperatorID operatorId, SerializedValue<OperatorEvent> evt) {
log.debug("Operator event for {} - {}", executionAttemptID, operatorId);
final Task task =
taskSlotTable.getTask(executionAttemptID);
if (task == null) {
return FutureUtils.completedExceptionally(new TaskNotRunningException(("Task " + executionAttemptID) + " not running on TaskManager"));
}
try {
task.deliverOperatorEvent(operatorId, evt);
return CompletableFuture.completedFuture(Acknowledge.get());
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
return FutureUtils.completedExceptionally(t);
}
}
| 3.26 |
flink_TaskExecutor_disconnectJobManager_rdh
|
// ----------------------------------------------------------------------
// Disconnection RPCs
// ----------------------------------------------------------------------
@Override
public void disconnectJobManager(JobID jobId, Exception cause) {
jobTable.getConnection(jobId).ifPresent(jobManagerConnection -> disconnectAndTryReconnectToJobManager(jobManagerConnection, cause));
}
| 3.26 |
flink_TaskExecutor_isConnectedToResourceManager_rdh
|
// ------------------------------------------------------------------------
// Internal utility methods
// ------------------------------------------------------------------------
private boolean isConnectedToResourceManager() {
return establishedResourceManagerConnection != null;
}
| 3.26 |
flink_TaskExecutor_requestSlot_rdh
|
// ----------------------------------------------------------------------
// Slot allocation RPCs
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> requestSlot(final SlotID slotId, final JobID jobId, final AllocationID allocationId, final ResourceProfile resourceProfile, final String targetAddress, final ResourceManagerId resourceManagerId, final Time timeout) {
// TODO: Filter invalid requests from the resource manager by using the
// instance/registration Id
log.info("Receive slot request {} for job {} from resource manager with leader id {}.", allocationId, jobId, resourceManagerId);
if (!isConnectedToResourceManager(resourceManagerId)) {
final String message = String.format("TaskManager is not connected to the resource manager %s.", resourceManagerId);
log.debug(message);
return FutureUtils.completedExceptionally(new TaskManagerException(message));
}
m2(new SlotAllocationSnapshot(slotId, jobId, targetAddress, allocationId, resourceProfile));
try {
final boolean isConnected = allocateSlotForJob(jobId, slotId, allocationId, resourceProfile, targetAddress);
if (isConnected) {
offerSlotsToJobManager(jobId);
}
return CompletableFuture.completedFuture(Acknowledge.get());
} catch (SlotAllocationException e) {
log.debug("Could not allocate slot for allocation id {}.", allocationId, e);
return FutureUtils.completedExceptionally(e);
}
}
| 3.26 |
flink_TaskExecutor_getResourceID_rdh
|
// ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public ResourceID getResourceID() {
return unresolvedTaskManagerLocation.getResourceID();
}
| 3.26 |
flink_TaskExecutor_onStop_rdh
|
/**
* Called to shut down the TaskManager. The method closes all TaskManager services.
*/
@Override
public CompletableFuture<Void> onStop() {
log.info("Stopping TaskExecutor {}.", getAddress());
Throwable jobManagerDisconnectThrowable = null;
FlinkExpectedException cause = new FlinkExpectedException("The TaskExecutor is shutting down.");
closeResourceManagerConnection(cause);
for (JobTable.Job job : jobTable.getJobs()) {
try {
closeJob(job, cause);
} catch (Throwable t) {
jobManagerDisconnectThrowable = ExceptionUtils.firstOrSuppressed(t, jobManagerDisconnectThrowable);
}
}
changelogStoragesManager.shutdown();
channelStateExecutorFactoryManager.shutdown();
jobInformationCache.clear();
taskInformationCache.clear();
shuffleDescriptorsCache.clear();
Preconditions.checkState(jobTable.isEmpty());
final Throwable throwableBeforeTasksCompletion = jobManagerDisconnectThrowable;
return FutureUtils.runAfterwards(taskSlotTable.closeAsync(), this::stopTaskExecutorServices).handle((ignored, throwable) -> {
handleOnStopException(throwableBeforeTasksCompletion, throwable);
return null;
});
}
| 3.26 |
flink_AbstractUdfOperator_asArray_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Generic utility function that wraps a single class object into an array of that class type.
*
* @param <U>
* The type of the classes.
* @param clazz
* The class object to be wrapped.
* @return An array wrapping the class object.
*/
protected static <U> Class<U>[] asArray(Class<U> clazz) {
@SuppressWarnings("unchecked")
Class<U>[] v0 = new Class[]{ clazz };
return v0;
}
| 3.26 |
flink_AbstractUdfOperator_getUserCodeWrapper_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the function that is held by this operator. The function is the actual implementation of
* the user code.
*
* <p>This throws an exception if the pact does not contain an object but a class for the user
* code.
*
* @return The object with the user function for this operator.
* @see org.apache.flink.api.common.operators.Operator#getUserCodeWrapper()
*/
@Override
public UserCodeWrapper<FT> getUserCodeWrapper() {
return userFunction;
}
| 3.26 |
flink_AbstractUdfOperator_getBroadcastInputs_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Returns the input, or null, if none is set.
*
* @return The broadcast input root operator.
*/
public Map<String, Operator<?>> getBroadcastInputs() {
return this.broadcastInputs;
}
| 3.26 |
flink_AbstractUdfOperator_setBroadcastVariable_rdh
|
/**
* Binds the result produced by a plan rooted at {@code root} to a variable used by the UDF
* wrapped in this operator.
*
* @param root
* The root of the plan producing this input.
*/
public void setBroadcastVariable(String name, Operator<?> root) {
if (name == null) {
throw new IllegalArgumentException("The broadcast input name may not be null.");
}
if (root == null) {
throw new IllegalArgumentException("The broadcast input root operator may not be null.");
}
this.broadcastInputs.put(name, root);
}
| 3.26 |
flink_AbstractUdfOperator_setBroadcastVariables_rdh
|
/**
* Clears all previous broadcast inputs and binds the given inputs as broadcast variables of
* this operator.
*
* @param inputs
* The {@code<name, root>} pairs to be set as broadcast inputs.
*/
public <T> void setBroadcastVariables(Map<String, Operator<T>> inputs) {
this.broadcastInputs.clear();
this.broadcastInputs.putAll(inputs);
}
| 3.26 |
flink_CsvBulkWriter_forPojo_rdh
|
/**
* Builds a writer based on a POJO class definition.
*
* @param pojoClass
* The class of the POJO.
* @param stream
* The output stream.
* @param <T>
* The type of the elements accepted by this writer.
*/
static <T> CsvBulkWriter<T, T, Void> forPojo(Class<T> pojoClass, FSDataOutputStream stream) {
final Converter<T, T, Void> converter = (value, context) -> value;
final CsvMapper csvMapper = JacksonMapperFactory.createCsvMapper();
final CsvSchema schema = csvMapper.schemaFor(pojoClass).withoutQuoteChar();
return new CsvBulkWriter<>(csvMapper, schema, converter, null, stream);
}
| 3.26 |
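forPojo derives the CSV schema straight from the POJO via Jackson's CsvMapper. A small standalone sketch of just that schema derivation using plain Jackson, without Flink's file-system plumbing; the Metric POJO and the exact column order are assumptions for illustration.

```java
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;

public class CsvSchemaDemo {
    // Hypothetical POJO whose public fields define the CSV columns.
    public static class Metric {
        public String name;
        public double value;
    }

    public static void main(String[] args) throws Exception {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaFor(Metric.class).withoutQuoteChar();

        Metric m = new Metric();
        m.name = "latency";
        m.value = 12.5;
        // Writing one value produces a single unquoted CSV line, e.g. "latency,12.5".
        System.out.print(mapper.writer(schema).writeValueAsString(m));
    }
}
```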
flink_CsvBulkWriter_forSchema_rdh
|
/**
* Builds a writer with Jackson schema and a type converter.
*
* @param mapper
* The specialized mapper for producing CSV.
* @param schema
* The schema that defined the mapping properties.
* @param converter
* The type converter that converts incoming elements of type {@code <T>} into
* elements of type JsonNode.
* @param stream
* The output stream.
* @param <T>
* The type of the elements accepted by this writer.
* @param <C>
* The type of the converter context.
* @param <R>
* The type of the elements produced by this writer.
*/
static <T, R, C> CsvBulkWriter<T, R, C> forSchema(CsvMapper mapper, CsvSchema schema, Converter<T, R, C> converter, @Nullable C converterContext, FSDataOutputStream stream) {
return new CsvBulkWriter<>(mapper, schema, converter, converterContext, stream);
}
| 3.26 |
flink_CommittableMessageTypeInfo_of_rdh
|
/**
* Returns the type information based on the serializer for a {@link CommittableMessage}.
*
* @param committableSerializerFactory
* factory to create the serializer for a {@link CommittableMessage}
* @param <CommT>
* type of the committable
* @return the {@link TypeInformation} for a {@link CommittableMessage} with the given committable type
*/
public static <CommT> TypeInformation<CommittableMessage<CommT>> of(SerializableSupplier<SimpleVersionedSerializer<CommT>> committableSerializerFactory) {
return new CommittableMessageTypeInfo<>(committableSerializerFactory);
}
| 3.26 |
flink_CommittableMessageTypeInfo_noOutput_rdh
|
/**
* Returns the type information for a {@link CommittableMessage} with no committable.
*
* @return {@link TypeInformation} with {@link CommittableMessage}
*/
public static TypeInformation<CommittableMessage<Void>> noOutput() {
return new CommittableMessageTypeInfo<>(NoOutputSerializer::new);
}
| 3.26 |
flink_LogicalTypeJsonDeserializer_deserializeLengthFieldType_rdh
|
// --------------------------------------------------------------------------------------------
// Helper methods for some complex types
// --------------------------------------------------------------------------------------------
private LogicalType deserializeLengthFieldType(LogicalTypeRoot typeRoot, JsonNode logicalTypeNode) {
int length = logicalTypeNode.get(FIELD_NAME_LENGTH).asInt();
switch (typeRoot) {
case CHAR :
return length == 0 ? CharType.ofEmptyLiteral() : new CharType(length);
case VARCHAR :
return length == 0 ? VarCharType.ofEmptyLiteral() : new VarCharType(length);
case BINARY :
return length == 0 ? BinaryType.ofEmptyLiteral() : new BinaryType(length);
case VARBINARY :
return length == 0 ? VarBinaryType.ofEmptyLiteral() : new VarBinaryType(length);
default :
throw new SqlGatewayException(String.format("Cannot convert JSON string '%s' to the logical type '%s', '%s', '%s' or '%s'.", logicalTypeNode.toPrettyString(), LogicalTypeRoot.CHAR.name(), LogicalTypeRoot.VARCHAR.name(),
LogicalTypeRoot.BINARY.name(), LogicalTypeRoot.VARBINARY.name()));
}
}
| 3.26 |
flink_LogicalTypeJsonDeserializer_deserializeInternal_rdh
|
/**
* Deserializes JSON according to the original type root. It is the reverse operation of {@code SerializerWIP#serializeinternal}.
*/
private LogicalType deserializeInternal(JsonNode logicalTypeNode) {
LogicalTypeRoot typeRoot = LogicalTypeRoot.valueOf(logicalTypeNode.get(FIELD_NAME_TYPE_NAME).asText());
// the NullType's Json doesn't have other field, so return in advance
if (typeRoot.equals(LogicalTypeRoot.NULL)) {
return new NullType();
}
boolean isNullable = logicalTypeNode.get(FIELD_NAME_NULLABLE).asBoolean();
switch (typeRoot) {
case BOOLEAN :
return new BooleanType(isNullable);
case TINYINT :
return new TinyIntType(isNullable);
case SMALLINT :
return new SmallIntType(isNullable);
case INTEGER :
return new IntType(isNullable);
case BIGINT :
return new BigIntType(isNullable);
case FLOAT :
return new FloatType(isNullable);
case DOUBLE :
return new DoubleType(isNullable);
case DATE :
return new DateType(isNullable);
case CHAR :
case VARCHAR :
case BINARY :
case VARBINARY :
return deserializeLengthFieldType(typeRoot, logicalTypeNode).copy(isNullable);
case DECIMAL :
return new DecimalType(isNullable, logicalTypeNode.get(FIELD_NAME_PRECISION).asInt(), logicalTypeNode.get(FIELD_NAME_SCALE).asInt());
case TIME_WITHOUT_TIME_ZONE :
case TIMESTAMP_WITHOUT_TIME_ZONE :
case TIMESTAMP_WITH_TIME_ZONE :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return deserializeTimestamp(typeRoot, logicalTypeNode).copy(isNullable);
case INTERVAL_DAY_TIME :
case INTERVAL_YEAR_MONTH :
return deserializeInterval(isNullable, typeRoot, logicalTypeNode);
case MAP :
return deserializeMap(logicalTypeNode).copy(isNullable);
case ARRAY :
case MULTISET :
return deserializeCollection(typeRoot, logicalTypeNode).copy(isNullable);
case ROW :
return deserializeRow(logicalTypeNode).copy(isNullable);
case RAW :
return deserializeRaw(logicalTypeNode).copy(isNullable);
default :
throw new UnsupportedOperationException(String.format("Unable to deserialize a logical type of type root '%s'. Please check the documentation for supported types.",
typeRoot.name()));
}
}
| 3.26 |
flink_FlatMapNode_computeOperatorSpecificDefaultEstimates_rdh
|
/**
* Computes the estimates for the FlatMap operator. Since it un-nests, we assume a cardinality
* increase. To give the system a hint at data increase, we take a default magic number of a 5
* times increase.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
this.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords() * 5;
}
| 3.26 |
flink_GroupCombineOperatorBase_executeOnCollections_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected List<OUT> executeOnCollections(List<IN> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
GroupCombineFunction<IN, OUT> function = this.userFunction.getUserCodeObject();
UnaryOperatorInformation<IN, OUT> operatorInfo = getOperatorInfo();
TypeInformation<IN> inputType = operatorInfo.getInputType();
int[] keyColumns = getKeyColumns(0);
int[] sortColumns = keyColumns;
boolean[] sortOrderings = new boolean[sortColumns.length];
if (groupOrder != null) {
sortColumns = ArrayUtils.addAll(sortColumns, groupOrder.getFieldPositions());
sortOrderings = ArrayUtils.addAll(sortOrderings, groupOrder.getFieldSortDirections());
}
if (sortColumns.length == 0) {
// => all reduce. No comparator
checkArgument(sortOrderings.length == 0);
} else {
final TypeComparator<IN> sortComparator = getTypeComparator(inputType, sortColumns, sortOrderings, executionConfig);
Collections.sort(inputData, new Comparator<IN>() {
@Override
public int compare(IN o1, IN o2) {
return sortComparator.compare(o1, o2);
}
});
}
FunctionUtils.setFunctionRuntimeContext(function, ctx);
FunctionUtils.openFunction(function, DefaultOpenContext.INSTANCE);
ArrayList<OUT> result = new ArrayList<OUT>();
if (keyColumns.length == 0) {
final TypeSerializer<IN> inputSerializer = inputType.createSerializer(executionConfig);
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
List<IN> inputDataCopy = new ArrayList<IN>(inputData.size());
for (IN in : inputData) {
inputDataCopy.add(inputSerializer.copy(in));
}
CopyingListCollector<OUT> collector = new CopyingListCollector<OUT>(result, outSerializer);
function.combine(inputDataCopy, collector);
} else {
final TypeSerializer<IN> inputSerializer = inputType.createSerializer(executionConfig);
boolean[] keyOrderings = new boolean[keyColumns.length];
final TypeComparator<IN> comparator = getTypeComparator(inputType, keyColumns, keyOrderings, executionConfig);
ListKeyGroupedIterator<IN> keyedIterator = new ListKeyGroupedIterator<IN>(inputData, inputSerializer, comparator);
TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);
CopyingListCollector<OUT> collector = new CopyingListCollector<OUT>(result, outSerializer);
while (keyedIterator.nextKey()) {
function.combine(keyedIterator.getValues(), collector);
}
}
FunctionUtils.closeFunction(function);
return result;
}
| 3.26 |
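The grouped branch above sorts the input by key and then feeds each contiguous key group to the combine function. A plain-Java sketch of that sort-then-combine flow, with a sum standing in for the user's combine function and a record standing in for the input type (names are illustrative):

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SortThenCombineSketch {
    record Rec(String key, int value) {}

    public static void main(String[] args) {
        List<Rec> input = new ArrayList<>(List.of(
                new Rec("b", 2), new Rec("a", 1), new Rec("a", 4), new Rec("b", 3)));
        input.sort(Comparator.comparing(Rec::key)); // stand-in for the TypeComparator-based sort

        List<String> result = new ArrayList<>();
        int i = 0;
        while (i < input.size()) {
            String key = input.get(i).key();
            int sum = 0;
            while (i < input.size() && input.get(i).key().equals(key)) {
                sum += input.get(i).value(); // the per-group "combine" (here: a sum)
                i++;
            }
            result.add(key + "=" + sum);
        }
        System.out.println(result); // [a=5, b=5]
    }
}
```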
flink_GroupCombineOperatorBase_setGroupOrder_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Sets the order of the elements within a reduce group.
*
* @param order
* The order for the elements in a reduce group.
*/
public void setGroupOrder(Ordering order) {
this.groupOrder = order;
}
| 3.26 |
flink_ExceptionHistoryEntry_fromTaskManagerLocation_rdh
|
/**
* Creates a {@code ArchivedTaskManagerLocation} copy of the passed {@link TaskManagerLocation}.
*
* @param taskManagerLocation
* The {@code TaskManagerLocation} that's going to be copied.
* @return The corresponding {@code ArchivedTaskManagerLocation} or {@code null} if {@code null} was passed.
*/
@VisibleForTesting
@Nullable static ArchivedTaskManagerLocation fromTaskManagerLocation(TaskManagerLocation taskManagerLocation) {
if (taskManagerLocation == null) {
return null;
}
return new ArchivedTaskManagerLocation(taskManagerLocation.getResourceID(), taskManagerLocation.addressString(), taskManagerLocation.dataPort(), taskManagerLocation.getHostname(), taskManagerLocation.getFQDNHostname());
}
| 3.26 |
flink_ExceptionHistoryEntry_createGlobal_rdh
|
/**
* Creates an {@code ExceptionHistoryEntry} that is not based on an {@code Execution}.
*/
public static ExceptionHistoryEntry createGlobal(Throwable cause, CompletableFuture<Map<String, String>> failureLabels) {
return new ExceptionHistoryEntry(cause, System.currentTimeMillis(), failureLabels, null, ((ArchivedTaskManagerLocation) (null)));
}
| 3.26 |
flink_ExceptionHistoryEntry_create_rdh
|
/**
* Creates an {@code ExceptionHistoryEntry} based on the provided {@code Execution}.
*
* @param failedExecution
* the failed {@code Execution}.
* @param taskName
* the name of the task.
* @param failureLabels
* the labels associated with the failure.
* @return The {@code ExceptionHistoryEntry}.
* @throws NullPointerException
* if {@code null} is passed as one of the parameters.
* @throws IllegalArgumentException
* if the passed {@code Execution} does not provide a {@link Execution#getFailureInfo() failureInfo}.
*/
public static ExceptionHistoryEntry create(AccessExecution failedExecution, String taskName, CompletableFuture<Map<String, String>> failureLabels) {
Preconditions.checkNotNull(failedExecution, "No Execution is specified.");
Preconditions.checkNotNull(taskName, "No task name is specified.");
Preconditions.checkArgument(failedExecution.getFailureInfo().isPresent(), ("The selected Execution " + failedExecution.getAttemptId()) + " didn't fail.");
final ErrorInfo failure = failedExecution.getFailureInfo().get();
return new ExceptionHistoryEntry(failure.getException(), failure.getTimestamp(), failureLabels, taskName, failedExecution.getAssignedResourceLocation());
}
| 3.26 |
flink_BlobInputStream_throwEOFException_rdh
|
/**
* Convenience method to throw an {@link EOFException}.
*
* @throws EOFException
* thrown to indicate the underlying input stream did not provide as much
* data as expected
*/
private void throwEOFException() throws EOFException {
throw new EOFException(String.format("Expected to read %d more bytes from stream", this.bytesToReceive - this.bytesReceived));
}
| 3.26 |
flink_TypeStringUtils_containsDelimiter_rdh
|
// --------------------------------------------------------------------------------------------
private static boolean containsDelimiter(String string) {
final char[] charArray = string.toCharArray();
for (char c : charArray) {
if (isDelimiter(c)) {
return true;
}
}
return false;
}
| 3.26 |
flink_CompositeType_hasDeterministicFieldOrder_rdh
|
/**
* True if this type has an inherent ordering of the fields, such that a user can always be sure
* in which order the fields will be in. This is true for Tuples and Case Classes. It is not
* true for Regular Java Objects, since there, the ordering of the fields can be arbitrary.
*
* <p>This is used when translating a DataSet or DataStream to an Expression Table, when
* initially renaming the fields of the underlying type.
*/
@PublicEvolving
public boolean hasDeterministicFieldOrder() {
return false;
}
| 3.26 |
flink_CompositeType_hasField_rdh
|
/**
* Returns true when this type has a composite field with the given name.
*/
@PublicEvolving
public boolean hasField(String fieldName) {
return getFieldIndex(fieldName) >= 0;
}
| 3.26 |
flink_InternalTimerServiceImpl_restoreTimersForKeyGroup_rdh
|
/**
* Restore the timers (both processing and event time ones) for a given {@code keyGroupIdx}.
*
* @param restoredSnapshot
* the restored snapshot containing the key-group's timers, and the
* serializers that were used to write them
* @param keyGroupIdx
* the id of the key-group to be put in the snapshot.
*/
@SuppressWarnings("unchecked")
public void restoreTimersForKeyGroup(InternalTimersSnapshot<?, ?> restoredSnapshot, int keyGroupIdx) {
this.restoredTimersSnapshot = ((InternalTimersSnapshot<K, N>) (restoredSnapshot));
TypeSerializer<K> restoredKeySerializer = restoredTimersSnapshot.getKeySerializerSnapshot().restoreSerializer();
if ((this.keyDeserializer != null) && (!this.keyDeserializer.equals(restoredKeySerializer))) {
throw new IllegalArgumentException("Tried to restore timers for the same service with different key serializers.");
}
this.keyDeserializer = restoredKeySerializer;
TypeSerializer<N> restoredNamespaceSerializer = restoredTimersSnapshot.getNamespaceSerializerSnapshot().restoreSerializer();
if ((this.namespaceDeserializer != null) && (!this.namespaceDeserializer.equals(restoredNamespaceSerializer))) {
throw new IllegalArgumentException("Tried to restore timers for the same service with different namespace serializers.");
}
this.namespaceDeserializer = restoredNamespaceSerializer;
checkArgument(localKeyGroupRange.contains(keyGroupIdx), ("Key Group " + keyGroupIdx) + " does not belong to the local range.");
// restore the event time timers
eventTimeTimersQueue.addAll(restoredTimersSnapshot.getEventTimeTimers());
// restore the processing time timers
f0.addAll(restoredTimersSnapshot.getProcessingTimeTimers());
}
| 3.26 |
flink_InternalTimerServiceImpl_startTimerService_rdh
|
/**
* Starts the local {@link InternalTimerServiceImpl} by:
*
* <ol>
* <li>Setting the {@code keySerializer} and {@code namespaceSerializer} for the timers it
* will contain.
* <li>Setting the {@code triggerTarget} which contains the action to be performed when a
* timer fires.
* <li>Re-registering timers that were retrieved after recovering from a node failure, if any.
* </ol>
*
* <p>This method can be called multiple times, as long as it is called with the same
* serializers.
*/
public void startTimerService(TypeSerializer<K> keySerializer, TypeSerializer<N> namespaceSerializer, Triggerable<K, N> triggerTarget) {
if (!isInitialized) {
if ((keySerializer == null) || (namespaceSerializer == null)) {
throw new IllegalArgumentException("The TimersService serializers cannot be null.");
}
if (((this.keySerializer != null) || (this.namespaceSerializer != null)) || (this.triggerTarget != null)) {
throw new IllegalStateException("The TimerService has already been initialized.");
}
// the following is the case where we restore
if (restoredTimersSnapshot != null) {
TypeSerializerSchemaCompatibility<K> keySerializerCompatibility = restoredTimersSnapshot.getKeySerializerSnapshot().resolveSchemaCompatibility(keySerializer);
if (keySerializerCompatibility.isIncompatible() || keySerializerCompatibility.isCompatibleAfterMigration()) {
throw new IllegalStateException("Tried to initialize restored TimerService with new key serializer that requires migration or is incompatible.");
}
TypeSerializerSchemaCompatibility<N> namespaceSerializerCompatibility = restoredTimersSnapshot.getNamespaceSerializerSnapshot().resolveSchemaCompatibility(namespaceSerializer);
restoredTimersSnapshot = null;
if (namespaceSerializerCompatibility.isIncompatible() || namespaceSerializerCompatibility.isCompatibleAfterMigration()) {
throw new IllegalStateException("Tried to initialize restored TimerService with new namespace serializer that requires migration or is incompatible.");
}
this.keySerializer = (keySerializerCompatibility.isCompatibleAsIs()) ? keySerializer : keySerializerCompatibility.getReconfiguredSerializer();
this.namespaceSerializer = (namespaceSerializerCompatibility.isCompatibleAsIs()) ? namespaceSerializer : namespaceSerializerCompatibility.getReconfiguredSerializer();
} else {
this.keySerializer = keySerializer;
this.namespaceSerializer = namespaceSerializer;
}
this.keyDeserializer = null;
this.namespaceDeserializer = null;
this.triggerTarget = Preconditions.checkNotNull(triggerTarget);
// re-register the restored timers (if any)
final InternalTimer<K, N> headTimer = f0.peek();
if (headTimer != null) {
nextTimer = processingTimeService.registerTimer(headTimer.getTimestamp(), this::onProcessingTime);
}
this.isInitialized = true;
} else if (!(this.keySerializer.equals(keySerializer) && this.namespaceSerializer.equals(namespaceSerializer))) {
throw new IllegalArgumentException("Already initialized Timer Service " + "tried to be initialized with different key and namespace serializers.");
}
}
| 3.26 |
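For context, user operators normally do not call startTimerService directly; they request an InternalTimerService from AbstractStreamOperator, and the timer service manager starts it with the operator's key serializer, the given namespace serializer, and the Triggerable. The following is a hedged sketch of that usage based on the standard AbstractStreamOperator / Triggerable APIs; the operator class itself (DelayedEchoOperator) is invented for illustration and must run on a keyed stream with String keys.

import org.apache.flink.runtime.state.VoidNamespace;
import org.apache.flink.runtime.state.VoidNamespaceSerializer;
import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
import org.apache.flink.streaming.api.operators.InternalTimer;
import org.apache.flink.streaming.api.operators.InternalTimerService;
import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
import org.apache.flink.streaming.api.operators.Triggerable;
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;

public class DelayedEchoOperator extends AbstractStreamOperator<String>
        implements OneInputStreamOperator<String, String>, Triggerable<String, VoidNamespace> {

    private transient InternalTimerService<VoidNamespace> timerService;

    @Override
    public void open() throws Exception {
        super.open();
        // The manager starts (or restores) the named service with this operator's serializers.
        timerService = getInternalTimerService("delayed-echo-timers", VoidNamespaceSerializer.INSTANCE, this);
    }

    @Override
    public void processElement(StreamRecord<String> element) throws Exception {
        // Register a processing-time timer one second in the future for the current key.
        timerService.registerProcessingTimeTimer(
                VoidNamespace.INSTANCE, timerService.currentProcessingTime() + 1000L);
    }

    @Override
    public void onProcessingTime(InternalTimer<String, VoidNamespace> timer) {
        output.collect(new StreamRecord<>("timer fired for key " + timer.getKey()));
    }

    @Override
    public void onEventTime(InternalTimer<String, VoidNamespace> timer) {
        // No event-time timers in this sketch.
    }
}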
flink_HiveParserSubQueryDiagnostic_getRewrite_rdh
|
/**
* Counterpart of hive's org.apache.hadoop.hive.ql.parse.SubQueryDiagnostic.
 */
public class HiveParserSubQueryDiagnostic {
static QBSubQueryRewrite getRewrite(HiveParserQBSubQuery subQuery, TokenRewriteStream stream, HiveParserContext ctx) {
if (ctx.isExplainSkipExecution()) {
return new QBSubQueryRewrite(subQuery, stream);
} else {
return new QBSubQueryRewriteNoop(subQuery, stream);
}
}
| 3.26 |
flink_NoResourceAvailableException_equals_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
    return (obj instanceof NoResourceAvailableException) && getMessage().equals(((NoResourceAvailableException) (obj)).getMessage());
}
| 3.26 |
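A tiny illustration of the message-based equality above. The import path and the String-message constructor are assumed from the class's public API in flink-runtime; treat this as a sketch rather than a definitive usage.

import org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException;

public class MessageEqualityCheck {
    public static void main(String[] args) {
        NoResourceAvailableException a = new NoResourceAvailableException("Not enough slots");
        NoResourceAvailableException b = new NoResourceAvailableException("Not enough slots");
        System.out.println(a.equals(b)); // true: equality is defined purely on the message
    }
}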
flink_StreamTaskNetworkInput_getRecordDeserializers_rdh
|
// Initialize one deserializer per input channel
private static Map<InputChannelInfo, SpillingAdaptiveSpanningRecordDeserializer<DeserializationDelegate<StreamElement>>> getRecordDeserializers(CheckpointedInputGate checkpointedInputGate, IOManager ioManager) {
return checkpointedInputGate.getChannelInfos().stream().collect(toMap(identity(), unused -> new SpillingAdaptiveSpanningRecordDeserializer<>(ioManager.getSpillingDirectoriesPaths())));
}
| 3.26 |
flink_SqlValidatorUtils_adjustTypeForMultisetConstructor_rdh
|
/**
* When an element's type does not equal the component type, add an explicit cast.
*
* @param evenType
* derived type for element with even index
* @param oddType
* derived type for element with odd index
* @param sqlCallBinding
* description of call
*/
private static void adjustTypeForMultisetConstructor(RelDataType evenType, RelDataType oddType, SqlCallBinding sqlCallBinding) {
SqlCall call = sqlCallBinding.getCall();
List<RelDataType> operandTypes = sqlCallBinding.collectOperandTypes();
List<SqlNode> operands = call.getOperandList();
RelDataType elementType;
for (int i = 0; i < operands.size(); i++) {
if ((i % 2) == 0) {
elementType = evenType;
} else {
elementType = oddType;
}
if (operandTypes.get(i).equalsSansFieldNames(elementType)) {
continue;
}
call.setOperand(i, castTo(operands.get(i), elementType));
}
}
| 3.26 |
flink_RowData_createFieldGetter_rdh
|
// ------------------------------------------------------------------------------------------
// Access Utilities
// ------------------------------------------------------------------------------------------
/**
* Creates an accessor for getting elements in an internal row data structure at the given
* position.
*
* @param fieldType
* the element type of the row
* @param fieldPos
* the element position of the row
*/
static FieldGetter createFieldGetter(LogicalType fieldType, int fieldPos) {
final FieldGetter v0;
// ordered by type root definition
switch (fieldType.getTypeRoot()) {
case CHAR :
case VARCHAR :
v0 = row -> row.getString(fieldPos);
break;
case BOOLEAN :
v0 = row -> row.getBoolean(fieldPos);
break;
case BINARY :
case VARBINARY :
v0 = row -> row.getBinary(fieldPos);
break;
case DECIMAL :
final int decimalPrecision = getPrecision(fieldType);
final int decimalScale = getScale(fieldType);
v0 = row -> row.getDecimal(fieldPos, decimalPrecision, decimalScale);
break;
case TINYINT :
v0 = row -> row.getByte(fieldPos);
break;
case SMALLINT :
v0 = row -> row.getShort(fieldPos);
break;
case INTEGER :
case DATE :
case TIME_WITHOUT_TIME_ZONE :
case INTERVAL_YEAR_MONTH :
v0 = row -> row.getInt(fieldPos);
break;
case BIGINT :
case INTERVAL_DAY_TIME :
v0 = row -> row.getLong(fieldPos);
break;
case FLOAT :
v0 = row -> row.getFloat(fieldPos);
break;
case DOUBLE :
v0 = row -> row.getDouble(fieldPos);
break;
case TIMESTAMP_WITHOUT_TIME_ZONE :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
final int timestampPrecision = getPrecision(fieldType);
v0 = row -> row.getTimestamp(fieldPos, timestampPrecision);
break;
case TIMESTAMP_WITH_TIME_ZONE :
throw new UnsupportedOperationException();
case ARRAY :
v0 = row -> row.getArray(fieldPos);
break;
case MULTISET :
case MAP :
v0 = row -> row.getMap(fieldPos);
break;
case ROW :
case STRUCTURED_TYPE :
final int rowFieldCount = getFieldCount(fieldType);
v0 = row -> row.getRow(fieldPos, rowFieldCount);
break;
case DISTINCT_TYPE :
v0 = createFieldGetter(((DistinctType) (fieldType)).getSourceType(), fieldPos);
break;
case RAW :
v0 = row -> row.getRawValue(fieldPos);
break;
case NULL :
case SYMBOL :
case UNRESOLVED :
default :
throw new IllegalArgumentException();
}
if (!fieldType.isNullable()) {
return v0;
}
return row -> {
if (row.isNullAt(fieldPos)) {
return null;
}
return v0.getFieldOrNull(row);
};
}
| 3.26 |
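A short usage sketch of the accessor above: a getter is created once per field position and then applied per row, returning null for null fields of nullable types. The field layout and values here are made up for illustration.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.VarCharType;

public class FieldGetterExample {
    public static void main(String[] args) {
        // One getter per field position; reuse them across rows.
        RowData.FieldGetter nameGetter = RowData.createFieldGetter(new VarCharType(VarCharType.MAX_LENGTH), 0);
        RowData.FieldGetter ageGetter = RowData.createFieldGetter(new IntType(), 1);

        RowData row = GenericRowData.of(StringData.fromString("alice"), 42);
        System.out.println(nameGetter.getFieldOrNull(row)); // alice (as StringData)
        System.out.println(ageGetter.getFieldOrNull(row));  // 42
    }
}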
flink_JobMasterPartitionTracker_getAllTrackedNonClusterPartitions_rdh
|
/**
* Gets all the non-cluster partitions under tracking.
*/
default Collection<ResultPartitionDeploymentDescriptor> getAllTrackedNonClusterPartitions() {
return m0().stream().filter(descriptor -> !descriptor.getPartitionType().isPersistent()).collect(Collectors.toList());
}
| 3.26 |
flink_JobMasterPartitionTracker_getAllTrackedClusterPartitions_rdh
|
/**
* Gets all the cluster partitions under tracking.
*/
default Collection<ResultPartitionDeploymentDescriptor> getAllTrackedClusterPartitions() {
return m0().stream().filter(descriptor -> descriptor.getPartitionType().isPersistent()).collect(Collectors.toList());
}
| 3.26 |
flink_JobMasterPartitionTracker_stopTrackingAndReleasePartitions_rdh
|
/**
* Releases the given partitions and stops the tracking of partitions that were released.
*/
default void stopTrackingAndReleasePartitions(Collection<ResultPartitionID> resultPartitionIds) {
stopTrackingAndReleasePartitions(resultPartitionIds, true);
}
| 3.26 |
flink_CallExpression_temporary_rdh
|
/**
* Creates a {@link CallExpression} to a temporary function (potentially shadowing a {@link Catalog} function or providing a system function).
*/
public static CallExpression temporary(FunctionIdentifier functionIdentifier, FunctionDefinition functionDefinition, List<ResolvedExpression> args, DataType dataType) {
return new CallExpression(true, Preconditions.checkNotNull(functionIdentifier, "Function identifier must not be null for temporary functions."), functionDefinition, args, dataType);
}
| 3.26 |
flink_CallExpression_permanent_rdh
|
/**
* Creates a {@link CallExpression} to a resolved built-in function. It assumes that the {@link BuiltInFunctionDefinition} instance is provided by the framework (usually the core module).
*/
@Internal
public static CallExpression permanent(BuiltInFunctionDefinition builtInFunctionDefinition, List<ResolvedExpression> args, DataType dataType) {
return new CallExpression(false, FunctionIdentifier.of(builtInFunctionDefinition.getName()), builtInFunctionDefinition, args, dataType);
}
| 3.26 |
flink_CallExpression_anonymous_rdh
|
/**
* Creates a {@link CallExpression} to an anonymous function that has been declared inline
* without a {@link FunctionIdentifier}.
*/
public static CallExpression anonymous(FunctionDefinition functionDefinition, List<ResolvedExpression> args, DataType dataType) {
return new CallExpression(true, null, functionDefinition, args, dataType);
}
/**
*
* @deprecated Use {@link #permanent(FunctionIdentifier, FunctionDefinition, List, DataType)} or
* {@link #temporary(FunctionIdentifier, FunctionDefinition, List, DataType)}
| 3.26 |
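As an illustration of these factory methods, here is a hedged sketch that builds an anonymous call to an inline scalar function. The function, literal, and class names are invented for the example; the resolved output type is supplied explicitly because anonymous calls carry no identifier to look it up.

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.expressions.CallExpression;
import org.apache.flink.table.expressions.ValueLiteralExpression;
import org.apache.flink.table.functions.ScalarFunction;

public class AnonymousCallExample {

    /** A trivial inline function; user-defined functions implement FunctionDefinition. */
    public static class MyUpper extends ScalarFunction {
        public String eval(String s) {
            return s == null ? null : s.toUpperCase();
        }
    }

    public static void main(String[] args) {
        CallExpression call =
                CallExpression.anonymous(
                        new MyUpper(),
                        Collections.singletonList(new ValueLiteralExpression("flink")),
                        DataTypes.STRING());
        System.out.println(call.asSummaryString());
        System.out.println(call.getFunctionName()); // falls back to the definition's toString()
    }
}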
flink_CallExpression_getFunctionName_rdh
|
/**
* Returns a string representation of the call's function for logging or printing to a console.
 */
public String getFunctionName() {
    if (functionIdentifier == null) {
        return functionDefinition.toString();
    } else {
        return functionIdentifier.asSummaryString();
    }
}
| 3.26 |
flink_PartitionTempFileManager_m0_rdh
|
/**
* Generates a new file path under the nested partition directories given by {@code partitions}.
*/
public Path m0(String... partitions) {
Path parentPath = taskTmpDir;
for (String dir : partitions) {
parentPath = new Path(parentPath, dir);
}
return new Path(parentPath, newFileName());
}
| 3.26 |
flink_PartitionTempFileManager_listTaskTemporaryPaths_rdh
|
/**
* Returns task temporary paths in this checkpoint.
*/
public static List<Path> listTaskTemporaryPaths(FileSystem fs, Path basePath, BiPredicate<Integer, Integer> taskAttemptFilter) throws Exception {
List<Path> taskTmpPaths = new ArrayList<>();
if (fs.exists(basePath)) {
    for (FileStatus taskStatus : fs.listStatus(basePath)) {
final String taskDirName = taskStatus.getPath().getName();
final Matcher matcher = TASK_DIR_PATTERN.matcher(taskDirName);
if (matcher.matches()) {
final int subtaskIndex = Integer.parseInt(matcher.group(1));
final int attemptNumber = Integer.parseInt(matcher.group(2));
if (taskAttemptFilter.test(subtaskIndex, attemptNumber)) {
taskTmpPaths.add(taskStatus.getPath());
}
}
}
} else {
LOG.warn("The path {} doesn't exist. Maybe no data is generated in the path and the path is not created.", basePath);
}
return taskTmpPaths;
}
| 3.26 |
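A hedged calling sketch, assumed to live in the same package as PartitionTempFileManager so the class itself needs no import: the BiPredicate receives the subtask index and attempt number parsed from each task directory name, and only matching directories are returned. The staging directory and filter are invented for the example.

import java.util.List;
import java.util.function.BiPredicate;

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

public class ListTaskTmpPathsExample {
    public static void main(String[] args) throws Exception {
        Path basePath = new Path("file:///tmp/partition-staging"); // assumed staging directory
        FileSystem fs = basePath.getFileSystem();

        // Keep only first-attempt directories; real callers typically filter on the expected attempt.
        BiPredicate<Integer, Integer> firstAttemptOnly = (subtaskIndex, attemptNumber) -> attemptNumber == 0;

        List<Path> taskPaths = PartitionTempFileManager.listTaskTemporaryPaths(fs, basePath, firstAttemptOnly);
        taskPaths.forEach(System.out::println);
    }
}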
flink_PartitionTempFileManager_collectPartSpecToPaths_rdh
|
/**
* Collects all partitioned paths and aggregates them according to their partition spec.
*/
public static Map<LinkedHashMap<String, String>, List<Path>> collectPartSpecToPaths(FileSystem fs, List<Path> taskPaths, int partColSize) {
Map<LinkedHashMap<String, String>, List<Path>> specToPaths = new HashMap<>();
for (Path taskPath : taskPaths) {
searchPartSpecAndPaths(fs, taskPath, partColSize).forEach(tuple2 -> specToPaths.compute(tuple2.f0, (spec, paths) -> {
paths = (paths == null) ? new ArrayList<>() : paths;
paths.add(tuple2.f1);
return paths;
}));
}
return specToPaths;
}
| 3.26 |