name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
flink_ScopeFormat_concat_rdh | /**
* Concatenates the given component names separated by the delimiter character. Additionally the
* character filter is applied to all component names.
*
* @param filter
* Character filter to be applied to the component names
* @param delimiter
* Delimiter to separate component names
* @param components
* Array of component names
* @return The concatenated component name
*/
public static String concat(CharacterFilter filter, Character delimiter, String... components) {
StringBuilder sb = new StringBuilder();
sb.append(filter.filterCharacters(components[0]));
for (int x = 1; x < components.length; x++) {
sb.append(delimiter);
sb.append(filter.filterCharacters(components[x]));
}
return sb.toString();
} | 3.26 |
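A minimal usage sketch of `concat`, assuming Flink's single-method `CharacterFilter` interface; the filter and component names are made up.

```java
// Hypothetical usage of ScopeFormat.concat; filter and names are illustrative only.
CharacterFilter filter = input -> input.replace('.', '_');
String scope = ScopeFormat.concat(filter, '.', "taskmanager", "job.name", "numRecordsIn");
// scope == "taskmanager.job_name.numRecordsIn"
```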
flink_ScopeFormat_toString_rdh | // ------------------------------------------------------------------------
@Override
public String toString() {
return ("ScopeFormat '" + format) + '\'';
} | 3.26 |
flink_ScopeFormat_format_rdh | // ------------------------------------------------------------------------
public String format() {
return format;
} | 3.26 |
flink_ScopeFormat_asVariable_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Formats the given string to resemble a scope variable.
*
* @param scope
* The string to format
* @return The formatted string
*/
public static String asVariable(String scope) {
return (SCOPE_VARIABLE_PREFIX + scope) + SCOPE_VARIABLE_SUFFIX;
} | 3.26 |
flink_FlinkRexBuilder_toComparable_rdh | /**
* Copied from the {@link RexBuilder} to fix the {@link RexBuilder#makeIn}.
*/
@SuppressWarnings("rawtypes")
private static <C extends Comparable<C>> C toComparable(Class<C> clazz, RexNode point) {
switch (point.getKind()) {
case LITERAL :
final RexLiteral literal = ((RexLiteral) (point));
return literal.getValueAs(clazz);
case ROW :
final RexCall call = ((RexCall) (point));
final ImmutableList.Builder<Comparable> v14 = ImmutableList.builder();
for (RexNode operand : call.operands) {
// noinspection unchecked
final Comparable value = toComparable(Comparable.class, operand);
if (value == null) {
return null;// not a constant value
}
v14.add(value);
}
return clazz.cast(FlatLists.ofComparable(v14.build()));
default :
return null;// not a constant value
}
} | 3.26 |
flink_FlinkRexBuilder_m0_rdh | /**
* Creates a literal of the default value for the given type.
*
* <p>This value is:
*
* <ul>
* <li>0 for numeric types;
* <li>FALSE for BOOLEAN;
* <li>The epoch for TIMESTAMP and DATE;
* <li>Midnight for TIME;
* <li>The empty string for string types (CHAR, BINARY, VARCHAR, VARBINARY).
* </ul>
*
* <p>Uses '1970-01-01 00:00:00' (epoch second 0) as the zero value for TIMESTAMP_LTZ. Calcite's
* zero value '0000-00-00 00:00:00' is an invalid time whose month and day are invalid, so we
* work around that here. Stop overriding once CALCITE-4555 is fixed.
*
* @param type
* Type
* @return Simple literal, or cast simple literal
*/
@Override
public RexLiteral m0(RelDataType type) {
switch (type.getSqlTypeName()) {
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
return makeLiteral(new TimestampString(1970, 1, 1, 0, 0, 0), type);
default :
return super.makeZeroLiteral(type);
}
} | 3.26 |
flink_FlinkRexBuilder_areAssignable_rdh | /**
* Copied from the {@link RexBuilder} to fix the {@link RexBuilder#makeIn}.
*/
private boolean areAssignable(RexNode arg, List<? extends RexNode> bounds) {
for (RexNode bound : bounds) {
if ((!SqlTypeUtil.inSameFamily(arg.getType(), bound.getType())) && (!(arg.getType().isStruct() && bound.getType().isStruct()))) {
return false;
}
}
return true;
} | 3.26 |
flink_FlinkRexBuilder_makeFieldAccess_rdh | /**
* Compared to the original method we adjust the nullability of the nested column based on the
* nullability of the enclosing type.
*
* <p>If the field's type is NOT NULL, but the enclosing ROW is nullable, we can still produce
* nulls.
*/
@Override
public RexNode makeFieldAccess(RexNode expr, int i) {
RexNode field = super.makeFieldAccess(expr, i);
if (expr.getType().isNullable() && (!field.getType().isNullable())) {
return makeCast(typeFactory.createTypeWithNullability(field.getType(), true), field, true);
}
return field;
} | 3.26 |
flink_FlinkRexBuilder_toSarg_rdh | /**
* Converts a list of expressions to a search argument, or returns null if not possible.
*
* <p>Copied from the {@link RexBuilder} to fix the {@link RexBuilder#makeIn}.
*/
@SuppressWarnings("UnstableApiUsage")
private static <C extends Comparable<C>>
Sarg<C> toSarg(Class<C> clazz, List<? extends RexNode> ranges, boolean containsNull) {
if (ranges.isEmpty()) {
// Cannot convert an empty list to a Sarg (by this interface, at least)
// because we use the type of the first element.
return null;
}
final RangeSet<C> rangeSet = TreeRangeSet.create();
for (RexNode range : ranges) {
final C value = toComparable(clazz, range);
if (value == null) {
return null;
}
rangeSet.add(Range.singleton(value));
}
return Sarg.of(containsNull, rangeSet);
} | 3.26 |
flink_FlinkRexBuilder_m1_rdh | /**
* Convert the conditions into the {@code IN} and fix [CALCITE-4888]: Unexpected {@link RexNode}
* when call {@link RelBuilder#in} to create an {@code IN} predicate with a list of varchar
* literals which have different length in {@link RexBuilder#makeIn}.
*
* <p>The bug exists because the original implementation doesn't take {@link FlinkTypeSystem#shouldConvertRaggedUnionTypesToVarying} into consideration. When this is
* true, the behaviour should not pad chars. Please see
* https://issues.apache.org/jira/browse/CALCITE-4590 and
* https://issues.apache.org/jira/browse/CALCITE-2321. Please refer to {@code org.apache.calcite.rex.RexSimplify.RexSargBuilder#getType} for the correct behaviour.
*
* <p>Once CALCITE-4888 is fixed, this method (and related methods) should be removed.
*/
@Override
@SuppressWarnings("unchecked")
public RexNode m1(RexNode arg, List<? extends RexNode> ranges) {
if (areAssignable(arg, ranges)) {
// Fix calcite doesn't check literal whether is NULL here
List<RexNode> rangeWithoutNull = new ArrayList<>();
boolean v3 = false;
for (RexNode node : ranges) {
if (isNull(node)) {
v3 = true;
} else {
rangeWithoutNull.add(node);
}
}
final Sarg sarg = toSarg(Comparable.class, rangeWithoutNull, v3);
if (sarg != null) {
List<RelDataType> v6 = Util.distinctList(ranges.stream().map(RexNode::getType).collect(Collectors.toList()));
RelDataType commonType = getTypeFactory().leastRestrictive(v6);
return makeCall(SqlStdOperatorTable.SEARCH, arg, makeSearchArgumentLiteral(sarg, commonType));
}
}
return RexUtil.composeDisjunction(this, ranges.stream().map(r -> makeCall(SqlStdOperatorTable.EQUALS, arg, r)).collect(Util.toImmutableList()));
} | 3.26 |
flink_ResourceReconcileResult_needReconcile_rdh | /**
* Returns whether the cluster resources need to be reconciled.
*
* @return True if the cluster resources need to be reconciled, otherwise false.
*/
public boolean needReconcile() {
return ((pendingTaskManagersToRelease.size() > 0) || (taskManagersToRelease.size() > 0)) || (pendingTaskManagersToAllocate.size() > 0);
} | 3.26 |
flink_IteratorResultIterator_next_rdh | // -------------------------------------------------------------------------
// Result Iterator Methods
// -------------------------------------------------------------------------
@Nullable
@Override
public RecordAndPosition<E> next() {
if (records.hasNext()) {
recordAndPosition.setNext(records.next());
return recordAndPosition;
} else {
return null;
}
} | 3.26 |
flink_TaskConfig_setFilehandlesDriver_rdh | // --------------------------------------------------------------------------------------------
public void setFilehandlesDriver(int filehandles) {
if (filehandles < 2) {
throw new IllegalArgumentException();
}
this.config.setInteger(FILEHANDLES_DRIVER, filehandles);
} | 3.26 |
flink_TaskConfig_setNumberOfIterations_rdh | // --------------------------------------------------------------------------------------------
// Iterations
// --------------------------------------------------------------------------------------------
public void setNumberOfIterations(int numberOfIterations) {
if (numberOfIterations <= 0) {
throw new IllegalArgumentException();
}
this.config.setInteger(NUMBER_OF_ITERATIONS, numberOfIterations);
} | 3.26 |
flink_TaskConfig_setInputLocalStrategy_rdh | // --------------------------------------------------------------------------------------------
// Inputs
// --------------------------------------------------------------------------------------------
public void setInputLocalStrategy(int inputNum, LocalStrategy strategy) {
this.config.setInteger(INPUT_LOCAL_STRATEGY_PREFIX + inputNum, strategy.ordinal());
} | 3.26 |
flink_TaskConfig_setImplicitConvergenceCriterion_rdh | /**
* Sets the default convergence criterion of a {@link DeltaIteration}
*
* @param aggregatorName
* @param convCriterion
*/
public void setImplicitConvergenceCriterion(String aggregatorName, ConvergenceCriterion<?> convCriterion) {
try {
InstantiationUtil.writeObjectToConfig(convCriterion, this.config, ITERATION_IMPLICIT_CONVERGENCE_CRITERION);
} catch (IOException e) {
throw new RuntimeException("Error while writing the implicit convergence criterion object to the task configuration.");
}
this.config.setString(ITERATION_IMPLICIT_CONVERGENCE_CRITERION_AGG_NAME, aggregatorName);
} | 3.26 |
flink_TaskConfig_setTypeSerializerFactory_rdh | // --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
private void setTypeSerializerFactory(TypeSerializerFactory<?> factory, String classNameKey, String parametersPrefix) { // sanity check the factory type
InstantiationUtil.checkForInstantiation(factory.getClass());
// store the type
this.config.setString(classNameKey, factory.getClass().getName());
// store the parameters
final DelegatingConfiguration parameters = new DelegatingConfiguration(this.config, parametersPrefix);
factory.writeParametersToConfig(parameters);
} | 3.26 |
flink_TaskConfig_setDriver_rdh | // --------------------------------------------------------------------------------------------
// Driver
// --------------------------------------------------------------------------------------------
public void setDriver(@SuppressWarnings("rawtypes")
Class<? extends Driver> driver) {
this.config.setString(DRIVER_CLASS, driver.getName());
} | 3.26 |
flink_TaskConfig_getNumberOfChainedStubs_rdh | // --------------------------------------------------------------------------------------------
public int getNumberOfChainedStubs() {
return this.config.getInteger(CHAINING_NUM_STUBS, 0);
} | 3.26 |
flink_TaskConfig_setSpillingThresholdDriver_rdh | // --------------------------------------------------------------------------------------------
public void setSpillingThresholdDriver(float threshold) {
if ((threshold < 0.0F) || (threshold > 1.0F)) {
throw new IllegalArgumentException();
}
this.config.setFloat(SORT_SPILLING_THRESHOLD_DRIVER, threshold);
} | 3.26 |
flink_TaskConfig_getConfiguration_rdh | /**
* Gets the configuration that holds the actual values encoded.
*
* @return The configuration that holds the actual values
*/
public Configuration getConfiguration() {
return this.config;
} | 3.26 |
flink_TaskConfig_setRelativeMemoryDriver_rdh | // --------------------------------------------------------------------------------------------
// Parameters to configure the memory and I/O behavior
// --------------------------------------------------------------------------------------------
public void setRelativeMemoryDriver(double relativeMemorySize) {
this.config.setDouble(MEMORY_DRIVER, relativeMemorySize);
} | 3.26 |
flink_TaskConfig_setTaskName_rdh | // --------------------------------------------------------------------------------------------
// User Code
// --------------------------------------------------------------------------------------------
public void setTaskName(String name) {
if (name != null) {
this.config.setString(TASK_NAME, name);
}
} | 3.26 |
flink_TaskConfig_addOutputShipStrategy_rdh | // --------------------------------------------------------------------------------------------
public void addOutputShipStrategy(ShipStrategyType strategy) {
final int outputCnt = this.config.getInteger(OUTPUTS_NUM, 0);
this.config.setInteger(OUTPUT_SHIP_STRATEGY_PREFIX + outputCnt, strategy.ordinal());
this.config.setInteger(OUTPUTS_NUM, outputCnt + 1);
} | 3.26 |
flink_MetadataV3Serializer_serializeStreamStateHandle_rdh | // ------------------------------------------------------------------------
@VisibleForTesting
public static void serializeStreamStateHandle(StreamStateHandle stateHandle, DataOutputStream dos) throws IOException {
MetadataV2V3SerializerBase.serializeStreamStateHandle(stateHandle, dos);
} | 3.26 |
flink_MetadataV3Serializer_serializeOperatorState_rdh | // version-specific serialization formats
// ------------------------------------------------------------------------
@Override
protected void serializeOperatorState(OperatorState operatorState, DataOutputStream dos) throws IOException {
// Operator ID
dos.writeLong(operatorState.getOperatorID().getLowerPart());
dos.writeLong(operatorState.getOperatorID().getUpperPart());
// Parallelism
dos.writeInt(operatorState.getParallelism());
dos.writeInt(operatorState.getMaxParallelism());
// Coordinator state
serializeStreamStateHandle(operatorState.getCoordinatorState(), dos);
// Sub task states
if (operatorState.isFullyFinished()) {
dos.writeInt(-1);
} else {
final Map<Integer, OperatorSubtaskState> subtaskStateMap = operatorState.getSubtaskStates();
dos.writeInt(subtaskStateMap.size());
for (Map.Entry<Integer, OperatorSubtaskState> entry : subtaskStateMap.entrySet()) {
boolean isFinished = entry.getValue().isFinished();
serializeSubtaskIndexAndFinishedState(entry.getKey(), isFinished, dos);
if (!isFinished) {
serializeSubtaskState(entry.getValue(), dos);
}
}
}
} | 3.26 |
flink_MetadataV3Serializer_serialize_rdh | // ------------------------------------------------------------------------
// (De)serialization entry points
// ------------------------------------------------------------------------
@Override
public void serialize(CheckpointMetadata checkpointMetadata, DataOutputStream dos) throws IOException {
INSTANCE.serializeMetadata(checkpointMetadata, dos);
} | 3.26 |
flink_SplitReader_seekToRow_rdh | /**
* Seek to a particular row number.
*/
default void seekToRow(long rowCount, RowData reuse) throws IOException {
for (int i = 0; i < rowCount; i++) {
boolean end = reachedEnd();
if (end) {
throw new RuntimeException("Seek too many rows.");
}
nextRecord(reuse);
}
} | 3.26 |
flink_SingleInputNode_accept_rdh | // Miscellaneous
// --------------------------------------------------------------------------------------------
@Override
public void accept(Visitor<OptimizerNode> visitor) {
if (visitor.preVisit(this)) {
if (getPredecessorNode() != null) {
getPredecessorNode().accept(visitor);
} else {
throw new CompilerException();
}
for (DagConnection connection : getBroadcastConnections()) {
connection.getSource().accept(visitor);
}
visitor.postVisit(this);
}
} | 3.26 |
flink_SingleInputNode_getOperator_rdh | // --------------------------------------------------------------------------------------------
@Override
public SingleInputOperator<?, ?, ?> getOperator() {
return ((SingleInputOperator<?, ?, ?>) (super.getOperator()));
} | 3.26 |
flink_SingleInputNode_computeUnclosedBranchStack_rdh | // --------------------------------------------------------------------------------------------
// Branch Handling
// --------------------------------------------------------------------------------------------
@Override
public void computeUnclosedBranchStack() {
if (this.openBranches != null) {
return;
}
addClosedBranches(getPredecessorNode().closedBranchingNodes);
List<UnclosedBranchDescriptor> fromInput = getPredecessorNode().getBranchesForParent(this.inConn);
// handle the data flow branching for the broadcast inputs
List<UnclosedBranchDescriptor> result = computeUnclosedBranchStackForBroadcastInputs(fromInput);
this.openBranches = ((result == null) || result.isEmpty()) ? Collections.<UnclosedBranchDescriptor>emptyList() : result;
} | 3.26 |
flink_SingleInputNode_setIncomingConnection_rdh | /**
* Sets the connection through which this node receives its input.
*
* @param inConn
* The input connection to set.
*/
public void setIncomingConnection(DagConnection inConn) {
this.inConn = inConn;
} | 3.26 |
flink_NumericColumnSummary_m0_rdh | /**
* Null, NaN, and Infinite values are ignored in this calculation.
*
* @see <a href="https://en.wikipedia.org/wiki/Mean">Arithmetic Mean</a>
*/
public Double m0() {
return mean;
} | 3.26 |
flink_NumericColumnSummary_getMissingCount_rdh | /**
* The number of "missing" values where "missing" is defined as null, NaN, or Infinity.
*
* <p>These values are ignored in some calculations like mean, variance, and standardDeviation.
*/
public long getMissingCount() {
return (nullCount + nanCount) + infinityCount;
} | 3.26 |
flink_NumericColumnSummary_getNonNullCount_rdh | /**
* The number of non-null values in this column.
*/
@Override
public long getNonNullCount() {
return (f0 + nanCount) + infinityCount;
} | 3.26 |
flink_JobVertex_isInputVertex_rdh | // --------------------------------------------------------------------------------------------
public boolean isInputVertex() {
return this.inputs.isEmpty();
} | 3.26 |
flink_JobVertex_toString_rdh | // --------------------------------------------------------------------------------------------
@Override
public String toString() {
return ((this.f0 + " (") + this.invokableClassName) + ')';
} | 3.26 |
flink_JobVertex_initializeOnMaster_rdh | // --------------------------------------------------------------------------------------------
/**
* A hook that can be overwritten by subclasses to implement logic that is called by the master
* when the job starts.
*
* @param context
* Provides contextual information for the initialization
* @throws Exception
* The method may throw exceptions which cause the job to fail immediately.
*/
public void initializeOnMaster(InitializeOnMasterContext context) throws Exception {
} | 3.26 |
flink_JobVertex_getMaxParallelism_rdh | /**
* Gets the maximum parallelism for the task.
*
* @return The maximum parallelism for the task.
*/
public int getMaxParallelism() {
return maxParallelism;
} | 3.26 |
flink_JobVertex_getNumberOfInputs_rdh | /**
* Returns the number of inputs.
*
* @return The number of inputs.
*/
public int getNumberOfInputs() { return this.inputs.size();
} | 3.26 |
flink_JobVertex_getInvokableClassName_rdh | /**
* Returns the name of the invokable class which represents the task of this vertex.
*
* @return The name of the invokable class, <code>null</code> if not set.
*/
public String getInvokableClassName() {
return this.invokableClassName;
} | 3.26 |
flink_JobVertex_getInvokableClass_rdh | /**
* Returns the invokable class which represents the task of this vertex.
*
* @param cl
* The classloader used to resolve user-defined classes
* @return The invokable class, <code>null</code> if it is not set
*/
public Class<? extends TaskInvokable> getInvokableClass(ClassLoader cl) {
if (cl == null) {
throw new NullPointerException("The classloader must not be null.");
}
if (invokableClassName == null) {
return null;
}
try {
return Class.forName(invokableClassName, true, cl).asSubclass(TaskInvokable.class);
} catch (ClassNotFoundException e) {
throw new RuntimeException("The user-code class could not be resolved.", e);
} catch (ClassCastException e) {
throw new RuntimeException("The user-code class is no subclass of " + TaskInvokable.class.getName(), e);
}
} | 3.26 |
flink_JobVertex_getName_rdh | /**
* Returns the name of the vertex.
*
* @return The name of the vertex.
*/
public String getName() {
return this.f0;
} | 3.26 |
flink_JobVertex_getOperatorName_rdh | // --------------------------------------------------------------------------------------------
public String getOperatorName() {
return operatorName;
} | 3.26 |
flink_JobVertex_setParallelism_rdh | /**
* Sets the parallelism for the task.
*
* @param parallelism
* The parallelism for the task.
*/
public void setParallelism(int parallelism) {
if ((parallelism < 1) && (parallelism != ExecutionConfig.PARALLELISM_DEFAULT)) {
throw new IllegalArgumentException(("The parallelism must be at least one, or " + ExecutionConfig.PARALLELISM_DEFAULT) + " (unset).");
}
this.parallelism = parallelism;
} | 3.26 |
flink_JobVertex_setStrictlyCoLocatedWith_rdh | /**
* Tells this vertex to strictly co locate its subtasks with the subtasks of the given vertex.
* Strict co-location implies that the n'th subtask of this vertex will run on the same parallel
* computing instance (TaskManager) as the n'th subtask of the given vertex.
*
* <p>NOTE: Co-location is only possible between vertices in a slot sharing group.
*
* <p>NOTE: This vertex must (transitively) depend on the vertex to be co-located with. That
* means that the respective vertex must be a (transitive) input of this vertex.
*
* @param strictlyCoLocatedWith
* The vertex whose subtasks to co-locate this vertex's subtasks
* with.
* @throws IllegalArgumentException
* Thrown, if this vertex and the vertex to co-locate with are
* not in a common slot sharing group.
* @see #setSlotSharingGroup(SlotSharingGroup)
*/
public void setStrictlyCoLocatedWith(JobVertex strictlyCoLocatedWith) {
if ((this.slotSharingGroup == null) || (this.slotSharingGroup != strictlyCoLocatedWith.slotSharingGroup)) {
throw new IllegalArgumentException("Strict co-location requires that both vertices are in the same slot sharing group.");
}
CoLocationGroupImpl thisGroup = this.coLocationGroup;
CoLocationGroupImpl otherGroup = strictlyCoLocatedWith.coLocationGroup;
if (otherGroup == null) {
if (thisGroup == null) {
CoLocationGroupImpl group = new CoLocationGroupImpl(this, strictlyCoLocatedWith);
this.coLocationGroup = group;
strictlyCoLocatedWith.coLocationGroup = group;
} else {
thisGroup.addVertex(strictlyCoLocatedWith);
strictlyCoLocatedWith.coLocationGroup = thisGroup;
}
} else if (thisGroup == null) {
otherGroup.addVertex(this);
this.coLocationGroup = otherGroup;
} else {
// both had yet distinct groups, we need to merge them
thisGroup.mergeInto(otherGroup);
}
} | 3.26 |
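A hedged sketch of how strict co-location might be wired up; the vertex names are made up, and, per the check above, both vertices must first be placed in the same slot sharing group.

```java
// Illustrative only; vertex names are placeholders.
JobVertex head = new JobVertex("iteration head");
JobVertex tail = new JobVertex("iteration tail");
SlotSharingGroup sharingGroup = new SlotSharingGroup();
head.setSlotSharingGroup(sharingGroup);
tail.setSlotSharingGroup(sharingGroup);
// The n'th subtask of 'tail' will run on the same TaskManager as the n'th subtask of 'head'.
tail.setStrictlyCoLocatedWith(head);
```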
flink_JobVertex_getID_rdh | // --------------------------------------------------------------------------------------------
/**
* Returns the ID of this job vertex.
*
* @return The ID of this job vertex
*/
public JobVertexID getID() {
return this.id;
} | 3.26 |
flink_JobVertex_getSlotSharingGroup_rdh | /**
* Gets the slot sharing group that this vertex is associated with. Different vertices in the
* same slot sharing group can run one subtask each in the same slot.
*
* @return The slot sharing group to associate the vertex with
*/
public SlotSharingGroup getSlotSharingGroup() {
if (slotSharingGroup == null) {
// create a new slot sharing group for this vertex if it was in no other slot sharing group.
// this should only happen in testing cases at the moment because the production code path
// will always set a value to it before it is used
setSlotSharingGroup(new SlotSharingGroup());
}
return slotSharingGroup;
} | 3.26 |
flink_JobVertex_setResources_rdh | /**
* Sets the minimum and preferred resources for the task.
*
* @param minResources
* The minimum resource for the task.
* @param preferredResources
* The preferred resource for the task.
*/
public void setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
this.minResources = checkNotNull(minResources);
this.preferredResources = checkNotNull(preferredResources);
} | 3.26 |
flink_JobVertex_finalizeOnMaster_rdh | /**
* A hook that can be overwritten by subclasses to implement logic that is called by the master
* after the job completed.
*
* @param context
* Provides contextual information for the initialization
* @throws Exception
* The method may throw exceptions which cause the job to fail immediately.
*/
public void finalizeOnMaster(FinalizeOnMasterContext context) throws Exception {
} | 3.26 |
flink_JobVertex_getMinResources_rdh | /**
* Gets the minimum resource for the task.
*
* @return The minimum resource for the task.
*/
public ResourceSpec getMinResources() {
return minResources;
} | 3.26 |
flink_JobVertex_getOrCreateResultDataSet_rdh | // --------------------------------------------------------------------------------------------
public IntermediateDataSet getOrCreateResultDataSet(IntermediateDataSetID id, ResultPartitionType partitionType) {
return this.results.computeIfAbsent(id, key -> new IntermediateDataSet(id, partitionType, this));
} | 3.26 |
flink_JobVertex_getConfiguration_rdh | /**
* Returns the vertex's configuration object which can be used to pass custom settings to the
* task at runtime.
*
* @return the vertex's configuration object
*/
public Configuration getConfiguration() {
if (this.configuration == null) {
this.configuration = new Configuration();
}
return this.configuration;
} | 3.26 |
flink_JobVertex_setSlotSharingGroup_rdh | /**
* Associates this vertex with a slot sharing group for scheduling. Different vertices in the
* same slot sharing group can run one subtask each in the same slot.
*
* @param grp
* The slot sharing group to associate the vertex with.
*/
public void setSlotSharingGroup(SlotSharingGroup grp) {
checkNotNull(grp);
if (this.slotSharingGroup != null) {
this.slotSharingGroup.removeVertexFromGroup(this.getID());
}
grp.addVertexToGroup(this.getID());
this.slotSharingGroup = grp;
} | 3.26 |
flink_JobVertex_getPreferredResources_rdh | /**
* Gets the preferred resource for the task.
*
* @return The preferred resource for the task.
*/
public ResourceSpec getPreferredResources() {
return preferredResources;
} | 3.26 |
flink_JobVertex_getParallelism_rdh | /**
* Gets the parallelism of the task.
*
* @return The parallelism of the task.
*/
public int getParallelism() {
return parallelism;
} | 3.26 |
flink_RocksDBStateDownloader_transferAllStateDataToDirectory_rdh | /**
* Transfer all state data to the target directory, as specified in the download requests.
*
* @param downloadRequests
* the list of downloads.
* @throws Exception
* If anything about the download goes wrong.
*/
public void transferAllStateDataToDirectory(Collection<StateHandleDownloadSpec> downloadRequests, CloseableRegistry closeableRegistry) throws Exception {
// We use this closer for fine-grained shutdown of all parallel downloading.
CloseableRegistry internalCloser = new CloseableRegistry();
// Make sure we also react to external close signals.
closeableRegistry.registerCloseable(internalCloser);
try {
List<CompletableFuture<Void>> v1 = transferAllStateDataToDirectoryAsync(downloadRequests, internalCloser).collect(Collectors.toList());
// Wait until either all futures completed successfully or one failed exceptionally.
FutureUtils.completeAll(v1).get();
} catch (Exception e) {
downloadRequests.stream().map(StateHandleDownloadSpec::getDownloadDestination).map(Path::toFile).forEach(FileUtils::deleteDirectoryQuietly);
// Error reporting
Throwable throwable = ExceptionUtils.stripExecutionException(e);
throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
if (throwable instanceof IOException) {
throw ((IOException) (throwable));
} else {
throw new FlinkRuntimeException("Failed to download data for state handles.", e);
}
} finally {
// Unregister and close the internal closer.
if (closeableRegistry.unregisterCloseable(internalCloser)) {
IOUtils.closeQuietly(internalCloser);
}
}
} | 3.26 |
flink_RocksDBStateDownloader_downloadDataForStateHandle_rdh | /**
* Copies the file from a single state handle to the given path.
*/
private void downloadDataForStateHandle(Path restoreFilePath, StreamStateHandle remoteFileHandle, CloseableRegistry closeableRegistry) throws IOException {
if (closeableRegistry.isClosed()) {
return;
}
try {
FSDataInputStream inputStream = remoteFileHandle.openInputStream();
closeableRegistry.registerCloseable(inputStream);
Files.createDirectories(restoreFilePath.getParent());
OutputStream outputStream = Files.newOutputStream(restoreFilePath);
closeableRegistry.registerCloseable(outputStream);
byte[] buffer = new byte[8 * 1024];
while (true) {
int numBytes = inputStream.read(buffer);
if (numBytes == (-1)) {
break;
}
outputStream.write(buffer, 0, numBytes);
}
closeableRegistry.unregisterAndCloseAll(outputStream, inputStream);
} catch (Exception ex) {
// Quickly close all open streams. This also stops all concurrent downloads because they
// are registered with the same registry.
IOUtils.closeQuietly(closeableRegistry);
throw new IOException(ex);
}
} | 3.26 |
flink_RocksDBStateDownloader_transferAllStateDataToDirectoryAsync_rdh | /**
* Asynchronously runs the specified download requests on executorService.
*/
private Stream<CompletableFuture<Void>> transferAllStateDataToDirectoryAsync(Collection<StateHandleDownloadSpec> handleWithPaths, CloseableRegistry closeableRegistry) {
return handleWithPaths.stream().flatMap(downloadRequest -> // Take all files from shared and private state.
// Create one runnable for each StreamStateHandle
Streams.concat(downloadRequest.getStateHandle().getSharedState().stream(), downloadRequest.getStateHandle().getPrivateState().stream()).map(entry ->
{
String localPath = entry.getLocalPath();
StreamStateHandle remoteFileHandle = entry.getHandle();
Path downloadDest = downloadRequest.getDownloadDestination().resolve(localPath);
return ThrowingRunnable.unchecked(() -> downloadDataForStateHandle(downloadDest, remoteFileHandle, closeableRegistry));
})).map(runnable -> CompletableFuture.runAsync(runnable, executorService));
} | 3.26 |
flink_BroadcastVariableMaterialization_materializeVariable_rdh | // --------------------------------------------------------------------------------------------
public void materializeVariable(MutableReader<?> reader, TypeSerializerFactory<?> serializerFactory, BatchTask<?, ?> referenceHolder) throws MaterializationExpiredException, IOException {
Preconditions.checkNotNull(reader);
Preconditions.checkNotNull(serializerFactory);
Preconditions.checkNotNull(referenceHolder);
final boolean materializer;
// hold the reference lock only while we track references and decide who should be the
// materializer
// that way, other tasks can de-register (in case of failure) while materialization is
// happening
synchronized(references) {
if (disposed) {
throw new MaterializationExpiredException();
}
// sanity check
if (!references.add(referenceHolder)) {
throw new IllegalStateException(String.format("The task %s already holds a reference to the broadcast variable %s.", referenceHolder.getEnvironment().getTaskInfo().getTaskNameWithSubtasks(), key.toString()));
}
materializer = references.size() == 1;
}
try {
@SuppressWarnings("unchecked")
final MutableReader<DeserializationDelegate<T>> typedReader = ((MutableReader<DeserializationDelegate<T>>) (reader));
@SuppressWarnings("unchecked")
final TypeSerializer<T> serializer = ((TypeSerializerFactory<T>) (serializerFactory)).getSerializer();
final ReaderIterator<T> readerIterator = new ReaderIterator<T>(typedReader, serializer);
if (materializer) {
// first one, so we need to materialize;
if (LOG.isDebugEnabled()) {
LOG.debug(("Getting Broadcast Variable (" + key) + ") - First access, materializing.");}
ArrayList<T> data = new ArrayList<T>();
T element;
while ((element = readerIterator.next()) != null) {
data.add(element);
}
synchronized(materializationMonitor) {
this.data = data;
this.materialized = true;
materializationMonitor.notifyAll();
}
if (LOG.isDebugEnabled()) {
LOG.debug(("Materialization of Broadcast Variable (" + key) + ") finished.");
}
} else {
// successor: discard all data and refer to the shared variable
if (LOG.isDebugEnabled()) {
LOG.debug(("Getting Broadcast Variable (" + key) + ") - shared access.");
}
T element = serializer.createInstance();
while ((element = readerIterator.next(element)) != null) {
}
synchronized(materializationMonitor) {
while ((!this.materialized) && (!disposed)) {
materializationMonitor.wait();
}
}
}
} catch (Throwable t) {
// in case of an exception, we need to clean up big time
decrementReferenceIfHeld(referenceHolder);
if (t instanceof IOException) {
throw ((IOException) (t));
} else {
throw new IOException("Materialization of the broadcast variable failed.", t);
}
}
} | 3.26 |
flink_BroadcastVariableMaterialization_getVariable_rdh | // --------------------------------------------------------------------------------------------
public List<T> getVariable() throws InitializationTypeConflictException {
if (!materialized) {
throw new IllegalStateException("The Broadcast Variable has not yet been materialized.");
}
if (disposed) {
throw new IllegalStateException("The Broadcast Variable has been disposed");
}
synchronized(references) {
if (transformed != null) {
if (transformed instanceof List) {
@SuppressWarnings("unchecked")
List<T> casted = ((List<T>) (transformed));
return casted;
} else {
throw new InitializationTypeConflictException(transformed.getClass());
}
} else {
return data;
}
}
} | 3.26 |
flink_SplitFetcherManager_getNumAliveFetchers_rdh | // -----------------------
@VisibleForTesting
public int getNumAliveFetchers() {
return fetchers.size();
} | 3.26 |
flink_SplitFetcherManager_createSplitFetcher_rdh | /**
* Synchronize method to ensure no fetcher is created after the split fetcher manager has
* closed.
*
* @return the created split fetcher.
* @throws IllegalStateException
* if the split fetcher manager has closed.
*/
protected synchronized SplitFetcher<E, SplitT> createSplitFetcher() {
if (closed) {
throw new IllegalStateException("The split fetcher manager has closed.");
}
// Create SplitReader.
SplitReader<E, SplitT> splitReader = splitReaderFactory.get();
int fetcherId = fetcherIdGenerator.getAndIncrement();
SplitFetcher<E, SplitT> splitFetcher = new SplitFetcher<>(fetcherId, elementsQueue, splitReader, errorHandler, () -> {
fetchers.remove(fetcherId);
// We need this to synchronize status of fetchers to concurrent partners as
// ConcurrentHashMap's aggregate status methods including size, isEmpty, and
// containsValue are not designed for program control.
elementsQueue.notifyAvailable();
}, this.splitFinishedHook, allowUnalignedSourceSplits);
fetchers.put(fetcherId, splitFetcher);
return splitFetcher;
} | 3.26 |
flink_SplitFetcherManager_close_rdh | /**
* Close the split fetcher manager.
*
* @param timeoutMs
* the max time in milliseconds to wait.
* @throws Exception
* when failed to close the split fetcher manager.
*/
public synchronized void close(long timeoutMs) throws Exception {
closed = true;
fetchers.values().forEach(SplitFetcher::shutdown);
executors.shutdown();
if (!executors.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS)) {
LOG.warn("Failed to close the source reader in {} ms. There are still {} split fetchers running", timeoutMs, fetchers.size());
}
} | 3.26 |
flink_SplitFetcherManager_maybeShutdownFinishedFetchers_rdh | /**
* Check and shutdown the fetchers that have completed their work.
*
* @return true if all the fetchers have completed the work, false otherwise.
*/
public boolean maybeShutdownFinishedFetchers() {
Iterator<Map.Entry<Integer, SplitFetcher<E, SplitT>>> iter = fetchers.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<Integer, SplitFetcher<E, SplitT>> entry = iter.next();
SplitFetcher<E, SplitT> fetcher = entry.getValue();
if (fetcher.isIdle()) {
LOG.info("Closing splitFetcher {} because it is idle.", entry.getKey());
fetcher.shutdown();
iter.remove();
}
}
return fetchers.isEmpty();
} | 3.26 |
flink_PartitionTransformation_getExchangeMode_rdh | /**
* Returns the {@link StreamExchangeMode} of this {@link PartitionTransformation}.
*/
public StreamExchangeMode getExchangeMode() {
return exchangeMode;
} | 3.26 |
flink_MapDataUtil_convertToJavaMap_rdh | /**
* Converts a {@link MapData} into Java {@link Map}, the keys and values of the Java map still
* holds objects of internal data structures.
*/
public static Map<Object, Object> convertToJavaMap(MapData map, LogicalType keyType, LogicalType valueType) {
ArrayData keyArray = map.keyArray();
ArrayData valueArray = map.valueArray();
Map<Object, Object> javaMap = new HashMap<>();
ArrayData.ElementGetter keyGetter = ArrayData.createElementGetter(keyType);
ArrayData.ElementGetter valueGetter = ArrayData.createElementGetter(valueType);
for (int i = 0; i < map.size(); i++) {
Object key = keyGetter.getElementOrNull(keyArray, i);
Object value = valueGetter.getElementOrNull(valueArray, i);
javaMap.put(key, value);
}
return javaMap;
} | 3.26 |
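A hedged usage sketch of `convertToJavaMap`, assuming Flink's internal `GenericMapData` and `StringData` structures and the `VarCharType`/`IntType` logical types; the keys and values are made up.

```java
// Illustrative only: build an internal MapData and convert it back to a Java map.
Map<StringData, Integer> internal = new HashMap<>();
internal.put(StringData.fromString("a"), 1);
internal.put(StringData.fromString("b"), 2);
MapData mapData = new GenericMapData(internal);
Map<Object, Object> javaMap = MapDataUtil.convertToJavaMap(mapData, new VarCharType(), new IntType());
// javaMap still holds internal structures, e.g. StringData keys.
```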
flink_PythonConnectorUtils_createFirstColumnTopicSelector_rdh | /**
* Creates a selector that returns the first column of a row, and cast it to {@code clazz}.
* {@code T} should be a sub interface of {@link Function}, which accepts a {@link Row}.
*
* @param clazz
* The desired selector class to cast to, e.g. TopicSelector.class for Kafka.
* @param <T>
* An interface
*/
@SuppressWarnings("unchecked")
public static <T> T createFirstColumnTopicSelector(Class<T> clazz) {
return ((T) (Proxy.newProxyInstance(clazz.getClassLoader(), new Class[]{ clazz }, new FirstColumnTopicSelectorInvocationHandler())));
} | 3.26 |
flink_RuntimeUDFContext_setBroadcastVariable_rdh | // --------------------------------------------------------------------------------------------
public void setBroadcastVariable(String name, List<?> value) {
this.uninitializedBroadcastVars.put(name, value);
this.initializedBroadcastVars.remove(name);
} | 3.26 |
flink_StreamArrowPythonGroupWindowAggregateFunctionOperator_cleanupTime_rdh | /**
* Returns the cleanup time for a window, which is {@code window.maxTimestamp +
* allowedLateness}. In case this leads to a value greater than {@link Long#MAX_VALUE}, then a
* cleanup time of {@link Long#MAX_VALUE} is returned.
*
* @param window
* the window whose cleanup time we are computing.
*/
private long cleanupTime(W window) {
if (windowAssigner.isEventTime()) {
long cleanupTime = window.maxTimestamp() + allowedLateness;
return cleanupTime >= window.maxTimestamp() ? cleanupTime : Long.MAX_VALUE;
} else {
return window.maxTimestamp();
}
} | 3.26 |
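A small worked example of the overflow guard in `cleanupTime` (the values are made up):

```java
// If maxTimestamp + allowedLateness overflows, the guard falls back to Long.MAX_VALUE.
long maxTimestamp = Long.MAX_VALUE - 5L;
long allowedLateness = 10L;
long cleanup = maxTimestamp + allowedLateness;                        // wraps to a negative value
long effective = cleanup >= maxTimestamp ? cleanup : Long.MAX_VALUE;  // Long.MAX_VALUE
```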
flink_StreamArrowPythonGroupWindowAggregateFunctionOperator_isWindowLate_rdh | /**
* Returns {@code true} if the watermark is after the end timestamp plus the allowed lateness of
* the given window.
*/
private boolean isWindowLate(W window) {
return windowAssigner.isEventTime() && (toEpochMillsForTimer(cleanupTime(window), f0) <= internalTimerService.currentWatermark());
} | 3.26 |
flink_StreamArrowPythonGroupWindowAggregateFunctionOperator_registerCleanupTimer_rdh | /**
* Registers a timer to cleanup the content of the window.
*
* @param window
* the window whose state to discard
*/
private void registerCleanupTimer(W window) {
long cleanupTime = toEpochMillsForTimer(cleanupTime(window), f0);
if (cleanupTime == Long.MAX_VALUE) {
// don't set a GC timer for "end of time"
return;
}
if (windowAssigner.isEventTime()) {
triggerContext.registerEventTimeTimer(cleanupTime);
} else {
triggerContext.registerProcessingTimeTimer(cleanupTime);
}
} | 3.26 |
flink_HadoopUtils_paramsFromGenericOptionsParser_rdh | /**
* Returns {@link ParameterTool} for the arguments parsed by {@link GenericOptionsParser}.
*
* @param args
* Input array arguments. It should be parsable by {@link GenericOptionsParser}
* @return A {@link ParameterTool}
* @throws IOException
* If arguments cannot be parsed by {@link GenericOptionsParser}
* @see GenericOptionsParser
*/
public static ParameterTool paramsFromGenericOptionsParser(String[] args) throws IOException {
Option[] options = new GenericOptionsParser(args).getCommandLine().getOptions();
Map<String, String> map = new HashMap<String, String>();
for (Option option : options) {
String[] split = option.getValue().split("=");
map.put(split[0], split[1]);
}
return ParameterTool.fromMap(map);
} | 3.26 |
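A hedged usage sketch of `paramsFromGenericOptionsParser`, assuming arguments in the `-D key=value` form that `GenericOptionsParser` understands; the property names are placeholders.

```java
// Illustrative only; the property names are made up.
String[] args = {"-D", "fs.defaultFS=hdfs://namenode:8020", "-D", "mapreduce.job.queuename=default"};
ParameterTool params = HadoopUtils.paramsFromGenericOptionsParser(args);
String queue = params.get("mapreduce.job.queuename"); // "default"
```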
flink_FieldParser_getParserForType_rdh | /**
* Gets the parser for the type specified by the given class. Returns null, if no parser for
* that class is known.
*
* @param type
* The class of the type to get the parser for.
* @return The parser for the given type, or null, if no such parser exists.
*/
public static <T> Class<FieldParser<T>> getParserForType(Class<T> type) {
Class<? extends FieldParser<?>> parser = PARSERS.get(type);
if (parser == null) {
return null;
} else {
@SuppressWarnings("unchecked")
Class<FieldParser<T>> typedParser = ((Class<FieldParser<T>>) (parser));
return typedParser;
}
} | 3.26 |
flink_FieldParser_endsWithDelimiter_rdh | /**
* Checks if the given bytes ends with the delimiter at the given end position.
*
* @param bytes
* The byte array that holds the value.
* @param endPos
* The index of the byte array where the check for the delimiter ends.
* @param delim
* The delimiter to check for.
* @return true if a delimiter ends at the given end position, false otherwise.
*/
public static final boolean endsWithDelimiter(byte[] bytes, int endPos, byte[] delim) {
if (endPos < (delim.length - 1)) {
return false;
}
for (int pos = 0; pos < delim.length; ++pos) {
if (delim[pos] != bytes[((endPos - delim.length) + 1) + pos]) {
return false;
}
}
return true;
} | 3.26 |
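A small usage sketch of `endsWithDelimiter`; the byte arrays are made up.

```java
byte[] bytes = "abc;".getBytes();
byte[] delim = ";".getBytes();
boolean ends = FieldParser.endsWithDelimiter(bytes, 3, delim);    // true: ';' ends at index 3
boolean notEnds = FieldParser.endsWithDelimiter(bytes, 2, delim); // false: 'c' is at index 2
```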
flink_FieldParser_nextStringEndPos_rdh | /**
* Returns the end position of a string. Sets the error state if the column is empty.
*
* @return the end position of the string or -1 if an error occurred
*/
protected final int nextStringEndPos(byte[] bytes, int startPos, int limit, byte[] delimiter) {
int endPos = startPos;
final int delimLimit = (limit - delimiter.length) + 1;
while (endPos < limit) {
if ((endPos < delimLimit) && delimiterNext(bytes, endPos, delimiter)) {
break;
}
endPos++;
}
if (endPos == startPos) {
setErrorState(ParseErrorState.EMPTY_COLUMN);
return -1;
}
return endPos;
} | 3.26 |
flink_FieldParser_setCharset_rdh | /**
* Sets the character set used for this parser.
*
* @param charset
* charset used for this parser.
*/
public void setCharset(Charset charset) {
this.charset = charset;
} | 3.26 |
flink_FieldParser_nextStringLength_rdh | /**
* Returns the length of a string. Throws an exception if the column is empty.
*
* @return the length of the string
*/
protected static final int nextStringLength(byte[] bytes, int startPos, int length, char delimiter) {
if (length <= 0) {
throw new IllegalArgumentException("Invalid input: Empty string");
}
int limitedLength = 0;
final byte delByte = ((byte) (delimiter));
while ((limitedLength < length) && (bytes[startPos + limitedLength] != delByte)) {
limitedLength++;
}
return limitedLength;
} | 3.26 |
flink_FieldParser_delimiterNext_rdh | /**
* Checks if the delimiter starts at the given start position of the byte array.
*
* <p>Attention: This method assumes that enough characters follow the start position for the
* delimiter check!
*
* @param bytes
* The byte array that holds the value.
* @param startPos
* The index of the byte array where the check for the delimiter starts.
* @param delim
* The delimiter to check for.
* @return true if a delimiter starts at the given start position, false otherwise.
*/
public static final boolean delimiterNext(byte[] bytes, int startPos, byte[] delim) {
for (int pos = 0; pos < delim.length; pos++) {
// check each position
if (delim[pos] != bytes[startPos + pos]) {
return false;
}
}
return true;
} | 3.26 |
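A matching sketch for `delimiterNext` with a multi-byte delimiter; the values are made up.

```java
byte[] record = "a||b".getBytes();
byte[] delim = "||".getBytes();
boolean atOne = FieldParser.delimiterNext(record, 1, delim);  // true: "||" starts at index 1
boolean atZero = FieldParser.delimiterNext(record, 0, delim); // false: 'a' is at index 0
```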
flink_FieldParser_resetParserState_rdh | /**
* Reset the state of the parser. Called as the very first method inside {@link FieldParser#resetErrorStateAndParse(byte[], int, int, byte[], Object)}; by default it just
* resets its error state.
*/
protected void resetParserState() {
this.errorState = ParseErrorState.NONE;
} | 3.26 |
flink_FieldParser_setErrorState_rdh | /**
* Sets the error state of the parser. Called by subclasses of the parser to set the type of
* error when failing a parse.
*
* @param error
* The error state to set.
*/
protected void setErrorState(ParseErrorState error) {
this.errorState = error;
} | 3.26 |
flink_FieldParser_getCharset_rdh | /**
* Gets the character set used for this parser.
*
* @return the charset used for this parser.
*/
public Charset getCharset() {
return this.charset;
} | 3.26 |
flink_HiveParserASTNodeOrigin_getObjectType_rdh | /**
*
* @return the type of the object from which an HiveParserASTNode originated, e.g. "view".
*/
public String getObjectType() {
return objectType;
} | 3.26 |
flink_HiveParserASTNodeOrigin_getObjectName_rdh | /**
*
* @return the name of the object from which an HiveParserASTNode originated, e.g. "v".
*/
public String getObjectName() {
return objectName;
} | 3.26 |
flink_WindowValueState_value_rdh | /**
* Returns the current value for the state under current key and the given window.
*/
public RowData value(W window) throws IOException {
windowState.setCurrentNamespace(window);
return windowState.value();
} | 3.26 |
flink_WindowValueState_update_rdh | /**
* Update the state with the given value under current key and the given window.
*
* @param window
* the window namespace.
* @param value
* the new value for the state.
*/
public void update(W window, RowData value) throws IOException {
windowState.setCurrentNamespace(window);
windowState.update(value);
} | 3.26 |
flink_HighAvailabilityServicesFactory_createClientHAServices_rdh | /**
* Create a {@link ClientHighAvailabilityServices} instance.
*
* @param configuration
* Flink configuration
* @return instance of {@link ClientHighAvailabilityServices}
* @throws Exception
* when ClientHAServices cannot be created
*/
default ClientHighAvailabilityServices createClientHAServices(Configuration configuration) throws Exception {
return createHAServices(configuration, UnsupportedOperationExecutor.INSTANCE);
} | 3.26 |
flink_MasterHooks_triggerHook_rdh | // ------------------------------------------------------------------------
// checkpoint triggering
// ------------------------------------------------------------------------
/**
* Trigger master hook and return a completable future with state.
*
* @param hook
* The master hook given
* @param checkpointId
* The checkpoint ID of the triggering checkpoint
* @param timestamp
* The (informational) timestamp for the triggering checkpoint
* @param executor
* An executor that can be used for asynchronous I/O calls
* @param <T>
* The type of data produced by the hook
* @return the completable future with state
*/
public static <T> CompletableFuture<MasterState> triggerHook(MasterTriggerRestoreHook<T> hook, long checkpointId, long timestamp, Executor executor) {
final String id = hook.getIdentifier();
final SimpleVersionedSerializer<T> serializer = hook.createCheckpointDataSerializer();
try {
// call the hook!
final CompletableFuture<T> resultFuture = hook.triggerCheckpoint(checkpointId, timestamp, executor);
if (resultFuture == null) {
return CompletableFuture.completedFuture(null);
}
return resultFuture.thenApply(result -> {
// if the result of the future is not null, return it as state
if (result == null) {
return null;
} else if (serializer != null) {
try {
final int version = serializer.getVersion();
final byte[] bytes = serializer.serialize(result);
return new MasterState(id, bytes, version);
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
throw new CompletionException(new FlinkException(("Failed to serialize state of master hook '" + id) + '\'', t));
}
} else {
throw new CompletionException(new FlinkException(("Checkpoint hook '" + id) + " is stateful but creates no serializer"));
}
}).exceptionally(throwable -> {
throw new CompletionException(new FlinkException(("Checkpoint master hook '" + id) + "' produced an exception",
throwable.getCause()));
});
} catch (Throwable t) {
return FutureUtils.completedExceptionally(new FlinkException(("Error while triggering checkpoint master hook '" + id) + '\'', t));
}
} | 3.26 |
flink_MasterHooks_reset_rdh | // ------------------------------------------------------------------------
// lifecycle
// ------------------------------------------------------------------------
/**
* Resets the master hooks.
*
* @param hooks
* The hooks to reset
* @throws FlinkException
* Thrown, if the hooks throw an exception.
*/
public static void reset(final Collection<MasterTriggerRestoreHook<?>> hooks, @SuppressWarnings("unused")
final Logger log) throws FlinkException {
for (MasterTriggerRestoreHook<?> hook : hooks) {
final String id = hook.getIdentifier();
try {
hook.reset();
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
throw new FlinkException(("Error while resetting checkpoint master hook '" + id) + '\'', t);
}
}
} | 3.26 |
flink_MasterHooks_wrapHook_rdh | // ------------------------------------------------------------------------
// hook management
// ------------------------------------------------------------------------
/**
* Wraps a hook such that the user-code classloader is applied when the hook is invoked.
*
* @param hook
* the hook to wrap
* @param userClassLoader
* the classloader to use
*/
public static <T> MasterTriggerRestoreHook<T> wrapHook(MasterTriggerRestoreHook<T> hook, ClassLoader userClassLoader) {
return new WrappedMasterHook<>(hook, userClassLoader);
} | 3.26 |
flink_MasterHooks_m0_rdh | // ------------------------------------------------------------------------
// checkpoint restoring
// ------------------------------------------------------------------------
/**
* Calls the restore method given checkpoint master hooks and passes the given master state to
* them where state with a matching name is found.
*
* <p>If state is found and no hook with the same name is found, the method throws an exception,
* unless the {@code allowUnmatchedState} flag is set.
*
* @param masterHooks
* The hooks to call restore on
* @param states
* The state to pass to the hooks
* @param checkpointId
* The checkpoint ID of the restored checkpoint
* @param allowUnmatchedState
* If false, the method fails if not all states are picked up by a
* hook; if true, unmatched state is dropped with a log message.
* @param log
* The logger for log messages
* @throws FlinkException
* Thrown, if the hooks throw an exception, or the state deserialization
* fails.
*/
public static void m0(final Map<String, MasterTriggerRestoreHook<?>> masterHooks, final Collection<MasterState> states, final long checkpointId, final boolean allowUnmatchedState, final Logger log) throws FlinkException {
// early out
if ((((states == null) || states.isEmpty()) || (masterHooks == null)) || masterHooks.isEmpty()) {
log.info("No master state to restore");
return;
}
log.info("Calling master restore hooks");
// collect the hooks
final LinkedHashMap<String, MasterTriggerRestoreHook<?>> allHooks = new LinkedHashMap<>(masterHooks);
// first, deserialize all hook state
final ArrayList<Tuple2<MasterTriggerRestoreHook<?>, Object>> hooksAndStates = new ArrayList<>();
for (MasterState state : states) {
if (state != null) {
final String name = state.name();
final MasterTriggerRestoreHook<?> hook = allHooks.remove(name);
if (hook != null) {
log.debug("Found state to restore for hook '{}'", name);
Object v13 = deserializeState(state, hook);
hooksAndStates.add(new Tuple2<>(hook, v13));
} else if (!allowUnmatchedState) {
throw new IllegalStateException(("Found state '" + state.name()) + "' which is not resumed by any hook.");} else {
log.info("Dropping unmatched state from '{}'", name);
}
}
}
// now that all is deserialized, call the hooks
for (Tuple2<MasterTriggerRestoreHook<?>, Object> hookAndState : hooksAndStates) {
restoreHook(hookAndState.f1, hookAndState.f0, checkpointId);
}
// trigger the remaining hooks without checkpointed state
for (MasterTriggerRestoreHook<?> hook : allHooks.values()) {
restoreHook(null, hook, checkpointId);
}
} | 3.26 |
flink_FieldReferenceLookup_lookupField_rdh | /**
* Tries to resolve {@link FieldReferenceExpression} using given name in underlying inputs.
*
* @param name
* name of field to look for
* @return resolved field reference or empty if could not find field with given name.
* @throws org.apache.flink.table.api.ValidationException
* if the name is ambiguous.
*/
public Optional<FieldReferenceExpression> lookupField(String name) {
List<FieldReference> matchingFields = fieldReferences.stream().map(input -> input.get(name)).filter(Objects::nonNull).collect(toList());
if (matchingFields.size() == 1) {
return Optional.of(matchingFields.get(0).toExpr());
} else if (matchingFields.size() == 0) {
return Optional.empty();
} else {
throw failAmbiguousColumn(name);
}
} | 3.26 |
flink_FieldReferenceLookup_includeExpandedColumn_rdh | // --------------------------------------------------------------------------------------------
// Shared code with SQL validator
// --------------------------------------------------------------------------------------------
public static boolean includeExpandedColumn(Column column, List<ColumnExpansionStrategy> strategies) {
for (ColumnExpansionStrategy strategy : strategies) {
switch (strategy) {
case EXCLUDE_ALIASED_VIRTUAL_METADATA_COLUMNS :
if (isAliasedVirtualMetadataColumn(column)) {
return false;
}
break;
case EXCLUDE_DEFAULT_VIRTUAL_METADATA_COLUMNS :
if (m0(column)) {
return false;
}
break;
default :
throw new UnsupportedOperationException("Unknown column expansion strategy: " + strategy);
}
}
return true;
} | 3.26 |
flink_FieldReferenceLookup_getInputFields_rdh | /**
* Gives matching fields of underlying inputs in order of those inputs and order of fields
* within input.
*
* @return concatenated list of matching fields of all inputs.
*/
public List<FieldReferenceExpression> getInputFields(List<ColumnExpansionStrategy> expansionStrategies) {
return fieldReferences.stream().flatMap(input -> input.values().stream()).filter(fieldRef -> includeExpandedColumn(fieldRef.column, expansionStrategies)).map(FieldReference::toExpr).collect(toList());
} | 3.26 |
flink_CheckpointBarrier_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
return ((int) (((id ^ (id >>> 32)) ^ timestamp) ^ (timestamp >>> 32)));
} | 3.26 |
flink_CheckpointBarrier_write_rdh | // ------------------------------------------------------------------------
// Serialization
// ------------------------------------------------------------------------
//
// These methods are inherited form the generic serialization of AbstractEvent
// but would require the CheckpointBarrier to be mutable. Since all serialization
// for events goes through the EventSerializer class, which has special serialization
// for the CheckpointBarrier, we don't need these methods
//
@Override
public void write(DataOutputView out) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
} | 3.26 |
flink_RpcUtils_getHostname_rdh | /**
* Returns the hostname onto which the given {@link RpcService} has been bound. If the {@link RpcService} has been started in local mode, then the hostname is {@code "localhost"}.
*
* @param rpcService
* to retrieve the hostname for
* @return hostname onto which the given {@link RpcService} has been bound or localhost
*/
public static String getHostname(RpcService rpcService) {
final String rpcServiceAddress = rpcService.getAddress();
return (rpcServiceAddress != null) && rpcServiceAddress.isEmpty() ? "localhost" : rpcServiceAddress;
} | 3.26 |
flink_RpcUtils_extractImplementedRpcGateways_rdh | /**
* Extracts all {@link RpcGateway} interfaces implemented by the given clazz.
*
* @param clazz
* from which to extract the implemented RpcGateway interfaces
* @return A set of all implemented RpcGateway interfaces
*/
public static Set<Class<? extends RpcGateway>> extractImplementedRpcGateways(Class<?> clazz) {
HashSet<Class<? extends RpcGateway>> interfaces = new HashSet<>();
while (clazz != null) {
for (Class<?> interfaze : clazz.getInterfaces()) {
if (RpcGateway.class.isAssignableFrom(interfaze)) {
interfaces.add(((Class<? extends RpcGateway>) (interfaze)));
}
}
clazz = clazz.getSuperclass();
}
return interfaces;
} | 3.26 |
flink_RpcUtils_terminateRpcEndpoint_rdh | /**
* Shuts the given {@link RpcEndpoint}s down and awaits their termination.
*
* @param rpcEndpoints
* to terminate
* @throws ExecutionException
* if a problem occurred
* @throws InterruptedException
* if the operation has been interrupted
*/
@VisibleForTesting
public static void terminateRpcEndpoint(RpcEndpoint... rpcEndpoints) throws ExecutionException, InterruptedException {
terminateAsyncCloseables(Arrays.asList(rpcEndpoints));
} | 3.26 |
flink_RpcUtils_createRemoteRpcService_rdh | /**
* Convenient shortcut for constructing a remote RPC Service that takes care of checking for
* null and empty optionals.
*
* @see RpcSystem#remoteServiceBuilder(Configuration, String, String)
*/
public static RpcService createRemoteRpcService(RpcSystem rpcSystem, Configuration configuration, @Nullable
String externalAddress, String externalPortRange, @Nullable
String bindAddress, @SuppressWarnings("OptionalUsedAsFieldOrParameterType")
Optional<Integer> bindPort) throws Exception {
RpcSystem.RpcServiceBuilder rpcServiceBuilder = rpcSystem.remoteServiceBuilder(configuration, externalAddress, externalPortRange);
if (bindAddress != null) {
rpcServiceBuilder = rpcServiceBuilder.withBindAddress(bindAddress);
}
if (bindPort.isPresent()) {
rpcServiceBuilder = rpcServiceBuilder.withBindPort(bindPort.get());
}
return rpcServiceBuilder.createAndStart();
} | 3.26 |