name | code_snippet | score
---|---|---|
flink_CheckpointConfig_setCheckpointStorage_rdh | /**
* Configures the application to write out checkpoint snapshots to the configured directory. See
* {@link FileSystemCheckpointStorage} for more details on checkpointing to a file system.
*
* @param checkpointDirectory
* The path to write checkpoint metadata to.
* @see #setCheckpointStorage(String)
*/
@PublicEvolving
public void setCheckpointStorage(Path checkpointDirectory) {
Preconditions.checkNotNull(checkpointDirectory, "Checkpoint directory must not be null");
this.storage = new FileSystemCheckpointStorage(checkpointDirectory);
}
/**
*
* @return The {@link CheckpointStorage} that has been configured for the job. Or {@code null} | 3.26 |
flink_CheckpointConfig_setCheckpointInterval_rdh | /**
* Sets the interval in which checkpoints are periodically scheduled.
*
* <p>This setting defines the base interval. Checkpoint triggering may be delayed by the
* settings {@link #setMaxConcurrentCheckpoints(int)} and {@link #setMinPauseBetweenCheckpoints(long)}.
*
* @param checkpointInterval
* The checkpoint interval, in milliseconds.
*/
public void setCheckpointInterval(long checkpointInterval) {
if (checkpointInterval < MINIMAL_CHECKPOINT_TIME) {
throw new IllegalArgumentException(String.format("Checkpoint interval must be larger than or equal to %s ms", MINIMAL_CHECKPOINT_TIME));
}
configuration.set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofMillis(checkpointInterval));
} | 3.26 |
flink_CheckpointConfig_getMaxConcurrentCheckpoints_rdh | /**
* Gets the maximum number of checkpoint attempts that may be in progress at the same time. If
* this value is <i>n</i>, then no checkpoints will be triggered while <i>n</i> checkpoint
* attempts are currently in flight. For the next checkpoint to be triggered, one checkpoint
* attempt would need to finish or expire.
*
* @return The maximum number of concurrent checkpoint attempts.
*/
public int getMaxConcurrentCheckpoints() {
return configuration.get(ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS);
} | 3.26 |
flink_CheckpointConfig_setMinPauseBetweenCheckpoints_rdh | /**
* Sets the minimal pause between checkpointing attempts. This setting defines how soon the
* checkpoint coordinator may trigger another checkpoint after it becomes possible to trigger
* another checkpoint with respect to the maximum number of concurrent checkpoints (see {@link #setMaxConcurrentCheckpoints(int)}).
*
* <p>If the maximum number of concurrent checkpoints is set to one, this setting makes
* effectively sure that a minimum amount of time passes where no checkpoint is in progress at
* all.
*
* @param minPauseBetweenCheckpoints
* The minimal pause before the next checkpoint is triggered.
*/
public void setMinPauseBetweenCheckpoints(long minPauseBetweenCheckpoints) {
    if (minPauseBetweenCheckpoints < 0) {
        throw new IllegalArgumentException("Pause value must be zero or positive");
    }
    configuration.set(ExecutionCheckpointingOptions.MIN_PAUSE_BETWEEN_CHECKPOINTS, Duration.ofMillis(minPauseBetweenCheckpoints));
} | 3.26 |
flink_CheckpointConfig_m0_rdh | /**
* Sets the mode for externalized checkpoint clean-up. Externalized checkpoints will be enabled
* automatically unless the mode is set to {@link ExternalizedCheckpointCleanup#NO_EXTERNALIZED_CHECKPOINTS}.
*
* <p>Externalized checkpoints write their meta data out to persistent storage and are
* <strong>not</strong> automatically cleaned up when the owning job fails or is suspended
* (terminating with job status {@link JobStatus#FAILED} or {@link JobStatus#SUSPENDED}). In
* this case, you have to manually clean up the checkpoint state, both the meta data and actual
* program state.
*
* <p>The {@link ExternalizedCheckpointCleanup} mode defines how an externalized checkpoint
* should be cleaned up on job cancellation. If you choose to retain externalized checkpoints on
* cancellation you have to handle checkpoint clean-up manually when you cancel the job as well
* (terminating with job status {@link JobStatus#CANCELED}).
*
* <p>The target directory for externalized checkpoints is configured via {@link CheckpointingOptions#CHECKPOINTS_DIRECTORY}.
*
* @param cleanupMode
* Externalized checkpoint clean-up behaviour.
*/
@PublicEvolving
public void m0(ExternalizedCheckpointCleanup cleanupMode) {
configuration.set(ExecutionCheckpointingOptions.EXTERNALIZED_CHECKPOINT, cleanupMode);
}
/**
* Sets the mode for externalized checkpoint clean-up. Externalized checkpoints will be enabled
* automatically unless the mode is set to {@link ExternalizedCheckpointCleanup#NO_EXTERNALIZED_CHECKPOINTS}.
*
* <p>Externalized checkpoints write their meta data out to persistent storage and are
* <strong>not</strong> automatically cleaned up when the owning job fails or is suspended
* (terminating with job status {@link JobStatus#FAILED} or {@link JobStatus#SUSPENDED}). In
* this case, you have to manually clean up the checkpoint state, both the meta data and actual
* program state.
*
* <p>The {@link ExternalizedCheckpointCleanup} mode defines how an externalized checkpoint
* should be cleaned up on job cancellation. If you choose to retain externalized checkpoints on
* cancellation you have to handle checkpoint clean-up manually when you cancel the job as well
* (terminating with job status {@link JobStatus#CANCELED}).
*
* <p>The target directory for externalized checkpoints is configured via {@link CheckpointingOptions#CHECKPOINTS_DIRECTORY}.
*
* @param cleanupMode
* Externalized checkpoint clean-up behaviour.
* @deprecated use {@link #setExternalizedCheckpointCleanup(ExternalizedCheckpointCleanup)} | 3.26 |
flink_CheckpointConfig_setMaxConcurrentCheckpoints_rdh | /**
* Sets the maximum number of checkpoint attempts that may be in progress at the same time. If
* this value is <i>n</i>, then no checkpoints will be triggered while <i>n</i> checkpoint
* attempts are currently in flight. For the next checkpoint to be triggered, one checkpoint
* attempt would need to finish or expire.
*
* @param maxConcurrentCheckpoints
* The maximum number of concurrent checkpoint attempts.
*/
public void setMaxConcurrentCheckpoints(int maxConcurrentCheckpoints) {
if (maxConcurrentCheckpoints < 1) {
throw new IllegalArgumentException("The maximum number of concurrent attempts must be at least one.");
}
configuration.set(ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS, maxConcurrentCheckpoints);
} | 3.26 |
flink_CheckpointConfig_setTolerableCheckpointFailureNumber_rdh | /**
* This defines how many consecutive checkpoint failures will be tolerated, before the whole job
* is failed over. The default value is `0`, which means no checkpoint failures will be
* tolerated, and the job will fail on first reported checkpoint failure.
*/
public void setTolerableCheckpointFailureNumber(int tolerableCheckpointFailureNumber) {
if (tolerableCheckpointFailureNumber < 0) {
throw new IllegalArgumentException("The tolerable failure checkpoint number must be non-negative.");
}
configuration.set(ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER, tolerableCheckpointFailureNumber);
} | 3.26 |
flink_Tuple_getTupleClass_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the class corresponding to the tuple of the given arity (dimensions). For example,
* {@code getTupleClass(3)} will return the {@code Tuple3.class}.
*
* @param arity
* The arity of the tuple class to get.
* @return The tuple class with the given arity.
*/
@SuppressWarnings("unchecked")
public static Class<? extends Tuple> getTupleClass(int arity) {
    if ((arity < 0) || (arity > MAX_ARITY)) {
        throw new IllegalArgumentException("The tuple arity must be in [0, " + MAX_ARITY + "].");
    }
    return (Class<? extends Tuple>) CLASSES[arity];
} | 3.26 |
flink_Tuple_newInstance_rdh | // --------------------------------------------------------------------------------------------
// The following lines are generated.
// --------------------------------------------------------------------------------------------
// BEGIN_OF_TUPLE_DEPENDENT_CODE
// GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator.
public static Tuple newInstance(int arity) {
    switch (arity) {
        case 0:
            return Tuple0.INSTANCE;
        case 1:
            return new Tuple1();
        case 2:
            return new Tuple2();
        case 3:
            return new Tuple3();
        case 4:
            return new Tuple4();
        case 5:
            return new Tuple5();
        case 6:
            return new Tuple6();
        case 7:
            return new Tuple7();
        case 8:
            return new Tuple8();
        case 9:
            return new Tuple9();
        case 10:
            return new Tuple10();
        case 11:
            return new Tuple11();
        case 12:
            return new Tuple12();
        case 13:
            return new Tuple13();
        case 14:
            return new Tuple14();
        case 15:
            return new Tuple15();
        case 16:
            return new Tuple16();
        case 17:
            return new Tuple17();
        case 18:
            return new Tuple18();
        case 19:
            return new Tuple19();
        case 20:
            return new Tuple20();
        case 21:
            return new Tuple21();
        case 22:
            return new Tuple22();
        case 23:
            return new Tuple23();
        case 24:
            return new Tuple24();
        case 25:
            return new Tuple25();
        default:
            throw new IllegalArgumentException("The tuple arity must be in [0, " + MAX_ARITY + "].");
    }
} | 3.26 |
flink_FirstValueWithRetractAggFunction_getArgumentDataTypes_rdh | // --------------------------------------------------------------------------------------------
// Planning
// --------------------------------------------------------------------------------------------
@Override
public List<DataType> getArgumentDataTypes() {
return Collections.singletonList(valueDataType);
} | 3.26 |
flink_SqlUniqueSpec_symbol_rdh | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.26 |
flink_InputChannel_increaseBackoff_rdh | /**
* Increases the current backoff and returns whether the operation was successful.
*
* @return <code>true</code>, iff the operation was successful. Otherwise, <code>false</code>.
*/
protected boolean increaseBackoff() {
// Backoff is disabled
if (initialBackoff == 0) {
return false;
}
if (currentBackoff == 0) {
// This is the first time backing off
currentBackoff = initialBackoff;
return true;
        } else if (currentBackoff < maxBackoff) {
currentBackoff = Math.min(currentBackoff * 2, maxBackoff);
return true;
}
// Reached maximum backoff
return false;
} | 3.26 |
flink_InputChannel_notifyChannelNonEmpty_rdh | /**
* Notifies the owning {@link SingleInputGate} that this channel became non-empty.
*
* <p>This is guaranteed to be called only when a Buffer was added to a previously empty input
* channel. The notion of empty is atomically consistent with the flag {@link BufferAndAvailability#moreAvailable()} when polling the next buffer from this channel.
*
* <p><b>Note:</b> When the input channel observes an exception, this method is called
* regardless of whether the channel was empty before. That ensures that the parent InputGate
* will always be notified about the exception.
*/
protected void notifyChannelNonEmpty() {
inputGate.notifyChannelNonEmpty(this);
} | 3.26 |
flink_InputChannel_checkError_rdh | // ------------------------------------------------------------------------
// Error notification
// ------------------------------------------------------------------------
/**
* Checks for an error and rethrows it if one was reported.
*
 * <p>Note: Any {@link PartitionException} instances should not be transformed, so that they
 * always remain visible in the task failure cause.
*/
protected void checkError() throws IOException {
final Throwable t = cause.get();
if (t != null) {
if (t instanceof CancelTaskException) {
throw ((CancelTaskException) (t));
}
        if (t instanceof IOException) {
            throw (IOException) t;
} else {
throw new IOException(t);
}
}
} | 3.26 |
flink_InputChannel_checkpointStarted_rdh | /**
* Called by task thread when checkpointing is started (e.g., any input channel received
* barrier).
*/
public void checkpointStarted(CheckpointBarrier barrier) throws CheckpointException {
} | 3.26 |
flink_InputChannel_setError_rdh | /**
* Atomically sets an error for this channel and notifies the input gate about available data to
* trigger querying this channel by the task thread.
*/
protected void setError(Throwable cause) {
if (this.cause.compareAndSet(null, checkNotNull(cause))) {
// Notify the input gate.
notifyChannelNonEmpty();
    }
} | 3.26 |
flink_InputChannel_notifyRequiredSegmentId_rdh | /**
 * Notifies the upstream of the id of the required segment that should be sent over the netty
 * connection.
 *
 * @param segmentId
 * 		the id of the required segment.
*/
public void notifyRequiredSegmentId(int segmentId) throws IOException {
} | 3.26 |
flink_InputChannel_checkpointStopped_rdh | /**
 * Called by the task thread on cancel/complete to clean up temporary data.
*/
public void checkpointStopped(long checkpointId) {
} | 3.26 |
flink_InputChannel_getChannelIndex_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
/**
* Returns the index of this channel within its {@link SingleInputGate}.
*/
public int getChannelIndex() {
return channelInfo.getInputChannelIdx();
} | 3.26 |
flink_InputChannel_getChannelInfo_rdh | /**
* Returns the info of this channel, which uniquely identifies the channel in respect to its
* operator instance.
*/
public InputChannelInfo getChannelInfo() {
    return channelInfo;
} | 3.26 |
flink_InputChannel_unsynchronizedGetNumberOfQueuedBuffers_rdh | // ------------------------------------------------------------------------
// Metric related method
// ------------------------------------------------------------------------
public int unsynchronizedGetNumberOfQueuedBuffers() {
return 0;
} | 3.26 |
flink_InputChannel_getCurrentBackoff_rdh | // ------------------------------------------------------------------------
// Partition request exponential backoff
// ------------------------------------------------------------------------
/**
* Returns the current backoff in ms.
*/
protected int getCurrentBackoff() {
return currentBackoff <= 0 ? 0 : currentBackoff;
} | 3.26 |
flink_DataType_getConversionClass_rdh | /**
* Returns the corresponding conversion class for representing values. If no conversion class
* was defined manually, the default conversion defined by the logical type is used.
*
* @see LogicalType#getDefaultConversion()
* @return the expected conversion class
*/
public Class<?> getConversionClass() {
return conversionClass;
} | 3.26 |
flink_DataType_getFieldNames_rdh | // --------------------------------------------------------------------------------------------
// Utilities for Common Data Type Transformations
// --------------------------------------------------------------------------------------------
/**
* Returns the first-level field names for the provided {@link DataType}.
*
* <p>Note: This method returns an empty list for every {@link DataType} that is not a composite
* type.
*/
public static List<String> getFieldNames(DataType dataType) {
    final LogicalType type = dataType.getLogicalType();
    if (type.is(LogicalTypeRoot.DISTINCT_TYPE)) {
        return getFieldNames(dataType.getChildren().get(0));
    } else if (isCompositeType(type)) {
        return LogicalTypeChecks.getFieldNames(type);
    }
    return Collections.emptyList();
} | 3.26 |
flink_DataType_getFields_rdh | /**
* Returns an ordered list of fields starting from the provided {@link DataType}.
*
* <p>Note: This method returns an empty list for every {@link DataType} that is not a composite
* type.
*/
public static List<DataTypes.Field> getFields(DataType dataType) {
final List<String> names = getFieldNames(dataType);
    final List<DataType> dataTypes = getFieldDataTypes(dataType);
    return IntStream.range(0, names.size())
            .mapToObj(i -> DataTypes.FIELD(names.get(i), dataTypes.get(i)))
            .collect(Collectors.toList());
} | 3.26 |
flink_DataType_performEarlyClassValidation_rdh | // --------------------------------------------------------------------------------------------
/**
* This method should catch the most common errors. However, another validation is required in
* deeper layers as we don't know whether the data type is used for input or output declaration.
*/
private static <C> Class<C> performEarlyClassValidation(LogicalType logicalType, Class<C> candidate) {
    if (candidate != null
            && !logicalType.supportsInputConversion(candidate)
            && !logicalType.supportsOutputConversion(candidate)) {
        throw new ValidationException(String.format("Logical type '%s' does not support a conversion from or to class '%s'.", logicalType.asSummaryString(), candidate.getName()));
    }
    return candidate;
} | 3.26 |
flink_DataType_getFieldDataTypes_rdh | /**
* Returns the first-level field data types for the provided {@link DataType}.
*
* <p>Note: This method returns an empty list for every {@link DataType} that is not a composite
* type.
*/
public static List<DataType> getFieldDataTypes(DataType dataType) {
final LogicalType type = dataType.getLogicalType();
if (type.is(LogicalTypeRoot.DISTINCT_TYPE)) {
return getFieldDataTypes(dataType.getChildren().get(0));
} else if (isCompositeType(type)) {
return dataType.getChildren();
}
return Collections.emptyList();
} | 3.26 |
flink_BinaryArrayWriter_reset_rdh | /**
* First, reset.
*/
@Override
public void reset() {
this.cursor = fixedSize;
        for (int i = 0; i < nullBitsSizeInBytes; i += 8) {
segment.putLong(i, 0L);
}
this.segment.putInt(0, numElements);
} | 3.26 |
flink_BinaryArrayWriter_createNullSetter_rdh | // --------------------------------------------------------------------------------------------
/**
 * Creates an accessor for setting the elements of an array writer to {@code null} at
 * runtime.
*
* @param elementType
* the element type of the array
 */
public static NullSetter createNullSetter(LogicalType elementType) {
    // ordered by type root definition
    switch (elementType.getTypeRoot()) {
        case CHAR:
        case VARCHAR:
        case BINARY:
        case VARBINARY:
        case DECIMAL:
        case BIGINT:
        case TIMESTAMP_WITHOUT_TIME_ZONE:
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
        case INTERVAL_DAY_TIME:
        case ARRAY:
        case MULTISET:
        case MAP:
        case ROW:
        case STRUCTURED_TYPE:
        case RAW:
            return BinaryArrayWriter::setNullLong;
        case BOOLEAN:
            return BinaryArrayWriter::setNullBoolean;
        case TINYINT:
            return BinaryArrayWriter::setNullByte;
        case SMALLINT:
            return BinaryArrayWriter::setNullShort;
        case INTEGER:
        case DATE:
        case TIME_WITHOUT_TIME_ZONE:
        case INTERVAL_YEAR_MONTH:
            return BinaryArrayWriter::setNullInt;
        case FLOAT:
            return BinaryArrayWriter::setNullFloat;
        case DOUBLE:
            return BinaryArrayWriter::setNullDouble;
        case TIMESTAMP_WITH_TIME_ZONE:
            throw new UnsupportedOperationException();
        case DISTINCT_TYPE:
            return createNullSetter(((DistinctType) elementType).getSourceType());
        case NULL:
        case SYMBOL:
        case UNRESOLVED:
        default:
            throw new IllegalArgumentException();
    }
} | 3.26 |
flink_BinaryArrayWriter_complete_rdh | /**
* Finally, complete write to set real size to row.
*/
@Override
public void complete() {
array.pointTo(segment, 0, cursor);
} | 3.26 |
flink_IntermediateResult_getPartitionById_rdh | /**
* Returns the partition with the given ID.
*
* @param resultPartitionId
* ID of the partition to look up
* @throws NullPointerException
 * If the partition ID is <code>null</code>
* @throws IllegalArgumentException
* Thrown if unknown partition ID
* @return Intermediate result partition with the given ID
 */
public IntermediateResultPartition getPartitionById(IntermediateResultPartitionID resultPartitionId) {
    // Looks up the partition number via the helper map and returns the
    // partition. Currently, this happens infrequently enough that we could
    // consider removing the map and scanning the partitions on every lookup.
    // The lookup (currently) only happens when the producer of an intermediate
    // result cannot be found via its registered execution.
    Integer partitionNumber = partitionLookupHelper.get(checkNotNull(resultPartitionId, "IntermediateResultPartitionID"));
if (partitionNumber != null) {
return partitions[partitionNumber];
} else {
throw new IllegalArgumentException("Unknown intermediate result partition ID " + resultPartitionId);
}
} | 3.26 |
flink_IntermediateResult_getConsumersParallelism_rdh | /**
* Currently, this method is only used to compute the maximum number of consumers. For dynamic
* graph, it should be called before adaptively deciding the downstream consumer parallelism.
*/
int getConsumersParallelism() {
List<JobEdge> consumers = intermediateDataSet.getConsumers();
checkState(!consumers.isEmpty());
InternalExecutionGraphAccessor graph = getProducer().getGraph();
int consumersParallelism = graph.getJobVertex(consumers.get(0).getTarget().getID()).getParallelism();
if (consumers.size() == 1) {
return consumersParallelism;
}
// sanity check, all consumer vertices must have the same parallelism:
// 1. for vertices that are not assigned a parallelism initially (for example, dynamic
// graph), the parallelisms will all be -1 (parallelism not decided yet)
// 2. for vertices that are initially assigned a parallelism, the parallelisms must be the
// same, which is guaranteed at compilation phase
    for (JobVertexID jobVertexID : consumerVertices) {
        checkState(consumersParallelism == graph.getJobVertex(jobVertexID).getParallelism(), "Consumers must have the same parallelism.");
}
return consumersParallelism;
} | 3.26 |
flink_JsonPlanEdge_fromExecEdge_rdh | /**
* Build {@link JsonPlanEdge} from an {@link ExecEdge}.
*/
static JsonPlanEdge fromExecEdge(ExecEdge execEdge) {
return new JsonPlanEdge(execEdge.getSource().getId(), execEdge.getTarget().getId(), execEdge.getShuffle(), execEdge.getExchangeMode());
} | 3.26 |
flink_FloatValue_read_rdh | // --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
this.value = in.readFloat();
} | 3.26 |
flink_FloatValue_getBinaryLength_rdh | // --------------------------------------------------------------------------------------------
@Override
public int getBinaryLength() {
    return 4;
} | 3.26 |
flink_PojoSerializer_createRegisteredSubclassSerializers_rdh | /**
* Creates an array of serializers for provided list of registered subclasses. Order of returned
* serializers will correspond to order of provided subclasses.
*/
private static TypeSerializer<?>[] createRegisteredSubclassSerializers(LinkedHashSet<Class<?>> registeredSubclasses, ExecutionConfig executionConfig) {
final TypeSerializer<?>[] subclassSerializers =
new TypeSerializer[registeredSubclasses.size()];
int i = 0;
for (Class<?> registeredClass : registeredSubclasses) {
subclassSerializers[i] =
TypeExtractor.createTypeInfo(registeredClass).createSerializer(executionConfig);
i++;
}
return subclassSerializers;
} | 3.26 |
flink_PojoSerializer_getRegisteredSubclassesFromExecutionConfig_rdh | // --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
* Extracts the subclasses of the base POJO class registered in the execution config.
*/
private static LinkedHashSet<Class<?>> getRegisteredSubclassesFromExecutionConfig(Class<?> basePojoClass, ExecutionConfig executionConfig) {
LinkedHashSet<Class<?>> subclassesInRegistrationOrder = CollectionUtil.newLinkedHashSetWithExpectedSize(executionConfig.getRegisteredPojoTypes().size());
for (Class<?> registeredClass : executionConfig.getRegisteredPojoTypes()) {
if (registeredClass.equals(basePojoClass)) {
continue;
}
        if (!basePojoClass.isAssignableFrom(registeredClass)) {
continue;
}
subclassesInRegistrationOrder.add(registeredClass);
}
    return subclassesInRegistrationOrder;
} | 3.26 |
flink_PojoSerializer_writeObject_rdh | // --------------------------------------------------------------------------------------------
private void writeObject(ObjectOutputStream out) throws IOException, ClassNotFoundException {
out.defaultWriteObject();
out.writeInt(fields.length);
for (Field field : fields) {
FieldSerializer.serializeField(field, out);
}
} | 3.26 |
flink_PojoSerializer_buildSnapshot_rdh | /**
* Build and return a snapshot of the serializer's parameters and currently cached serializers.
*/
private static <T> PojoSerializerSnapshot<T> buildSnapshot(Class<T> pojoType, LinkedHashMap<Class<?>, Integer> registeredSubclassesToTags, TypeSerializer<?>[] registeredSubclassSerializers, Field[] fields, TypeSerializer<?>[] fieldSerializers, Map<Class<?>, TypeSerializer<?>> nonRegisteredSubclassSerializerCache) {
final LinkedHashMap<Class<?>, TypeSerializer<?>> subclassRegistry = CollectionUtil.newLinkedHashMapWithExpectedSize(registeredSubclassesToTags.size());
    for (Map.Entry<Class<?>, Integer> entry : registeredSubclassesToTags.entrySet()) {
subclassRegistry.put(entry.getKey(), registeredSubclassSerializers[entry.getValue()]);
}
return new PojoSerializerSnapshot<>(pojoType, fields, fieldSerializers, subclassRegistry, nonRegisteredSubclassSerializerCache);
} | 3.26 |
flink_PojoSerializer_snapshotConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public PojoSerializerSnapshot<T> snapshotConfiguration() {
return buildSnapshot(clazz,
registeredClasses, registeredSerializers, fields, fieldSerializers, subclassSerializerCache);
} | 3.26 |
flink_PojoSerializer_findField_rdh | /**
* Finds and returns the order (0-based) of a POJO field. Returns -1 if the field does not exist
* for this POJO.
*/
private int findField(String fieldName) {
    int foundIndex = 0;
for (Field field : fields) {
if ((field != null) && fieldName.equals(field.getName())) {
return foundIndex;
}
foundIndex++;
}
return -1;
} | 3.26 |
flink_PojoSerializer_createRegisteredSubclassTags_rdh | /**
* Builds map of registered subclasses to their class tags. Class tags will be integers starting
* from 0, assigned incrementally with the order of provided subclasses.
 */
private static LinkedHashMap<Class<?>, Integer> createRegisteredSubclassTags(LinkedHashSet<Class<?>> registeredSubclasses) {
final LinkedHashMap<Class<?>, Integer> classToTag = new LinkedHashMap<>();
int id = 0;
for (Class<?> registeredClass : registeredSubclasses) {
classToTag.put(registeredClass, id);
id++;
}
return classToTag;
} | 3.26 |
flink_PojoSerializer_getSubclassSerializer_rdh | /**
* Fetches cached serializer for a non-registered subclass; also creates the serializer if it
* doesn't exist yet.
*
* <p>This method is also exposed to package-private access for testing purposes.
*/
TypeSerializer<?> getSubclassSerializer(Class<?> subclass) {
TypeSerializer<?> result = subclassSerializerCache.get(subclass);
if (result == null) {
result = m0(subclass);
subclassSerializerCache.put(subclass, result);
}
return result;
} | 3.26 |
flink_PojoSerializer_getPojoClass_rdh | // --------------------------------------------------------------------------------------------
// Configuration access
// --------------------------------------------------------------------------------------------
Class<T> getPojoClass() {
return clazz;
} | 3.26 |
flink_RunLengthDecoder_initWidthAndPacker_rdh | /**
* Initializes the internal state for decoding ints of `bitWidth`.
*/
private void initWidthAndPacker(int bitWidth) {
Preconditions.checkArgument((bitWidth >= 0) && (bitWidth <= 32), "bitWidth must be >= 0 and <= 32");
this.bitWidth = bitWidth;
this.bytesWidth = BytesUtils.paddedByteCountFromBits(bitWidth);
    this.packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);
} | 3.26 |
flink_RunLengthDecoder_readDictionaryIdData_rdh | /**
* It is used to decode dictionary IDs.
 */
private void readDictionaryIdData(int total, WritableIntVector c, int rowId) {
    int left = total;
    while (left > 0) {
        if (this.currentCount == 0) {
            this.readNextGroup();
        }
        int n = Math.min(left, this.currentCount);
        switch (mode) {
            case RLE:
                c.setInts(rowId, n, currentValue);
                break;
            case PACKED:
c.setInts(rowId, n, currentBuffer, currentBufferIdx);
currentBufferIdx += n;
break;
}
rowId += n;
left -= n;
currentCount -= n;
}
} | 3.26 |
flink_RunLengthDecoder_initFromStream_rdh | /**
* Init from input stream.
*/
void initFromStream(int valueCount, ByteBufferInputStream in) throws IOException {
    this.in = in;
    if (fixedWidth) {
        // initialize for repetition and definition levels
        if (readLength) {
            int length = readIntLittleEndian();
            this.in = in.sliceStream(length);
        }
    } else {
        // initialize for values
        if (in.available() > 0) {
            initWidthAndPacker(in.read());
        }
    }
    if (bitWidth == 0) {
        // 0 bit width, treat this as an RLE run of valueCount number of 0's.
        this.mode = MODE.RLE;
        this.currentCount = valueCount;
        this.currentValue = 0;
    } else {
        this.currentCount = 0;
    }
} | 3.26 |
flink_RunLengthDecoder_readDictionaryIds_rdh | /**
* Decoding for dictionary ids. The IDs are populated into `values` and the nullability is
* populated into `nulls`.
*/
void readDictionaryIds(int total, WritableIntVector values, WritableColumnVector nulls, int rowId, int level, RunLengthDecoder data) {
int left = total;
while (left > 0) {
if (this.currentCount == 0) {
this.readNextGroup();
            }
            int n = Math.min(left, this.currentCount);
switch (mode) {
case RLE :
if (currentValue == level) {
                        data.readDictionaryIdData(n, values, rowId);
                    } else {
nulls.setNulls(rowId, n);
}
break;
case PACKED :
for (int i = 0; i < n; ++i) {
if (currentBuffer[currentBufferIdx++] == level) {
values.setInt(rowId + i, data.readInteger());
} else {
nulls.setNullAt(rowId + i);
}
}
break;
}
rowId += n;
left -= n;
currentCount -= n;
}
} | 3.26 |
flink_RunLengthDecoder_readUnsignedVarInt_rdh | /**
* Reads the next varint encoded int.
*/
private int readUnsignedVarInt() throws IOException {
int value = 0;
int v7 = 0;
int b;
    do {
        b = in.read();
        value |= (b & 0x7f) << v7;
        v7 += 7;
    } while ((b & 0x80) != 0);
    return value;
} | 3.26 |
flink_RunLengthDecoder_readNextGroup_rdh | /**
* Reads the next group.
*/
void readNextGroup() {
try {
int header = readUnsignedVarInt();
this.mode = ((header & 1) == 0) ? MODE.RLE : MODE.PACKED;
switch (mode) {
case RLE :
this.currentCount = header >>> 1;
this.currentValue = readIntLittleEndianPaddedOnBitWidth();
return;
case PACKED :
int numGroups = header >>> 1;
this.currentCount = numGroups * 8;
if (this.currentBuffer.length < this.currentCount) {
this.currentBuffer = new int[this.currentCount];
}
                    currentBufferIdx = 0;
                    int valueIndex = 0;
                    while (valueIndex < this.currentCount) {
                        // values are bit packed 8 at a time, so reading bitWidth will always work
ByteBuffer buffer = in.slice(bitWidth);
if (buffer.hasArray()) {
// byte array has better performance than ByteBuffer
this.packer.unpack8Values(buffer.array(), buffer.arrayOffset() + buffer.position(), this.currentBuffer, valueIndex);
} else {
this.packer.unpack8Values(buffer, buffer.position(),
this.currentBuffer, valueIndex);
}
valueIndex += 8;
}
return;
default :
throw new ParquetDecodingException("not a valid mode " + this.mode);
}
} catch (IOException e) {
throw new ParquetDecodingException("Failed to read from input stream", e);
}
} | 3.26 |
flink_RunLengthDecoder_readIntLittleEndian_rdh | /**
* Reads the next 4 byte little endian int.
*/
private int readIntLittleEndian() throws IOException {
    int ch4 = in.read();
int ch3 = in.read();
int ch2 = in.read();
int ch1 = in.read();
return (((ch1 << 24) + (ch2 << 16)) + (ch3 << 8)) + ch4;
} | 3.26 |
flink_RunLengthDecoder_readIntLittleEndianPaddedOnBitWidth_rdh | /**
* Reads the next byteWidth little endian int.
*/
private int readIntLittleEndianPaddedOnBitWidth() throws IOException {
    switch (bytesWidth) {
        case 0:
            return 0;
        case 1:
            return in.read();
        case 2: {
            int ch2 = in.read();
            int ch1 = in.read();
            return (ch1 << 8) + ch2;
        }
        case 3: {
            int v15 = in.read();
            int ch2 = in.read();
            int ch1 = in.read();
            return ((ch1 << 16) + (ch2 << 8)) + v15;
        }
        case 4: {
            return readIntLittleEndian();
        }
    }
throw new RuntimeException("Unreachable");
} | 3.26 |
flink_SimpleVersionedSerialization_readVersionAndDeSerialize_rdh | /**
* Deserializes the version and datum from a byte array. The first four bytes will be read as
* the version, in <i>big-endian</i> encoding. The remaining bytes will be passed to the
* serializer for deserialization, via {@link SimpleVersionedSerializer#deserialize(int,
* byte[])}.
*
* @param serializer
* The serializer to deserialize the datum with.
* @param bytes
* The bytes to deserialize from.
* @return The deserialized datum.
* @throws IOException
* Exceptions from the {@link SimpleVersionedSerializer#deserialize(int,
* byte[])} method are forwarded.
*/
public static <T> T readVersionAndDeSerialize(SimpleVersionedSerializer<T> serializer, byte[] bytes) throws IOException {
checkNotNull(serializer, "serializer");
checkNotNull(bytes, "bytes");
    checkArgument(bytes.length >= 8, "byte array below minimum length (8 bytes)");
    final byte[] dataOnly = Arrays.copyOfRange(bytes, 8, bytes.length);
final int version = ((((bytes[0] & 0xff) << 24) | ((bytes[1] & 0xff) << 16)) | ((bytes[2] & 0xff) << 8)) | (bytes[3] & 0xff);
final int length = ((((bytes[4] & 0xff) << 24) | ((bytes[5] & 0xff) << 16)) | ((bytes[6] & 0xff) << 8)) | (bytes[7] & 0xff);
if (length == dataOnly.length) {
return serializer.deserialize(version, dataOnly);
} else {
throw new IOException((("Corrupt data, conflicting lengths. Length fields: " + length) + ", data: ") + dataOnly.length);
}
} | 3.26 |
flink_SimpleVersionedSerialization_writeVersionAndSerializeList_rdh | /**
* Serializes the version and data into a stream.
*
* <p>Data serialized via this method can be deserialized via {@link #readVersionAndDeserializeList(SimpleVersionedSerializer, DataInputView)}.
*
 * <p>The first eight bytes will be occupied by the version, as returned by {@link SimpleVersionedSerializer#getVersion()}, and by the length of the list. Each list entry is then
 * written as its serialized length followed by the serialized data, as produced by
 * {@link SimpleVersionedSerializer#serialize(Object)}.
*
* @param serializer
* The serializer to serialize the datum with.
* @param data
* list of datum to serialize.
* @param out
* The stream to serialize to.
*/
public static <T> void writeVersionAndSerializeList(SimpleVersionedSerializer<T> serializer, List<T> data, DataOutputView out) throws IOException {
checkNotNull(serializer);
checkNotNull(data);
checkNotNull(out);
out.writeInt(serializer.getVersion());
out.writeInt(data.size());
for (final T datum : data) {
final byte[] serializedDatum = serializer.serialize(datum);
out.writeInt(serializedDatum.length);
            out.write(serializedDatum);
}
} | 3.26 |
flink_SimpleVersionedSerialization_writeVersionAndSerialize_rdh | /**
 * Serializes the version and datum into a byte array. The first four bytes will be occupied by
 * the version (as returned by {@link SimpleVersionedSerializer#getVersion()}), written in
 * <i>big-endian</i> encoding, followed by four bytes holding the length of the serialized
 * datum. The remaining bytes will be the serialized datum, as produced by
 * {@link SimpleVersionedSerializer#serialize(Object)}. The resulting array will hence be eight
 * bytes larger than the serialized datum.
*
* <p>Data serialized via this method can be deserialized via {@link #readVersionAndDeSerialize(SimpleVersionedSerializer, byte[])}.
*
* @param serializer
* The serializer to serialize the datum with.
* @param datum
* The datum to serialize.
* @return A byte array containing the serialized version and serialized datum.
* @throws IOException
* Exceptions from the {@link SimpleVersionedSerializer#serialize(Object)}
* method are forwarded.
*/
public static <T> byte[] writeVersionAndSerialize(SimpleVersionedSerializer<T> serializer, T datum) throws IOException {
checkNotNull(serializer, "serializer");
checkNotNull(datum, "datum");
final byte[] v12 = serializer.serialize(datum);
final byte[] versionAndData = new byte[v12.length + 8];
final int version = serializer.getVersion();
versionAndData[0] = ((byte) (version >> 24));
versionAndData[1] = ((byte) (version >> 16));
versionAndData[2] = ((byte) (version >> 8));
versionAndData[3] = ((byte) (version));
final int length = v12.length;
versionAndData[4] = ((byte) (length >> 24));
versionAndData[5] = ((byte) (length >> 16));
versionAndData[6] = ((byte) (length >> 8));
versionAndData[7] = ((byte) (length));
// move the data to the array
System.arraycopy(v12, 0,
versionAndData, 8, v12.length);
return versionAndData;
} | 3.26 |
flink_SimpleVersionedSerialization_readVersionAndDeserializeList_rdh | /**
* Deserializes the version and data from a stream.
*
* <p>This method deserializes data serialized via {@link #writeVersionAndSerializeList(SimpleVersionedSerializer, List, DataOutputView)} .
*
* <p>The first four bytes will be interpreted as the version. The next four bytes will be
* interpreted as the length of the list, then length-many data will be read and deserialized
* via the {@link SimpleVersionedSerializer#deserialize(int, byte[])} method.
*
* @param serializer
* The serializer to serialize the datum with.
* @param in
* The stream to deserialize from.
*/
public static <T> List<T> readVersionAndDeserializeList(SimpleVersionedSerializer<T> serializer, DataInputView in) throws IOException {
checkNotNull(serializer);
checkNotNull(in);
final int serializerVersion = in.readInt();
final int dataSize = in.readInt();
final List<T> data = new ArrayList<>();
    for (int ignored = 0; ignored < dataSize; ignored++) {
        final int datumSize = in.readInt();
        final byte[] datum = new byte[datumSize];
        in.readFully(datum);
data.add(serializer.deserialize(serializerVersion, datum));
}
return data;
} | 3.26 |
flink_HiveASTParseUtils_parse_rdh | /**
* Parses the Hive query.
*/
public static HiveParserASTNode parse(String command, HiveParserContext ctx, String viewFullyQualifiedName) throws HiveASTParseException {
    HiveASTParseDriver pd = new HiveASTParseDriver();
    HiveParserASTNode v1 = pd.parse(command, ctx, viewFullyQualifiedName);
v1 = findRootNonNullToken(v1);
handleSetColRefs(v1);
return v1;
} | 3.26 |
flink_KvStateSerializer_deserializeList_rdh | /**
* Deserializes all values with the given serializer.
*
* @param serializedValue
* Serialized value of type List<T>
* @param serializer
* Serializer for T
* @param <T>
* Type of the value
* @return Deserialized list or <code>null</code> if the serialized value is <code>null</code>
* @throws IOException
* On failure during deserialization
*/
public static <T> List<T> deserializeList(byte[] serializedValue, TypeSerializer<T> serializer) throws IOException {
if (serializedValue != null) {
final DataInputDeserializer in = new DataInputDeserializer(serializedValue, 0, serializedValue.length);
try {
final List<T> result = new ArrayList<>();
while (in.available() > 0) {
result.add(serializer.deserialize(in));
// The expected binary format has a single byte separator. We
// want a consistent binary format in order to not need any
// special casing during deserialization. A "cleaner" format
// would skip this extra byte, but would require a memory copy
// for RocksDB, which stores the data serialized in this way
// for lists.
if (in.available() > 0) {
in.readByte();
}
}
return result;
        } catch (IOException e) {
            throw new IOException(("Unable to deserialize value. " + "This indicates a mismatch in the value serializers ") + "used by the KvState instance and this access.", e);
        }
    } else {
return null;
}
} | 3.26 |
flink_KvStateSerializer_deserializeMap_rdh | /**
* Deserializes all kv pairs with the given serializer.
*
* @param serializedValue
* Serialized value of type Map<UK, UV>
* @param keySerializer
* Serializer for UK
* @param valueSerializer
* Serializer for UV
* @param <UK>
* Type of the key
* @param <UV>
* Type of the value.
* @return Deserialized map or <code>null</code> if the serialized value is <code>null</code>
* @throws IOException
* On failure during deserialization
*/
public static <UK, UV> Map<UK, UV> deserializeMap(byte[] serializedValue, TypeSerializer<UK> keySerializer, TypeSerializer<UV> valueSerializer) throws IOException {
if (serializedValue != null) {
DataInputDeserializer in = new DataInputDeserializer(serializedValue, 0, serializedValue.length);
Map<UK, UV> result = new HashMap<>();
while (in.available() > 0) {
UK key = keySerializer.deserialize(in);
boolean isNull = in.readBoolean();
UV value = (isNull) ? null : valueSerializer.deserialize(in);
result.put(key, value);
}
return result;
} else {
return null;
}
} | 3.26 |
flink_KvStateSerializer_deserializeKeyAndNamespace_rdh | /**
* Deserializes the key and namespace into a {@link Tuple2}.
*
* @param serializedKeyAndNamespace
* Serialized key and namespace
* @param keySerializer
* Serializer for the key
* @param namespaceSerializer
* Serializer for the namespace
* @param <K>
* Key type
* @param <N>
* Namespace
* @return Tuple2 holding deserialized key and namespace
* @throws IOException
* if the deserialization fails for any reason
*/
public static <K, N> Tuple2<K, N> deserializeKeyAndNamespace(byte[] serializedKeyAndNamespace, TypeSerializer<K> keySerializer, TypeSerializer<N> namespaceSerializer) throws IOException {
DataInputDeserializer dis = new DataInputDeserializer(serializedKeyAndNamespace, 0, serializedKeyAndNamespace.length);
try {
K key = keySerializer.deserialize(dis);
byte magicNumber = dis.readByte();
if (magicNumber != MAGIC_NUMBER) {
throw new IOException(("Unexpected magic number " + magicNumber) + ".");
}
N namespace = namespaceSerializer.deserialize(dis);
if (dis.available() > 0) {
throw new IOException("Unconsumed bytes in the serialized key and namespace.");
}
return new Tuple2<>(key, namespace);
} catch (IOException e) {
throw new IOException(("Unable to deserialize key " + "and namespace. This indicates a mismatch in the key/namespace ") + "serializers used by the KvState instance and this access.", e);
}
} | 3.26 |
flink_KvStateSerializer_m0_rdh | // ------------------------------------------------------------------------
// Generic serialization utils
// ------------------------------------------------------------------------
/**
* Serializes the key and namespace into a {@link ByteBuffer}.
*
* <p>The serialized format matches the RocksDB state backend key format, i.e. the key and
* namespace don't have to be deserialized for RocksDB lookups.
*
* @param key
* Key to serialize
* @param keySerializer
* Serializer for the key
* @param namespace
* Namespace to serialize
* @param namespaceSerializer
* Serializer for the namespace
* @param <K>
* Key type
* @param <N>
* Namespace type
* @return Buffer holding the serialized key and namespace
* @throws IOException
* Serialization errors are forwarded
*/
public static <K, N> byte[] m0(K key, TypeSerializer<K> keySerializer, N namespace, TypeSerializer<N> namespaceSerializer) throws IOException {
DataOutputSerializer dos = new DataOutputSerializer(32);
keySerializer.serialize(key, dos);
dos.writeByte(MAGIC_NUMBER);
namespaceSerializer.serialize(namespace, dos);
return dos.getCopyOfBuffer();
} | 3.26 |
flink_KvStateSerializer_deserializeValue_rdh | /**
* Deserializes the value with the given serializer.
*
* @param serializedValue
* Serialized value of type T
* @param serializer
* Serializer for T
* @param <T>
* Type of the value
* @return Deserialized value or <code>null</code> if the serialized value is <code>null</code>
* @throws IOException
* On failure during deserialization
*/
public static <T> T deserializeValue(byte[] serializedValue, TypeSerializer<T> serializer) throws IOException {
if (serializedValue == null) {
return null;
} else {
final DataInputDeserializer deser = new DataInputDeserializer(serializedValue, 0, serializedValue.length);
final T value = serializer.deserialize(deser);
if (deser.available() > 0) {
throw new IOException(("Unconsumed bytes in the deserialized value. " + "This indicates a mismatch in the value serializers ") + "used by the KvState instance and this access.");
}
return value;
}
} | 3.26 |
flink_KvStateSerializer_serializeMap_rdh | /**
* Serializes all values of the Iterable with the given serializer.
*
* @param entries
* Key-value pairs to serialize
* @param keySerializer
* Serializer for UK
* @param valueSerializer
* Serializer for UV
* @param <UK>
* Type of the keys
* @param <UV>
* Type of the values
* @return Serialized values or <code>null</code> if values <code>null</code> or empty
* @throws IOException
* On failure during serialization
*/
public static <UK, UV> byte[] serializeMap(Iterable<Map.Entry<UK, UV>> entries, TypeSerializer<UK> keySerializer, TypeSerializer<UV> valueSerializer) throws IOException {
if (entries != null) {
// Serialize
DataOutputSerializer dos = new DataOutputSerializer(32);
for (Map.Entry<UK, UV> entry : entries) {
keySerializer.serialize(entry.getKey(), dos);
if (entry.getValue() == null) {
dos.writeBoolean(true);
} else {
dos.writeBoolean(false);
valueSerializer.serialize(entry.getValue(), dos);
}
        }
        return dos.getCopyOfBuffer();
    } else {
        return null;
    }
} | 3.26 |
flink_ClusterEntrypointUtils_createTaskManagerWorkingDirectory_rdh | /**
* Creates the working directory for the TaskManager process. This method ensures that the
* working directory exists.
*
* @param configuration
* to extract the required settings from
* @param envelopedResourceId
* identifying the TaskManager process
* @return working directory
* @throws IOException
* if the working directory could not be created
*/
public static DeterminismEnvelope<WorkingDirectory> createTaskManagerWorkingDirectory(Configuration configuration, DeterminismEnvelope<ResourceID> envelopedResourceId) throws IOException {
return envelopedResourceId.map(resourceId -> WorkingDirectory.create(generateTaskManagerWorkingDirectoryFile(configuration, resourceId)));
}
/**
* Generates the working directory {@link File} | 3.26 |
flink_ClusterEntrypointUtils_getPoolSize_rdh | /**
 * Gets and verifies the io-executor pool size based on the configuration.
*
* @param config
* The configuration to read.
* @return The legal io-executor pool size.
*/
public static int getPoolSize(Configuration config) {
final int poolSize = config.getInteger(ClusterOptions.CLUSTER_IO_EXECUTOR_POOL_SIZE, 4 * Hardware.getNumberCPUCores());
Preconditions.checkArgument(poolSize > 0, String.format("Illegal pool size (%s) of io-executor, please re-configure '%s'.", poolSize, ClusterOptions.CLUSTER_IO_EXECUTOR_POOL_SIZE.key()));
return poolSize;
} | 3.26 |
flink_ClusterEntrypointUtils_parseParametersOrExit_rdh | /**
* Parses passed String array using the parameter definitions of the passed {@code ParserResultFactory}. The method will call {@code System.exit} and print the usage
* information to stdout in case of a parsing error.
*
* @param args
* The String array that shall be parsed.
* @param parserResultFactory
* The {@code ParserResultFactory} that collects the parameter
* parsing instructions.
* @param mainClass
* The main class initiating the parameter parsing.
* @param <T>
* The parsing result type.
* @return The parsing result.
 */
public static <T> T parseParametersOrExit(String[] args, ParserResultFactory<T> parserResultFactory, Class<?> mainClass) {
final CommandLineParser<T> commandLineParser = new CommandLineParser<>(parserResultFactory);
try {
return commandLineParser.parse(args);
} catch (Exception e) {
LOG.error("Could not parse command line arguments {}.", args, e);commandLineParser.printHelp(mainClass.getSimpleName());
System.exit(ClusterEntrypoint.STARTUP_FAILURE_RETURN_CODE);
}
return null;
}
/**
* Tries to find the user library directory.
*
* @return the user library directory if it exits, returns {@link Optional#empty()} | 3.26 |
flink_ClusterEntrypointUtils_configureUncaughtExceptionHandler_rdh | /**
* Sets the uncaught exception handler for current thread based on configuration.
*
* @param config
* the configuration to read.
*/
public static void configureUncaughtExceptionHandler(Configuration config) {
Thread.setDefaultUncaughtExceptionHandler(new ClusterUncaughtExceptionHandler(config.get(ClusterOptions.UNCAUGHT_EXCEPTION_HANDLING)));
} | 3.26 |
flink_ClusterEntrypointUtils_generateJobManagerWorkingDirectoryFile_rdh | /**
* Generates the working directory {@link File} for the JobManager process. This method does not
* ensure that the working directory exists.
*
* @param configuration
* to extract the required settings from
* @param resourceId
* identifying the JobManager process
* @return working directory file
*/
@VisibleForTesting
public static File generateJobManagerWorkingDirectoryFile(Configuration configuration, ResourceID resourceId) {
return generateWorkingDirectoryFile(configuration, Optional.of(ClusterOptions.JOB_MANAGER_PROCESS_WORKING_DIR_BASE), "jm_" + resourceId);
}
/**
* Generate the working directory from the given configuration. If a working dir option is
* specified, then this config option will be read first. At last, {@link CoreOptions#TMP_DIRS} | 3.26 |
flink_ClusterEntrypointUtils_createJobManagerWorkingDirectory_rdh | /**
* Creates the working directory for the JobManager process. This method ensures that the
 * working directory exists.
*
* @param configuration
* to extract the required settings from
* @param envelopedResourceId
 * identifying the JobManager process
* @return working directory
* @throws IOException
* if the working directory could not be created
*/
public static DeterminismEnvelope<WorkingDirectory> createJobManagerWorkingDirectory(Configuration configuration, DeterminismEnvelope<ResourceID> envelopedResourceId) throws IOException {
return envelopedResourceId.map(resourceId -> WorkingDirectory.create(generateJobManagerWorkingDirectoryFile(configuration, resourceId)));
} | 3.26 |
flink_Explainable_explain_rdh | /**
* Returns the AST of this object and the execution plan to compute the result of the given
* statement.
*
* @param extraDetails
* The extra explain details which the result of this method should include,
* e.g. estimated cost, changelog mode for streaming
* @return AST and the execution plan.
*/
default String explain(ExplainDetail... extraDetails) {
return explain(ExplainFormat.TEXT, extraDetails);
} | 3.26 |
flink_MigrationUtils_skipSerializedStates_rdh | /**
* Skips bytes corresponding to serialized states. In flink 1.6+ the states are no longer kept
* in state.
*/
static void skipSerializedStates(DataInputView in) throws IOException {
TypeSerializer<String> nameSerializer = StringSerializer.INSTANCE;
TypeSerializer<State.StateType> stateTypeSerializer = new EnumSerializer<>(StateType.class);
TypeSerializer<StateTransitionAction> actionSerializer = new EnumSerializer<>(StateTransitionAction.class);
final int noOfStates = in.readInt();
for (int i = 0; i < noOfStates; i++) {
nameSerializer.deserialize(in);
stateTypeSerializer.deserialize(in);
}
for (int i = 0; i < noOfStates; i++) {
String v6 = nameSerializer.deserialize(in);
int noOfTransitions = in.readInt();
        for (int j = 0; j < noOfTransitions; j++) {
            String src = nameSerializer.deserialize(in);
            Preconditions.checkState(src.equals(v6), "Source Edge names do not match (" + v6 + " - " + src + ").");
            nameSerializer.deserialize(in);
actionSerializer.deserialize(in);
try {
m0(in);
} catch (ClassNotFoundException e) {
e.printStackTrace();
}
}
}
} | 3.26 |
flink_PendingCheckpointStats_reportSubtaskStats_rdh | // ------------------------------------------------------------------------
// Callbacks from the PendingCheckpoint instance
// ------------------------------------------------------------------------
/**
* Reports statistics for a single subtask.
*
* @param jobVertexId
* ID of the task/operator the subtask belongs to.
* @param subtask
* The statistics for the subtask.
* @return <code>true</code> if successfully reported or <code>false</code> otherwise.
*/
boolean reportSubtaskStats(JobVertexID jobVertexId, SubtaskStateStats subtask) {
TaskStateStats taskStateStats = taskStats.get(jobVertexId);
if ((taskStateStats != null) && taskStateStats.reportSubtaskStats(subtask)) {
if (subtask.isCompleted()) {
            currentNumAcknowledgedSubtasks++;
            latestAcknowledgedSubtask = subtask;
}
currentCheckpointedSize += subtask.getCheckpointedSize();
currentStateSize += subtask.getStateSize();
long processedData = subtask.getProcessedData();
if (processedData > 0) {
f1 += processedData;
}
long persistedData = subtask.getPersistedData();
if (persistedData > 0) {
currentPersistedData += persistedData;
}
unalignedCheckpoint |= subtask.getUnalignedCheckpoint();
return true;
} else {
return false;
}
} | 3.26 |
flink_PendingCheckpointStats_m0_rdh | /**
* Reports a failed pending checkpoint.
*
* @param failureTimestamp
* Timestamp of the failure.
* @param cause
* Optional cause of the failure.
*/
FailedCheckpointStats m0(long failureTimestamp, @Nullable Throwable cause) {
return new FailedCheckpointStats(checkpointId, triggerTimestamp, props, numberOfSubtasks,
new HashMap<>(taskStats), currentNumAcknowledgedSubtasks, currentCheckpointedSize, currentStateSize, f1, currentPersistedData, unalignedCheckpoint, failureTimestamp, latestAcknowledgedSubtask, cause);
} | 3.26 |
flink_CheckpointStatsStatus_isInProgress_rdh | /**
* Returns whether the checkpoint is in progress.
*
* @return <code>true</code> if checkpoint is in progress, <code>false</code> otherwise.
*/
public boolean isInProgress() {
return this == IN_PROGRESS;
} | 3.26 |
flink_WindowTrigger_triggerTime_rdh | /**
 * Returns the trigger time of the window. This should be called after the TriggerContext has
 * been initialized.
*/
protected long triggerTime(W window) {
return toEpochMillsForTimer(window.maxTimestamp(), ctx.getShiftTimeZone());
} | 3.26 |
flink_Topology_getPipelinedRegionOfVertex_rdh | /**
* The pipelined region for a specified vertex.
*
* @param vertexId
* the vertex id identifying the vertex for which the pipelined region should be
* returned
* @return the pipelined region of the vertex
* @throws IllegalArgumentException
* if there is no vertex in this topology with the specified
* vertex id
*/
default PR getPipelinedRegionOfVertex(VID vertexId) {
throw new UnsupportedOperationException();
} | 3.26 |
flink_FileMergingSnapshotManager_getManagedDirName_rdh | /**
 * Generates a unique managed directory name for one subtask.
*
* @return the managed directory name.
*/
public String getManagedDirName() {
return String.format("%s_%d_%d_", operatorIDString, f0, parallelism).replaceAll("[^a-zA-Z0-9\\-]", "_");
} | 3.26 |
flink_StreamTaskNetworkInputFactory_create_rdh | /**
* Factory method for {@link StreamTaskNetworkInput} or {@link RescalingStreamTaskNetworkInput}
* depending on {@link InflightDataRescalingDescriptor}.
*/
public static <T> StreamTaskInput<T> create(
        CheckpointedInputGate checkpointedInputGate,
        TypeSerializer<T> inputSerializer,
        IOManager ioManager,
        StatusWatermarkValve statusWatermarkValve,
        int inputIndex,
        InflightDataRescalingDescriptor rescalingDescriptorinflightDataRescalingDescriptor,
        Function<Integer, StreamPartitioner<?>> gatePartitioners,
        TaskInfo taskInfo,
        CanEmitBatchOfRecordsChecker canEmitBatchOfRecords) {
    return rescalingDescriptorinflightDataRescalingDescriptor.equals(InflightDataRescalingDescriptor.NO_RESCALE)
            ? new StreamTaskNetworkInput<>(checkpointedInputGate, inputSerializer, ioManager, statusWatermarkValve, inputIndex, canEmitBatchOfRecords)
            : new RescalingStreamTaskNetworkInput<>(checkpointedInputGate, inputSerializer, ioManager, statusWatermarkValve, inputIndex, rescalingDescriptorinflightDataRescalingDescriptor, gatePartitioners, taskInfo, canEmitBatchOfRecords);
} | 3.26 |
flink_ProcessWindowFunction_clear_rdh | /**
* Deletes any state in the {@code Context} when the Window expires (the watermark passes its
* {@code maxTimestamp} + {@code allowedLateness}).
*
* @param context
* The context to which the window is being evaluated
* @throws Exception
* The function may throw exceptions to fail the program and trigger recovery.
*/
public void clear(Context context) throws Exception {
} | 3.26 |
flink_SpeculativeExecutionVertex_isOriginalAttempt_rdh | /**
* Returns whether the given attempt is the original execution attempt of the execution vertex,
* i.e. it is created along with the creation of resetting of the execution vertex.
*/
public boolean isOriginalAttempt(int attemptNumber) {
    return attemptNumber == originalAttemptNumber;
} | 3.26 |
flink_SpeculativeExecutionVertex_archiveFailedExecution_rdh | /**
 * Removes the execution from currentExecutions if it has failed. This is needed to make room
 * for possible future speculative executions.
*
* @param executionAttemptId
* attemptID of the execution to be removed
*/
public void archiveFailedExecution(ExecutionAttemptID executionAttemptId) {
if (this.f0.size() <= 1) {
// Leave the last execution because currentExecutions should never be empty. This should
// happen only if all current executions have FAILED. A vertex reset will happen soon
// and will archive the remaining execution.
return;
}
final Execution removedExecution = this.f0.remove(executionAttemptId.getAttemptNumber());
nextInputSplitIndexToConsumeByAttempts.remove(executionAttemptId.getAttemptNumber());
checkNotNull(removedExecution, "Cannot remove execution %s which does not exist.", executionAttemptId);
        checkState(
                removedExecution.getState() == FAILED,
                "Cannot remove execution %s which is not FAILED.",
                executionAttemptId);
executionHistory.add(removedExecution.archive());
if (removedExecution == this.currentExecution) {
this.currentExecution = this.f0.values().iterator().next();
}
} | 3.26 |
flink_AvroParquetWriters_forGenericRecord_rdh | /**
* Creates a ParquetWriterFactory that accepts and writes Avro generic types. The Parquet
* writers will use the given schema to build and write the columnar data.
*
* @param schema
* The schema of the generic type.
*/
public static ParquetWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
        final String schemaString = schema.toString();
// Must override the lambda representation because of a bug in shading lambda
// serialization, see similar issue FLINK-28043 for more details.
final ParquetBuilder<GenericRecord> builder = new ParquetBuilder<GenericRecord>() {
@Override
public ParquetWriter<GenericRecord> createWriter(OutputFile out) throws IOException {
return createAvroParquetWriter(schemaString, GenericData.get(), out);
}
};
return new ParquetWriterFactory<>(builder);
} | 3.26 |
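A hedged usage sketch: wiring the factory into a FileSink for bulk Parquet output. It assumes the flink-parquet and flink-connector-files dependencies are on the classpath; the schema and output path are examples only.

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.ParquetWriterFactory;
import org.apache.flink.formats.parquet.avro.AvroParquetWriters;

public class GenericRecordParquetSinkExample {

    public static FileSink<GenericRecord> buildSink(String outputPath) {
        // Example schema; any Avro record schema works here.
        Schema schema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"Event\",\"fields\":["
                        + "{\"name\":\"id\",\"type\":\"string\"},"
                        + "{\"name\":\"count\",\"type\":\"long\"}]}");
        ParquetWriterFactory<GenericRecord> factory = AvroParquetWriters.forGenericRecord(schema);
        // Bulk formats roll on every checkpoint, so no extra rolling policy is configured.
        return FileSink.forBulkFormat(new Path(outputPath), factory).build();
    }
}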
flink_AvroParquetWriters_forReflectRecord_rdh | /**
* Creates a ParquetWriterFactory for the given type. The Parquet writers will use Avro to
* reflectively create a schema for the type and use that schema to write the columnar data.
*
* @param type
* The class of the type to write.
*/
public static <T> ParquetWriterFactory<T> forReflectRecord(Class<T> type) {
final String schemaString = ReflectData.get().getSchema(type).toString();
final ParquetBuilder<T> builder = out -> createAvroParquetWriter(schemaString, ReflectData.get(), out);
return new ParquetWriterFactory<>(builder);
} | 3.26 |
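A hedged sketch for the reflective variant: the POJO below is a made-up example type whose schema Avro derives from its public fields by reflection.

import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.avro.AvroParquetWriters;

public class ReflectRecordParquetSinkExample {

    /** Illustrative POJO; Avro reflection needs an accessible no-arg constructor. */
    public static class SensorReading {
        public String sensorId;
        public long timestamp;
        public double value;
    }

    public static FileSink<SensorReading> buildSink(String outputPath) {
        return FileSink.forBulkFormat(
                        new Path(outputPath),
                        AvroParquetWriters.forReflectRecord(SensorReading.class))
                .build();
    }
}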
flink_AvroParquetWriters_forSpecificRecord_rdh | /**
* Creates a ParquetWriterFactory for an Avro specific type. The Parquet writers will use the
* schema of that specific type to build and write the columnar data.
*
* @param type
* The class of the type to write.
*/
public static <T extends SpecificRecordBase> ParquetWriterFactory<T> forSpecificRecord(Class<T> type) {
final String schemaString = SpecificData.get().getSchema(type).toString();
        final ParquetBuilder<T> builder = out -> createAvroParquetWriter(schemaString, SpecificData.get(), out);
return new ParquetWriterFactory<>(builder);
} | 3.26 |
flink_ParameterTool_fromSystemProperties_rdh | /**
 * Returns {@link ParameterTool} from the system properties. Example of how to pass system
 * properties: -Dkey1=value1 -Dkey2=value2
*
* @return A {@link ParameterTool}
*/
public static ParameterTool fromSystemProperties() {
        return fromMap((Map) System.getProperties());
} | 3.26 |
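A small usage sketch, assuming the JVM was started with -D flags such as -Dinput=/data/in -Dparallelism=4; the key names and defaults are illustrative.

import org.apache.flink.api.java.utils.ParameterTool;

public class SystemPropertiesExample {
    public static void main(String[] args) {
        ParameterTool params = ParameterTool.fromSystemProperties();
        // Fall back to defaults when a property was not passed to the JVM.
        String input = params.get("input", "/tmp/default-input");
        int parallelism = params.getInt("parallelism", 1);
        System.out.println("input=" + input + ", parallelism=" + parallelism);
    }
}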
flink_ParameterTool_getConfiguration_rdh | // ------------------------- Export to different targets -------------------------
/**
* Returns a {@link Configuration} object from this {@link ParameterTool}.
*
* @return A {@link Configuration}
*/
public Configuration getConfiguration() {
        final Configuration conf = new Configuration();
for (Map.Entry<String, String> entry : f0.entrySet()) {
conf.setString(entry.getKey(), entry.getValue());
}
        return conf;
    } | 3.26
flink_ParameterTool_getProperties_rdh | /**
* Returns a {@link Properties} object from this {@link ParameterTool}.
*
* @return A {@link Properties}
*/
public Properties getProperties() {
        Properties props = new Properties();
        props.putAll(this.f0);
return props;
} | 3.26 |
flink_ParameterTool_getNumberOfParameters_rdh | // ------------------ Get data from the util ----------------
/**
* Returns number of parameters in {@link ParameterTool}.
*/
@Override
    public int getNumberOfParameters() {
        return f0.size();
} | 3.26 |
flink_ParameterTool_fromPropertiesFile_rdh | /**
* Returns {@link ParameterTool} for the given InputStream from {@link Properties} file.
*
* @param inputStream
* InputStream from the properties file
* @return A {@link ParameterTool}
* @throws IOException
 * If the properties cannot be loaded from the input stream
* @see Properties
*/
public static ParameterTool fromPropertiesFile(InputStream inputStream) throws IOException {
Properties props = new Properties();
props.load(inputStream);
        return fromMap((Map) props);
} | 3.26 |
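A hedged sketch of the InputStream overload; the file name and property key are assumptions for illustration only.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.flink.api.java.utils.ParameterTool;

public class PropertiesStreamExample {
    public static void main(String[] args) throws IOException {
        try (InputStream in = new FileInputStream("job.properties")) {
            ParameterTool params = ParameterTool.fromPropertiesFile(in);
            System.out.println(params.get("bootstrap.servers", "localhost:9092"));
        }
    }
}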
flink_ParameterTool_createPropertiesFile_rdh | /**
 * Creates a properties file with all the known parameters (call after the last get*() call).
 * The recorded default value is written for each parameter, if available.
*
* @param pathToFile
* Location of the default properties file.
* @param overwrite
* Boolean flag indicating whether or not to overwrite the file
* @throws IOException
* If overwrite is not allowed and the file exists
*/
    public void createPropertiesFile(String pathToFile, boolean overwrite) throws IOException {
final File file = new File(pathToFile);
if (file.exists()) {
if (overwrite) {
file.delete();
} else {
                throw new RuntimeException("File " + pathToFile + " exists and overwriting is not allowed");
}
}
final Properties defaultProps = new Properties();
defaultProps.putAll(this.defaultData);
        try (final OutputStream out = new FileOutputStream(file)) {
            defaultProps.store(
                    out, "Default file created by Flink's ParameterUtil.createPropertiesFile()");
        }
    } | 3.26
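A sketch of the intended call order, under the assumption that defaults become known through the get*(key, default) calls made beforehand; the path and keys are illustrative.

import java.io.IOException;
import org.apache.flink.api.java.utils.ParameterTool;

public class DefaultsFileExample {
    public static void main(String[] args) throws IOException {
        ParameterTool params = ParameterTool.fromArgs(args);
        // Accessing parameters with defaults registers them as known parameters.
        params.get("input", "/tmp/in");
        params.getInt("retries", 3);
        // Write the recorded defaults, overwriting an existing file at this example path.
        params.createPropertiesFile("/tmp/job-defaults.properties", true);
    }
}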
flink_ParameterTool_fromArgs_rdh | // ------------------ Constructors ------------------------
/**
* Returns {@link ParameterTool} for the given arguments. The arguments are keys followed by
* values. Keys have to start with '-' or '--'
*
* <p><strong>Example arguments:</strong> --key1 value1 --key2 value2 -key3 value3
*
* @param args
* Input array arguments
* @return A {@link ParameterTool}
*/
public static ParameterTool fromArgs(String[] args) {
        final Map<String, String> map = CollectionUtil.newHashMapWithExpectedSize(args.length / 2);
        int i = 0;
        while (i < args.length) {
            final String key = Utils.getKeyFromArgs(args, i);
            if (key.isEmpty()) {
                throw new IllegalArgumentException(
                        "The input " + Arrays.toString(args) + " contains an empty argument");
            }
            i += 1; // try to find the value
            if (i >= args.length) {
                map.put(key, NO_VALUE_KEY);
            } else if (NumberUtils.isNumber(args[i])) {
                map.put(key, args[i]);
                i += 1;
            } else if (args[i].startsWith("--") || args[i].startsWith("-")) {
                // the argument cannot be a negative number because we checked earlier
                // -> the next argument is a parameter name
                map.put(key, NO_VALUE_KEY);
            } else {
                map.put(key, args[i]);
                i += 1;
            }
        }
        return fromMap(map);
} | 3.26 |
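A usage sketch showing the accepted argument shapes (flags without values, '-' and '--' prefixes); the key names are illustrative.

import org.apache.flink.api.java.utils.ParameterTool;

public class FromArgsExample {
    public static void main(String[] args) {
        // e.g. args = {"--input", "/tmp/in", "-retries", "3", "--verbose"}
        ParameterTool params = ParameterTool.fromArgs(args);
        String input = params.getRequired("input");
        int retries = params.getInt("retries", 5);
        boolean verbose = params.has("verbose"); // flag passed without a value
        System.out.printf("input=%s retries=%d verbose=%s%n", input, retries, verbose);
    }
}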
flink_ParameterTool_m0_rdh | /**
* Check if value is set.
*/
@Override
public boolean m0(String value) {
addToDefaults(value, null);
unrequestedParameters.remove(value);
return f0.containsKey(value);
} | 3.26 |
flink_ParameterTool_fromMap_rdh | /**
* Returns {@link ParameterTool} for the given map.
*
* @param map
* A map of arguments. Both Key and Value have to be Strings
* @return A {@link ParameterTool}
*/
public static ParameterTool fromMap(Map<String, String> map) {
Preconditions.checkNotNull(map, "Unable to initialize from empty map");
return new ParameterTool(map);
} | 3.26 |
flink_ParameterTool_get_rdh | /**
* Returns the String value for the given key. If the key does not exist it will return null.
*/
@Override
public String get(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
return f0.get(key);
} | 3.26 |
flink_ParameterTool_readObject_rdh | // ------------------------- Serialization ---------------------------------------------
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
defaultData = new ConcurrentHashMap<>(f0.size());
unrequestedParameters = Collections.newSetFromMap(new ConcurrentHashMap<>(f0.size()));
} | 3.26 |
flink_ParameterTool_mergeWith_rdh | // ------------------------- Interaction with other ParameterUtils -------------------------
/**
* Merges two {@link ParameterTool}.
*
* @param other
* Other {@link ParameterTool} object
* @return The Merged {@link ParameterTool}
*/
public ParameterTool mergeWith(ParameterTool other) {
final Map<String, String> resultData = CollectionUtil.newHashMapWithExpectedSize(f0.size() + other.f0.size());
resultData.putAll(f0);
resultData.putAll(other.f0);
final ParameterTool ret = new ParameterTool(resultData);
final HashSet<String> requestedParametersLeft = new HashSet<>(f0.keySet());
requestedParametersLeft.removeAll(unrequestedParameters);
final HashSet<String> requestedParametersRight = new HashSet<>(other.f0.keySet());
requestedParametersRight.removeAll(other.unrequestedParameters);
ret.unrequestedParameters.removeAll(requestedParametersLeft);
ret.unrequestedParameters.removeAll(requestedParametersRight);
        return ret;
} | 3.26 |
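A sketch of a common pattern: defaults from a properties file overridden by command-line arguments. Since the argument's entries are put into the result map last, its values win on key collisions; the path and key are illustrative.

import java.io.IOException;
import org.apache.flink.api.java.utils.ParameterTool;

public class MergeExample {
    public static void main(String[] args) throws IOException {
        ParameterTool fileParams = ParameterTool.fromPropertiesFile("/tmp/job.properties");
        ParameterTool cliParams = ParameterTool.fromArgs(args);
        // Keys present in both sources take the command-line value.
        ParameterTool params = fileParams.mergeWith(cliParams);
        System.out.println(params.get("checkpoint.interval", "60000"));
    }
}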
flink_ParameterTool_toMap_rdh | // ------------------------- ExecutionConfig.UserConfig interface -------------------------
@Override
public Map<String, String> toMap() {
return f0;
} | 3.26 |
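Because the parameters are exposed through toMap(), the tool can be registered as global job parameters and read back inside user functions. A hedged sketch (the map function and the key name are illustrative):

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class GlobalParametersExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        ParameterTool params = ParameterTool.fromArgs(args);
        // Make the parameters visible to all user functions of the job.
        env.getConfig().setGlobalJobParameters(params);

        env.fromElements(1, 2, 3)
                .map(new RichMapFunction<Integer, String>() {
                    @Override
                    public String map(Integer value) {
                        ParameterTool global = ParameterTool.fromMap(
                                getRuntimeContext()
                                        .getExecutionConfig()
                                        .getGlobalJobParameters()
                                        .toMap());
                        return global.get("prefix", "n=") + value;
                    }
                })
                .print();

        env.execute("global-parameters-example");
    }
}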
flink_ModifyKindSet_toChangelogMode_rdh | /**
* Returns the default {@link ChangelogMode} from this {@link ModifyKindSet}.
*/
public ChangelogMode toChangelogMode() {
ChangelogMode.Builder builder = ChangelogMode.newBuilder();
if (this.contains(ModifyKind.INSERT)) {
builder.addContainedKind(RowKind.INSERT);
}
if (this.contains(ModifyKind.UPDATE)) {
builder.addContainedKind(RowKind.UPDATE_BEFORE);
builder.addContainedKind(RowKind.UPDATE_AFTER);
}
if (this.contains(ModifyKind.DELETE)) {
builder.addContainedKind(RowKind.DELETE);
}
return builder.build();
} | 3.26 |
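An illustrative fragment (imports omitted because this is an internal planner class; the builder calls mirror those used in the union snippet below, and the nested Builder type is assumed to be accessible):

ModifyKindSet.Builder builder = ModifyKindSet.newBuilder();
builder.addContainedKind(ModifyKind.INSERT);
builder.addContainedKind(ModifyKind.UPDATE);
ChangelogMode mode = builder.build().toChangelogMode();
// mode now contains RowKind.INSERT, RowKind.UPDATE_BEFORE and RowKind.UPDATE_AFTER,
// but not RowKind.DELETE.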
flink_ModifyKindSet_minus_rdh | /**
 * Returns a new set of ModifyKind which is this set minus the other set, i.e. {@code this.kinds
 * - that.kinds}. For example:
 *
 * <pre>
 *   [I,U,D] minus [I]     = [U,D]
 *   [I,U]   minus [U,D]   = [I]
 *   [I,U,D] minus [I,U,D] = []
 * </pre>
*/
public ModifyKindSet minus(ModifyKindSet other) {
Set<ModifyKind> result = EnumSet.noneOf(ModifyKind.class);
result.addAll(this.kinds);
        result.removeAll(other.kinds);
        return new ModifyKindSet(result);
} | 3.26 |
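An illustrative fragment of the set algebra described above (imports omitted; builder usage assumed as in the other snippets of this class):

ModifyKindSet.Builder all = ModifyKindSet.newBuilder();
all.addContainedKind(ModifyKind.INSERT);
all.addContainedKind(ModifyKind.UPDATE);
all.addContainedKind(ModifyKind.DELETE);

ModifyKindSet.Builder insertOnly = ModifyKindSet.newBuilder();
insertOnly.addContainedKind(ModifyKind.INSERT);

ModifyKindSet updatesAndDeletes = all.build().minus(insertOnly.build()); // [U,D]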
flink_ModifyKindSet_union_rdh | // --------------------------------------------------------------------------------------------
/**
* Returns the union of a number of ModifyKindSets.
*/
public static ModifyKindSet union(ModifyKindSet... modifyKindSets) {
Builder builder = newBuilder();
for (ModifyKindSet set : modifyKindSets) {
for (ModifyKind kind : set.m0()) {
builder.addContainedKind(kind);
}
        }
        return builder.build();
} | 3.26 |