name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
flink_ResultPartitionType_isHybridResultPartition_rdh
|
/**
* {@link #isHybridResultPartition()} is used to judge whether this is the specified {@link #HYBRID_FULL} or {@link #HYBRID_SELECTIVE} resultPartitionType.
*
* <p>This method is suitable for judgment conditions related to the specific implementation of
* {@link ResultPartitionType}.
*
* <p>This method is not related to data consumption and partition release. For the logic
* related to partition release, use {@link #isReleaseByScheduler()} instead; for the consumption
* type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead.
*/
public boolean isHybridResultPartition() {
return (this == HYBRID_FULL) || (this == HYBRID_SELECTIVE);
}
| 3.26 |
flink_ResultPartitionType_isBounded_rdh
|
/**
* Whether this partition uses a limited number of (network) buffers or not.
*
* @return <tt>true</tt> if the number of buffers should be bound to some limit
*/
public boolean isBounded() {
return isBounded;
}
| 3.26 |
flink_ResultPartitionType_isPipelinedOrPipelinedBoundedResultPartition_rdh
|
/**
* {@link #isPipelinedOrPipelinedBoundedResultPartition()} is used to judge whether this is the
* specified {@link #PIPELINED} or {@link #PIPELINED_BOUNDED} resultPartitionType.
*
* <p>This method is suitable for judgment conditions related to the specific implementation of
* {@link ResultPartitionType}.
*
* <p>This method is not related to data consumption and partition release. For the logic
* related to partition release, use {@link #isReleaseByScheduler()} instead; for the consumption
* type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead.
*/
public boolean isPipelinedOrPipelinedBoundedResultPartition() {
return (this == PIPELINED) || (this == PIPELINED_BOUNDED);
}
| 3.26 |
flink_ResultPartitionType_isBlockingOrBlockingPersistentResultPartition_rdh
|
/**
* {@link #isBlockingOrBlockingPersistentResultPartition()} is used to judge whether this is the
* specified {@link #BLOCKING} or {@link #BLOCKING_PERSISTENT} resultPartitionType.
*
* <p>This method is suitable for judgment conditions related to the specific implementation of
* {@link ResultPartitionType}.
*
* <p>This method is not related to data consumption and partition release. For the logic
* related to partition release, use {@link #isReleaseByScheduler()} instead; for the consumption
* type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead.
*/
public boolean isBlockingOrBlockingPersistentResultPartition() {
return (this == BLOCKING) || (this == BLOCKING_PERSISTENT);
}
| 3.26 |
flink_ResultPartitionType_mustBePipelinedConsumed_rdh
|
/**
* Returns whether this partition's upstream and downstream must be scheduled at the same time.
*/
public boolean mustBePipelinedConsumed() {
return f1 == ConsumingConstraint.MUST_BE_PIPELINED;
}
| 3.26 |
flink_ExternalPythonKeyedCoProcessOperator_processTimer_rdh
|
/**
* It is responsible for sending timer data to the Python worker when a registered timer fires. The
* input data is a Row containing 4 fields: the timer flag (0 for processing time, 1 for event time),
* the timestamp of the fired timer, the current watermark, and the key of the timer.
*
* @param timeDomain
* The type of the timer.
* @param timer
* The fired timer.
* @throws Exception
* The runnerInputSerializer might throw exception.
*/
private void processTimer(TimeDomain timeDomain, InternalTimer<Row, VoidNamespace> timer) throws Exception {
Row timerData = timerHandler.buildTimerData(timeDomain, internalTimerService.currentWatermark(), timer.getTimestamp(), timer.getKey(), null);
timerDataSerializer.serialize(timerData, baosWrapper);
pythonFunctionRunner.processTimer(baos.toByteArray());
baos.reset();
elementCount++;
checkInvokeFinishBundleByCount();
emitResults();
}
| 3.26 |
flink_ExternalPythonKeyedCoProcessOperator_setCurrentKey_rdh
|
/**
* As the beam state gRPC service will access the KeyedStateBackend in parallel with this
* operator, we must override this method to prevent changing the current key of the
* KeyedStateBackend while the beam service is handling requests.
*/
@Override
public void setCurrentKey(Object key) {
if (inBatchExecutionMode(getKeyedStateBackend())) {
super.setCurrentKey(key);
}
keyForTimerService = key;
}
| 3.26 |
flink_GSFileSystemOptions_getWriterTemporaryBucketName_rdh
|
/**
* The temporary bucket name to use for recoverable writes, if different from the final bucket
* name.
*/
public Optional<String> getWriterTemporaryBucketName() {
return flinkConfig.getOptional(WRITER_TEMPORARY_BUCKET_NAME);
}
| 3.26 |
flink_GSFileSystemOptions_m0_rdh
|
/**
* The chunk size to use for writes on the underlying Google WriteChannel.
*/
public Optional<MemorySize> m0() {
return flinkConfig.getOptional(WRITER_CHUNK_SIZE);
}
| 3.26 |
flink_TableSource_explainSource_rdh
|
/**
* Describes the table source.
*
* @return A String explaining the {@link TableSource}.
*/
default String explainSource() {
return TableConnectorUtils.generateRuntimeName(getClass(), getTableSchema().getFieldNames());
}
| 3.26 |
flink_TableSource_getProducedDataType_rdh
|
/**
* Returns the {@link DataType} for the produced data of the {@link TableSource}.
*
* @return The data type of the returned {@code DataStream}.
*/
default DataType getProducedDataType() {
final TypeInformation<T> legacyType = getReturnType();
if (legacyType == null) {
throw new TableException("Table source does not implement a produced data type.");
}
return fromLegacyInfoToDataType(legacyType).notNull();
}
/**
*
* @deprecated This method will be removed in future versions as it uses the old type system. It
is recommended to use {@link #getProducedDataType()} instead which uses the new type
system based on {@link DataTypes}
| 3.26 |
flink_RichSqlInsert_isUpsert_rdh
|
// ~ Tools ------------------------------------------------------------------
public static boolean isUpsert(List<SqlLiteral> keywords) {
for (SqlNode keyword : keywords) {
SqlInsertKeyword keyword2 = ((SqlLiteral) (keyword)).symbolValue(SqlInsertKeyword.class);
if (keyword2 == SqlInsertKeyword.UPSERT) {
return true;
}
}
return false;
}
| 3.26 |
flink_RichSqlInsert_isOverwrite_rdh
|
/**
* Returns whether the insert mode is overwrite (for whole table or for specific partitions).
*
* @return true if this is overwrite mode
*/
public boolean isOverwrite() {
return getModifierNode(RichSqlInsertKeyword.OVERWRITE) != null;
}
| 3.26 |
flink_RichSqlInsert_getTableHints_rdh
|
/**
* Returns the table hints as list of {@code SqlNode} for current insert node.
*/
public SqlNodeList getTableHints() {
return this.tableHints;
}
| 3.26 |
flink_TupleComparator_hash_rdh
|
// --------------------------------------------------------------------------------------------
// Comparator Methods
// --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
@Override public int hash(T value) {
int i = 0;
try {
int code = this.comparators[0].hash(value.getFieldNotNull(keyPositions[0]));
for (i = 1; i < this.keyPositions.length; i++) {
code *= HASH_SALT[i & 0x1f];// salt code with (i % HASH_SALT.length)-th salt
// component
code += this.comparators[i].hash(value.getFieldNotNull(keyPositions[i]));
}
return code;
} catch (NullFieldException nfex) {
throw new NullKeyFieldException(nfex);
} catch (IndexOutOfBoundsException iobex) {
throw new KeyFieldOutOfBoundsException(keyPositions[i]);
}
}
| 3.26 |
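A minimal standalone sketch of the salted hash combination used by the hash method above: the running code is multiplied by a per-position salt before the next field's hash is added, so that permuted field values tend to produce different results. The salt table and field hashes below are illustrative assumptions, not the real HASH_SALT constants.

public final class SaltedHashSketch {
    // Illustrative salt table; the real constants live in Flink's TupleComparatorBase.
    private static final int[] HASH_SALT = {73, 79, 83, 89, 97, 101, 103, 107};

    // Combines per-field hash codes in the same way as the hash method above.
    static int combine(int[] fieldHashes) {
        int code = fieldHashes[0];
        for (int i = 1; i < fieldHashes.length; i++) {
            code *= HASH_SALT[i % HASH_SALT.length]; // salt the accumulated code
            code += fieldHashes[i];                  // mix in the next field's hash
        }
        return code;
    }

    public static void main(String[] args) {
        // The same values in a different order produce different combined hashes.
        System.out.println(combine(new int[]{"a".hashCode(), "b".hashCode()}));
        System.out.println(combine(new int[]{"b".hashCode(), "a".hashCode()}));
    }
}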
flink_SourceProvider_of_rdh
|
/**
* Helper method for creating a Source provider with a provided source parallelism.
*/
static SourceProvider of(Source<RowData, ?, ?> source, @Nullable Integer sourceParallelism) {
return new SourceProvider() {
@Override
public Source<RowData, ?, ?> createSource() {
return source;
}
@Override
public boolean isBounded() {
return Boundedness.BOUNDED.equals(source.getBoundedness());
}
@Override
public Optional<Integer> getParallelism() {
return Optional.ofNullable(sourceParallelism);
}
};
}
| 3.26 |
flink_RowSerializer_getPositionByName_rdh
|
// --------------------------------------------------------------------------------------------
private int getPositionByName(String fieldName) {
assert positionByName != null;
final Integer targetPos = positionByName.get(fieldName);
if (targetPos == null) {
throw new RuntimeException(String.format("Unknown field name '%s' for mapping to a row position. " + "Available names are: %s", fieldName, positionByName.keySet()));
}
return targetPos;
}
| 3.26 |
flink_RowSerializer_snapshotConfiguration_rdh
|
// --------------------------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<Row> snapshotConfiguration() {
return new RowSerializerSnapshot(this);
}
| 3.26 |
flink_RowSerializer_fillMask_rdh
|
// --------------------------------------------------------------------------------------------
// Serialization utilities
// --------------------------------------------------------------------------------------------
private static void fillMask(int fieldLength, Row row, boolean[] mask, boolean supportsRowKind, int rowKindOffset) {
if (supportsRowKind) {
final byte v45 = row.getKind().toByteValue();
mask[0] = (v45 & 0x1) > 0;
mask[1] = (v45 & 0x2) > 0;
}
for (int fieldPos = 0; fieldPos < fieldLength; fieldPos++) {
mask[rowKindOffset + fieldPos] = row.getField(fieldPos) == null;
}
}
| 3.26 |
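A hypothetical, self-contained sketch of the mask layout that fillMask produces: with row-kind support enabled, the first two booleans carry the two bits of the row-kind byte and the remaining entries flag null fields. Plain Java arrays stand in here for Flink's Row and RowKind types.

import java.util.Arrays;

public final class RowMaskSketch {

    // Builds the mask: 2 leading bits for a row-kind byte (0..3), then one null flag per field.
    static boolean[] fillMask(byte rowKindByte, Object[] fields, boolean supportsRowKind) {
        int rowKindOffset = supportsRowKind ? 2 : 0;
        boolean[] mask = new boolean[rowKindOffset + fields.length];
        if (supportsRowKind) {
            mask[0] = (rowKindByte & 0x1) > 0; // low bit of the row kind
            mask[1] = (rowKindByte & 0x2) > 0; // high bit of the row kind
        }
        for (int pos = 0; pos < fields.length; pos++) {
            mask[rowKindOffset + pos] = fields[pos] == null; // true marks a null field
        }
        return mask;
    }

    public static void main(String[] args) {
        // Row kind 3 (both bits set), second field null: [true, true, false, true]
        System.out.println(Arrays.toString(fillMask((byte) 3, new Object[]{"x", null}, true)));
    }
}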
flink_AbstractStreamTableEnvironmentImpl_execEnv_rdh
|
/**
* This is a temporary workaround for Python API. Python API should not use
* StreamExecutionEnvironment at all.
*/
@Internal
public StreamExecutionEnvironment execEnv() {
return executionEnvironment;
}
| 3.26 |
flink_AbstractID_longToByteArray_rdh
|
/**
* Converts a long to a byte array.
*
* @param l
* the long variable to be converted
* @param ba
* the byte array to store the result the of the conversion
* @param offset
* offset indicating at what position inside the byte array the result of the
* conversion shall be stored
*/
private static void longToByteArray(long l, byte[] ba, int offset) {
for (int i = 0; i < SIZE_OF_LONG; ++i) {
final int shift = i << 3;// i * 8
ba[((offset + SIZE_OF_LONG) - 1) - i] = ((byte) ((l & (0xffL << shift)) >>> shift));
}
}
| 3.26 |
flink_AbstractID_getBytes_rdh
|
/**
* Gets the bytes underlying this ID.
*
* @return The bytes underlying this ID.
*/
public byte[] getBytes() {
byte[] bytes = new byte[SIZE];
longToByteArray(lowerPart, bytes, 0);
longToByteArray(upperPart, bytes, SIZE_OF_LONG);
return bytes;
}
| 3.26 |
flink_AbstractID_toHexString_rdh
|
/**
* Returns a pure String representation of the ID in hexadecimal. This method should be used to
* construct things like paths etc. that require a stable representation, and it is therefore
* final.
*/
public final String toHexString() {
if (this.hexString == null) {
final byte[] ba = new byte[SIZE];
longToByteArray(this.lowerPart, ba, 0);
longToByteArray(this.upperPart, ba, SIZE_OF_LONG);
this.hexString = StringUtils.byteToHexString(ba);
}
return this.hexString;
}
| 3.26 |
flink_AbstractID_equals_rdh
|
// --------------------------------------------------------------------------------------------
// Standard Utilities
// --------------------------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
} else if ((obj != null) && (obj.getClass() == getClass())) {
AbstractID that = ((AbstractID) (obj));
return (that.lowerPart == this.lowerPart) && (that.upperPart == this.upperPart);
} else {
return false;
}
}
| 3.26 |
flink_AbstractID_getLowerPart_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the lower 64 bits of the ID.
*
* @return The lower 64 bits of the ID.
*/
public long getLowerPart() {
return lowerPart;
}
| 3.26 |
flink_AbstractID_byteArrayToLong_rdh
|
// Conversion Utilities
// --------------------------------------------------------------------------------------------
/**
* Converts the given byte array to a long.
*
* @param ba
* the byte array to be converted
* @param offset
* the offset indicating at which byte inside the array the conversion shall begin
* @return the long variable
*/
private static long byteArrayToLong(byte[] ba, int offset) {
long l = 0;
for (int i = 0; i < SIZE_OF_LONG; ++i) {
l |= (ba[((offset + SIZE_OF_LONG) - 1) - i] & 0xffL) << (i << 3);
}
return l;
}
| 3.26 |
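A self-contained sketch of the big-endian round trip implemented by longToByteArray and byteArrayToLong above; SIZE_OF_LONG is assumed to be 8, as in the original class.

public final class IdBytesSketch {
    private static final int SIZE_OF_LONG = 8;

    // Writes the long big-endian into ba starting at offset (mirrors longToByteArray above).
    static void longToByteArray(long l, byte[] ba, int offset) {
        for (int i = 0; i < SIZE_OF_LONG; ++i) {
            final int shift = i << 3; // i * 8
            ba[offset + SIZE_OF_LONG - 1 - i] = (byte) ((l & (0xffL << shift)) >>> shift);
        }
    }

    // Reads the long back from the same layout (mirrors byteArrayToLong above).
    static long byteArrayToLong(byte[] ba, int offset) {
        long l = 0;
        for (int i = 0; i < SIZE_OF_LONG; ++i) {
            l |= (ba[offset + SIZE_OF_LONG - 1 - i] & 0xffL) << (i << 3);
        }
        return l;
    }

    public static void main(String[] args) {
        byte[] buf = new byte[SIZE_OF_LONG];
        longToByteArray(0x1122334455667788L, buf, 0);
        // The round trip recovers the original value: prints 1122334455667788.
        System.out.println(Long.toHexString(byteArrayToLong(buf, 0)));
    }
}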
flink_AvroUtils_getAvroUtils_rdh
|
/**
* Returns either the default {@link AvroUtils} which throw an exception in cases where Avro
* would be needed or loads the specific utils for Avro from flink-avro.
*/
public static AvroUtils getAvroUtils() {
// try and load the special AvroUtils from the flink-avro package
try {
Class<?> clazz = Class.forName(AVRO_KRYO_UTILS, false, Thread.currentThread().getContextClassLoader());
return clazz.asSubclass(AvroUtils.class).getConstructor().newInstance();
} catch (ClassNotFoundException e) {
// cannot find the utils, return the default implementation
return new DefaultAvroUtils();
} catch (Exception e) {
throw new RuntimeException(("Could not instantiate " + AVRO_KRYO_UTILS) + ".", e);
}
}
| 3.26 |
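A hedged sketch of the reflection-based optional-dependency pattern that getAvroUtils relies on: try to load an implementation class by name through the context class loader and fall back to a default when it is missing. The class and interface names below are placeholders, not real Flink types.

public final class OptionalDependencyLoader {
    // Placeholder name of an optional implementation that may or may not be on the classpath.
    private static final String OPTIONAL_IMPL = "com.example.FancyUtils";

    interface Utils { String describe(); }

    static class DefaultUtils implements Utils {
        public String describe() { return "default implementation"; }
    }

    static Utils load() {
        try {
            Class<?> clazz =
                    Class.forName(OPTIONAL_IMPL, false, Thread.currentThread().getContextClassLoader());
            return clazz.asSubclass(Utils.class).getConstructor().newInstance();
        } catch (ClassNotFoundException e) {
            // Optional dependency missing: fall back silently.
            return new DefaultUtils();
        } catch (Exception e) {
            // Present but not instantiable: surface the problem.
            throw new RuntimeException("Could not instantiate " + OPTIONAL_IMPL + ".", e);
        }
    }

    public static void main(String[] args) {
        System.out.println(load().describe()); // prints "default implementation" here
    }
}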
flink_PlannerCallProcedureOperation_toExternal_rdh
|
/**
* Convert the value with internal representation to the value with external representation.
*/
private Object toExternal(Object internalValue, DataType inputType, ClassLoader classLoader) {
if (!DataTypeUtils.isInternal(inputType)) {
// if the expected input type of the procedure is not internal type,
// which means the converted Flink internal value doesn't
// match the expected input type, then we need to convert the Flink
// internal value to external value
DataStructureConverter<Object, Object> v9 = DataStructureConverters.getConverter(inputType);
v9.open(classLoader);
return v9.toExternal(internalValue);
} else { return internalValue;
}
}
| 3.26 |
flink_PlannerCallProcedureOperation_procedureResultToTableResult_rdh
|
/**
* Converts the result of a procedure to a table result.
*/
private TableResultInternal procedureResultToTableResult(Object procedureResult, TableConfig tableConfig, ClassLoader userClassLoader) {
// get result converter
ZoneId zoneId = tableConfig.getLocalTimeZone();
DataType v21 = outputType;
// if is not composite type, wrap it to composited type
if (!LogicalTypeChecks.isCompositeType(outputType.getLogicalType())) {
v21 = DataTypes.ROW(DataTypes.FIELD("result", v21));
}
RowRowConverter rowConverter = null;
// if the output is struct type,
// we need a row converter to help convert it to Row.
// we will first convert the struct value to RowData, and then use the row converter
// to convert the RowData to Row.
if (outputType.getLogicalType().getTypeRoot() == STRUCTURED_TYPE) {
rowConverter = RowRowConverter.create(v21);
rowConverter.open(userClassLoader);
}
// expand the result type to schema
ResolvedSchema resultSchema = DataTypeUtils.expandCompositeTypeToSchema(v21);
RowDataToStringConverter rowDataToStringConverter = new RowDataToStringConverterImpl(v21, zoneId, userClassLoader, tableConfig.get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR).isEnabled());
// create DataStructure converters
DataStructureConverter<Object, Object> converter = DataStructureConverters.getConverter(outputType);
converter.open(userClassLoader);
return TableResultImpl.builder().resultProvider(new CallProcedureResultProvider(converter, rowDataToStringConverter, rowConverter, procedureResult)).schema(resultSchema).resultKind(ResultKind.SUCCESS_WITH_CONTENT).build();
}
| 3.26 |
flink_WordCount_main_rdh
|
// *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
LOGGER.warn(DATASET_DEPRECATION_INFO);
final MultipleParameterTool params = MultipleParameterTool.fromArgs(args);
// set up the execution environment
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
// get input data
DataSet<String> text = null;
if (params.has("input")) {
// union all the inputs from text files
for (String input : params.getMultiParameterRequired("input")) {
if (text == null) {
text = env.readTextFile(input);
} else {
text = text.union(env.readTextFile(input));
}
}
Preconditions.checkNotNull(text, "Input DataSet should not be null.");
} else {
// get default test text data
System.out.println("Executing WordCount example with default input data set.");
System.out.println("Use --input to specify file input.");
text = WordCountData.getDefaultTextLineDataSet(env);
}
// split up the lines in pairs (2-tuples) containing: (word,1)
DataSet<Tuple2<String, Integer>> counts = // group by the tuple field "0" and sum up tuple field "1"
text.flatMap(new Tokenizer()).groupBy(0).sum(1);
// emit result
if (params.has("output")) {
counts.writeAsCsv(params.get("output"), "\n", " ");
// execute program
env.execute("WordCount Example");
} else {
System.out.println("Printing result to stdout. Use --output to specify output path.");
counts.print();
}
}
| 3.26 |
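The WordCount job above references a Tokenizer flat-map function that is not part of this snippet. A minimal sketch of such a tokenizer, following the well-known Flink DataSet example pattern (reproduced here as an assumption, not taken from this row), looks like this:

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

// Splits each line into lowercase words and emits (word, 1) pairs.
public final class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
    @Override
    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
        for (String token : value.toLowerCase().split("\\W+")) {
            if (token.length() > 0) {
                out.collect(new Tuple2<>(token, 1));
            }
        }
    }
}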
flink_CoProcessFunction_onTimer_rdh
|
/**
* Called when a timer set using {@link TimerService} fires.
*
* @param timestamp
* The timestamp of the firing timer.
* @param ctx
* An {@link OnTimerContext} that allows querying the timestamp of the firing timer,
* querying the {@link TimeDomain} of the firing timer and getting a {@link TimerService}
* for registering timers and querying the time. The context is only valid during the
* invocation of this method; do not store it.
* @param out
* The collector for returning result values.
* @throws Exception
* This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
public void onTimer(long timestamp, OnTimerContext ctx, Collector<OUT> out) throws Exception {
}
| 3.26 |
flink_ListAggWithRetractAggFunction_getArgumentDataTypes_rdh
|
// --------------------------------------------------------------------------------------------
// Planning
// --------------------------------------------------------------------------------------------
@Override
public List<DataType> getArgumentDataTypes() {
return Collections.singletonList(DataTypes.STRING().bridgedTo(StringData.class));
}
| 3.26 |
flink_BatchShuffleReadBufferPool_requestBuffers_rdh
|
/**
* Requests a collection of buffers (determined by {@link #numBuffersPerRequest}) from this
* buffer pool.
*/
public List<MemorySegment> requestBuffers() throws Exception {
List<MemorySegment> allocated = new ArrayList<>(numBuffersPerRequest);
synchronized(buffers) {
checkState(!destroyed, "Buffer pool is already destroyed.");
if (!initialized) {
initialize();
}
Deadline deadline = Deadline.fromNow(WAITING_TIME);
while (buffers.size() < numBuffersPerRequest) {
checkState(!destroyed, "Buffer pool is already destroyed.");
buffers.wait(WAITING_TIME.toMillis());
if (!deadline.hasTimeLeft()) {
return allocated; // return the empty list
}
}
while (allocated.size() < numBuffersPerRequest) {
allocated.add(buffers.poll());
}
lastBufferOperationTimestamp = System.currentTimeMillis();
}
return allocated;
}
| 3.26 |
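A plain-Java sketch of the deadline-bounded wait/notify pattern used by requestBuffers above: wait on a shared queue until enough items are available or the deadline passes, returning an empty batch on timeout. The element type and sizes are illustrative.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

public final class DeadlineBoundedRequestSketch {
    private final Queue<Integer> items = new ArrayDeque<>();
    private final int itemsPerRequest = 4;

    // Blocks until itemsPerRequest items are available or timeoutMillis elapses.
    List<Integer> request(long timeoutMillis) throws InterruptedException {
        List<Integer> allocated = new ArrayList<>(itemsPerRequest);
        long deadline = System.currentTimeMillis() + timeoutMillis;
        synchronized (items) {
            while (items.size() < itemsPerRequest) {
                long remaining = deadline - System.currentTimeMillis();
                if (remaining <= 0) {
                    return allocated; // timed out: return the empty batch
                }
                items.wait(remaining);
            }
            while (allocated.size() < itemsPerRequest) {
                allocated.add(items.poll());
            }
        }
        return allocated;
    }

    // Counterpart of recycle(): hand items back and wake up waiting requesters.
    void recycle(List<Integer> returned) {
        synchronized (items) {
            items.addAll(returned);
            items.notifyAll();
        }
    }
}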
flink_BatchShuffleReadBufferPool_destroy_rdh
|
/**
* Destroys this buffer pool and after which, no buffer can be allocated any more.
*/
public void destroy() {
synchronized(buffers) {
destroyed = true;
buffers.clear();
buffers.notifyAll();
}
}
| 3.26 |
flink_BatchShuffleReadBufferPool_initialize_rdh
|
/**
* Initializes this buffer pool which allocates all the buffers.
*/
public void initialize() {
synchronized(buffers) {
checkState(!destroyed, "Buffer pool is already destroyed.");
if (initialized) {
return;
}
initialized = true;
try {
for (int i = 0; i < numTotalBuffers; ++i) {
buffers.add(MemorySegmentFactory.allocateUnpooledOffHeapMemory(bufferSize));
}
} catch (OutOfMemoryError outOfMemoryError) {
int allocated = buffers.size();
buffers.forEach(MemorySegment::free);
buffers.clear();
throw new OutOfMemoryError(String.format(((((((("Can't allocate enough direct buffer for batch shuffle read buffer " + "pool (bytes allocated: %d, bytes still needed: %d). To ") + "avoid the exception, you need to do one of the following") + " adjustments: 1) If you have ever decreased %s, you need") + " to undo the decrement; 2) If you ever increased %s, you") + " should also increase %s; 3) If neither the above cases,") + " it usually means some other parts of your application ") + "have consumed too many direct memory and the value of %s") + " should be increased.", allocated * bufferSize, (numTotalBuffers - allocated) * bufferSize, TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.key(), TaskManagerOptions.NETWORK_BATCH_SHUFFLE_READ_MEMORY.key(), TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.key(), TaskManagerOptions.TASK_OFF_HEAP_MEMORY.key()));
}
}
LOG.info("Batch shuffle IO buffer pool initialized: numBuffers={}, bufferSize={}.", numTotalBuffers, bufferSize);
}
| 3.26 |
flink_BatchShuffleReadBufferPool_recycle_rdh
|
/**
* Recycles a collection of buffers to this buffer pool. This method should never throw any
* exception.
*/
public void recycle(Collection<MemorySegment> segments) {
checkArgument(segments != null, "Buffer list must be not null.");
if (segments.isEmpty()) {
return;
}
synchronized(buffers) {
checkState(initialized, "Recycling a buffer before initialization.");
if (destroyed) {
segments.forEach(MemorySegment::free);
return;
}
boolean shouldNotify = (buffers.size() < numBuffersPerRequest) && ((buffers.size() + segments.size()) >= numBuffersPerRequest);
buffers.addAll(segments);
lastBufferOperationTimestamp = System.currentTimeMillis();
if (shouldNotify) {
buffers.notifyAll();
}
}
}
| 3.26 |
flink_JobGraph_getCheckpointingSettings_rdh
|
/**
* Gets the settings for asynchronous snapshots. This method returns null when checkpointing is
* not enabled.
*
* @return The snapshot settings
*/
public JobCheckpointingSettings getCheckpointingSettings() {
return snapshotSettings;
}
| 3.26 |
flink_JobGraph_addVertex_rdh
|
/**
* Adds a new task vertex to the job graph if it is not already included.
*
* @param vertex
* the new task vertex to be added
*/
public void addVertex(JobVertex vertex) {
final JobVertexID id = vertex.getID();
JobVertex previous = taskVertices.put(id, vertex);
// if we had a prior association, restore and throw an exception
if (previous != null) {
taskVertices.put(id, previous);
throw new IllegalArgumentException("The JobGraph already contains a vertex with that id.");
}
}
| 3.26 |
flink_JobGraph_addUserJarBlobKey_rdh
|
/**
* Adds the BLOB referenced by the key to the JobGraph's dependencies.
*
* @param key
* path of the JAR file required to run the job on a task manager
*/
public void addUserJarBlobKey(PermanentBlobKey key) {
if (key == null) {
throw new IllegalArgumentException();
}
if (!userJarBlobKeys.contains(key)) {
userJarBlobKeys.add(key);
}
}
| 3.26 |
flink_JobGraph_getJobConfiguration_rdh
|
/**
* Returns the configuration object for this job. Job-wide parameters should be set into that
* configuration object.
*
* @return The configuration object for this job.
*/
public Configuration getJobConfiguration() {
return this.jobConfiguration;
}
| 3.26 |
flink_JobGraph_setJobID_rdh
|
/**
* Sets the ID of the job.
*/
public void setJobID(JobID jobID) {
this.jobID = jobID;
}
| 3.26 |
flink_JobGraph_setClasspaths_rdh
|
/**
* Sets the classpaths required to run the job on a task manager.
*
* @param paths
* paths of the directories/JAR files required to run the job on a task manager
*/
public void setClasspaths(List<URL> paths) {
classpaths = paths;
}
| 3.26 |
flink_JobGraph_getVertices_rdh
|
/**
* Returns an Iterable to iterate all vertices registered with the job graph.
*
* @return an Iterable to iterate all vertices registered with the job graph
*/
public Iterable<JobVertex> getVertices() {
return this.taskVertices.values();
}
| 3.26 |
flink_JobGraph_getNumberOfVertices_rdh
|
/**
* Returns the number of all vertices.
*
* @return The number of all vertices.
*/
public int getNumberOfVertices() {
return this.taskVertices.size();
}
| 3.26 |
flink_JobGraph_setExecutionConfig_rdh
|
/**
* Sets the execution config. This method eagerly serializes the ExecutionConfig for future RPC
* transport. Further modifications of the referenced ExecutionConfig object will not affect this
* serialized copy.
*
* @param executionConfig
* The ExecutionConfig to be serialized.
* @throws IOException
* Thrown if the serialization of the ExecutionConfig fails
*/
public void setExecutionConfig(ExecutionConfig executionConfig) throws IOException {
checkNotNull(executionConfig, "ExecutionConfig must not be null.");
setSerializedExecutionConfig(new SerializedValue<>(executionConfig));
}
| 3.26 |
flink_JobGraph_getMaximumParallelism_rdh
|
/**
* Gets the maximum parallelism of all operations in this job graph.
*
* @return The maximum parallelism of this job graph
*/
public int getMaximumParallelism() {
int maxParallelism = -1;
for (JobVertex v6 : taskVertices.values()) {
maxParallelism = Math.max(v6.getParallelism(), maxParallelism);
}
return maxParallelism;
}
| 3.26 |
flink_JobGraph_addJars_rdh
|
/**
* Adds the given jar files to the {@link JobGraph} via {@link JobGraph#addJar}.
*
* @param jarFilesToAttach
* a list of the {@link URL URLs} of the jar files to attach to the
* jobgraph.
* @throws RuntimeException
* if a jar URL is not valid.
*/
public void addJars(final List<URL> jarFilesToAttach) {
for (URL jar : jarFilesToAttach) {
try {
addJar(new Path(jar.toURI()));
} catch (URISyntaxException e) {
throw new RuntimeException("URL is invalid. This should not happen.", e);
}
}
}
| 3.26 |
flink_JobGraph_setSavepointRestoreSettings_rdh
|
/**
* Sets the savepoint restore settings.
*
* @param settings
* The savepoint restore settings.
*/
public void setSavepointRestoreSettings(SavepointRestoreSettings settings) {
this.savepointRestoreSettings = checkNotNull(settings, "Savepoint restore settings");
}
| 3.26 |
flink_JobGraph_getVerticesSortedTopologicallyFromSources_rdh
|
// --------------------------------------------------------------------------------------------
public List<JobVertex> getVerticesSortedTopologicallyFromSources() throws InvalidProgramException {
// early out on empty lists
if (this.taskVertices.isEmpty()) {
return Collections.emptyList();
}
List<JobVertex> sorted = new ArrayList<JobVertex>(this.taskVertices.size());
Set<JobVertex> remaining = new LinkedHashSet<JobVertex>(this.taskVertices.values());
// start by finding the vertices with no input edges
// and the ones with disconnected inputs (that refer to some standalone data set)
{
Iterator<JobVertex> iter = remaining.iterator();
while (iter.hasNext()) {
JobVertex v10 = iter.next();
if (v10.hasNoConnectedInputs()) {
sorted.add(v10);
iter.remove();
}
}
}
int startNodePos = 0;
// traverse from the nodes that were added until we found all elements
while (!remaining.isEmpty()) {
// first check if we have more candidates to start traversing from. if not, then the
// graph is cyclic, which is not permitted
if (startNodePos >= sorted.size()) {
throw new InvalidProgramException("The job graph is cyclic.");
}
JobVertex current = sorted.get(startNodePos++);
addNodesThatHaveNoNewPredecessors(current, sorted, remaining);
}
return sorted;
}
| 3.26 |
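A compact generic sketch of the same "no remaining predecessors" topological ordering used by getVerticesSortedTopologicallyFromSources: seed the result with nodes that have no predecessors, repeatedly append nodes whose predecessors are all sorted, and fail when no progress is possible while nodes remain (a cycle). The map-based graph representation is an assumption for illustration.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public final class TopoSortSketch {

    // predecessors: node -> set of nodes that must come before it
    static List<String> sortTopologically(Map<String, Set<String>> predecessors) {
        List<String> sorted = new ArrayList<>();
        Set<String> remaining = new LinkedHashSet<>(predecessors.keySet());
        while (!remaining.isEmpty()) {
            int before = sorted.size();
            Iterator<String> it = remaining.iterator();
            while (it.hasNext()) {
                String candidate = it.next();
                // a node is ready once all of its predecessors are already sorted
                if (sorted.containsAll(predecessors.get(candidate))) {
                    sorted.add(candidate);
                    it.remove();
                }
            }
            if (sorted.size() == before) {
                throw new IllegalStateException("The graph is cyclic.");
            }
        }
        return sorted;
    }

    public static void main(String[] args) {
        Map<String, Set<String>> graph = new LinkedHashMap<>();
        graph.put("source", Set.of());
        graph.put("map", Set.of("source"));
        graph.put("sink", Set.of("map"));
        System.out.println(sortTopologically(graph)); // [source, map, sink]
    }
}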
flink_JobGraph_addUserArtifact_rdh
|
/**
* Adds the path of a custom file required to run the job on a task manager.
*
* @param name
* a name under which this artifact will be accessible through {@link DistributedCache}
* @param file
* path of a custom file required to run the job on a task manager
*/
public void addUserArtifact(String name, DistributedCache.DistributedCacheEntry file) {
if (file == null) {
throw new IllegalArgumentException();
}
userArtifacts.putIfAbsent(name, file);
}
| 3.26 |
flink_JobGraph_getJobID_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Returns the ID of the job.
*
* @return the ID of the job
*/
public JobID getJobID() {
return this.jobID;
}
| 3.26 |
flink_JobGraph_getSerializedExecutionConfig_rdh
|
/**
* Returns the {@link ExecutionConfig}.
*
* @return ExecutionConfig
*/
public SerializedValue<ExecutionConfig> getSerializedExecutionConfig() {
return serializedExecutionConfig;
}
| 3.26 |
flink_JobGraph_addJar_rdh
|
// --------------------------------------------------------------------------------------------
// Handling of attached JAR files
// --------------------------------------------------------------------------------------------
/**
* Adds the path of a JAR file required to run the job on a task manager.
*
* @param jar
* path of the JAR file required to run the job on a task manager
*/
public void addJar(Path jar) {
if (jar == null) {
throw new IllegalArgumentException();
}
if (!userJars.contains(jar)) {
userJars.add(jar);
}
}
| 3.26 |
flink_JobGraph_m1_rdh
|
/**
* Returns an array of all job vertices that are registered with the job graph. The order in
* which the vertices appear in the list is not defined.
*
* @return an array of all job vertices that are registered with the job graph
*/
public JobVertex[] m1() {
return this.taskVertices.values().toArray(new JobVertex[this.taskVertices.size()]);
}
| 3.26 |
flink_JobGraph_getUserArtifacts_rdh
|
/**
* Gets the user artifacts registered with this job graph.
*
* @return The map of registered user artifacts
*/
public Map<String, DistributedCache.DistributedCacheEntry> getUserArtifacts() {
return userArtifacts;
}
| 3.26 |
flink_JobGraph_getSavepointRestoreSettings_rdh
|
/**
* Returns the configured savepoint restore setting.
*
* @return The configured savepoint restore settings.
*/
public SavepointRestoreSettings getSavepointRestoreSettings() {
return savepointRestoreSettings;
}
| 3.26 |
flink_JobGraph_getCoLocationGroups_rdh
|
/**
* Returns all {@link CoLocationGroup} instances associated with this {@code JobGraph}.
*
* @return The associated {@code CoLocationGroup} instances.
*/
public Set<CoLocationGroup> getCoLocationGroups() {
final Set<CoLocationGroup> coLocationGroups = IterableUtils.toStream(getVertices()).map(JobVertex::getCoLocationGroup).filter(Objects::nonNull).collect(Collectors.toSet());
return Collections.unmodifiableSet(coLocationGroups);
}
| 3.26 |
flink_JobGraph_isCheckpointingEnabled_rdh
|
/**
* Checks if the checkpointing was enabled for this job graph.
*
* @return true if checkpointing enabled
*/
public boolean isCheckpointingEnabled() {
if (snapshotSettings == null) {
return false;
}
return snapshotSettings.getCheckpointCoordinatorConfiguration().isCheckpointingEnabled();
}
| 3.26 |
flink_ApplicationDispatcherBootstrap_runApplicationEntryPoint_rdh
|
/**
* Runs the user program entrypoint and completes the given {@code jobIdsFuture} with the {@link JobID JobIDs} of the submitted jobs.
*
* <p>This should be executed in a separate thread (or task).
*/
private void runApplicationEntryPoint(final CompletableFuture<List<JobID>> jobIdsFuture,
final Set<JobID> tolerateMissingResult, final DispatcherGateway dispatcherGateway, final ScheduledExecutor scheduledExecutor, final boolean enforceSingleJobExecution, final boolean submitFailedJobOnApplicationError) {
if (submitFailedJobOnApplicationError && (!enforceSingleJobExecution)) {
jobIdsFuture.completeExceptionally(new ApplicationExecutionException(String.format("Submission of failed job in case of an application error ('%s') is not supported in non-HA setups.", DeploymentOptions.SUBMIT_FAILED_JOB_ON_APPLICATION_ERROR.key())));
return;
}
final List<JobID> v9 = new ArrayList<>(recoveredJobIds);
try {
final PipelineExecutorServiceLoader executorServiceLoader = new EmbeddedExecutorServiceLoader(v9, dispatcherGateway, scheduledExecutor);
/* suppress sysout */
ClientUtils.executeProgram(executorServiceLoader, configuration, f0, enforceSingleJobExecution, true);
if (v9.isEmpty()) {
jobIdsFuture.completeExceptionally(new ApplicationExecutionException("The application contains no execute() calls."));
} else {
jobIdsFuture.complete(v9);
}
}
catch (Throwable t) {
// If we're running in single job execution mode, it's safe to consider re-submission
// of an already finished job a success.
final Optional<DuplicateJobSubmissionException> maybeDuplicate = ExceptionUtils.findThrowable(t, DuplicateJobSubmissionException.class);
if ((enforceSingleJobExecution && maybeDuplicate.isPresent()) && maybeDuplicate.get().isGloballyTerminated()) {
final JobID jobId = maybeDuplicate.get().getJobID();
tolerateMissingResult.add(jobId);
jobIdsFuture.complete(Collections.singletonList(jobId));
} else if (submitFailedJobOnApplicationError && v9.isEmpty()) {
final JobID failedJobId = JobID.fromHexString(configuration.get(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID));
dispatcherGateway.submitFailedJob(failedJobId, FAILED_JOB_NAME, t).thenAccept(ignored -> jobIdsFuture.complete(Collections.singletonList(failedJobId)));
} else {
jobIdsFuture.completeExceptionally(new ApplicationExecutionException("Could not execute application.", t));
}
}
}
| 3.26 |
flink_ApplicationDispatcherBootstrap_m0_rdh
|
/**
* Runs the user program entrypoint by scheduling a task on the given {@code scheduledExecutor}.
* The returned {@link CompletableFuture} completes when all jobs of the user application have
* succeeded, or exceptionally if any of them fails or if job submission fails.
*/
private CompletableFuture<Void> m0(final DispatcherGateway dispatcherGateway, final ScheduledExecutor scheduledExecutor, final boolean enforceSingleJobExecution, final boolean submitFailedJobOnApplicationError) {
final CompletableFuture<List<JobID>> applicationExecutionFuture = new CompletableFuture<>();
final Set<JobID> tolerateMissingResult = Collections.synchronizedSet(new HashSet<>());
// we need to hand in a future as return value because we need to get those JobIDs out
// from the scheduled task that executes the user program
applicationExecutionTask = scheduledExecutor.schedule(() -> runApplicationEntryPoint(applicationExecutionFuture, tolerateMissingResult, dispatcherGateway, scheduledExecutor, enforceSingleJobExecution, submitFailedJobOnApplicationError), 0L, TimeUnit.MILLISECONDS);
return applicationExecutionFuture.thenCompose(jobIds -> getApplicationResult(dispatcherGateway, jobIds, tolerateMissingResult, scheduledExecutor));
}
| 3.26 |
flink_ApplicationDispatcherBootstrap_finishBootstrapTasks_rdh
|
/**
* Logs final application status and invokes error handler in case of unexpected failures.
* Optionally shuts down the given dispatcherGateway when the application completes (either
* successfully or in case of failure), depending on the corresponding config option.
*/
private CompletableFuture<Acknowledge> finishBootstrapTasks(final DispatcherGateway dispatcherGateway) {
final CompletableFuture<Acknowledge> shutdownFuture = applicationCompletionFuture.handle((ignored, t) -> {
if (t == null) {
LOG.info("Application completed SUCCESSFULLY");
return finish(dispatcherGateway, ApplicationStatus.SUCCEEDED);
}
final Optional<ApplicationStatus> maybeApplicationStatus = extractApplicationStatus(t);
if (maybeApplicationStatus.isPresent() && isCanceledOrFailed(maybeApplicationStatus.get())) {
final ApplicationStatus applicationStatus = maybeApplicationStatus.get();
LOG.info("Application {}: ", applicationStatus, t);
return finish(dispatcherGateway, applicationStatus);
}
if (t instanceof CancellationException) {
LOG.warn("Application has been cancelled because the {} is being stopped.", ApplicationDispatcherBootstrap.class.getSimpleName());
return CompletableFuture.completedFuture(Acknowledge.get());
}
LOG.warn("Application failed unexpectedly: ", t);
return FutureUtils.<Acknowledge>completedExceptionally(t);
}).thenCompose(Function.identity());
FutureUtils.handleUncaughtException(shutdownFuture, (t, e) -> errorHandler.onFatalError(e));
return shutdownFuture;
}
| 3.26 |
flink_ApplicationDispatcherBootstrap_unwrapJobResultException_rdh
|
/**
* If the given {@link JobResult} indicates success, this passes through the {@link JobResult}.
* Otherwise, this returns a future that is finished exceptionally (potentially with an
* exception from the {@link JobResult}).
*/
private CompletableFuture<JobResult> unwrapJobResultException(final CompletableFuture<JobResult> jobResult) {
return jobResult.thenApply(result -> {
if (result.isSuccess()) {
return result;
}
throw new CompletionException(UnsuccessfulExecutionException.fromJobResult(result, f0.getUserCodeClassLoader()));
});
}
| 3.26 |
flink_HsSubpartitionConsumerMemoryDataManager_addBuffer_rdh
|
// This method is only called from subpartitionMemoryDataManager with the write lock held.
@GuardedBy("consumerLock")
public boolean addBuffer(HsBufferContext bufferContext) {
tryIncreaseBacklog(bufferContext.getBuffer());
unConsumedBuffers.add(bufferContext);
trimHeadingReleasedBuffers();
return unConsumedBuffers.size() <= 1;
}
/**
* Check whether the head of {@link #unConsumedBuffers} is the buffer to be consumed. If so,
* return the buffer and backlog.
*
* @param toConsumeIndex
* index of buffer to be consumed.
* @param buffersToRecycle
* buffers to recycle if needed.
* @return If the head of {@link #unConsumedBuffers} is target, return optional of the buffer
and backlog. Otherwise, return {@link Optional#empty()}
| 3.26 |
flink_HsSubpartitionConsumerMemoryDataManager_getBacklog_rdh
|
// Gets the backlog without synchronization to provide the in-memory data backlog. This makes the
// result greater than or equal to the actual backlog, but obtaining an accurate backlog would
// bring too much extra overhead.
@SuppressWarnings("FieldAccessNotGuarded")
@Override
public int getBacklog() {
return backlog;
}
| 3.26 |
flink_HsSubpartitionConsumerMemoryDataManager_addInitialBuffers_rdh
|
// This method is only called from subpartitionMemoryDataManager with the write lock held.
@GuardedBy("consumerLock")
public void addInitialBuffers(Deque<HsBufferContext> buffers) {
for (HsBufferContext bufferContext : buffers) {
tryIncreaseBacklog(bufferContext.getBuffer());
}
unConsumedBuffers.addAll(buffers);
}
| 3.26 |
flink_ServiceType_classify_rdh
|
// Helper method
public static ServiceExposedType classify(Service service) {
KubernetesConfigOptions.ServiceExposedType type =
KubernetesConfigOptions.ServiceExposedType.valueOf(service.getSpec().getType());
if (type == ServiceExposedType.ClusterIP) {
if (HeadlessClusterIPService.HEADLESS_CLUSTER_IP.equals(service.getSpec().getClusterIP())) {
type = ServiceExposedType.Headless_ClusterIP;
}
}
return type;
}
| 3.26 |
flink_ServiceType_buildUpExternalRestService_rdh
|
/**
* Build up the external rest service template, according to the jobManager parameters.
*
* @param kubernetesJobManagerParameters
* the parameters of jobManager.
* @return the external rest service
*/
public Service buildUpExternalRestService(KubernetesJobManagerParameters kubernetesJobManagerParameters) {
final String serviceName = ExternalServiceDecorator.getExternalServiceName(kubernetesJobManagerParameters.getClusterId());
return new
ServiceBuilder().withApiVersion(Constants.API_VERSION).withNewMetadata().withName(serviceName).withLabels(kubernetesJobManagerParameters.getCommonLabels()).withAnnotations(kubernetesJobManagerParameters.getRestServiceAnnotations()).endMetadata().withNewSpec().withType(kubernetesJobManagerParameters.getRestServiceExposedType().serviceType().getType()).withSelector(kubernetesJobManagerParameters.getSelectors()).addNewPort().withName(Constants.REST_PORT_NAME).withPort(kubernetesJobManagerParameters.getRestPort()).withNewTargetPort(kubernetesJobManagerParameters.getRestBindPort()).endPort().endSpec().build();
}
| 3.26 |
flink_ServiceType_getRestPortFromExternalService_rdh
|
/**
* Get rest port from the external Service.
*/
public int getRestPortFromExternalService(Service externalService) {
final List<ServicePort> servicePortCandidates = externalService.getSpec().getPorts().stream().filter(x -> x.getName().equals(Constants.REST_PORT_NAME)).collect(Collectors.toList());
if (servicePortCandidates.isEmpty()) {
throw new RuntimeException(((("Failed to find port \"" + Constants.REST_PORT_NAME) + "\" in Service \"") + externalService.getMetadata().getName()) + "\"");
}
final ServicePort externalServicePort = servicePortCandidates.get(0);
return getRestPort(externalServicePort);
}
| 3.26 |
flink_Tuple12_equals_rdh
|
/**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple12)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple12 tuple = ((Tuple12) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
return true;
}
| 3.26 |
flink_Tuple12_m1_rdh
|
/**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
*/
public void m1(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
}
| 3.26 |
flink_Tuple12_toString_rdh
|
// -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11), where the individual fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ")";
}
| 3.26 |
flink_Tuple12_of_rdh
|
/**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0,
T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tuple12<T0, T1, T2, T3, T4, T5, T6,
T7, T8, T9, T10, T11> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11) {
return new Tuple12<>(f0, f1, f2, f3, f4, f5,
f6, f7, f8, f9, f10, f11);
}
| 3.26 |
flink_Tuple12_copy_rdh
|
/**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> copy() {
return new Tuple12<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11);
}
| 3.26 |
flink_ReduceDriver_prepare_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void prepare() throws Exception {
TaskConfig config = this.taskContext.getTaskConfig();
if (config.getDriverStrategy() != DriverStrategy.SORTED_REDUCE) {
throw new Exception("Unrecognized driver strategy for Reduce driver: " + config.getDriverStrategy().name());
}
this.serializer = this.taskContext.<T>getInputSerializer(0).getSerializer();
this.comparator = this.taskContext.getDriverComparator(0);
this.input = this.taskContext.getInput(0);
ExecutionConfig executionConfig = taskContext.getExecutionConfig();
this.objectReuseEnabled = executionConfig.isObjectReuseEnabled();
if (LOG.isDebugEnabled()) {
LOG.debug(("ReduceDriver object reuse: " + (this.objectReuseEnabled ? "ENABLED" : "DISABLED")) + ".");
}
}
| 3.26 |
flink_ReduceDriver_setup_rdh
|
// ------------------------------------------------------------------------
@Override
public void setup(TaskContext<ReduceFunction<T>, T> context) {
this.taskContext = context;
this.running = true;
}
| 3.26 |
flink_OggJsonDecodingFormat_listReadableMetadata_rdh
|
// --------------------------------------------------------------------------------------------
// Metadata handling
// --------------------------------------------------------------------------------------------
@Override
public Map<String, DataType> listReadableMetadata() {
final Map<String, DataType> metadataMap = new LinkedHashMap<>();
Stream.of(ReadableMetadata.values()).forEachOrdered(m -> metadataMap.put(m.key, m.dataType));
return metadataMap;
}
| 3.26 |
flink_SharedStateRegistry_registerReference_rdh
|
/**
* Shortcut for {@link #registerReference(SharedStateRegistryKey, StreamStateHandle, long,
* boolean)} with preventDiscardingCreatedCheckpoint = false.
*/
default StreamStateHandle registerReference(SharedStateRegistryKey registrationKey, StreamStateHandle state, long checkpointID) {
return registerReference(registrationKey, state, checkpointID, false);
}
| 3.26 |
flink_KeyGroupStatePartitionStreamProvider_getKeyGroupId_rdh
|
/**
* Returns the key group that corresponds to the data in the provided stream.
*/
public int getKeyGroupId() {
return keyGroupId;
}
| 3.26 |
flink_SegmentPartitionFileWriter_flush_rdh
|
/**
* This method is only called by the flushing thread.
*/
private void flush(TieredStoragePartitionId partitionId, int subpartitionId, int segmentId, List<Tuple2<Buffer, Integer>> buffersToFlush) {
try {
writeBuffers(partitionId, subpartitionId, segmentId, buffersToFlush, getTotalBytes(buffersToFlush));
buffersToFlush.forEach(bufferToFlush -> bufferToFlush.f0.recycleBuffer());
} catch (IOException exception) {
ExceptionUtils.rethrow(exception);
}
}
| 3.26 |
flink_SegmentPartitionFileWriter_flushOrFinishSegment_rdh
|
// ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void flushOrFinishSegment(TieredStoragePartitionId partitionId, int subpartitionId, SegmentBufferContext segmentBufferContext, CompletableFuture<Void> flushSuccessNotifier) {
int v5 = segmentBufferContext.getSegmentId();
List<Tuple2<Buffer, Integer>> buffersToFlush = segmentBufferContext.getBufferAndIndexes();
boolean isSegmentFinished = segmentBufferContext.isSegmentFinished();
checkState((!buffersToFlush.isEmpty()) || isSegmentFinished);
if (buffersToFlush.size() > 0) {
flush(partitionId, subpartitionId, v5, buffersToFlush);
}
if (isSegmentFinished) {
writeSegmentFinishFile(partitionId, subpartitionId, v5);
}
flushSuccessNotifier.complete(null);
}
| 3.26 |
flink_SegmentPartitionFileWriter_writeSegmentFinishFile_rdh
|
/**
* Writing a segment-finish file when the current segment is complete. The downstream can
* determine if the current segment is complete by checking for the existence of the
* segment-finish file.
*
* <p>Note that the method is only called by the flushing thread.
*/
private void writeSegmentFinishFile(TieredStoragePartitionId partitionId, int subpartitionId, int segmentId) {
try {
WritableByteChannel channel = subpartitionChannels[subpartitionId];
if (channel != null) {
channel.close();
subpartitionChannels[subpartitionId] = null;
}
SegmentPartitionFile.writeSegmentFinishFile(basePath, partitionId, subpartitionId, segmentId);
} catch (IOException exception) {
ExceptionUtils.rethrow(exception);
}
}
| 3.26 |
flink_PythonEnvironmentManagerUtils_pipInstallRequirements_rdh
|
/**
* Installs the 3rd party libraries listed in the user-provided requirements file. An optional
* requirements cache directory can be provided to support offline installation. In order not
* to pollute the public environment, the libraries will be installed into the specified
* directory and added to the PYTHONPATH of the UDF workers.
*
* @param requirementsFilePath
* The path of the requirements file.
* @param requirementsCacheDir
* The path of the requirements cached directory.
* @param requirementsInstallDir
* The target directory of the installation.
* @param pythonExecutable
* The python interpreter used to launch the pip program.
* @param environmentVariables
* The environment variables used to launch the pip program.
*/
public static void pipInstallRequirements(String requirementsFilePath, @Nullable String requirementsCacheDir, String requirementsInstallDir, String pythonExecutable, Map<String, String> environmentVariables) throws IOException {
String sitePackagesPath = getSitePackagesPath(requirementsInstallDir, pythonExecutable, environmentVariables);
String path = String.join(File.pathSeparator, requirementsInstallDir, "bin");
appendToEnvironmentVariable("PYTHONPATH", sitePackagesPath, environmentVariables);
appendToEnvironmentVariable("PATH", path, environmentVariables);
List<String> commands = new ArrayList<>(Arrays.asList(pythonExecutable, "-m", "pip", "install", "--ignore-installed", "-r", requirementsFilePath, "--prefix", requirementsInstallDir));
if (requirementsCacheDir != null) {
commands.addAll(Arrays.asList("--no-index", "--find-links", requirementsCacheDir));
}
int retries = 0;
while (true) {
try {
execute(commands.toArray(new String[0]), environmentVariables, true);
break;
} catch (Throwable t) {
retries++;
if (retries < MAX_RETRY_TIMES) {
LOG.warn(String.format("Pip install failed, retrying... (%d/%d)", retries, MAX_RETRY_TIMES), t);
} else {
LOG.error(String.format("Pip install failed, already retried %d time...", retries));
throw new IOException(t);
}
}
}
}
| 3.26 |
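A standalone sketch of the bounded retry loop used by pipInstallRequirements above: retry a failing action up to a maximum number of attempts and rethrow the last failure wrapped in an IOException. The action and limits are illustrative.

import java.io.IOException;

public final class RetrySketch {

    // Runs the action, retrying up to maxRetries times before giving up.
    static void runWithRetries(Runnable action, int maxRetries) throws IOException {
        int retries = 0;
        while (true) {
            try {
                action.run();
                return;
            } catch (Throwable t) {
                retries++;
                if (retries < maxRetries) {
                    System.err.printf("Attempt failed, retrying... (%d/%d)%n", retries, maxRetries);
                } else {
                    throw new IOException(t);
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        int[] calls = {0};
        runWithRetries(() -> {
            if (++calls[0] < 3) {
                throw new IllegalStateException("transient failure");
            }
            System.out.println("succeeded on attempt " + calls[0]);
        }, 5);
    }
}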
flink_FailoverStrategyFactoryLoader_loadFailoverStrategyFactory_rdh
|
/**
* Loads a {@link FailoverStrategy.Factory} from the given configuration.
*
* @param config
* which specifies the failover strategy factory to load
* @return failover strategy factory loaded
*/
public static Factory loadFailoverStrategyFactory(final Configuration config) {
checkNotNull(config);
final String strategyParam = config.getString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY);
switch (strategyParam.toLowerCase()) {
case FULL_RESTART_STRATEGY_NAME :
return new RestartAllFailoverStrategy.Factory();
case PIPELINED_REGION_RESTART_STRATEGY_NAME :
return new RestartPipelinedRegionFailoverStrategy.Factory();
default :
throw new IllegalConfigurationException("Unknown failover strategy: " + strategyParam);
}
}
| 3.26 |
flink_BinaryType_ofEmptyLiteral_rdh
|
/**
* The SQL standard defines that character string literals are allowed to be zero-length strings
* (i.e., to contain no characters) even though it is not permitted to declare a type of length
* zero. For consistent behavior, the same logic applies to binary strings.
*
* <p>This method enables this special kind of binary string.
*
* <p>Zero-length binary strings have no serializable string representation.
*/
public static BinaryType ofEmptyLiteral() {
return new BinaryType(EMPTY_LITERAL_LENGTH, false);
}
| 3.26 |
flink_DelegatingConfiguration_read_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
this.prefix = in.readUTF();
this.backingConfig.read(in);
}
| 3.26 |
flink_DelegatingConfiguration_getString_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public String getString(String key, String defaultValue) {
return this.backingConfig.getString(this.prefix + key, defaultValue);
}
| 3.26 |
flink_DelegatingConfiguration_hashCode_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return this.prefix.hashCode() ^ this.backingConfig.hashCode();
}
| 3.26 |
flink_DelegatingConfiguration_prefixOption_rdh
|
// --------------------------------------------------------------------------------------------
private static <T> ConfigOption<T> prefixOption(ConfigOption<T> option, String prefix) {
String key = prefix + option.key();
List<FallbackKey> v11;
if (option.hasFallbackKeys()) {
v11 = new ArrayList<>();
for (FallbackKey dk : option.fallbackKeys()) {
v11.add(createDeprecatedKey(prefix + dk.getKey()));
}
} else {
v11 = Collections.emptyList();
}
FallbackKey[] deprecated = v11.toArray(new FallbackKey[0]);
return new ConfigOption<T>(key, option.getClazz(), option.description(), option.defaultValue(), option.isList(), deprecated);
}
| 3.26 |
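A small usage sketch of the key-prefixing delegation that DelegatingConfiguration implements: lookups on the view prepend a fixed prefix before consulting the backing configuration. The key names here are illustrative, and the sketch assumes the standard Configuration#setString and DelegatingConfiguration(Configuration, String prefix) API.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.DelegatingConfiguration;

public final class PrefixConfigSketch {
    public static void main(String[] args) {
        Configuration backing = new Configuration();
        // The backing config stores the fully prefixed key...
        backing.setString("writer.chunk-size", "4mb");

        // ...while the delegating view is queried with the unprefixed key.
        DelegatingConfiguration writerView = new DelegatingConfiguration(backing, "writer.");
        System.out.println(writerView.getString("chunk-size", "unset")); // prints 4mb
    }
}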
flink_SchedulerNG_updateJobResourceRequirements_rdh
|
/**
* Update {@link JobResourceRequirements job resource requirements}.
*
* @param jobResourceRequirements
* new resource requirements
*/
default void updateJobResourceRequirements(JobResourceRequirements jobResourceRequirements) {
throw new UnsupportedOperationException(String.format("The %s does not support changing the parallelism without a job restart. This feature is currently only expected to work with the %s.", getClass().getSimpleName(), AdaptiveScheduler.class.getSimpleName()));
}
| 3.26 |
flink_SchedulerNG_requestJobResourceRequirements_rdh
|
/**
* Read current {@link JobResourceRequirements job resource requirements}.
*
* @return Current resource requirements.
*/
default JobResourceRequirements requestJobResourceRequirements() {
throw new UnsupportedOperationException(String.format("The %s does not support changing the parallelism without a job restart. This feature is currently only expected to work with the %s.", getClass().getSimpleName(), AdaptiveScheduler.class.getSimpleName()));
}
| 3.26 |
flink_StateConfigUtil_createTtlConfig_rdh
|
/**
* Creates a {@link StateTtlConfig} depends on retentionTime parameter.
*
* @param retentionTime
* State ttl time which unit is MILLISECONDS.
*/
public static StateTtlConfig createTtlConfig(long retentionTime) {
if (retentionTime > 0) {
return StateTtlConfig.newBuilder(Time.milliseconds(retentionTime)).setUpdateType(UpdateType.OnCreateAndWrite).setStateVisibility(StateVisibility.NeverReturnExpired).build();
} else {
return StateTtlConfig.DISABLED;
}
}
| 3.26 |
flink_JoinDriver_setup_rdh
|
// ------------------------------------------------------------------------
@Override
public void setup(TaskContext<FlatJoinFunction<IT1, IT2, OT>, OT> context) {
this.taskContext = context;
this.running = true;
}
| 3.26 |
flink_FileRecordFormat_getCheckpointedPosition_rdh
|
/**
* Optionally returns the current position of the reader. This can be implemented by readers
* that want to speed up recovery from a checkpoint.
*
* <p>The current position of the reader is the position of the next record that will be
* returned in a call to {@link #read()}.
*
* <p>See the {@link FileRecordFormat top-level class comment} (section "Checkpointing") for
* details.
*/
@Nullable
default CheckpointedPosition getCheckpointedPosition() {
return null;
}
| 3.26 |
flink_MemoryBackendCheckpointStorageAccess_supportsHighlyAvailableStorage_rdh
|
// ------------------------------------------------------------------------
// Checkpoint Storage
// ------------------------------------------------------------------------
@Override
public boolean supportsHighlyAvailableStorage() {
return checkpointsDirectory != null;
}
| 3.26 |
flink_MemoryBackendCheckpointStorageAccess_toString_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return (((((("MemoryBackendCheckpointStorage {" + "checkpointsDirectory=") + checkpointsDirectory) + ", fileSystem=") + fileSystem) +
", maxStateSize=") + maxStateSize) + '}';
}
| 3.26 |
flink_WebLogDataGenerator_genDocs_rdh
|
/**
* Generates the files for the documents relation. The entries apply the following format: <br>
* <code>URL | Content</code>
*
* @param noDocs
* Number of entries for the documents relation
* @param filterKeyWords
* A list of keywords that should be contained
* @param words
* A list of words to fill the entries
* @param path
* Output path for the documents relation
*/
private static void genDocs(int noDocs, String[] filterKeyWords, String[] words, String path) {
Random v5 = new Random(Calendar.getInstance().getTimeInMillis());
try (BufferedWriter fw = new BufferedWriter(new FileWriter(path))) {
for (int i = 0; i < noDocs; i++) {
int wordsInDoc = v5.nextInt(40) + 10;
// URL
StringBuilder doc = new StringBuilder(("url_" + i) + "|");
for (int j = 0; j < wordsInDoc; j++) {
if (v5.nextDouble() > 0.9) {
// Approx. every 10th word is a keyword
doc.append(filterKeyWords[v5.nextInt(filterKeyWords.length)] + " ");
} else {
// Fills up the docs file(s) with random words
doc.append(words[v5.nextInt(words.length)] + " ");
}
}
doc.append("|\n");
fw.write(doc.toString());
}
} catch (IOException e) {
e.printStackTrace();
}
}
| 3.26 |
flink_WebLogDataGenerator_main_rdh
|
/**
* Main method to generate data for the {@link WebLogAnalysis} example program.
*
* <p>The generator creates the following files:
*
* <ul>
* <li><code>{tmp.dir}/documents</code> for the web documents
* <li><code>{tmp.dir}/ranks</code> for the ranks of the web documents
* <li><code>{tmp.dir}/visits</code> for the logged visits of web documents
* </ul>
*
* @param args
* <ol>
* <li>Int: Number of web documents
* <li>Int: Number of visits
* </ol>
*/
public static void main(String[] args) {
// parse parameters
if (args.length < 2) {
System.out.println("WebLogDataGenerator <numberOfDocuments> <numberOfVisits>");
System.exit(1);
}
int noDocs = Integer.parseInt(args[0]);
int noVisits = Integer.parseInt(args[1]);
String[] filterKWs = new String[]{
"editors", "oscillations", "convection" };
String[] words = new String[]{ "Lorem", "ipsum", "dolor", "sit", "amet", "consectetuer", "adipiscing", "elit", "sed", "diam", "nonummy", "nibh", "euismod", "tincidunt", "ut", "laoreet", "dolore", "magna", "aliquam", "erat", "volutpat", "Ut", "wisi", "enim", "ad", "minim", "veniam", "quis", "nostrud", "exerci", "tation", "ullamcorper", "suscipit", "lobortis", "nisl", "ut", "aliquip", "ex", "ea", "commodo" };
final String outPath = System.getProperty("java.io.tmpdir");
System.out.println("Generating documents files...");
genDocs(noDocs, filterKWs, words, outPath + "/documents");
System.out.println("Generating ranks files...");
genRanks(noDocs, outPath + "/ranks");
System.out.println("Generating visits files...");
m0(noVisits, noDocs, outPath + "/visits");
System.out.println("Done!");
}
| 3.26 |
flink_WebLogDataGenerator_m0_rdh
|
/**
* Generates the files for the visits relation. The visits entries apply the following format:
* <br>
* <code>IP Address | URL | Date (YYYY-MM-DD) | Misc. Data (e.g. User-Agent) |\n</code>
*
* @param noVisits
* Number of entries for the visits relation
* @param noDocs
* Number of entries in the documents relation
* @param path
* Output path for the visits relation
*/
private static void m0(int noVisits, int noDocs, String path) {
Random rand = new Random(Calendar.getInstance().getTimeInMillis());
try (BufferedWriter fw = new BufferedWriter(new FileWriter(path))) {
for (int i = 0; i < noVisits; i++) {
int year = 2000 + rand.nextInt(10);// yearFilter 3
int month = rand.nextInt(12) + 1;// month between 1 and 12
int day = rand.nextInt(27) + 1;// day between 1 and 28
// IP address
StringBuilder visit = new StringBuilder(((((((rand.nextInt(256) + ".") + rand.nextInt(256)) + ".") + rand.nextInt(256)) + ".") + rand.nextInt(256)) + "|");
// URL
visit.append(("url_" + rand.nextInt(noDocs)) + "|");
// Date (format: YYYY-MM-DD)
visit.append(((((year + "-") + month) + "-") + day) + "|");
// Miscellaneous data, e.g. User-Agent
visit.append("0.12|Mozilla Firefox 3.1|de|de|Nothing special|124|\n");
fw.write(visit.toString());
}
} catch (IOException e) {
e.printStackTrace();
}
}
| 3.26 |
flink_WebLogDataGenerator_genRanks_rdh
|
/**
* Generates the files for the ranks relation. The ranks entries apply the following format:
* <br>
* <code>Rank | URL | Average Duration |\n</code>
*
* @param noDocs
* Number of entries in the documents relation
* @param path
* Output path for the ranks relation
*/
private static void genRanks(int noDocs, String path) {
Random rand = new Random(Calendar.getInstance().getTimeInMillis());
try (BufferedWriter fw = new BufferedWriter(new FileWriter(path))) {
for (int i = 0; i < noDocs; i++) {
// Rank
StringBuilder rank = new StringBuilder(rand.nextInt(100) + "|");
// URL
rank.append(("url_" + i) + "|");
// Average duration
rank.append((rand.nextInt(10) + rand.nextInt(50)) + "|\n");
fw.write(rank.toString());
}
} catch (IOException e) {
e.printStackTrace();
}
}
| 3.26 |
flink_MemoryTierProducerAgent_releaseResources_rdh
|
// ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void releaseResources() {
Arrays.stream(subpartitionProducerAgents).forEach(MemoryTierSubpartitionProducerAgent::release);
}
| 3.26 |