name | code_snippet | score
---|---|---|
flink_LocalTimeTypeInfo_instantiateComparator_rdh | // --------------------------------------------------------------------------------------------
private static <X> TypeComparator<X> instantiateComparator(Class<? extends TypeComparator<X>> comparatorClass, boolean ascendingOrder) {
try {
Constructor<? extends TypeComparator<X>> constructor = comparatorClass.getConstructor(boolean.class);
return constructor.newInstance(ascendingOrder);
}
catch (Exception e) {
throw new RuntimeException("Could not initialize comparator " + comparatorClass.getName(), e);
}
} | 3.26 |
flink_WrappingRuntimeException_unwrap_rdh | /**
 * Recursively unwraps this WrappingRuntimeException and its causes, returning the first
 * non-wrapping exception.
*
* @return The first cause that is not a wrapping exception.
*/
public Throwable unwrap() {
Throwable cause = getCause();
return cause instanceof WrappingRuntimeException ? ((WrappingRuntimeException) (cause)).unwrap() : cause;
} | 3.26 |
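A minimal usage sketch of the unwrapping behavior above; the call sites and class names are illustrative and assume the public WrappingRuntimeException(Throwable) constructor:

```java
import org.apache.flink.util.WrappingRuntimeException;

public class UnwrapDemo {
    public static void main(String[] args) {
        Throwable root = new IllegalStateException("root cause");
        // wrap twice to mimic an exception travelling through several wrapping layers
        WrappingRuntimeException wrapped =
                new WrappingRuntimeException(new WrappingRuntimeException(root));
        // unwrap() recurses through nested wrappers and returns the first non-wrapping cause
        System.out.println(wrapped.unwrap() == root); // expected: true
    }
}
```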
flink_GenericRowData_setField_rdh | /**
* Sets the field value at the given position.
*
 * <p>Note: The given field value must be an internal data structure. Otherwise the {@link GenericRowData} is corrupted and may throw an exception when processed. See {@link RowData} for
* more information about internal data structures.
*
* <p>The field value can be null for representing nullability.
*/
public void setField(int pos, Object value) {
this.fields[pos] = value;
} | 3.26 |
flink_GenericRowData_ofKind_rdh | /**
* Creates an instance of {@link GenericRowData} with given kind and field values.
*
* <p>Note: All fields of the row must be internal data structures.
*/
public static GenericRowData ofKind(RowKind kind, Object... values) {
GenericRowData row = new GenericRowData(kind, values.length);
for (int i = 0; i < values.length; ++i) {
row.setField(i, values[i]);
}
return row;
} | 3.26 |
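A short, hedged example of how ofKind is typically used together with the setField/getField rows around it; it assumes the usual flink-table-common classes (GenericRowData, StringData, RowKind) and that field values are passed as internal data structures:

```java
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.types.RowKind;

public class GenericRowDataDemo {
    public static void main(String[] args) {
        // internal data structures only: StringData instead of java.lang.String
        GenericRowData row =
                GenericRowData.ofKind(RowKind.INSERT, StringData.fromString("Alice"), 42);
        System.out.println(row.getRowKind()); // expected: INSERT
        System.out.println(row.getField(1));  // expected: 42
    }
}
```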
flink_GenericRowData_getField_rdh | /**
* Returns the field value at the given position.
*
 * <p>Note: The returned value is an internal data structure. See {@link RowData} for more
* information about internal data structures.
*
* <p>The returned field value can be null for representing nullability.
*/
public Object getField(int pos) {
return this.fields[pos];
} | 3.26 |
flink_ContinuousFileMonitoringFunction_listEligibleFiles_rdh | /**
* Returns the paths of the files not yet processed.
*
* @param fileSystem
* The filesystem where the monitored directory resides.
*/
private Map<Path, FileStatus> listEligibleFiles(FileSystem fileSystem, Path path) {
final FileStatus[] statuses;
try {
statuses = fileSystem.listStatus(path);
} catch (IOException e) {
// we may run into an IOException if files are moved while listing their status
// delay the check for eligible files in this case
return Collections.emptyMap();
}
if (statuses == null) {
LOG.warn("Path does not exist: {}", path);
return Collections.emptyMap();
} else {
Map<Path, FileStatus> v15 = new HashMap<>();
// handle the new files
for (FileStatus status : statuses) {
if (!status.isDir()) {
Path filePath = status.getPath();
long modificationTime = status.getModificationTime();
if (!shouldIgnore(filePath, modificationTime)) {
v15.put(filePath, status);
}
} else if (format.getNestedFileEnumeration() && format.acceptFile(status)) {
v15.putAll(listEligibleFiles(fileSystem, status.getPath()));
}
}
return v15;
}
} | 3.26 |
flink_ContinuousFileMonitoringFunction_shouldIgnore_rdh | /**
 * Returns {@code true} if the file is NOT to be processed further. This happens if the
 * modification time of the file is smaller than or equal to the {@link #globalModificationTime}.
*
* @param filePath
* the path of the file to check.
* @param modificationTime
* the modification time of the file.
*/
private boolean shouldIgnore(Path filePath, long modificationTime) {
assert Thread.holdsLock(checkpointLock);
boolean shouldIgnore = modificationTime <= globalModificationTime;
if (shouldIgnore && LOG.isDebugEnabled()) {
LOG.debug((((("Ignoring " + filePath) + ", with mod time= ") + modificationTime) + " and global mod time= ") + globalModificationTime);
}
return shouldIgnore;
} | 3.26 |
flink_ContinuousFileMonitoringFunction_getInputSplitsSortedByModTime_rdh | /**
* Creates the input splits to be forwarded to the downstream tasks of the {@link ContinuousFileReaderOperator}. Splits are sorted <b>by modification time</b> before being
* forwarded and only splits belonging to files in the {@code eligibleFiles} list will be
* processed.
*
* @param eligibleFiles
* The files to process.
*/
private Map<Long, List<TimestampedFileInputSplit>> getInputSplitsSortedByModTime(Map<Path, FileStatus> eligibleFiles) throws IOException {
Map<Long, List<TimestampedFileInputSplit>> splitsByModTime = new TreeMap<>();
if (eligibleFiles.isEmpty()) {
return splitsByModTime;
}
for (FileInputSplit split : format.createInputSplits(readerParallelism)) {
FileStatus fileStatus = eligibleFiles.get(split.getPath());
if (fileStatus != null) {
Long modTime = fileStatus.getModificationTime();
List<TimestampedFileInputSplit> splitsToForward = splitsByModTime.get(modTime);
if (splitsToForward == null) {
splitsToForward = new ArrayList<>();
splitsByModTime.put(modTime, splitsToForward);
}
splitsToForward.add(new TimestampedFileInputSplit(modTime, split.getSplitNumber(), split.getPath(), split.getStart(), split.getLength(), split.getHostnames()));
}
}
return splitsByModTime;
} | 3.26 |
flink_ContinuousFileMonitoringFunction_snapshotState_rdh | // --------------------- Checkpointing --------------------------
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
Preconditions.checkState(this.checkpointedState != null, ("The " + getClass().getSimpleName()) + " state has not been properly initialized.");
this.checkpointedState.update(Collections.singletonList(this.globalModificationTime));
if (LOG.isDebugEnabled()) {
LOG.debug("{} checkpointed {}.", getClass().getSimpleName(), globalModificationTime);}
} | 3.26 |
flink_DecimalData_fromBigDecimal_rdh | // ------------------------------------------------------------------------------------------
// Constructor Utilities
// ------------------------------------------------------------------------------------------
/**
* Creates an instance of {@link DecimalData} from a {@link BigDecimal} and the given precision
* and scale.
*
* <p>The returned decimal value may be rounded to have the desired scale. The precision will be
* checked. If the precision overflows, null will be returned.
*/
@Nullable
public static DecimalData fromBigDecimal(BigDecimal bd, int precision, int scale) {
bd = bd.setScale(scale, RoundingMode.HALF_UP);
if (bd.precision() > precision) {
return null;
}
long longVal = -1;
if (precision <= MAX_COMPACT_PRECISION) {
longVal = bd.movePointRight(scale).longValueExact();
}
return new DecimalData(precision, scale, longVal, bd);
} | 3.26 |
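To illustrate the rounding and precision-overflow behavior documented above, here is a small sketch (assuming the flink-table-common DecimalData class; the printed forms are expectations, not guaranteed toString output):

```java
import java.math.BigDecimal;
import org.apache.flink.table.data.DecimalData;

public class DecimalDataDemo {
    public static void main(String[] args) {
        // scale 2 triggers HALF_UP rounding: 12.345 -> 12.35; precision 4 <= 5, so a value is returned
        DecimalData ok = DecimalData.fromBigDecimal(new BigDecimal("12.345"), 5, 2);
        System.out.println(ok); // expected: 12.35

        // 123.456 rounds to 123.46, which needs precision 5 > 4 -> precision overflow -> null
        DecimalData overflow = DecimalData.fromBigDecimal(new BigDecimal("123.456"), 4, 2);
        System.out.println(overflow); // expected: null
    }
}
```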
flink_DecimalData_isCompact_rdh | // ------------------------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------------------------
/**
* Returns whether the decimal value is small enough to be stored in a long.
*/
public static boolean isCompact(int precision) {
return precision <= MAX_COMPACT_PRECISION;
} | 3.26 |
flink_DecimalData_toBigDecimal_rdh | /**
* Converts this {@link DecimalData} into an instance of {@link BigDecimal}.
*/
public BigDecimal toBigDecimal() {
BigDecimal bd = decimalVal;
if (bd == null) {
decimalVal = bd = BigDecimal.valueOf(longVal, scale);
}
return bd;
} | 3.26 |
flink_DecimalData_fromUnscaledBytes_rdh | /**
* Creates an instance of {@link DecimalData} from an unscaled byte array value and the given
* precision and scale.
 */
public static DecimalData fromUnscaledBytes(byte[] unscaledBytes, int precision, int scale) {
BigDecimal bd = new BigDecimal(new BigInteger(unscaledBytes), scale);
return fromBigDecimal(bd, precision, scale);
} | 3.26 |
flink_DecimalData_scale_rdh | /**
* Returns the <i>scale</i> of this {@link DecimalData}.
*/
public int scale() {
return scale;
} | 3.26 |
flink_DecimalData_toUnscaledLong_rdh | /**
* Returns a long describing the <i>unscaled value</i> of this {@link DecimalData}.
*
* @throws ArithmeticException
* if this {@link DecimalData} does not exactly fit in a long.
*/
public long toUnscaledLong() {
if (isCompact()) {
return longVal;
} else {
return toBigDecimal().unscaledValue().longValueExact();
}
} | 3.26 |
flink_DecimalData_zero_rdh | /**
* Creates an instance of {@link DecimalData} for a zero value with the given precision and
* scale.
*
* <p>The precision will be checked. If the precision overflows, null will be returned.
*/
@Nullable
public static DecimalData zero(int precision, int scale) {
if (precision <= MAX_COMPACT_PRECISION) {
return new DecimalData(precision, scale, 0, null);
} else {
return fromBigDecimal(BigDecimal.ZERO, precision, scale);
}
} | 3.26 |
flink_DecimalData_fromUnscaledLong_rdh | /**
* Creates an instance of {@link DecimalData} from an unscaled long value and the given
* precision and scale.
*/
public static DecimalData fromUnscaledLong(long unscaledLong, int precision, int scale) {
checkArgument((precision > 0) && (precision <= MAX_LONG_DIGITS));
return new DecimalData(precision, scale, unscaledLong, null);
} | 3.26 |
flink_DecimalData_precision_rdh | // ------------------------------------------------------------------------------------------
// Public Interfaces
// ------------------------------------------------------------------------------------------
/**
* Returns the <i>precision</i> of this {@link DecimalData}.
*
* <p>The precision is the number of digits in the unscaled value.
*/
public int precision() {
return precision;
} | 3.26 |
flink_DecimalData_copy_rdh | /**
* Returns a copy of this {@link DecimalData} object.
*/
public DecimalData copy() {
return new DecimalData(precision, scale, longVal, decimalVal);
} | 3.26 |
flink_BinaryRowWriter_reset_rdh | /**
* First, reset.
*/
@Override
public void reset() {
this.cursor = fixedSize;
for (int i = 0; i < nullBitsSizeInBytes; i += 8) {
segment.putLong(i, 0L);
}
} | 3.26 |
flink_BinaryRowWriter_setNullAt_rdh | /**
* Default not null.
 */
@Override
public void setNullAt(int pos) {
setNullBit(pos);
segment.putLong(getFieldOffset(pos), 0L);
} | 3.26 |
flink_TransientBlobCleanupTask_run_rdh | /**
* Cleans up transient BLOBs whose TTL is up, tolerating that files do not exist (anymore).
*/
@Override
public void run() {
// let's cache the current time - we do not operate on a millisecond precision anyway
final long currentTimeMillis = System.currentTimeMillis();
// iterate through all entries and remove those where the current time is past their expiry
Set<Map.Entry<Tuple2<JobID, TransientBlobKey>, Long>> entries = new HashSet<>(blobExpiryTimes.entrySet());
for (Map.Entry<Tuple2<JobID, TransientBlobKey>, Long> entry : entries) {
if (currentTimeMillis >= entry.getValue()) {
JobID jobId = entry.getKey().f0;
TransientBlobKey blobKey = entry.getKey().f1;
cleanupCallback.accept(jobId, blobKey);
}
}
} | 3.26 |
flink_RetryingRegistration_getFuture_rdh | // ------------------------------------------------------------------------
// completion and cancellation
// ------------------------------------------------------------------------
public CompletableFuture<RetryingRegistrationResult<G, S, R>> getFuture() {
return completionFuture;
} | 3.26 |
flink_RetryingRegistration_startRegistration_rdh | /**
* This method resolves the target address to a callable gateway and starts the registration
* after that.
*/
@SuppressWarnings("unchecked")
public void startRegistration() {
if (canceled) {
// we already got canceled
return;
}
try {
// trigger resolution of the target address to a callable gateway
final CompletableFuture<G> rpcGatewayFuture;
if (FencedRpcGateway.class.isAssignableFrom(targetType)) {
rpcGatewayFuture = ((CompletableFuture<G>) (rpcService.connect(targetAddress, fencingToken, targetType.asSubclass(FencedRpcGateway.class))));
} else {
rpcGatewayFuture = rpcService.connect(targetAddress, targetType);
}
// upon success, start the registration attempts
CompletableFuture<Void> rpcGatewayAcceptFuture = rpcGatewayFuture.thenAcceptAsync((G rpcGateway) -> {
log.info("Resolved {} address, beginning registration", targetName);
register(rpcGateway, 1, retryingRegistrationConfiguration.getInitialRegistrationTimeoutMillis());
}, rpcService.getScheduledExecutor());
// upon failure, retry, unless this is cancelled
rpcGatewayAcceptFuture.whenCompleteAsync((Void v, Throwable failure) -> {
if ((failure != null) && (!canceled)) {
final Throwable strippedFailure = ExceptionUtils.stripCompletionException(failure);
if (log.isDebugEnabled()) {
log.debug("Could not resolve {} address {}, retrying in {} ms.", targetName, targetAddress, retryingRegistrationConfiguration.getErrorDelayMillis(), strippedFailure);
} else {
log.info("Could not resolve {} address {}, retrying in {} ms: {}", targetName, targetAddress, retryingRegistrationConfiguration.getErrorDelayMillis(), strippedFailure.getMessage());
}
startRegistrationLater(retryingRegistrationConfiguration.getErrorDelayMillis());
}
}, rpcService.getScheduledExecutor());
} catch (Throwable t) {
completionFuture.completeExceptionally(t);
cancel();
}
} | 3.26 |
flink_RetryingRegistration_cancel_rdh | /**
* Cancels the registration procedure.
*/
public void cancel() {
canceled = true;
completionFuture.cancel(false);
} | 3.26 |
flink_RetryingRegistration_register_rdh | /**
* This method performs a registration attempt and triggers either a success notification or a
* retry, depending on the result.
*/
@SuppressWarnings("unchecked")
private void register(final G gateway, final int attempt, final long timeoutMillis) {
// eager check for canceling to avoid some unnecessary work
if (canceled) {
return;
}
try {
log.debug("Registration at {} attempt {} (timeout={}ms)", targetName,
attempt, timeoutMillis);
CompletableFuture<RegistrationResponse> registrationFuture = invokeRegistration(gateway, fencingToken, timeoutMillis);
// if the registration was successful, let the TaskExecutor know
CompletableFuture<Void> registrationAcceptFuture = registrationFuture.thenAcceptAsync((RegistrationResponse result) -> {
if (!isCanceled()) {
if (result instanceof RegistrationResponse.Success) {
log.debug("Registration with {} at {} was successful.", targetName, targetAddress);
S success = ((S) (result));
completionFuture.complete(RetryingRegistrationResult.success(gateway, success));
} else if (result instanceof RegistrationResponse.Rejection) {
log.debug("Registration with {} at {} was rejected.", targetName, targetAddress);
R rejection = ((R) (result));
completionFuture.complete(RetryingRegistrationResult.rejection(rejection));
} else {
// registration failure
if (result instanceof RegistrationResponse.Failure) {
RegistrationResponse.Failure failure = ((RegistrationResponse.Failure) (result));
log.info("Registration failure at {} occurred.",
targetName, failure.getReason());
} else {
log.error("Received unknown response to registration attempt: {}", result);
}
log.info("Pausing and re-attempting registration in {} ms", retryingRegistrationConfiguration.getRefusedDelayMillis());
registerLater(gateway, 1, retryingRegistrationConfiguration.getInitialRegistrationTimeoutMillis(), retryingRegistrationConfiguration.getRefusedDelayMillis());
}
}
}, rpcService.getScheduledExecutor());
// upon failure, retry
registrationAcceptFuture.whenCompleteAsync((Void v, Throwable failure) -> {
if ((failure != null) && (!isCanceled())) {
if (ExceptionUtils.stripCompletionException(failure) instanceof TimeoutException) {
// we simply have not received a response in time. maybe the timeout was very low
// (initial fast registration attempts), maybe the target endpoint is currently down.
if (log.isDebugEnabled()) {
log.debug("Registration at {} ({}) attempt {} timed out after {} ms", targetName, targetAddress, attempt, timeoutMillis);
}
long newTimeoutMillis = Math.min(2 * timeoutMillis, retryingRegistrationConfiguration.getMaxRegistrationTimeoutMillis());
register(gateway, attempt + 1, newTimeoutMillis);
}
else {
// a serious failure occurred. we still should not give up, but keep
// trying
log.error("Registration at {} failed due to an error", targetName, failure);
log.info("Pausing and re-attempting registration in {} ms", retryingRegistrationConfiguration.getErrorDelayMillis());
registerLater(gateway, 1, retryingRegistrationConfiguration.getInitialRegistrationTimeoutMillis(), retryingRegistrationConfiguration.getErrorDelayMillis());
}
}
}, rpcService.getScheduledExecutor());
} catch (Throwable t) {
completionFuture.completeExceptionally(t);
cancel();
}
} | 3.26 |
flink_LogicalType_is_rdh | /**
 * Returns whether the type belongs to the given {@code family} or not.
*
* @param family
* The family type to check against for equality
*/
public boolean is(LogicalTypeFamily family) {
return typeRoot.getFamilies().contains(family);
} | 3.26 |
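A brief sketch of the family check above, assuming the logical type classes in org.apache.flink.table.types.logical:

```java
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeFamily;

public class LogicalTypeDemo {
    public static void main(String[] args) {
        LogicalType type = new IntType();
        // INT belongs to the NUMERIC family but not to CHARACTER_STRING
        System.out.println(type.is(LogicalTypeFamily.NUMERIC));          // expected: true
        System.out.println(type.is(LogicalTypeFamily.CHARACTER_STRING)); // expected: false
    }
}
```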
flink_LogicalType_asSummaryString_rdh | /**
 * Returns a string that summarizes this type for printing to a console. An implementation might
 * shorten long names or skip very specific properties.
*
* <p>Use {@link #asSerializableString()} for a type string that fully serializes this instance.
*
* @return summary string of this type for debugging purposes
*/
public String asSummaryString() {
return asSerializableString();
}
/**
* Returns whether an instance of the given class can be represented as a value of this logical
* type when entering the table ecosystem. This method helps for the interoperability between
* JVM-based languages and the relational type system.
*
* <p>A supported conversion directly maps an input class to a logical type without loss of
* precision or type widening.
*
* <p>For example, {@code java.lang.Long} or {@code long} can be used as input for {@code BIGINT} | 3.26 |
flink_LogicalType_isAnyOf_rdh | /**
 * Returns whether the root of the type is part of at least one of the given {@code typeFamilies}
 * or not.
*
* @param typeFamilies
* The families to check against for equality
*/
public boolean isAnyOf(LogicalTypeFamily... typeFamilies) {
return Arrays.stream(typeFamilies).anyMatch(tf -> this.typeRoot.getFamilies().contains(tf));
} | 3.26 |
flink_LogicalType_isNullable_rdh | /**
* Returns whether a value of this type can be {@code null}.
*/
public boolean isNullable() {
return isNullable;
} | 3.26 |
flink_FormatFactory_forwardOptions_rdh | /**
* Returns a set of {@link ConfigOption} that are directly forwarded to the runtime
* implementation but don't affect the final execution topology.
*
* <p>Options declared here can override options of the persisted plan during an enrichment
* phase. Since a restored topology is static, an implementer has to ensure that the declared
* options don't affect fundamental abilities such as {@link ChangelogMode}.
*
* <p>For example, given a JSON format, if an option defines how to parse timestamps, changing
* the parsing behavior does not affect the pipeline topology and can be allowed. However, an
* option that defines whether the format results in a {@link ProjectableDecodingFormat} or not
* is not allowed. The wrapping connector and planner might not react to the changed abilities
* anymore.
*
* @see DynamicTableFactory.Context#getEnrichmentOptions()
*/
default Set<ConfigOption<?>> forwardOptions() {
return Collections.emptySet();
} | 3.26 |
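As a hedged illustration of the contract described above, a format factory might declare a parsing-only option as forwardable; the option key below is hypothetical, and only the forwardOptions() override itself mirrors the snippet:

```java
import java.util.HashSet;
import java.util.Set;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.table.factories.FormatFactory;

public abstract class MyFormatFactoryBase implements FormatFactory {

    // hypothetical option: affects only how timestamps are parsed, not the execution topology
    static final ConfigOption<String> TIMESTAMP_FORMAT =
            ConfigOptions.key("my-format.timestamp-format").stringType().defaultValue("SQL");

    @Override
    public Set<ConfigOption<?>> forwardOptions() {
        Set<ConfigOption<?>> options = new HashSet<>();
        options.add(TIMESTAMP_FORMAT);
        return options;
    }
}
```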
flink_AsynchronousJobOperationKey_getJobId_rdh | /**
* Get the job id for the given operation key.
*
* @return job id
 */
public JobID getJobId() {
return jobId;
} | 3.26 |
flink_CopyableValueComparator_readObject_rdh | // --------------------------------------------------------------------------------------------
private void readObject(ObjectInputStream s)
throws IOException, ClassNotFoundException {
// read basic object and the type
s.defaultReadObject();
this.reference = InstantiationUtil.instantiate(type, CopyableValue.class);
this.tempReference = null;
} | 3.26 |
flink_CopyableValueComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_UserFacingListState_get_rdh | // ------------------------------------------------------------------------
@Override
public Iterable<T> get() throws Exception {
Iterable<T> original = originalState.get();
return original != null ? original : emptyState;
} | 3.26 |
flink_Tuple13_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12), where the individual fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ")";
} | 3.26 |
flink_Tuple13_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
} | 3.26 |
flink_Tuple13_copy_rdh | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple13<T0, T1, T2, T3, T4, T5, T6,
T7, T8, T9, T10, T11, T12> copy() {
return new Tuple13<>(this.f0,
this.f1, this.f2,
this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12);
} | 3.26 |
flink_Tuple13_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> of(T0 f0, T1 f1,
T2 f2, T3 f3, T4 f4, T5 f5, T6
f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12) {
return new Tuple13<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12);
} | 3.26 |
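A quick usage sketch of the of(...) factory (assuming org.apache.flink.api.java.tuple.Tuple13 from flink-core); the type arguments are inferred from the call:

```java
import org.apache.flink.api.java.tuple.Tuple13;

public class Tuple13Demo {
    public static void main(String[] args) {
        Tuple13<Integer, String, Double, Long, Boolean, Byte, Short, Float, Character, String, Integer, Long, Double> t =
                Tuple13.of(1, "a", 2.0, 3L, true, (byte) 4, (short) 5, 6.0f, 'c', "b", 7, 8L, 9.0);
        System.out.println(t.f1); // expected: a
        System.out.println(t);    // expected: (1,a,2.0,3,true,4,5,6.0,c,b,7,8,9.0)
    }
}
```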
flink_TaskExecutorProcessUtils_newProcessSpecBuilder_rdh | // ------------------------------------------------------------------------
// Memory Configuration Calculations
// ------------------------------------------------------------------------
public static TaskExecutorProcessSpecBuilder newProcessSpecBuilder(final Configuration config) {
return TaskExecutorProcessSpecBuilder.newBuilder(config);
} | 3.26 |
flink_TaskExecutorProcessUtils_generateDynamicConfigsStr_rdh | // ------------------------------------------------------------------------
// Generating Dynamic Config Options
// ------------------------------------------------------------------------
public static String generateDynamicConfigsStr(final TaskExecutorProcessSpec taskExecutorProcessSpec) {
final Map<String, String> configs = new HashMap<>();
configs.put(TaskManagerOptions.CPU_CORES.key(), String.valueOf(taskExecutorProcessSpec.getCpuCores().getValue().doubleValue()));
configs.put(TaskManagerOptions.FRAMEWORK_HEAP_MEMORY.key(), taskExecutorProcessSpec.getFrameworkHeapSize().getBytes() + "b");
configs.put(TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.key(), taskExecutorProcessSpec.getFrameworkOffHeapMemorySize().getBytes() + "b");
configs.put(TaskManagerOptions.TASK_HEAP_MEMORY.key(), taskExecutorProcessSpec.getTaskHeapSize().getBytes() + "b");
configs.put(TaskManagerOptions.TASK_OFF_HEAP_MEMORY.key(), taskExecutorProcessSpec.getTaskOffHeapSize().getBytes() + "b");
configs.put(TaskManagerOptions.NETWORK_MEMORY_MIN.key(), taskExecutorProcessSpec.getNetworkMemSize().getBytes() + "b");
configs.put(TaskManagerOptions.NETWORK_MEMORY_MAX.key(), taskExecutorProcessSpec.getNetworkMemSize().getBytes() + "b");
configs.put(TaskManagerOptions.MANAGED_MEMORY_SIZE.key(), taskExecutorProcessSpec.getManagedMemorySize().getBytes() + "b");
configs.put(TaskManagerOptions.JVM_METASPACE.key(),
taskExecutorProcessSpec.getJvmMetaspaceAndOverhead().getMetaspace().getBytes() + "b");
configs.put(TaskManagerOptions.JVM_OVERHEAD_MIN.key(), taskExecutorProcessSpec.getJvmMetaspaceAndOverhead().getOverhead().getBytes() + "b");
configs.put(TaskManagerOptions.JVM_OVERHEAD_MAX.key(), taskExecutorProcessSpec.getJvmMetaspaceAndOverhead().getOverhead().getBytes() + "b");
configs.put(TaskManagerOptions.NUM_TASK_SLOTS.key(), String.valueOf(taskExecutorProcessSpec.getNumSlots()));
if (!taskExecutorProcessSpec.getExtendedResources().isEmpty()) {
configs.put(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST.key(), ('"' + String.join(";", taskExecutorProcessSpec.getExtendedResources().keySet())) + '"');
taskExecutorProcessSpec.getExtendedResources().forEach((resourceName, resource) -> configs.put(ExternalResourceOptions.getAmountConfigOptionForResource(resourceName), String.valueOf(resource.getValue().longValue())));
} else {
configs.put(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST.key(), ExternalResourceOptions.NONE);
}
return assembleDynamicConfigsStr(configs);
} | 3.26 |
flink_AsynchronousBlockWriter_getReturnQueue_rdh | /**
* Gets the queue in which the memory segments are queued after the asynchronous write is
* completed.
*
* @return The queue with the written memory segments.
*/
@Override
public LinkedBlockingQueue<MemorySegment> getReturnQueue() {
return this.returnSegments;
} | 3.26 |
flink_AsynchronousBlockWriter_getNextReturnedBlock_rdh | /**
* Gets the next memory segment that has been written and is available again. This method blocks
* until such a segment is available, or until an error occurs in the writer, or the writer is
* closed.
*
* <p>NOTE: If this method is invoked without any segment ever returning (for example, because
* the {@link #writeBlock(MemorySegment)} method has not been invoked accordingly), the method
* may block forever.
*
* @return The next memory segment from the writers's return queue.
* @throws IOException
* Thrown, if an I/O error occurs in the writer while waiting for the
* request to return.
*/
@Override
public MemorySegment getNextReturnedBlock() throws IOException {
try {
while (true) {
final MemorySegment next = returnSegments.poll(1000, TimeUnit.MILLISECONDS);
if (next != null) {
return next;
} else {
if (this.closed) {
throw new IOException("The writer has been closed.");
}
checkErroneous();
}
}
} catch (InterruptedException e) {
throw new IOException("Writer was interrupted while waiting for the next returning segment.");
}
} | 3.26 |
flink_EmptyIterator_remove_rdh | /**
* Throws a {@link java.lang.UnsupportedOperationException}.
*
* @see java.util.Iterator#remove()
*/
@Override
public void remove() {
throw new UnsupportedOperationException();
} | 3.26 |
flink_EmptyIterator_m0_rdh | /**
* Always throws a {@link java.util.NoSuchElementException}.
*
* @see java.util.Iterator#next()
*/
@Override
public E m0() {
throw new NoSuchElementException();
} | 3.26 |
flink_EmptyIterator_get_rdh | /**
* Gets a singleton instance of the empty iterator.
*
* @param <E>
* The type of the objects (not) returned by the iterator.
* @return An instance of the iterator.
*/
public static <E> EmptyIterator<E> get() {
@SuppressWarnings("unchecked")
EmptyIterator<E> iter = ((EmptyIterator<E>) (INSTANCE));
return iter;
} | 3.26 |
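A small sketch of the singleton accessor (assuming the class lives at org.apache.flink.runtime.util.EmptyIterator and that the method labelled m0 above corresponds to Iterator#next in the original source):

```java
import java.util.NoSuchElementException;
import org.apache.flink.runtime.util.EmptyIterator;

public class EmptyIteratorDemo {
    public static void main(String[] args) {
        EmptyIterator<String> it = EmptyIterator.get();
        System.out.println(it.hasNext()); // expected: false
        try {
            it.next();                    // always throws, as documented above
        } catch (NoSuchElementException expected) {
            System.out.println("next() threw as expected");
        }
    }
}
```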
flink_FlinkDatabaseMetaData_storesMixedCaseQuotedIdentifiers_rdh | /**
 * Flink SQL treats mixed-case identifiers as case sensitive.
 */
@Override
public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
return true;
} | 3.26 |
flink_FlinkDatabaseMetaData_m4_rdh | /**
 * Flink SQL treats mixed-case identifiers as case sensitive.
*/
@Override
public boolean m4() throws SQLException {
return true;
} | 3.26 |
flink_FlinkDatabaseMetaData_supportsMixedCaseIdentifiers_rdh | /**
 * Flink SQL treats mixed-case identifiers as case sensitive.
 */
@Override
public boolean supportsMixedCaseIdentifiers() throws SQLException {
return true;
} | 3.26 |
flink_FlinkDatabaseMetaData_nullsAreSortedLow_rdh | /**
 * In Flink, null values are sorted as the lowest value.
 */
@Override
public boolean nullsAreSortedLow() throws SQLException {
return true;
} | 3.26 |
flink_FlinkDatabaseMetaData_nullPlusNonNullIsNull_rdh | /**
 * In Flink, adding a null value to a non-null value yields a null result.
 */
@Override
public boolean nullPlusNonNullIsNull() throws SQLException {
return true;
} | 3.26 |
flink_FlinkDatabaseMetaData_supportsMixedCaseQuotedIdentifiers_rdh | /**
 * Flink SQL treats mixed-case identifiers as case sensitive.
*/
@Override
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
return true;
} | 3.26 |
flink_FlinkDatabaseMetaData_isCatalogAtStart_rdh | /**
* Catalog name appears at the start of full name.
*/
@Override
public boolean isCatalogAtStart() throws SQLException {
return true;
} | 3.26 |
flink_FlinkDatabaseMetaData_getSchemas_rdh | // TODO Flink will support SHOW DATABASES LIKE statement in FLIP-297, this method will be
// supported after that issue.
@Override
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
throw new UnsupportedOperationException();
} | 3.26 |
flink_DefaultCostEstimator_addCachedHybridHashCosts_rdh | /**
 * Calculates the costs for the cached variant of the hybrid hash join. We assume by
 * default that half of the cached hash table fits into memory.
*/
@Override
public void addCachedHybridHashCosts(EstimateProvider buildSideInput, EstimateProvider probeSideInput, Costs costs, int costWeight) {
if (costWeight < 1) {
throw new IllegalArgumentException("The cost weight must be at least one.");
}
long bs = buildSideInput.getEstimatedOutputSize();
long v11 = probeSideInput.getEstimatedOutputSize();
if ((bs > 0) && (v11 > 0)) {
long overall = (2 * bs) + (costWeight * v11);
costs.addDiskCost(overall);
costs.addCpuCost(((long) (overall * HASHING_CPU_FACTOR)));
} else {
costs.setDiskCost(Costs.UNKNOWN);
costs.setCpuCost(Costs.UNKNOWN);
}
// one time the build side plus cost-weight time the probe side
costs.addHeuristicDiskCost((1 + costWeight) * HEURISTIC_COST_BASE);
costs.addHeuristicCpuCost(((long) (((1 + costWeight) * HEURISTIC_COST_BASE) * HASHING_CPU_FACTOR)));
} | 3.26 |
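For a concrete feel of the formula above (numbers chosen purely for illustration): with an estimated build side of 100 MB, a probe side of 200 MB and a cost weight of 3, the added disk cost is 2 * 100 MB + 3 * 200 MB = 800 MB, and the CPU cost is that figure scaled by HASHING_CPU_FACTOR; if either size estimate is missing, both costs are set to UNKNOWN instead.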
flink_DefaultCostEstimator_addFileInputCost_rdh | // --------------------------------------------------------------------------------------------
// Local Strategy Cost
// --------------------------------------------------------------------------------------------
@Override
public void addFileInputCost(long fileSizeInBytes, Costs costs) {
if (fileSizeInBytes >= 0) {
costs.addDiskCost(fileSizeInBytes);
} else {
costs.setDiskCost(Costs.UNKNOWN);
}
costs.addHeuristicDiskCost(HEURISTIC_COST_BASE);
} | 3.26 |
flink_DefaultCostEstimator_addArtificialDamCost_rdh | // --------------------------------------------------------------------------------------------
// Damming Cost
// --------------------------------------------------------------------------------------------
@Override
public void addArtificialDamCost(EstimateProvider estimates, long bufferSize, Costs costs) {
final long s = estimates.getEstimatedOutputSize();
// we assume spilling and re-reading
if (s <= 0) {
costs.setDiskCost(Costs.UNKNOWN);
costs.setCpuCost(Costs.UNKNOWN);
} else {
costs.addDiskCost(2 * s);
costs.setCpuCost(((long) (s * MATERIALIZATION_CPU_FACTOR)));
}
costs.addHeuristicDiskCost(2 * HEURISTIC_COST_BASE);
costs.addHeuristicCpuCost(((long) (HEURISTIC_COST_BASE * MATERIALIZATION_CPU_FACTOR)));
} | 3.26 |
flink_DefaultCostEstimator_addRandomPartitioningCost_rdh | // --------------------------------------------------------------------------------------------
@Override
public void addRandomPartitioningCost(EstimateProvider estimates, Costs costs) {
// conservative estimate: we need to ship the whole data set over the network to establish the
// partitioning. no disk costs.
final long estOutShipSize = estimates.getEstimatedOutputSize();
if (estOutShipSize <= 0) {
costs.setNetworkCost(Costs.UNKNOWN);
} else {
costs.addNetworkCost(estOutShipSize);
}
costs.addHeuristicNetworkCost(HEURISTIC_COST_BASE);
} | 3.26 |
flink_SlotStatus_getResourceProfile_rdh | /**
* Get the resource profile of this slot.
*
* @return The resource profile
*/
public ResourceProfile getResourceProfile() {
return f0;
} | 3.26 |
flink_SlotStatus_getAllocationID_rdh | /**
* Get the allocation id of this slot.
*
* @return The allocation id if this slot is allocated, otherwise null
*/
public AllocationID getAllocationID() {
return allocationID;
} | 3.26 |
flink_BlockResettableMutableObjectIterator_next_rdh | // --------------------------------------------------------------------------------------------
@Override
public T next(T target) throws IOException {
// check for the left over element
if (this.readPhase) {
return getNextRecord(target);
} else if (this.leftOverReturned) {
// writing phase: check for leftover first, then get the next record
if ((target = this.input.next(target)) != null) {
if (writeNextRecord(target)) {
return target;
} else {
// did not fit into memory, keep as leftover
this.leftOverRecord = this.serializer.copy(target, this.leftOverRecord);
this.leftOverReturned = false;
this.fullWriteBuffer = true;
return null;
}
} else {
this.noMoreBlocks = true;
return null;
}
} else if (this.fullWriteBuffer) {
return null;
} else {
this.leftOverReturned = true;
target = this.serializer.copy(this.leftOverRecord, target);
return target;
}
} | 3.26 |
flink_BlockResettableMutableObjectIterator_hasFurtherInput_rdh | /**
* Checks, whether the input that is blocked by this iterator, has further elements available.
* This method may be used to forecast (for example at the point where a block is full) whether
* there will be more data (possibly in another block).
*
* @return True, if there will be more data, false otherwise.
*/
public boolean hasFurtherInput() {
return !this.noMoreBlocks;
} | 3.26 |
flink_AdvancedFunctionsExample_executeLastDatedValueFunction_rdh | /**
* Aggregates data by name and returns the latest non-null {@code item_count} value with its
* corresponding {@code order_date}.
*/
private static void executeLastDatedValueFunction(TableEnvironment env) {
// create a table with example data
final Table customers = env.fromValues(DataTypes.of("ROW<name STRING, order_date DATE, item_count INT>"), Row.of("Guillermo Smith", LocalDate.parse("2020-12-01"), 3), Row.of("Guillermo Smith", LocalDate.parse("2020-12-05"), 5), Row.of("Valeria Mendoza", LocalDate.parse("2020-03-23"), 4), Row.of("Valeria Mendoza", LocalDate.parse("2020-06-02"), 10), Row.of("Leann Holloway", LocalDate.parse("2020-05-26"), 9), Row.of("Leann Holloway", LocalDate.parse("2020-05-27"), null), Row.of("Brandy Sanders", LocalDate.parse("2020-10-14"), 1), Row.of("John Turner", LocalDate.parse("2020-10-02"), 12), Row.of("Ellen Ortega", LocalDate.parse("2020-06-18"), 100));
env.createTemporaryView("customers", customers);
// register and execute the function
env.createTemporarySystemFunction("LastDatedValueFunction", LastDatedValueFunction.class);
env.executeSql("SELECT name, LastDatedValueFunction(item_count, order_date) " + "FROM customers GROUP BY name").print();
// clean up
env.dropTemporaryView("customers");
} | 3.26 |
flink_CatalogTableImpl_removeRedundant_rdh | /**
* Construct catalog table properties from {@link #toProperties()}.
*/
public static Map<String, String> removeRedundant(Map<String, String> properties, TableSchema schema, List<String> partitionKeys) {
Map<String, String> ret = new HashMap<>(properties);
DescriptorProperties descriptorProperties = new DescriptorProperties(false);
descriptorProperties.putTableSchema(SCHEMA, schema);
descriptorProperties.putPartitionKeys(partitionKeys);
descriptorProperties.asMap().keySet().forEach(ret::remove);
return ret;
} | 3.26 |
flink_CatalogTableImpl_fromProperties_rdh | /**
* Construct a {@link CatalogTableImpl} from complete properties that contains table schema.
*/
public static CatalogTableImpl fromProperties(Map<String, String> properties) {
DescriptorProperties descriptorProperties = new DescriptorProperties(false);
descriptorProperties.putProperties(properties);
TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
List<String> partitionKeys = descriptorProperties.getPartitionKeys();
return new CatalogTableImpl(tableSchema, partitionKeys, removeRedundant(properties, tableSchema, partitionKeys), "");
} | 3.26 |
flink_SourceOperatorFactory_instantiateSourceOperator_rdh | /**
* This is a utility method to conjure up a "SplitT" generics variable binding so that we can
* construct the SourceOperator without resorting to "all raw types". That way, this methods
* puts all "type non-safety" in one place and allows to maintain as much generics safety in the
* main code as possible.
*/
@SuppressWarnings("unchecked")
private static <T, SplitT extends SourceSplit> SourceOperator<T, SplitT> instantiateSourceOperator(FunctionWithException<SourceReaderContext, SourceReader<T, ?>, Exception> readerFactory, OperatorEventGateway eventGateway, SimpleVersionedSerializer<?> splitSerializer, WatermarkStrategy<T> watermarkStrategy, ProcessingTimeService timeService, Configuration config, String localHostName, boolean emitProgressiveWatermarks, CanEmitBatchOfRecordsChecker canEmitBatchOfRecords) {
// jumping through generics hoops: cast the generics away to then cast them back more
// strictly typed
final FunctionWithException<SourceReaderContext, SourceReader<T, SplitT>, Exception> typedReaderFactory = ((FunctionWithException<SourceReaderContext, SourceReader<T, SplitT>, Exception>) (FunctionWithException<?, ?, ?>) (readerFactory));
final SimpleVersionedSerializer<SplitT> typedSplitSerializer = ((SimpleVersionedSerializer<SplitT>) (splitSerializer));
return new SourceOperator<>(typedReaderFactory, eventGateway, typedSplitSerializer, watermarkStrategy, timeService, config, localHostName, emitProgressiveWatermarks, canEmitBatchOfRecords);
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_canBeCompressed_rdh | /**
* Whether the buffer can be compressed or not. Note that event is not compressed because it is
* usually small and the size can become even larger after compression.
*/
private boolean canBeCompressed(Buffer buffer) {
return ((bufferCompressor != null) && buffer.isBuffer()) && (buffer.readableBytes() > 0);
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_getBuffersSatisfyStatus_rdh | /**
* Get buffers in {@link #allBuffers} that satisfy expected {@link SpillStatus} and {@link ConsumeStatus}.
*
* @param spillStatus
* the status of spilling expected.
* @param consumeStatusWithId
* the status and consumerId expected.
* @return buffers satisfy expected status in order.
*/
// Note that: callWithLock ensure that code block guarded by resultPartitionReadLock and
// subpartitionLock.
@SuppressWarnings("FieldAccessNotGuarded")
public Deque<BufferIndexAndChannel> getBuffersSatisfyStatus(SpillStatus spillStatus, ConsumeStatusWithId consumeStatusWithId) {
return callWithLock(() -> {
// TODO return iterator to avoid completely traversing the queue for each call.
Deque<BufferIndexAndChannel> targetBuffers = new ArrayDeque<>();
// traverse buffers in order.
allBuffers.forEach(bufferContext -> {
if (isBufferSatisfyStatus(bufferContext, spillStatus,
consumeStatusWithId)) {
targetBuffers.add(bufferContext.getBufferIndexAndChannel());
}
});
return targetBuffers;
});
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_writeEvent_rdh | // ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void writeEvent(ByteBuffer event, DataType dataType) {
checkArgument(dataType.isEvent());
// each Event must take an exclusive buffer
m0();
// store Events in adhoc heap segments, for network memory efficiency
MemorySegment data = MemorySegmentFactory.wrap(event.array());
Buffer buffer = new NetworkBuffer(data, FreeingBufferRecycler.INSTANCE, dataType, data.size());
HsBufferContext bufferContext = new HsBufferContext(buffer, finishedBufferIndex, targetChannel);
addFinishedBuffer(bufferContext);
memoryDataManagerOperation.onBufferFinished();
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_releaseSubpartitionBuffers_rdh | /**
* Release this subpartition's buffers in a decision.
*
* @param toRelease
* All buffers that need to be released belong to this subpartition in a
* decision.
*/
// Note that: runWithLock ensure that code block guarded by resultPartitionReadLock and
// subpartitionLock.
@SuppressWarnings("FieldAccessNotGuarded")
public void releaseSubpartitionBuffers(List<BufferIndexAndChannel> toRelease) {
runWithLock(() -> toRelease.forEach(indexAndChannel -> {
int bufferIndex = indexAndChannel.getBufferIndex();
HsBufferContext bufferContext = bufferIndexToContexts.get(bufferIndex);
if (bufferContext != null) {
checkAndMarkBufferReadable(bufferContext);
releaseBuffer(bufferIndex);
}
}));
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_append_rdh | // ------------------------------------------------------------------------
// Called by MemoryDataManager
// ------------------------------------------------------------------------
/**
* Append record to {@link HsSubpartitionMemoryDataManager}.
*
* @param record
* to be managed by this class.
* @param dataType
* the type of this record. In other words, is it data or event.
*/
public void append(ByteBuffer record, DataType dataType) throws InterruptedException {
if (dataType.isEvent()) {
writeEvent(record, dataType);
} else {
writeRecord(record, dataType);
}
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_trimHeadingReleasedBuffers_rdh | /**
 * Removes all released buffers from the head of the queue until the queue is empty or an
 * un-released buffer is encountered.
*/
@GuardedBy("subpartitionLock")
private void trimHeadingReleasedBuffers(Deque<HsBufferContext> bufferQueue) {
while ((!bufferQueue.isEmpty()) && bufferQueue.peekFirst().isReleased()) {
bufferQueue.removeFirst();
}
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_addFinishedBuffer_rdh | // Note that: callWithLock ensure that code block guarded by resultPartitionReadLock and
// subpartitionLock.
@SuppressWarnings("FieldAccessNotGuarded")
private void addFinishedBuffer(HsBufferContext bufferContext) {
finishedBufferIndex++;
List<HsConsumerId> needNotify = new ArrayList<>(consumerMap.size());
runWithLock(() -> {
allBuffers.add(bufferContext);
bufferIndexToContexts.put(bufferContext.getBufferIndexAndChannel().getBufferIndex(), bufferContext);
for (Map.Entry<HsConsumerId, HsSubpartitionConsumerMemoryDataManager> consumerEntry : consumerMap.entrySet()) {
if (consumerEntry.getValue().addBuffer(bufferContext)) {
needNotify.add(consumerEntry.getKey());
}
}
updateStatistics(bufferContext.getBuffer());
});
memoryDataManagerOperation.onDataAvailable(targetChannel, needNotify);
} | 3.26 |
flink_HsSubpartitionMemoryDataManager_spillSubpartitionBuffers_rdh | /**
* Spill this subpartition's buffers in a decision.
*
* @param toSpill
* All buffers that need to be spilled belong to this subpartition in a decision.
* @param spillDoneFuture
* completed when spill is finished.
* @return {@link BufferWithIdentity}s about these spill buffers.
*/
// Note that: callWithLock ensure that code block guarded by resultPartitionReadLock and
// subpartitionLock.
@SuppressWarnings("FieldAccessNotGuarded")
public List<BufferWithIdentity> spillSubpartitionBuffers(List<BufferIndexAndChannel> toSpill, CompletableFuture<Void> spillDoneFuture) {
return callWithLock(() -> toSpill.stream().map(indexAndChannel -> {
int bufferIndex = indexAndChannel.getBufferIndex();
return startSpillingBuffer(bufferIndex, spillDoneFuture).map(context -> new BufferWithIdentity(context.getBuffer(), bufferIndex, targetChannel));
}).filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList()));
} | 3.26 |
flink_ListDelimitedSerializer_deserializeNextElement_rdh | /**
* Deserializes a single element from a serialized list.
*/
public static <T> T deserializeNextElement(DataInputDeserializer in, TypeSerializer<T> elementSerializer) throws IOException {
if (in.available() > 0) {
T element = elementSerializer.deserialize(in);
if (in.available() > 0) {
in.readByte();
}
return element;
}
return null;
} | 3.26 |
flink_Tuple22_equals_rdh | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o
* the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple22)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple22 tuple = ((Tuple22) (o));
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) {
return false;
}
if (f20 != null ? !f20.equals(tuple.f20) : tuple.f20 != null) {
return false;
}
if (f21 != null ? !f21.equals(tuple.f21) : tuple.f21 != null) {
return false;
}
return true;
} | 3.26 |
flink_Tuple22_toString_rdh | // -------------------------------------------------------------------------------------------------
// standard utilities
// -------------------------------------------------------------------------------------------------
/**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21), where the individual fields
* are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return ((((((((((((((((((((((((((((((((((((((((((("(" + StringUtils.arrayAwareToString(this.f0)) + ",") + StringUtils.arrayAwareToString(this.f1)) + ",") + StringUtils.arrayAwareToString(this.f2)) + ",") + StringUtils.arrayAwareToString(this.f3)) + ",") + StringUtils.arrayAwareToString(this.f4)) + ",") + StringUtils.arrayAwareToString(this.f5)) + ",") + StringUtils.arrayAwareToString(this.f6)) + ",") + StringUtils.arrayAwareToString(this.f7)) + ",") + StringUtils.arrayAwareToString(this.f8)) + ",") + StringUtils.arrayAwareToString(this.f9)) + ",") + StringUtils.arrayAwareToString(this.f10)) + ",") + StringUtils.arrayAwareToString(this.f11)) + ",") + StringUtils.arrayAwareToString(this.f12)) + ",") + StringUtils.arrayAwareToString(this.f13)) + ",") + StringUtils.arrayAwareToString(this.f14)) + ",") + StringUtils.arrayAwareToString(this.f15)) + ",") + StringUtils.arrayAwareToString(this.f16)) + ",") + StringUtils.arrayAwareToString(this.f17)) + ",") + StringUtils.arrayAwareToString(this.f18)) + ",") + StringUtils.arrayAwareToString(this.f19)) + ",") +
StringUtils.arrayAwareToString(this.f20)) + ",") + StringUtils.arrayAwareToString(this.f21)) + ")";
} | 3.26 |
flink_Tuple22_of_rdh | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21> of(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20, T21 f21) {
return new Tuple22<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21);
} | 3.26 |
flink_Tuple22_copy_rdh | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple22<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
T16, T17, T18, T19, T20, T21> copy() {
return new Tuple22<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16, this.f17, this.f18, this.f19, this.f20, this.f21);
} | 3.26 |
flink_Tuple22_setFields_rdh | /**
* Sets new values to all fields of the tuple.
*
* @param f0
* The value for field 0
* @param f1
* The value for field 1
* @param f2
* The value for field 2
* @param f3
* The value for field 3
* @param f4
* The value for field 4
* @param f5
* The value for field 5
* @param f6
* The value for field 6
* @param f7
* The value for field 7
* @param f8
* The value for field 8
* @param f9
* The value for field 9
* @param f10
* The value for field 10
* @param f11
* The value for field 11
* @param f12
* The value for field 12
* @param f13
* The value for field 13
* @param f14
* The value for field 14
* @param f15
* The value for field 15
* @param f16
* The value for field 16
* @param f17
* The value for field 17
* @param f18
* The value for field 18
* @param f19
* The value for field 19
* @param f20
* The value for field 20
* @param f21
* The value for field 21
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16, T17 f17, T18 f18, T19 f19, T20 f20, T21 f21) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
this.f18 = f18;
this.f19 = f19;
this.f20 = f20;
this.f21 = f21;
} | 3.26 |
flink_RegisteredBroadcastStateBackendMetaInfo_deepCopy_rdh | /**
 * Creates a deep copy of itself.
*/
@Nonnull
public RegisteredBroadcastStateBackendMetaInfo<K, V> deepCopy() {
return new RegisteredBroadcastStateBackendMetaInfo<>(this);
} | 3.26 |
flink_AbstractBinaryExternalMerger_getMergingIterator_rdh | /**
* Returns an iterator that iterates over the merged result from all given channels.
*
* @param channelIDs
* The channels that are to be merged and returned.
* @return An iterator over the merged records of the input channels.
* @throws IOException
* Thrown, if the readers encounter an I/O problem.
*/
public BinaryMergeIterator<Entry> getMergingIterator(List<ChannelWithMeta> channelIDs, List<FileIOChannel> openChannels) throws IOException {
// create one iterator per channel id
if (LOG.isDebugEnabled()) {
LOG.debug(("Performing merge of " + channelIDs.size()) + " sorted streams.");
}
final List<MutableObjectIterator<Entry>> iterators = new ArrayList<>(channelIDs.size() + 1);
for (ChannelWithMeta channel : channelIDs) {
AbstractChannelReaderInputView view = FileChannelUtil.createInputView(ioManager, channel, openChannels, compressionEnabled, compressionCodecFactory, compressionBlockSize, pageSize);
iterators.add(channelReaderInputViewIterator(view));
}
return new BinaryMergeIterator<>(iterators, mergeReusedEntries(channelIDs.size()), mergeComparator());
} | 3.26 |
flink_AbstractBinaryExternalMerger_mergeChannels_rdh | /**
* Merges the sorted runs described by the given Channel IDs into a single sorted run.
*
* @param channelIDs
* The IDs of the runs' channels.
* @return The ID and number of blocks of the channel that describes the merged run.
*/
private ChannelWithMeta mergeChannels(List<ChannelWithMeta> channelIDs) throws IOException {
// the list with the target iterators
List<FileIOChannel> openChannels = new ArrayList<>(channelIDs.size());
final BinaryMergeIterator<Entry> mergeIterator = getMergingIterator(channelIDs, openChannels);
// create a new channel writer
final FileIOChannel.ID mergedChannelID = ioManager.createChannel();
channelManager.addChannel(mergedChannelID);
AbstractChannelWriterOutputView output = null;
int numBytesInLastBlock;
int numBlocksWritten;
try {
output = FileChannelUtil.createOutputView(ioManager, mergedChannelID, compressionEnabled, compressionCodecFactory,
compressionBlockSize, pageSize);
writeMergingOutput(mergeIterator, output);
numBytesInLastBlock = output.close();
numBlocksWritten = output.getBlockCount();
} catch (IOException e) {
if (output != null) {
output.close();
output.getChannel().deleteChannel();
}
throw e;
}// remove, close and delete channels
for (FileIOChannel channel : openChannels) {
channelManager.removeChannel(channel.getChannelID());
try {
channel.closeAndDelete();
} catch (Throwable ignored) {
}
}
return new ChannelWithMeta(mergedChannelID, numBlocksWritten, numBytesInLastBlock);
} | 3.26 |
flink_AbstractBinaryExternalMerger_mergeChannelList_rdh | /**
* Merges the given sorted runs to a smaller number of sorted runs.
*
* @param channelIDs
* The IDs of the sorted runs that need to be merged.
* @return A list of the IDs of the merged channels.
* @throws IOException
* Thrown, if the readers or writers encountered an I/O problem.
*/
public List<ChannelWithMeta> mergeChannelList(List<ChannelWithMeta> channelIDs) throws IOException {
// A channel list with length maxFanIn<sup>i</sup> can be merged to maxFanIn files in i-1
// rounds where every merge
// is a full merge with maxFanIn input channels. A partial round includes merges with fewer
// than maxFanIn
// inputs. It is most efficient to perform the partial round first.
final double scale = Math.ceil(Math.log(channelIDs.size()) / Math.log(maxFanIn)) - 1;
final int numStart
= channelIDs.size();
final int numEnd = ((int) (Math.pow(maxFanIn, scale)));
final int numMerges = ((int) (Math.ceil((numStart - numEnd) / ((double) (maxFanIn - 1)))));
final int numNotMerged = numEnd - numMerges;
final int numToMerge = numStart - numNotMerged;
// unmerged channel IDs are copied directly to the result list
final List<ChannelWithMeta> mergedChannelIDs = new ArrayList<>(numEnd);
mergedChannelIDs.addAll(channelIDs.subList(0, numNotMerged));
final int channelsToMergePerStep = ((int) (Math.ceil(numToMerge / ((double) (numMerges)))));
final List<ChannelWithMeta> channelsToMergeThisStep = new ArrayList<>(channelsToMergePerStep);
int channelNum = numNotMerged;
while ((!closed) && (channelNum < channelIDs.size())) {
channelsToMergeThisStep.clear();
for (int i = 0; (i < channelsToMergePerStep) && (channelNum < channelIDs.size()); i++ , channelNum++) {
channelsToMergeThisStep.add(channelIDs.get(channelNum));
}
mergedChannelIDs.add(mergeChannels(channelsToMergeThisStep));
}
return mergedChannelIDs;
} | 3.26 |
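To make the scheduling arithmetic above concrete, the sketch below replays the same formulas with made-up inputs (13 sorted runs, fan-in 4); it is a worked example, not Flink code.

/** Minimal sketch of the merge-scheduling arithmetic in mergeChannelList, with illustrative numbers. */
public class MergePlanSketch {
    public static void main(String[] args) {
        int numStart = 13;
        int maxFanIn = 4;
        // Number of full merge rounds still needed after this call.
        double scale = Math.ceil(Math.log(numStart) / Math.log(maxFanIn)) - 1; // 1.0
        int numEnd = (int) Math.pow(maxFanIn, scale);                          // 4 runs remain
        // Each merge turns maxFanIn runs into 1, i.e. removes (maxFanIn - 1) runs.
        int numMerges = (int) Math.ceil((numStart - numEnd) / (double) (maxFanIn - 1)); // 3
        int numNotMerged = numEnd - numMerges;                                  // 1 run passes through
        int numToMerge = numStart - numNotMerged;                               // 12 runs get merged
        int channelsPerStep = (int) Math.ceil(numToMerge / (double) numMerges); // 4 per merge
        System.out.printf("merges=%d, untouched=%d, fan-in per merge=%d%n",
                numMerges, numNotMerged, channelsPerStep);
    }
}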
flink_RocksDBConfigurableOptions_checkArgumentValid_rdh | /**
* Helper method to check whether the (key,value) is valid through given configuration and
* returns the formatted value.
*
* @param option
* The configuration key which is configurable in {@link RocksDBConfigurableOptions}.
* @param value
* The value within given configuration.
*/
static void checkArgumentValid(ConfigOption<?> option, Object value) {
final String key = option.key();
if (POSITIVE_INT_CONFIG_SET.contains(option)) {
Preconditions.checkArgument(((Integer) (value)) > 0, ("Configured value for key: " + key) + " must be larger than 0.");
} else if (SIZE_CONFIG_SET.contains(option)) {
Preconditions.checkArgument(((MemorySize) (value)).getBytes() > 0, ("Configured size for key " + key) + " must be larger than 0.");
} else if (LOG_MAX_FILE_SIZE.equals(option)) {
Preconditions.checkArgument(((MemorySize) (value)).getBytes() >= 0, ("Configured size for key " + key) + " must be larger than or equal to 0.");
} else if (LOG_DIR.equals(option)) {
Preconditions.checkArgument(new File(((String) (value))).isAbsolute(), ("Configured path for key " + key) + " is not absolute.");
}
} | 3.26 |
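As a hedged illustration of how these checks behave, the sketch below reproduces the same validation pattern with plain Java in place of Flink's Preconditions and MemorySize types; the configuration key strings are only examples and not guaranteed to match the actual option keys.

import java.io.File;

/** Plain-Java stand-in for the option validation pattern above. */
public class OptionValidationSketch {
    static void checkPositive(String key, long value) {
        if (value <= 0) {
            throw new IllegalArgumentException(
                    "Configured value for key: " + key + " must be larger than 0.");
        }
    }

    static void checkAbsolutePath(String key, String path) {
        if (!new File(path).isAbsolute()) {
            throw new IllegalArgumentException(
                    "Configured path for key " + key + " is not absolute.");
        }
    }

    public static void main(String[] args) {
        checkPositive("state.backend.rocksdb.thread.num", 4);        // passes
        checkAbsolutePath("state.backend.rocksdb.log.dir", "/tmp");  // passes
        try {
            checkAbsolutePath("state.backend.rocksdb.log.dir", "relative/dir");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // "... is not absolute."
        }
    }
}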
flink_TypeSerializerSnapshotSerializationUtil_deserializeV2_rdh | /**
* Deserialization path for Flink versions 1.7+.
*/
@VisibleForTesting
static <T> TypeSerializerSnapshot<T> deserializeV2(DataInputView in, ClassLoader cl) throws IOException {
return TypeSerializerSnapshot.readVersionedSnapshot(in, cl);
} | 3.26 |
flink_TypeSerializerSnapshotSerializationUtil_writeSerializerSnapshot_rdh | /**
* Writes a {@link TypeSerializerSnapshot} to the provided data output view.
*
* <p>It is written with a format that can be later read again using {@link #readSerializerSnapshot(DataInputView, ClassLoader)}.
*
* @param out
* the data output view
* @param serializerSnapshot
* the serializer configuration snapshot to write
 */
public static <T> void writeSerializerSnapshot(DataOutputView out, TypeSerializerSnapshot<T> serializerSnapshot) throws IOException {
new TypeSerializerSnapshotSerializationProxy<>(serializerSnapshot).write(out);
}
/**
* Reads from a data input view a {@link TypeSerializerSnapshot} that was previously written
 * using {@link #writeSerializerSnapshot(DataOutputView, TypeSerializerSnapshot)} | 3.26
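A minimal round-trip sketch of the write/read pair is shown below. It assumes a matching readSerializerSnapshot(DataInputView, ClassLoader) overload exists in the same utility class and uses IntSerializer as an arbitrary serializer; exact signatures vary slightly across Flink versions, so treat this as a sketch rather than definitive usage.

import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshotSerializationUtil;
import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;

/** Hedged round-trip sketch: write a serializer snapshot and read it back. */
public class SnapshotRoundTripSketch {
    public static void main(String[] args) throws Exception {
        TypeSerializerSnapshot<Integer> snapshot = IntSerializer.INSTANCE.snapshotConfiguration();

        DataOutputSerializer out = new DataOutputSerializer(128);
        TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot(out, snapshot);

        DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
        // Assumption: the two-argument read overload; older Flink versions take extra parameters.
        TypeSerializerSnapshot<Integer> restored =
                TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
                        in, Thread.currentThread().getContextClassLoader());

        System.out.println(restored.getClass().getSimpleName());
    }
}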
flink_MessageSerializer_deserializeServerFailure_rdh | /**
* De-serializes the failure message sent to the {@link org.apache.flink.queryablestate.network.Client} in case of server related errors.
*
* <pre>
* <b>The buffer is expected to be at the correct position.</b>
* </pre>
*
* @param buf
* The {@link ByteBuf} containing the serialized failure message.
* @return The failure message.
*/
public static Throwable deserializeServerFailure(final ByteBuf buf) throws IOException, ClassNotFoundException {
try (ByteBufInputStream bis = new ByteBufInputStream(buf);ObjectInputStream in = new ObjectInputStream(bis)) {
return ((Throwable) (in.readObject()));
}
} | 3.26 |
flink_MessageSerializer_deserializeRequest_rdh | /**
* De-serializes the request sent to the {@link org.apache.flink.queryablestate.network.AbstractServerBase}.
*
* <pre>
* <b>The buffer is expected to be at the request position.</b>
* </pre>
*
* @param buf
* The {@link ByteBuf} containing the serialized request.
* @return The request.
 */
public REQ deserializeRequest(final ByteBuf buf) {
Preconditions.checkNotNull(buf);
return requestDeserializer.deserializeMessage(buf);
} | 3.26 |
flink_MessageSerializer_deserializeRequestFailure_rdh | /**
* De-serializes the {@link RequestFailure} sent to the {@link org.apache.flink.queryablestate.network.Client} in case of protocol related errors.
*
* <pre>
* <b>The buffer is expected to be at the correct position.</b>
* </pre>
*
* @param buf
* The {@link ByteBuf} containing the serialized failure message.
* @return The failure message.
*/
public static RequestFailure deserializeRequestFailure(final ByteBuf buf) throws IOException, ClassNotFoundException {
long requestId = buf.readLong();
Throwable cause;
try (ByteBufInputStream bis = new ByteBufInputStream(buf);ObjectInputStream in = new ObjectInputStream(bis)) {
cause = ((Throwable) (in.readObject()));
}
return new RequestFailure(requestId, cause);
} | 3.26 |
flink_MessageSerializer_writePayload_rdh | /**
* Helper for serializing the messages.
*
* @param alloc
* The {@link ByteBufAllocator} used to allocate the buffer to serialize the
* message into.
* @param requestId
* The id of the request to which the message refers to.
* @param messageType
* The {@link MessageType type of the message}.
* @param payload
* The serialized version of the message.
* @return A {@link ByteBuf} containing the serialized message.
*/
private static ByteBuf writePayload(final ByteBufAllocator alloc, final long requestId, final MessageType messageType, final byte[] payload) {
final int frameLength = (HEADER_LENGTH + REQUEST_ID_SIZE) + payload.length;
final ByteBuf buf = alloc.ioBuffer(frameLength + Integer.BYTES);
buf.writeInt(frameLength);
writeHeader(buf, messageType);
buf.writeLong(requestId);
buf.writeBytes(payload);
return buf;
} | 3.26 |
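The resulting frame layout is: [frame length][version][message type ordinal][request id][payload]. The plain-Java sketch below (java.nio only, no Netty) writes and reads back such a frame; the version value 1 and message type ordinal 0 are illustrative placeholders, since the actual VERSION constant is not shown in the snippet.

import java.nio.ByteBuffer;

/** Plain-Java sketch of the frame layout produced by writePayload / writeHeader. */
public class FrameLayoutSketch {
    public static void main(String[] args) {
        byte[] payload = {42, 43, 44};
        int headerLength = 2 * Integer.BYTES;                       // version + message type
        int frameLength = headerLength + Long.BYTES + payload.length;

        ByteBuffer buf = ByteBuffer.allocate(Integer.BYTES + frameLength);
        buf.putInt(frameLength);   // frame length (the length field itself is not counted)
        buf.putInt(1);             // protocol version (illustrative value)
        buf.putInt(0);             // message type ordinal, e.g. REQUEST
        buf.putLong(7L);           // request id
        buf.put(payload);          // serialized message body
        buf.flip();

        System.out.println("frame length = " + buf.getInt()
                + ", version = " + buf.getInt()
                + ", type = " + buf.getInt()
                + ", requestId = " + buf.getLong());
    }
}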
flink_MessageSerializer_m1_rdh | /**
* Serializes the exception containing the failure message sent to the {@link org.apache.flink.queryablestate.network.Client} in case of protocol related errors.
*
* @param alloc
* The {@link ByteBufAllocator} used to allocate the buffer to serialize the
* message into.
* @param requestId
* The id of the request to which the message refers to.
* @param cause
* The exception thrown at the server.
* @return A {@link ByteBuf} containing the serialized message.
*/
public static ByteBuf m1(final ByteBufAllocator alloc, final long requestId, final Throwable cause) throws IOException {
final ByteBuf buf = alloc.ioBuffer();
// Frame length is set at the end
buf.writeInt(0);
writeHeader(buf, MessageType.REQUEST_FAILURE);
buf.writeLong(requestId);
try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);ObjectOutput out = new ObjectOutputStream(bbos)) {
out.writeObject(cause);
}
// Set frame length
int frameLength = buf.readableBytes() - Integer.BYTES;
buf.setInt(0, frameLength);
return buf;
} | 3.26 |
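The failure messages embed the Throwable via plain Java object serialization. The standalone sketch below shows that round trip with java.io streams only; it mirrors the mechanism used in m1 and serializeServerFailure without depending on Netty buffers.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

/** Minimal sketch of the Throwable round trip that the failure messages rely on. */
public class ThrowableRoundTripSketch {
    public static void main(String[] args) throws Exception {
        Throwable original = new IllegalStateException("Unknown key or namespace.");

        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
            out.writeObject(original); // same mechanism as in the frame body above
        }

        try (ObjectInputStream in =
                new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            Throwable restored = (Throwable) in.readObject();
            System.out.println(restored.getMessage()); // "Unknown key or namespace."
        }
    }
}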
flink_MessageSerializer_serializeRequest_rdh | // ------------------------------------------------------------------------
// Serialization
// ------------------------------------------------------------------------
/**
* Serializes the request sent to the {@link org.apache.flink.queryablestate.network.AbstractServerBase}.
*
* @param alloc
* The {@link ByteBufAllocator} used to allocate the buffer to serialize the
* message into.
* @param requestId
* The id of the request to which the message refers to.
* @param request
* The request to be serialized.
* @return A {@link ByteBuf} containing the serialized message.
*/
public static <REQ extends MessageBody> ByteBuf serializeRequest(final ByteBufAllocator alloc, final long requestId, final REQ request) {
Preconditions.checkNotNull(request);
return writePayload(alloc, requestId, MessageType.REQUEST, request.serialize());
} | 3.26 |
flink_MessageSerializer_m0_rdh | /**
* Serializes the response sent to the {@link org.apache.flink.queryablestate.network.Client}.
*
* @param alloc
* The {@link ByteBufAllocator} used to allocate the buffer to serialize the
* message into.
* @param requestId
* The id of the request to which the message refers to.
* @param response
* The response to be serialized.
* @return A {@link ByteBuf} containing the serialized message.
*/
public static <RESP extends MessageBody> ByteBuf m0(final ByteBufAllocator alloc, final long requestId, final RESP response) {
Preconditions.checkNotNull(response);
return writePayload(alloc, requestId, MessageType.REQUEST_RESULT, response.serialize());
} | 3.26 |
flink_MessageSerializer_getRequestId_rdh | /**
 * De-serializes and returns the request id.
*
* <pre>
* <b>The buffer is expected to be at the request id position.</b>
* </pre>
*
* @param buf
* The {@link ByteBuf} containing the serialized request id.
* @return The request id.
*/
public static long getRequestId(final ByteBuf buf) {
return buf.readLong();
} | 3.26
flink_MessageSerializer_deserializeResponse_rdh | /**
* De-serializes the response sent to the {@link org.apache.flink.queryablestate.network.Client}.
*
* <pre>
* <b>The buffer is expected to be at the response position.</b>
* </pre>
*
* @param buf
* The {@link ByteBuf} containing the serialized response.
* @return The response.
*/
public RESP deserializeResponse(final ByteBuf buf) {
Preconditions.checkNotNull(buf);
return responseDeserializer.deserializeMessage(buf);
} | 3.26 |
flink_MessageSerializer_serializeServerFailure_rdh | /**
* Serializes the failure message sent to the {@link org.apache.flink.queryablestate.network.Client} in case of server related errors.
*
* @param alloc
* The {@link ByteBufAllocator} used to allocate the buffer to serialize the
* message into.
* @param cause
* The exception thrown at the server.
* @return The failure message.
*/
public static ByteBuf serializeServerFailure(final ByteBufAllocator alloc, final Throwable cause) throws IOException {
final ByteBuf buf = alloc.ioBuffer();
// Frame length is set at end
buf.writeInt(0);
writeHeader(buf, MessageType.SERVER_FAILURE);
try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);ObjectOutput out = new ObjectOutputStream(bbos)) {
out.writeObject(cause);
}
// Set frame length
int frameLength = buf.readableBytes() - Integer.BYTES;
buf.setInt(0, frameLength);
return buf;
} | 3.26 |
flink_MessageSerializer_writeHeader_rdh | /**
* Helper for serializing the header.
*
* @param buf
* The {@link ByteBuf} to serialize the header into.
* @param messageType
* The {@link MessageType} of the message this header refers to.
*/
private static void writeHeader(final ByteBuf buf, final MessageType messageType) {
buf.writeInt(VERSION);
buf.writeInt(messageType.ordinal());
} | 3.26 |
flink_MessageSerializer_deserializeHeader_rdh | // ------------------------------------------------------------------------
// Deserialization
// ------------------------------------------------------------------------
/**
* De-serializes the header and returns the {@link MessageType}.
*
* <pre>
* <b>The buffer is expected to be at the header position.</b>
* </pre>
*
* @param buf
* The {@link ByteBuf} containing the serialized header.
* @return The message type.
* @throws IllegalStateException
* If unexpected message version or message type.
*/
public static MessageType deserializeHeader(final ByteBuf buf) {
// checking the version
int version = buf.readInt();
Preconditions.checkState(version == VERSION, ((("Version Mismatch: Found " + version) + ", Expected: ") + VERSION) + '.');
// fetching the message type
int msgType = buf.readInt();
MessageType[] values = MessageType.values();
Preconditions.checkState((msgType >= 0) && (msgType < values.length), ("Illegal message type with index " + msgType) + '.');
return values[msgType];
} | 3.26 |
flink_InternalServiceDecorator_getNamespacedInternalServiceName_rdh | /**
* Generate namespaced name of the internal Service.
*/
public static String getNamespacedInternalServiceName(String clusterId, String namespace) {
return (getInternalServiceName(clusterId) + ".") + namespace;
} | 3.26 |
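A hedged example of the resulting name: for a cluster id 'my-flink-cluster' in namespace 'default', the namespaced internal Service name has the shape '<internal-service-name>.default'. The sketch below assumes, purely for illustration, that getInternalServiceName returns the cluster id unchanged; the real helper is not shown in the snippet.

/** Stand-in sketch; internalServiceName is an assumption, not the real Flink helper. */
public class InternalServiceNameSketch {
    static String internalServiceName(String clusterId) {
        return clusterId; // assumption: the internal Service is named after the cluster id
    }

    static String namespacedInternalServiceName(String clusterId, String namespace) {
        return internalServiceName(clusterId) + "." + namespace;
    }

    public static void main(String[] args) {
        // Such a name is resolvable as a stable DNS name inside the Kubernetes cluster.
        System.out.println(namespacedInternalServiceName("my-flink-cluster", "default"));
        // -> my-flink-cluster.default
    }
}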