name | code_snippet | score |
---|---|---|
flink_KubernetesUtils_getDeploymentName_rdh
|
/**
* Generate name of the Deployment.
*/
public static String getDeploymentName(String clusterId) {
return clusterId;
}
| 3.26 |
flink_KubernetesUtils_parsePort_rdh
|
/**
* Parse a valid port for the config option. A fixed port is expected; a range of ports is not
* supported.
*
* @param flinkConfig
* flink config
* @param port
* port config option
* @return valid port
*/
public static Integer parsePort(Configuration flinkConfig, ConfigOption<String> port) {
checkNotNull(flinkConfig.get(port), port.key() + " should not be null.");
try {
return Integer.parseInt(flinkConfig.get(port));
} catch (NumberFormatException ex) {
throw new FlinkRuntimeException(port.key() + " should be specified to a fixed port. Do not support a range of ports.", ex);
}
}
| 3.26 |
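A minimal usage sketch of parsePort (illustrative only; the option below is defined ad hoc for this example and is not taken from the snippet):

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

// Hypothetical String-typed port option, defined only for this example.
ConfigOption<String> restPort = ConfigOptions.key("rest.bind-port").stringType().defaultValue("8081");
Configuration conf = new Configuration();
conf.set(restPort, "8081");
Integer fixed = KubernetesUtils.parsePort(conf, restPort); // returns 8081
conf.set(restPort, "8081-8085");
// parsePort(conf, restPort) would now throw a FlinkRuntimeException, because a range is not a fixed port.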
flink_KubernetesUtils_createCompletedCheckpointStore_rdh
|
/**
* Create a {@link DefaultCompletedCheckpointStore} with {@link KubernetesStateHandleStore}.
*
* @param configuration
* configuration to build a RetrievableStateStorageHelper
* @param kubeClient
* flink kubernetes client
* @param configMapName
* ConfigMap name
* @param executor
* executor to run blocking calls
* @param lockIdentity
* lock identity to check the leadership
* @param maxNumberOfCheckpointsToRetain
* max number of checkpoints to retain on state store
* handle
* @param restoreMode
* the mode in which the job is restoring
* @return a {@link DefaultCompletedCheckpointStore} with {@link KubernetesStateHandleStore}.
* @throws Exception
* when creating the storage helper fails
*/
public static CompletedCheckpointStore createCompletedCheckpointStore(Configuration configuration, FlinkKubeClient kubeClient, Executor executor, String configMapName, @Nullable String lockIdentity, int maxNumberOfCheckpointsToRetain, SharedStateRegistryFactory sharedStateRegistryFactory, Executor ioExecutor, RestoreMode restoreMode) throws Exception {
final RetrievableStateStorageHelper<CompletedCheckpoint> stateStorage = new FileSystemStateStorageHelper<>(HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(configuration), COMPLETED_CHECKPOINT_FILE_SUFFIX);
final KubernetesStateHandleStore<CompletedCheckpoint> stateHandleStore = new KubernetesStateHandleStore<>(kubeClient, configMapName, stateStorage, k -> k.startsWith(CHECKPOINT_ID_KEY_PREFIX), lockIdentity);
Collection<CompletedCheckpoint> checkpoints = DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints(stateHandleStore, KubernetesCheckpointStoreUtil.INSTANCE);
return new DefaultCompletedCheckpointStore<>(maxNumberOfCheckpointsToRetain, stateHandleStore, KubernetesCheckpointStoreUtil.INSTANCE, checkpoints, sharedStateRegistryFactory.create(ioExecutor, checkpoints, restoreMode), executor);
}
| 3.26 |
flink_KubernetesUtils_getServiceAccount_rdh
|
/**
* Get the service account from the input pod first; if it is not specified, the pod's service
* account name will be used instead.
*
* @param flinkPod
* the Flink pod to parse the service account
* @return the parsed service account
*/
@Nullable
public static String getServiceAccount(FlinkPod flinkPod) {
final String serviceAccount = flinkPod.getPodWithoutMainContainer().getSpec().getServiceAccount();
if (serviceAccount == null) {
return flinkPod.getPodWithoutMainContainer().getSpec().getServiceAccountName();
}
return serviceAccount;
}
| 3.26 |
flink_KubernetesUtils_getCommonLabels_rdh
|
/**
* Get the common labels for Flink native clusters. All the Kubernetes resources will be set
* with these labels.
*
* @param clusterId
* cluster id
* @return Return common labels map
*/
public static Map<String, String> getCommonLabels(String clusterId) {
final Map<String, String> commonLabels = new HashMap<>();
commonLabels.put(Constants.LABEL_TYPE_KEY, Constants.LABEL_TYPE_NATIVE_TYPE);
commonLabels.put(Constants.LABEL_APP_KEY, clusterId);
return commonLabels;
}
| 3.26 |
flink_KubernetesUtils_tryToGetPrettyPrintYaml_rdh
|
/**
* Try to get the pretty-printed YAML for a Kubernetes resource.
*
* @param kubernetesResource
* kubernetes resource
* @return the pretty-printed YAML, or {@link KubernetesResource#toString()} if parsing failed.
*/
public static String tryToGetPrettyPrintYaml(KubernetesResource kubernetesResource) {
try {
return f0.writerWithDefaultPrettyPrinter().writeValueAsString(kubernetesResource);
} catch (Exception ex) {
LOG.debug("Failed to get the pretty print yaml, fallback to {}", kubernetesResource, ex);
return kubernetesResource.toString();
}
}
| 3.26 |
flink_KubernetesUtils_checkAndUpdatePortConfigOption_rdh
|
/**
* Check whether the port config option is a fixed port. If not, the fallback port will be set
* in the configuration.
*
* @param flinkConfig
* flink configuration
* @param port
* config option need to be checked
* @param fallbackPort
* the fallback port that will be set to the configuration
*/
public static void checkAndUpdatePortConfigOption(Configuration flinkConfig, ConfigOption<String> port, int fallbackPort) {
if (KubernetesUtils.parsePort(flinkConfig, port) == 0) {
flinkConfig.setString(port, String.valueOf(fallbackPort));
LOG.info("Kubernetes deployment requires a fixed port. Configuration {} will be set to {}", port.key(), fallbackPort);
}
}
| 3.26 |
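A small sketch in the same spirit (the option below is hypothetical, defined only for illustration): a configured value of "0" means "no fixed port", so the fallback is written back into the configuration.

ConfigOption<String> blobPort = ConfigOptions.key("blob.server.port").stringType().defaultValue("0");
Configuration conf = new Configuration(); // option left at its default "0"
KubernetesUtils.checkAndUpdatePortConfigOption(conf, blobPort, 6124);
// conf.get(blobPort) now returns "6124", and an INFO line documents the rewrite.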
flink_KubernetesUtils_resolveDNSPolicy_rdh
|
/**
* Resolve the DNS policy value. Return DNS_POLICY_HOSTNETWORK if the host network is enabled;
* otherwise, check whether a DNS policy is overridden in the pod template.
*
* @param dnsPolicy
* DNS policy defined in pod template spec
* @param hostNetworkEnabled
* Host network enabled or not
* @return the resolved value
*/
public static String resolveDNSPolicy(String dnsPolicy, boolean hostNetworkEnabled) {
if (hostNetworkEnabled) {
return DNS_POLICY_HOSTNETWORK;
}
if (!StringUtils.isNullOrWhitespaceOnly(dnsPolicy)) {
return dnsPolicy;
}
return DNS_POLICY_DEFAULT;
}
| 3.26 |
flink_KubernetesUtils_resolveUserDefinedValue_rdh
|
/**
* Resolve the user-defined value with the following precedence: an explicitly set config option
* value is taken first, then the value defined in the pod template, and finally the default
* value of the config option if nothing else is specified.
*
* @param flinkConfig
* flink configuration
* @param configOption
* the config option to define the Kubernetes fields
* @param valueOfConfigOptionOrDefault
* the value defined by explicit config option or default
* @param valueOfPodTemplate
* the value defined in the pod template
* @param fieldDescription
* Kubernetes fields description
* @param <T>
* The type of value associated with the configuration option.
* @return the resolved value
*/
public static <T> String resolveUserDefinedValue(Configuration flinkConfig, ConfigOption<T> configOption, String valueOfConfigOptionOrDefault, @Nullable String valueOfPodTemplate, String fieldDescription) {
final String resolvedValue;
if (valueOfPodTemplate != null) {
// The config option is explicitly set.
if (flinkConfig.contains(configOption)) {
resolvedValue = valueOfConfigOptionOrDefault;
LOG.info("The {} configured in pod template will be overwritten to '{}' " + "because of explicitly configured options.", fieldDescription, resolvedValue);
} else {
resolvedValue = valueOfPodTemplate;
}
} else {
resolvedValue = valueOfConfigOptionOrDefault;
}
return resolvedValue;
}
| 3.26 |
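The precedence above is easiest to see with concrete values (the option key and all values below are made up for illustration):

ConfigOption<String> serviceAccount = ConfigOptions.key("kubernetes.service-account").stringType().defaultValue("default");
Configuration conf = new Configuration(); // option not set explicitly
String resolved = KubernetesUtils.resolveUserDefinedValue(conf, serviceAccount, conf.get(serviceAccount), "pod-template-sa", "service account");
// resolved == "pod-template-sa": the pod template wins because the option was not explicitly configured
conf.set(serviceAccount, "flink-sa");
resolved = KubernetesUtils.resolveUserDefinedValue(conf, serviceAccount, conf.get(serviceAccount), "pod-template-sa", "service account");
// resolved == "flink-sa": an explicitly configured option overrides the pod template value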
flink_KubernetesUtils_createJobGraphStateHandleStore_rdh
|
/**
* Create a {@link KubernetesStateHandleStore} which stores {@link JobGraph}.
*
* @param configuration
* configuration to build a RetrievableStateStorageHelper
* @param flinkKubeClient
* flink kubernetes client
* @param configMapName
* ConfigMap name
* @param lockIdentity
* lock identity to check the leadership
* @return a {@link KubernetesStateHandleStore} which stores {@link JobGraph}.
* @throws Exception
* when creating the storage helper fails
*/
public static KubernetesStateHandleStore<JobGraph> createJobGraphStateHandleStore(Configuration configuration, FlinkKubeClient flinkKubeClient, String configMapName, String lockIdentity) throws Exception {
final RetrievableStateStorageHelper<JobGraph> stateStorage = new FileSystemStateStorageHelper<>(HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(configuration), SUBMITTED_JOBGRAPH_FILE_PREFIX);
return new KubernetesStateHandleStore<>(flinkKubeClient, configMapName, stateStorage, k -> k.startsWith(JOB_GRAPH_STORE_KEY_PREFIX), lockIdentity);
}
| 3.26 |
flink_KubernetesUtils_getResourceRequirements_rdh
|
/**
* Get resource requirements from memory and cpu.
*
* @param resourceRequirements
* resource requirements in pod template
* @param mem
* Memory in mb.
* @param memoryLimitFactor
* limit factor for the memory, used to set the limit resources.
* @param cpu
* cpu.
* @param cpuLimitFactor
* limit factor for the cpu, used to set the limit resources.
* @param externalResources
* external resources
* @param externalResourceConfigKeys
* config keys of external resources
* @return KubernetesResource requirements.
*/
public static ResourceRequirements getResourceRequirements(ResourceRequirements resourceRequirements, int mem, double memoryLimitFactor, double cpu, double cpuLimitFactor, Map<String, ExternalResource> externalResources, Map<String, String> externalResourceConfigKeys) {
final Quantity cpuQuantity = new Quantity(String.valueOf(cpu));
final Quantity cpuLimitQuantity = new Quantity(String.valueOf(cpu * cpuLimitFactor));
final Quantity memQuantity = new Quantity(mem + Constants.RESOURCE_UNIT_MB);
final Quantity memQuantityLimit = new Quantity(((int) (mem * memoryLimitFactor)) + Constants.RESOURCE_UNIT_MB);
ResourceRequirementsBuilder resourceRequirementsBuilder = new ResourceRequirementsBuilder(resourceRequirements).addToRequests(Constants.RESOURCE_NAME_MEMORY, memQuantity).addToRequests(Constants.RESOURCE_NAME_CPU, cpuQuantity).addToLimits(Constants.RESOURCE_NAME_MEMORY, memQuantityLimit).addToLimits(Constants.RESOURCE_NAME_CPU, cpuLimitQuantity);
// Add the external resources to resource requirement.
for (Map.Entry<String, ExternalResource> externalResource : externalResources.entrySet()) {
final String configKey = externalResourceConfigKeys.get(externalResource.getKey());
if (!StringUtils.isNullOrWhitespaceOnly(configKey)) {
final Quantity resourceQuantity = new Quantity(String.valueOf(externalResource.getValue().getValue().longValue()));
resourceRequirementsBuilder.addToRequests(configKey, resourceQuantity).addToLimits(configKey, resourceQuantity);
LOG.info("Request external resource {} with config key {}.", resourceQuantity.getAmount(), configKey);
}
}
return resourceRequirementsBuilder.build();
}
| 3.26 |
flink_KubernetesUtils_getOnlyConfigMap_rdh
|
/**
* Check that the ConfigMap list contains only the expected ConfigMap.
*
* @param configMaps
* ConfigMap list to check
* @param expectedConfigMapName
* expected ConfigMap Name
* @return Return the expected ConfigMap
*/
public static KubernetesConfigMap getOnlyConfigMap(List<KubernetesConfigMap> configMaps, String expectedConfigMapName) {
if ((configMaps.size() == 1) && expectedConfigMapName.equals(configMaps.get(0).getName())) {
return configMaps.get(0);
}
throw new IllegalStateException(String.format("ConfigMap list should only contain a single ConfigMap [%s].", expectedConfigMapName));
}
/**
* Get the {@link LeaderInformation} from ConfigMap.
*
* @param configMap
* ConfigMap contains the leader information
* @return Parsed leader information. It could be {@link LeaderInformation#empty()}
| 3.26 |
flink_KubernetesUtils_getNamespacedServiceName_rdh
|
/**
* Generate namespaced name of the service.
*/
public static String getNamespacedServiceName(Service service) {
return (service.getMetadata().getName() + ".") + service.getMetadata().getNamespace();
}
| 3.26 |
flink_KubernetesUtils_getConfigMapLabels_rdh
|
/**
* Get ConfigMap labels for the current Flink cluster. They could be used to filter and clean up
* the resources.
*
* @param clusterId
* cluster id
* @param type
* the config map use case. It could only be {@link Constants#LABEL_CONFIGMAP_TYPE_HIGH_AVAILABILITY} now.
* @return Return ConfigMap labels.
*/
public static Map<String, String> getConfigMapLabels(String clusterId, String type) {
final Map<String, String> labels = new HashMap<>(getCommonLabels(clusterId));
labels.put(Constants.LABEL_CONFIGMAP_TYPE_KEY, type);
return Collections.unmodifiableMap(labels);
}
| 3.26 |
flink_KubernetesUtils_isHostNetwork_rdh
|
/**
* Checks if hostNetwork is enabled.
*/
public static boolean isHostNetwork(Configuration configuration) {
return configuration.getBoolean(KubernetesConfigOptions.KUBERNETES_HOSTNETWORK_ENABLED);
}
| 3.26 |
flink_RawFormatFactory_validateAndExtractSingleField_rdh
|
/**
* Validates and extracts the single field type from the given physical row schema.
*/
private static LogicalType validateAndExtractSingleField(RowType physicalRowType) {
if (physicalRowType.getFieldCount() != 1) {
String schemaString = physicalRowType.getFields().stream().map(RowType.RowField::asSummaryString).collect(Collectors.joining(", "));
throw new ValidationException(String.format("The 'raw' format only supports single physical column. " + "However the defined schema contains multiple physical columns: [%s]", schemaString));
}
LogicalType v11 = physicalRowType.getChildren().get(0);
checkFieldType(v11);
return v11;
}
| 3.26 |
flink_RawFormatFactory_checkFieldType_rdh
|
/**
* Checks whether the given field type is supported.
*/
private static void checkFieldType(LogicalType fieldType) {
if (!supportedTypes.contains(fieldType.getTypeRoot())) {
throw new ValidationException(String.format("The 'raw' format doesn't supports '%s' as column type.", fieldType.asSummaryString()));
}
}
| 3.26 |
flink_TableFunctionCollector_reset_rdh
|
/**
* Resets the flag that indicates whether {@link #collect(Object)} has been called.
*/
public void reset() {
this.collected = false;
}
| 3.26 |
flink_TableFunctionCollector_setInput_rdh
|
/**
* Sets the input row from left table, which will be used to cross join with the result of table
* function.
*/
public void setInput(Object input) {
this.input = input;
}
| 3.26 |
flink_TableFunctionCollector_getInput_rdh
|
/**
* Gets the input value from left table, which will be used to cross join with the result of
* table function.
*/
public Object getInput() {
return input;
}
| 3.26 |
flink_TableFunctionCollector_setCollector_rdh
|
/**
* Sets the current collector, which is used to emit the final row.
*/
public void setCollector(Collector<?> collector) {
this.collector = collector;
}
| 3.26 |
flink_TableFunctionCollector_isCollected_rdh
|
/**
* Whether {@link #collect(Object)} has been called.
*
* @return True if {@link #collect(Object)} has been called.
*/
public boolean isCollected() {
return collected;
}
| 3.26 |
flink_SavepointMetadata_getNewOperators_rdh
|
/**
*
* @return List of new operator states for the savepoint, represented by their target {@link OperatorID} and {@link BootstrapTransformation}.
*/
public List<BootstrapTransformationWithID<?>> getNewOperators() {
return operatorStateIndex.values().stream().filter(OperatorStateSpec::isNewStateTransformation).map(OperatorStateSpec::asNewStateTransformation).collect(Collectors.toList());
}
| 3.26 |
flink_SavepointMetadata_getExistingOperators_rdh
|
/**
*
* @return List of {@link OperatorState} that already exists within the savepoint.
*/
public List<OperatorState> getExistingOperators() {
return operatorStateIndex.values().stream().filter(OperatorStateSpec::isExistingState).map(OperatorStateSpec::asExistingState).collect(Collectors.toList());
}
| 3.26 |
flink_SavepointMetadata_getOperatorState_rdh
|
/**
*
* @return Operator state for the given UID.
* @throws IOException
* If the savepoint does not contain operator state with the given uid.
*/
public OperatorState getOperatorState(String uid) throws IOException {
OperatorID operatorID = OperatorIDGenerator.fromUid(uid);
OperatorStateSpec operatorState = operatorStateIndex.get(operatorID);
if ((operatorState == null) || operatorState.isNewStateTransformation()) {
throw new IOException("Savepoint does not contain state with operator uid " + uid);
}
return operatorState.asExistingState();
}
| 3.26 |
flink_PythonFunctionFactory_getPythonFunction_rdh
|
/**
* Returns the PythonFunction according to the fully qualified name of the Python UDF, i.e.
* ${moduleName}.${functionName} or ${moduleName}.${className}.
*
* @param fullyQualifiedName
* The fully qualified name of the Python UDF.
* @param config
* The configuration of python dependencies.
* @param classLoader
* The classloader which is used to identify different jobs.
* @return The PythonFunction object which represents the Python UDF.
*/
static PythonFunction getPythonFunction(String fullyQualifiedName, ReadableConfig config, ClassLoader classLoader) throws ExecutionException {
int splitIndex = fullyQualifiedName.lastIndexOf(".");
if (splitIndex <= 0) {
throw new IllegalArgumentException(String.format("The fully qualified name is invalid: '%s'", fullyQualifiedName));
}
String moduleName = fullyQualifiedName.substring(0, splitIndex);
String objectName = fullyQualifiedName.substring(splitIndex + 1);
Configuration mergedConfig = new Configuration(ExecutionEnvironment.getExecutionEnvironment().getConfiguration());
if (config instanceof TableConfig) {
PythonDependencyUtils.merge(mergedConfig, ((TableConfig) (config)).getConfiguration());
} else {
PythonDependencyUtils.merge(mergedConfig, ((Configuration) (config)));
}
PythonFunctionFactory pythonFunctionFactory = PYTHON_FUNCTION_FACTORY_CACHE.get(CacheKey.of(mergedConfig, classLoader));
ensureCacheCleanupExecutorServiceStarted();
return pythonFunctionFactory.getPythonFunction(moduleName, objectName);
}
| 3.26 |
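The name splitting in the snippet is plain string handling; a self-contained illustration of how the module and object names are derived (the values are made up):

String fqn = "my_pkg.my_module.my_udf";
int splitIndex = fqn.lastIndexOf(".");
String moduleName = fqn.substring(0, splitIndex);  // "my_pkg.my_module"
String objectName = fqn.substring(splitIndex + 1); // "my_udf", i.e. the function or class name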
flink_BroadcastConnectedStream_getSecondInput_rdh
|
/**
* Returns the {@link BroadcastStream}.
*
* @return The stream which, by convention, is the broadcast one.
*/
public BroadcastStream<IN2> getSecondInput() {
return broadcastStream;
}
| 3.26 |
flink_BroadcastConnectedStream_process_rdh
|
/**
* Assumes as inputs a {@link BroadcastStream} and a non-keyed {@link DataStream} and applies
* the given {@link BroadcastProcessFunction} on them, thereby creating a transformed output
* stream.
*
* @param function
* The {@link BroadcastProcessFunction} that is called for each element in the
* stream.
* @param outTypeInfo
* The type of the output elements.
* @param <OUT>
* The type of the output elements.
* @return The transformed {@link DataStream}.
*/
@PublicEvolving
public <OUT> SingleOutputStreamOperator<OUT> process(final BroadcastProcessFunction<IN1, IN2, OUT> function, final TypeInformation<OUT> outTypeInfo) {
Preconditions.checkNotNull(function);
Preconditions.checkArgument(!(nonBroadcastStream instanceof KeyedStream), "A BroadcastProcessFunction can only be used on a non-keyed stream.");
return transform(function, outTypeInfo);
}
| 3.26 |
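A hedged usage sketch of the non-keyed broadcast pattern this method supports (the stream variables and the descriptor are illustrative assumptions, not taken from the snippet):

import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;

// Assumed inputs: events is a non-keyed DataStream<String>, ruleStream is a DataStream<String>.
MapStateDescriptor<String, String> rulesDescriptor = new MapStateDescriptor<>("rules", Types.STRING, Types.STRING);
BroadcastStream<String> rules = ruleStream.broadcast(rulesDescriptor);
DataStream<String> result = events
        .connect(rules)
        .process(new BroadcastProcessFunction<String, String, String>() {
            @Override
            public void processElement(String value, ReadOnlyContext ctx, Collector<String> out) {
                // read-only access to the broadcast state is available via ctx.getBroadcastState(rulesDescriptor)
                out.collect(value);
            }
            @Override
            public void processBroadcastElement(String rule, Context ctx, Collector<String> out) throws Exception {
                // update the broadcast state with the newly arrived rule
                ctx.getBroadcastState(rulesDescriptor).put(rule, rule);
            }
        }, Types.STRING);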
flink_SecurityFactoryServiceLoader_findModuleFactory_rdh
|
/**
* Find a suitable {@link SecurityModuleFactory} based on canonical name.
*/
public static SecurityModuleFactory findModuleFactory(String securityModuleFactoryClass) throws NoMatchSecurityFactoryException {
return findFactoryInternal(securityModuleFactoryClass, SecurityModuleFactory.class, SecurityModuleFactory.class.getClassLoader());
}
| 3.26 |
flink_SecurityFactoryServiceLoader_findContextFactory_rdh
|
/**
* Find a suitable {@link SecurityContextFactory} based on canonical name.
*/
public static SecurityContextFactory findContextFactory(String securityContextFactoryClass) throws NoMatchSecurityFactoryException {
return findFactoryInternal(securityContextFactoryClass, SecurityContextFactory.class, SecurityContextFactory.class.getClassLoader());
}
| 3.26 |
flink_HeapListState_get_rdh
|
// ------------------------------------------------------------------------
// state access
// ------------------------------------------------------------------------
@Override
public Iterable<V> get() {
return getInternal();
}
| 3.26 |
flink_HeapListState_mergeState_rdh
|
// ------------------------------------------------------------------------
// state merging
// ------------------------------------------------------------------------
@Override
protected List<V> mergeState(List<V> a, List<V> b) {
a.addAll(b);
return a;
}
| 3.26 |
flink_HistoryServer_createOrGetFile_rdh
|
// ------------------------------------------------------------------------
// File generation
// ------------------------------------------------------------------------
static FileWriter createOrGetFile(File folder, String name) throws IOException {
File file = new File(folder, name + ".json");
if (!file.exists()) {
Files.createFile(file.toPath());
}
FileWriter fr = new FileWriter(file);
return fr;
}
| 3.26 |
flink_HistoryServer_start_rdh
|
// ------------------------------------------------------------------------
// Life-cycle
// ------------------------------------------------------------------------
void start() throws IOException, InterruptedException {
synchronized(startupShutdownLock) {
LOG.info("Starting history server.");
Files.createDirectories(webDir.toPath());
LOG.info("Using directory {} as local cache.",
webDir);
Router router = new Router();
LogUrlUtil.getValidLogUrlPattern(config, HistoryServerOptions.HISTORY_SERVER_JOBMANAGER_LOG_URL_PATTERN).ifPresent(pattern -> router.addGet(JobManagerLogUrlHeaders.getInstance().getTargetRestEndpointURL(), new GeneratedLogUrlHandler(CompletableFuture.completedFuture(pattern))));
LogUrlUtil.getValidLogUrlPattern(config, HistoryServerOptions.HISTORY_SERVER_TASKMANAGER_LOG_URL_PATTERN).ifPresent(pattern -> router.addGet(TaskManagerLogUrlHeaders.getInstance().getTargetRestEndpointURL(), new GeneratedLogUrlHandler(CompletableFuture.completedFuture(pattern))));
router.addGet("/:*", new HistoryServerStaticFileServerHandler(webDir));
createDashboardConfigFile();
executor.scheduleWithFixedDelay(getArchiveFetchingRunnable(), 0, refreshIntervalMillis, TimeUnit.MILLISECONDS);
netty = new WebFrontendBootstrap(router, LOG, webDir, serverSSLFactory, webAddress, webPort, config);
}
}
| 3.26 |
flink_MaxWithRetractAggFunction_getArgumentDataTypes_rdh
|
// --------------------------------------------------------------------------------------------
// Planning
// --------------------------------------------------------------------------------------------
@Override
public List<DataType> getArgumentDataTypes() {
return Collections.singletonList(valueDataType);
}
| 3.26 |
flink_AggregatorWithName_getName_rdh
|
/**
* Gets the name that the aggregator is registered under.
*
* @return The name that the aggregator is registered under.
*/
public String getName() {
return name;
}
| 3.26 |
flink_AggregatorWithName_getAggregator_rdh
|
/**
* Gets the aggregator.
*
* @return The aggregator.
*/
public Aggregator<T> getAggregator() {
return aggregator;
}
| 3.26 |
flink_BigDecComparator_putNormalizedKey_rdh
|
/**
* Adds a normalized key containing a normalized order of magnitude of the given record. 2 bits
* determine the sign (negative, zero, positive), 33 bits determine the magnitude. This method
* adds at most 5 bytes that contain information.
*/
@Override
public void putNormalizedKey(BigDecimal record, MemorySegment target, int offset, int len) {
final long signum = record.signum();
// order of magnitude
// smallest:
// scale = Integer.MAX, precision = 1 => SMALLEST_MAGNITUDE
// largest:
// scale = Integer.MIN, precision = Integer.MAX => LARGEST_MAGNITUDE
final long mag = (((long) (record.scale())) - ((long) (record.precision()))) + 1;
// normalize value range: from 0 to (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE)
final long normMag = ((-1L) * LARGEST_MAGNITUDE) + mag;
// normalize value range dependent on sign:
// 0 to (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE)
// OR (SMALLEST_MAGNITUDE + -1*LARGEST_MAGNITUDE) to 0
// --> uses at most 33 bit (5 least-significant bytes)
long signNormMag = (signum < 0) ? normMag : (SMALLEST_MAGNITUDE + ((-1L) * LARGEST_MAGNITUDE)) - normMag;
// zero has no magnitude
// set 34th bit to flag zero
if (signum == 0) {
signNormMag = 0L;
signNormMag |= 1L << 34;
} else if (signum > 0) {
signNormMag |= 1L << 35;
}
// add 5 least-significant bytes that contain value to target
for (int i = 0; (i < 5) && (len > 0); i++ , len--) {
final byte b = ((byte) (signNormMag >>> (8 * (4 - i))));
target.put(offset++, b);
}
}
| 3.26 |
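A minimal sketch of the magnitude term used above (standard java.math only, example values made up): mag = scale - precision + 1, so smaller absolute values yield a larger mag.

import java.math.BigDecimal;

BigDecimal a = new BigDecimal("0.05"); // scale = 2, precision = 1 -> mag = 2 - 1 + 1 = 2
BigDecimal b = new BigDecimal("12.5"); // scale = 1, precision = 3 -> mag = 1 - 3 + 1 = -1
long magA = ((long) a.scale() - a.precision()) + 1; // 2
long magB = ((long) b.scale() - b.precision()) + 1; // -1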
flink_EnrichedRowData_from_rdh
|
/**
* Creates a new {@link EnrichedRowData} with the provided {@code fixedRow} as the immutable
* static row, and uses the {@code producedRowFields}, {@code fixedRowFields} and {@code mutableRowFields} arguments to compute the indexes mapping.
*
* <p>The {@code producedRowFields} should include the name of fields of the full row once
* mutable and fixed rows are merged, while {@code fixedRowFields} and {@code mutableRowFields}
* should contain respectively the field names of fixed row and mutable row. All the lists are
* ordered with indexes matching the position of the field in the row. As an example, for a
* complete row {@code (a, b, c)} the mutable row might be {@code (a, c)} and the fixed row
* might be {@code (b)}
*/
public static EnrichedRowData from(RowData fixedRow, List<String> producedRowFields, List<String> mutableRowFields, List<String> fixedRowFields) {
return new EnrichedRowData(fixedRow, computeIndexMapping(producedRowFields, mutableRowFields, fixedRowFields));
}
| 3.26 |
flink_EnrichedRowData_computeIndexMapping_rdh
|
/**
* This method computes the index mapping for {@link EnrichedRowData}.
*
* @see EnrichedRowData#from(RowData, List, List, List)
*/
public static int[] computeIndexMapping(List<String> producedRowFields, List<String> mutableRowFields, List<String> fixedRowFields) {
int[] indexMapping = new int[producedRowFields.size()];
for (int i = 0; i < producedRowFields.size(); i++) {
String fieldName = producedRowFields.get(i);
int v20 = mutableRowFields.indexOf(fieldName);
if (v20 < 0) {
v20 = -(fixedRowFields.indexOf(fieldName) + 1);
}
indexMapping[i] = v20;
}
return indexMapping;
}
| 3.26 |
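A worked example of the mapping convention (field names are made up): non-negative entries index into the mutable row, while -(i + 1) encodes position i in the fixed row.

import java.util.Arrays;
import java.util.List;

List<String> produced = Arrays.asList("a", "b", "c"); // full row after merging
List<String> mutable = Arrays.asList("a", "c");
List<String> fixed = Arrays.asList("b");
int[] mapping = EnrichedRowData.computeIndexMapping(produced, mutable, fixed);
// mapping == {0, -1, 1}: "a" -> mutable[0], "b" -> fixed[0], "c" -> mutable[1]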
flink_EnrichedRowData_getArity_rdh
|
// ---------------------------------------------------------------------------------------------
@Override
public int getArity() {
return indexMapping.length;
}
| 3.26 |
flink_BroadcastVariableManager_getNumberOfVariablesWithReferences_rdh
|
// --------------------------------------------------------------------------------------------
public int getNumberOfVariablesWithReferences() {
return this.variables.size();
}
| 3.26 |
flink_BroadcastVariableManager_materializeBroadcastVariable_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Materializes the broadcast variable for the given name, scoped to the given task and its
* iteration superstep. An existing materialization created by another parallel subtask may be
* returned, if it hasn't expired yet.
*/
public <T> BroadcastVariableMaterialization<T, ?> materializeBroadcastVariable(String name, int superstep,
BatchTask<?, ?> holder, MutableReader<?> reader, TypeSerializerFactory<T> serializerFactory) throws IOException {
final BroadcastVariableKey key = new BroadcastVariableKey(holder.getEnvironment().getJobVertexId(), name, superstep);
while (true) {
final BroadcastVariableMaterialization<T, Object> v1 = new BroadcastVariableMaterialization<T, Object>(key);
final BroadcastVariableMaterialization<?, ?> previous = variables.putIfAbsent(key, v1);
@SuppressWarnings("unchecked")
final BroadcastVariableMaterialization<T, ?> materialization = (previous == null) ? v1 : ((BroadcastVariableMaterialization<T, ?>) (previous));
try {
materialization.materializeVariable(reader, serializerFactory, holder);
return materialization;
} catch (MaterializationExpiredException e) {
// concurrent release. as an optimization, try to replace the previous one with our
// version. otherwise we might spin for a while
// until the releaser removes the variable
// NOTE: This would also catch a bug that prevented an expired materialization from ever
// being removed, so it acts as a future safeguard
boolean replaceSuccessful = false;
try {
replaceSuccessful = variables.replace(key, materialization, v1);
} catch (Throwable t) {
}
if (replaceSuccessful) {
try {
v1.materializeVariable(reader, serializerFactory, holder);
return v1;
} catch (MaterializationExpiredException ee) {
// can still happen in cases of extreme races and fast tasks
// fall through the loop;
}
}
// else fall through the loop
}
}
}
| 3.26 |
flink_LocalProperties_addUniqueFields_rdh
|
/**
* Adds a combination of fields that are unique in these data properties.
*
* @param uniqueFields
* The fields that are unique in these data properties.
*/
public LocalProperties addUniqueFields(FieldSet uniqueFields) {
LocalProperties copy = clone();
if (copy.uniqueFields == null) {
copy.uniqueFields = new HashSet<FieldSet>();
}
copy.uniqueFields.add(uniqueFields);
return copy;
}
| 3.26 |
flink_LocalProperties_getOrdering_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the key order.
*
* @return The key order, or <code>null</code> if nothing is ordered.
*/
public Ordering getOrdering() {
return ordering;
}
| 3.26 |
flink_LocalProperties_areFieldsUnique_rdh
|
/**
* Checks whether the given set of fields is unique, as specified in these local properties.
*
* @param set
* The set to check.
* @return True, if the given column combination is unique, false if not.
*/
public boolean areFieldsUnique(FieldSet set) {
return (this.uniqueFields != null) && this.uniqueFields.contains(set);
}
| 3.26 |
flink_LocalProperties_combine_rdh
|
// --------------------------------------------------------------------------------------------
public static LocalProperties combine(LocalProperties lp1, LocalProperties lp2) {
if (lp1.ordering != null) {
return lp1;
} else if (lp2.ordering != null) {
return lp2;
} else if (lp1.groupedFields != null) {
return lp1;
} else if (lp2.groupedFields != null) {
return lp2;
} else if ((lp1.uniqueFields != null) && (!lp1.uniqueFields.isEmpty())) {
return lp1;
} else if ((lp2.uniqueFields != null) && (!lp2.uniqueFields.isEmpty())) {
return lp2;
} else {
return lp1;
}
}
| 3.26 |
flink_LocalProperties_filterBySemanticProperties_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Filters these LocalProperties by the fields that are forwarded to the output as described by
* the SemanticProperties.
*
* @param props
* The semantic properties holding information about forwarded fields.
* @param input
* The index of the input.
* @return The filtered LocalProperties
*/
public LocalProperties filterBySemanticProperties(SemanticProperties props, int input) {
if (props == null) {
throw new NullPointerException("SemanticProperties may not be null.");
}
LocalProperties returnProps = new LocalProperties();
// check if sorting is preserved
if (this.ordering != null) {
Ordering newOrdering = new Ordering();
for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {
int sourceField = this.ordering.getInvolvedIndexes().get(i);
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if ((targetField == null) || (targetField.size() == 0)) {
if (i == 0) {
// order fully destroyed
newOrdering = null;
break;
} else {
// order partially preserved
break;
}
} else {
// use any field of target fields for now. We should use something like field
// equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn("Found that a field is forwarded to more than one target field in " + "semantic forwarded field information. Will only use the field with the lowest index.");
}
newOrdering.appendOrdering(targetField.toArray()[0], this.ordering.getType(i), this.ordering.getOrder(i));
}
}
returnProps.ordering = newOrdering;
if (newOrdering != null) {
returnProps.groupedFields = newOrdering.getInvolvedIndexes();
} else {
returnProps.groupedFields = null;
}
} else if (this.groupedFields != null) {
FieldList newGroupedFields = new FieldList();
for (Integer sourceField : this.groupedFields) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if ((targetField == null) || (targetField.size() == 0)) {
newGroupedFields = null;
break;
} else {
// use any field of target fields for now. We should use something like field
// equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn("Found that a field is forwarded to more than one target field in " + "semantic forwarded field information. Will only use the field with the lowest index.");
}
newGroupedFields = newGroupedFields.addField(targetField.toArray()[0]);
}
}
returnProps.groupedFields = newGroupedFields;
}
if (this.uniqueFields != null) {
Set<FieldSet> newUniqueFields = new HashSet<FieldSet>();
for (FieldSet fields : this.uniqueFields) {
FieldSet newFields = new FieldSet();
for (Integer sourceField : fields) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if ((targetField == null) || (targetField.size() == 0)) {
newFields = null;
break;
} else {
// use any field of target fields for now. We should use something like
// field equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn("Found that a field is forwarded to more than one target field in " + "semantic forwarded field information. Will only use the field with the lowest index.");
}
newFields = newFields.addField(targetField.toArray()[0]);
}
}
if (newFields != null) {
newUniqueFields.add(newFields);
}
}
if (!newUniqueFields.isEmpty()) {
returnProps.uniqueFields = newUniqueFields;
} else {
returnProps.uniqueFields = null;
}
}
return returnProps;
}
| 3.26 |
flink_LocalProperties_forOrdering_rdh
|
// --------------------------------------------------------------------------------------------
public static LocalProperties forOrdering(Ordering o) {
LocalProperties props = new LocalProperties();
props.ordering = o;
props.groupedFields = o.getInvolvedIndexes();
return props;
}
| 3.26 |
flink_LocalProperties_isTrivial_rdh
|
/**
* Checks, if the properties in this object are trivial, i.e. only standard values.
*/
public boolean isTrivial() {
return ((ordering == null) && (this.groupedFields == null)) && (this.uniqueFields == null);
}
| 3.26 |
flink_FlinkPipelineTranslationUtil_getJobGraph_rdh
|
/**
* Transmogrifies the given {@link Pipeline} to a {@link JobGraph}.
*/
public static JobGraph getJobGraph(ClassLoader userClassloader, Pipeline pipeline, Configuration optimizerConfiguration, int defaultParallelism) {
FlinkPipelineTranslator pipelineTranslator = getPipelineTranslator(userClassloader, pipeline);
return pipelineTranslator.translateToJobGraph(pipeline, optimizerConfiguration, defaultParallelism);
}
| 3.26 |
flink_FlinkPipelineTranslationUtil_getJobGraphUnderUserClassLoader_rdh
|
/**
* Transmogrifies the given {@link Pipeline} under the userClassloader to a {@link JobGraph}.
*/
public static JobGraph getJobGraphUnderUserClassLoader(final ClassLoader userClassloader, final Pipeline pipeline, final Configuration configuration, final int defaultParallelism) {
final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(userClassloader);
return FlinkPipelineTranslationUtil.getJobGraph(userClassloader, pipeline, configuration, defaultParallelism);
} finally {
Thread.currentThread().setContextClassLoader(contextClassLoader);
}
}
| 3.26 |
flink_FlinkPipelineTranslationUtil_translateToJSONExecutionPlan_rdh
|
/**
* Extracts the execution plan (as JSON) from the given {@link Pipeline}.
*/
public static String translateToJSONExecutionPlan(ClassLoader userClassloader, Pipeline pipeline) {
FlinkPipelineTranslator pipelineTranslator = getPipelineTranslator(userClassloader, pipeline);
return pipelineTranslator.translateToJSONExecutionPlan(pipeline);
}
| 3.26 |
flink_NettyMessageClientDecoderDelegate_channelInactive_rdh
|
/**
* Releases resources when the channel is closed. When exceptions are thrown during processing
* received netty buffers, {@link CreditBasedPartitionRequestClientHandler} is expected to catch
* the exception and close the channel and trigger this notification.
*
* @param ctx
* The context of the channel close notification.
*/
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
IOUtils.cleanup(LOG, bufferResponseDecoder, nonBufferResponseDecoder);
frameHeaderBuffer.release();
super.channelInactive(ctx);
}
| 3.26 |
flink_HighAvailabilityServices_getWebMonitorLeaderElection_rdh
|
/**
* Gets the {@link LeaderElection} for the cluster's rest endpoint.
*
* @deprecated Use {@link #getClusterRestEndpointLeaderElection()} instead.
*/
@Deprecated
default LeaderElection getWebMonitorLeaderElection() {
throw new UnsupportedOperationException(((("getWebMonitorLeaderElectionService should no longer be used. Instead use " + "#getClusterRestEndpointLeaderElectionService to instantiate the cluster ") + "rest endpoint's leader election service. If you called this method, then ") + "make sure that #getClusterRestEndpointLeaderElectionService has been ") + "implemented by your HighAvailabilityServices implementation.");
}
| 3.26 |
flink_HighAvailabilityServices_getWebMonitorLeaderRetriever_rdh
|
/**
* This retriever should no longer be used on the cluster side. The web monitor retriever is
* only required on the client-side and we have a dedicated high-availability services for the
* client, named {@link ClientHighAvailabilityServices}. See also FLINK-13750.
*
* @return the leader retriever for web monitor
* @deprecated just use {@link #getClusterRestEndpointLeaderRetriever()} instead of this method.
*/
@Deprecated
default LeaderRetrievalService getWebMonitorLeaderRetriever() {
throw new UnsupportedOperationException(((("getWebMonitorLeaderRetriever should no longer be used. Instead use " + "#getClusterRestEndpointLeaderRetriever to instantiate the cluster ") + "rest endpoint leader retriever. If you called this method, then ") + "make sure that #getClusterRestEndpointLeaderRetriever has been ") + "implemented by your HighAvailabilityServices implementation.");
}
| 3.26 |
flink_HighAvailabilityServices_closeWithOptionalClean_rdh
|
/**
* Calls {@link #cleanupAllData()} (if {@code true} is passed as a parameter) before calling
* {@link #close()} on this instance. Any error that appeared during the cleanup will be
* propagated after calling {@code close()}.
*/
default void closeWithOptionalClean(boolean cleanupData) throws Exception {
Throwable exception = null;
if (cleanupData) {
try {
cleanupAllData();
} catch (Throwable t) {
exception = ExceptionUtils.firstOrSuppressed(t, exception);
}
}
try {
close();
} catch (Throwable t) {
exception = ExceptionUtils.firstOrSuppressed(t, exception);
}
if (exception != null) {
ExceptionUtils.rethrowException(exception);
}
}
| 3.26 |
flink_HighAvailabilityServices_getClusterRestEndpointLeaderElection_rdh
|
/**
* Gets the {@link LeaderElection} for the cluster's rest endpoint.
*/
default LeaderElection getClusterRestEndpointLeaderElection() {
// for backwards compatibility we delegate to getWebMonitorLeaderElectionService
// all implementations of this interface should override
// getClusterRestEndpointLeaderElectionService, though
return getWebMonitorLeaderElection();
}
| 3.26 |
flink_ObjectIdentifier_toList_rdh
|
/**
* List of the component names of this object identifier.
*/
public List<String> toList() {
if (catalogName == null) {
return Collections.singletonList(getObjectName());
}
return Arrays.asList(getCatalogName(), m0(), getObjectName());
}
| 3.26 |
flink_ObjectIdentifier_toObjectPath_rdh
|
/**
* Convert this {@link ObjectIdentifier} to {@link ObjectPath}.
*
* @throws TableException
* if the identifier cannot be converted
*/
public ObjectPath toObjectPath() throws TableException {
if (catalogName == null) {
throw new TableException("This ObjectIdentifier instance refers to an anonymous object, " + "hence it cannot be converted to ObjectPath and cannot be serialized.");
}
return new ObjectPath(databaseName, objectName);
}
| 3.26 |
flink_ObjectIdentifier_asSerializableString_rdh
|
/**
* Returns a string that fully serializes this instance. The serialized string can be used for
* transmitting or persisting an object identifier.
*
* @throws TableException
* if the identifier cannot be serialized
*/
public String asSerializableString() throws TableException {
if (catalogName == null) {
throw new TableException("This ObjectIdentifier instance refers to an anonymous object, " + "hence it cannot be converted to ObjectPath and cannot be serialized.");
}
return String.format("%s.%s.%s", escapeIdentifier(catalogName), escapeIdentifier(databaseName), escapeIdentifier(objectName));
}
| 3.26 |
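A small sketch of the serializable form (assuming the usual three-part ObjectIdentifier.of factory; names are made up):

ObjectIdentifier id = ObjectIdentifier.of("my_catalog", "my_db", "my_table");
String serializable = id.asSerializableString(); // "`my_catalog`.`my_db`.`my_table`" with escaped identifiers
// The summary form (m1() in this extracted snippet) would be the unescaped "my_catalog.my_db.my_table".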
flink_ObjectIdentifier_m1_rdh
|
/**
* Returns a string that summarizes this instance for printing to a console or log.
*/
public String m1() {
if (catalogName == null) {
return objectName;
}
return String.join(".", catalogName, databaseName, objectName);
}
| 3.26 |
flink_ObjectIdentifier_ofAnonymous_rdh
|
/**
* This method allows to create an {@link ObjectIdentifier} without catalog and database name,
* in order to propagate anonymous objects with unique identifiers throughout the stack.
*
* <p>This method should not be exposed to users for any reason, as it is meant to be used only
* when creating anonymous tables with uniquely generated identifiers.
*/
static ObjectIdentifier ofAnonymous(String objectName) {
return new ObjectIdentifier(null, null, Preconditions.checkNotNull(objectName, "Object name must not be null."));
}
| 3.26 |
flink_NettyShuffleEnvironmentConfiguration_numNetworkBuffers_rdh
|
// ------------------------------------------------------------------------
public int numNetworkBuffers() {
return numNetworkBuffers;
}
| 3.26 |
flink_NettyShuffleEnvironmentConfiguration_m1_rdh
|
// ------------------------------------------------------------------------
@Override
public int m1() {
int result = 1;
result = (31 * result) + numNetworkBuffers;
result = (31 * result) + networkBufferSize;
result = (31 * result) + partitionRequestInitialBackoff;
result = (31 * result) + partitionRequestMaxBackoff;
result = (31 * result) + partitionRequestListenerTimeout;
result = (31 * result) + networkBuffersPerChannel;
result = (31 * result) + floatingNetworkBuffersPerGate;
result = (31 * result) + requestSegmentsTimeout.hashCode();
result = (31 * result) + (nettyConfig != null ? nettyConfig.hashCode() : 0);
result = (31 * result) + Arrays.hashCode(tempDirs);
result = (31 * result) + (batchShuffleCompressionEnabled ? 1 : 0);
result = (31 * result) + Objects.hashCode(compressionCodec);
result = (31 * result) + maxBuffersPerChannel;
result = (31 * result) + Objects.hashCode(batchShuffleReadMemoryBytes);
result = (31 * result) + sortShuffleMinBuffers;
result = (31 * result) + sortShuffleMinParallelism;
result = (31 * result) + maxNumberOfConnections;
result = (31 * result) + (connectionReuseEnabled ? 1 : 0);
result = (31 * result) + maxOverdraftBuffersPerGate;
return result;
}
| 3.26 |
flink_NettyShuffleEnvironmentConfiguration_calculateNumberOfNetworkBuffers_rdh
|
/**
* Calculates the number of network buffers based on configuration and jvm heap size.
*
* @param configuration
* configuration object
* @param networkMemorySize
* the size of memory reserved for shuffle environment
* @param pageSize
* size of memory segment
* @return the number of network buffers
*/
private static int calculateNumberOfNetworkBuffers(Configuration configuration, MemorySize networkMemorySize, int pageSize) {
logIfIgnoringOldConfigs(configuration);
// tolerate offcuts between intended and allocated memory due to segmentation (will be
// available to the user-space memory)
long v29 = networkMemorySize.getBytes() / pageSize;
if (v29 > Integer.MAX_VALUE) {
throw new IllegalArgumentException(("The given number of memory bytes (" + networkMemorySize.getBytes()) + ") corresponds to more than MAX_INT pages.");
}
return ((int) (v29));
}
| 3.26 |
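A quick arithmetic sketch of the division above (the sizes are illustrative): 128 MiB of network memory with 32 KiB memory segments yields 4096 buffers, and only counts above Integer.MAX_VALUE are rejected.

long networkBytes = 128L * 1024 * 1024; // networkMemorySize.getBytes()
int pageSize = 32 * 1024;               // memory segment size
long pages = networkBytes / pageSize;   // 4096, well below Integer.MAX_VALUE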
flink_NettyShuffleEnvironmentConfiguration_createNettyConfig_rdh
|
/**
* Generates {@link NettyConfig} from Flink {@link Configuration}.
*
* @param configuration
* configuration object
* @param localTaskManagerCommunication
* true, to skip initializing the network stack
* @param taskManagerAddress
* identifying the IP address under which the TaskManager will be
* accessible
* @param dataPortRange
* data port range for communication and data exchange
* @return the netty configuration or {@code null} if communication is in the same task manager
*/
@Nullable
private static NettyConfig createNettyConfig(Configuration configuration, boolean localTaskManagerCommunication, InetAddress taskManagerAddress, PortRange dataPortRange) {
final NettyConfig nettyConfig;
if (!localTaskManagerCommunication) {
final InetSocketAddress taskManagerInetSocketAddress = new InetSocketAddress(taskManagerAddress, 0);
nettyConfig = new NettyConfig(taskManagerInetSocketAddress.getAddress(), dataPortRange, ConfigurationParserUtils.getPageSize(configuration), ConfigurationParserUtils.getSlot(configuration), configuration);
} else {
nettyConfig = null;
}
return nettyConfig;
}
| 3.26 |
flink_NettyShuffleEnvironmentConfiguration_fromConfiguration_rdh
|
// ------------------------------------------------------------------------
/**
* Utility method to extract network related parameters from the configuration and to sanity
* check them.
*
* @param configuration
* configuration object
* @param networkMemorySize
* the size of memory reserved for shuffle environment
* @param localTaskManagerCommunication
* true, to skip initializing the network stack
* @param taskManagerAddress
* identifying the IP address under which the TaskManager will be
* accessible
* @return NettyShuffleEnvironmentConfiguration
*/
public static NettyShuffleEnvironmentConfiguration fromConfiguration(Configuration configuration, MemorySize networkMemorySize, boolean localTaskManagerCommunication, InetAddress taskManagerAddress) {
final PortRange dataBindPortRange = getDataBindPortRange(configuration);
final int pageSize = ConfigurationParserUtils.getPageSize(configuration);
final NettyConfig nettyConfig = createNettyConfig(configuration, localTaskManagerCommunication, taskManagerAddress, dataBindPortRange);
final int numberOfNetworkBuffers = calculateNumberOfNetworkBuffers(configuration, networkMemorySize, pageSize);
int initialRequestBackoff = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL);
int maxRequestBackoff = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX);
int listenerTimeout = ((int) (configuration.get(NettyShuffleEnvironmentOptions.NETWORK_PARTITION_REQUEST_TIMEOUT).toMillis()));
int buffersPerChannel = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_BUFFERS_PER_CHANNEL);
int extraBuffersPerGate = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_EXTRA_BUFFERS_PER_GATE);
Optional<Integer> maxRequiredBuffersPerGate = configuration.getOptional(NettyShuffleEnvironmentOptions.NETWORK_READ_MAX_REQUIRED_BUFFERS_PER_GATE);
int maxBuffersPerChannel = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_MAX_BUFFERS_PER_CHANNEL);
int maxOverdraftBuffersPerGate = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_MAX_OVERDRAFT_BUFFERS_PER_GATE);
long batchShuffleReadMemoryBytes = configuration.get(TaskManagerOptions.NETWORK_BATCH_SHUFFLE_READ_MEMORY).getBytes();
int sortShuffleMinBuffers = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_BUFFERS);
int sortShuffleMinParallelism = configuration.getInteger(NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_PARALLELISM);
boolean isNetworkDetailedMetrics = configuration.getBoolean(NettyShuffleEnvironmentOptions.NETWORK_DETAILED_METRICS);
String[] tempDirs = ConfigurationUtils.parseTempDirectories(configuration);
// Shuffle the data directories to make it fairer for directory selection between different
// TaskManagers, which is good for load balance especially when there are multiple disks.
List<String> v17 = Arrays.asList(tempDirs);
Collections.shuffle(v17);
Duration requestSegmentsTimeout = Duration.ofMillis(configuration.getLong(NettyShuffleEnvironmentOptions.NETWORK_EXCLUSIVE_BUFFERS_REQUEST_TIMEOUT_MILLISECONDS));
BoundedBlockingSubpartitionType blockingSubpartitionType = getBlockingSubpartitionType(configuration);
boolean batchShuffleCompressionEnabled = configuration.get(NettyShuffleEnvironmentOptions.BATCH_SHUFFLE_COMPRESSION_ENABLED);
String v21 = configuration.getString(NettyShuffleEnvironmentOptions.SHUFFLE_COMPRESSION_CODEC);
int maxNumConnections = Math.max(1, configuration.getInteger(NettyShuffleEnvironmentOptions.MAX_NUM_TCP_CONNECTIONS));
boolean connectionReuseEnabled = configuration.get(NettyShuffleEnvironmentOptions.TCP_CONNECTION_REUSE_ACROSS_JOBS_ENABLED);
int hybridShuffleSpilledIndexSegmentSize = configuration.get(NettyShuffleEnvironmentOptions.HYBRID_SHUFFLE_SPILLED_INDEX_REGION_GROUP_SIZE);
long hybridShuffleNumRetainedInMemoryRegionsMax = configuration.get(NettyShuffleEnvironmentOptions.HYBRID_SHUFFLE_NUM_RETAINED_IN_MEMORY_REGIONS_MAX);
checkArgument(buffersPerChannel >= 0, "Must be non-negative.");
checkArgument((!maxRequiredBuffersPerGate.isPresent()) || (maxRequiredBuffersPerGate.get() >= 1), String.format("At least one buffer is required for each gate, please increase the value of %s.", NettyShuffleEnvironmentOptions.NETWORK_READ_MAX_REQUIRED_BUFFERS_PER_GATE.key()));
checkArgument(extraBuffersPerGate >= 1, String.format("The configured floating buffer should be at least 1, please increase the value of %s.", NettyShuffleEnvironmentOptions.NETWORK_EXTRA_BUFFERS_PER_GATE.key()));
TieredStorageConfiguration tieredStorageConfiguration = null;
if (((configuration.get(BATCH_SHUFFLE_MODE) == ALL_EXCHANGES_HYBRID_FULL) || (configuration.get(BATCH_SHUFFLE_MODE) == ALL_EXCHANGES_HYBRID_SELECTIVE)) && configuration.getBoolean(NETWORK_HYBRID_SHUFFLE_ENABLE_NEW_MODE)) {
tieredStorageConfiguration = TieredStorageConfiguration.builder(pageSize, configuration.getString(NETWORK_HYBRID_SHUFFLE_REMOTE_STORAGE_BASE_PATH)).build();
}
return new NettyShuffleEnvironmentConfiguration(numberOfNetworkBuffers, pageSize, initialRequestBackoff, maxRequestBackoff, listenerTimeout, buffersPerChannel, extraBuffersPerGate, maxRequiredBuffersPerGate, requestSegmentsTimeout, isNetworkDetailedMetrics, nettyConfig, v17.toArray(tempDirs), blockingSubpartitionType, batchShuffleCompressionEnabled, v21, maxBuffersPerChannel, batchShuffleReadMemoryBytes, sortShuffleMinBuffers, sortShuffleMinParallelism, BufferDebloatConfiguration.fromConfiguration(configuration), maxNumConnections, connectionReuseEnabled, maxOverdraftBuffersPerGate, hybridShuffleSpilledIndexSegmentSize, hybridShuffleNumRetainedInMemoryRegionsMax, tieredStorageConfiguration);
}
| 3.26 |
flink_NettyShuffleEnvironmentConfiguration_getDataBindPortRange_rdh
|
/**
* Parses the hosts / ports for communication and data exchange from configuration.
*
* @param configuration
* configuration object
* @return the data port
*/
private static PortRange getDataBindPortRange(Configuration configuration) {
if (configuration.contains(NettyShuffleEnvironmentOptions.DATA_BIND_PORT)) {
String dataBindPort = configuration.getString(NettyShuffleEnvironmentOptions.DATA_BIND_PORT);
return new PortRange(dataBindPort);
}
int dataBindPort = configuration.getInteger(NettyShuffleEnvironmentOptions.DATA_PORT);
ConfigurationParserUtils.checkConfigParameter(dataBindPort >= 0, dataBindPort, NettyShuffleEnvironmentOptions.DATA_PORT.key(), "Leave config parameter empty or use 0 to let the system choose a port automatically.");
return new PortRange(dataBindPort);
}
| 3.26 |
flink_SingleInputSemanticProperties_m0_rdh
|
/**
* Adds, to the existing information, a field that is forwarded directly from the source
* record(s) to the destination record(s).
*
* @param sourceField
* the position in the source record(s)
* @param targetField
* the position in the destination record(s)
*/
public void m0(int sourceField, int targetField) {
if (isTargetFieldPresent(targetField)) {
throw new InvalidSemanticAnnotationException(("Target field " + targetField) + " was added twice.");
}
FieldSet targetFields = fieldMapping.get(sourceField);
if (targetFields != null) {
fieldMapping.put(sourceField, targetFields.addField(targetField));
} else {
fieldMapping.put(sourceField, new FieldSet(targetField));
}
}
| 3.26 |
flink_SingleInputSemanticProperties_addReadFields_rdh
|
/**
* Adds, to the existing information, field(s) that are read in the source record(s).
*
* @param readFields
* the position(s) in the source record(s)
*/
public void addReadFields(FieldSet readFields) {
if (this.readFields == null) {
this.readFields = readFields;
} else {
this.readFields = this.readFields.addFields(readFields);
}
}
| 3.26 |
flink_RegisteredRpcConnection_createNewRegistration_rdh
|
// ------------------------------------------------------------------------
// Internal methods
// ------------------------------------------------------------------------
private RetryingRegistration<F, G, S, R> createNewRegistration() {
RetryingRegistration<F, G, S, R> newRegistration = checkNotNull(generateRegistration());
CompletableFuture<RetryingRegistration.RetryingRegistrationResult<G, S, R>> future = newRegistration.getFuture();
future.whenCompleteAsync((RetryingRegistration.RetryingRegistrationResult<G, S, R> result,Throwable failure) -> {
if (failure != null) {
if (failure instanceof CancellationException) {
// we ignore cancellation exceptions because they originate from
// cancelling
// the RetryingRegistration
log.debug("Retrying registration towards {} was cancelled.", targetAddress);
} else {
// this future should only ever fail if there is a bug, not if the
// registration is declined
onRegistrationFailure(failure);
}
} else if (result.isSuccess()) {
targetGateway = result.getGateway();
onRegistrationSuccess(result.getSuccess());
} else if (result.isRejection()) {
onRegistrationRejection(result.getRejection());
} else {
throw new IllegalArgumentException(String.format("Unknown retrying registration response: %s.", result));
}
}, executor);
return newRegistration;
}
| 3.26 |
flink_RegisteredRpcConnection_start_rdh
|
// ------------------------------------------------------------------------
// Life cycle
// ------------------------------------------------------------------------
public void start() {
checkState(!closed, "The RPC connection is already closed");
checkState((!isConnected()) && (pendingRegistration == null), "The RPC connection is already started");
final RetryingRegistration<F, G, S, R> newRegistration = createNewRegistration();
if (REGISTRATION_UPDATER.compareAndSet(this, null, newRegistration)) {
newRegistration.startRegistration();
} else {
// concurrent start operation
newRegistration.cancel();
}
}
/**
* Tries to reconnect to the {@link #targetAddress} by cancelling the pending registration and
* starting a new pending registration.
*
* @return {@code false} if the connection has been closed or a concurrent modification has
happened; otherwise {@code true}
| 3.26 |
flink_RegisteredRpcConnection_getTargetLeaderId_rdh
|
// ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public F getTargetLeaderId() {
return fencingToken;
}
| 3.26 |
flink_RegisteredRpcConnection_toString_rdh
|
// ------------------------------------------------------------------------
@Override
public String toString() {
String connectionInfo = ((("(ADDRESS: " + targetAddress) + " FENCINGTOKEN: ") + fencingToken) + ")";
if (isConnected()) {
connectionInfo = (("RPC connection to " + targetGateway.getClass().getSimpleName()) + " ") + connectionInfo;
} else {
connectionInfo = "RPC connection to " + connectionInfo;
}
if (isClosed()) {
connectionInfo += " is closed";
} else if (isConnected()) {
connectionInfo += " is established";
} else {
connectionInfo += " is connecting";
}
return connectionInfo;
}
| 3.26 |
flink_RegisteredRpcConnection_close_rdh
|
/**
* Close connection.
*/
public void close() {
closed = true;
// make sure we do not keep re-trying forever
if (pendingRegistration != null) {
pendingRegistration.cancel();
}
}
| 3.26 |
flink_RegisteredRpcConnection_getTargetGateway_rdh
|
/**
* Gets the RegisteredGateway. This returns null until the registration is completed.
*/
public G getTargetGateway() {
return targetGateway;
}
| 3.26 |
flink_ChangelogCollectResult_processRecord_rdh
|
// --------------------------------------------------------------------------------------------
@Override
protected void processRecord(RowData row) {
synchronized(resultLock) {
// wait if the buffer is full
if (changeRecordBuffer.size() >= CHANGE_RECORD_BUFFER_SIZE) {
try {
resultLock.wait();
} catch (InterruptedException e) {
// ignore
}
}
changeRecordBuffer.add(row);
}
}
| 3.26 |
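processRecord above blocks the producing thread once the buffer holds CHANGE_RECORD_BUFFER_SIZE rows and relies on the consumer side (not shown in this snippet) to call notifyAll() on resultLock after draining. The self-contained sketch below shows that producer/consumer handshake with illustrative names; unlike the snippet, it uses the conventional while-loop recheck around wait().

import java.util.ArrayDeque;
import java.util.Deque;

public class BoundedChangeBuffer<T> {

    private final Object lock = new Object();
    private final Deque<T> buffer = new ArrayDeque<>();
    private final int capacity;

    public BoundedChangeBuffer(int capacity) {
        this.capacity = capacity;
    }

    /** Producer side: blocks while the buffer is full (mirrors processRecord above). */
    public void add(T element) throws InterruptedException {
        synchronized (lock) {
            while (buffer.size() >= capacity) {
                lock.wait();
            }
            buffer.add(element);
        }
    }

    /** Consumer side: drains everything and wakes up any blocked producer. */
    public Deque<T> drain() {
        synchronized (lock) {
            Deque<T> copy = new ArrayDeque<>(buffer);
            buffer.clear();
            lock.notifyAll();
            return copy;
        }
    }
}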
flink_RowKind_fromByteValue_rdh
|
/**
* Creates a {@link RowKind} from the given byte value. Each {@link RowKind} has a byte value
* representation.
*
* @see #toByteValue() for mapping of byte value and {@link RowKind}.
*/
public static RowKind fromByteValue(byte value) {
switch (value) {
case 0 :
return f0;
case 1 :
return UPDATE_BEFORE;
case 2 :
return UPDATE_AFTER;
case 3 :
return f1;
default :
throw new UnsupportedOperationException(("Unsupported byte value '" + value) + "' for row kind.");
}
}
| 3.26 |
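Judging by the byte values in the switch, the obfuscated constants f0 and f1 stand for INSERT (byte 0) and DELETE (byte 3) of Flink's public org.apache.flink.types.RowKind enum. A short round-trip sketch against that public API (assuming a Flink dependency is on the classpath):

import org.apache.flink.types.RowKind;

public class RowKindRoundTrip {
    public static void main(String[] args) {
        // toByteValue() and fromByteValue() are inverses for all four kinds.
        for (RowKind kind : RowKind.values()) {
            byte b = kind.toByteValue();
            System.out.println(kind + " -> " + b + " -> " + RowKind.fromByteValue(b));
        }
        // An unknown byte value is rejected, as in the switch above.
        try {
            RowKind.fromByteValue((byte) 42);
        } catch (UnsupportedOperationException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}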
flink_AndCondition_getLeft_rdh
|
/**
*
* @return One of the {@link IterativeCondition conditions} combined in this condition.
*/
public IterativeCondition<T> getLeft() {
return f0;
}
| 3.26 |
flink_AndCondition_getRight_rdh
|
/**
*
* @return One of the {@link IterativeCondition conditions} combined in this condition.
*/
public IterativeCondition<T> getRight() {
return right;
}
| 3.26 |
flink_BooleanParser_byteArrayEquals_rdh
|
/**
* Checks if a part of a byte array matches another byte array with chars (case-insensitive).
*
* @param source
* The source byte array.
* @param start
* The offset into the source byte array.
* @param length
* The length of the match.
* @param other
* The byte array which is fully compared to the part of the source array.
* @return true if other can be found in the specified part of source, false otherwise.
*/
private static boolean byteArrayEquals(byte[] source, int start, int length, byte[] other) {
if (length != other.length) {
return false;
}
for (int i = 0; i < other.length; i++) {
if (Character.toLowerCase(source[i + start]) != other[i]) {
return false;
}
}
return true;
}
| 3.26 |
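Note that only the source bytes are lower-cased, so the check is case-insensitive only if other is already lower case (which presumably holds for the parser's constant patterns such as "true" and "false"). A standalone sketch of the same comparison, with illustrative names:

import java.nio.charset.StandardCharsets;

public class CaseInsensitiveByteMatch {

    /** Mirrors byteArrayEquals above: lowerCasePattern must already be lower case. */
    static boolean matches(byte[] source, int start, int length, byte[] lowerCasePattern) {
        if (length != lowerCasePattern.length) {
            return false;
        }
        for (int i = 0; i < lowerCasePattern.length; i++) {
            if (Character.toLowerCase(source[start + i]) != lowerCasePattern[i]) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        byte[] line = "x=TRUE;".getBytes(StandardCharsets.US_ASCII);
        byte[] pattern = "true".getBytes(StandardCharsets.US_ASCII);
        System.out.println(matches(line, 2, 4, pattern)); // true: "TRUE" matches "true"
        System.out.println(matches(line, 0, 4, pattern)); // false: "x=TR" does not
    }
}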
flink_DataStatistics_cacheBaseStatistics_rdh
|
/**
* Caches the given statistics. They are later retrievable under the given identifier.
*
* @param statistics
* The statistics to cache.
* @param identifier
* The identifier which may be later used to retrieve the statistics.
*/
public void cacheBaseStatistics(BaseStatistics statistics, String identifier) {
synchronized(this.baseStatisticsCache) {
this.baseStatisticsCache.put(identifier, statistics);
}
}
| 3.26 |
flink_DataStatistics_m0_rdh
|
// --------------------------------------------------------------------------------------------
/**
* Gets the base statistics for the input identified by the given identifier.
*
* @param inputIdentifier
* The identifier for the input.
* @return The statistics that were cached for this input.
*/
public BaseStatistics m0(String inputIdentifier) {
synchronized(this.baseStatisticsCache) {
return this.baseStatisticsCache.get(inputIdentifier);
}
}
| 3.26 |
flink_LinkedListSerializer_isImmutableType_rdh
|
// ------------------------------------------------------------------------
// Type Serializer implementation
// ------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
}
| 3.26 |
flink_LinkedListSerializer_equals_rdh
|
// --------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
return (obj == this) || (((obj != null) && (obj.getClass() == getClass())) && elementSerializer.equals(((LinkedListSerializer<?>) (obj)).elementSerializer));
}
| 3.26 |
flink_LinkedListSerializer_getElementSerializer_rdh
|
// ------------------------------------------------------------------------
// LinkedListSerializer specific properties
// ------------------------------------------------------------------------
/**
* Gets the serializer for the elements of the list.
*
* @return The serializer for the elements of the list
*/
public TypeSerializer<T> getElementSerializer() {
return elementSerializer;
}
| 3.26 |
flink_LinkedListSerializer_snapshotConfiguration_rdh
|
// --------------------------------------------------------------------------------------------
// Serializer configuration snapshot & compatibility
// --------------------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<LinkedList<T>> snapshotConfiguration() {
return new LinkedListSerializerSnapshot<>(this);
}
/**
* Snapshot class for the {@link LinkedListSerializer}
| 3.26 |
flink_DispatcherId_fromUuid_rdh
|
/**
* Creates a new DispatcherId that corresponds to the UUID.
*/
public static DispatcherId fromUuid(UUID uuid) {
return new DispatcherId(uuid);
}
| 3.26 |
flink_DispatcherId_generate_rdh
|
/**
* Generates a new random DispatcherId.
*/
public static DispatcherId generate() {
return new DispatcherId();
}
| 3.26 |
flink_AbstractReader_handleEvent_rdh
|
/**
* Handles the event and returns whether the reader reached an end-of-stream event (either the
* end of the whole stream or the end of a superstep).
*/
protected boolean handleEvent(AbstractEvent event) throws IOException {
final Class<?> eventType = event.getClass();
try {
// ------------------------------------------------------------
// Runtime events
// ------------------------------------------------------------
// This event is also checked at the (single) input gate to release the respective
// channel, at which it was received.
if (eventType == EndOfPartitionEvent.class) {
return true;
} else if (eventType == EndOfSuperstepEvent.class) {
return incrementEndOfSuperstepEventAndCheck();
} else if (event instanceof TaskEvent) {
taskEventHandler.publish(((TaskEvent) (event)));
return false;
} else {
throw new IllegalStateException(("Received unexpected event of type " + eventType) + " at reader.");
}
} catch (Throwable t) {
throw new IOException((("Error while handling event of type " + eventType) + ": ") + t.getMessage(), t);
}
}
| 3.26 |
flink_AbstractReader_registerTaskEventListener_rdh
|
// ------------------------------------------------------------------------
// Events
// ------------------------------------------------------------------------
@Override
public void registerTaskEventListener(EventListener<TaskEvent> listener, Class<? extends TaskEvent> eventType) {
taskEventHandler.subscribe(listener, eventType);
}
| 3.26 |
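registerTaskEventListener simply forwards to a handler that keeps listeners keyed by event class, and handleEvent above publishes TaskEvents through it. A minimal sketch of such a type-keyed dispatcher; the names are illustrative and do not reflect Flink's TaskEventHandler API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

public class TypedEventDispatcher<E> {

    private final Map<Class<? extends E>, List<Consumer<E>>> listeners = new HashMap<>();

    /** Registers a listener for one concrete event type. */
    public void subscribe(Class<? extends E> eventType, Consumer<E> listener) {
        listeners.computeIfAbsent(eventType, k -> new ArrayList<>()).add(listener);
    }

    /** Delivers the event to every listener registered for its exact class. */
    public void publish(E event) {
        List<Consumer<E>> subscribed = listeners.get(event.getClass());
        if (subscribed != null) {
            subscribed.forEach(listener -> listener.accept(event));
        }
    }

    public static void main(String[] args) {
        TypedEventDispatcher<Object> dispatcher = new TypedEventDispatcher<>();
        dispatcher.subscribe(String.class, event -> System.out.println("got: " + event));
        dispatcher.publish("superstep finished"); // printed
        dispatcher.publish(42);                   // no listener for Integer, silently dropped
    }
}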
flink_AbstractReader_setIterativeReader_rdh
|
// ------------------------------------------------------------------------
// Iterations
// ------------------------------------------------------------------------
@Override
public void setIterativeReader() {
isIterative = true;
}
| 3.26 |
flink_RateLimiterStrategy_perCheckpoint_rdh
|
/**
* Creates a {@code RateLimiterStrategy} that is limiting the number of records per checkpoint.
*
* @param recordsPerCheckpoint
* The number of records produced per checkpoint. This value has to
* be greater than or equal to the parallelism. The actual number of produced records is subject to
* rounding due to dividing the number of produced records among the parallel instances.
*/
static RateLimiterStrategy perCheckpoint(int recordsPerCheckpoint) {
return parallelism -> {
int recordsPerSubtask = recordsPerCheckpoint / parallelism;
checkArgument(recordsPerSubtask > 0, ("recordsPerCheckpoint has to be greater or equal to parallelism. " +
"Either decrease the parallelism or increase the number of ") + "recordsPerCheckpoint.");
return new GatedRateLimiter(recordsPerSubtask);
};
}
| 3.26 |
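A worked example of the rounding noted above: the integer division means the effective number of records can be lower than requested, and any value below the parallelism fails the checkArgument.

public class PerCheckpointRounding {
    public static void main(String[] args) {
        int recordsPerCheckpoint = 10;
        int parallelism = 4;
        int recordsPerSubtask = recordsPerCheckpoint / parallelism; // 2 (integer division)
        int effectiveRecords = recordsPerSubtask * parallelism;     // 8, not the requested 10
        System.out.println(recordsPerSubtask + " records per subtask, "
                + effectiveRecords + " records per checkpoint in total");
    }
}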
flink_RateLimiterStrategy_noOp_rdh
|
/**
* Creates a convenience {@code RateLimiterStrategy} that is not limiting the records rate.
*/
static RateLimiterStrategy noOp() {
return parallelism -> new NoOpRateLimiter();
}
| 3.26 |
flink_RateLimiterStrategy_perSecond_rdh
|
/**
* Creates a {@code RateLimiterStrategy} that is limiting the number of records per second.
*
* @param recordsPerSecond
* The number of records produced per second. The actual number of
* produced records is subject to rounding due to dividing the number of produced records
* among the parallel instances.
*/
static RateLimiterStrategy perSecond(double recordsPerSecond) {
return parallelism -> new GuavaRateLimiter(recordsPerSecond / parallelism);
}
| 3.26 |
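A hedged usage sketch: in recent Flink releases these strategies are typically handed to a DataGeneratorSource. The package names and the four-argument constructor below are assumptions about the Flink version on the classpath, so treat this as a sketch rather than the definitive API.

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.connector.source.util.ratelimit.RateLimiterStrategy;
import org.apache.flink.connector.datagen.source.DataGeneratorSource;
import org.apache.flink.connector.datagen.source.GeneratorFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RateLimitedGeneratorJob {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        GeneratorFunction<Long, String> generator = index -> "record-" + index;

        // Emit 1000 records in total, at most 100 records per second across all subtasks.
        DataGeneratorSource<String> source = new DataGeneratorSource<>(
                generator, 1000, RateLimiterStrategy.perSecond(100), Types.STRING);

        env.fromSource(source, WatermarkStrategy.noWatermarks(), "rate-limited-generator")
                .print();

        env.execute("Rate-limited data generator");
    }
}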
flink_ExecNodePlanDumper_treeToString_rdh
|
/**
* Converts an {@link ExecNode} tree to a string as a tree style.
*
* <p>The following tree of {@link ExecNode}
*
* <pre>{@code Sink
* |
* Join
* / \
* Filter1 Filter2
* \ /
* Project
* |
* Scan}</pre>
*
* <p>would be converted to the tree style as following:
*
* <pre>{@code Sink
* +- Join
* :- Filter1
* : +- Project(reuse_id=[1])
* : +- Scan
* +- Filter2
* +- Reused(reference_id=[1])
* }</pre>
*
* @param node
* the ExecNode to convert
* @return explain plan of ExecNode
*/
public static String treeToString(ExecNode<?> node) {
return m0(node, new ArrayList<>(), false);
}
| 3.26 |
flink_ExecNodePlanDumper_getReuseId_rdh
|
/**
* Returns the reuse id if the given node is a reuse node (that is, a node with multiple
* outputs), otherwise -1.
*/
public Integer getReuseId(ExecNode<?> node) {
return mapReuseNodeToReuseId.getOrDefault(node, -1);
}
| 3.26 |
flink_ExecNodePlanDumper_m0_rdh
|
/**
* Converts an {@link ExecNode} tree to a string as a tree style.
*
* @param node
* the ExecNode to convert
* @param borders
* node sets that stop visit when meet them
* @param includingBorders
* Whether print the border nodes
* @return the plan of ExecNode
*/
public static String m0(ExecNode<?> node, List<ExecNode<?>> borders, boolean includingBorders) {
checkNotNull(node, "node should not be null.");
// convert to mutable list
List<ExecNode<?>> borderList = new ArrayList<>(checkNotNull(borders, "borders should not be null."));
TreeReuseInfo reuseInfo = new TreeReuseInfo(node, borderList);
return doConvertTreeToString(node, reuseInfo, true, borderList, includingBorders);
}
| 3.26 |
flink_ExecNodePlanDumper_addVisitedTimes_rdh
|
/**
* Increments the visited count for the given node and returns the new count.
*/
int addVisitedTimes(ExecNode<?> node) {
return mapNodeToVisitedTimes.compute(node, (k, v) -> v == null ? 1 : v + 1);
}
| 3.26 |
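The compute call above bumps a per-key counter in a single map call; Map.merge expresses the same idiom a little more compactly, as this tiny illustration shows.

import java.util.HashMap;
import java.util.Map;

public class VisitCounter {
    public static void main(String[] args) {
        Map<String, Integer> visits = new HashMap<>();

        // Equivalent to the compute(...) pattern used in addVisitedTimes above.
        int first = visits.merge("nodeA", 1, Integer::sum);  // 1
        int second = visits.merge("nodeA", 1, Integer::sum); // 2

        System.out.println(first + " then " + second);
    }
}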