name | code_snippet | score
---|---|---|
flink_Configuration_removeConfig_rdh | /**
* Removes given config option from the configuration.
*
* @param configOption
* config option to remove
* @param <T>
* Type of the config option
* @return true if the config has been removed, false otherwise
*/
public <T> boolean removeConfig(ConfigOption<T> configOption) {
synchronized(this.confData) {
final BiFunction<String, Boolean, Optional<Boolean>> applier = (key, canBePrefixMap) -> {
if ((canBePrefixMap && removePrefixMap(this.confData, key)) || (this.confData.remove(key) != null)) {
return Optional.of(true);
}
return Optional.empty();
};
return applyWithOption(configOption, applier).orElse(false);
}
} | 3.26 |
flink_Configuration_setFloat_rdh | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key
* the option specifying the key to be added
* @param value
* the value of the key/value pair to be added
*/
@PublicEvolving
public void setFloat(ConfigOption<Float> key, float value) {
setValueInternal(key.key(), value);
} | 3.26 |
flink_Configuration_getRawValueFromOption_rdh | /**
* This method will do the following steps to get the value of a config option:
*
* <p>1. get the value from {@link Configuration}. <br>
* 2. if key is not found, try to get the value with fallback keys from {@link Configuration}
* <br>
* 3. if no fallback keys are found, return {@link Optional#empty()}. <br>
*
* @return the value of the configuration or {@link Optional#empty()}.
*/
private Optional<Object> getRawValueFromOption(ConfigOption<?> configOption) {
return applyWithOption(configOption, this::getRawValue);
} | 3.26 |
flink_Configuration_addAllToProperties_rdh | /**
* Adds all entries in this {@code Configuration} to the given {@link Properties}.
*/
public void addAllToProperties(Properties props) {
synchronized(this.confData) {
for (Map.Entry<String, Object> entry : this.confData.entrySet()) {
props.put(entry.getKey(), entry.getValue());
}
}
} | 3.26 |
flink_Configuration_setClass_rdh | /**
* Adds the given key/value pair to the configuration object. The class can be retrieved by
* invoking {@link #getClass(String, Class, ClassLoader)} if it is in the scope of the class
* loader on the caller.
*
* @param key
* The key of the pair to be added
* @param klazz
* The value of the pair to be added
* @see #getClass(String, Class, ClassLoader)
*/
public void setClass(String key, Class<?> klazz) {
setValueInternal(key, klazz.getName());
} | 3.26 |
flink_Configuration_getLong_rdh | /**
* Returns the value associated with the given config option as a long integer. If no value is
* mapped under any key of the option, it returns the specified default instead of the option's
* default value.
*
* @param configOption
* The configuration option
* @param overrideDefault
* The value to return if no value was mapped for any key of the option
* @return the configured value associated with the given config option, or the overrideDefault
*/
@PublicEvolving
public long getLong(ConfigOption<Long> configOption, long overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.26 |
flink_Configuration_getValue_rdh | /**
* Returns the value associated with the given config option as a string.
*
* @param configOption
* The configuration option
* @return the (default) value associated with the given config option
*/
@PublicEvolving
public String getValue(ConfigOption<?> configOption) {
return Optional.ofNullable(getRawValueFromOption(configOption).orElseGet(configOption::defaultValue)).map(String::valueOf).orElse(null);
} | 3.26 |
flink_Configuration_toMap_rdh | // --------------------------------------------------------------------------------------------
@Override
public Map<String, String> toMap() {
synchronized(this.confData) {
Map<String, String> ret = CollectionUtil.newHashMapWithExpectedSize(this.confData.size());
for (Map.Entry<String, Object> entry : confData.entrySet()) {
ret.put(entry.getKey(), ConfigurationUtils.convertToString(entry.getValue()));
}
return ret;
}
} | 3.26 |
flink_Configuration_setLong_rdh | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key
* the option specifying the key to be added
* @param value
* the value of the key/value pair to be added
*/
@PublicEvolving
public void setLong(ConfigOption<Long> key, long value) {
setValueInternal(key.key(), value);
} | 3.26 |
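A minimal usage sketch (not part of the source) tying together the Configuration accessors shown in the rows above; the option name and values are hypothetical, and Flink's ConfigOptions builder plus the org.apache.flink.configuration imports are assumed.

static void configurationRoundTrip() {
ConfigOption<Long> timeout = ConfigOptions.key("example.timeout").longType().defaultValue(60L);
Configuration conf = new Configuration();
conf.setLong(timeout, 120L); // stored under the option's main key
long value = conf.getLong(timeout, 30L); // 120; the override default only applies when the option is unset
boolean removed = conf.removeConfig(timeout); // true, because an entry existed
}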
flink_Configuration_containsKey_rdh | /**
* Checks whether there is an entry with the specified key.
*
* @param key
* key of entry
* @return true if the key is stored, false otherwise
*/
public boolean containsKey(String key) {
synchronized(this.confData) {
return this.confData.containsKey(key);
}
} | 3.26 |
flink_Configuration_setBytes_rdh | /**
* Adds the given byte array to the configuration object. If key is <code>null</code> then
* nothing is added.
*
* @param key
* The key under which the bytes are added.
* @param bytes
* The bytes to be added.
*/
public void setBytes(String key, byte[] bytes) {
setValueInternal(key, bytes);
} | 3.26 |
flink_Configuration_getDouble_rdh | /**
* Returns the value associated with the given config option as a {@code double}.
*
* @param configOption
* The configuration option
* @return the (default) value associated with the given config option
*/
@PublicEvolving
public double getDouble(ConfigOption<Double> configOption) {
return getOptional(configOption).orElseGet(configOption::defaultValue);
}
/**
* Returns the value associated with the given config option as a {@code double} | 3.26 |
flink_Configuration_read_rdh | // --------------------------------------------------------------------------------------------
// Serialization
// --------------------------------------------------------------------------------------------
@Override
public void read(DataInputView in) throws IOException {
synchronized(this.confData) {
final int v23 = in.readInt();
for (int i = 0; i < v23; i++) {
String key = StringValue.readString(in);
Object value;
byte type = in.readByte();
switch (type) {
case TYPE_STRING :
value = StringValue.readString(in);
break;
case TYPE_INT :
value = in.readInt();
break;
case TYPE_LONG :
value = in.readLong();
break;
case TYPE_FLOAT :
value = in.readFloat();
break;
case TYPE_DOUBLE :
value = in.readDouble();
break;
case TYPE_BOOLEAN :
value = in.readBoolean();
break;
case TYPE_BYTES :
byte[] bytes = new byte[in.readInt()];
in.readFully(bytes);
value = bytes;
break;
default :
throw new IOException(String.format("Unrecognized type: %s. This method is deprecated and" + " might not work for all supported types.", type));
}
this.confData.put(key, value);
}
}
} | 3.26 |
flink_PojoComparator_accessField_rdh | /**
* Handles the IllegalAccessException thrown by Field.get().
*/
public final Object accessField(Field field, Object object) {
try {
object = field.get(object);
} catch (NullPointerException npex) {
throw new NullKeyFieldException((("Unable to access field " + field) + " on object ") + object);
} catch (IllegalAccessException iaex) {
throw new RuntimeException(((("This should not happen since we call setAccessible(true) in the ctor." + " fields: ") + field) + " obj: ") + object);
}
return object;
} | 3.26 |
flink_OrcShimV200_computeProjectionMask_rdh | /**
* Computes the ORC projection mask of the fields to include from the selected
* fields.
*
* @return The ORC projection mask.
*/
public static boolean[] computeProjectionMask(TypeDescription schema, int[] selectedFields) {
// mask with all fields of the schema
boolean[] projectionMask = new boolean[schema.getMaximumId() + 1];
// for each selected field
for (int inIdx : selectedFields) {
// set all nested fields of a selected field to true
TypeDescription fieldSchema = schema.getChildren().get(inIdx);
for (int i = fieldSchema.getId(); i <= fieldSchema.getMaximumId(); i++) {
projectionMask[i] = true;
}
}
return projectionMask;
} | 3.26 |
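To make the id-based masking concrete, here is a small illustrative sketch (not from the source); the schema is made up and ORC's TypeDescription.fromString is assumed.

static boolean[] projectionMaskExample() {
// ORC assigns ids in pre-order: root = 0, a = 1, b = 2, c = 3, d = 4
TypeDescription schema = TypeDescription.fromString("struct<a:int,b:struct<c:string,d:double>>");
// selecting field index 1 ("b") sets mask[2..4] = true; "a" and the root stay false
return computeProjectionMask(schema, new int[]{ 1 });
}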
flink_MetricGroup_addGroup_rdh | // Groups
// ------------------------------------------------------------------------
/**
* Creates a new MetricGroup and adds it to this group's sub-groups.
*
* @param name
* name of the group
* @return the created group
*/
default MetricGroup addGroup(int name) {
return addGroup(String.valueOf(name));
} | 3.26 |
flink_MetricGroup_gauge_rdh | /**
* Registers a new {@link org.apache.flink.metrics.Gauge} with Flink.
*
* @param name
* name of the gauge
* @param gauge
* gauge to register
* @param <T>
* return type of the gauge
* @return the given gauge
*/
default <T, G extends Gauge<T>> G gauge(int name, G gauge) {
return gauge(String.valueOf(name), gauge);
} | 3.26 |
flink_MetricGroup_histogram_rdh | /**
* Registers a new {@link Histogram} with Flink.
*
* @param name
* name of the histogram
* @param histogram
* histogram to register
* @param <H>
* histogram type
* @return the registered histogram
*/
default <H extends Histogram> H histogram(int name, H histogram) {
return histogram(String.valueOf(name), histogram);
} | 3.26 |
flink_MetricGroup_meter_rdh | /**
* Registers a new {@link Meter} with Flink.
*
* @param name
* name of the meter
* @param meter
* meter to register
* @param <M>
* meter type
* @return the registered meter
*/
default <M extends Meter> M meter(int name, M meter) {
return meter(String.valueOf(name), meter);
} | 3.26 |
flink_MetricGroup_counter_rdh | /**
* Registers a {@link org.apache.flink.metrics.Counter} with Flink.
*
* @param name
* name of the counter
* @param counter
* counter to register
* @param <C>
* counter type
* @return the given counter
*/
default <C extends Counter> C counter(int name, C counter) {
return counter(String.valueOf(name), counter);
} | 3.26 |
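A hedged usage sketch (not from the source) of the int-named overloads above, which simply convert the index to a String and delegate; the MetricGroup instance is assumed to come from the runtime context, and SimpleCounter from org.apache.flink.metrics is assumed.

static void registerIndexedMetrics(MetricGroup metrics) {
MetricGroup partition = metrics.addGroup(3); // equivalent to addGroup("3")
Counter records = partition.counter(0, new SimpleCounter());
partition.gauge(1, (Gauge<Long>) System::currentTimeMillis);
}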
flink_FlinkUserCodeClassLoader_loadClassWithoutExceptionHandling_rdh | /**
* Same as {@link #loadClass(String, boolean)} but without exception handling.
*
* <p>Extending concrete class loaders should implement this instead of {@link #loadClass(String, boolean)}.
*/
protected Class<?> loadClassWithoutExceptionHandling(String name, boolean resolve) throws ClassNotFoundException {
return super.loadClass(name, resolve);
} | 3.26 |
flink_EmbeddedHaServices_getDispatcherLeaderService_rdh | // ------------------------------------------------------------------------
// internal
// ------------------------------------------------------------------------
EmbeddedLeaderService getDispatcherLeaderService() {
return dispatcherLeaderService;
} | 3.26 |
flink_EmbeddedHaServices_getResourceManagerLeaderRetriever_rdh | // ------------------------------------------------------------------------
// services
// ------------------------------------------------------------------------
@Override
public LeaderRetrievalService getResourceManagerLeaderRetriever() {
return resourceManagerLeaderService.createLeaderRetrievalService();
} | 3.26 |
flink_GeneratedClass_compile_rdh | /**
* Compiles the generated code, the compiled class will be cached in the {@link GeneratedClass}.
*/
public Class<T> compile(ClassLoader classLoader) {
if (compiledClass == null) {
// cache the compiled class
try {
// first try to compile the split code
compiledClass = CompileUtils.compile(classLoader, className, splitCode);
} catch (Throwable t) {
// compile the original code as fallback
LOG.warn("Failed to compile split code, falling back to original code", t);
compiledClass = CompileUtils.compile(classLoader, className, code);
}
}
return compiledClass;
} | 3.26 |
flink_GeneratedClass_newInstance_rdh | /**
* Create a new instance of this generated class.
*/
public T newInstance(ClassLoader classLoader) {
try {
// Because Constructor.newInstance(Object... initargs), we need to load
// references into a new Object[], otherwise it cannot be compiled.
return compile(classLoader).getConstructor(Object[].class).newInstance(new Object[]{ references });
} catch (Throwable e) {
throw new RuntimeException(("Could not instantiate generated class '" + className) + "'", e);
}
} | 3.26 |
flink_DefaultCheckpointPlan_checkNoPartlyOperatorsFinishedVertexUsedUnionListState_rdh | /**
* If a job vertex using {@code UnionListState} has all the tasks in RUNNING state, but part of
* the tasks have reported that the operators are finished, the checkpoint would be aborted.
* This forces the fast tasks to wait for the slow tasks so that they share the same final
* checkpoint; otherwise, once the fast tasks finished, the slow tasks would be blocked
* forever because all the following checkpoints would be aborted.
*/
private void checkNoPartlyOperatorsFinishedVertexUsedUnionListState(Map<JobVertexID, ExecutionJobVertex> partlyFinishedVertex, Map<OperatorID, OperatorState> operatorStates) {
for (Map.Entry<ExecutionJobVertex, Integer> entry : vertexOperatorsFinishedTasksCount.entrySet()) {
ExecutionJobVertex vertex = entry.getKey();
// If the vertex is partly finished, it must not have used UnionListState,
// because it already passed the previous check.
if (partlyFinishedVertex.containsKey(vertex.getJobVertexId())) {
continue;
}
if ((entry.getValue() != vertex.getParallelism()) && hasUsedUnionListState(vertex, operatorStates)) {
throw new PartialFinishingNotSupportedByStateException(String.format("The vertex %s (id = %s) has used" + " UnionListState, but part of its tasks has called operators' finish method.", vertex.getName(), vertex.getJobVertexId()));
}
}
} | 3.26 |
flink_DefaultCheckpointPlan_checkNoPartlyFinishedVertexUsedUnionListState_rdh | /**
* If a job vertex using {@code UnionListState} has some tasks FINISHED while others are
* still in RUNNING state, the checkpoint would be aborted since it might cause an incomplete
* {@code UnionListState}.
*/
private void checkNoPartlyFinishedVertexUsedUnionListState(Map<JobVertexID, ExecutionJobVertex> partlyFinishedVertex, Map<OperatorID, OperatorState> operatorStates) {
for (ExecutionJobVertex vertex : partlyFinishedVertex.values()) {
if (hasUsedUnionListState(vertex, operatorStates)) {
throw new PartialFinishingNotSupportedByStateException(String.format("The vertex %s (id = %s) has used" + " UnionListState, but part of its tasks are FINISHED.", vertex.getName(), vertex.getJobVertexId()));
}
}
} | 3.26 |
flink_ActorSystemBootstrapTools_startRemoteActorSystem_rdh | /**
* Starts a remote Actor System at given address and specific port.
*
* @param configuration
* The Flink configuration.
* @param actorSystemName
* Name of the started {@link ActorSystem}
* @param externalAddress
* The external address to access the ActorSystem.
* @param externalPort
* The external port to access the ActorSystem.
* @param bindAddress
* The local address to bind to.
* @param bindPort
* The local port to bind to.
* @param logger
* the logger to output log information.
* @param actorSystemExecutorConfiguration
* configuration for the ActorSystem's underlying
* executor
* @param customConfig
* Custom Pekko config to be combined with the config derived from Flink
* configuration.
* @return The ActorSystem which has been started.
* @throws Exception
*/
private static ActorSystem startRemoteActorSystem(Configuration configuration, String actorSystemName, String externalAddress, int externalPort, String bindAddress, int bindPort, Logger logger, Config actorSystemExecutorConfiguration, Config customConfig) throws Exception {
String externalHostPortUrl = NetUtils.unresolvedHostAndPortToNormalizedString(externalAddress, externalPort);
String v4 = NetUtils.unresolvedHostAndPortToNormalizedString(bindAddress, bindPort);
logger.info("Trying to start actor system, external address {}, bind address {}.", externalHostPortUrl, v4);
try {
Config pekkoConfig = PekkoUtils.getConfig(configuration, new HostAndPort(externalAddress, externalPort), new HostAndPort(bindAddress, bindPort), actorSystemExecutorConfiguration);
if (customConfig != null) {
pekkoConfig = customConfig.withFallback(pekkoConfig);
}
return startActorSystem(pekkoConfig, actorSystemName, logger);
} catch (Throwable t) {
if (t instanceof ChannelException) {
Throwable v6 = t.getCause();
if ((v6 != null) && (t.getCause() instanceof BindException)) {
throw new IOException((("Unable to create ActorSystem at address " + v4) + " : ") + v6.getMessage(), t);
}
}
throw new Exception("Could not create actor system", t);
}
} | 3.26 |
flink_ActorSystemBootstrapTools_startLocalActorSystem_rdh | /**
* Starts a local Actor System.
*
* @param configuration
* The Flink configuration.
* @param actorSystemName
* Name of the started ActorSystem.
* @param logger
* The logger to output log information.
* @param actorSystemExecutorConfiguration
* Configuration for the ActorSystem's underlying
* executor.
* @param customConfig
* Custom Pekko config to be combined with the config derived from Flink
* configuration.
* @return The ActorSystem which has been started.
* @throws Exception
*/
public static ActorSystem startLocalActorSystem(Configuration configuration, String actorSystemName, Logger logger, Config actorSystemExecutorConfiguration, Config customConfig) throws Exception {
logger.info("Trying to start local actor system");
try {
Config pekkoConfig = PekkoUtils.getConfig(configuration, null, null, actorSystemExecutorConfiguration);
if (customConfig != null) {
pekkoConfig = customConfig.withFallback(pekkoConfig);
}
return startActorSystem(pekkoConfig, actorSystemName, logger);
} catch (Throwable t) {
throw new Exception("Could not create actor system", t);
}
} | 3.26 |
flink_ActorSystemBootstrapTools_startActorSystem_rdh | /**
* Starts an Actor System with given Pekko config.
*
* @param config
* Config of the started ActorSystem.
* @param actorSystemName
* Name of the started ActorSystem.
* @param logger
* The logger to output log information.
* @return The ActorSystem which has been started.
*/
private static ActorSystem startActorSystem(Config config, String actorSystemName, Logger logger) {
logger.debug("Using pekko configuration\n {}", config);
ActorSystem actorSystem = PekkoUtils.createActorSystem(actorSystemName, config);
logger.info("Actor system started at {}", PekkoUtils.getAddress(actorSystem));
return actorSystem;
} | 3.26 |
flink_DefaultCompletedCheckpointStoreUtils_getMaximumNumberOfRetainedCheckpoints_rdh | /**
* Extracts the maximum number of retained checkpoints from the passed {@link Configuration}. The default value is used as a fallback if the configured value is not larger
* than {@code 0}.
*
* @param config
* The configuration that is accessed.
* @param logger
* The {@link Logger} used for exposing the warning if the configured value is
* invalid.
* @return The maximum number of retained checkpoints based on the passed {@code Configuration}.
*/
public static int getMaximumNumberOfRetainedCheckpoints(Configuration config, Logger logger) {
final int maxNumberOfCheckpointsToRetain = config.getInteger(CheckpointingOptions.MAX_RETAINED_CHECKPOINTS);
if (maxNumberOfCheckpointsToRetain <= 0) {
// warning and use 1 as the default value if the setting in
// state.checkpoints.max-retained-checkpoints is not greater than 0.
logger.warn("The setting for '{} : {}' is invalid. Using default value of {}", CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.key(), maxNumberOfCheckpointsToRetain, CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue());
return CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue();
}
return maxNumberOfCheckpointsToRetain;
} | 3.26 |
flink_DefaultCompletedCheckpointStoreUtils_retrieveCompletedCheckpoints_rdh | /**
* Fetch all {@link CompletedCheckpoint completed checkpoints} from an {@link StateHandleStore
* external store}. This method is intended for retrieving an initial state of {@link DefaultCompletedCheckpointStore}.
*
* @param checkpointStateHandleStore
* Completed checkpoints in external store.
* @param completedCheckpointStoreUtil
* Utilities for completed checkpoint store.
* @param <R>
* Type of {@link ResourceVersion}
* @return Immutable collection of {@link CompletedCheckpoint completed checkpoints}.
* @throws Exception
* If we're not able to fetch checkpoints for some reason.
*/
public static <R extends ResourceVersion<R>> Collection<CompletedCheckpoint> retrieveCompletedCheckpoints(StateHandleStore<CompletedCheckpoint, R> checkpointStateHandleStore, CheckpointStoreUtil completedCheckpointStoreUtil) throws Exception {
LOG.info("Recovering checkpoints from {}.", checkpointStateHandleStore);
// Get all there is first.
final List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> v1 = checkpointStateHandleStore.getAllAndLock();
// Sort checkpoints by name.
v1.sort(Comparator.comparing(o -> o.f1));
final int numberOfInitialCheckpoints = v1.size();
LOG.info("Found {} checkpoints in {}.", numberOfInitialCheckpoints, checkpointStateHandleStore);
final List<CompletedCheckpoint> retrievedCheckpoints = new ArrayList<>(numberOfInitialCheckpoints);
LOG.info("Trying to fetch {} checkpoints from storage.", numberOfInitialCheckpoints);
for (Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> checkpointStateHandle : v1) {
retrievedCheckpoints.add(checkNotNull(retrieveCompletedCheckpoint(completedCheckpointStoreUtil, checkpointStateHandle)));
}
return Collections.unmodifiableList(retrievedCheckpoints);
} | 3.26 |
flink_TaskManagerMetricGroup_putVariables_rdh | // ------------------------------------------------------------------------
// Component Metric Group Specifics
// ------------------------------------------------------------------------
@Override
protected void putVariables(Map<String, String> variables) {
variables.put(ScopeFormat.SCOPE_HOST, hostname);
variables.put(ScopeFormat.SCOPE_TASKMANAGER_ID, taskManagerId);
} | 3.26 |
flink_TaskManagerMetricGroup_m0_rdh | // ------------------------------------------------------------------------
// job groups
// ------------------------------------------------------------------------
public TaskManagerJobMetricGroup m0(JobID jobId, String jobName) {
Preconditions.checkNotNull(jobId);
String resolvedJobName = ((jobName == null) || jobName.isEmpty()) ? jobId.toString() : jobName;
TaskManagerJobMetricGroup jobGroup;
synchronized(this) {
// synchronization isn't strictly necessary as of FLINK-24864
jobGroup = jobs.get(jobId);
if (jobGroup == null) {
jobGroup = new TaskManagerJobMetricGroup(registry, this, jobId, resolvedJobName);
jobs.put(jobId, jobGroup);
}
}
return jobGroup;
} | 3.26 |
flink_KeyedStateCheckpointOutputStream_getKeyGroupList_rdh | /**
* Returns a list of all key-groups which can be written to this stream.
*/
public KeyGroupsList getKeyGroupList() {
return keyGroupRangeOffsets.getKeyGroupRange();
} | 3.26 |
flink_KeyedStateCheckpointOutputStream_getCurrentKeyGroup_rdh | /**
* Returns the key group that is currently being written. The key group was started but not yet
* finished, i.e. data can still be added. If no key group was started, this returns {@link #NO_CURRENT_KEY_GROUP}.
*/
public int getCurrentKeyGroup() {
return currentKeyGroup;
} | 3.26 |
flink_KeyedStateCheckpointOutputStream_isKeyGroupAlreadyStarted_rdh | /**
* Returns true, if the key group with the given id was already started. The key group might not
* yet be finished, if its id is equal to the return value of {@link #getCurrentKeyGroup()}.
*/
public boolean isKeyGroupAlreadyStarted(int keyGroupId) {
return NO_OFFSET_SET != keyGroupRangeOffsets.getKeyGroupOffset(keyGroupId);
} | 3.26 |
flink_KeyedStateCheckpointOutputStream_startNewKeyGroup_rdh | /**
* User code can call this method to signal that it begins to write a new key group with the
* given key group id. This id must be within the {@link KeyGroupsList} provided by the stream.
* Each key-group can only be started once and is considered final/immutable as soon as this
* method is called again.
*/
public void startNewKeyGroup(int keyGroupId) throws IOException {
if (isKeyGroupAlreadyStarted(keyGroupId)) {
throw new IOException(("Key group " + keyGroupId) + " already registered!");
}
keyGroupRangeOffsets.setKeyGroupOffset(keyGroupId, delegate.getPos());
currentKeyGroup = keyGroupId;
} | 3.26 |
flink_KeyedStateCheckpointOutputStream_isKeyGroupAlreadyFinished_rdh | /**
* Returns true if the key group is already completely written and immutable. It was started and
* since then another key group has been started.
*/
public boolean isKeyGroupAlreadyFinished(int keyGroupId) {
return isKeyGroupAlreadyStarted(keyGroupId) && (keyGroupId != getCurrentKeyGroup());
} | 3.26 |
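A hedged sketch (not from the source) of the per-key-group write protocol implied by the methods above; the payload written for each key group is made up, and DataOutputViewStreamWrapper from org.apache.flink.core.memory is assumed to wrap the stream.

static void writeRawKeyedState(KeyedStateCheckpointOutputStream out) throws IOException {
DataOutputView dov = new DataOutputViewStreamWrapper(out);
for (int keyGroup : out.getKeyGroupList()) {
out.startNewKeyGroup(keyGroup); // each key group may be started at most once
dov.writeInt(keyGroup); // illustrative payload for this key group
}
}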
flink_NonBufferResponseDecoder_ensureBufferCapacity_rdh | /**
* Ensures the message header accumulation buffer has enough capacity for the current message.
*/
private void ensureBufferCapacity() {
if (messageBuffer.capacity() < messageLength) {
messageBuffer.capacity(messageLength);
}
} | 3.26 |
flink_FsCheckpointStorageAccess_m0_rdh | // ------------------------------------------------------------------------
// CheckpointStorage implementation
// ------------------------------------------------------------------------
@Override
public boolean m0() {
return true;
} | 3.26 |
flink_FsCheckpointStorageAccess_getCheckpointsDirectory_rdh | // ------------------------------------------------------------------------
@VisibleForTesting
Path getCheckpointsDirectory() {
return checkpointsDirectory;
} | 3.26 |
flink_HashJoinOperator_fallbackSMJProcessPartition_rdh | /**
* If there are still partitions that spilled to disk more than three times when the hash join
* ends, the keys in these partitions are very skewed, so fall back to the sort merge join
* algorithm to process them.
*/
private void fallbackSMJProcessPartition() throws Exception {
if (!table.getPartitionsPendingForSMJ().isEmpty()) {
// release memory to MemoryManager first that is used to sort merge join operator
table.releaseMemoryCacheForSMJ();
// initialize sort merge join operator
LOG.info("Fallback to sort merge join to process spilled partitions.");
initialSortMergeJoinFunction();
fallbackSMJ = true;
for (BinaryHashPartition p : table.getPartitionsPendingForSMJ()) {
// process build side
RowIterator<BinaryRowData> buildSideIter = table.getSpilledPartitionBuildSideIter(p);
while (buildSideIter.advanceNext()) {
m0(buildSideIter.getRow());
}
// process probe side
ProbeIterator probeIter = table.getSpilledPartitionProbeSideIter(p);
BinaryRowData probeNext;
while ((probeNext = probeIter.next()) != null) {
processSortMergeJoinElement2(probeNext);
}
}
// close the HashTable
closeHashTable();
// finish build and probe
sortMergeJoinFunction.endInput(1);
sortMergeJoinFunction.endInput(2);
LOG.info("Finish sort merge join for spilled partitions.");
}
} | 3.26 |
flink_ExecutionTimeBasedSlowTaskDetector_findSlowTasks_rdh | /**
* Given that the parallelism is N and the ratio is R, define T as the median of the first N*R
* finished tasks' execution time. The baseline will be T*M, where M is the multiplier. Note
* that the execution time will be weighted with its input bytes when calculating the median. A
* task will be identified as slow if its weighted execution time is longer than the baseline.
*/
@VisibleForTesting
Map<ExecutionVertexID, Collection<ExecutionAttemptID>> findSlowTasks(final ExecutionGraph executionGraph) {
final long currentTimeMillis = System.currentTimeMillis();
final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> v1 = new HashMap<>();
final List<ExecutionJobVertex> jobVerticesToCheck = getJobVerticesToCheck(executionGraph);
for (ExecutionJobVertex ejv : jobVerticesToCheck) {
final ExecutionTimeWithInputBytes baseline = getBaseline(ejv, currentTimeMillis);
for (ExecutionVertex ev : ejv.getTaskVertices()) {
if (ev.getExecutionState().isTerminal()) {
continue;
}
final List<ExecutionAttemptID> slowExecutions = findExecutionsExceedingBaseline(ev.getCurrentExecutions(), baseline, currentTimeMillis);
if (!slowExecutions.isEmpty()) {
v1.put(ev.getID(), slowExecutions);
}
} }
return v1;
} | 3.26 |
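A worked example with illustrative numbers (not from the source): with parallelism N = 10, ratio R = 0.5 and multiplier M = 1.5, the baseline becomes available once the first N*R = 5 executions finish; if the median of their input-weighted execution times is T = 60 s, the baseline is T*M = 90 s, and any unfinished execution whose weighted execution time exceeds 90 s is reported as slow.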
flink_ExecutionTimeBasedSlowTaskDetector_scheduleTask_rdh | /**
* Schedule periodical slow task detection.
*/
private void scheduleTask(final ExecutionGraph executionGraph, final SlowTaskDetectorListener listener, final ComponentMainThreadExecutor mainThreadExecutor) {
this.scheduledDetectionFuture = mainThreadExecutor.schedule(() -> {
try {
listener.notifySlowTasks(findSlowTasks(executionGraph));
} catch (Throwable throwable) {
fatalErrorHandler.onFatalError(throwable);
}
scheduleTask(executionGraph, listener, mainThreadExecutor);
}, checkIntervalMillis, TimeUnit.MILLISECONDS);
} | 3.26 |
flink_CsvFormatFactory_m1_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
private static void m1(ReadableConfig formatOptions, CsvRowDataDeserializationSchema.Builder schemaBuilder) {
formatOptions.getOptional(FIELD_DELIMITER).map(delimiter -> StringEscapeUtils.unescapeJava(delimiter).charAt(0)).ifPresent(schemaBuilder::setFieldDelimiter);
if (formatOptions.get(DISABLE_QUOTE_CHARACTER)) {
schemaBuilder.disableQuoteCharacter();
} else {
formatOptions.getOptional(QUOTE_CHARACTER).map(quote -> quote.charAt(0)).ifPresent(schemaBuilder::setQuoteCharacter);
}
formatOptions.getOptional(ALLOW_COMMENTS).ifPresent(schemaBuilder::setAllowComments);
formatOptions.getOptional(IGNORE_PARSE_ERRORS).ifPresent(schemaBuilder::setIgnoreParseErrors);
formatOptions.getOptional(ARRAY_ELEMENT_DELIMITER).ifPresent(schemaBuilder::setArrayElementDelimiter);
formatOptions.getOptional(ESCAPE_CHARACTER).map(escape -> escape.charAt(0)).ifPresent(schemaBuilder::setEscapeCharacter);
formatOptions.getOptional(NULL_LITERAL).ifPresent(schemaBuilder::setNullLiteral);
} | 3.26 |
flink_MutableRecordAndPosition_setNext_rdh | /**
* Sets the next record of a sequence. This increments the {@code recordSkipCount} by one.
*/
public void setNext(E record) {
this.record = record;
this.recordSkipCount++;
} | 3.26 |
flink_MutableRecordAndPosition_setPosition_rdh | /**
* Sets the position without setting a record.
*/
public void setPosition(long offset, long recordSkipCount) {
this.offset = offset;
this.recordSkipCount = recordSkipCount;
} | 3.26 |
flink_MutableRecordAndPosition_set_rdh | /**
* Updates the record and position in this object.
*/
public void set(E record, long offset, long recordSkipCount) {
this.record = record;
this.offset = offset;
this.recordSkipCount = recordSkipCount;
} | 3.26 |
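A hedged sketch (not from the source) of how a reader might use the three setters above; the record values and offsets are made up, and the no-argument constructor is assumed.

static void trackReaderPosition() {
MutableRecordAndPosition<String> pos = new MutableRecordAndPosition<>();
pos.set("first", 0L, 1L); // first record of a batch: offset 0, skip count 1
pos.setNext("second"); // same offset, skip count becomes 2
pos.setPosition(128L, 0L); // move to a new batch starting at offset 128
}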
flink_MemorySegmentFactory_wrapOffHeapMemory_rdh | /**
* Creates a memory segment that wraps the off-heap memory backing the given ByteBuffer. Note
* that the ByteBuffer needs to be a <i>direct ByteBuffer</i>.
*
* <p>This method is intended to be used for components which pool memory and create memory
* segments around long-lived memory regions.
*
* @param memory
* The byte buffer with the off-heap memory to be represented by the memory
* segment.
* @return A new memory segment representing the given off-heap memory.
*/
public static MemorySegment wrapOffHeapMemory(ByteBuffer memory) {
return new MemorySegment(memory, null);
} | 3.26 |
flink_MemorySegmentFactory_wrap_rdh | /**
* Creates a new memory segment that targets the given heap memory region.
*
* <p>This method should be used to turn short lived byte arrays into memory segments.
*
* @param buffer
* The heap memory region.
* @return A new memory segment that targets the given heap memory region.
*/
public static MemorySegment wrap(byte[] buffer) {
return new MemorySegment(buffer, null);
} | 3.26 |
flink_MemorySegmentFactory_allocateUnpooledSegment_rdh | /**
* Allocates some unpooled memory and creates a new memory segment that represents that memory.
*
* <p>This method is similar to {@link #allocateUnpooledSegment(int)}, but additionally sets the
* owner of the memory segment.
*
* @param size
* The size of the memory segment to allocate.
* @param owner
* The owner to associate with the memory segment.
* @return A new memory segment, backed by unpooled heap memory.
*/
public static MemorySegment allocateUnpooledSegment(int size, Object owner) {
return new MemorySegment(new byte[size], owner);
} | 3.26 |
flink_MemorySegmentFactory_allocateOffHeapUnsafeMemory_rdh | /**
* Allocates an off-heap unsafe memory and creates a new memory segment to represent that
* memory.
*
* <p>Creation of this segment schedules its memory freeing operation when its java wrapping
* object is about to be garbage collected, similar to {@link java.nio.DirectByteBuffer#DirectByteBuffer(int)}. The difference is that this memory
* allocation is not restricted by the -XX:MaxDirectMemorySize option.
*
* @param size
* The size of the off-heap unsafe memory segment to allocate.
* @param owner
* The owner to associate with the off-heap unsafe memory segment.
* @param customCleanupAction
* A custom action to run upon calling GC cleaner.
* @return A new memory segment, backed by off-heap unsafe memory.
*/
public static MemorySegment allocateOffHeapUnsafeMemory(int size, Object owner, Runnable customCleanupAction) {
long address = MemoryUtils.allocateUnsafe(size);
ByteBuffer offHeapBuffer = MemoryUtils.wrapUnsafeMemoryWithByteBuffer(address, size);
Runnable cleaner = MemoryUtils.createMemoryCleaner(address, customCleanupAction);
return new MemorySegment(offHeapBuffer, owner, false, cleaner);
} | 3.26 |
flink_MemorySegmentFactory_allocateUnpooledOffHeapMemory_rdh | /**
* Allocates some unpooled off-heap memory and creates a new memory segment that represents that
* memory.
*
* @param size
* The size of the off-heap memory segment to allocate.
* @param owner
* The owner to associate with the off-heap memory segment.
* @return A new memory segment, backed by unpooled off-heap memory.
*/
public static MemorySegment allocateUnpooledOffHeapMemory(int size, Object owner) {
ByteBuffer memory = allocateDirectMemory(size);
return new MemorySegment(memory, owner);
} | 3.26 |
flink_ExpressionBuilder_aggDecimalPlus_rdh | /**
* Used only for implementing SUM/AVG aggregations (with and without retractions) on a Decimal
* type to avoid overriding decimal precision/scale calculation for sum/avg with the rules
* applied for the normal plus.
*/
@Internal
public static UnresolvedCallExpression aggDecimalPlus(Expression input1, Expression input2) {
return call(AGG_DECIMAL_PLUS, input1, input2);
} | 3.26 |
flink_ExpressionBuilder_aggDecimalMinus_rdh | /**
* Used only for implementing SUM/AVG aggregations (with and without retractions) on a Decimal
* type to avoid overriding decimal precision/scale calculation for sum/avg with the rules
* applied for the normal minus.
*/
@Internal
public static UnresolvedCallExpression aggDecimalMinus(Expression input1, Expression input2) {
return call(AGG_DECIMAL_MINUS, input1, input2);
} | 3.26 |
flink_YarnApplicationFileUploader_registerMultipleLocalResources_rdh | /**
* Recursively uploads (and registers) any (user and system) files in <tt>shipFiles</tt> except
* for files matching "<tt>flink-dist*.jar</tt>" which should be uploaded separately. If it is
* already a remote file, the uploading will be skipped.
*
* @param shipFiles
* local or remote files to register as Yarn local resources
* @param localResourcesDirectory
* the directory the localResources are uploaded to
* @param resourceType
* type of the resource, which can be one of FILE, PATTERN, or ARCHIVE
* @return list of class paths with the proper resource keys from the registration
*/
List<String> registerMultipleLocalResources(final Collection<Path> shipFiles, final String localResourcesDirectory, final LocalResourceType resourceType) throws IOException {
final List<Path> localPaths = new ArrayList<>();
final List<Path> relativePaths = new ArrayList<>();
for (Path shipFile : shipFiles) {
if (Utils.isRemotePath(shipFile.toString())) {
if (fileSystem.isDirectory(shipFile)) {
final URI parentURI = shipFile.getParent().toUri();
final RemoteIterator<LocatedFileStatus> iterable = fileSystem.listFiles(shipFile, true);
while (iterable.hasNext()) {
final Path current = iterable.next().getPath();
localPaths.add(current);
relativePaths.add(new Path(localResourcesDirectory, parentURI.relativize(current.toUri()).getPath()));
}
continue;
}
} else {
final File file = new File(shipFile.toUri().getPath());
if (file.isDirectory()) {
final Path shipPath = file.toPath().toRealPath();
final Path parentPath = shipPath.getParent();
Collection<Path> paths = FileUtils.listFilesInDirectory(shipPath, path -> true);
for (Path javaPath : paths) {
localPaths.add(new Path(javaPath.toUri()));
relativePaths.add(new
Path(localResourcesDirectory, parentPath.relativize(javaPath).toString()));
}
continue;
}
}
localPaths.add(shipFile);
relativePaths.add(new Path(localResourcesDirectory, shipFile.getName()));
}
final Set<String> v19 = new HashSet<>();
final Set<String> resources = new HashSet<>();
for (int i = 0; i < localPaths.size(); i++) {
final Path localPath = localPaths.get(i);
final Path relativePath = relativePaths.get(i);
if (!isFlinkDistJar(relativePath.getName())) {
final String v24 = relativePath.toString();
final YarnLocalResourceDescriptor resourceDescriptor = registerSingleLocalResource(v24, localPath, relativePath.getParent().toString(), resourceType, true, true);
if (!resourceDescriptor.alreadyRegisteredAsLocalResource()) {
if (v24.endsWith("jar")) {
v19.add(relativePath.toString());
} else {
resources.add(relativePath.getParent().toString());
}
}}
}
// construct classpath, we always want resource directories to go first, we also sort
// both resources and archives in order to make classpath deterministic
final ArrayList<String> classPaths = new ArrayList<>();
resources.stream().sorted().forEach(classPaths::add);
v19.stream().sorted().forEach(classPaths::add);
return classPaths;
} | 3.26 |
flink_YarnApplicationFileUploader_registerSingleLocalResource_rdh | /**
* Register a single local/remote resource and adds it to <tt>localResources</tt>.
*
* @param key
* the key to add the resource under
* @param resourcePath
* path of the resource to be registered
* @param relativeDstPath
* the relative path at the target location (this will be prefixed by the
* application-specific directory)
* @param resourceType
* type of the resource, which can be one of FILE, PATTERN, or ARCHIVE
* @param whetherToAddToRemotePaths
* whether to add the path of local resource to
* <tt>remotePaths</tt>
* @param whetherToAddToEnvShipResourceList
* whether to add the local resource to
* <tt>envShipResourceList</tt>
* @return the uploaded resource descriptor
*/
YarnLocalResourceDescriptor registerSingleLocalResource(final String key, final Path resourcePath, final String relativeDstPath, final LocalResourceType resourceType, final boolean whetherToAddToRemotePaths, final boolean whetherToAddToEnvShipResourceList) throws IOException {
addToRemotePaths(whetherToAddToRemotePaths, resourcePath);
if (Utils.isRemotePath(resourcePath.toString())) {
final FileStatus fileStatus = fileSystem.getFileStatus(resourcePath);
LOG.debug("Using remote file {} to register local resource", fileStatus.getPath());
final YarnLocalResourceDescriptor descriptor = YarnLocalResourceDescriptor.fromFileStatus(key, fileStatus, LocalResourceVisibility.APPLICATION, resourceType);
addToEnvShipResourceList(whetherToAddToEnvShipResourceList, descriptor);
f0.put(key, descriptor.toLocalResource());
return descriptor;
}
final File localFile = new File(resourcePath.toUri().getPath());
final Tuple2<Path, Long> remoteFileInfo = uploadLocalFileToRemote(resourcePath, relativeDstPath);
final YarnLocalResourceDescriptor descriptor = new YarnLocalResourceDescriptor(key, remoteFileInfo.f0, localFile.length(), remoteFileInfo.f1, LocalResourceVisibility.APPLICATION, resourceType);
addToEnvShipResourceList(whetherToAddToEnvShipResourceList, descriptor);
f0.put(key, descriptor.toLocalResource());
return descriptor;
} | 3.26 |
flink_YarnApplicationFileUploader_registerProvidedLocalResources_rdh | /**
* Register all the files in the provided lib directories as Yarn local resources with PUBLIC
* visibility, which means that they will be cached in the nodes and reused by different
* applications.
*
* @return list of class paths with the file name
*/
List<String> registerProvidedLocalResources() {
checkNotNull(f0);
final ArrayList<String> classPaths = new ArrayList<>();
final Set<String> resourcesJar = new HashSet<>();
final Set<String> resourcesDir = new HashSet<>();
providedSharedLibs.forEach((fileName, fileStatus) -> {
final Path filePath = fileStatus.getPath();
LOG.debug("Using remote file {} to register local resource", filePath);
final YarnLocalResourceDescriptor descriptor = YarnLocalResourceDescriptor.fromFileStatus(fileName, fileStatus, LocalResourceVisibility.PUBLIC, LocalResourceType.FILE);
f0.put(fileName, descriptor.toLocalResource());
remotePaths.add(filePath);
envShipResourceList.add(descriptor);
if ((!isFlinkDistJar(filePath.getName())) && (!isPlugin(filePath))) {
if (fileName.endsWith("jar")) {
resourcesJar.add(fileName);
} else {
resourcesDir.add(new Path(fileName).getParent().toString());
}
} else if (isFlinkDistJar(filePath.getName())) {
flinkDist = descriptor;
}
});
// Construct classpath where resource directories go first followed
// by resource files. Sort both resources and resource directories in
// order to make classpath deterministic.
resourcesDir.stream().sorted().forEach(classPaths::add);
resourcesJar.stream().sorted().forEach(classPaths::add);
return classPaths;
} | 3.26 |
flink_RegisterApplicationMasterResponseReflector_getContainersFromPreviousAttemptsUnsafe_rdh | /**
* Same as {@link #getContainersFromPreviousAttempts(RegisterApplicationMasterResponse)} but
* allows to pass objects that are not of type {@link RegisterApplicationMasterResponse}.
*/
@VisibleForTesting
List<Container> getContainersFromPreviousAttemptsUnsafe(final Object response) {
if (getContainersFromPreviousAttemptsMethod.isPresent() && (response != null)) {
try {
@SuppressWarnings("unchecked")
final List<Container> containers = ((List<Container>) (getContainersFromPreviousAttemptsMethod.get().invoke(response)));
if ((containers != null) && (!containers.isEmpty())) {
return containers;
}
} catch (Exception t) {
logger.error("Error invoking 'getContainersFromPreviousAttempts()'", t);
}
}
return Collections.emptyList();
} | 3.26 |
flink_RegisterApplicationMasterResponseReflector_getContainersFromPreviousAttempts_rdh | /**
* Checks if a YARN application still has registered containers. If the application master
* registered at the ResourceManager for the first time, this list will be empty. If the
* application master registered a repeated time (after a failure and recovery), this list will
* contain the containers that were previously allocated.
*
* @param response
* The response object from the registration at the ResourceManager.
* @return A list with containers from previous application attempt.
*/
List<Container> getContainersFromPreviousAttempts(final RegisterApplicationMasterResponse response) {
return getContainersFromPreviousAttemptsUnsafe(response);
} | 3.26 |
flink_BinarySegmentUtils_readTimestampData_rdh | /**
* Gets an instance of {@link TimestampData} from underlying {@link MemorySegment}.
*
* @param segments
* the underlying MemorySegments
* @param baseOffset
* the base offset of current instance of {@code TimestampData}
* @param offsetAndNanos
* the offset of milli-seconds part and nanoseconds
* @return an instance of {@link TimestampData}
*/
public static TimestampData readTimestampData(MemorySegment[] segments, int baseOffset, long offsetAndNanos) {
final int nanoOfMillisecond = ((int) (offsetAndNanos));
final int subOffset = ((int) (offsetAndNanos >> 32));
final long millisecond = getLong(segments, baseOffset + subOffset);
return TimestampData.fromEpochMillis(millisecond, nanoOfMillisecond);
} | 3.26 |
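A worked example with illustrative values (not from the source): offsetAndNanos = (16L << 32) | 500_000 packs a sub-offset of 16 into the upper 32 bits and 500,000 nanos-of-millisecond into the lower 32 bits, so the method reads the 8-byte millisecond value at baseOffset + 16 and returns TimestampData.fromEpochMillis(millis, 500_000).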
flink_BinarySegmentUtils_setByte_rdh | /**
* set byte from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setByte(MemorySegment[] segments, int offset, byte value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].put(offset, value);
} else {
setByteMultiSegments(segments, offset, value);
}
} | 3.26 |
flink_BinarySegmentUtils_find_rdh | /**
* Find equal segments2 in segments1.
*
* @param segments1
* segs to find.
* @param segments2
* sub segs.
* @return the found offset, or -1 if not found.
*/
public static int find(MemorySegment[] segments1, int offset1, int numBytes1, MemorySegment[] segments2, int offset2, int numBytes2) {
if (numBytes2 == 0) {
// quick way 1.
return offset1;
}
if (inFirstSegment(segments1, offset1, numBytes1) && inFirstSegment(segments2,
offset2, numBytes2)) {
byte first = segments2[0].get(offset2);
int end = (numBytes1 - numBytes2) + offset1;
for (int i = offset1; i <= end; i++) {
// quick way 2: equal first byte.
if ((segments1[0].get(i) == first) && segments1[0].equalTo(segments2[0], i, offset2, numBytes2)) {
return i;
}
}
return -1;
} else {
return findInMultiSegments(segments1, offset1, numBytes1, segments2, offset2, numBytes2);
}
} | 3.26 |
flink_BinarySegmentUtils_getBytes_rdh | /**
* The returned bytes may not be a copy; if a copy is required, use copyTo.
*/
public static byte[] getBytes(MemorySegment[] segments, int baseOffset, int sizeInBytes) {
// avoid copy if `base` is `byte[]`
if (segments.length == 1) {
byte[] heapMemory = segments[0].getHeapMemory();
if (((baseOffset == 0) && (heapMemory != null)) && (heapMemory.length == sizeInBytes)) {
return heapMemory;
} else {
byte[] v19 = new byte[sizeInBytes];
segments[0].get(baseOffset, v19, 0, sizeInBytes);
return v19;
}
} else {
byte[] bytes = new byte[sizeInBytes];
copyMultiSegmentsToBytes(segments, baseOffset, bytes, 0, sizeInBytes);
return bytes;
}
} | 3.26 |
flink_BinarySegmentUtils_setInt_rdh | /**
* set int from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setInt(MemorySegment[] segments, int offset, int value) {
if (inFirstSegment(segments, offset, 4)) {
segments[0].putInt(offset, value);
} else
{
setIntMultiSegments(segments, offset, value);
}
} | 3.26 |
flink_BinarySegmentUtils_readArrayData_rdh | /**
* Gets an instance of {@link ArrayData} from underlying {@link MemorySegment}.
*/
public static ArrayData readArrayData(MemorySegment[] segments, int baseOffset, long offsetAndSize) {
final int size = ((int) (offsetAndSize));
int offset = ((int) (offsetAndSize >> 32));
BinaryArrayData array = new BinaryArrayData();
array.pointTo(segments, offset + baseOffset, size);
return array;
} | 3.26 |
flink_BinarySegmentUtils_copyFromBytes_rdh | /**
* Copy target segments from source byte[].
*
* @param segments
* target segments.
* @param offset
* target segments offset.
* @param bytes
* source byte[].
* @param bytesOffset
* source byte[] offset.
* @param numBytes
* the number bytes to copy.
*/
public static void copyFromBytes(MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
if (segments.length == 1) {
segments[0].put(offset, bytes, bytesOffset, numBytes);
} else {
copyMultiSegmentsFromBytes(segments, offset, bytes, bytesOffset, numBytes);
}
} | 3.26 |
flink_BinarySegmentUtils_allocateReuseBytes_rdh | /**
* Allocates bytes that are only for temporary usage; they should not be stored anywhere else.
* Use a {@link ThreadLocal} to reuse bytes to avoid overhead of byte[] new and gc.
*
* <p>If there are methods that can only accept a byte[], instead of a MemorySegment[]
* parameter, we can allocate a reuse bytes and copy the MemorySegment data to byte[], then call
* the method. Such as String deserialization.
*/
public static byte[] allocateReuseBytes(int length) {
byte[] bytes = BYTES_LOCAL.get();
if (bytes == null) {
if (length <= MAX_BYTES_LENGTH) {
bytes = new byte[MAX_BYTES_LENGTH];
BYTES_LOCAL.set(bytes);
} else {
bytes = new byte[length];
}
} else if (bytes.length < length) {
bytes = new byte[length];
}
return bytes;
} | 3.26 |
flink_BinarySegmentUtils_getDouble_rdh | /**
* get double from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static double getDouble(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getDouble(offset);
} else {
return getDoubleMultiSegments(segments, offset);}
} | 3.26 |
flink_BinarySegmentUtils_getFloat_rdh | /**
* get float from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static float getFloat(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getFloat(offset);
} else {
return getFloatMultiSegments(segments, offset);
}
} | 3.26 |
flink_BinarySegmentUtils_byteIndex_rdh | /**
* Given a bit index, return the byte index containing it.
*
* @param bitIndex
* the bit index.
* @return the byte index.
*/
private static int byteIndex(int bitIndex) {
return bitIndex >>> ADDRESS_BITS_PER_WORD;
} | 3.26 |
flink_BinarySegmentUtils_readDecimalData_rdh | /**
* Gets an instance of {@link DecimalData} from underlying {@link MemorySegment}.
*/
public static DecimalData readDecimalData(MemorySegment[] segments,
int baseOffset, long offsetAndSize, int precision, int scale) {
final int size = ((int) (offsetAndSize));
int subOffset = ((int) (offsetAndSize >> 32));
byte[] bytes = new byte[size];
copyToBytes(segments, baseOffset + subOffset, bytes, 0, size);
return DecimalData.fromUnscaledBytes(bytes, precision, scale);
} | 3.26 |
flink_BinarySegmentUtils_bitGet_rdh | /**
* read bit from segments.
*
* @param segments
* target segments.
* @param baseOffset
* bits base offset.
* @param index
* bit index from base offset.
*/
public static boolean bitGet(MemorySegment[] segments, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = getByte(segments, offset);
return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0;
} | 3.26 |
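A worked example (illustrative, assuming the byte-granularity constants used in this class, i.e. a shift of 3 and a mask of 7): for index = 19, byteIndex(19) = 19 >>> 3 = 2 and 19 & 7 = 3, so bitGet reads the byte at baseOffset + 2 and tests its bit 3 with (current & (1 << 3)) != 0.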
flink_BinarySegmentUtils_getByte_rdh | /**
* get byte from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static byte getByte(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].get(offset);
} else {
return getByteMultiSegments(segments, offset);
}
} | 3.26 |
flink_BinarySegmentUtils_getShort_rdh | /**
* get short from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static short getShort(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 2)) {
return segments[0].getShort(offset);
} else {
return getShortMultiSegments(segments, offset);
}
} | 3.26 |
flink_BinarySegmentUtils_bitSet_rdh | /**
* set bit from segments.
*
* @param segments
* target segments.
* @param baseOffset
* bits base offset.
* @param index
* bit index from base offset.
*/
public static void bitSet(MemorySegment[] segments, int baseOffset, int index) {
if
(segments.length == 1) {
int offset = baseOffset + byteIndex(index);
MemorySegment segment =
segments[0];
byte current = segment.get(offset);
current |= 1 << (index & BIT_BYTE_INDEX_MASK);
segment.put(offset, current);
} else {
bitSetMultiSegments(segments, baseOffset, index);
}
} | 3.26 |
flink_BinarySegmentUtils_setLong_rdh | /**
* set long from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setLong(MemorySegment[] segments, int offset, long value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putLong(offset, value);
} else {
setLongMultiSegments(segments, offset, value);
}
} | 3.26 |
flink_BinarySegmentUtils_getInt_rdh | /**
* get int from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static int getInt(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getInt(offset);
} else {
return getIntMultiSegments(segments, offset);
}
} | 3.26 |
flink_BinarySegmentUtils_readRawValueData_rdh | /**
* Gets an instance of {@link RawValueData} from underlying {@link MemorySegment}.
*/
public static <T> RawValueData<T> readRawValueData(MemorySegment[] segments, int baseOffset, long offsetAndSize) {
final int size = ((int) (offsetAndSize));
int offset = ((int) (offsetAndSize >> 32));
return new BinaryRawValueData<>(segments, offset + baseOffset, size, null);
} | 3.26 |
flink_BinarySegmentUtils_bitUnSet_rdh | /**
* unset bit from segments.
*
* @param segments
* target segments.
* @param baseOffset
* bits base offset.
* @param index
* bit index from base offset.
*/
public static void bitUnSet(MemorySegment[] segments, int baseOffset, int index) {
if (segments.length == 1) {
MemorySegment segment = segments[0];
int offset = baseOffset + byteIndex(index);
byte current = segment.get(offset);
current &= ~(1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(offset, current);
} else {
bitUnSetMultiSegments(segments, baseOffset, index);
}
} | 3.26 |
flink_BinarySegmentUtils_copyToUnsafe_rdh | /**
* Copy segments to target unsafe pointer.
*
* @param segments
* Source segments.
* @param offset
* The position where the bytes are started to be read from these memory segments.
* @param target
* The unsafe memory to copy the bytes to.
* @param pointer
* The position in the target unsafe memory to copy the chunk to.
* @param numBytes
* the number bytes to copy.
*/
public static void copyToUnsafe(MemorySegment[] segments, int offset, Object target, int pointer, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].copyToUnsafe(offset, target, pointer, numBytes);
} else {
copyMultiSegmentsToUnsafe(segments, offset, target, pointer, numBytes);
}
} | 3.26 |
flink_BinarySegmentUtils_getBoolean_rdh | /**
* get boolean from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static boolean getBoolean(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].getBoolean(offset);
}
else {
return getBooleanMultiSegments(segments, offset);
}
} | 3.26 |
flink_BinarySegmentUtils_setDouble_rdh | /**
* set double from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setDouble(MemorySegment[] segments, int offset, double value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putDouble(offset, value);
} else {
setDoubleMultiSegments(segments, offset, value);
}
} | 3.26 |
flink_BinarySegmentUtils_readStringData_rdh | /**
* Gets a binary string; if the length is less than 8, the data will be included in variablePartOffsetAndLen.
*
* <p>Note: Need to consider the ByteOrder.
*
* @param baseOffset
* base offset of composite binary format.
* @param fieldOffset
* absolute start offset of 'variablePartOffsetAndLen'.
* @param variablePartOffsetAndLen
* a long value, real data or offset and len.
*/
public static StringData readStringData(MemorySegment[] segments, int baseOffset, int fieldOffset, long variablePartOffsetAndLen) {
long mark = variablePartOffsetAndLen & HIGHEST_FIRST_BIT;
if (mark == 0) {
final int subOffset = ((int) (variablePartOffsetAndLen >> 32));
final int len = ((int) (variablePartOffsetAndLen));
return BinaryStringData.fromAddress(segments, baseOffset + subOffset, len);
} else {
int len = ((int) ((variablePartOffsetAndLen & HIGHEST_SECOND_TO_EIGHTH_BIT) >>> 56));
if (BinarySegmentUtils.LITTLE_ENDIAN) {
return BinaryStringData.fromAddress(segments, fieldOffset, len);
} else {
// fieldOffset + 1 to skip header.
return BinaryStringData.fromAddress(segments, fieldOffset + 1, len);
}
}
} | 3.26 |
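A worked example with illustrative values (not from the source): variablePartOffsetAndLen = (40L << 32) | 13 with the highest bit clear describes a 13-byte string stored at baseOffset + 40; when the highest bit is set, bits 56..62 hold the length (at most 7 bytes) and the bytes are stored inline in the same 8-byte field, read from fieldOffset on little-endian machines or from fieldOffset + 1 (skipping the header byte) on big-endian ones.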
flink_BinarySegmentUtils_hash_rdh | /**
* hash segments to int.
*
* @param segments
* Source segments.
* @param offset
* Source segments offset.
* @param numBytes
* the number bytes to hash.
*/
public static int hash(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
return MurmurHashUtils.hashBytes(segments[0], offset, numBytes);
} else {
return hashMultiSeg(segments, offset, numBytes);
}
} | 3.26 |
flink_BinarySegmentUtils_readMapData_rdh | /**
* Gets an instance of {@link MapData} from underlying {@link MemorySegment}.
*/
public static MapData readMapData(MemorySegment[] segments, int baseOffset, long offsetAndSize) {
final int size = ((int) (offsetAndSize));
int offset = ((int) (offsetAndSize >> 32));
BinaryMapData map = new BinaryMapData();
map.pointTo(segments, offset + baseOffset, size);
return map;
} | 3.26 |
flink_BinarySegmentUtils_setBoolean_rdh | /**
* set boolean from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setBoolean(MemorySegment[] segments, int offset, boolean value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].putBoolean(offset, value);
} else {
setBooleanMultiSegments(segments, offset, value);
}
} | 3.26 |
flink_BinarySegmentUtils_getLong_rdh | /**
* get long from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static long getLong(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getLong(offset);
} else {
return getLongMultiSegments(segments, offset);
}
} | 3.26 |
flink_BinarySegmentUtils_readBinary_rdh | /**
* Get binary; if len is less than 8, the bytes are included in variablePartOffsetAndLen.
*
* <p>Note: Need to consider the ByteOrder.
*
* @param baseOffset
* base offset of composite binary format.
* @param fieldOffset
* absolute start offset of 'variablePartOffsetAndLen'.
* @param variablePartOffsetAndLen
* a long value, real data or offset and len.
*/
public static byte[] readBinary(MemorySegment[] segments, int baseOffset, int fieldOffset, long variablePartOffsetAndLen) {
long mark = variablePartOffsetAndLen & HIGHEST_FIRST_BIT;
if (mark == 0) {
final int subOffset = ((int) (variablePartOffsetAndLen >> 32));
final int len = ((int) (variablePartOffsetAndLen));
return BinarySegmentUtils.copyToBytes(segments, baseOffset + subOffset, len);
} else {
int len = ((int) ((variablePartOffsetAndLen & HIGHEST_SECOND_TO_EIGHTH_BIT) >>> 56));
if (BinarySegmentUtils.LITTLE_ENDIAN) {
return BinarySegmentUtils.copyToBytes(segments, fieldOffset, len);
} else {
// fieldOffset + 1 to skip header.
return BinarySegmentUtils.copyToBytes(segments, fieldOffset + 1, len);
}
}
} | 3.26 |
flink_BinarySegmentUtils_setShort_rdh | /**
* set short to segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setShort(MemorySegment[] segments, int offset, short value) {
if (inFirstSegment(segments, offset, 2)) {
segments[0].putShort(offset, value);
} else {
setShortMultiSegments(segments, offset, value);
}
} | 3.26 |
flink_BinarySegmentUtils_readRowData_rdh | /**
* Gets an instance of {@link RowData} from underlying {@link MemorySegment}.
*/
public static RowData readRowData(MemorySegment[] segments, int numFields, int baseOffset, long offsetAndSize) {
final int size = ((int) (offsetAndSize));
int offset = ((int) (offsetAndSize >> 32));
NestedRowData row = new NestedRowData(numFields);
row.pointTo(segments, offset + baseOffset, size);
return row;
} | 3.26 |
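readRowData above (and readMapData earlier) takes a packed offsetAndSize long; mirroring the two casts in the snippet, the packing is assumed to be the relative offset in the high 32 bits and the byte size in the low 32 bits. A minimal sketch of that packing:

public class OffsetAndSize {
    // assumed packing of 'offsetAndSize': high 32 bits = offset, low 32 bits = size
    static long pack(int offset, int size) {
        return (((long) offset) << 32) | (size & 0xFFFFFFFFL);
    }

    static int offset(long offsetAndSize) {
        return (int) (offsetAndSize >> 32); // high 32 bits
    }

    static int size(long offsetAndSize) {
        return (int) offsetAndSize; // low 32 bits
    }
}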
flink_BinarySegmentUtils_inFirstSegment_rdh | /**
* Checks whether the requested range lies entirely within the first MemorySegment, so the fast single-segment path can be used.
*/
private static boolean inFirstSegment(MemorySegment[] segments, int offset, int numBytes) {
return (numBytes + offset) <= segments[0].size();
} | 3.26 |
flink_BinarySegmentUtils_copyToBytes_rdh | /**
* Copy segments to target byte[].
*
* @param segments
* Source segments.
* @param offset
* Source segments offset.
* @param bytes
* target byte[].
* @param bytesOffset
* target byte[] offset.
* @param numBytes
* the number of bytes to copy.
*/
public static byte[] copyToBytes(MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].get(offset, bytes, bytesOffset, numBytes);
} else {
copyMultiSegmentsToBytes(segments, offset, bytes, bytesOffset, numBytes);
}
return bytes;
} | 3.26 |
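A minimal usage sketch, assuming two wrapped heap segments; the copied range crosses the segment boundary, so the multi-segment branch is exercised. The byte values are illustrative.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.table.data.binary.BinarySegmentUtils;

public class CopyToBytesExample {
    public static void main(String[] args) {
        MemorySegment[] segments = {
                MemorySegmentFactory.wrap(new byte[]{1, 2, 3, 4}),
                MemorySegmentFactory.wrap(new byte[]{5, 6, 7, 8})};
        byte[] out = new byte[4];
        // bytes at offsets 2..5 span both segments: {3, 4, 5, 6}
        BinarySegmentUtils.copyToBytes(segments, 2, out, 0, 4);
    }
}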
flink_BinarySegmentUtils_equals_rdh | /**
* Compares two memory segment regions for equality.
*
* @param segments1
* Segments 1
* @param offset1
* Offset in segments1 at which to start comparing
* @param segments2
* Segments 2
* @param offset2
* Offset in segments2 at which to start comparing
* @param len
* Length of the compared memory region
* @return true if equal, false otherwise
*/
public static boolean equals(MemorySegment[] segments1, int offset1, MemorySegment[] segments2, int offset2, int len) {
if (inFirstSegment(segments1, offset1, len) && inFirstSegment(segments2, offset2, len)) {
return segments1[0].equalTo(segments2[0], offset1, offset2, len);
} else {
return equalsMultiSegments(segments1, offset1, segments2, offset2, len);
}
} | 3.26 |
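A minimal usage sketch comparing a region that spans two segments against a single-segment region; offsets, lengths, and byte values are illustrative assumptions.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.table.data.binary.BinarySegmentUtils;

public class EqualsExample {
    public static void main(String[] args) {
        MemorySegment[] a = {
                MemorySegmentFactory.wrap(new byte[]{1, 2, 3}),
                MemorySegmentFactory.wrap(new byte[]{4, 5, 6})};
        MemorySegment[] b = {MemorySegmentFactory.wrap(new byte[]{2, 3, 4, 5})};
        // compares a[1..5) with b[0..4): both regions are {2, 3, 4, 5} -> true
        boolean same = BinarySegmentUtils.equals(a, 1, b, 0, 4);
        System.out.println(same);
    }
}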
flink_FunctionDefinitionFactory_createFunctionDefinition_rdh | /**
* Creates a {@link FunctionDefinition} from the given {@link CatalogFunction}, using the given {@link Context} that carries the class loader of the current session; this is useful when a class needs to be loaded from its class name.
*
* <p>The default implementation will call {@link #createFunctionDefinition(String,
* CatalogFunction)} directly.
*
* @param name
* name of the {@link CatalogFunction}
* @param catalogFunction
* the catalog function
* @param context
* the {@link Context} for creating function definition
* @return a {@link FunctionDefinition}
*/
default FunctionDefinition createFunctionDefinition(String name, CatalogFunction catalogFunction, Context context) {
try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(context.getClassLoader())) {
return createFunctionDefinition(name, catalogFunction);
}
} | 3.26 |
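A minimal sketch of a custom factory that implements the two-argument method the default above delegates to. Because the default wraps the call in TemporaryClassLoaderContext, the session class loader is installed as the thread context class loader when this runs, which is what the reflective lookup below relies on. The class name and error handling are illustrative assumptions, not a prescribed implementation.

import org.apache.flink.table.catalog.CatalogFunction;
import org.apache.flink.table.factories.FunctionDefinitionFactory;
import org.apache.flink.table.functions.FunctionDefinition;
import org.apache.flink.table.functions.UserDefinedFunction;

public class ReflectiveFunctionDefinitionFactory implements FunctionDefinitionFactory {
    @Override
    public FunctionDefinition createFunctionDefinition(String name, CatalogFunction catalogFunction) {
        try {
            // the default three-arg variant installs the session class loader as the
            // thread context class loader before delegating here
            Class<?> clazz = Thread.currentThread()
                    .getContextClassLoader()
                    .loadClass(catalogFunction.getClassName());
            return (UserDefinedFunction) clazz.getDeclaredConstructor().newInstance();
        } catch (ReflectiveOperationException e) {
            throw new IllegalStateException("Could not instantiate function " + name, e);
        }
    }
}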
flink_PekkoUtils_createActorSystem_rdh | /**
* Creates an actor system with the given pekko config.
*
* @param actorSystemName
* name of the actor system
* @param config
* configuration for the actor system
* @return created actor system
*/
public static ActorSystem createActorSystem(String actorSystemName, Config config) {
// Initialize slf4j as logger of Pekko's Netty instead of java.util.logging (FLINK-1650)
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
return RobustActorSystem.create(actorSystemName, config);
} | 3.26 |
flink_PekkoUtils_getBasicConfig_rdh | /**
* Gets the basic Pekko config which is shared by remote and local actor systems.
*
* @param configuration
* instance which contains the user specified values for the configuration
* @return Flink's basic Pekko config
*/
private static Config getBasicConfig(Configuration configuration) {
final int throughput = configuration.getInteger(AkkaOptions.DISPATCHER_THROUGHPUT);
final String jvmExitOnFatalError = booleanToOnOrOff(configuration.getBoolean(AkkaOptions.JVM_EXIT_ON_FATAL_ERROR));
final String logLifecycleEvents = booleanToOnOrOff(configuration.getBoolean(AkkaOptions.LOG_LIFECYCLE_EVENTS));
final String supervisorStrategy = EscalatingSupervisorStrategy.class.getCanonicalName();
return new ConfigBuilder()
.add("pekko {")
.add(" daemonic = off")
.add(" loggers = [\"org.apache.pekko.event.slf4j.Slf4jLogger\"]")
.add(" logging-filter = \"org.apache.pekko.event.slf4j.Slf4jLoggingFilter\"")
.add(" log-config-on-start = off")
.add(" logger-startup-timeout = 50s")
.add(" loglevel = " + getLogLevel())
.add(" stdout-loglevel = OFF")
.add(" log-dead-letters = " + logLifecycleEvents)
.add(" log-dead-letters-during-shutdown = " + logLifecycleEvents)
.add(" jvm-exit-on-fatal-error = " + jvmExitOnFatalError)
.add(" serialize-messages = off")
.add(" actor {")
.add(" guardian-supervisor-strategy = " + supervisorStrategy)
.add(" warn-about-java-serializer-usage = off")
.add(" allow-java-serialization = on")
.add(" default-dispatcher {")
.add(" throughput = " + throughput)
.add(" }")
.add(" supervisor-dispatcher {")
.add(" type = Dispatcher")
.add(" executor = \"thread-pool-executor\"")
.add(" thread-pool-executor {")
.add(" core-pool-size-min = 1")
.add(" core-pool-size-max = 1")
.add(" }")
.add(" }")
.add(" }")
.add("}")
.build();
} | 3.26 |
flink_PekkoUtils_getAddressFromRpcURL_rdh | /**
* Extracts the {@link Address} from the given pekko URL.
*
* @param rpcURL
* to extract the {@link Address} from
* @throws MalformedURLException
* if the {@link Address} could not be parsed from the given pekko
* URL
* @return Extracted {@link Address} from the given rpc URL
*/
// hidden checked exception coming from Pekko
@SuppressWarnings("RedundantThrows")
public static Address getAddressFromRpcURL(String rpcURL) throws MalformedURLException {
return AddressFromURIString.apply(rpcURL);
} | 3.26 |
flink_PekkoUtils_getInetSocketAddressFromRpcURL_rdh | /**
* Extracts the hostname and the port of the remote actor system from the given Pekko URL. The
* result is an {@link InetSocketAddress} instance containing the extracted hostname and port.
* If the Pekko URL does not contain the hostname and port information, e.g. a local Pekko URL
* is provided, then an {@link Exception} is thrown.
*
* @param rpcURL
* The URL to extract the host and port from.
* @throws java.lang.Exception
* Thrown, if the given string does not represent a proper url
* @return The InetSocketAddress with the extracted host and port.
*/
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception {
// Pekko URLs have the form schema://systemName@host:port/.... if it's a remote Pekko URL
try {
final Address address = getAddressFromRpcURL(rpcURL);
if (address.host().isDefined() && address.port().isDefined()) {
return new InetSocketAddress(address.host().get(), ((int) (address.port().get())));
} else {
throw new MalformedURLException();
}
} catch (MalformedURLException e) {
throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL);
}
} | 3.26 |
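A minimal usage sketch with a made-up remote Pekko URL; the hostname, port, actor path, and the import path of the internal PekkoUtils class are assumptions for illustration.

import java.net.InetSocketAddress;
import org.apache.flink.runtime.rpc.pekko.PekkoUtils;

public class RpcUrlExample {
    public static void main(String[] args) throws Exception {
        // remote Pekko URLs follow schema://systemName@host:port/..., as noted above
        InetSocketAddress address = PekkoUtils.getInetSocketAddressFromRpcURL(
                "pekko.tcp://flink@jobmanager-host:6123/user/rpc/dispatcher_1");
        System.out.println(address.getHostString() + ":" + address.getPort()); // jobmanager-host:6123
    }
}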