name (string, lengths 12 to 178) | code_snippet (string, lengths 8 to 36.5k) | score (float64, 3.26 to 3.68) |
---|---|---|
flink_ProjectOperator_projectTuple12_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> ProjectOperator<T, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> projectTuple12() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>> tType = new TupleTypeInfo<Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(fTypes);
return new ProjectOperator<T, Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
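The snippet above is one of the tuple-arity-specific variants that back the public `DataSet.project(...)` call. Below is a minimal sketch of how such a projection is typically reached through the DataSet API; the example data, field order, and use of `ExecutionEnvironment#fromElements` are illustrative assumptions, not part of the snippet.

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;

public class ProjectionExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // A small Tuple3 data set; the field order is (id, name, score).
        DataSet<Tuple3<Integer, String, Double>> input =
                env.fromElements(Tuple3.of(1, "a", 0.5), Tuple3.of(2, "b", 0.7));

        // project(...) keeps only the selected fields; the arity-specific
        // projectTupleN() variants above build the matching TupleTypeInfo
        // for the reduced tuple behind the scenes.
        DataSet<Tuple2<String, Integer>> projected = input.project(1, 0);

        projected.print();
    }
}
```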
flink_ProjectOperator_extractFieldTypes_rdh | // END_OF_TUPLE_DEPENDENT_CODE
// -----------------------------------------------------------------------------------------
private TypeInformation<?>[] extractFieldTypes(int[] fields, TypeInformation<?> inType) {
TupleTypeInfo<?> inTupleType = ((TupleTypeInfo<?>) (inType));
TypeInformation<?>[] fieldTypes = new TypeInformation[fields.length];
for (int i = 0; i < fields.length; i++) {
fieldTypes[i] = inTupleType.getTypeAt(fields[i]);
}
return fieldTypes;
} | 3.26 |
flink_ProjectOperator_projectTuple8_rdh | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7> ProjectOperator<T, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType = new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes);
return new ProjectOperator<T, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(this.ds, this.fieldIndexes, tType);
} | 3.26 |
flink_Path_fromLocalFile_rdh | // Utilities
// ------------------------------------------------------------------------
/**
* Creates a path for the given local file.
*
* <p>This method is useful to make sure the path creation for local files works seamlessly
* across different operating systems. Especially Windows has slightly different rules for
* slashes between schema and a local file path, making it sometimes tricky to produce
* cross-platform URIs for local files.
*
* @param file
* The file that the path should represent.
* @return A path representing the local file URI of the given file.
*/
public static Path fromLocalFile(File file) {
return new Path(file.toURI());
}
/**
* Deserialize the Path from {@link DataInputView} | 3.26 |
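A short usage sketch for `Path.fromLocalFile` (together with `getName`, shown in the next row); the concrete file path and the printed URI form are illustrative only.

```java
import java.io.File;
import org.apache.flink.core.fs.Path;

public class LocalPathExample {
    public static void main(String[] args) {
        // Going through File#toURI() keeps scheme and slash handling
        // consistent across operating systems, as described above.
        Path path = Path.fromLocalFile(new File("/tmp/data/input.csv"));

        System.out.println(path.toUri());   // e.g. file:/tmp/data/input.csv
        System.out.println(path.getName()); // input.csv
    }
}
```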
flink_Path_getName_rdh | /**
* Returns the final component of this path, i.e., everything that follows the last separator.
*
* @return the final component of the path
*/
public String getName() {
final String path = uri.getPath();
final int slash = path.lastIndexOf(SEPARATOR);
return path.substring(slash + 1);
} | 3.26 |
flink_Path_serializeToDataOutputView_rdh | /**
 * Serializes the path to a {@link DataOutputView}.
*
* @param path
* the file path.
* @param out
		the data output view.
* @throws IOException
* if an error happened.
*/
public static void serializeToDataOutputView(Path path, DataOutputView out) throws IOException {
URI uri = path.toUri();
if (uri == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
StringUtils.writeNullableString(uri.getScheme(), out);
StringUtils.writeNullableString(uri.getUserInfo(), out);
StringUtils.writeNullableString(uri.getHost(), out);
out.writeInt(uri.getPort());
StringUtils.writeNullableString(uri.getPath(), out);
StringUtils.writeNullableString(uri.getQuery(), out);
StringUtils.writeNullableString(uri.getFragment(), out);
}
} | 3.26 |
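A hedged sketch of calling `serializeToDataOutputView`, assuming `org.apache.flink.core.memory.DataOutputSerializer` as the `DataOutputView` implementation and its `getCopyOfBuffer()` accessor; the HDFS URI is made up for illustration.

```java
import java.io.IOException;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.memory.DataOutputSerializer;

public class PathSerializationExample {
    public static void main(String[] args) throws IOException {
        Path path = new Path("hdfs://namenode:8020/user/flink/checkpoints");

        // DataOutputSerializer is a DataOutputView backed by a growable byte buffer
        // (assumption: an initial capacity of 64 bytes is enough to start with).
        DataOutputSerializer out = new DataOutputSerializer(64);
        Path.serializeToDataOutputView(path, out);

        byte[] bytes = out.getCopyOfBuffer();
        System.out.println("serialized path occupies " + bytes.length + " bytes");
    }
}
```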
flink_Path_checkPathArg_rdh | /**
 * Checks whether the provided path string is null or has zero length and throws an {@link IllegalArgumentException} if either condition applies.
*
* @param path
* the path string to be checked
* @return The checked path.
*/
private String checkPathArg(String path) {
// disallow construction of a Path from an empty string
if (path == null) {
throw new IllegalArgumentException("Can not create a Path from a null string");
}
if (path.length() == 0) {
throw new IllegalArgumentException("Can not create a Path from an empty string");}
return path;
} | 3.26 |
flink_Path_hasWindowsDrive_rdh | /**
 * Checks if the provided path string contains a Windows drive letter.
*
* @param path
* the path to check
* @param slashed
* true to indicate the first character of the string is a slash, false otherwise
 * @return <code>true</code> if the path string contains a Windows drive letter, false otherwise
*/
private boolean hasWindowsDrive(String path, boolean slashed) {
final int start = (slashed) ? 1 : 0;
return (((path.length() >= (start + 2)) && ((!slashed) || (path.charAt(0) == '/'))) && (path.charAt(start + 1) == ':')) && (((path.charAt(start) >= 'A') && (path.charAt(start) <= 'Z')) || ((path.charAt(start) >= 'a') && (path.charAt(start) <= 'z')));
} | 3.26 |
flink_Path_getFileSystem_rdh | /**
* Returns the FileSystem that owns this Path.
*
* @return the FileSystem that owns this Path
* @throws IOException
* thrown if the file system could not be retrieved
*/
public FileSystem getFileSystem() throws IOException {
return FileSystem.get(this.toUri());
} | 3.26 |
flink_Path_initialize_rdh | /**
* Initializes a path object given the scheme, authority and path string.
*
* @param scheme
* the scheme string.
* @param authority
* the authority string.
* @param path
* the path string.
 */
private void initialize(String scheme, String authority, String path) {
try {
this.uri = new URI(scheme, authority, normalizePath(path), null, null).normalize();
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
} | 3.26 |
flink_Path_depth_rdh | /**
* Returns the number of elements in this path.
*
* @return the number of elements in this path
*/
public int depth() {
String path = uri.getPath();
int depth = 0;
int slash = ((path.length() == 1) && (path.charAt(0) == '/')) ? -1 : 0;
while (slash != (-1)) {
depth++;
slash = path.indexOf(SEPARATOR, slash + 1);
}
return depth;
} | 3.26 |
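A small worked example of `depth()` that follows directly from the loop above: the root path has depth 0, and every further path component adds one.

```java
import org.apache.flink.core.fs.Path;

public class PathDepthExample {
    public static void main(String[] args) {
        // The root path has depth 0; every further path component adds one.
        System.out.println(new Path("/").depth());                 // 0
        System.out.println(new Path("/user").depth());             // 1
        System.out.println(new Path("/user/flink/conf").depth());  // 3
    }
}
```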
flink_Path_suffix_rdh | /**
* Adds a suffix to the final name in the path.
*
* @param suffix
* The suffix to be added
* @return the new path including the suffix
*/
public Path suffix(String suffix) {
return new Path(getParent(), getName() + suffix);
} | 3.26 |
flink_Path_getParent_rdh | /**
* Returns the parent of a path, i.e., everything that precedes the last separator or <code>null
* </code> if at root.
*
* @return the parent of a path or <code>null</code> if at root.
*/
public Path getParent() {
final String path = uri.getPath();
final int v15 = path.lastIndexOf('/');
final int start = (hasWindowsDrive(path, true)) ? 3 : 0;
if ((path.length() == start) || // empty path
((v15 == start) && (path.length() == (start + 1)))) {
// at root
return null;
}
String parent;
if (v15 == (-1)) {
parent = CUR_DIR;
} else {
final int end = (hasWindowsDrive(path, true)) ? 3 : 0;
parent = path.substring(0, v15 == end ? end + 1 : v15);
}
return new Path(uri.getScheme(), uri.getAuthority(), parent);
} | 3.26 |
flink_Path_normalizePath_rdh | /**
* Normalizes a path string.
*
* @param path
* the path string to normalize
* @return the normalized path string
*/
private String normalizePath(String path) {
// remove consecutive slashes & backslashes
path = path.replace("\\", "/");
path = path.replaceAll("/+", "/");
// remove trailing separator, unless the path is the UNIX root path ("/")
// or a Windows root path (e.g. "C:/")
if ((path.endsWith(SEPARATOR) && (!path.equals(SEPARATOR))) && (!WINDOWS_ROOT_DIR_REGEX.matcher(path).matches())) {
// remove trailing slash
path = path.substring(0, path.length() - SEPARATOR.length());
}
return path;
} | 3.26 |
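`normalizePath` is private, but its effect is observable through the public `Path` constructor, which runs the raw string through it (see `initialize` above). A minimal sketch, assuming that constructor-time normalization:

```java
import org.apache.flink.core.fs.Path;

public class PathNormalizationExample {
    public static void main(String[] args) {
        // Duplicate slashes collapse and the trailing separator is dropped,
        // while the root path itself is left untouched.
        System.out.println(new Path("/data//logs/").toUri().getPath()); // /data/logs
        System.out.println(new Path("/").toUri().getPath());            // /
    }
}
```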
flink_Path_makeQualified_rdh | /**
* Returns a qualified path object.
*
* @param fs
* the FileSystem that should be used to obtain the current working directory
* @return the qualified path object
*/
public Path makeQualified(FileSystem fs) {
Path path = this;
if (!isAbsolute()) {
path = new Path(fs.getWorkingDirectory(), this);
}
final URI pathUri = path.toUri();
final URI fsUri = fs.getUri();
String scheme = pathUri.getScheme();
String authority = pathUri.getAuthority();
if ((scheme != null) && ((authority != null) || (fsUri.getAuthority() == null))) {
return path;
}
if (scheme == null) {
scheme = fsUri.getScheme();
}
if (authority == null) {
authority = fsUri.getAuthority();
if (authority == null) {
authority = "";
}
}
return new Path((((scheme + ":") + "//") + authority) + pathUri.getPath());
}
// ------------------------------------------------------------------------
// Legacy Serialization
// ------------------------------------------------------------------------
/**
* Read uri from {@link DataInputView}.
*
* @param in
* the input view to read the uri.
* @throws IOException
* if an error happened.
* @deprecated the method is deprecated since Flink 1.19 because Path will no longer implement
{@link IOReadableWritable} in future versions. Please use {@code deserializeFromDataInputView} | 3.26 |
flink_JobStatus_isTerminalState_rdh | /**
* Checks whether this state is <i>locally terminal</i>. Locally terminal refers to the state of
* a job's execution graph within an executing JobManager. If the execution graph is locally
* terminal, the JobManager will not continue executing or recovering the job.
*
* <p>The only state that is locally terminal, but not globally terminal is {@link #SUSPENDED},
* which is typically entered when the executing JobManager loses its leader status.
*
* @return True, if this job status is terminal, false otherwise.
*/
public boolean isTerminalState() {
return terminalState != TerminalState.NON_TERMINAL; } | 3.26 |
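A brief sketch of how the terminal-state checks behave for a few `JobStatus` values; this assumes the enum lives in `org.apache.flink.api.common.JobStatus` and also exposes `isGloballyTerminalState()`.

```java
import org.apache.flink.api.common.JobStatus;

public class JobStatusExample {
    public static void main(String[] args) {
        System.out.println(JobStatus.RUNNING.isTerminalState());   // false
        System.out.println(JobStatus.FINISHED.isTerminalState());  // true

        // SUSPENDED is locally terminal but not globally terminal.
        System.out.println(JobStatus.SUSPENDED.isTerminalState());         // true
        System.out.println(JobStatus.SUSPENDED.isGloballyTerminalState()); // false
    }
}
```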
flink_JoinInputSideSpec_withoutUniqueKey_rdh | /**
 * Creates a {@link JoinInputSideSpec} whose input does not have a unique key.
*/
public static JoinInputSideSpec withoutUniqueKey() {
return new JoinInputSideSpec(false, null, null);
} | 3.26 |
flink_JoinInputSideSpec_joinKeyContainsUniqueKey_rdh | /**
* Returns true if the join key contains the unique key of the input.
 */
public boolean joinKeyContainsUniqueKey() {
return joinKeyContainsUniqueKey;
} | 3.26 |
flink_JoinInputSideSpec_hasUniqueKey_rdh | /**
 * Returns true if the input has a unique key, otherwise false.
 */
public boolean hasUniqueKey() {
return inputSideHasUniqueKey;
} | 3.26 |
flink_JoinInputSideSpec_withUniqueKeyContainedByJoinKey_rdh | /**
 * Creates a {@link JoinInputSideSpec} whose input has a unique key and the unique key is
 * contained in the join key.
*
* @param uniqueKeyType
* type information of the unique key
* @param uniqueKeySelector
* key selector to extract unique key from the input row
*/
public static JoinInputSideSpec withUniqueKeyContainedByJoinKey(InternalTypeInfo<RowData> uniqueKeyType, KeySelector<RowData, RowData> uniqueKeySelector) {
checkNotNull(uniqueKeyType);
checkNotNull(uniqueKeySelector);
return new JoinInputSideSpec(true, uniqueKeyType, uniqueKeySelector);
} | 3.26 |
flink_JoinInputSideSpec_getUniqueKeyType_rdh | /**
 * Returns the {@link TypeInformation} of the unique key. Returns null if the input does not
 * have a unique key.
*/
@Nullable
public InternalTypeInfo<RowData> getUniqueKeyType() {
return uniqueKeyType;
}
/**
* Returns the {@link KeySelector} | 3.26 |
flink_JoinInputSideSpec_m0_rdh | /**
 * Creates a {@link JoinInputSideSpec} whose input has a unique key.
*
* @param uniqueKeyType
* type information of the unique key
* @param uniqueKeySelector
* key selector to extract unique key from the input row
*/
public static JoinInputSideSpec m0(InternalTypeInfo<RowData> uniqueKeyType, KeySelector<RowData, RowData> uniqueKeySelector) {
checkNotNull(uniqueKeyType);
checkNotNull(uniqueKeySelector);
return new JoinInputSideSpec(false, uniqueKeyType, uniqueKeySelector);
} | 3.26 |
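A hedged sketch of choosing between these factories, assuming the class lives in `org.apache.flink.table.runtime.operators.join` and that `m0` corresponds to a plain `withUniqueKey`-style factory in the upstream sources; the key type, selector, and imports below are illustrative assumptions.

```java
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.runtime.operators.join.JoinInputSideSpec;
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.RowType;

public class JoinInputSideSpecExample {
    public static void main(String[] args) {
        // Assume the unique key is the first field (an INT) of the input row.
        InternalTypeInfo<RowData> uniqueKeyType = InternalTypeInfo.of(RowType.of(new IntType()));
        KeySelector<RowData, RowData> uniqueKeySelector = row -> GenericRowData.of(row.getInt(0));

        // Pick the factory that matches how the unique key relates to the join key.
        JoinInputSideSpec spec =
                JoinInputSideSpec.withUniqueKeyContainedByJoinKey(uniqueKeyType, uniqueKeySelector);

        System.out.println(spec.hasUniqueKey());             // true
        System.out.println(spec.joinKeyContainsUniqueKey()); // true
    }
}
```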
flink_ResourceProfile_merge_rdh | /**
* Calculates the sum of two resource profiles.
*
* @param other
* The other resource profile to add.
* @return The merged resource profile.
*/
@Nonnull
public ResourceProfile merge(final ResourceProfile other) {
checkNotNull(other, "Cannot merge with null resources");
if (equals(ANY) || other.equals(ANY)) {
return ANY;
}
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
Map<String, ExternalResource> resultExtendedResource = new HashMap<>(extendedResources);
other.extendedResources.forEach((String name,ExternalResource resource) -> {
resultExtendedResource.compute(name, (ignored, oldResource) -> oldResource == null ? resource : oldResource.merge(resource));
});
return new ResourceProfile(cpuCores.merge(other.cpuCores), taskHeapMemory.add(other.taskHeapMemory), taskOffHeapMemory.add(other.taskOffHeapMemory), f1.add(other.f1), networkMemory.add(other.networkMemory), resultExtendedResource);
} | 3.26 |
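A minimal sketch of merging two profiles, assuming a `ResourceProfile.newBuilder()` builder with `setCpuCores`/`setTaskHeapMemory` setters; the concrete resource figures are made up.

```java
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;

public class ResourceProfileMergeExample {
    public static void main(String[] args) {
        ResourceProfile small = ResourceProfile.newBuilder()
                .setCpuCores(1.0)
                .setTaskHeapMemory(MemorySize.ofMebiBytes(128))
                .build();
        ResourceProfile large = ResourceProfile.newBuilder()
                .setCpuCores(2.0)
                .setTaskHeapMemory(MemorySize.ofMebiBytes(256))
                .build();

        // merge(...) adds the profiles dimension by dimension; UNKNOWN and ANY
        // act as absorbing values, as the snippet above shows.
        ResourceProfile merged = small.merge(large);
        System.out.println(merged);
    }
}
```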
flink_ResourceProfile_readResolve_rdh | // ------------------------------------------------------------------------
// serialization
// ------------------------------------------------------------------------
private Object readResolve() {
// try to preserve the singleton property for UNKNOWN and ANY
if (this.equals(UNKNOWN)) {
return UNKNOWN;
}
if (this.equals(ANY)) {
return ANY;
}
return this;
} | 3.26 |
flink_ResourceProfile_getManagedMemory_rdh | /**
* Get the managed memory needed.
*
* @return The managed memory
*/
public MemorySize getManagedMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return f1;
} | 3.26 |
flink_ResourceProfile_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
int v1 = Objects.hashCode(cpuCores);
v1 = (31 * v1) + Objects.hashCode(taskHeapMemory);
v1 = (31 * v1) + Objects.hashCode(taskOffHeapMemory);
v1 = (31 * v1) + Objects.hashCode(f1);
v1 = (31 * v1) + Objects.hashCode(networkMemory);
v1 = (31 * v1) + extendedResources.hashCode();
return v1;
} | 3.26 |
flink_ResourceProfile_getOperatorsMemory_rdh | /**
* Get the memory the operators needed.
*
* @return The operator memory
*/
public MemorySize getOperatorsMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return taskHeapMemory.add(taskOffHeapMemory).add(f1);
} | 3.26 |
flink_ResourceProfile_setExtendedResource_rdh | /**
* Add the given extended resource. The old value with the same resource name will be
* replaced if present.
*/
public Builder setExtendedResource(ExternalResource extendedResource) {
this.extendedResources.put(extendedResource.getName(), extendedResource);
return this;
} | 3.26 |
flink_ResourceProfile_getCpuCores_rdh | // ------------------------------------------------------------------------
/**
* Get the cpu cores needed.
*
* @return The cpu cores, 1.0 means a full cpu thread
*/
public CPUResource getCpuCores() {
throwUnsupportedOperationExceptionIfUnknown();
return cpuCores;
} | 3.26 |
flink_ResourceProfile_getTotalMemory_rdh | /**
* Get the total memory needed.
*
* @return The total memory
*/
public MemorySize getTotalMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return getOperatorsMemory().add(networkMemory);
} | 3.26 |
flink_ResourceProfile_getTaskOffHeapMemory_rdh | /**
* Get the task off-heap memory needed.
*
* @return The task off-heap memory
*/
public MemorySize getTaskOffHeapMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return taskOffHeapMemory;
} | 3.26 |
flink_ResourceProfile_getExtendedResources_rdh | /**
* Get the extended resources.
*
* @return The extended resources
*/
public Map<String, ExternalResource> getExtendedResources() {
throwUnsupportedOperationExceptionIfUnknown();
return Collections.unmodifiableMap(extendedResources);
} | 3.26 |
flink_ResourceProfile_getNetworkMemory_rdh | /**
* Get the network memory needed.
*
* @return The network memory
*/
public MemorySize getNetworkMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return networkMemory;
} | 3.26 |
flink_ResourceProfile_fromResourceSpec_rdh | // ------------------------------------------------------------------------
// factories
// ------------------------------------------------------------------------
@VisibleForTesting
static ResourceProfile fromResourceSpec(ResourceSpec resourceSpec) {
return fromResourceSpec(resourceSpec, MemorySize.ZERO);
} | 3.26 |
flink_ResourceProfile_setExtendedResources_rdh | /**
* Add the given extended resources. This will discard all the previous added extended
* resources.
*/
public Builder setExtendedResources(Collection<ExternalResource> extendedResources) {
this.extendedResources = extendedResources.stream().collect(Collectors.toMap(ExternalResource::getName, Function.identity()));
return this;
} | 3.26 |
flink_ResourceProfile_allFieldsNoLessThan_rdh | /**
* Check whether all fields of this resource profile are no less than the given resource
* profile.
*
 * <p>This is not the same as comparing the total resources. It returns true iff each resource
 * field (cpu, task heap memory, managed memory, etc.) is no less than the respective field of
* the given profile.
*
* <p>For example, assume that this profile has 1 core, 50 managed memory and 100 heap memory.
*
* <ol>
* <li>The comparison will return false if the other profile has 2 core, 10 managed memory and
* 1000 heap memory.
* <li>The comparison will return true if the other profile has 1 core, 50 managed memory and
* 150 heap memory.
* </ol>
*
* @param other
* the other resource profile
* @return true if all fields of this are no less than the other's, otherwise false
*/
public boolean allFieldsNoLessThan(final ResourceProfile other) {
checkNotNull(other, "Cannot compare null resources");
if (this.equals(ANY)) {
return true;
}
if (this.equals(other)) {
return true;
}
if (this.equals(UNKNOWN)) {
return false;
}
if (other.equals(UNKNOWN)) {
return true;
}
if (((((cpuCores.getValue().compareTo(other.cpuCores.getValue()) >= 0) && (taskHeapMemory.compareTo(other.taskHeapMemory) >= 0)) && (taskOffHeapMemory.compareTo(other.taskOffHeapMemory) >= 0)) && (f1.compareTo(other.f1) >= 0)) && (networkMemory.compareTo(other.networkMemory) >= 0)) {
for (Map.Entry<String, ExternalResource> resource : other.extendedResources.entrySet()) {
if ((!extendedResources.containsKey(resource.getKey())) || (extendedResources.get(resource.getKey()).getValue().compareTo(resource.getValue().getValue()) < 0)) {
return false;
}
}
return true;
}
return false;
} | 3.26 |
flink_ResourceProfile_isMatching_rdh | /**
* Check whether required resource profile can be matched.
*
* @param required
* the required resource profile
* @return true if the requirement is matched, otherwise false
*/
public boolean isMatching(final ResourceProfile required) {
checkNotNull(required, "Cannot check matching with null resources");
throwUnsupportedOperationExceptionIfUnknown();
if (this.equals(ANY)) {
return true;
}
if (this.equals(required)) {
return true;
}
if (required.equals(UNKNOWN)) {
return true;
}
return false;
} | 3.26 |
flink_ResourceProfile_getTaskHeapMemory_rdh | /**
* Get the task heap memory needed.
*
* @return The task heap memory
*/
public MemorySize getTaskHeapMemory() {
throwUnsupportedOperationExceptionIfUnknown();
return taskHeapMemory;
} | 3.26 |
flink_RecordsBySplits_addFinishedSplits_rdh | /**
* Mark multiple splits with the given IDs as finished.
*
* @param splitIds
* the IDs of the finished splits.
*/
public void addFinishedSplits(Collection<String> splitIds) {
finishedSplits.addAll(splitIds);
} | 3.26 |
flink_RecordsBySplits_addFinishedSplit_rdh | /**
* Mark the split with the given ID as finished.
*
* @param splitId
* the ID of the finished split.
 */
public void addFinishedSplit(String splitId) {
finishedSplits.add(splitId);
} | 3.26 |
flink_RecordsBySplits_add_rdh | /**
* Add the record from the given source split.
*
* @param split
* the source split the record was from.
* @param record
* the record to add.
*/
public void add(SourceSplit split, E record) {
add(split.splitId(), record);
} | 3.26 |
flink_RecordsBySplits_addAll_rdh | /**
* Add multiple records from the given source split.
*
* @param split
* the source split the records were from.
* @param records
* the records to add.
*/
public void addAll(SourceSplit split, Collection<E> records) {
addAll(split.splitId(), records);
} | 3.26 |
flink_EnumTriangles_main_rdh | // *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
LOGGER.warn(DATASET_DEPRECATION_INFO);
// Checking input parameters
final ParameterTool params = ParameterTool.fromArgs(args);
// set up execution environment
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
// read input data
DataSet<Edge> edges;
if (params.has("edges")) {
edges = env.readCsvFile(params.get("edges")).fieldDelimiter(" ").includeFields(true, true).types(Integer.class, Integer.class).map(new TupleEdgeConverter());
} else {
System.out.println("Executing EnumTriangles example with default edges data set.");
System.out.println("Use --edges to specify file input.");
edges = EnumTrianglesData.getDefaultEdgeDataSet(env);
}
// project edges by vertex id
DataSet<Edge> edgesById = edges.map(new EdgeByIdProjector());
DataSet<Triad> triangles = // filter triads
// build triads
edgesById.groupBy(Edge.V1).sortGroup(Edge.V2, Order.ASCENDING).reduceGroup(new TriadBuilder()).join(edgesById).where(Triad.V2, Triad.V3).equalTo(Edge.V1, Edge.V2).with(new TriadFilter());
// emit result
if (params.has("output")) {
triangles.writeAsCsv(params.get("output"), "\n", ",");
// execute program
env.execute("Basic Triangle Enumeration Example");
} else {
System.out.println("Printing result to stdout. Use --output to specify output path.");
triangles.print();
}
} | 3.26 |
flink_FsCheckpointStorageLocation_createMetadataOutputStream_rdh | // ------------------------------------------------------------------------
// checkpoint metadata
// ------------------------------------------------------------------------
@Override
public CheckpointMetadataOutputStream createMetadataOutputStream() throws IOException {
return new FsCheckpointMetadataOutputStream(fileSystem, metadataFilePath, checkpointDirectory);
} | 3.26 |
flink_FsCheckpointStorageLocation_toString_rdh | // ------------------------------------------------------------------------
@Override
public String toString() {
return (((((((((((((((("FsCheckpointStorageLocation {" + "fileSystem=") + fileSystem) + ", checkpointDirectory=") + checkpointDirectory) + ", sharedStateDirectory=") + sharedStateDirectory) + ", taskOwnedStateDirectory=") + taskOwnedStateDirectory) + ", metadataFilePath=") + metadataFilePath) + ", reference=") + reference) + ", fileStateSizeThreshold=") + fileStateSizeThreshold) + ", writeBufferSize=") + writeBufferSize) + '}';
} | 3.26 |
flink_FsCheckpointStorageLocation_getCheckpointDirectory_rdh | // ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public Path getCheckpointDirectory() {
return checkpointDirectory;
} | 3.26 |
flink_GenericArrayData_anyNull_rdh | // ------------------------------------------------------------------------------------------
// Conversion Utilities
// ------------------------------------------------------------------------------------------
private boolean anyNull() {
for (Object element : ((Object[]) (array))) {
if (element == null) {
return true;
}
}
return false;
} | 3.26 |
flink_GenericArrayData_getBoolean_rdh | // ------------------------------------------------------------------------------------------
// Read-only accessor methods
// ------------------------------------------------------------------------------------------
@Override
public boolean getBoolean(int pos) {
return isPrimitiveArray ? ((boolean[]) (array))[pos] : ((boolean) (getObject(pos)));
} | 3.26 |
flink_GenericArrayData_toObjectArray_rdh | /**
* Converts this {@link GenericArrayData} into an array of Java {@link Object}.
*
* <p>The method will convert a primitive array into an object array. But it will not convert
* internal data structures into external data structures (e.g. {@link StringData} to {@link String}).
*/
public Object[] toObjectArray() {
if (isPrimitiveArray) {
Class<?> arrayClass = array.getClass();
if (int[].class.equals(arrayClass)) {
return ArrayUtils.toObject(((int[]) (array)));
} else if (long[].class.equals(arrayClass)) {
return ArrayUtils.toObject(((long[]) (array)));
} else if (float[].class.equals(arrayClass)) {
return ArrayUtils.toObject(((float[]) (array)));
} else if (double[].class.equals(arrayClass)) {
return ArrayUtils.toObject(((double[]) (array)));
} else if (short[].class.equals(arrayClass)) {
return ArrayUtils.toObject(((short[]) (array)));
} else if (byte[].class.equals(arrayClass)) {
return ArrayUtils.toObject(((byte[]) (array)));
} else if (boolean[].class.equals(arrayClass)) {
return ArrayUtils.toObject(((boolean[]) (array)));
}
throw new RuntimeException("Unsupported primitive array: " + arrayClass);
} else {
return ((Object[]) (array));
}
} | 3.26 |
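A short usage sketch for `GenericArrayData#toObjectArray()`, assuming constructors for both primitive and object arrays as used below.

```java
import java.util.Arrays;
import org.apache.flink.table.data.GenericArrayData;

public class GenericArrayDataExample {
    public static void main(String[] args) {
        // Backed by a primitive int[]: toObjectArray() boxes it into an Integer[].
        GenericArrayData primitive = new GenericArrayData(new int[] {1, 2, 3});
        System.out.println(Arrays.toString(primitive.toObjectArray())); // [1, 2, 3]

        // Backed by an Object[]: the array is returned as-is, without converting
        // internal structures such as StringData into external ones.
        GenericArrayData boxed = new GenericArrayData(new Integer[] {4, null, 6});
        System.out.println(Arrays.toString(boxed.toObjectArray()));     // [4, null, 6]
    }
}
```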
flink_PojoSerializerSnapshotData_createFrom_rdh | /**
* Creates a {@link PojoSerializerSnapshotData} from existing snapshotted configuration of a
* {@link PojoSerializer}.
*/
static <T> PojoSerializerSnapshotData<T> createFrom(Class<T> pojoClass, Field[] fields, TypeSerializerSnapshot<?>[] existingFieldSerializerSnapshots, LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>> existingRegisteredSubclassSerializerSnapshots, Map<Class<?>, TypeSerializerSnapshot<?>> existingNonRegisteredSubclassSerializerSnapshots) {
final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots = new LinkedOptionalMap<>(fields.length);
for (int v7 = 0; v7 < fields.length; v7++) {
Field field = fields[v7];
String fieldName = (field == null) ? getDummyNameForMissingField(v7) : field.getName();
fieldSerializerSnapshots.put(fieldName, field, existingFieldSerializerSnapshots[v7]);
}
return new PojoSerializerSnapshotData<>(pojoClass, fieldSerializerSnapshots, optionalMapOf(existingRegisteredSubclassSerializerSnapshots, Class::getName), optionalMapOf(existingNonRegisteredSubclassSerializerSnapshots, Class::getName));
} | 3.26 |
flink_PojoSerializerSnapshotData_writeSnapshotData_rdh | // ---------------------------------------------------------------------------------------------
// Snapshot data read / write methods
// ---------------------------------------------------------------------------------------------
void writeSnapshotData(DataOutputView out) throws IOException {
out.writeUTF(pojoClass.getName());
writeOptionalMap(out, fieldSerializerSnapshots, PojoFieldUtils::writeField, TypeSerializerSnapshot::writeVersionedSnapshot);
writeOptionalMap(out, registeredSubclassSerializerSnapshots, NoOpWriter.noopWriter(), TypeSerializerSnapshot::writeVersionedSnapshot);
writeOptionalMap(out, nonRegisteredSubclassSerializerSnapshots, NoOpWriter.noopWriter(), TypeSerializerSnapshot::writeVersionedSnapshot);
} | 3.26 |
flink_PojoSerializerSnapshotData_getPojoClass_rdh | // ---------------------------------------------------------------------------------------------
// Snapshot data accessors
// ---------------------------------------------------------------------------------------------
Class<T> getPojoClass() {
return pojoClass;
} | 3.26 |
flink_PojoSerializerSnapshotData_getDummyNameForMissingField_rdh | // ---------------------------------------------------------------------------------------------
// Utilities
// ---------------------------------------------------------------------------------------------
private static String getDummyNameForMissingField(int fieldIndex) {
return String.format("missing-field-at-%d", fieldIndex);
} | 3.26 |
flink_FlinkFilterJoinRule_perform_rdh | // ~ Methods ----------------------------------------------------------------
protected void perform(RelOptRuleCall call, Filter filter, Join join) {
final List<RexNode> joinFilters = RelOptUtil.conjunctions(join.getCondition());
final List<RexNode> origJoinFilters = ImmutableList.copyOf(joinFilters);
// If there is only the joinRel,
// make sure it does not match a cartesian product joinRel
// (with "true" condition), otherwise this rule will be applied
// again on the new cartesian product joinRel.
if ((filter == null) && joinFilters.isEmpty()) {
return;
}
final List<RexNode> aboveFilters = (filter != null) ? getConjunctions(filter) : new ArrayList<>();
final ImmutableList<RexNode> origAboveFilters = ImmutableList.copyOf(aboveFilters);
// Simplify Outer Joins
JoinRelType joinType = join.getJoinType();
if ((config.isSmart() && (!origAboveFilters.isEmpty())) && (join.getJoinType() != JoinRelType.INNER)) {
joinType = RelOptUtil.simplifyJoin(join, origAboveFilters, joinType);
}
final List<RexNode> leftFilters = new ArrayList<>();
final List<RexNode> rightFilters = new ArrayList<>();
// TODO - add logic to derive additional filters. E.g., from
// (t1.a = 1 AND t2.a = 2) OR (t1.b = 3 AND t2.b = 4), you can
// derive table filters:
// (t1.a = 1 OR t1.b = 3)
// (t2.a = 2 OR t2.b = 4)
// Try to push down above filters. These are typically where clause
// filters. They can be pushed down if they are not on the NULL
// generating side.
boolean filterPushed = false;
if (RelOptUtil.classifyFilters(join, aboveFilters, joinType.canPushIntoFromAbove(), joinType.canPushLeftFromAbove(), joinType.canPushRightFromAbove(), joinFilters, leftFilters, rightFilters)) {
filterPushed = true;
}
// Move join filters up if needed
validateJoinFilters(aboveFilters, joinFilters, join, joinType);
// If no filter got pushed after validate, reset filterPushed flag
if (((leftFilters.isEmpty() && rightFilters.isEmpty()) && (joinFilters.size() == origJoinFilters.size())) && (aboveFilters.size() == origAboveFilters.size())) {
if (Sets.newHashSet(joinFilters).equals(Sets.newHashSet(origJoinFilters))) {
filterPushed = false;
}
}
// Try to push down filters in ON clause. A ON clause filter can only be
// pushed down if it does not affect the non-matching set, i.e. it is
// not on the side which is preserved.
// Anti-join on conditions can not be pushed into left or right, e.g. for plan:
//
// Join(condition=[AND(cond1, $2)], joinType=[anti])
// : - prj(f0=[$0], f1=[$1], f2=[$2])
// : - prj(f0=[$0])
//
// The semantic would change if join condition $2 is pushed into left,
// that is, the result set may be smaller. The right can not be pushed
// into for the same reason.
if (RelOptUtil.classifyFilters(join, joinFilters, false, joinType.canPushLeftFromWithin(), joinType.canPushRightFromWithin(), joinFilters, leftFilters, rightFilters)) {
filterPushed = true;
}
// if nothing actually got pushed and there is nothing leftover,
// then this rule is a no-op
if (((!filterPushed) && (joinType == join.getJoinType())) || ((joinFilters.isEmpty() && leftFilters.isEmpty()) && rightFilters.isEmpty())) {
return;
}
final RexBuilder rexBuilder = join.getCluster().getRexBuilder();
// create the new join node referencing the new children and
// containing its new join filters (if there are any)
final ImmutableList<RelDataType> fieldTypes = ImmutableList.<RelDataType>builder().addAll(RelOptUtil.getFieldTypeList(join.getLeft().getRowType())).addAll(RelOptUtil.getFieldTypeList(join.getRight().getRowType())).build();
final RexNode joinFilter = RexUtil.composeConjunction(rexBuilder, RexUtil.fixUp(rexBuilder, joinFilters, fieldTypes));
// push above filters to another side for INNER, LEFT, RIGHT join
pushFiltersToAnotherSide(join, joinType, origAboveFilters, joinFilter, leftFilters, rightFilters, Arrays.asList(JoinRelType.INNER, JoinRelType.LEFT, JoinRelType.RIGHT));
// push join filters to another side for INNER join
// do not derive JoinInfo
pushFiltersToAnotherSide(join, joinType, origJoinFilters, null, leftFilters, rightFilters, Collections.singletonList(JoinRelType.INNER));
// create Filters on top of the children if any filters were
// pushed to them
final RelBuilder relBuilder = call.builder();
final RelNode leftRel = relBuilder.push(join.getLeft()).filter(leftFilters).build();
final RelNode rightRel = relBuilder.push(join.getRight()).filter(rightFilters).build();
// If nothing actually got pushed and there is nothing leftover,
// then this rule is a no-op
if (((joinFilter.isAlwaysTrue() && leftFilters.isEmpty()) && rightFilters.isEmpty()) && (joinType == join.getJoinType())) {
return;
}
RelNode newJoinRel = join.copy(join.getTraitSet(), joinFilter, leftRel, rightRel, joinType, join.isSemiJoinDone());
call.getPlanner().onCopy(join, newJoinRel);
if (!leftFilters.isEmpty()) {
call.getPlanner().onCopy(filter, leftRel);
}
if (!rightFilters.isEmpty()) {
call.getPlanner().onCopy(filter, rightRel);
}
relBuilder.push(newJoinRel);
// Create a project on top of the join if some of the columns have become
// NOT NULL due to the join-type getting stricter.
relBuilder.convert(join.getRowType(), false);
// create a FilterRel on top of the join if needed
relBuilder.filter(RexUtil.fixUp(rexBuilder, aboveFilters, RelOptUtil.getFieldTypeList(relBuilder.peek().getRowType())));
call.transformTo(relBuilder.build());
} | 3.26 |
flink_FlinkFilterJoinRule_getConjunctions_rdh | /**
* Get conjunctions of filter's condition but with collapsed {@code IS NOT DISTINCT FROM}
* expressions if needed.
*
* @param filter
* filter containing condition
* @return condition conjunctions with collapsed {@code IS NOT DISTINCT FROM} expressions if any
* @see RelOptUtil#conjunctions(RexNode)
*/
private List<RexNode> getConjunctions(Filter filter) {
List<RexNode> conjunctions = conjunctions(filter.getCondition());
RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
for (int i = 0; i < conjunctions.size(); i++) {
RexNode node = conjunctions.get(i);
if (node instanceof RexCall) {
conjunctions.set(i, RelOptUtil.collapseExpandedIsNotDistinctFromExpr(((RexCall) (node)), rexBuilder));
}
}
return conjunctions;
} | 3.26 |
flink_FlinkFilterJoinRule_isSmart_rdh | /**
* Whether to try to strengthen join-type, default false.
*/
@Value.Default
default boolean isSmart() {
return false;
} | 3.26 |
flink_FlinkFilterJoinRule_validateJoinFilters_rdh | /**
* Validates that target execution framework can satisfy join filters.
*
* <p>If the join filter cannot be satisfied (for example, if it is {@code l.c1 > r.c2} and the
* join only supports equi-join), removes the filter from {@code joinFilters} and adds it to
* {@code aboveFilters}.
*
* <p>The default implementation does nothing; i.e. the join can handle all conditions.
*
* @param aboveFilters
* Filter above Join
* @param joinFilters
* Filters in join condition
* @param join
* Join
* @param joinType
* JoinRelType could be different from type in Join due to outer join
* simplification.
*/
protected void validateJoinFilters(List<RexNode> aboveFilters, List<RexNode> joinFilters, Join join, JoinRelType joinType) {
final Iterator<RexNode> filterIter = joinFilters.iterator();
while (filterIter.hasNext()) {
RexNode exp = filterIter.next();
// Do not pull up filter conditions for semi/anti join.
if ((!config.getPredicate().apply(join, joinType, exp)) && joinType.projectsRight()) {
aboveFilters.add(exp);
filterIter.remove();
}
}
} | 3.26 |
flink_LogicalSnapshot_create_rdh | /**
* Creates a LogicalSnapshot.
*/
public static LogicalSnapshot create(RelNode input, RexNode period) {
final RelOptCluster cluster = input.getCluster();
final RelMetadataQuery mq = cluster.getMetadataQuery();
final RelTraitSet traitSet = cluster.traitSet().replace(Convention.NONE).replaceIfs(RelCollationTraitDef.INSTANCE, () -> RelMdCollation.snapshot(mq, input)).replaceIf(RelDistributionTraitDef.INSTANCE, () -> RelMdDistribution.snapshot(mq, input));
return new LogicalSnapshot(cluster, traitSet, input, period);
} | 3.26 |
flink_TableEnvironment_fromValues_rdh | /**
* Creates a Table from given collection of objects with a given row type.
*
* <p>The difference between this method and {@link #fromValues(Object...)} is that the schema
* can be manually adjusted. It might be helpful for assigning more generic types like e.g.
* DECIMAL or naming the columns.
*
* <p>Examples:
*
* <pre>{@code tEnv.fromValues(
* DataTypes.ROW(
* DataTypes.FIELD("id", DataTypes.DECIMAL(10, 2)),
* DataTypes.FIELD("name", DataTypes.STRING())
* ),
* row(1, "ABC"),
* row(2L, "ABCDE")
* )}</pre>
*
* <p>will produce a Table with a schema as follows:
*
* <pre>{@code root
* |-- id: DECIMAL(10, 2)
* |-- name: STRING}</pre>
*
* <p>For more examples see {@link #fromValues(Object...)}.
*
* @param rowType
* Expected row type for the values.
* @param values
* Expressions for constructing rows of the VALUES table.
* @see #fromValues(Object...)
*/
default Table fromValues(AbstractDataType<?> rowType, Object... values) {
// It is necessary here to implement TableEnvironment#fromValues(Object...) for
// BatchTableEnvImpl.
// In scala varargs are translated to Seq. Due to the type erasure Seq<Expression> and
// Seq<Object>
// are the same. It is not a problem in java as varargs in java are translated to an array.
return fromValues(rowType, Arrays.asList(values));
} | 3.26 |
flink_TableEnvironment_create_rdh | /**
* Creates a table environment that is the entry point and central context for creating Table
* and SQL API programs.
*
* <p>It is unified both on a language level for all JVM-based languages (i.e. there is no
* distinction between Scala and Java API) and for bounded and unbounded data processing.
*
* <p>A table environment is responsible for:
*
* <ul>
* <li>Connecting to external systems.
* <li>Registering and retrieving {@link Table}s and other meta objects from a catalog.
* <li>Executing SQL statements.
* <li>Offering further configuration options.
* </ul>
*
* <p>Note: This environment is meant for pure table programs. If you would like to convert from
* or to other Flink APIs, it might be necessary to use one of the available language-specific
* table environments in the corresponding bridging modules.
*
* @param settings
* The environment settings used to instantiate the {@link TableEnvironment}.
*/
static TableEnvironment create(EnvironmentSettings settings) {
return TableEnvironmentImpl.create(settings);
}
/**
* Creates a table environment that is the entry point and central context for creating Table
* and SQL API programs.
*
* <p>It is unified both on a language level for all JVM-based languages (i.e. there is no
* distinction between Scala and Java API) and for bounded and unbounded data processing.
*
* <p>A table environment is responsible for:
*
* <ul>
* <li>Connecting to external systems.
* <li>Registering and retrieving {@link Table}s and other meta objects from a catalog.
* <li>Executing SQL statements.
* <li>Offering further configuration options.
* </ul>
*
* <p>Note: This environment is meant for pure table programs. If you would like to convert from
* or to other Flink APIs, it might be necessary to use one of the available language-specific
* table environments in the corresponding bridging modules.
*
* @param configuration
* The specified options are used to instantiate the {@link TableEnvironment} | 3.26 |
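A minimal sketch of creating a unified `TableEnvironment` via `EnvironmentSettings` and running a statement against it; the `datagen` table definition below is illustrative only.

```java
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class TableEnvironmentExample {
    public static void main(String[] args) {
        // Build the settings first, then create the unified environment.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .build();
        TableEnvironment tEnv = TableEnvironment.create(settings);

        // The environment is now ready for DDL/DML statements.
        tEnv.executeSql("CREATE TABLE t (x INT) WITH ('connector' = 'datagen')");
        System.out.println(tEnv.explainSql("SELECT x FROM t"));
    }
}
```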
flink_TableEnvironment_explainSql_rdh | /**
* Returns the AST of the specified statement and the execution plan to compute the result of
* the given statement.
*
* @param statement
* The statement for which the AST and execution plan will be returned.
* @param extraDetails
* The extra explain details which the explain result should include, e.g.
* estimated cost, changelog mode for streaming, displaying execution plan in json format
* @return AST and the execution plan.
*/
default String explainSql(String statement, ExplainDetail... extraDetails) {
return explainSql(statement, ExplainFormat.TEXT, extraDetails);
} | 3.26 |
flink_TableEnvironment_executePlan_rdh | /**
* Shorthand for {@code tEnv.loadPlan(planReference).execute()}.
*
* @see #loadPlan(PlanReference)
* @see CompiledPlan#execute()
*/
@Experimental
default TableResult executePlan(PlanReference planReference) throws TableException {
return loadPlan(planReference).execute();
} | 3.26 |
flink_DynamicSourceUtils_createMetadataKeysToMetadataColumnsMap_rdh | /**
 * Returns a map that records the mapping from metadata keys to metadata columns in the input
 * schema.
*/
public static Map<String, MetadataColumn> createMetadataKeysToMetadataColumnsMap(ResolvedSchema schema) {
final List<MetadataColumn> metadataColumns = extractMetadataColumns(schema);
Map<String, MetadataColumn> metadataKeysToMetadataColumns = new HashMap<>();
for (MetadataColumn column : metadataColumns) {
String metadataKey = column.getMetadataKey().orElse(column.getName());
// After resolving, every metadata column has the unique metadata key.
metadataKeysToMetadataColumns.put(metadataKey, column);
}
return metadataKeysToMetadataColumns;
} | 3.26 |
flink_DynamicSourceUtils_m0_rdh | /**
* Creates a projection that adds computed columns and finalizes the table schema.
*/
private static void m0(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
final ExpressionConverter converter = new ExpressionConverter(relBuilder);
final List<RexNode> projection = schema.getColumns().stream().map(c -> {
if (c instanceof ComputedColumn) {
final ComputedColumn computedColumn = ((ComputedColumn) (c));
return computedColumn.getExpression().accept(converter);
} else {
return relBuilder.field(c.getName());
}
}).collect(Collectors.toList());
relBuilder.projectNamed(projection, schema.getColumns().stream().map(Column::getName).collect(Collectors.toList()), true);
} | 3.26 |
flink_DynamicSourceUtils_prepareDynamicSource_rdh | /**
 * Prepares the given {@link DynamicTableSource}. It checks whether the source is compatible with
* the given schema and applies initial parameters.
*/
public static void prepareDynamicSource(String tableDebugName, ResolvedCatalogTable table, DynamicTableSource source, boolean isBatchMode, ReadableConfig config, List<SourceAbilitySpec> sourceAbilities) {
final ResolvedSchema schema = table.getResolvedSchema();
validateAndApplyMetadata(tableDebugName, schema, source, sourceAbilities);
if (source instanceof ScanTableSource) {
validateScanSource(tableDebugName, schema, ((ScanTableSource) (source)), isBatchMode, config);
prepareRowLevelModificationScan(source);
}
// lookup table source is validated in LookupJoin node
} | 3.26 |
flink_DynamicSourceUtils_pushWatermarkAssigner_rdh | // --------------------------------------------------------------------------------------------
/**
* Creates a specialized node for assigning watermarks.
*/
private static void pushWatermarkAssigner(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
final ExpressionConverter converter = new ExpressionConverter(relBuilder);
final RelDataType inputRelDataType = relBuilder.peek().getRowType();
// schema resolver has checked before that only one spec exists
final WatermarkSpec watermarkSpec = schema.getWatermarkSpecs().get(0);
final String rowtimeColumn = watermarkSpec.getRowtimeAttribute();
final int rowtimeColumnIdx = inputRelDataType.getFieldNames().indexOf(rowtimeColumn);
final RexNode watermarkRexNode = watermarkSpec.getWatermarkExpression().accept(converter);
relBuilder.watermark(rowtimeColumnIdx, watermarkRexNode);
} | 3.26 |
flink_DynamicSourceUtils_createRequiredMetadataColumns_rdh | // TODO: isUpsertSource(), isSourceChangeEventsDuplicate()
/**
* Returns a list of required metadata columns. Ordered by the iteration order of {@link SupportsReadingMetadata#listReadableMetadata()}.
*
* <p>This method assumes that source and schema have been validated via {@link #prepareDynamicSource(String, ResolvedCatalogTable, DynamicTableSource, boolean,
* ReadableConfig, List)}.
*/
public static List<MetadataColumn> createRequiredMetadataColumns(ResolvedSchema schema, DynamicTableSource source) {
final Map<String, MetadataColumn> metadataKeysToMetadataColumns = createMetadataKeysToMetadataColumnsMap(schema);
final Map<String, DataType> metadataMap = extractMetadataMap(source);
// reorder the column
return metadataMap.keySet().stream().filter(metadataKeysToMetadataColumns::containsKey).map(metadataKeysToMetadataColumns::get).collect(Collectors.toList());
} | 3.26 |
flink_DynamicSourceUtils_convertSourceToRel_rdh | /**
* Converts a given {@link DynamicTableSource} to a {@link RelNode}. It adds helper projections
* if necessary.
*/
public static RelNode convertSourceToRel(boolean isBatchMode, ReadableConfig config, FlinkRelBuilder relBuilder, ContextResolvedTable contextResolvedTable, FlinkStatistic statistic, List<RelHint> hints, DynamicTableSource tableSource) {
final String tableDebugName = contextResolvedTable.getIdentifier().asSummaryString();
final ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
final List<SourceAbilitySpec> sourceAbilities = new ArrayList<>();
// 1. prepare table source
prepareDynamicSource(tableDebugName, resolvedCatalogTable, tableSource, isBatchMode, config, sourceAbilities);
// 2. push table scan
pushTableScan(isBatchMode, relBuilder, contextResolvedTable, statistic, hints, tableSource, sourceAbilities);
// 3. push project for non-physical columns
final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
if (!schema.getColumns().stream().allMatch(Column::isPhysical)) {
pushMetadataProjection(relBuilder, schema);
m0(relBuilder, schema);
}
// 4. push watermark assigner
if ((!isBatchMode) && (!schema.getWatermarkSpecs().isEmpty())) {
pushWatermarkAssigner(relBuilder, schema);
}
return relBuilder.build();
} | 3.26 |
flink_DynamicSourceUtils_isSourceChangeEventsDuplicate_rdh | /**
* Returns true if the table source produces duplicate change events.
 */
public static boolean isSourceChangeEventsDuplicate(ResolvedSchema resolvedSchema, DynamicTableSource tableSource, TableConfig tableConfig) {
if (!(tableSource instanceof ScanTableSource)) {
return false;
}
ChangelogMode mode = ((ScanTableSource) (tableSource)).getChangelogMode();
boolean isCDCSource = (!mode.containsOnly(RowKind.INSERT)) && (!isUpsertSource(resolvedSchema, tableSource));
boolean changeEventsDuplicate = tableConfig.get(ExecutionConfigOptions.TABLE_EXEC_SOURCE_CDC_EVENTS_DUPLICATE);
boolean hasPrimaryKey = resolvedSchema.getPrimaryKey().isPresent();
return (isCDCSource && changeEventsDuplicate) && hasPrimaryKey;
} | 3.26 |
flink_DynamicSourceUtils_pushMetadataProjection_rdh | /**
* Creates a projection that reorders physical and metadata columns according to the given
* schema. It casts metadata columns into the expected data type to be accessed by computed
* columns in the next step. Computed columns are ignored here.
*
* @see SupportsReadingMetadata
*/
private static void pushMetadataProjection(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
final RexBuilder rexBuilder = relBuilder.getRexBuilder();
final List<String> fieldNames = schema.getColumns().stream().filter(c -> !(c instanceof ComputedColumn)).map(Column::getName).collect(Collectors.toList());
final List<RexNode> fieldNodes = schema.getColumns().stream().filter(c -> !(c instanceof ComputedColumn)).map(c -> {
final RelDataType relDataType = relBuilder.getTypeFactory().createFieldTypeFromLogicalType(c.getDataType().getLogicalType());
if (c instanceof MetadataColumn) {
final MetadataColumn metadataColumn = ((MetadataColumn) (c));
String columnName = metadataColumn.getName();
return rexBuilder.makeAbstractCast(relDataType, relBuilder.field(columnName));
} else {
return relBuilder.field(c.getName());
}
}).collect(Collectors.toList());
relBuilder.projectNamed(fieldNodes, fieldNames, true);
} | 3.26 |
flink_DynamicSourceUtils_convertDataStreamToRel_rdh | /**
* Converts a given {@link DataStream} to a {@link RelNode}. It adds helper projections if
* necessary.
*/
public static RelNode convertDataStreamToRel(boolean isBatchMode, ReadableConfig config, FlinkRelBuilder relBuilder, ContextResolvedTable contextResolvedTable, DataStream<?> dataStream, DataType physicalDataType, boolean isTopLevelRecord, ChangelogMode changelogMode) {
final DynamicTableSource tableSource = new ExternalDynamicSource<>(contextResolvedTable.getIdentifier(), dataStream, physicalDataType, isTopLevelRecord, changelogMode);
final FlinkStatistic statistic = FlinkStatistic.unknown(contextResolvedTable.getResolvedSchema()).build();
return convertSourceToRel(isBatchMode, config, relBuilder, contextResolvedTable, statistic, Collections.emptyList(), tableSource);
} | 3.26 |
flink_DynamicSourceUtils_isUpsertSource_rdh | /**
* Returns true if the table is an upsert source.
*/
public static boolean isUpsertSource(ResolvedSchema resolvedSchema, DynamicTableSource tableSource) {
if (!(tableSource instanceof ScanTableSource)) {
return false;
}
ChangelogMode mode = ((ScanTableSource) (tableSource)).getChangelogMode();
boolean isUpsertMode = mode.contains(RowKind.UPDATE_AFTER) && (!mode.contains(RowKind.UPDATE_BEFORE));
boolean hasPrimaryKey = resolvedSchema.getPrimaryKey().isPresent();
return isUpsertMode && hasPrimaryKey;
} | 3.26 |
flink_DynamicSourceUtils_createProducedType_rdh | /**
* Returns the {@link DataType} that a source should produce as the input into the runtime.
*
* <p>The format looks as follows: {@code PHYSICAL COLUMNS + METADATA COLUMNS}
*
* <p>Physical columns use the table schema's name. Metadata column use the metadata key as
* name.
*/
public static RowType createProducedType(ResolvedSchema schema, DynamicTableSource source) {
final Map<String, DataType> metadataMap = extractMetadataMap(source);
final Stream<RowField> physicalFields = ((RowType) (schema.toPhysicalRowDataType().getLogicalType())).getFields().stream();
final Stream<RowField> metadataFields = createRequiredMetadataColumns(schema, source).stream().map(k -> // Use the alias to ensure that physical and
// metadata columns don't collide
new RowField(k.getName(), metadataMap.get(k.getMetadataKey().orElse(k.getName())).getLogicalType()));
final List<RowField> rowFields = Stream.concat(physicalFields, metadataFields).collect(Collectors.toList());
return new RowType(false, rowFields);
} | 3.26 |
flink_JobMaster_onStart_rdh | // ----------------------------------------------------------------------------------------------
// Lifecycle management
// ----------------------------------------------------------------------------------------------
@Override
protected void onStart() throws JobMasterException {
try {
startJobExecution();
} catch (Exception e) {
final JobMasterException jobMasterException = new JobMasterException("Could not start the JobMaster.", e);
handleJobMasterError(jobMasterException);
throw jobMasterException;
}
} | 3.26 |
flink_JobMaster_startJobExecution_rdh | // Internal methods
// ----------------------------------------------------------------------------------------------
// -- job starting and stopping
// -----------------------------------------------------------------
private void startJobExecution() throws Exception {
validateRunsInMainThread();
JobShuffleContext context = new JobShuffleContextImpl(jobGraph.getJobID(), this);
shuffleMaster.registerJob(context);
startJobMasterServices();
log.info("Starting execution of job '{}' ({}) under job master id {}.", jobGraph.getName(),
jobGraph.getJobID(), getFencingToken());
startScheduling();
} | 3.26 |
flink_JobMaster_onStop_rdh | /**
* Suspend the job and shutdown all other services including rpc.
*/
@Override
public CompletableFuture<Void> onStop() {
log.info("Stopping the JobMaster for job '{}' ({}).", jobGraph.getName(), jobGraph.getJobID());
// make sure there is a graceful exit
return stopJobExecution(new FlinkException(String.format("Stopping JobMaster for job '%s' (%s).", jobGraph.getName(), jobGraph.getJobID()))).exceptionally(exception -> {
throw new CompletionException(new JobMasterException("Could not properly stop the JobMaster.", exception));
});
} | 3.26 |
flink_JobMaster_handleJobMasterError_rdh | // ----------------------------------------------------------------------------------------------
private void handleJobMasterError(final Throwable cause) {
if (ExceptionUtils.isJvmFatalError(cause)) {
log.error("Fatal error occurred on JobManager.", cause);
// The fatal error handler implementation should make sure that this call is
// non-blocking
fatalErrorHandler.onFatalError(cause);
} else {
jobCompletionActions.jobMasterFailed(cause);
}
} | 3.26 |
flink_JobMaster_acknowledgeCheckpoint_rdh | // TODO: This method needs a leader session ID
@Override
public void acknowledgeCheckpoint(final JobID jobID, final ExecutionAttemptID executionAttemptID, final long checkpointId, final CheckpointMetrics checkpointMetrics, @Nullable
final SerializedValue<TaskStateSnapshot> checkpointState) {
schedulerNG.acknowledgeCheckpoint(jobID, executionAttemptID, checkpointId, checkpointMetrics, deserializeTaskStateSnapshot(checkpointState, getClass().getClassLoader()));
} | 3.26 |
flink_JobMaster_getGateway_rdh | // ----------------------------------------------------------------------------------------------
// Service methods
// ----------------------------------------------------------------------------------------------
@Override
public JobMasterGateway getGateway() {
return getSelfGateway(JobMasterGateway.class);
} | 3.26 |
flink_JobMaster_m0_rdh | /**
* Updates the task execution state for a given task.
*
* @param taskExecutionState
* New task execution state for a given task
* @return Acknowledge the task execution state update
*/
@Override
public CompletableFuture<Acknowledge> m0(final TaskExecutionState taskExecutionState) {
FlinkException taskExecutionException;
try {
checkNotNull(taskExecutionState, "taskExecutionState");
if (schedulerNG.updateTaskExecutionState(taskExecutionState)) {
return CompletableFuture.completedFuture(Acknowledge.get());
} else {
taskExecutionException = new ExecutionGraphException(("The execution attempt " + taskExecutionState.getID()) + " was not found.");
}
} catch (Exception e) {
taskExecutionException = new JobMasterException("Could not update the state of task execution for JobMaster.", e);
handleJobMasterError(taskExecutionException);
}
return FutureUtils.completedExceptionally(taskExecutionException);
} | 3.26 |
flink_JobMaster_cancel_rdh | // ----------------------------------------------------------------------------------------------
// RPC methods
// ----------------------------------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> cancel(Time timeout) {
schedulerNG.cancel();
return CompletableFuture.completedFuture(Acknowledge.get());
} | 3.26 |
flink_JobMaster_m1_rdh | // TODO: This method needs a leader session ID
@Override
public void m1(DeclineCheckpoint decline) {
schedulerNG.declineCheckpoint(decline);
} | 3.26 |
flink_DefaultExecutionTopology_containsIntraRegionAllToAllEdge_rdh | /**
* Check if the {@link DefaultLogicalPipelinedRegion} contains intra-region all-to-all edges or
* not.
*/
private static boolean containsIntraRegionAllToAllEdge(DefaultLogicalPipelinedRegion logicalPipelinedRegion) {
for (LogicalVertex vertex : logicalPipelinedRegion.getVertices()) {
for (LogicalEdge inputEdge : vertex.getInputs()) {
if ((inputEdge.getDistributionPattern() == DistributionPattern.ALL_TO_ALL) && logicalPipelinedRegion.contains(inputEdge.getProducerVertexId())) {
return true;
}
}
}
return false;
} | 3.26 |
flink_DefaultExecutionTopology_ensureCoLocatedVerticesInSameRegion_rdh | /**
* Co-location constraints are only used for iteration head and tail. A paired head and tail
* needs to be in the same pipelined region so that they can be restarted together.
*/
private static void ensureCoLocatedVerticesInSameRegion(List<DefaultSchedulingPipelinedRegion> pipelinedRegions, ExecutionGraph executionGraph) {
final Map<CoLocationConstraint, DefaultSchedulingPipelinedRegion> constraintToRegion = new HashMap<>();
for (DefaultSchedulingPipelinedRegion region : pipelinedRegions) {
for (DefaultExecutionVertex vertex : region.getVertices()) {
final CoLocationConstraint constraint = getCoLocationConstraint(vertex.getId(), executionGraph);
if (constraint != null) {
final DefaultSchedulingPipelinedRegion regionOfConstraint = constraintToRegion.get(constraint);
checkState((regionOfConstraint == null) || (regionOfConstraint == region), "co-located tasks must be in the same pipelined region");
constraintToRegion.putIfAbsent(constraint, region);
}
}
}
} | 3.26 |
flink_UserFacingMapState_get_rdh | // ------------------------------------------------------------------------
@Override
public V get(K key) throws Exception {
return originalState.get(key);
} | 3.26 |
flink_RichFunction_open_rdh | /**
* Initialization method for the function. It is called before the actual working methods (like
* <i>map</i> or <i>join</i>) and thus suitable for one time setup work. For functions that are
* part of an iteration, this method will be invoked at the beginning of each iteration
* superstep.
*
* <p>The openContext object passed to the function can be used for configuration and
* initialization. The openContext contains some necessary information that was configured on
* the function in the program composition.
*
* <pre>{@code public class MyFilter extends RichFilterFunction<String> {
*
* private String searchString;
*
* public void open(OpenContext openContext) {
* // initialize the value of searchString
* }
*
* public boolean filter(String value) {
* return value.equals(searchString);
* }
* }}</pre>
*
* <p>By default, this method does nothing.
*
* <p>1. If you implement {@code open(OpenContext openContext)}, it will be invoked and
* {@code open(Configuration parameters)} won't be invoked.
*
* <p>2. If you don't implement {@code open(OpenContext openContext)}, {@code open(Configuration parameters)}
* will be invoked in the default implementation of {@code open(OpenContext openContext)}.
*
* @param openContext
* The context containing information about the context in which the function
* is opened.
* @throws Exception
* Implementations may forward exceptions, which are caught by the runtime.
* When the runtime catches an exception, it aborts the task and lets the fail-over logic
* decide whether to retry the task execution.
*/
@PublicEvolving
default void open(OpenContext openContext) throws Exception {
open(new Configuration());
} | 3.26 |
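As a hedged illustration of the delegation rule described in the javadoc above (the CountingMapper class below is hypothetical and not part of Flink; package names are assumed from Flink's common functions API), overriding only the new variant means the deprecated open(Configuration) is never invoked for this function:

import org.apache.flink.api.common.functions.OpenContext;
import org.apache.flink.api.common.functions.RichMapFunction;

// Hypothetical user function: only open(OpenContext) is overridden, so the
// runtime calls this method directly and never falls back to
// open(Configuration parameters).
public class CountingMapper extends RichMapFunction<String, Integer> {

    private transient long mappedRecords;

    @Override
    public void open(OpenContext openContext) {
        // one-time setup work, e.g. resetting a per-task counter
        mappedRecords = 0L;
    }

    @Override
    public Integer map(String value) {
        mappedRecords++;
        return value.length();
    }
}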
flink_NettyClient_connect_rdh | // Client connections
// ------------------------------------------------------------------------
ChannelFuture connect(final InetSocketAddress serverSocketAddress) {
checkState(bootstrap != null, "Client has not been initialized yet.");
// --------------------------------------------------------------------
// Child channel pipeline for accepted connections
// --------------------------------------------------------------------
bootstrap.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel channel) throws Exception {
// SSL handler should be added first in the pipeline
if (clientSSLFactory != null) {
SslHandler sslHandler = clientSSLFactory.createNettySSLHandler(
        channel.alloc(),
        serverSocketAddress.getAddress().getCanonicalHostName(),
        serverSocketAddress.getPort());
channel.pipeline().addLast("ssl", sslHandler);
}
channel.pipeline().addLast(protocol.getClientChannelHandlers());
}
});
try {
return bootstrap.connect(serverSocketAddress);
} catch (ChannelException e) {
if (((e.getCause() instanceof SocketException) && e.getCause().getMessage().equals("Too many open files")) || (((e.getCause() instanceof ChannelException) && (e.getCause().getCause() instanceof SocketException)) && e.getCause().getCause().getMessage().equals("Too many open files"))) {
throw new ChannelException("The operating system does not offer enough file handles to open the network connection. " + "Please increase the number of available file handles.", e.getCause());
} else {
throw e;
}
}
} | 3.26 |
flink_AsyncLookupFunctionProvider_of_rdh | /**
* Helper function for creating a static provider.
*/
static AsyncLookupFunctionProvider of(AsyncLookupFunction asyncLookupFunction) {
return () -> asyncLookupFunction;
} | 3.26 |
flink_GenericTypeComparator_supportsSerializationWithKeyNormalization_rdh | // ------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_TestEnvironmentSettings_getSavepointRestorePath_rdh | /**
* Path of savepoint that the job should recover from.
*/
@Nullable
public String getSavepointRestorePath() {
return savepointRestorePath;
} | 3.26 |
flink_TestEnvironmentSettings_getConnectorJarPaths_rdh | /**
* List of connector JARs paths.
*/
public List<URL> getConnectorJarPaths() {
return connectorJarPaths;
} | 3.26 |
flink_TimestampedFileInputSplit_setSplitState_rdh | /**
* Sets the state of the split. This information is used when restoring from a checkpoint and
* allows to resume reading the underlying file from the point we left off.
*
* <p>This is applicable to {@link org.apache.flink.api.common.io.FileInputFormat
* FileInputFormats} that implement the {@link org.apache.flink.api.common.io.CheckpointableInputFormat} interface.
*/
public void setSplitState(Serializable state) {
this.splitState = state;
} | 3.26 |
flink_TimestampedFileInputSplit_getSplitState_rdh | /**
*
* @return the state of the split.
*/
public Serializable getSplitState() {
    return this.splitState;
} | 3.26 |
flink_TimestampedFileInputSplit_getModificationTime_rdh | /**
*
* @return The modification time of the file this split belongs to.
*/
public long getModificationTime() {
return this.modificationTime;
} | 3.26 |
flink_FlinkAggregateJoinTransposeRule_registry_rdh | /**
* Creates a {@link org.apache.calcite.sql.SqlSplittableAggFunction.Registry} that is a view of
* a list.
*/
private static <E> SqlSplittableAggFunction.Registry<E> registry(final List<E> list) {
return new SqlSplittableAggFunction.Registry<E>() {
public int register(E e) {
    int i = list.indexOf(e);
if (i < 0) {
i = list.size();
list.add(e);
}
return i;
}
};
} | 3.26 |
flink_FlinkAggregateJoinTransposeRule_keyColumns_rdh | /**
* Computes the closure of a set of columns according to a given list of constraints. Each 'x =
* y' constraint causes bit y to be set if bit x is set, and vice versa.
*/
private static ImmutableBitSet keyColumns(ImmutableBitSet aggregateColumns, ImmutableList<RexNode> predicates) {
SortedMap<Integer, BitSet> equivalence = new TreeMap<>();
for (RexNode predicate : predicates) {
populateEquivalences(equivalence, predicate);
}
ImmutableBitSet keyColumns = aggregateColumns;
for (Integer v93 : aggregateColumns) {
    final BitSet bitSet = equivalence.get(v93);
if (bitSet != null) {
keyColumns = keyColumns.union(bitSet);
}
}
return keyColumns;
} | 3.26
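A worked sketch of the closure computation, with the equivalence map built by hand in place of populateEquivalences() for a hypothetical predicate $0 = $3 (RexNode construction omitted for brevity):

SortedMap<Integer, BitSet> equivalence = new TreeMap<>();
BitSet zeroAndThree = new BitSet();
zeroAndThree.set(0);
zeroAndThree.set(3);
equivalence.put(0, zeroAndThree);
equivalence.put(3, zeroAndThree);

// Closure of the grouping columns {0} under the constraint 0 = 3.
ImmutableBitSet aggregateColumns = ImmutableBitSet.of(0);
ImmutableBitSet closure = aggregateColumns;
for (Integer column : aggregateColumns) {
    BitSet bitSet = equivalence.get(column);
    if (bitSet != null) {
        closure = closure.union(bitSet);
    }
}
// closure is now {0, 3}: grouping on column 0 also keys column 3.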
flink_FlinkAggregateJoinTransposeRule_m1_rdh | /**
* Convert aggregate with AUXILIARY_GROUP to regular aggregate. Return original aggregate and
* null project if the given aggregate does not contain AUXILIARY_GROUP, else new aggregate
* without AUXILIARY_GROUP and a project to permute output columns if needed.
*/
private Pair<Aggregate, List<RexNode>> m1(Aggregate aggregate) {
Tuple2<int[], Seq<AggregateCall>> auxGroupAndRegularAggCalls = AggregateUtil.checkAndSplitAggCalls(aggregate);
final int[] auxGroup = auxGroupAndRegularAggCalls._1;
final Seq<AggregateCall> regularAggCalls = auxGroupAndRegularAggCalls._2;
if (auxGroup.length != 0) {
int[] fullGroupSet = AggregateUtil.checkAndGetFullGroupSet(aggregate);
ImmutableBitSet newGroupSet = ImmutableBitSet.of(fullGroupSet);
List<AggregateCall> aggCalls = JavaConverters.seqAsJavaListConverter(regularAggCalls).asJava();
final Aggregate newAgg = aggregate.copy(aggregate.getTraitSet(), aggregate.getInput(), newGroupSet, ImmutableList.of(newGroupSet), aggCalls);
final List<RelDataTypeField> aggFields = aggregate.getRowType().getFieldList();
final List<RexNode> projectAfterAgg = new ArrayList<>();
for (int i = 0; i < fullGroupSet.length; ++i) {
int group = fullGroupSet[i];
int index = newGroupSet.indexOf(group);
projectAfterAgg.add(new RexInputRef(index, aggFields.get(i).getType()));
}
int fieldCntOfAgg = aggFields.size();
for (int i = fullGroupSet.length; i < fieldCntOfAgg; ++i) {
    projectAfterAgg.add(new RexInputRef(i, aggFields.get(i).getType()));
}
Preconditions.checkArgument(projectAfterAgg.size() == fieldCntOfAgg);
return new Pair<>(newAgg, projectAfterAgg);
} else {
return new Pair<>(aggregate, null);
}
} | 3.26 |
flink_ResolvedSchema_getPrimaryKeyIndexes_rdh | /**
* Returns the primary key indexes, if any, otherwise returns an empty array.
*/
public int[] getPrimaryKeyIndexes() {
final List<String> columns = getColumnNames();
return getPrimaryKey()
        .map(UniqueConstraint::getColumns)
        .map(pkColumns -> pkColumns.stream().mapToInt(columns::indexOf).toArray())
        .orElseGet(() -> new int[0]);
} | 3.26 |
flink_ResolvedSchema_physical_rdh | /**
* Shortcut for a resolved schema of only physical columns.
*/
public static ResolvedSchema physical(String[] columnNames, DataType[] columnDataTypes) {
return m0(Arrays.asList(columnNames), Arrays.asList(columnDataTypes));
} | 3.26 |
flink_ResolvedSchema_getColumnCount_rdh | /**
* Returns the number of {@link Column}s of this schema.
*/
public int getColumnCount() {
return columns.size();
} | 3.26 |