name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
flink_DateComparator_compareSerializedDate_rdh | // --------------------------------------------------------------------------------------------
// Static Helpers for Date Comparison
// --------------------------------------------------------------------------------------------
public static int compareSerializedDate(DataInputView firstSource, DataInputView secondSource, boolean ascendingComparison) throws IOException {
final long l1 = firstSource.readLong();
final long l2 = secondSource.readLong();
final int comp = (l1 < l2) ? -1 : l1 == l2 ? 0 : 1;
return ascendingComparison ? comp : -comp;
} | 3.26 |
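The comparator above simply flips the sign of a three-way comparison when descending order is requested. Below is a minimal, self-contained sketch of the same idea using plain `java.io.DataInput` instead of Flink's `DataInputView`; the class and method names are illustrative, not Flink API.

```java
import java.io.DataInput;
import java.io.IOException;

final class SerializedLongComparison {

    /** Reads one long from each source and compares them, flipping the sign for descending order. */
    static int compareSerialized(DataInput first, DataInput second, boolean ascending) throws IOException {
        final long l1 = first.readLong();
        final long l2 = second.readLong();
        final int comp = Long.compare(l1, l2); // same -1 / 0 / 1 contract as the snippet above
        return ascending ? comp : -comp;
    }
}
```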
flink_WindowTableFunctionUtil_createWindowAssigner_rdh | /**
* Creates window assigner based on input window strategy.
*
* @param windowingStrategy
* input window strategy
* @return new created window assigner
*/
public static WindowAssigner<TimeWindow> createWindowAssigner(TimeAttributeWindowingStrategy windowingStrategy) {
WindowSpec windowSpec = windowingStrategy.getWindow();
boolean isProctime = windowingStrategy.isProctime();
if (windowSpec instanceof TumblingWindowSpec) {
TumblingWindowSpec tumblingWindowSpec = ((TumblingWindowSpec) (windowSpec));
TumblingWindowAssigner windowAssigner = TumblingWindowAssigner.of(tumblingWindowSpec.getSize());
if (isProctime) {
windowAssigner = windowAssigner.withProcessingTime();
}
if (tumblingWindowSpec.getOffset() != null) {
windowAssigner = windowAssigner.withOffset(tumblingWindowSpec.getOffset());
}
return windowAssigner;
} else if (windowSpec instanceof HoppingWindowSpec) {
HoppingWindowSpec hoppingWindowSpec = ((HoppingWindowSpec) (windowSpec));
SlidingWindowAssigner windowAssigner = SlidingWindowAssigner.of(hoppingWindowSpec.getSize(), hoppingWindowSpec.getSlide());
if (isProctime) {
windowAssigner = windowAssigner.withProcessingTime();
}
if (hoppingWindowSpec.getOffset() != null) {
windowAssigner = windowAssigner.withOffset(hoppingWindowSpec.getOffset());
}
return windowAssigner;
} else if (windowSpec instanceof CumulativeWindowSpec) {
CumulativeWindowSpec cumulativeWindowSpec = ((CumulativeWindowSpec) (windowSpec));
CumulativeWindowAssigner windowAssigner = CumulativeWindowAssigner.of(cumulativeWindowSpec.getMaxSize(), cumulativeWindowSpec.getStep());
if (isProctime) {
windowAssigner = windowAssigner.withProcessingTime();
}
if (cumulativeWindowSpec.getOffset() != null) {
windowAssigner = windowAssigner.withOffset(cumulativeWindowSpec.getOffset());
}
return windowAssigner;
} else {
throw new TableException(String.format("Unknown window spec: %s", windowSpec.getClass().getSimpleName()));
}
} | 3.26 |
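For intuition about what the returned assigners compute, the start of the tumbling (or sliding) window containing a timestamp is typically derived with a small modular-arithmetic formula. The sketch below is a hedged illustration of that formula, not the exact Flink implementation.

```java
final class WindowStartMath {

    /**
     * Start of the tumbling window that a timestamp falls into, given a window size and an
     * optional offset (all in milliseconds). Shown purely for illustration of the assigner's job.
     */
    static long windowStart(long timestampMs, long offsetMs, long windowSizeMs) {
        return timestampMs - (timestampMs - offsetMs + windowSizeMs) % windowSizeMs;
    }

    public static void main(String[] args) {
        // A 5-second tumbling window with no offset: 12_345 ms falls into [10_000, 15_000).
        System.out.println(windowStart(12_345L, 0L, 5_000L)); // prints 10000
    }
}
```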
flink_CliFrontend_buildProgram_rdh | /**
* Creates a {@link PackagedProgram} from the given command line options and the
* effective configuration.
*
* @return A PackagedProgram (upon success)
*/
PackagedProgram buildProgram(final ProgramOptions runOptions, final Configuration configuration) throws FileNotFoundException, ProgramInvocationException, CliArgsException {
runOptions.validate();
String[] programArgs = runOptions.getProgramArgs();
String jarFilePath = runOptions.getJarFilePath();
List<URL> classpaths = runOptions.getClasspaths();
// Get assembler class
String entryPointClass = runOptions.getEntryPointClassName();
File jarFile = (jarFilePath != null) ? getJarFile(jarFilePath) : null;
return PackagedProgram.newBuilder().setJarFile(jarFile).setUserClassPaths(classpaths).setEntryPointClassName(entryPointClass).setConfiguration(configuration).setSavepointRestoreSettings(runOptions.getSavepointRestoreSettings()).setArguments(programArgs).build();
} | 3.26 |
flink_CliFrontend_handleError_rdh | /**
* Displays an exception message.
*
* @param t
* The exception to display.
* @return The return code for the process.
*/
private static int handleError(Throwable t) {
LOG.error("Error while running the command.", t);
System.err.println();
System.err.println("------------------------------------------------------------");
System.err.println(" The program finished with the following exception:");
System.err.println();
if (t.getCause() instanceof InvalidProgramException) {
System.err.println(t.getCause().getMessage());
StackTraceElement[] trace = t.getCause().getStackTrace();
for (StackTraceElement ele : trace) {
System.err.println("\t" + ele);
if (ele.getMethodName().equals("main")) {
break;}
}
} else {
t.printStackTrace();
}
return 1;
} | 3.26 |
flink_CliFrontend_cancel_rdh | /**
* Executes the CANCEL action.
*
* @param args
* Command line arguments for the cancel action.
*/
protected void cancel(String[] args) throws Exception {
LOG.info("Running 'cancel' command.");
final Options commandOptions = CliFrontendParser.getCancelCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
CancelOptions cancelOptions = new CancelOptions(commandLine);
// evaluate help flag
if (cancelOptions.isPrintHelp()) {
CliFrontendParser.printHelpForCancel(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = m1(commandLine);
final String[] cleanedArgs = cancelOptions.getArgs();
if (cancelOptions.isWithSavepoint()) {
logAndSysout("DEPRECATION WARNING: Cancelling a job with savepoint is deprecated. Use \"stop\" instead.");
final JobID jobId;
final String targetDirectory;
if (cleanedArgs.length > 0) {
jobId = parseJobId(cleanedArgs[0]);
targetDirectory = cancelOptions.getSavepointTargetDirectory();
} else {
jobId = parseJobId(cancelOptions.getSavepointTargetDirectory());
targetDirectory = null;
}
final SavepointFormatType formatType = cancelOptions.getFormatType();
if (targetDirectory == null) {
logAndSysout(((("Cancelling job " + jobId) + " with ") + formatType) + " savepoint to default savepoint directory.");
} else {logAndSysout(((((("Cancelling job " + jobId) + " with ") + formatType) + " savepoint to ") + targetDirectory) + '.');
}
runClusterAction(activeCommandLine, commandLine, (clusterClient, effectiveConfiguration) -> {
final String savepointPath;
try {
savepointPath = clusterClient.cancelWithSavepoint(jobId, targetDirectory, formatType).get(getClientTimeout(effectiveConfiguration).toMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new <e>FlinkException(("Could not cancel job " + jobId) + '.');
}
logAndSysout(((("Cancelled job " + jobId)
+ ". Savepoint stored in ") + savepointPath) + '.');
});
} else {
final JobID jobId;
if (cleanedArgs.length > 0) {
jobId = parseJobId(cleanedArgs[0]);
} else {
throw new CliArgsException("Missing JobID. Specify a JobID to cancel a job.");
}
logAndSysout(("Cancelling job " + jobId) + '.');
runClusterAction(activeCommandLine, commandLine, (clusterClient, effectiveConfiguration) -> {
try {
clusterClient.cancel(jobId).get(getClientTimeout(effectiveConfiguration).toMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new FlinkException("Could not cancel job " + jobId + '.', e);
}
});
logAndSysout("Cancelled job " + jobId + '.');
}
} | 3.26 |
flink_CliFrontend_m1_rdh | // --------------------------------------------------------------------------------------------
// Custom command-line
// --------------------------------------------------------------------------------------------
/**
* Gets the custom command-line for the arguments.
*
* @param commandLine
* The input to the command-line.
* @return custom command-line which is active (may only be one at a time)
*/
public CustomCommandLine m1(CommandLine commandLine) {
LOG.debug("Custom commandlines: {}", customCommandLines);
for (CustomCommandLine cli : customCommandLines) {
LOG.debug("Checking custom commandline {}, isActive: {}", cli, cli.isActive(commandLine));
if (cli.isActive(commandLine)) {
return cli;
}
}
throw new IllegalStateException("No valid command-line found.");
} | 3.26 |
flink_CliFrontend_runApplication_rdh | // --------------------------------------------------------------------------------------------
// Execute Actions
// --------------------------------------------------------------------------------------------
protected void runApplication(String[] args) throws Exception {
LOG.info("Running 'run-application' command.");
final Options commandOptions = CliFrontendParser.getRunCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, true);
if (commandLine.hasOption(HELP_OPTION.getOpt())) {
CliFrontendParser.printHelpForRunApplication(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = m1(checkNotNull(commandLine));
final ApplicationDeployer deployer = new ApplicationClusterDeployer(f2);
final ProgramOptions programOptions;
final Configuration effectiveConfiguration;
// No need to set a jarFile path for Pyflink job.
if (ProgramOptionsUtils.isPythonEntryPoint(commandLine)) {
programOptions = ProgramOptionsUtils.createPythonProgramOptions(commandLine);
effectiveConfiguration = getEffectiveConfiguration(activeCommandLine, commandLine, programOptions, Collections.emptyList());
} else {
programOptions = new ProgramOptions(commandLine);
programOptions.validate();
final URI uri = PackagedProgramUtils.resolveURI(programOptions.getJarFilePath());
effectiveConfiguration = getEffectiveConfiguration(activeCommandLine, commandLine, programOptions, Collections.singletonList(uri.toString()));
}
final ApplicationConfiguration applicationConfiguration = new ApplicationConfiguration(programOptions.getProgramArgs(), programOptions.getEntryPointClassName());
deployer.run(effectiveConfiguration, applicationConfiguration);
} | 3.26 |
flink_CliFrontend_getJobJarAndDependencies_rdh | /**
* Get all provided libraries needed to run the program from the ProgramOptions.
*/
private List<URL> getJobJarAndDependencies(ProgramOptions programOptions) throws CliArgsException {
String entryPointClass = programOptions.getEntryPointClassName();
String jarFilePath = programOptions.getJarFilePath();
try {
File jarFile = (jarFilePath != null) ? getJarFile(jarFilePath) : null;
return PackagedProgram.getJobJarAndDependencies(jarFile, entryPointClass);
} catch (FileNotFoundException | ProgramInvocationException e) {
throw new CliArgsException("Could not get job jar and dependencies from JAR file: " + e.getMessage(), e);
}
} | 3.26 |
flink_CliFrontend_stop_rdh | /**
* Executes the STOP action.
*
* @param args
* Command line arguments for the stop action.
*/
protected void stop(String[] args) throws Exception {
LOG.info("Running 'stop-with-savepoint' command.");
final Options commandOptions = CliFrontendParser.getStopCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
final StopOptions stopOptions = new StopOptions(commandLine);
if (stopOptions.isPrintHelp()) {
CliFrontendParser.printHelpForStop(customCommandLines);
return;
}
final String[] cleanedArgs = stopOptions.getArgs();
final String targetDirectory = (stopOptions.hasSavepointFlag() && (cleanedArgs.length > 0)) ? stopOptions.getTargetDirectory() : null;// the default savepoint location is going to be used in this case.
final JobID jobId = (cleanedArgs.length != 0) ? parseJobId(cleanedArgs[0]) : parseJobId(stopOptions.getTargetDirectory());
final boolean advanceToEndOfEventTime = stopOptions.shouldAdvanceToEndOfEventTime();
final SavepointFormatType formatType = stopOptions.getFormatType();
logAndSysout((advanceToEndOfEventTime ? "Draining job " : "Suspending job ") + "\"" + jobId + "\" with a " + formatType + " savepoint.");
final CustomCommandLine v59 = m1(commandLine);
runClusterAction(v59, commandLine, (clusterClient, effectiveConfiguration) -> {
final String savepointPath;
try {
savepointPath = clusterClient.stopWithSavepoint(jobId, advanceToEndOfEventTime, targetDirectory, formatType).get(getClientTimeout(effectiveConfiguration).toMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new <e>FlinkException(("Could not stop with a savepoint job \"" + jobId) +
"\".");
}logAndSysout("Savepoint completed. Path: " + savepointPath);
});
} | 3.26 |
flink_CliFrontend_handleArgException_rdh | // --------------------------------------------------------------------------------------------
// Logging and Exception Handling
// --------------------------------------------------------------------------------------------
/**
* Displays an exception message for incorrect command line arguments.
*
* @param e
* The exception to display.
* @return The return code for the process.
*/
private static int handleArgException(CliArgsException e) {
LOG.error("Invalid command line arguments.", e);
System.out.println(e.getMessage());
System.out.println();
System.out.println("Use the help option (-h or --help) to get help on the command.");
return 1;
} | 3.26 |
flink_CliFrontend_getDefaultParallelism_rdh | /**
* Get default parallelism from command line via effective configuration.
*
* @param effectiveConfiguration
* Flink effective configuration.
* @return default parallelism.
*/
private int getDefaultParallelism(Configuration effectiveConfiguration) {
return effectiveConfiguration.get(CoreOptions.DEFAULT_PARALLELISM);
} | 3.26 |
flink_CliFrontend_parseAndRun_rdh | // --------------------------------------------------------------------------------------------
// Entry point for executable
// --------------------------------------------------------------------------------------------
/**
* Parses the command line arguments and starts the requested action.
*
* @param args
* command line arguments of the client.
* @return The return code of the program
*/
public int parseAndRun(String[] args) {
// check for action
if (args.length < 1) {
CliFrontendParser.printHelp(customCommandLines);
System.out.println("Please specify an action.");
return 1;
}
// get action
String action = args[0];
// remove action from parameters
final String[] params = Arrays.copyOfRange(args, 1, args.length);
try {// do action
switch (action) {
case ACTION_RUN :
run(params);
return 0;
case ACTION_RUN_APPLICATION :
runApplication(params);
return 0;
case ACTION_LIST :
list(params);
return 0;
case ACTION_INFO :
info(params);
return 0;
case ACTION_CANCEL :
cancel(params);
return 0;
case ACTION_STOP :
stop(params);
return 0;
case ACTION_SAVEPOINT :
savepoint(params);
return 0;
case "-h" :
case "--help" :
CliFrontendParser.printHelp(customCommandLines);
return 0;
case "-v" :
case "--version" :
String version = EnvironmentInformation.getVersion();
String commitID = EnvironmentInformation.getRevisionInformation().commitId;
System.out.print("Version: " + version);
System.out.println(commitID.equals(EnvironmentInformation.UNKNOWN) ? "" : ", Commit ID: " + commitID);
return 0;
default :
System.out.printf("\"%s\" is not a valid action.\n", action);
System.out.println();
System.out.println("Valid actions are \"run\", \"run-application\", \"list\", \"info\", \"savepoint\", \"stop\", or \"cancel\".");
System.out.println();
System.out.println("Specify the version option (-v or --version) to print Flink version.");
System.out.println();
System.out.println("Specify the help option (-h or --help) to get help on the command.");
return 1;
}
} catch (CliArgsException ce) {
return handleArgException(ce);
} catch (ProgramParametrizationException ppe) {
return handleParametrizationException(ppe);
} catch (ProgramMissingJobException pmje) {
return handleMissingJobException();
} catch (Exception e) {
return handleError(e);
}
} | 3.26 |
flink_CliFrontend_handleParametrizationException_rdh | /**
* Displays an optional exception message for incorrect program parametrization.
*
* @param e
* The exception to display.
* @return The return code for the process.
*/
private static int handleParametrizationException(ProgramParametrizationException e) {
LOG.error("Program has not been parametrized properly.", e);
System.err.println(e.getMessage());
return 1;
} | 3.26 |
flink_CliFrontend_m2_rdh | /**
* Get client timeout from command line via effective configuration.
*
* @param effectiveConfiguration
* Flink effective configuration.
* @return client timeout with Duration type
*/
private Duration m2(Configuration effectiveConfiguration) {
return effectiveConfiguration.get(ClientOptions.CLIENT_TIMEOUT);
} | 3.26 |
flink_CliFrontend_getJarFile_rdh | /**
* Gets the JAR file from the path.
*
* @param jarFilePath
* The path of JAR file
* @return The JAR file
* @throws FileNotFoundException
* The JAR file does not exist.
*/
private File getJarFile(String jarFilePath) throws FileNotFoundException {
File v89 = new File(jarFilePath);
// Check if JAR file exists
if (!v89.exists()) {
throw new FileNotFoundException("JAR file does not exist: " + v89);
} else if (!v89.isFile()) {
throw new FileNotFoundException("JAR file is not a file: " + v89); }
return v89;
} | 3.26 |
flink_CliFrontend_loadCustomCommandLine_rdh | /**
* Loads a class from the classpath that implements the CustomCommandLine interface.
*
* @param className
* The fully-qualified class name to load.
* @param params
* The constructor parameters
*/
private static CustomCommandLine loadCustomCommandLine(String className, Object... params) throws Exception {
Class<? extends CustomCommandLine> customCliClass = Class.forName(className).asSubclass(CustomCommandLine.class);
// construct class types from the parameters
Class<?>[] types = new Class<?>[params.length];
for (int i = 0; i < params.length; i++) {
checkNotNull(params[i], "Parameters for custom command-lines may not be null.");
types[i] = params[i].getClass();
}
Constructor<? extends CustomCommandLine> constructor = customCliClass.getConstructor(types);
return constructor.newInstance(params);
} | 3.26 |
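The reflective lookup above matches the constructor against the exact runtime classes of the arguments, so a constructor declared with a supertype parameter would not be found. A generic, self-contained sketch of the same pattern; all names here are illustrative, not Flink API.

```java
import java.lang.reflect.Constructor;

final class ReflectiveFactory {

    /**
     * Loads a class by name and invokes the constructor whose parameter types exactly
     * match the runtime classes of the given arguments.
     */
    static <T> T instantiate(String className, Class<T> expectedType, Object... params) throws Exception {
        Class<? extends T> clazz = Class.forName(className).asSubclass(expectedType);
        Class<?>[] types = new Class<?>[params.length];
        for (int i = 0; i < params.length; i++) {
            // Exact runtime class: a constructor declared with a supertype parameter is not found.
            types[i] = params[i].getClass();
        }
        Constructor<? extends T> ctor = clazz.getConstructor(types);
        return ctor.newInstance(params);
    }
}
```

For example, `instantiate("java.lang.StringBuilder", CharSequence.class, "seed")` resolves the `StringBuilder(String)` constructor because the argument's runtime class is exactly `String`.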
flink_CliFrontend_setJobManagerAddressInConfig_rdh | /**
* Writes the given job manager address to the associated configuration object.
*
* @param address
* Address to write to the configuration
* @param config
* The configuration to write to
*/
static void setJobManagerAddressInConfig(Configuration config, InetSocketAddress address) {
config.setString(JobManagerOptions.ADDRESS, address.getHostString());
config.setInteger(JobManagerOptions.PORT, address.getPort());
config.setString(RestOptions.ADDRESS, address.getHostString());
config.setInteger(RestOptions.PORT, address.getPort());
} | 3.26 |
flink_CliFrontend_executeProgram_rdh | // --------------------------------------------------------------------------------------------
// Interaction with programs and JobManager
// --------------------------------------------------------------------------------------------
protected void executeProgram(final Configuration configuration, final PackagedProgram program) throws ProgramInvocationException {ClientUtils.executeProgram(new DefaultExecutorServiceLoader(), configuration, program, false, false);
} | 3.26 |
flink_CliFrontend_savepoint_rdh | /**
* Executes the SAVEPOINT action.
*
* @param args
* Command line arguments for the savepoint action.
*/
protected void savepoint(String[] args) throws Exception {
LOG.info("Running 'savepoint' command.");
final Options commandOptions = CliFrontendParser.getSavepointCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
final SavepointOptions savepointOptions = new SavepointOptions(commandLine);
// evaluate help flag
if (savepointOptions.isPrintHelp()) {
CliFrontendParser.printHelpForSavepoint(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = m1(commandLine);
if (savepointOptions.isDispose()) {
runClusterAction(activeCommandLine, commandLine, (clusterClient, effectiveConfiguration) -> disposeSavepoint(clusterClient, savepointOptions.getSavepointPath(), getClientTimeout(effectiveConfiguration)));
} else {
String[] cleanedArgs = savepointOptions.getArgs();
final JobID jobId;
if (cleanedArgs.length >= 1) {
String jobIdString = cleanedArgs[0];
jobId = parseJobId(jobIdString);
} else {
throw new CliArgsException("Missing JobID. " + "Specify a Job ID to trigger a savepoint.");
}
final String savepointDirectory;
if (cleanedArgs.length >= 2) {
savepointDirectory = cleanedArgs[1];
} else {
savepointDirectory = null;
}
// Print superfluous arguments
if (cleanedArgs.length >= 3) {
logAndSysout("Provided more arguments than required. Ignoring not needed arguments.");
}
runClusterAction(activeCommandLine, commandLine, (clusterClient, effectiveConfiguration) -> triggerSavepoint(clusterClient, jobId, savepointDirectory, savepointOptions.getFormatType(), getClientTimeout(effectiveConfiguration)));
}
} | 3.26 |
flink_CliFrontend_handleMissingJobException_rdh | /**
* Displays a message for a program without a job to execute.
*
* @return The return code for the process.
*/
private static int handleMissingJobException() {
System.err.println();
System.err.println("The program didn't contain a Flink job. " + "Perhaps you forgot to call execute() on the execution environment.");
return 1;
} | 3.26 |
flink_CliFrontend_disposeSavepoint_rdh | /**
* Sends a SavepointDisposalRequest to the job manager.
*/
private void disposeSavepoint(ClusterClient<?> clusterClient, String savepointPath, Duration clientTimeout) throws FlinkException {
checkNotNull(savepointPath, "Missing required argument: savepoint path. " + "Usage: bin/flink savepoint -d <savepoint-path>");
logAndSysout(("Disposing savepoint '" + savepointPath) + "'.");
final CompletableFuture<Acknowledge> disposeFuture = clusterClient.disposeSavepoint(savepointPath);
logAndSysout("Waiting for response...");
try {
disposeFuture.get(clientTimeout.toMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new FlinkException(("Disposing the savepoint '" + savepointPath) + "' failed.", e);
}
logAndSysout(("Savepoint '"
+ savepointPath) + "' disposed.");
} | 3.26 |
flink_CliFrontend_triggerSavepoint_rdh | /**
* Sends a SavepointTriggerMessage to the job manager.
*/
private void triggerSavepoint(ClusterClient<?> clusterClient, JobID jobId, String savepointDirectory, SavepointFormatType formatType, Duration clientTimeout) throws FlinkException {
logAndSysout(("Triggering savepoint for job " + jobId) + '.');
CompletableFuture<String> v80 = clusterClient.triggerSavepoint(jobId, savepointDirectory, formatType);
logAndSysout("Waiting for response...");
try {
final String savepointPath = v80.get(clientTimeout.toMillis(), TimeUnit.MILLISECONDS);
logAndSysout("Savepoint completed. Path: " + savepointPath);
logAndSysout("You can resume your program from this savepoint with the run command.");
} catch (Exception e) {
Throwable cause = ExceptionUtils.stripExecutionException(e);
throw new FlinkException(("Triggering a savepoint for the job " + jobId) + " failed.", cause);
}
} | 3.26 |
flink_CliFrontend_info_rdh | /**
* Executes the info action.
*
* @param args
* Command line arguments for the info action.
*/
protected void info(String[] args) throws Exception {
LOG.info("Running 'info' command.");
final Options commandOptions = CliFrontendParser.getInfoCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, true);
final ProgramOptions programOptions = ProgramOptions.create(commandLine);
// evaluate help flag
if (commandLine.hasOption(HELP_OPTION.getOpt())) {
CliFrontendParser.printHelpForInfo();
return;
}
// -------- build the packaged program -------------
LOG.info("Building program from JAR file");
PackagedProgram program = null;
try {
LOG.info("Creating program plan dump");
final CustomCommandLine activeCommandLine = m1(checkNotNull(commandLine));
final Configuration effectiveConfiguration = getEffectiveConfiguration(activeCommandLine, commandLine, programOptions, getJobJarAndDependencies(programOptions));
program = buildProgram(programOptions, effectiveConfiguration);
int parallelism = programOptions.getParallelism();
if (ExecutionConfig.PARALLELISM_DEFAULT == parallelism) {
parallelism = getDefaultParallelism(effectiveConfiguration);
}
Pipeline pipeline = PackagedProgramUtils.getPipelineFromProgram(program, effectiveConfiguration, parallelism, true);
String jsonPlan = FlinkPipelineTranslationUtil.translateToJSONExecutionPlan(program.getUserCodeClassLoader(), pipeline);
if (jsonPlan != null) {
System.out.println("----------------------- Execution Plan -----------------------");
System.out.println(jsonPlan);
System.out.println("--------------------------------------------------------------");
} else {
System.out.println("JSON plan could not be generated.");
}
String description = program.getDescription();
if (description != null) {
System.out.println();
System.out.println(description);
} else {
System.out.println();
System.out.println("No description provided.");
}
} finally {
if (program != null) {
program.close();
}
}
} | 3.26 |
flink_CliFrontend_list_rdh | /**
* Executes the list action.
*
* @param args
* Command line arguments for the list action.
*/
protected void list(String[] args) throws Exception {
LOG.info("Running 'list' command.");
final Options commandOptions = CliFrontendParser.getListCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
ListOptions listOptions = new ListOptions(commandLine);
// evaluate help flag
if (listOptions.isPrintHelp()) {
CliFrontendParser.printHelpForList(customCommandLines);
return;
}
final boolean showRunning;
final boolean showScheduled;
final boolean showAll;
// print running and scheduled jobs if no option is supplied
if (((!listOptions.showRunning()) && (!listOptions.showScheduled())) && (!listOptions.showAll())) {
showRunning = true;
showScheduled = true;
showAll = false;
} else {
showRunning = listOptions.showRunning();
showScheduled = listOptions.showScheduled();
showAll = listOptions.showAll();
}
final CustomCommandLine activeCommandLine = m1(commandLine);
runClusterAction(activeCommandLine, commandLine, (clusterClient, effectiveConfiguration) -> listJobs(clusterClient, showRunning, showScheduled, showAll));
} | 3.26 |
flink_CliFrontend_runClusterAction_rdh | /**
* Retrieves the {@link ClusterClient} from the given {@link CustomCommandLine} and runs the
* given {@link ClusterAction} against it.
*
* @param activeCommandLine
* to create the {@link ClusterDescriptor} from
* @param commandLine
* containing the parsed command line options
* @param clusterAction
* the cluster action to run against the retrieved {@link ClusterClient}.
* @param <ClusterID>
* type of the cluster id
* @throws FlinkException
* if something goes wrong
*/
private <ClusterID> void runClusterAction(CustomCommandLine activeCommandLine, CommandLine commandLine, ClusterAction<ClusterID> clusterAction) throws FlinkException {
final Configuration effectiveConfiguration = getEffectiveConfiguration(activeCommandLine, commandLine);
LOG.debug("Effective configuration after Flink conf, and custom commandline: {}", effectiveConfiguration);
final ClusterClientFactory<ClusterID> clusterClientFactory = f2.getClusterClientFactory(effectiveConfiguration);
final ClusterID clusterId = clusterClientFactory.getClusterId(effectiveConfiguration);
if (clusterId == null) {
throw new FlinkException("No cluster id was specified. Please specify a cluster to which you would like to connect.");
}
try (final ClusterDescriptor<ClusterID> clusterDescriptor = clusterClientFactory.createClusterDescriptor(effectiveConfiguration)) {
try (final ClusterClient<ClusterID> clusterClient = clusterDescriptor.retrieve(clusterId).getClusterClient()) {
clusterAction.runAction(clusterClient, effectiveConfiguration);
}
}
} | 3.26 |
flink_CliFrontend_run_rdh | /**
* Executes the run action.
*
* @param args
* Command line arguments for the run action.
*/
protected void run(String[] args) throws Exception {
LOG.info("Running 'run' command.");
final Options commandOptions = CliFrontendParser.getRunCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, true);
// evaluate help flag
if (commandLine.hasOption(HELP_OPTION.getOpt())) {
CliFrontendParser.printHelpForRun(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = m1(checkNotNull(commandLine));
final ProgramOptions v13 = ProgramOptions.create(commandLine);
final List<URL> jobJars = getJobJarAndDependencies(v13);
final Configuration effectiveConfiguration = getEffectiveConfiguration(activeCommandLine, commandLine, v13, jobJars);
LOG.debug("Effective executor configuration: {}", effectiveConfiguration);
try (PackagedProgram program = getPackagedProgram(v13, effectiveConfiguration)) {
executeProgram(effectiveConfiguration, program);
}
} | 3.26 |
flink_CliFrontend_main_rdh | /**
* Submits the job based on the arguments.
*/
public static void main(final String[] args) {
int retCode = INITIAL_RET_CODE;
try {
retCode = mainInternal(args);
} finally {
System.exit(retCode);
}
} | 3.26 |
flink_SessionWindowAssigner_withGap_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Creates a new {@code SessionWindowAssigner} {@link WindowAssigner} that assigns elements to
* sessions based on the timestamp.
*
* @param size
* The session timeout, i.e. the time gap between sessions
* @return The policy.
*/
public static SessionWindowAssigner withGap(Duration size) {
return new SessionWindowAssigner(size.toMillis(), true);
} | 3.26 |
flink_SessionWindowAssigner_mergeWindow_rdh | /**
* Merge curWindow and other, return a new window which covers curWindow and other if they are
* overlapped. Otherwise, returns the curWindow itself.
*/
private TimeWindow mergeWindow(TimeWindow curWindow, TimeWindow other, Collection<TimeWindow> mergedWindow) {
if (curWindow.intersects(other)) {
mergedWindow.add(other);
return curWindow.cover(other);
} else {
return curWindow;
}
} | 3.26 |
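To make the merge rule concrete, here is a self-contained sketch with a toy interval type; the intersects/cover names mirror the snippet, but the `Interval` record itself is an assumption introduced for illustration, not Flink's `TimeWindow`.

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

/** Toy interval used to illustrate session-window merging. */
record Interval(long start, long end) {

    boolean intersects(Interval other) {
        return this.start <= other.end && other.start <= this.end;
    }

    Interval cover(Interval other) {
        return new Interval(Math.min(start, other.start), Math.max(end, other.end));
    }

    /** Returns the covering interval if the two overlap (recording the absorbed one), otherwise cur. */
    static Interval mergeWindow(Interval cur, Interval other, Collection<Interval> merged) {
        if (cur.intersects(other)) {
            merged.add(other);
            return cur.cover(other);
        }
        return cur;
    }

    public static void main(String[] args) {
        List<Interval> merged = new ArrayList<>();
        Interval result = mergeWindow(new Interval(0, 10), new Interval(5, 20), merged);
        System.out.println(result); // Interval[start=0, end=20]
        System.out.println(merged); // [Interval[start=5, end=20]]
    }
}
```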
flink_ValueComparator_supportsSerializationWithKeyNormalization_rdh | // --------------------------------------------------------------------------------------------
// unsupported normalization
// --------------------------------------------------------------------------------------------
@Override
public boolean supportsSerializationWithKeyNormalization() {
return false;
} | 3.26 |
flink_DataGenerator_snapshotState_rdh | /**
* Snapshot state for {@link DataGenerator}. See {@link CheckpointedFunction#snapshotState}.
*/
default void snapshotState(FunctionSnapshotContext context) throws Exception {
} | 3.26 |
flink_ServerConnection_sendRequest_rdh | /**
* Returns a future holding the serialized request result.
*
* @param request
* the request to be sent.
* @return Future holding the serialized result
*/
@Override
public CompletableFuture<RESP> sendRequest(REQ request) {
synchronized(lock) {
if (running) {
EstablishedConnection.TimestampedCompletableFuture<RESP> requestPromiseTs = new EstablishedConnection.TimestampedCompletableFuture<>(System.nanoTime());
try {
final long requestId = requestCount++;
pendingRequests.put(requestId, requestPromiseTs);
stats.reportRequest();
ByteBuf buf = MessageSerializer.serializeRequest(channel.alloc(), requestId, request);
channel.writeAndFlush(buf).addListener(((ChannelFutureListener) (future -> {
if (!future.isSuccess()) {
// Fail the promise if the write failed
EstablishedConnection.TimestampedCompletableFuture<RESP> pending = pendingRequests.remove(requestId);
if ((pending != null) && pending.completeExceptionally(future.cause())) {
stats.reportFailedRequest();
}
}
})));
} catch (Throwable t) {
requestPromiseTs.completeExceptionally(t);
}
return requestPromiseTs;
} else {
return FutureUtils.completedExceptionally(new ClosedChannelException());
}
}
} | 3.26 |
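Stripped of Netty and Flink types, the pattern is: assign each request a monotonically increasing id, park a future in a pending map, and complete or fail that future when the matching response (or a write failure) arrives. A transport-agnostic sketch with assumed names:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/** Minimal request/response correlation: each request gets an id and a pending future. */
final class PendingRequests<RESP> {

    private final AtomicLong requestCount = new AtomicLong();
    private final ConcurrentHashMap<Long, CompletableFuture<RESP>> pending = new ConcurrentHashMap<>();

    /** Registers a new request and returns its id together with the future to complete later. */
    long register(CompletableFuture<RESP> promise) {
        long id = requestCount.getAndIncrement();
        pending.put(id, promise);
        return id;
    }

    /** Completes the matching future when a response (or failure) arrives for the given id. */
    void complete(long id, RESP response, Throwable failure) {
        CompletableFuture<RESP> promise = pending.remove(id);
        if (promise != null) {
            if (failure != null) {
                promise.completeExceptionally(failure);
            } else {
                promise.complete(response);
            }
        }
    }
}
```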
flink_ServerConnection_createEstablishedConnection_rdh | /**
* Creates an established connection from the given channel.
*
* @param channel
* Channel to create an established connection from
*/
private InternalConnection<REQ, RESP> createEstablishedConnection(Channel channel) {
if ((failureCause != null) || (!running)) {
// Close the channel and we are done. Any queued requests
// are removed on the close/failure call and after that no
// new ones can be enqueued.
channel.close();
return this;
} else {
final EstablishedConnection<REQ, RESP> establishedConnection = connectionFactory.apply(channel);
while (!queuedRequests.isEmpty()) {
final PendingConnection.PendingRequest<REQ, RESP> pending = queuedRequests.poll();
FutureUtils.forward(establishedConnection.sendRequest(pending.getRequest()), pending);
}
return establishedConnection;
}
} | 3.26 |
flink_ServerConnection_close_rdh | /**
* Close the channel with a cause.
*
* @param cause
* The cause to close the channel with.
* @return Channel close future
*/
private CompletableFuture<Void> close(final Throwable cause) {
synchronized(lock) {
if (running) {
running = false;
channel.close().addListener(finished -> {
stats.reportInactiveConnection();
for (long requestId : pendingRequests.keySet()) {
EstablishedConnection.TimestampedCompletableFuture<RESP> pending = pendingRequests.remove(requestId);
if ((pending != null) && pending.completeExceptionally(cause)) {
stats.reportFailedRequest();
}
}
// When finishing, if netty successfully closes the channel, then the provided
// exception is used as the reason for the closing. If there was something wrong
// at the netty side, then that exception is prioritized over the provided one.
if (finished.isSuccess()) {
closeFuture.completeExceptionally(cause);
} else {
LOG.warn("Something went wrong when trying to close connection due to : ", cause);
closeFuture.completeExceptionally(finished.cause());
}
});
}
}
return closeFuture;
} | 3.26 |
flink_PoissonSampler_sample_rdh | /**
* Samples the input elements; for each input element, generates its count following a Poisson
* distribution.
*
* @param input
* Elements to be sampled.
* @return The sampled result which is lazy computed upon input elements.
*/
@Override
public Iterator<T> sample(final Iterator<T> input) {
if (fraction == 0) {
return emptyIterable;
}
return new SampledIterator<T>() {
T f2;
int currentCount = 0;
@Override
public boolean hasNext() {
if (currentCount > 0) {
return true;
} else {
samplingProcess();
if (currentCount > 0) {
return true;
} else {
return false;
}
}
}
@Override
public T next() {
if (currentCount <= 0) {
samplingProcess();
}
currentCount--;
return f2;
}
public int m0(double p) {
// sample 'k' from Poisson(p), conditioned to k >= 1.
double q = Math.pow(Math.E, -p);
// simulate a poisson trial such that k >= 1.
double t = q + ((1 - q) * f1.nextDouble());
int k = 1;
// continue standard poisson generation trials.
t = t * f1.nextDouble();
while (t > q) {
k++;
t = t * f1.nextDouble();
}
return k;
}
private void m1(int num) {
// skip the elements that occurrence number is zero.
int elementCount = 0;
while (input.hasNext() && (elementCount < num)) {
f2 = input.next();
elementCount++;
}
}
private void samplingProcess() {
if (fraction <= THRESHOLD) {
double u = Math.max(f1.nextDouble(), EPSILON);
int gap = ((int) (Math.log(u) / (-fraction)));
m1(gap);
if (input.hasNext()) {
f2 = input.next();
currentCount = m0(fraction);
}
} else {
while (input.hasNext()) {
f2 = input.next();
currentCount = f0.sample();
if (currentCount > 0) {
break;
}
}
}
}
};
} | 3.26 |
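The m0 helper draws from a Poisson distribution conditioned on k >= 1 via inverse-transform sampling. For comparison, here is a standalone sketch of the unconditioned version (Knuth's classic algorithm), which rests on the same idea; it is an illustration, not the Flink sampler itself.

```java
import java.util.Random;

final class PoissonSampling {

    /** Knuth's inverse-transform sampler: draws k ~ Poisson(lambda). Suitable for small lambda. */
    static int samplePoisson(double lambda, Random random) {
        final double limit = Math.exp(-lambda);
        int k = 0;
        double p = 1.0;
        do {
            k++;
            p *= random.nextDouble();
        } while (p > limit);
        return k - 1;
    }

    public static void main(String[] args) {
        Random random = new Random(42);
        double sum = 0;
        int n = 100_000;
        for (int i = 0; i < n; i++) {
            sum += samplePoisson(0.5, random);
        }
        System.out.println("empirical mean ~ " + sum / n); // should be close to lambda = 0.5
    }
}
```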
flink_IntegerResourceVersion_valueOf_rdh | /**
* Create a {@link IntegerResourceVersion} with given integer value.
*
* @param value
* resource version integer value. The value should not be negative.
* @return {@link IntegerResourceVersion} with given value.
*/
public static IntegerResourceVersion valueOf(int value) {
Preconditions.checkArgument(value >= 0);
return new IntegerResourceVersion(value);
} | 3.26 |
flink_HadoopDummyReporter_getProgress_rdh | // There should be an @Override, but some CDH4 dependency does not contain this method
public float getProgress() {
return 0;
} | 3.26 |
flink_CliView_isRunning_rdh | // --------------------------------------------------------------------------------------------
protected boolean isRunning() {
return isRunning;
} | 3.26 |
flink_CliView_resetMainPart_rdh | /**
* Must be called when values in the main part (main header or main) have changed.
*/
protected void resetMainPart() {
mainHeaderLines = null;
mainLines = null;
totalMainWidth = 0;
} | 3.26 |
flink_CliView_getTerminalWidth_rdh | // --------------------------------------------------------------------------------------------
public int getTerminalWidth() {
if (TerminalUtils.isPlainTerminal(terminal)) {
return f0;
}
return terminal.getWidth();
} | 3.26 |
flink_CliView_resetAllParts_rdh | /**
* Must be called when values in one or more parts have changed.
*/
protected void resetAllParts() {
titleLine = null;
headerLines = null;
mainHeaderLines = null;
mainLines = null;
footerLines = null;
totalMainWidth = 0;
} | 3.26 |
flink_SplittableIterator_getSplit_rdh | /**
* Splits this iterator into <i>n</i> partitions and returns the <i>i-th</i> partition out of
* those.
*
* @param num
* The partition to return (<i>i</i>).
* @param numPartitions
* The number of partitions to split into (<i>n</i>).
* @return The iterator for the partition.
*/
public Iterator<T> getSplit(int num, int numPartitions) {
if (((numPartitions < 1) || (num < 0)) || (num >= numPartitions)) {
throw new IllegalArgumentException();
}
return split(numPartitions)[num];
} | 3.26 |
flink_RequestSplitEvent_hostName_rdh | // ------------------------------------------------------------------------
@Nullable
public String hostName() {
return hostName;
} | 3.26 |
flink_RequestSplitEvent_hashCode_rdh | // ------------------------------------------------------------------------
@Override
public int hashCode() {
return 65932633 + Objects.hashCode(hostName);
} | 3.26 |
flink_ClusterOverview_equals_rdh | // ------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
} else if (obj instanceof ClusterOverview) {
ClusterOverview that = ((ClusterOverview) (obj));
return this.numTaskManagersConnected == that.numTaskManagersConnected
&& this.numSlotsTotal == that.numSlotsTotal
&& this.numSlotsAvailable == that.numSlotsAvailable
&& this.numTaskManagersBlocked == that.numTaskManagersBlocked
&& this.numSlotsFreeAndBlocked == that.numSlotsFreeAndBlocked
&& this.getNumJobsRunningOrPending() == that.getNumJobsRunningOrPending()
&& this.getNumJobsFinished() == that.getNumJobsFinished()
&& this.getNumJobsCancelled() == that.getNumJobsCancelled()
&& this.getNumJobsFailed() == that.getNumJobsFailed();
} else {
return false;
}
} | 3.26 |
flink_ThreadInfoSamplesRequest_m0_rdh | /**
* Returns the configured delay between the individual samples.
*
* @return the delay between the individual samples.
*/
public Duration m0() {
return delayBetweenSamples;
} | 3.26 |
flink_ThreadInfoSamplesRequest_getMaxStackTraceDepth_rdh | /**
* Returns the configured maximum depth of the collected stack traces.
*
* @return the maximum depth of the collected stack traces.
*/
public int getMaxStackTraceDepth() {
return maxStackTraceDepth;
} | 3.26 |
flink_ThreadInfoSamplesRequest_getNumSamples_rdh | /**
* Returns the number of samples that are requested to be collected.
*
* @return the number of requested samples.
*/
public int getNumSamples() {
return numSubSamples;
} | 3.26 |
flink_NonReusingBlockResettableIterator_reopen_rdh | // ------------------------------------------------------------------------
public void reopen(Iterator<T> input) throws IOException {
this.input = input;
this.noMoreBlocks = false;
this.closed = false;
nextBlock();
} | 3.26 |
flink_PeriodicMaterializationManager_close_rdh | // task thread and task canceler can access this method
public synchronized void close() {
LOG.info("Shutting down PeriodicMaterializationManager.");
if (!periodicExecutor.isShutdown()) {
periodicExecutor.shutdownNow();
}
} | 3.26 |
flink_PeriodicMaterializationManager_scheduleNextMaterialization_rdh | // task thread and asyncOperationsThreadPool can access this method
private synchronized void scheduleNextMaterialization(long delay) {
if (started && (!periodicExecutor.isShutdown())) {
LOG.info("Task {} schedules the next materialization in {} seconds", f0, delay / 1000);
periodicExecutor.schedule(this::triggerMaterialization, delay, TimeUnit.MILLISECONDS);
}
} | 3.26 |
flink_CollectionDataType_ensureArrayConversionClass_rdh | // --------------------------------------------------------------------------------------------
private static Class<?> ensureArrayConversionClass(LogicalType logicalType, DataType elementDataType, @Nullable Class<?> clazz) {
// arrays are a special case because their default conversion class depends on the
// conversion class of the element type
if ((logicalType.getTypeRoot() == LogicalTypeRoot.ARRAY) && (clazz == null)) {
Class<?> conversionClass = wrapOrUnWrap(elementDataType.getConversionClass(), elementDataType.getLogicalType().isNullable());
return Array.newInstance(conversionClass, 0).getClass();
}
return wrapOrUnWrap(clazz, elementDataType.getLogicalType().isNullable());
} | 3.26 |
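The `Array.newInstance(componentClass, 0).getClass()` idiom used above is the standard reflection trick for obtaining an array class when the component class is only known at runtime. A small self-contained check:

```java
import java.lang.reflect.Array;

final class ArrayClassDemo {
    public static void main(String[] args) {
        Class<?> component = Integer.class;
        // Create a zero-length array of the component type and take its class.
        Class<?> arrayClass = Array.newInstance(component, 0).getClass();
        System.out.println(arrayClass);                    // class [Ljava.lang.Integer;
        System.out.println(arrayClass == Integer[].class); // true
    }
}
```

On Java 12 and later, `component.arrayType()` yields the same class without allocating a throwaway array.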
flink_DefaultExecutionGraph_registerJobStatusListener_rdh | // Listeners & Observers
// --------------------------------------------------------------------------------------------
@Override
public void registerJobStatusListener(JobStatusListener listener) {
if (listener != null) {
jobStatusListeners.add(listener);
}
} | 3.26 |
flink_DefaultExecutionGraph_updateState_rdh | // --------------------------------------------------------------------------------------------
// Callbacks and Callback Utilities
// --------------------------------------------------------------------------------------------
@Override
public boolean updateState(TaskExecutionStateTransition state) {
assertRunningInJobMasterMainThread();
final Execution attempt = currentExecutions.get(state.getID());
if (attempt != null) {
try {
final boolean stateUpdated = updateStateInternal(state, attempt);
maybeReleasePartitionGroupsFor(attempt);
return stateUpdated;
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
// failures during updates leave the ExecutionGraph inconsistent
failGlobal(t);
return false;
}
} else {
return false;
}
} | 3.26 |
flink_DefaultExecutionGraph_m1_rdh | /**
* Returns a stringified version of the user-defined accumulators.
*
* @return an Array containing the StringifiedAccumulatorResult objects
*/
@Override
public StringifiedAccumulatorResult[] m1() {
Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = aggregateUserAccumulators();
return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);
} | 3.26 |
flink_DefaultExecutionGraph_notifyNewlyInitializedJobVertices_rdh | // --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
@Override
public void notifyNewlyInitializedJobVertices(List<ExecutionJobVertex> vertices) {
executionTopology.notifyExecutionGraphUpdated(this, vertices);
} | 3.26 |
flink_DefaultExecutionGraph_getSchedulingTopology_rdh | // --------------------------------------------------------------------------------------------
// Configuration of Data-flow wide execution settings
// --------------------------------------------------------------------------------------------
@Override
public SchedulingTopology getSchedulingTopology() {
return executionTopology;
} | 3.26 |
flink_DefaultExecutionGraph_jobVertexFinished_rdh | // ------------------------------------------------------------------------
// Job Status Progress
// ------------------------------------------------------------------------
/**
* Called whenever a job vertex reaches state FINISHED (completed successfully). Once all job
* vertices are in the FINISHED state, the program is successfully done.
*/
@Override
public void jobVertexFinished() {
assertRunningInJobMasterMainThread();
final int numFinished = ++numFinishedJobVertices;
if (numFinished == numJobVerticesTotal) {
FutureUtils.assertNoException(waitForAllExecutionsTermination().thenAccept(ignored -> jobFinished()));
}
} | 3.26 |
flink_DefaultExecutionGraph_setJsonPlan_rdh | // --------------------------------------------------------------------------------------------
// Properties and Status of the Execution Graph
// --------------------------------------------------------------------------------------------
@Override
public void setJsonPlan(String jsonPlan) {
this.jsonPlan = jsonPlan;
} | 3.26 |
flink_DefaultExecutionGraph_transitionState_rdh | // ------------------------------------------------------------------------
// State Transitions
// ------------------------------------------------------------------------
@Override
public boolean transitionState(JobStatus current, JobStatus newState) {
return transitionState(current, newState, null);
} | 3.26 |
flink_DefaultExecutionGraph_getArchivedExecutionConfig_rdh | /**
* Returns the serializable {@link ArchivedExecutionConfig}.
*
* @return ArchivedExecutionConfig which may be null in case of errors
*/
@Override
public ArchivedExecutionConfig getArchivedExecutionConfig() {
// create a summary of all relevant data accessed in the web interface's JobConfigHandler
try {
ExecutionConfig executionConfig = jobInformation.getSerializedExecutionConfig().deserializeValue(userClassLoader);
if (executionConfig != null) {
return executionConfig.archive();
}
} catch (IOException | ClassNotFoundException e) {
LOG.error("Couldn't create ArchivedExecutionConfig for job {} ", getJobID(), e);
}
return null;
} | 3.26 |
flink_DefaultExecutionGraph_getAccumulatorsSerialized_rdh | /**
* Gets a serialized accumulator map.
*
* @return The accumulator map with serialized accumulator values.
*/
@Override
public Map<String, SerializedValue<OptionalFailure<Object>>> getAccumulatorsSerialized() {
return aggregateUserAccumulators().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> serializeAccumulator(entry.getKey(), entry.getValue())));
} | 3.26 |
flink_DefaultExecutionGraph_allVerticesInTerminalState_rdh | /**
* This method is a callback during cancellation/failover and is called when all tasks have reached
* a terminal state (cancelled/failed/finished).
*/
private void allVerticesInTerminalState() {
assertRunningInJobMasterMainThread();
// we are done, transition to the final state
JobStatus current;
while (true) {
current = this.state;
if (current == JobStatus.RUNNING) {
failGlobal(new Exception("ExecutionGraph went into allVerticesInTerminalState() from RUNNING"));
} else if (current == JobStatus.CANCELLING) {
if (transitionState(current, JobStatus.CANCELED)) {
onTerminalState(JobStatus.CANCELED);
break;
}
} else if (current == JobStatus.FAILING) {
break;
} else if (current.isGloballyTerminalState()) {
LOG.warn("Job has entered globally terminal state without waiting for all " + "job vertices to reach final state.");
break;
} else {
failGlobal(new Exception("ExecutionGraph went into final state from state " + current));
break;
}
}
// done transitioning the state
} | 3.26 |
flink_DefaultExecutionGraph_attachJobVertices_rdh | /**
* Attach job vertices without initializing them.
*/
private void attachJobVertices(List<JobVertex> topologicallySorted) throws JobException {
for (JobVertex jobVertex : topologicallySorted) {
if (jobVertex.isInputVertex() && (!jobVertex.isStoppable())) {
this.isStoppable = false;
}
VertexParallelismInformation parallelismInfo = parallelismStore.getParallelismInfo(jobVertex.getID());
// create the execution job vertex and attach it to the graph
ExecutionJobVertex ejv = executionJobVertexFactory.createExecutionJobVertex(this, jobVertex, parallelismInfo);
ExecutionJobVertex previousTask = this.tasks.putIfAbsent(jobVertex.getID(), ejv);
if (previousTask != null) {
throw new JobException(String.format("Encountered two job vertices with ID %s : previous=[%s] / new=[%s]", jobVertex.getID(), ejv, previousTask));
}
this.verticesInCreationOrder.add(ejv);
this.numJobVerticesTotal++;
}
} | 3.26 |
flink_DoubleMaximum_toString_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return "DoubleMaximum " + this.max;
} | 3.26 |
flink_DoubleMaximum_add_rdh | // ------------------------------------------------------------------------
// Primitive Specializations
// ------------------------------------------------------------------------
public void add(double value) {
this.max = Math.max(this.max, value);
} | 3.26 |
flink_ArrayObjectArrayConverter_create_rdh | // --------------------------------------------------------------------------------------------
public static ArrayObjectArrayConverter<?> create(DataType dataType) {
return createForElement(dataType.getChildren().get(0));
} | 3.26 |
flink_ArrayObjectArrayConverter_toBinaryArrayData_rdh | // --------------------------------------------------------------------------------------------
// Runtime helper methods
// --------------------------------------------------------------------------------------------
private ArrayData toBinaryArrayData(E[] external) {
final int length = external.length;
allocateWriter(length);
for (int pos = 0; pos < length; pos++) {
writeElement(pos, external[pos]);
}
return completeWriter().copy();
} | 3.26 |
flink_BlobClient_putInputStream_rdh | /**
* Uploads data from the given input stream to the BLOB server.
*
* @param jobId
* the ID of the job the BLOB belongs to (or <tt>null</tt> if job-unrelated)
* @param inputStream
* the input stream to read the data from
* @param blobType
* whether the BLOB should become permanent or transient
* @return the computed BLOB key of the uploaded BLOB
* @throws IOException
* thrown if an I/O error occurs while uploading the data to the BLOB server
*/
BlobKey putInputStream(@Nullable JobID jobId, InputStream inputStream, BlobKey.BlobType blobType) throws IOException {
if (this.socket.isClosed()) {
throw new IllegalStateException("BLOB Client is not connected. " + "Client has been shut down or encountered an error before.");
}
checkNotNull(inputStream);
if (LOG.isDebugEnabled()) {
LOG.debug("PUT BLOB stream to {}.", socket.getLocalSocketAddress());
}
try (BlobOutputStream os = new BlobOutputStream(jobId, blobType, socket)) {
IOUtils.copyBytes(inputStream, os, BUFFER_SIZE, false);
return os.finish();
}
catch (Throwable t) {
BlobUtils.closeSilently(socket, LOG);
throw new IOException("PUT operation failed: " + t.getMessage(), t);
}
} | 3.26 |
flink_BlobClient_putBuffer_rdh | // --------------------------------------------------------------------------------------------
// PUT
// --------------------------------------------------------------------------------------------
/**
* Uploads data from the given byte buffer to the BLOB server.
*
* @param jobId
* the ID of the job the BLOB belongs to (or <tt>null</tt> if job-unrelated)
* @param value
* the buffer to read the data from
* @param offset
* the read offset within the buffer
* @param len
* the number of bytes to read from the buffer
* @param blobType
* whether the BLOB should become permanent or transient
* @return the computed BLOB key of the uploaded BLOB
* @throws IOException
* thrown if an I/O error occurs while uploading the data to the BLOB server
*/
BlobKey putBuffer(@Nullable JobID jobId, byte[] value, int offset, int len, BlobKey.BlobType blobType) throws IOException {
if (this.socket.isClosed()) {
throw new IllegalStateException("BLOB Client is not connected. " + "Client has been shut down or encountered an error before.");
}
checkNotNull(value);
if (LOG.isDebugEnabled()) {
LOG.debug(((("PUT BLOB buffer (" + len) + " bytes) to ") + socket.getLocalSocketAddress()) + ".");
}
try (BlobOutputStream os = new BlobOutputStream(jobId, blobType, socket)) {
os.write(value, offset, len); // Receive blob key and compare
return os.finish();
} catch (Throwable t) {
BlobUtils.closeSilently(socket, LOG);
throw new IOException("PUT operation failed: " + t.getMessage(), t);
}
} | 3.26 |
flink_BlobClient_receiveAndCheckGetResponse_rdh | /**
* Reads the response from the input stream and throws in case of errors.
*
* @param is
* stream to read from
* @throws IOException
* if the response is an error or reading the response failed
*/
private static void receiveAndCheckGetResponse(InputStream is) throws IOException {
int response = is.read();
if (response < 0) {
throw new EOFException("Premature end of response");
}
if (response == RETURN_ERROR) {
Throwable cause = readExceptionFromStream(is);
throw new IOException("Server side error: " + cause.getMessage(), cause);
} else if (response != RETURN_OKAY) {
throw new IOException("Unrecognized response");
}
} | 3.26 |
flink_BlobClient_uploadFiles_rdh | /**
* Uploads the JAR files to the {@link PermanentBlobService} of the {@link BlobServer} at the
* given address with HA as configured.
*
* @param serverAddress
* Server address of the {@link BlobServer}
* @param clientConfig
* Any additional configuration for the blob client
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param files
* List of files to upload
* @throws IOException
* if the upload fails
*/
public static List<PermanentBlobKey> uploadFiles(InetSocketAddress serverAddress, Configuration clientConfig, JobID jobId, List<Path> files) throws IOException {
checkNotNull(jobId);
if (files.isEmpty()) {
return Collections.emptyList();
} else {
List<PermanentBlobKey> blobKeys = new ArrayList<>();
try (BlobClient blobClient = new BlobClient(serverAddress, clientConfig)) {
for (final Path file : files) {
final PermanentBlobKey key = blobClient.uploadFile(jobId, file);
blobKeys.add(key);
}
}
return blobKeys;
}
} | 3.26 |
flink_BlobClient_sendGetHeader_rdh | /**
* Constructs and writes the header data for a GET operation to the given output stream.
*
* @param outputStream
* the output stream to write the header data to
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey
* blob key associated with the requested file
* @throws IOException
* thrown if an I/O error occurs while writing the header data to the output
* stream
*/
private static void sendGetHeader(OutputStream outputStream, @Nullable JobID jobId, BlobKey blobKey) throws IOException {
checkNotNull(blobKey);
checkArgument((jobId != null) || (blobKey instanceof TransientBlobKey), "permanent BLOBs must be job-related");
// Signal type of operation
outputStream.write(GET_OPERATION);
// Send job ID and key
if (jobId == null) {
outputStream.write(JOB_UNRELATED_CONTENT);
} else {
outputStream.write(JOB_RELATED_CONTENT);
outputStream.write(jobId.getBytes());
}
blobKey.writeToOutputStream(outputStream);
} | 3.26 |
flink_BlobClient_downloadFromBlobServer_rdh | /**
* Downloads the given BLOB from the given server and stores its contents to a (local) file.
*
* <p>Transient BLOB files are deleted after a successful copy of the server's data into the
* given <tt>localJarFile</tt>.
*
* @param jobId
* job ID the BLOB belongs to or <tt>null</tt> if job-unrelated
* @param blobKey
* BLOB key
* @param localJarFile
* the local file to write to
* @param serverAddress
* address of the server to download from
* @param blobClientConfig
* client configuration for the connection
* @param numFetchRetries
* number of retries before failing
* @throws IOException
* if an I/O error occurs during the download
*/
static void downloadFromBlobServer(@Nullable JobID jobId, BlobKey blobKey, File localJarFile, InetSocketAddress serverAddress, Configuration blobClientConfig, int numFetchRetries) throws IOException {
final byte[] buf = new byte[BUFFER_SIZE];
LOG.info("Downloading {}/{} from {}", jobId, blobKey, serverAddress);
// loop over retries
int attempt = 0;
while (true) {
try (final BlobClient bc = new BlobClient(serverAddress, blobClientConfig);
final InputStream is = bc.getInternal(jobId, blobKey);
final OutputStream os = new FileOutputStream(localJarFile)) {
while (true) {
final int read = is.read(buf);
if (read < 0) {
break;
}
os.write(buf, 0, read);
}
return;
} catch (Throwable t) {
String message = (((((("Failed to fetch BLOB " + jobId) + "/") + blobKey) + " from ") + serverAddress) + " and store it under ") + localJarFile.getAbsolutePath();
if (attempt < numFetchRetries) {
if (LOG.isDebugEnabled()) {
LOG.error(message + " Retrying...", t);
} else {
LOG.error(message + " Retrying...");
}
} else {
LOG.error(message + " No retries left.", t);
throw new IOException(message, t);
}
// retry
++attempt;
LOG.info("Downloading {}/{} from {} (retry {})", jobId, blobKey, serverAddress, attempt);
}
} // end loop over retries
} | 3.26 |
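The download loop above is a bounded-retry pattern: attempt the transfer, log and retry on any failure until the retry budget is exhausted, then rethrow. Below is a generic sketch of that shape, with assumed names and no Flink types.

```java
import java.io.IOException;
import java.util.concurrent.Callable;

/** Generic bounded-retry helper mirroring the download loop's "attempt, retry, give up" structure. */
final class Retries {

    static <T> T withRetries(Callable<T> action, int numRetries) throws IOException {
        int attempt = 0;
        while (true) {
            try {
                return action.call();
            } catch (Exception e) {
                if (attempt >= numRetries) {
                    // Retry budget exhausted: surface the last failure.
                    throw new IOException("Operation failed after " + (attempt + 1) + " attempts", e);
                }
                attempt++;
            }
        }
    }
}
```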
flink_BlobClient_getInternal_rdh | // --------------------------------------------------------------------------------------------
// GET
// --------------------------------------------------------------------------------------------
/**
* Downloads the BLOB identified by the given BLOB key from the BLOB server.
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey
* blob key associated with the requested file
* @return an input stream to read the retrieved data from
* @throws FileNotFoundException
* if there is no such file;
* @throws IOException
* if an I/O error occurs during the download
*/
InputStream getInternal(@Nullable JobID jobId, BlobKey blobKey) throws IOException {
if (this.socket.isClosed()) {
            throw new IllegalStateException("BLOB Client is not connected. "
                    + "Client has been shut down or encountered an error before.");
}
if (LOG.isDebugEnabled()) {
LOG.debug("GET BLOB {}/{} from {}.", jobId, blobKey, socket.getLocalSocketAddress());
}
try {
            OutputStream os = this.socket.getOutputStream();
            InputStream is = this.socket.getInputStream();
// Send GET header
sendGetHeader(os, jobId, blobKey);
receiveAndCheckGetResponse(is);
return new BlobInputStream(is, blobKey, os);
} catch (Throwable t) {
BlobUtils.closeSilently(socket, LOG);
throw new IOException("GET operation failed: " + t.getMessage(), t);
}
} | 3.26 |
flink_BlobClient_uploadFile_rdh | /**
* Uploads a single file to the {@link PermanentBlobService} of the given {@link BlobServer}.
*
* @param jobId
 * ID of the job this blob belongs to (must not be <tt>null</tt>, since permanent BLOBs are always job-related)
* @param file
* file to upload
* @throws IOException
* if the upload fails
*/
public PermanentBlobKey uploadFile(JobID jobId, Path file) throws IOException {
final FileSystem fs = file.getFileSystem();
try (InputStream is = fs.open(file)) {
return ((PermanentBlobKey) (putInputStream(jobId, is, PERMANENT_BLOB)));
        }
    } | 3.26 |
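A hedged usage sketch of the upload path above; the server address, configuration, job ID, and file path are illustrative assumptions, not values from the original snippet.

import java.net.InetSocketAddress;

import org.apache.flink.api.common.JobID;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.blob.BlobClient;
import org.apache.flink.runtime.blob.PermanentBlobKey;

// Sketch: upload a job artifact as a permanent BLOB and keep the returned key.
public class BlobUploadSketch {
    public static void main(String[] args) throws Exception {
        InetSocketAddress serverAddress = new InetSocketAddress("blob-server-host", 50100); // assumed address
        Configuration clientConfig = new Configuration();
        JobID jobId = new JobID();
        Path artifact = new Path("file:///tmp/user-code.jar"); // assumed file

        try (BlobClient client = new BlobClient(serverAddress, clientConfig)) {
            PermanentBlobKey key = client.uploadFile(jobId, artifact);
            // Task managers later fetch the BLOB contents via this key.
            System.out.println("Uploaded BLOB with key: " + key);
        }
    }
}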
flink_BinaryHashPartition_insertIntoProbeBuffer_rdh | /**
* Inserts the given record into the probe side buffers. This method is only applicable when the
* partition was spilled while processing the build side.
*
* <p>If this method is invoked when the partition is still being built, it has undefined
* behavior.
*
* @param record
* The record to be inserted into the probe side buffers.
* @throws IOException
* Thrown, if the buffer is full, needs to be spilled, and spilling causes
* an error.
*/
final void insertIntoProbeBuffer(BinaryRowData record) throws IOException {
this.probeSideSerializer.serialize(record, this.probeSideBuffer);
this.probeSideRecordCounter++;
} | 3.26 |
flink_BinaryHashPartition_spillPartition_rdh | /**
* Spills this partition to disk and sets it up such that it continues spilling records that are
* added to it. The spilling process must free at least one buffer, either in the partition's
* record buffers, or in the memory segments for overflow buckets. The partition immediately
* takes back one buffer to use it for further spilling.
*
* @param ioAccess
* The I/O manager to be used to create a writer to disk.
* @param targetChannel
* The id of the target channel for this partition.
* @return The number of buffers that were freed by spilling this partition.
* @throws IOException
* Thrown, if the writing failed.
*/
int spillPartition(IOManager ioAccess, FileIOChannel.ID targetChannel, LinkedBlockingQueue<MemorySegment> bufferReturnQueue) throws IOException {
// sanity checks
if (!isInMemory()) {
throw new RuntimeException("Bug in Hybrid Hash Join: " + "Request to spill a partition that has already been spilled.");
}
if (getNumOccupiedMemorySegments() < 2) {
throw new RuntimeException("Bug in Hybrid Hash Join: " + "Request to spill a partition with less than two buffers.");
}
// create the channel block writer and spill the current buffers
// that keep the build side buffers current block, as it is most likely not full, yet
// we return the number of blocks that become available
this.buildSideChannel =
FileChannelUtil.createBlockChannelWriter(ioAccess, targetChannel, bufferReturnQueue, compressionEnable, compressionCodecFactory, compressionBlockSize, memorySegmentSize);
return this.buildSideWriteBuffer.spill(this.buildSideChannel);
} | 3.26 |
flink_BinaryHashPartition_finalizeProbePhase_rdh | /**
 * Finalizes this partition at the end of the probe phase: releases its in-memory resources if
 * it was kept in memory, otherwise closes its spill files and either deletes them or registers
 * the partition as still pending for spilled-partition processing.
 *
* @param keepUnprobedSpilledPartitions
* If true then partitions that were spilled but received
* no further probe requests will be retained; used for build-side outer joins.
*/
void finalizeProbePhase(LazyMemorySegmentPool pool,
List<BinaryHashPartition> spilledPartitions, boolean keepUnprobedSpilledPartitions) throws IOException {
if (isInMemory()) {
this.bucketArea.returnMemory(pool);
this.bucketArea = null;
// return the partition buffers
pool.returnAll(Arrays.asList(partitionBuffers));
this.partitionBuffers = null;
} else {
if (bloomFilter != null) {
freeBloomFilter();
}
if ((this.probeSideRecordCounter == 0) && (!keepUnprobedSpilledPartitions)) {
// delete the spill files
this.probeSideBuffer.close();
this.buildSideChannel.deleteChannel();
this.probeSideBuffer.getChannel().deleteChannel();
} else {
// flush the last probe side buffer and register this partition as pending
probeNumBytesInLastSeg = this.probeSideBuffer.close();
spilledPartitions.add(this);
}
}
} | 3.26 |
flink_BinaryHashPartition_getPartitionNumber_rdh | /**
* Gets the partition number of this partition.
*
* @return This partition's number.
*/
int getPartitionNumber() {
return this.partitionNumber;
} | 3.26 |
flink_BinaryHashPartition_getNumOccupiedMemorySegments_rdh | /**
* Gets the number of memory segments used by this partition, which includes build side memory
* buffers and overflow memory segments.
*
* @return The number of occupied memory segments.
*/
int getNumOccupiedMemorySegments() {
// either the number of memory segments, or one for spilling
final int numPartitionBuffers = (this.partitionBuffers != null) ? this.partitionBuffers.length : this.buildSideWriteBuffer.getNumOccupiedMemorySegments();
return (numPartitionBuffers + bucketArea.buckets.length) + bucketArea.numOverflowSegments;
} | 3.26 |
flink_BinaryHashPartition_addHashBloomFilter_rdh | /**
 * Adds a new hash to the bloom filter when a record is inserted into a spilled partition.
*/
void addHashBloomFilter(int hash) {
if (bloomFilter != null) {
// check if too full.
if (!bloomFilter.addHash(hash)) {
freeBloomFilter();
}
}
} | 3.26 |
flink_FineGrainedTaskManagerTracker_m0_rdh | // ---------------------------------------------------------------------------------------------
// Getters of internal state
// ---------------------------------------------------------------------------------------------
@Override
public Collection<? extends TaskManagerInfo> m0() {
return Collections.unmodifiableCollection(taskManagerRegistrations.values());
} | 3.26 |
flink_FineGrainedTaskManagerTracker_notifySlotStatus_rdh | // ---------------------------------------------------------------------------------------------
// Core state transitions
// ---------------------------------------------------------------------------------------------
@Override
public void notifySlotStatus(AllocationID allocationId, JobID jobId, InstanceID instanceId, ResourceProfile resourceProfile, SlotState slotState) {
Preconditions.checkNotNull(allocationId);
Preconditions.checkNotNull(jobId);
Preconditions.checkNotNull(instanceId);
Preconditions.checkNotNull(resourceProfile);
Preconditions.checkNotNull(slotState);
switch (slotState) {
case FREE :
freeSlot(instanceId, allocationId);
break;
case ALLOCATED :
addAllocatedSlot(allocationId, jobId, instanceId, resourceProfile);
break;
case PENDING :
addPendingSlot(allocationId, jobId, instanceId, resourceProfile);
break;
}
} | 3.26 |
flink_StreamExecutionEnvironment_generateStreamGraph_rdh | /**
* Generates a {@link StreamGraph} that consists of the given {@link Transformation
* transformations} and is configured with the configuration of this environment.
*
* <p>This method does not access or clear the previously registered transformations.
*
* @param transformations
* list of transformations that the graph should contain
* @return The stream graph representing the transformations
*/
@Internal
public StreamGraph generateStreamGraph(List<Transformation<?>> transformations) {
return getStreamGraphGenerator(transformations).generate();
} | 3.26 |
flink_StreamExecutionEnvironment_getDefaultLocalParallelism_rdh | /**
* Gets the default parallelism that will be used for the local execution environment created by
* {@link #createLocalEnvironment()}.
*
* @return The default local parallelism
*/
@PublicEvolving
public static int getDefaultLocalParallelism() {
return defaultLocalParallelism;
} | 3.26 |
flink_StreamExecutionEnvironment_initializeContextEnvironment_rdh | // --------------------------------------------------------------------------------------------
// Methods to control the context and local environments for execution from packaged programs
// --------------------------------------------------------------------------------------------
protected static void initializeContextEnvironment(StreamExecutionEnvironmentFactory ctx) {
contextEnvironmentFactory = ctx;
threadLocalContextEnvironmentFactory.set(ctx);
} | 3.26 |
flink_StreamExecutionEnvironment_fromSequence_rdh | /**
* Creates a new data stream that contains a sequence of numbers (longs) and is useful for
* testing and for cases that just need a stream of N events of any kind.
*
* <p>The generated source splits the sequence into as many parallel sub-sequences as there are
* parallel source readers. Each sub-sequence will be produced in order. If the parallelism is
* limited to one, the source will produce one sequence in order.
*
* <p>This source is always bounded. For very long sequences (for example over the entire domain
* of long integer values), you may consider executing the application in a streaming manner
* because of the end bound that is pretty far away.
*
 * <p>Use {@link #fromSource(Source, WatermarkStrategy, String)} together with {@link NumberSequenceSource} if you require more control over the created sources. For example, if
* you want to set a {@link WatermarkStrategy}.
*
* @param from
* The number to start at (inclusive)
* @param to
* The number to stop at (inclusive)
*/
public DataStreamSource<Long> fromSequence(long from, long to) {
if (from > to) {
throw new IllegalArgumentException("Start of sequence must not be greater than the end");
}
return fromSource(new NumberSequenceSource(from, to), WatermarkStrategy.noWatermarks(), "Sequence Source");
}
/**
* Creates a new data stream that contains the given elements. The elements must all be of the
 * same type, for example, all {@link String} or all {@link Integer}.
*
 * <p>The framework will try to determine the exact type from the elements. In case of generic
* elements, it may be necessary to manually supply the type information via {@link #fromCollection(java.util.Collection, org.apache.flink.api.common.typeinfo.TypeInformation)}.
*
* <p>Note that this operation will result in a non-parallel data stream source, i.e. a data
* stream source with a degree of parallelism one.
*
* @param data
* The array of elements to create the data stream from.
* @param <OUT>
* The type of the returned data stream
* @return The data stream representing the given array of elements
 * @deprecated This method will be removed in a future release, possibly as early as version 2.0.
Use {@link #fromData(OUT...)} | 3.26 |
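As a quick illustration of the fromSequence source above, a minimal sketch; the class name and the print sink are assumptions, not part of the original snippet.

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Sketch: emit the numbers 1..100 as a bounded stream and print them.
public class FromSequenceSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStreamSource<Long> numbers = env.fromSequence(1L, 100L);
        numbers.print();
        env.execute("fromSequence example");
    }
}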
flink_StreamExecutionEnvironment_getExecutionEnvironment_rdh | /**
* Creates an execution environment that represents the context in which the program is
* currently executed. If the program is invoked standalone, this method returns a local
* execution environment, as returned by {@link #createLocalEnvironment(Configuration)}.
*
* <p>When executed from the command line the given configuration is stacked on top of the
* global configuration which comes from the {@code flink-conf.yaml}, potentially overriding
* duplicated options.
*
* @param configuration
* The configuration to instantiate the environment with.
* @return The execution environment of the context in which the program is executed.
*/
public static StreamExecutionEnvironment getExecutionEnvironment(Configuration configuration) {
return Utils.resolveFactory(threadLocalContextEnvironmentFactory, contextEnvironmentFactory).map(factory -> factory.createExecutionEnvironment(configuration)).orElseGet(() -> StreamExecutionEnvironment.createLocalEnvironment(configuration));
} | 3.26 |
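A brief sketch of passing a per-program configuration into the factory resolution above; the chosen option and value are illustrative assumptions.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Sketch: the given configuration is layered on top of the context environment;
// when run standalone this falls back to a local environment carrying these options.
public class ConfiguredEnvironmentSketch {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        config.setInteger(RestOptions.PORT, 8081); // illustrative override
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config);
        System.out.println("Default parallelism: " + env.getParallelism());
    }
}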
flink_StreamExecutionEnvironment_readFileStream_rdh | /**
 * Creates a data stream that contains the contents of files created while the system watches
 * the given path. The files will be read with the system's default character set.
*
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path/")
* @param intervalMillis
* The interval of file watching in milliseconds
* @param watchType
* The watch type of file stream. When watchType is {@link org.apache.flink.streaming.api.functions.source.FileMonitoringFunction.WatchType#ONLY_NEW_FILES},
* the system processes only new files. {@link org.apache.flink.streaming.api.functions.source.FileMonitoringFunction.WatchType#REPROCESS_WITH_APPENDED}
* means that the system re-processes all contents of appended file. {@link org.apache.flink.streaming.api.functions.source.FileMonitoringFunction.WatchType#PROCESS_ONLY_APPENDED}
* means that the system processes only appended contents of files.
* @return The DataStream containing the given directory.
* @deprecated Use {@link #readFile(FileInputFormat, String, FileProcessingMode, long)} instead.
*/
@Deprecated
@SuppressWarnings("deprecation")
public DataStream<String> readFileStream(String filePath, long intervalMillis, FileMonitoringFunction.WatchType watchType) {
DataStream<Tuple3<String, Long, Long>> source = addSource(new FileMonitoringFunction(filePath, intervalMillis, watchType), "Read File Stream source");
return source.flatMap(new FileReadFunction());
}
/**
* Reads the contents of the user-specified {@code filePath} based on the given {@link FileInputFormat}. Depending on the provided {@link FileProcessingMode}, the source may
* periodically monitor (every {@code interval} ms) the path for new data ({@link FileProcessingMode#PROCESS_CONTINUOUSLY}), or process once the data currently in the path and
* exit ({@link FileProcessingMode#PROCESS_ONCE}). In addition, if the path contains files not
* to be processed, the user can specify a custom {@link FilePathFilter}. As a default
* implementation you can use {@link FilePathFilter#createDefaultFilter()}.
*
* <p><b>NOTES ON CHECKPOINTING: </b> If the {@code watchType} is set to {@link FileProcessingMode#PROCESS_ONCE}, the source monitors the path <b>once</b>, creates the
* {@link org.apache.flink.core.fs.FileInputSplit FileInputSplits} to be processed, forwards
* them to the downstream readers to read the actual data, and exits, without waiting for the
* readers to finish reading. This implies that no more checkpoint barriers are going to be
* forwarded after the source exits, thus having no checkpoints after that point.
*
* @param inputFormat
* The input format used to create the data stream
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path")
* @param watchType
* The mode in which the source should operate, i.e. monitor path and react to
* new data, or process once and exit
* @param typeInformation
* Information on the type of the elements in the output stream
* @param interval
* In the case of periodic path monitoring, this specifies the interval (in
* millis) between consecutive path scans
* @param <OUT>
* The type of the returned data stream
* @return The data stream that represents the data read from the given file
* @deprecated Use {@code FileSource#forRecordStreamFormat()/forBulkFileFormat()/forRecordFileFormat() instead}. An
example of reading a file using a simple {@code TextLineInputFormat}:
<pre>{@code FileSource<String> source =
FileSource.forRecordStreamFormat(
new TextLineInputFormat(), new Path("/foo/bar"))
.monitorContinuously(Duration.of(10, SECONDS))
.build();} | 3.26 |
flink_StreamExecutionEnvironment_createInput_rdh | /**
* Generic method to create an input data stream with {@link org.apache.flink.api.common.io.InputFormat}.
*
* <p>The data stream is typed to the given TypeInformation. This method is intended for input
* formats where the return type cannot be determined by reflection analysis, and that do not
* implement the {@link org.apache.flink.api.java.typeutils.ResultTypeQueryable} interface.
*
* <p><b>NOTES ON CHECKPOINTING: </b> In the case of a {@link FileInputFormat}, the source
* (which executes the {@link ContinuousFileMonitoringFunction}) monitors the path, creates the
* {@link org.apache.flink.core.fs.FileInputSplit FileInputSplits} to be processed, forwards
* them to the downstream readers to read the actual data, and exits, without waiting for the
* readers to finish reading. This implies that no more checkpoint barriers are going to be
* forwarded after the source exits, thus having no checkpoints.
*
* @param inputFormat
* The input format used to create the data stream
* @param typeInfo
* The information about the type of the output type
* @param <OUT>
* The type of the returned data stream
* @return The data stream that represents the data created by the input format
*/
@PublicEvolving
public <OUT> DataStreamSource<OUT> createInput(InputFormat<OUT, ?> inputFormat, TypeInformation<OUT> typeInfo) {
DataStreamSource<OUT> source;
if (inputFormat instanceof FileInputFormat) {
@SuppressWarnings("unchecked")
FileInputFormat<OUT> format = ((FileInputFormat<OUT>) (inputFormat));
source = createFileInput(format, typeInfo, "Custom File source", FileProcessingMode.PROCESS_ONCE, -1);
} else {
source = createInput(inputFormat, typeInfo, "Custom Source");
}
return source;
} | 3.26 |
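To make the explicit type-information path above concrete, a small sketch that reads a text file once through createInput(); the input path is an illustrative assumption.

import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Sketch: a FileInputFormat routed through createInput() is processed once (PROCESS_ONCE),
// with the output type supplied explicitly.
public class CreateInputSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        TextInputFormat format = new TextInputFormat(new Path("file:///tmp/input.txt")); // assumed path
        DataStreamSource<String> lines = env.createInput(format, BasicTypeInfo.STRING_TYPE_INFO);
        lines.print();
        env.execute("createInput example");
    }
}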
flink_StreamExecutionEnvironment_clearJobListeners_rdh | /**
* Clear all registered {@link JobListener}s.
*/
@PublicEvolving
public void clearJobListeners() {
this.jobListeners.clear();
}
/**
* Triggers the program asynchronously. The environment will execute all parts of the program
* that have resulted in a "sink" operation. Sink operations are for example printing results or
* forwarding them to a message queue.
*
* <p>The program execution will be logged and displayed with a generated default name.
*
* @return A {@link JobClient} | 3.26 |
flink_StreamExecutionEnvironment_getConfig_rdh | /**
* Gets the config object.
*/
public ExecutionConfig getConfig() {
return config;
} | 3.26 |
flink_StreamExecutionEnvironment_registerCachedFile_rdh | /**
* Registers a file at the distributed cache under the given name. The file will be accessible
* from any user-defined function in the (distributed) runtime under a local path. Files may be
* local files (which will be distributed via BlobServer), or files in a distributed file
* system. The runtime will copy the files temporarily to a local cache, if needed.
*
* <p>The {@link org.apache.flink.api.common.functions.RuntimeContext} can be obtained inside
* UDFs via {@link org.apache.flink.api.common.functions.RichFunction#getRuntimeContext()} and
 * provides access to {@link org.apache.flink.api.common.cache.DistributedCache} via {@link org.apache.flink.api.common.functions.RuntimeContext#getDistributedCache()}.
*
* @param filePath
* The path of the file, as a URI (e.g. "file:///some/path" or
* "hdfs://host:port/and/path")
* @param name
* The name under which the file is registered.
* @param executable
* flag indicating whether the file should be executable
*/
public void registerCachedFile(String filePath, String name, boolean executable) {
this.cacheFile.add(new Tuple2<>(name, new DistributedCache.DistributedCacheEntry(filePath, executable)));
} | 3.26 |
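To show both halves of the distributed-cache contract described above (registration on the environment, retrieval inside a rich function), a minimal sketch; the file path and the map function are assumptions.

import java.io.File;

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Sketch: register a file under a name, then resolve its local copy inside a rich UDF.
public class CachedFileSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.registerCachedFile("file:///tmp/lookup.txt", "lookup", false); // assumed file

        env.fromSequence(1L, 10L)
                .map(new RichMapFunction<Long, String>() {
                    @Override
                    public String map(Long value) throws Exception {
                        // The runtime has copied the registered file to a local path.
                        File lookup = getRuntimeContext().getDistributedCache().getFile("lookup");
                        return value + " -> " + lookup.getAbsolutePath();
                    }
                })
                .print();

        env.execute("registerCachedFile example");
    }
}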
flink_StreamExecutionEnvironment_getParallelism_rdh | /**
 * Gets the parallelism with which operations are executed by default. Operations can
* individually override this value to use a specific parallelism.
*
* @return The parallelism used by operations, unless they override that value.
*/
public int getParallelism() {
return config.getParallelism();
} | 3.26 |
flink_StreamExecutionEnvironment_m2_rdh | /**
* Creates the plan with which the system will execute the program, and returns it as a String
* using a JSON representation of the execution data flow graph. Note that this needs to be
 * called before the plan is executed.
*
* @return The execution plan of the program, as a JSON String.
*/
public String m2() {
return getStreamGraph(false).getStreamingPlanAsJSON();
} | 3.26 |
flink_StreamExecutionEnvironment_getTypeInfo_rdh | // Private helpers.
@SuppressWarnings("unchecked")
private <OUT, T extends TypeInformation<OUT>> T getTypeInfo(Object source, String sourceName, Class<?> baseSourceClass, TypeInformation<OUT> typeInfo) {
TypeInformation<OUT> resolvedTypeInfo = typeInfo;
if ((resolvedTypeInfo == null) && (source instanceof ResultTypeQueryable)) {
resolvedTypeInfo = ((ResultTypeQueryable<OUT>) (source)).getProducedType();
}
if (resolvedTypeInfo == null) {
try {
resolvedTypeInfo = TypeExtractor.createTypeInfo(baseSourceClass, source.getClass(), 0, null, null);
            } catch (final InvalidTypesException e) {
resolvedTypeInfo = ((TypeInformation<OUT>) (new MissingTypeInfo(sourceName, e)));
}
        }
        return ((T) (resolvedTypeInfo));
} | 3.26 |
flink_StreamExecutionEnvironment_getBufferTimeout_rdh | /**
* Gets the maximum time frequency (milliseconds) for the flushing of the output buffers. For
* clarification on the extremal values see {@link #setBufferTimeout(long)}.
*
* @return The timeout of the buffer.
*/
public long getBufferTimeout() {
return this.bufferTimeout;
} | 3.26 |
flink_StreamExecutionEnvironment_getCheckpointConfig_rdh | // ------------------------------------------------------------------------
// Checkpointing Settings
// ------------------------------------------------------------------------
/**
* Gets the checkpoint config, which defines values like checkpoint interval, delay between
* checkpoints, etc.
*
* @return The checkpoint config.
*/
public CheckpointConfig getCheckpointConfig() {
return checkpointCfg;
} | 3.26 |
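For context, a short sketch of adjusting checkpoint settings through the config object returned above; the interval, pause, and timeout values are illustrative assumptions.

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Sketch: tune checkpointing via the CheckpointConfig returned by getCheckpointConfig().
public class CheckpointConfigSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(60_000L); // checkpoint every 60 s (illustrative)

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        checkpointConfig.setMinPauseBetweenCheckpoints(10_000L);
        checkpointConfig.setCheckpointTimeout(120_000L);
    }
}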
flink_StreamExecutionEnvironment_getJobListeners_rdh | /**
 * Gets the registered {@link JobListener}s.
*/
@PublicEvolving
public List<JobListener> getJobListeners() {
return jobListeners;
} | 3.26 |