@SuppressWarnings("unchecked") public O withForwardedFieldsFirst(String... forwardedFieldsFirst) { if (this.udfSemantics == null || this.analyzedUdfSemantics) { // extract semantic properties from function annotations setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass())); } if (this.udfSemantics == null || this.analyzedUdfSemantics) { setSemanticProperties(new DualInputSemanticProperties()); SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType()); } else { if (this.udfWithForwardedFieldsFirstAnnotation(getFunction().getClass())) { // refuse semantic information as it would override the function annotation throw new SemanticProperties.InvalidSemanticAnnotationException("Forwarded field information " + "has already been added by a function annotation for the first input of this operator. " + "Cannot overwrite function annotations."); } else { SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType()); } } O returnType = (O) this; return returnType; }
Adds semantic information about forwarded fields of the first input of the user-defined function. The forwarded fields information declares fields which are never modified by the function and which are either forwarded to the same position in the output or copied unchanged to another position in the output. <p>Fields that are forwarded to the same position are specified by their position. The specified position must be valid for the input and output data type and must have the same type. For example <code>withForwardedFieldsFirst("f2")</code> declares that the third field of a Java input tuple from the first input is copied to the third field of an output tuple. <p>Fields which are copied unchanged from the first input to another position in the output are declared by specifying the source field reference in the first input and the target field reference in the output. {@code withForwardedFieldsFirst("f0->f2")} denotes that the first field of the first input Java tuple is copied unchanged to the third field of the Java output tuple. When using a wildcard ("*"), ensure that the number of declared fields and their types in the first input and the output type match. <p>Multiple forwarded fields can be annotated in one string ({@code withForwardedFieldsFirst("f2; f3->f0; f4")}) or in separate strings ({@code withForwardedFieldsFirst("f2", "f3->f0", "f4")}). Please refer to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or Flink's documentation for details on field references such as nested fields and wildcards. <p>It is not possible to override existing semantic information about forwarded fields of the first input which was, for example, added by a {@link org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsFirst} class annotation. <p><b>NOTE: Adding semantic information for functions is optional! If used correctly, semantic information can help the Flink optimizer to generate more efficient execution plans. However, incorrect semantic information can cause the optimizer to generate incorrect execution plans which compute wrong results! So be careful when adding semantic information.</b> @param forwardedFieldsFirst A list of forwarded field expressions for the first input of the function. @return This operator with annotated forwarded field information. @see org.apache.flink.api.java.functions.FunctionAnnotation @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsFirst
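A minimal usage sketch (not from the source; the data sets, the inline join function, and the env variable are hypothetical) showing how the forwarded-fields hint is attached to a two-input operator:

DataSet<Tuple2<Long, String>> names = env.fromElements(Tuple2.of(1L, "alice"));
DataSet<Tuple2<Long, Double>> scores = env.fromElements(Tuple2.of(1L, 0.5));

DataSet<Tuple2<Long, Double>> joined = names.join(scores)
    .where(0).equalTo(0)
    .with(new JoinFunction<Tuple2<Long, String>, Tuple2<Long, Double>, Tuple2<Long, Double>>() {
        @Override
        public Tuple2<Long, Double> join(Tuple2<Long, String> name, Tuple2<Long, Double> score) {
            // the key of the first input is copied unchanged into position 0 of the output
            return Tuple2.of(name.f0, score.f1);
        }
    })
    // declare that f0 of the first input is forwarded to f0 of the output
    .withForwardedFieldsFirst("f0");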
@SuppressWarnings("unchecked") public O withForwardedFieldsSecond(String... forwardedFieldsSecond) { if (this.udfSemantics == null || this.analyzedUdfSemantics) { // extract semantic properties from function annotations setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass())); } if (this.udfSemantics == null || this.analyzedUdfSemantics) { setSemanticProperties(new DualInputSemanticProperties()); SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, null, forwardedFieldsSecond, null, null, null, null, getInput1Type(), getInput2Type(), getResultType()); } else { if (udfWithForwardedFieldsSecondAnnotation(getFunction().getClass())) { // refuse semantic information as it would override the function annotation throw new SemanticProperties.InvalidSemanticAnnotationException("Forwarded field information " + "has already been added by a function annotation for the second input of this operator. " + "Cannot overwrite function annotations."); } else { SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, null, forwardedFieldsSecond, null, null, null, null, getInput1Type(), getInput2Type(), getResultType()); } } O returnType = (O) this; return returnType; }
Adds semantic information about forwarded fields of the second input of the user-defined function. The forwarded fields information declares fields which are never modified by the function and which are either forwarded to the same position in the output or copied unchanged to another position in the output. <p>Fields that are forwarded to the same position are specified by their position. The specified position must be valid for the input and output data type and must have the same type. For example <code>withForwardedFieldsSecond("f2")</code> declares that the third field of a Java input tuple from the second input is copied to the third field of an output tuple. <p>Fields which are copied unchanged from the second input to another position in the output are declared by specifying the source field reference in the second input and the target field reference in the output. {@code withForwardedFieldsSecond("f0->f2")} denotes that the first field of the second input Java tuple is copied unchanged to the third field of the Java output tuple. When using a wildcard ("*"), ensure that the number of declared fields and their types in the second input and the output type match. <p>Multiple forwarded fields can be annotated in one string ({@code withForwardedFieldsSecond("f2; f3->f0; f4")}) or in separate strings ({@code withForwardedFieldsSecond("f2", "f3->f0", "f4")}). Please refer to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or Flink's documentation for details on field references such as nested fields and wildcards. <p>It is not possible to override existing semantic information about forwarded fields of the second input which was, for example, added by a {@link org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsSecond} class annotation. <p><b>NOTE: Adding semantic information for functions is optional! If used correctly, semantic information can help the Flink optimizer to generate more efficient execution plans. However, incorrect semantic information can cause the optimizer to generate incorrect execution plans which compute wrong results! So be careful when adding semantic information.</b> @param forwardedFieldsSecond A list of forwarded field expressions for the second input of the function. @return This operator with annotated forwarded field information. @see org.apache.flink.api.java.functions.FunctionAnnotation @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsSecond
public O returns(Class<OUT> typeClass) { requireNonNull(typeClass, "type class must not be null"); try { return returns(TypeInformation.of(typeClass)); } catch (InvalidTypesException e) { throw new InvalidTypesException("Cannot infer the type information from the class alone. " + "This is most likely because the class represents a generic type. In that case, " + "please use the 'returns(TypeHint)' method instead.", e); } }
Adds a type information hint about the return type of this operator. This method can be used in cases where Flink cannot determine automatically what the produced type of a function is. That can be the case if the function uses generic type variables in the return type that cannot be inferred from the input type. <p>Classes can be used as type hints for non-generic types (classes without generic parameters), but not for generic types like for example Tuples. For those generic types, please use the {@link #returns(TypeHint)} method. <p>Use this method the following way: <pre>{@code DataSet<String[]> result = data1.join(data2).where("id").equalTo("fieldX") .with(new JoinFunctionWithNonInferrableReturnType()) .returns(String[].class); }</pre> @param typeClass The class of the returned data type. @return This operator with the type information corresponding to the given type class.
public O returns(TypeHint<OUT> typeHint) { requireNonNull(typeHint, "TypeHint must not be null"); try { return returns(TypeInformation.of(typeHint)); } catch (InvalidTypesException e) { throw new InvalidTypesException("Cannot infer the type information from the type hint. " + "Make sure that the TypeHint does not use any generic type variables.", e); } }
Adds a type information hint about the return type of this operator. This method can be used in cases where Flink cannot determine automatically what the produced type of a function is. That can be the case if the function uses generic type variables in the return type that cannot be inferred from the input type. <p>Use this method the following way: <pre>{@code DataSet<Tuple2<String, Double>> result = data1.join(data2).where("id").equalTo("fieldX") .with(new JoinFunctionWithNonInferrableReturnType()) .returns(new TypeHint<Tuple2<String, Double>>(){}); }</pre> @param typeHint The type hint for the returned data type. @return This operator with the type information corresponding to the given type hint.
public O returns(TypeInformation<OUT> typeInfo) { requireNonNull(typeInfo, "TypeInformation must not be null"); fillInType(typeInfo); @SuppressWarnings("unchecked") O returnType = (O) this; return returnType; }
Adds a type information hint about the return type of this operator. This method can be used in cases where Flink cannot determine automatically what the produced type of a function is. That can be the case if the function uses generic type variables in the return type that cannot be inferred from the input type. <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are preferable. @param typeInfo The type information for the returned data type. @return This operator using the given type information for the return type.
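As a hedged sketch of the explicit variant, the placeholder names from the Javadoc above are reused and the type information is built with the Types factory from org.apache.flink.api.common.typeinfo (assuming that factory is available in the Flink version at hand):

DataSet<Tuple2<String, Double>> result = data1.join(data2).where("id").equalTo("fieldX")
    .with(new JoinFunctionWithNonInferrableReturnType())
    // equivalent to returns(new TypeHint<Tuple2<String, Double>>(){}), but spelled out explicitly
    .returns(Types.TUPLE(Types.STRING, Types.DOUBLE));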
@Override @SuppressWarnings("unchecked") public Tuple7<T0, T1, T2, T3, T4, T5, T6> copy() { return new Tuple7<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6); }
Shallow tuple copy. @return A new Tuple with the same fields as this.
public static <T0, T1, T2, T3, T4, T5, T6> Tuple7<T0, T1, T2, T3, T4, T5, T6> of(T0 value0, T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6) { return new Tuple7<>(value0, value1, value2, value3, value4, value5, value6); }
Creates a new tuple and assigns the given values to the tuple's fields. This is more convenient than using the constructor, because the compiler can infer the generic type arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new Tuple3<Integer, Double, String>(n, x, s)}
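A small sketch (not from the source) of the factory next to the equivalent constructor call, plus the shallow copy described above:

// type arguments are inferred by of(), but must be spelled out for the constructor
Tuple7<Integer, Long, String, Double, Boolean, Byte, Short> t =
    Tuple7.of(1, 2L, "three", 4.0, true, (byte) 6, (short) 7);
Tuple7<Integer, Long, String, Double, Boolean, Byte, Short> same =
    new Tuple7<>(1, 2L, "three", 4.0, true, (byte) 6, (short) 7);
Tuple7<Integer, Long, String, Double, Boolean, Byte, Short> shallow = t.copy(); // field objects are not cloned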
public ChannelHandler[] getServerChannelHandlers() { PartitionRequestQueue queueOfPartitionQueues = new PartitionRequestQueue(); PartitionRequestServerHandler serverHandler = new PartitionRequestServerHandler( partitionProvider, taskEventPublisher, queueOfPartitionQueues, creditBasedEnabled); return new ChannelHandler[] { messageEncoder, new NettyMessage.NettyMessageDecoder(!creditBasedEnabled), serverHandler, queueOfPartitionQueues }; }
Returns the server channel handlers. <pre> +-------------------------------------------------------------------+ | SERVER CHANNEL PIPELINE | | | | +----------+----------+ (3) write +----------------------+ | | | Queue of queues +----------->| Message encoder | | | +----------+----------+ +-----------+----------+ | | /|\ \|/ | | | (2) enqueue | | | +----------+----------+ | | | | Request handler | | | | +----------+----------+ | | | /|\ | | | | | | | +-----------+-----------+ | | | | Message+Frame decoder | | | | +-----------+-----------+ | | | /|\ | | +---------------+-----------------------------------+---------------+ | | (1) client request \|/ +---------------+-----------------------------------+---------------+ | | | | | [ Socket.read() ] [ Socket.write() ] | | | | Netty Internal I/O Threads (Transport Implementation) | +-------------------------------------------------------------------+ </pre> @return channel handlers
public ChannelHandler[] getClientChannelHandlers() { NetworkClientHandler networkClientHandler = creditBasedEnabled ? new CreditBasedPartitionRequestClientHandler() : new PartitionRequestClientHandler(); return new ChannelHandler[] { messageEncoder, new NettyMessage.NettyMessageDecoder(!creditBasedEnabled), networkClientHandler}; }
Returns the client channel handlers. <pre> +-----------+----------+ +----------------------+ | Remote input channel | | request client | +-----------+----------+ +-----------+----------+ | | (1) write +---------------+-----------------------------------+---------------+ | | CLIENT CHANNEL PIPELINE | | | | \|/ | | +----------+----------+ +----------------------+ | | | Request handler + | Message encoder | | | +----------+----------+ +-----------+----------+ | | /|\ \|/ | | | | | | +----------+------------+ | | | | Message+Frame decoder | | | | +----------+------------+ | | | /|\ | | +---------------+-----------------------------------+---------------+ | | (3) server response \|/ (2) client request +---------------+-----------------------------------+---------------+ | | | | | [ Socket.read() ] [ Socket.write() ] | | | | Netty Internal I/O Threads (Transport Implementation) | +-------------------------------------------------------------------+ </pre> @return channel handlers
@Override protected ShardConsumer createShardConsumer( Integer subscribedShardStateIndex, StreamShardHandle handle, SequenceNumber lastSeqNum, ShardMetricsReporter shardMetricsReporter) { return new ShardConsumer( this, subscribedShardStateIndex, handle, lastSeqNum, DynamoDBStreamsProxy.create(getConsumerConfiguration()), shardMetricsReporter); }
Creates a new DynamoDB Streams shard consumer. @param subscribedShardStateIndex the state index of the shard this consumer is subscribed to @param handle stream handle @param lastSeqNum last sequence number @param shardMetricsReporter the reporter to report metrics to @return a new shard consumer subscribed to the given DynamoDB stream shard
private void openCli(SessionContext context, Executor executor) { CliClient cli = null; try { cli = new CliClient(context, executor); // interactive CLI mode if (options.getUpdateStatement() == null) { cli.open(); } // execute single update statement else { final boolean success = cli.submitUpdate(options.getUpdateStatement()); if (!success) { throw new SqlClientException("Could not submit given SQL update statement to cluster."); } } } finally { if (cli != null) { cli.close(); } } }
Opens the CLI client for executing SQL statements. @param context session context @param executor executor
private static void validateEnvironment(SessionContext context, Executor executor) { System.out.print("Validating current environment..."); try { executor.validateSession(context); System.out.println("done."); } catch (SqlExecutionException e) { throw new SqlClientException( "The configured environment is invalid. Please check your environment files again.", e); } }
--------------------------------------------------------------------------------------------
public static void main(String[] args) { if (args.length < 1) { CliOptionsParser.printHelpClient(); return; } switch (args[0]) { case MODE_EMBEDDED: // remove mode final String[] modeArgs = Arrays.copyOfRange(args, 1, args.length); final CliOptions options = CliOptionsParser.parseEmbeddedModeClient(modeArgs); if (options.isPrintHelp()) { CliOptionsParser.printHelpEmbeddedModeClient(); } else { try { final SqlClient client = new SqlClient(true, options); client.start(); } catch (SqlClientException e) { // make space in terminal System.out.println(); System.out.println(); LOG.error("SQL Client must stop.", e); throw e; } catch (Throwable t) { // make space in terminal System.out.println(); System.out.println(); LOG.error("SQL Client must stop. Unexpected exception. This is a bug. Please consider filing an issue.", t); throw new SqlClientException("Unexpected exception. This is a bug. Please consider filing an issue.", t); } } break; case MODE_GATEWAY: throw new SqlClientException("Gateway mode is not supported yet."); default: CliOptionsParser.printHelpClient(); } }
--------------------------------------------------------------------------------------------
public static AmazonKinesis createKinesisClient(Properties configProps, ClientConfiguration awsClientConfig) { // set a Flink-specific user agent awsClientConfig.setUserAgentPrefix(String.format(USER_AGENT_FORMAT, EnvironmentInformation.getVersion(), EnvironmentInformation.getRevisionInformation().commitId)); // utilize automatic refreshment of credentials by directly passing the AWSCredentialsProvider AmazonKinesisClientBuilder builder = AmazonKinesisClientBuilder.standard() .withCredentials(AWSUtil.getCredentialsProvider(configProps)) .withClientConfiguration(awsClientConfig); if (configProps.containsKey(AWSConfigConstants.AWS_ENDPOINT)) { // Set signingRegion as null, to facilitate mocking Kinesis for local tests builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration( configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT), null)); } else { builder.withRegion(Regions.fromName(configProps.getProperty(AWSConfigConstants.AWS_REGION))); } return builder.build(); }
Creates an Amazon Kinesis Client. @param configProps configuration properties containing the access key, secret key, and region @param awsClientConfig preconfigured AWS SDK client configuration @return a new Amazon Kinesis Client
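A minimal usage sketch (not from the source; the region value is a placeholder, and the surrounding utility class is assumed to be AWSUtil, as suggested by the calls in the method body):

Properties configProps = new Properties();
configProps.setProperty(AWSConfigConstants.AWS_REGION, "us-east-1"); // placeholder region
// with no explicit credentials provider configured, AWSUtil.getCredentialsProvider(configProps)
// falls back to the AUTO provider chain
AmazonKinesis kinesis = AWSUtil.createKinesisClient(configProps, new ClientConfiguration());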
private static AWSCredentialsProvider getCredentialsProvider(final Properties configProps, final String configPrefix) { CredentialProvider credentialProviderType; if (!configProps.containsKey(configPrefix)) { if (configProps.containsKey(AWSConfigConstants.accessKeyId(configPrefix)) && configProps.containsKey(AWSConfigConstants.secretKey(configPrefix))) { // if the credential provider type is not specified, but the Access Key ID and Secret Key are given, it will default to BASIC credentialProviderType = CredentialProvider.BASIC; } else { // if the credential provider type is not specified, it will default to AUTO credentialProviderType = CredentialProvider.AUTO; } } else { credentialProviderType = CredentialProvider.valueOf(configProps.getProperty(configPrefix)); } switch (credentialProviderType) { case ENV_VAR: return new EnvironmentVariableCredentialsProvider(); case SYS_PROP: return new SystemPropertiesCredentialsProvider(); case PROFILE: String profileName = configProps.getProperty( AWSConfigConstants.profileName(configPrefix), null); String profileConfigPath = configProps.getProperty( AWSConfigConstants.profilePath(configPrefix), null); return (profileConfigPath == null) ? new ProfileCredentialsProvider(profileName) : new ProfileCredentialsProvider(profileConfigPath, profileName); case BASIC: return new AWSCredentialsProvider() { @Override public AWSCredentials getCredentials() { return new BasicAWSCredentials( configProps.getProperty(AWSConfigConstants.accessKeyId(configPrefix)), configProps.getProperty(AWSConfigConstants.secretKey(configPrefix))); } @Override public void refresh() { // do nothing } }; case ASSUME_ROLE: final AWSSecurityTokenService baseCredentials = AWSSecurityTokenServiceClientBuilder.standard() .withCredentials(getCredentialsProvider(configProps, AWSConfigConstants.roleCredentialsProvider(configPrefix))) .withRegion(configProps.getProperty(AWSConfigConstants.AWS_REGION)) .build(); return new STSAssumeRoleSessionCredentialsProvider.Builder( configProps.getProperty(AWSConfigConstants.roleArn(configPrefix)), configProps.getProperty(AWSConfigConstants.roleSessionName(configPrefix))) .withExternalId(configProps.getProperty(AWSConfigConstants.externalId(configPrefix))) .withStsClient(baseCredentials) .build(); default: case AUTO: return new DefaultAWSCredentialsProviderChain(); } }
Returns the {@link AWSCredentialsProvider} configured under the given config prefix. If the provider is ASSUME_ROLE, then the credentials for assuming this role are determined recursively. @param configProps the configuration properties @param configPrefix the prefix of the config properties for this credentials provider, e.g. aws.credentials.provider for the base credentials provider, aws.credentials.provider.role.provider for the credentials provider for assuming a role, and so on. @return the credentials provider configured under the given prefix
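A hedged configuration sketch for the ASSUME_ROLE case, built on the key-builder helpers referenced in the method above; the base prefix string is taken from the parameter description, and the role ARN, session name, and region are placeholders:

Properties props = new Properties();
props.setProperty(AWSConfigConstants.AWS_REGION, "eu-west-1"); // placeholder region
String prefix = "aws.credentials.provider"; // base prefix, per the @param note above
props.setProperty(prefix, "ASSUME_ROLE");
props.setProperty(AWSConfigConstants.roleArn(prefix), "arn:aws:iam::111111111111:role/example"); // placeholder ARN
props.setProperty(AWSConfigConstants.roleSessionName(prefix), "flink-kinesis-session");
// the credentials for assuming the role are resolved recursively under <prefix>.role.provider
props.setProperty(AWSConfigConstants.roleCredentialsProvider(prefix), "AUTO");
AWSCredentialsProvider provider = AWSUtil.getCredentialsProvider(props);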
public static boolean isValidRegion(String region) { try { Regions.fromName(region.toLowerCase()); } catch (IllegalArgumentException e) { return false; } return true; }
Checks whether or not a region ID is valid. @param region The AWS region ID to check @return true if the supplied region ID is valid, false otherwise
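A few spot checks of the behaviour above (a sketch; expected results in the comments):

isValidRegion("us-east-1");    // true
isValidRegion("US-EAST-1");    // true, the input is lower-cased before the lookup
isValidRegion("mars-north-1"); // false, Regions.fromName(...) throws and the method returns false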
public static void setAwsClientConfigProperties(ClientConfiguration config, Properties configProps) { Map<String, Object> awsConfigProperties = new HashMap<>(); for (Map.Entry<Object, Object> entry : configProps.entrySet()) { String key = (String) entry.getKey(); if (key.startsWith(AWS_CLIENT_CONFIG_PREFIX)) { awsConfigProperties.put(key.substring(AWS_CLIENT_CONFIG_PREFIX.length()), entry.getValue()); } } // Jackson does not like the following properties String[] ignorableProperties = {"secureRandom"}; BeanDeserializerModifier modifier = new BeanDeserializerModifierForIgnorables( ClientConfiguration.class, ignorableProperties); DeserializerFactory factory = BeanDeserializerFactory.instance.withDeserializerModifier( modifier); ObjectMapper mapper = new ObjectMapper(null, null, new DefaultDeserializationContext.Impl(factory)); JsonNode propTree = mapper.convertValue(awsConfigProperties, JsonNode.class); try { mapper.readerForUpdating(config).readValue(propTree); } catch (IOException ex) { throw new RuntimeException(ex); } }
Sets all prefixed properties on the {@link ClientConfiguration}. @param config the AWS SDK client configuration to populate @param configProps the configuration properties; only entries whose keys start with the AWS client config prefix are applied
public static long nextPowerOfTwo(long x) { if (x == 0L) { return 1L; } else { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return (x | x >> 32) + 1L; } }
Return the least power of two greater than or equal to the specified value. <p>Note that this function will return 1 when the argument is 0. @param x a long integer smaller than or equal to 2<sup>62</sup>. @return the least power of two greater than or equal to the specified value.
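A few spot checks of the bit-twiddling above (a sketch; expected results in the comments):

nextPowerOfTwo(0L);             // 1, the special case noted above
nextPowerOfTwo(1L);             // 1
nextPowerOfTwo(5L);             // 8
nextPowerOfTwo(64L);            // 64, exact powers of two are returned unchanged
nextPowerOfTwo((1L << 62) - 3); // 1L << 62, the largest result that still fits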
public static int maxFill(int n, float f) { return Math.min((int) Math.ceil((double) ((float) n * f)), n - 1); }
Returns the maximum number of entries that can be filled before rehashing. @param n the size of the backing array. @param f the load factor. @return the maximum number of entries before rehashing.
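A worked example of the formula min(ceil(n * f), n - 1) (expected results in the comments):

maxFill(16, 0.75f); // ceil(16 * 0.75) = 12, below the cap of n - 1 = 15
maxFill(16, 1.0f);  // ceil(16 * 1.0) = 16, capped at n - 1 = 15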
@Override public void snapshotState(FunctionSnapshotContext context) throws Exception { Preconditions.checkState(this.checkpointedState != null, "The " + getClass().getSimpleName() + " has not been properly initialized."); if (LOG.isDebugEnabled()) { LOG.debug("{} checkpointing: Messages: {}, checkpoint id: {}, timestamp: {}", getClass().getSimpleName(), idsForCurrentCheckpoint, context.getCheckpointId(), context.getCheckpointTimestamp()); } pendingCheckpoints.addLast(new Tuple2<>(context.getCheckpointId(), idsForCurrentCheckpoint)); idsForCurrentCheckpoint = new HashSet<>(64); this.checkpointedState.clear(); this.checkpointedState.add(SerializedCheckpointData.fromDeque(pendingCheckpoints, idSerializer)); }
------------------------------------------------------------------------
@Override public void recover() throws Exception { LOG.info("Recovering checkpoints from ZooKeeper."); // Get all there is first List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> initialCheckpoints; while (true) { try { initialCheckpoints = checkpointsInZooKeeper.getAllAndLock(); break; } catch (ConcurrentModificationException e) { LOG.warn("Concurrent modification while reading from ZooKeeper. Retrying."); } } Collections.sort(initialCheckpoints, STRING_COMPARATOR); int numberOfInitialCheckpoints = initialCheckpoints.size(); LOG.info("Found {} checkpoints in ZooKeeper.", numberOfInitialCheckpoints); // Try and read the state handles from storage. We try until we either successfully read // all of them or when we reach a stable state, i.e. when we successfully read the same set // of checkpoints in two tries. We do it like this to protect against transient outages // of the checkpoint store (for example a DFS): if the DFS comes online midway through // reading a set of checkpoints we would run the risk of reading only a partial set // of checkpoints while we could in fact read the other checkpoints as well if we retried. // Waiting until a stable state protects against this while also being resilient against // checkpoints being actually unreadable. // // These considerations are also important in the scope of incremental checkpoints, where // we use ref-counting for shared state handles and might accidentally delete shared state // of checkpoints that we don't read due to transient storage outages. List<CompletedCheckpoint> lastTryRetrievedCheckpoints = new ArrayList<>(numberOfInitialCheckpoints); List<CompletedCheckpoint> retrievedCheckpoints = new ArrayList<>(numberOfInitialCheckpoints); do { LOG.info("Trying to fetch {} checkpoints from storage.", numberOfInitialCheckpoints); lastTryRetrievedCheckpoints.clear(); lastTryRetrievedCheckpoints.addAll(retrievedCheckpoints); retrievedCheckpoints.clear(); for (Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> checkpointStateHandle : initialCheckpoints) { CompletedCheckpoint completedCheckpoint = null; try { completedCheckpoint = retrieveCompletedCheckpoint(checkpointStateHandle); if (completedCheckpoint != null) { retrievedCheckpoints.add(completedCheckpoint); } } catch (Exception e) { LOG.warn("Could not retrieve checkpoint, not adding to list of recovered checkpoints.", e); } } } while (retrievedCheckpoints.size() != numberOfInitialCheckpoints && !CompletedCheckpoint.checkpointsMatch(lastTryRetrievedCheckpoints, retrievedCheckpoints)); // Clear local handles in order to prevent duplicates on // recovery. The local handles should reflect the state // of ZooKeeper. completedCheckpoints.clear(); completedCheckpoints.addAll(retrievedCheckpoints); if (completedCheckpoints.isEmpty() && numberOfInitialCheckpoints > 0) { throw new FlinkException( "Could not read any of the " + numberOfInitialCheckpoints + " checkpoints from storage."); } else if (completedCheckpoints.size() != numberOfInitialCheckpoints) { LOG.warn( "Could only fetch {} of {} checkpoints from storage.", completedCheckpoints.size(), numberOfInitialCheckpoints); } }
Recovers the completed checkpoints stored in ZooKeeper and populates the local store with them. <p>Reading the checkpoint state handles from storage is retried until either all of them are read successfully or the same set is read in two consecutive attempts. This protects against transient outages of the checkpoint storage (for example a DFS) producing an incomplete view of the available checkpoints.
@Override public void addCheckpoint(final CompletedCheckpoint checkpoint) throws Exception { checkNotNull(checkpoint, "Checkpoint"); final String path = checkpointIdToPath(checkpoint.getCheckpointID()); // Now add the new one. If it fails, we don't want to loose existing data. checkpointsInZooKeeper.addAndLock(path, checkpoint); completedCheckpoints.addLast(checkpoint); // Everything worked, let's remove a previous checkpoint if necessary. while (completedCheckpoints.size() > maxNumberOfCheckpointsToRetain) { final CompletedCheckpoint completedCheckpoint = completedCheckpoints.removeFirst(); tryRemoveCompletedCheckpoint(completedCheckpoint, CompletedCheckpoint::discardOnSubsume); } LOG.debug("Added {} to {}.", checkpoint, path); }
Synchronously writes the new checkpoints to ZooKeeper and asynchronously removes older ones. @param checkpoint Completed checkpoint to add.
public static long pathToCheckpointId(String path) { try { String numberString; // check if we have a leading slash if ('/' == path.charAt(0)) { numberString = path.substring(1); } else { numberString = path; } return Long.parseLong(numberString); } catch (NumberFormatException e) { LOG.warn("Could not parse checkpoint id from {}. This indicates that the " + "checkpoint id to path conversion has changed.", path); return -1L; } }
Converts a path to the checkpoint id. @param path in ZooKeeper @return Checkpoint id parsed from the path
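A few spot checks (expected results in the comments; the zero-padded form mirrors how checkpoint paths are typically produced by the corresponding checkpointIdToPath method):

pathToCheckpointId("/0000000000000000042"); // 42
pathToCheckpointId("17");                   // 17, the leading slash is optional
pathToCheckpointId("/not-a-number");        // -1, after logging a warning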
@Override public void open(Configuration parameters) throws Exception { super.open(parameters); // check and pass the configuration properties KinesisProducerConfiguration producerConfig = KinesisConfigUtil.getValidatedProducerConfiguration(configProps); producer = getKinesisProducer(producerConfig); final MetricGroup kinesisMetricGroup = getRuntimeContext().getMetricGroup().addGroup(KINESIS_PRODUCER_METRIC_GROUP); this.backpressureCycles = kinesisMetricGroup.counter(METRIC_BACKPRESSURE_CYCLES); kinesisMetricGroup.gauge(METRIC_OUTSTANDING_RECORDS_COUNT, producer::getOutstandingRecordsCount); backpressureLatch = new TimeoutLatch(); callback = new FutureCallback<UserRecordResult>() { @Override public void onSuccess(UserRecordResult result) { backpressureLatch.trigger(); if (!result.isSuccessful()) { if (failOnError) { // only remember the first thrown exception if (thrownException == null) { thrownException = new RuntimeException("Record was not sent successfully"); } } else { LOG.warn("Record was not sent successfully"); } } } @Override public void onFailure(Throwable t) { backpressureLatch.trigger(); if (failOnError) { thrownException = t; } else { LOG.warn("An exception occurred while processing a record", t); } } }; if (this.customPartitioner != null) { this.customPartitioner.initialize(getRuntimeContext().getIndexOfThisSubtask(), getRuntimeContext().getNumberOfParallelSubtasks()); } LOG.info("Started Kinesis producer instance for region '{}'", producerConfig.getRegion()); }
--------------------------- Lifecycle methods ---------------------------
private void checkAndPropagateAsyncError() throws Exception { if (thrownException != null) { String errorMessages = ""; if (thrownException instanceof UserRecordFailedException) { List<Attempt> attempts = ((UserRecordFailedException) thrownException).getResult().getAttempts(); for (Attempt attempt: attempts) { if (attempt.getErrorMessage() != null) { errorMessages += attempt.getErrorMessage() + "\n"; } } } if (failOnError) { throw new RuntimeException("An exception was thrown while processing a record: " + errorMessages, thrownException); } else { LOG.warn("An exception was thrown while processing a record: {}", errorMessages, thrownException); // reset, prevent double throwing thrownException = null; } } }
Check if there are any asynchronous exceptions. If so, rethrow the exception.
private boolean enforceQueueLimit() { int attempt = 0; while (producer.getOutstandingRecordsCount() >= queueLimit) { backpressureCycles.inc(); if (attempt >= 10) { LOG.warn("Waiting for the queue length to drop below the limit takes unusually long, still not done after {} attempts.", attempt); } attempt++; try { backpressureLatch.await(100); } catch (InterruptedException e) { LOG.warn("Flushing was interrupted."); break; } } return attempt > 0; }
If the internal queue of the {@link KinesisProducer} gets too long, flush some of the records until we are below the limit again. We don't want to flush _all_ records at this point since that would break record aggregation. @return boolean whether flushing occurred or not
private void flushSync() throws Exception { while (producer.getOutstandingRecordsCount() > 0) { producer.flush(); try { Thread.sleep(500); } catch (InterruptedException e) { LOG.warn("Flushing was interrupted."); break; } } }
A reimplementation of {@link KinesisProducer#flushSync()}. This implementation releases the block on flushing if an interruption occurred.
public RemoteInputChannel toRemoteInputChannel(ConnectionID producerAddress) { return new RemoteInputChannel(inputGate, channelIndex, partitionId, checkNotNull(producerAddress), connectionManager, initialBackoff, maxBackoff, metrics); }
------------------------------------------------------------------------
public static QueryableStateConfiguration disabled() { final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString(QueryableStateOptions.PROXY_PORT_RANGE.defaultValue()); final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString(QueryableStateOptions.SERVER_PORT_RANGE.defaultValue()); return new QueryableStateConfiguration(proxyPorts, serverPorts, 0, 0, 0, 0); }
Gets the configuration describing the queryable state as deactivated.
public static QueryableStateConfiguration fromConfiguration(Configuration config) { if (!config.getBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER)) { return null; } final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString( config.getString(QueryableStateOptions.PROXY_PORT_RANGE)); final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString( config.getString(QueryableStateOptions.SERVER_PORT_RANGE)); final int numProxyServerNetworkThreads = config.getInteger(QueryableStateOptions.PROXY_NETWORK_THREADS); final int numProxyServerQueryThreads = config.getInteger(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS); final int numStateServerNetworkThreads = config.getInteger(QueryableStateOptions.SERVER_NETWORK_THREADS); final int numStateServerQueryThreads = config.getInteger(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS); return new QueryableStateConfiguration( proxyPorts, serverPorts, numProxyServerNetworkThreads, numProxyServerQueryThreads, numStateServerNetworkThreads, numStateServerQueryThreads); }
Creates the {@link QueryableStateConfiguration} from the given Configuration.
@Override public void setup(TaskContext<ReduceFunction<T>, T> context) { this.taskContext = context; this.running = true; }
------------------------------------------------------------------------
@Override public void prepare() throws Exception { final TaskConfig config = this.taskContext.getTaskConfig(); if (config.getDriverStrategy() != DriverStrategy.ALL_REDUCE) { throw new Exception("Unrecognized driver strategy for AllReduce driver: " + config.getDriverStrategy().name()); } TypeSerializerFactory<T> serializerFactory = this.taskContext.getInputSerializer(0); this.serializer = serializerFactory.getSerializer(); this.input = this.taskContext.getInput(0); ExecutionConfig executionConfig = taskContext.getExecutionConfig(); this.objectReuseEnabled = executionConfig.isObjectReuseEnabled(); if (LOG.isDebugEnabled()) { LOG.debug("AllReduceDriver object reuse: " + (this.objectReuseEnabled ? "ENABLED" : "DISABLED") + "."); } }
--------------------------------------------------------------------------------------------
public void addBroadcastSetForScatterFunction(String name, DataSet<?> data) { this.bcVarsScatter.add(new Tuple2<>(name, data)); }
Adds a data set as a broadcast set to the scatter function. @param name The name under which the broadcast data is available in the scatter function. @param data The data set to be broadcast.
public void addBroadcastSetForGatherFunction(String name, DataSet<?> data) { this.bcVarsGather.add(new Tuple2<>(name, data)); }
Adds a data set as a broadcast set to the gather function. @param name The name under which the broadcast data is available in the gather function. @param data The data set to be broadcast.
public EventId registerEvent(V value, long timestamp) throws Exception { return sharedBuffer.registerEvent(value, timestamp); }
Adds another unique event to the shared buffer and assigns a unique id for it. It automatically creates a lock on this event, so it won't be removed during processing of that event. Therefore the lock should be removed after processing all {@link org.apache.flink.cep.nfa.ComputationState}s. <p><b>NOTE:</b> Should be called only once for each unique event! @param value event to be registered @param timestamp timestamp of the event @return unique id of that event that should be used when putting entries to the buffer. @throws Exception Thrown if the system cannot access the state.
public NodeId put( final String stateName, final EventId eventId, @Nullable final NodeId previousNodeId, final DeweyNumber version) { if (previousNodeId != null) { lockNode(previousNodeId); } NodeId currentNodeId = new NodeId(eventId, getOriginalNameFromInternal(stateName)); Lockable<SharedBufferNode> currentNode = sharedBuffer.getEntry(currentNodeId); if (currentNode == null) { currentNode = new Lockable<>(new SharedBufferNode(), 0); lockEvent(eventId); } currentNode.getElement().addEdge(new SharedBufferEdge( previousNodeId, version)); sharedBuffer.upsertEntry(currentNodeId, currentNode); return currentNodeId; }
Stores given value (value + timestamp) under the given state. It assigns a preceding element relation to the previous entry. @param stateName name of the state that the event should be assigned to @param eventId unique id of event assigned by this SharedBuffer @param previousNodeId id of previous entry (might be null if start of new run) @param version Version of the previous relation @return assigned id of this element
public List<Map<String, List<EventId>>> extractPatterns( final NodeId nodeId, final DeweyNumber version) { List<Map<String, List<EventId>>> result = new ArrayList<>(); // stack to remember the current extraction states Stack<SharedBufferAccessor.ExtractionState> extractionStates = new Stack<>(); // get the starting shared buffer entry for the previous relation Lockable<SharedBufferNode> entryLock = sharedBuffer.getEntry(nodeId); if (entryLock != null) { SharedBufferNode entry = entryLock.getElement(); extractionStates.add(new SharedBufferAccessor.ExtractionState(Tuple2.of(nodeId, entry), version, new Stack<>())); // use a depth first search to reconstruct the previous relations while (!extractionStates.isEmpty()) { final SharedBufferAccessor.ExtractionState extractionState = extractionStates.pop(); // current path of the depth first search final Stack<Tuple2<NodeId, SharedBufferNode>> currentPath = extractionState.getPath(); final Tuple2<NodeId, SharedBufferNode> currentEntry = extractionState.getEntry(); // termination criterion if (currentEntry == null) { final Map<String, List<EventId>> completePath = new LinkedHashMap<>(); while (!currentPath.isEmpty()) { final NodeId currentPathEntry = currentPath.pop().f0; String page = currentPathEntry.getPageName(); List<EventId> values = completePath .computeIfAbsent(page, k -> new ArrayList<>()); values.add(currentPathEntry.getEventId()); } result.add(completePath); } else { // append state to the path currentPath.push(currentEntry); boolean firstMatch = true; for (SharedBufferEdge edge : currentEntry.f1.getEdges()) { // we can only proceed if the current version is compatible to the version // of this previous relation final DeweyNumber currentVersion = extractionState.getVersion(); if (currentVersion.isCompatibleWith(edge.getDeweyNumber())) { final NodeId target = edge.getTarget(); Stack<Tuple2<NodeId, SharedBufferNode>> newPath; if (firstMatch) { // for the first match we don't have to copy the current path newPath = currentPath; firstMatch = false; } else { newPath = new Stack<>(); newPath.addAll(currentPath); } extractionStates.push(new SharedBufferAccessor.ExtractionState( target != null ? Tuple2.of(target, sharedBuffer.getEntry(target).getElement()) : null, edge.getDeweyNumber(), newPath)); } } } } } return result; }
Returns all elements from the previous relation starting at the given entry. @param nodeId id of the starting entry @param version Version of the previous relation which shall be extracted @return Collection of previous relations starting with the given value
public Map<String, List<V>> materializeMatch(Map<String, List<EventId>> match) { Map<String, List<V>> materializedMatch = new LinkedHashMap<>(match.size()); for (Map.Entry<String, List<EventId>> pattern : match.entrySet()) { List<V> events = new ArrayList<>(pattern.getValue().size()); for (EventId eventId : pattern.getValue()) { try { V event = sharedBuffer.getEvent(eventId).getElement(); events.add(event); } catch (Exception ex) { throw new WrappingRuntimeException(ex); } } materializedMatch.put(pattern.getKey(), events); } return materializedMatch; }
Extracts the real events from the sharedBuffer using the pre-extracted event ids. @param match the matched pattern, mapping pattern names to lists of event ids @return the materialized match, mapping pattern names to lists of the corresponding events.
public void lockNode(final NodeId node) { Lockable<SharedBufferNode> sharedBufferNode = sharedBuffer.getEntry(node); if (sharedBufferNode != null) { sharedBufferNode.lock(); sharedBuffer.upsertEntry(node, sharedBufferNode); } }
Increases the reference counter for the given entry so that it is not accidentally removed. @param node id of the entry
public void releaseNode(final NodeId node) throws Exception { Lockable<SharedBufferNode> sharedBufferNode = sharedBuffer.getEntry(node); if (sharedBufferNode != null) { if (sharedBufferNode.release()) { removeNode(node, sharedBufferNode.getElement()); } else { sharedBuffer.upsertEntry(node, sharedBufferNode); } } }
Decreases the reference counter for the given entry so that it can be removed once the reference counter reaches 0. @param node id of the entry @throws Exception Thrown if the system cannot access the state.
private void removeNode(NodeId node, SharedBufferNode sharedBufferNode) throws Exception { sharedBuffer.removeEntry(node); EventId eventId = node.getEventId(); releaseEvent(eventId); for (SharedBufferEdge sharedBufferEdge : sharedBufferNode.getEdges()) { releaseNode(sharedBufferEdge.getTarget()); } }
Removes the {@code SharedBufferNode}, when the ref is decreased to zero, and also decrease the ref of the edge on this node. @param node id of the entry @param sharedBufferNode the node body to be removed @throws Exception Thrown if the system cannot access the state.
private void lockEvent(EventId eventId) { Lockable<V> eventWrapper = sharedBuffer.getEvent(eventId); checkState( eventWrapper != null, "Referring to non existent event with id %s", eventId); eventWrapper.lock(); sharedBuffer.upsertEvent(eventId, eventWrapper); }
Increases the reference counter for the given event so that it is not accidentally removed. @param eventId id of the entry
public void releaseEvent(EventId eventId) throws Exception { Lockable<V> eventWrapper = sharedBuffer.getEvent(eventId); if (eventWrapper != null) { if (eventWrapper.release()) { sharedBuffer.removeEvent(eventId); } else { sharedBuffer.upsertEvent(eventId, eventWrapper); } } }
Decreases the reference counter for the given event so that it can be removed once the reference counter reaches 0. @param eventId id of the event @throws Exception Thrown if the system cannot access the state.
protected void run(String[] args) throws Exception { LOG.info("Running 'run' command."); final Options commandOptions = CliFrontendParser.getRunCommandOptions(); final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions); final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, true); final RunOptions runOptions = new RunOptions(commandLine); // evaluate help flag if (runOptions.isPrintHelp()) { CliFrontendParser.printHelpForRun(customCommandLines); return; } if (runOptions.getJarFilePath() == null) { throw new CliArgsException("The program JAR file was not specified."); } final PackagedProgram program; try { LOG.info("Building program from JAR file"); program = buildProgram(runOptions); } catch (FileNotFoundException e) { throw new CliArgsException("Could not build the program from JAR file.", e); } final CustomCommandLine<?> customCommandLine = getActiveCustomCommandLine(commandLine); try { runProgram(customCommandLine, commandLine, runOptions, program); } finally { program.deleteExtractedLibraries(); } }
Executes the run action. @param args Command line arguments for the run action.
protected void info(String[] args) throws CliArgsException, FileNotFoundException, ProgramInvocationException { LOG.info("Running 'info' command."); final Options commandOptions = CliFrontendParser.getInfoCommandOptions(); final CommandLine commandLine = CliFrontendParser.parse(commandOptions, args, true); InfoOptions infoOptions = new InfoOptions(commandLine); // evaluate help flag if (infoOptions.isPrintHelp()) { CliFrontendParser.printHelpForInfo(); return; } if (infoOptions.getJarFilePath() == null) { throw new CliArgsException("The program JAR file was not specified."); } // -------- build the packaged program ------------- LOG.info("Building program from JAR file"); final PackagedProgram program = buildProgram(infoOptions); try { int parallelism = infoOptions.getParallelism(); if (ExecutionConfig.PARALLELISM_DEFAULT == parallelism) { parallelism = defaultParallelism; } LOG.info("Creating program plan dump"); Optimizer compiler = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), configuration); FlinkPlan flinkPlan = ClusterClient.getOptimizedPlan(compiler, program, parallelism); String jsonPlan = null; if (flinkPlan instanceof OptimizedPlan) { jsonPlan = new PlanJSONDumpGenerator().getOptimizerPlanAsJSON((OptimizedPlan) flinkPlan); } else if (flinkPlan instanceof StreamingPlan) { jsonPlan = ((StreamingPlan) flinkPlan).getStreamingPlanAsJSON(); } if (jsonPlan != null) { System.out.println("----------------------- Execution Plan -----------------------"); System.out.println(jsonPlan); System.out.println("--------------------------------------------------------------"); } else { System.out.println("JSON plan could not be generated."); } String description = program.getDescription(); if (description != null) { System.out.println(); System.out.println(description); } else { System.out.println(); System.out.println("No description provided."); } } finally { program.deleteExtractedLibraries(); } }
Executes the info action. @param args Command line arguments for the info action.
protected void list(String[] args) throws Exception { LOG.info("Running 'list' command."); final Options commandOptions = CliFrontendParser.getListCommandOptions(); final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions); final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false); ListOptions listOptions = new ListOptions(commandLine); // evaluate help flag if (listOptions.isPrintHelp()) { CliFrontendParser.printHelpForList(customCommandLines); return; } final boolean showRunning; final boolean showScheduled; final boolean showAll; // print running and scheduled jobs if not option supplied if (!listOptions.showRunning() && !listOptions.showScheduled() && !listOptions.showAll()) { showRunning = true; showScheduled = true; showAll = false; } else { showRunning = listOptions.showRunning(); showScheduled = listOptions.showScheduled(); showAll = listOptions.showAll(); } final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine); runClusterAction( activeCommandLine, commandLine, clusterClient -> listJobs(clusterClient, showRunning, showScheduled, showAll)); }
Executes the list action. @param args Command line arguments for the list action.
protected void stop(String[] args) throws Exception { LOG.info("Running 'stop-with-savepoint' command."); final Options commandOptions = CliFrontendParser.getStopCommandOptions(); final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions); final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false); final StopOptions stopOptions = new StopOptions(commandLine); if (stopOptions.isPrintHelp()) { CliFrontendParser.printHelpForStop(customCommandLines); return; } final String[] cleanedArgs = stopOptions.getArgs(); final String targetDirectory = stopOptions.hasSavepointFlag() && cleanedArgs.length > 0 ? stopOptions.getTargetDirectory() : null; // the default savepoint location is going to be used in this case. final JobID jobId = cleanedArgs.length != 0 ? parseJobId(cleanedArgs[0]) : parseJobId(stopOptions.getTargetDirectory()); final boolean advanceToEndOfEventTime = stopOptions.shouldAdvanceToEndOfEventTime(); logAndSysout((advanceToEndOfEventTime ? "Draining job " : "Suspending job ") + "\"" + jobId + "\" with a savepoint."); final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine); runClusterAction( activeCommandLine, commandLine, clusterClient -> { try { clusterClient.stopWithSavepoint(jobId, advanceToEndOfEventTime, targetDirectory); } catch (Exception e) { throw new FlinkException("Could not stop with a savepoint job \"" + jobId + "\".", e); } }); logAndSysout((advanceToEndOfEventTime ? "Drained job " : "Suspended job ") + "\"" + jobId + "\" with a savepoint."); }
Executes the STOP action. @param args Command line arguments for the stop action.
protected void cancel(String[] args) throws Exception { LOG.info("Running 'cancel' command."); final Options commandOptions = CliFrontendParser.getCancelCommandOptions(); final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions); final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false); CancelOptions cancelOptions = new CancelOptions(commandLine); // evaluate help flag if (cancelOptions.isPrintHelp()) { CliFrontendParser.printHelpForCancel(customCommandLines); return; } final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine); final String[] cleanedArgs = cancelOptions.getArgs(); if (cancelOptions.isWithSavepoint()) { final JobID jobId; final String targetDirectory; if (cleanedArgs.length > 0) { jobId = parseJobId(cleanedArgs[0]); targetDirectory = cancelOptions.getSavepointTargetDirectory(); } else { jobId = parseJobId(cancelOptions.getSavepointTargetDirectory()); targetDirectory = null; } if (targetDirectory == null) { logAndSysout("Cancelling job " + jobId + " with savepoint to default savepoint directory."); } else { logAndSysout("Cancelling job " + jobId + " with savepoint to " + targetDirectory + '.'); } runClusterAction( activeCommandLine, commandLine, clusterClient -> { final String savepointPath; try { savepointPath = clusterClient.cancelWithSavepoint(jobId, targetDirectory); } catch (Exception e) { throw new FlinkException("Could not cancel job " + jobId + '.', e); } logAndSysout("Cancelled job " + jobId + ". Savepoint stored in " + savepointPath + '.'); }); } else { final JobID jobId; if (cleanedArgs.length > 0) { jobId = parseJobId(cleanedArgs[0]); } else { throw new CliArgsException("Missing JobID. Specify a JobID to cancel a job."); } logAndSysout("Cancelling job " + jobId + '.'); runClusterAction( activeCommandLine, commandLine, clusterClient -> { try { clusterClient.cancel(jobId); } catch (Exception e) { throw new FlinkException("Could not cancel job " + jobId + '.', e); } }); logAndSysout("Cancelled job " + jobId + '.'); } }
Executes the CANCEL action. @param args Command line arguments for the cancel action.
protected void savepoint(String[] args) throws Exception { LOG.info("Running 'savepoint' command."); final Options commandOptions = CliFrontendParser.getSavepointCommandOptions(); final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions); final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false); final SavepointOptions savepointOptions = new SavepointOptions(commandLine); // evaluate help flag if (savepointOptions.isPrintHelp()) { CliFrontendParser.printHelpForSavepoint(customCommandLines); return; } final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine); if (savepointOptions.isDispose()) { runClusterAction( activeCommandLine, commandLine, clusterClient -> disposeSavepoint(clusterClient, savepointOptions.getSavepointPath())); } else { String[] cleanedArgs = savepointOptions.getArgs(); final JobID jobId; if (cleanedArgs.length >= 1) { String jobIdString = cleanedArgs[0]; jobId = parseJobId(jobIdString); } else { throw new CliArgsException("Missing JobID. " + "Specify a Job ID to trigger a savepoint."); } final String savepointDirectory; if (cleanedArgs.length >= 2) { savepointDirectory = cleanedArgs[1]; } else { savepointDirectory = null; } // Print superfluous arguments if (cleanedArgs.length >= 3) { logAndSysout("Provided more arguments than required. Ignoring not needed arguments."); } runClusterAction( activeCommandLine, commandLine, clusterClient -> triggerSavepoint(clusterClient, jobId, savepointDirectory)); } }
Executes the SAVEPOINT action. @param args Command line arguments for the savepoint action.
private String triggerSavepoint(ClusterClient<?> clusterClient, JobID jobId, String savepointDirectory) throws FlinkException { logAndSysout("Triggering savepoint for job " + jobId + '.'); CompletableFuture<String> savepointPathFuture = clusterClient.triggerSavepoint(jobId, savepointDirectory); logAndSysout("Waiting for response..."); final String savepointPath; try { savepointPath = savepointPathFuture.get(); } catch (Exception e) { Throwable cause = ExceptionUtils.stripExecutionException(e); throw new FlinkException("Triggering a savepoint for the job " + jobId + " failed.", cause); } logAndSysout("Savepoint completed. Path: " + savepointPath); logAndSysout("You can resume your program from this savepoint with the run command."); return savepointPath; }
Triggers a savepoint for the given job by sending a savepoint trigger request to the cluster and waits for the resulting savepoint path.
private void disposeSavepoint(ClusterClient<?> clusterClient, String savepointPath) throws FlinkException { Preconditions.checkNotNull(savepointPath, "Missing required argument: savepoint path. " + "Usage: bin/flink savepoint -d <savepoint-path>"); logAndSysout("Disposing savepoint '" + savepointPath + "'."); final CompletableFuture<Acknowledge> disposeFuture = clusterClient.disposeSavepoint(savepointPath); logAndSysout("Waiting for response..."); try { disposeFuture.get(clientTimeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { throw new FlinkException("Disposing the savepoint '" + savepointPath + "' failed.", e); } logAndSysout("Savepoint '" + savepointPath + "' disposed."); }
Sends a savepoint disposal request to the cluster and waits for the savepoint to be disposed.
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism) throws ProgramMissingJobException, ProgramInvocationException { logAndSysout("Starting execution of program"); final JobSubmissionResult result = client.run(program, parallelism); if (null == result) { throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " + "ExecutionEnvironment.execute()"); } if (result.isJobExecutionResult()) { logAndSysout("Program execution finished"); JobExecutionResult execResult = result.getJobExecutionResult(); System.out.println("Job with JobID " + execResult.getJobID() + " has finished."); System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms"); Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults(); if (accumulatorsResult.size() > 0) { System.out.println("Accumulator Results: "); System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult)); } } else { logAndSysout("Job has been submitted with JobID " + result.getJobID()); } }
--------------------------------------------------------------------------------------------
PackagedProgram buildProgram(ProgramOptions options) throws FileNotFoundException, ProgramInvocationException { String[] programArgs = options.getProgramArgs(); String jarFilePath = options.getJarFilePath(); List<URL> classpaths = options.getClasspaths(); if (jarFilePath == null) { throw new IllegalArgumentException("The program JAR file was not specified."); } File jarFile = new File(jarFilePath); // Check if JAR file exists if (!jarFile.exists()) { throw new FileNotFoundException("JAR file does not exist: " + jarFile); } else if (!jarFile.isFile()) { throw new FileNotFoundException("JAR file is not a file: " + jarFile); } // Get assembler class String entryPointClass = options.getEntryPointClassName(); PackagedProgram program = entryPointClass == null ? new PackagedProgram(jarFile, classpaths, programArgs) : new PackagedProgram(jarFile, classpaths, entryPointClass, programArgs); program.setSavepointRestoreSettings(options.getSavepointRestoreSettings()); return program; }
Creates a Packaged program from the given command line options. @return A PackagedProgram (upon success)
private static int handleParametrizationException(ProgramParametrizationException e) { LOG.error("Program has not been parametrized properly.", e); System.err.println(e.getMessage()); return 1; }
Displays the exception message for an incorrect program parametrization. @param e The exception to display. @return The return code for the process.
private static int handleError(Throwable t) { LOG.error("Error while running the command.", t); System.err.println(); System.err.println("------------------------------------------------------------"); System.err.println(" The program finished with the following exception:"); System.err.println(); if (t.getCause() instanceof InvalidProgramException) { System.err.println(t.getCause().getMessage()); StackTraceElement[] trace = t.getCause().getStackTrace(); for (StackTraceElement ele: trace) { System.err.println("\t" + ele); if (ele.getMethodName().equals("main")) { break; } } } else { t.printStackTrace(); } return 1; }
Displays an exception message. @param t The exception to display. @return The return code for the process.
private JobID parseJobId(String jobIdString) throws CliArgsException { if (jobIdString == null) { throw new CliArgsException("Missing JobId"); } final JobID jobId; try { jobId = JobID.fromHexString(jobIdString); } catch (IllegalArgumentException e) { throw new CliArgsException(e.getMessage()); } return jobId; }
--------------------------------------------------------------------------------------------
private <T> void runClusterAction(CustomCommandLine<T> activeCommandLine, CommandLine commandLine, ClusterAction<T> clusterAction) throws FlinkException { final ClusterDescriptor<T> clusterDescriptor = activeCommandLine.createClusterDescriptor(commandLine); final T clusterId = activeCommandLine.getClusterId(commandLine); if (clusterId == null) { throw new FlinkException("No cluster id was specified. Please specify a cluster to which " + "you would like to connect."); } else { try { final ClusterClient<T> clusterClient = clusterDescriptor.retrieve(clusterId); try { clusterAction.runAction(clusterClient); } finally { try { clusterClient.shutdown(); } catch (Exception e) { LOG.info("Could not properly shut down the cluster client.", e); } } } finally { try { clusterDescriptor.close(); } catch (Exception e) { LOG.info("Could not properly close the cluster descriptor.", e); } } } }
Retrieves the {@link ClusterClient} from the given {@link CustomCommandLine} and runs the given {@link ClusterAction} against it. @param activeCommandLine to create the {@link ClusterDescriptor} from @param commandLine containing the parsed command line options @param clusterAction the cluster action to run against the retrieved {@link ClusterClient}. @param <T> type of the cluster id @throws FlinkException if something goes wrong
public int parseParameters(String[] args) { // check for action if (args.length < 1) { CliFrontendParser.printHelp(customCommandLines); System.out.println("Please specify an action."); return 1; } // get action String action = args[0]; // remove action from parameters final String[] params = Arrays.copyOfRange(args, 1, args.length); try { // do action switch (action) { case ACTION_RUN: run(params); return 0; case ACTION_LIST: list(params); return 0; case ACTION_INFO: info(params); return 0; case ACTION_CANCEL: cancel(params); return 0; case ACTION_STOP: stop(params); return 0; case ACTION_SAVEPOINT: savepoint(params); return 0; case "-h": case "--help": CliFrontendParser.printHelp(customCommandLines); return 0; case "-v": case "--version": String version = EnvironmentInformation.getVersion(); String commitID = EnvironmentInformation.getRevisionInformation().commitId; System.out.print("Version: " + version); System.out.println(commitID.equals(EnvironmentInformation.UNKNOWN) ? "" : ", Commit ID: " + commitID); return 0; default: System.out.printf("\"%s\" is not a valid action.\n", action); System.out.println(); System.out.println("Valid actions are \"run\", \"list\", \"info\", \"savepoint\", \"stop\", or \"cancel\"."); System.out.println(); System.out.println("Specify the version option (-v or --version) to print Flink version."); System.out.println(); System.out.println("Specify the help option (-h or --help) to get help on the command."); return 1; } } catch (CliArgsException ce) { return handleArgException(ce); } catch (ProgramParametrizationException ppe) { return handleParametrizationException(ppe); } catch (ProgramMissingJobException pmje) { return handleMissingJobException(); } catch (Exception e) { return handleError(e); } }
Parses the command line arguments and starts the requested action. @param args command line arguments of the client. @return The return code of the program
public static void main(final String[] args) { EnvironmentInformation.logEnvironmentInfo(LOG, "Command Line Client", args); // 1. find the configuration directory final String configurationDirectory = getConfigurationDirectoryFromEnv(); // 2. load the global configuration final Configuration configuration = GlobalConfiguration.loadConfiguration(configurationDirectory); // 3. load the custom command lines final List<CustomCommandLine<?>> customCommandLines = loadCustomCommandLines( configuration, configurationDirectory); try { final CliFrontend cli = new CliFrontend( configuration, customCommandLines); SecurityUtils.install(new SecurityConfiguration(cli.configuration)); int retCode = SecurityUtils.getInstalledContext() .runSecured(() -> cli.parseParameters(args)); System.exit(retCode); } catch (Throwable t) { final Throwable strippedThrowable = ExceptionUtils.stripException(t, UndeclaredThrowableException.class); LOG.error("Fatal error while running command line interface.", strippedThrowable); strippedThrowable.printStackTrace(); System.exit(31); } }
Entry point of the command line client: loads the configuration and custom command lines, installs the security context, and runs the requested action. @param args command line arguments of the client.
public static String getConfigurationDirectoryFromEnv() { String location = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR); if (location != null) { if (new File(location).exists()) { return location; } else { throw new RuntimeException("The configuration directory '" + location + "', specified in the '" + ConfigConstants.ENV_FLINK_CONF_DIR + "' environment variable, does not exist."); } } else if (new File(CONFIG_DIRECTORY_FALLBACK_1).exists()) { location = CONFIG_DIRECTORY_FALLBACK_1; } else if (new File(CONFIG_DIRECTORY_FALLBACK_2).exists()) { location = CONFIG_DIRECTORY_FALLBACK_2; } else { throw new RuntimeException("The configuration directory was not specified. " + "Please specify the directory containing the configuration file through the '" + ConfigConstants.ENV_FLINK_CONF_DIR + "' environment variable."); } return location; }
--------------------------------------------------------------------------------------------
static void setJobManagerAddressInConfig(Configuration config, InetSocketAddress address) { config.setString(JobManagerOptions.ADDRESS, address.getHostString()); config.setInteger(JobManagerOptions.PORT, address.getPort()); config.setString(RestOptions.ADDRESS, address.getHostString()); config.setInteger(RestOptions.PORT, address.getPort()); }
Writes the given job manager address to the associated configuration object. @param config The configuration to write to @param address Address to write to the configuration
public CustomCommandLine<?> getActiveCustomCommandLine(CommandLine commandLine) { for (CustomCommandLine<?> cli : customCommandLines) { if (cli.isActive(commandLine)) { return cli; } } throw new IllegalStateException("No command-line ran."); }
Gets the custom command-line that is active for the given parsed options. @param commandLine The parsed command line options. @return The active custom command-line (only one may be active at a time)
private static CustomCommandLine<?> loadCustomCommandLine(String className, Object... params) throws IllegalAccessException, InvocationTargetException, InstantiationException, ClassNotFoundException, NoSuchMethodException { Class<? extends CustomCommandLine> customCliClass = Class.forName(className).asSubclass(CustomCommandLine.class); // construct class types from the parameters Class<?>[] types = new Class<?>[params.length]; for (int i = 0; i < params.length; i++) { Preconditions.checkNotNull(params[i], "Parameters for custom command-lines may not be null."); types[i] = params[i].getClass(); } Constructor<? extends CustomCommandLine> constructor = customCliClass.getConstructor(types); return constructor.newInstance(params); }
Loads a class from the classpath that implements the CustomCommandLine interface. @param className The fully-qualified class name to load. @param params The constructor parameters @return The instantiated custom command-line
public O withForwardedFields(String... forwardedFields) { if (this.udfSemantics == null) { // extract semantic properties from function annotations setSemanticProperties(extractSemanticAnnotations(getFunction().getClass())); } if (this.udfSemantics == null || this.analyzedUdfSemantics) { // discard analyzed semantic properties setSemanticProperties(new SingleInputSemanticProperties()); SemanticPropUtil.getSemanticPropsSingleFromString(this.udfSemantics, forwardedFields, null, null, this.getInputType(), this.getResultType()); } else { if (udfWithForwardedFieldsAnnotation(getFunction().getClass())) { // refuse semantic information as it would override the function annotation throw new SemanticProperties.InvalidSemanticAnnotationException("Forwarded field information " + "has already been added by a function annotation for this operator. " + "Cannot overwrite function annotations."); } else { SemanticPropUtil.getSemanticPropsSingleFromString(this.udfSemantics, forwardedFields, null, null, this.getInputType(), this.getResultType()); } } @SuppressWarnings("unchecked") O returnType = (O) this; return returnType; }
Adds semantic information about forwarded fields of the user-defined function. The forwarded fields information declares fields which are never modified by the function and which are forwarded at the same position to the output or unchanged copied to another position in the output. <p>Fields that are forwarded at the same position are specified by their position. The specified position must be valid for the input and output data type and have the same type. For example <code>withForwardedFields("f2")</code> declares that the third field of a Java input tuple is copied to the third field of an output tuple. <p>Fields which are unchanged copied to another position in the output are declared by specifying the source field reference in the input and the target field reference in the output. {@code withForwardedFields("f0->f2")} denotes that the first field of the Java input tuple is unchanged copied to the third field of the Java output tuple. When using a wildcard ("*") ensure that the number of declared fields and their types in input and output type match. <p>Multiple forwarded fields can be annotated in one ({@code withForwardedFields("f2; f3->f0; f4")}) or separate Strings ({@code withForwardedFields("f2", "f3->f0", "f4")}). Please refer to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or Flink's documentation for details on field references such as nested fields and wildcard. <p>It is not possible to override existing semantic information about forwarded fields which was for example added by a {@link org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields} class annotation. <p><b>NOTE: Adding semantic information for functions is optional! If used correctly, semantic information can help the Flink optimizer to generate more efficient execution plans. However, incorrect semantic information can cause the optimizer to generate incorrect execution plans which compute wrong results! So be careful when adding semantic information. </b> @param forwardedFields A list of field forward expressions. @return This operator with annotated forwarded field information. @see org.apache.flink.api.java.functions.FunctionAnnotation @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFields
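A minimal usage sketch of the forwarded-fields declaration on the DataSet API (names and data are illustrative; the mapper below copies f0 unchanged, so declaring "f0" is safe):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple2<Long, String>> input = env.fromElements(Tuple2.of(1L, "a"), Tuple2.of(2L, "b"));
DataSet<Tuple2<Long, String>> result = input
    .map(new MapFunction<Tuple2<Long, String>, Tuple2<Long, String>>() {
        @Override
        public Tuple2<Long, String> map(Tuple2<Long, String> value) {
            // f0 is forwarded unchanged, only f1 is modified
            return Tuple2.of(value.f0, value.f1.toUpperCase());
        }
    })
    .withForwardedFields("f0");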
private static JobVertexBackPressureInfo.VertexBackPressureLevel getBackPressureLevel(double backPressureRatio) { if (backPressureRatio <= 0.10) { return JobVertexBackPressureInfo.VertexBackPressureLevel.OK; } else if (backPressureRatio <= 0.5) { return JobVertexBackPressureInfo.VertexBackPressureLevel.LOW; } else { return JobVertexBackPressureInfo.VertexBackPressureLevel.HIGH; } }
Returns the back pressure level for the given back pressure ratio. @param backPressureRatio Ratio of back pressured samples to total number of samples. @return Back pressure level ('ok', 'low', or 'high')
@SuppressWarnings("unchecked") @Override public int hash(T value) { int i = 0; try { int code = this.comparators[0].hash(value.getFieldNotNull(keyPositions[0])); for (i = 1; i < this.keyPositions.length; i++) { code *= HASH_SALT[i & 0x1F]; // salt code with (i % HASH_SALT.length)-th salt component code += this.comparators[i].hash(value.getFieldNotNull(keyPositions[i])); } return code; } catch (NullFieldException nfex) { throw new NullKeyFieldException(nfex); } catch (IndexOutOfBoundsException iobex) { throw new KeyFieldOutOfBoundsException(keyPositions[i]); } }
--------------------------------------------------------------------------------------------
public void setNextPartialSolution(OptimizerNode nextPartialSolution, OptimizerNode terminationCriterion) { // check if the root of the step function has the same parallelism as the iteration // or if the step function has any operator at all if (nextPartialSolution.getParallelism() != getParallelism() || nextPartialSolution == partialSolution || nextPartialSolution instanceof BinaryUnionNode) { // add a no-op to the root to express the re-partitioning NoOpNode noop = new NoOpNode(); noop.setParallelism(getParallelism()); DagConnection noOpConn = new DagConnection(nextPartialSolution, noop, ExecutionMode.PIPELINED); noop.setIncomingConnection(noOpConn); nextPartialSolution.addOutgoingConnection(noOpConn); nextPartialSolution = noop; } this.nextPartialSolution = nextPartialSolution; this.terminationCriterion = terminationCriterion; if (terminationCriterion == null) { this.singleRoot = nextPartialSolution; this.rootConnection = new DagConnection(nextPartialSolution, ExecutionMode.PIPELINED); } else { // we have a termination criterion SingleRootJoiner singleRootJoiner = new SingleRootJoiner(); this.rootConnection = new DagConnection(nextPartialSolution, singleRootJoiner, ExecutionMode.PIPELINED); this.terminationCriterionRootConnection = new DagConnection(terminationCriterion, singleRootJoiner, ExecutionMode.PIPELINED); singleRootJoiner.setInputs(this.rootConnection, this.terminationCriterionRootConnection); this.singleRoot = singleRootJoiner; // add connection to terminationCriterion for interesting properties visitor terminationCriterion.addOutgoingConnection(terminationCriterionRootConnection); } nextPartialSolution.addOutgoingConnection(rootConnection); }
Sets the nextPartialSolution for this BulkIterationNode. @param nextPartialSolution The nextPartialSolution to set.
public boolean schemaEquals(Object obj) { return equals(obj) && Arrays.equals(fieldNames, ((RowTypeInfo) obj).fieldNames); }
Tests whether another object describes the same, schema-equivalent row information.
public static RowTypeInfo projectFields(RowTypeInfo rowType, int[] fieldMapping) { TypeInformation[] fieldTypes = new TypeInformation[fieldMapping.length]; String[] fieldNames = new String[fieldMapping.length]; for (int i = 0; i < fieldMapping.length; i++) { fieldTypes[i] = rowType.getTypeAt(fieldMapping[i]); fieldNames[i] = rowType.getFieldNames()[fieldMapping[i]]; } return new RowTypeInfo(fieldTypes, fieldNames); }
Creates a {@link RowTypeInfo} with projected fields. @param rowType The original RowTypeInfo whose fields are projected @param fieldMapping The field mapping of the projection @return A RowTypeInfo with projected fields.
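A small sketch of the projection; field names and types are made up:

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.RowTypeInfo;

RowTypeInfo rowType = new RowTypeInfo(
    new TypeInformation<?>[]{Types.INT, Types.STRING, Types.DOUBLE},
    new String[]{"id", "name", "score"});
// keeps fields 2 and 0, in that order: ("score": DOUBLE, "id": INT)
RowTypeInfo projected = RowTypeInfo.projectFields(rowType, new int[]{2, 0});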
@Override public void discardState() throws Exception { FileSystem fs = getFileSystem(); fs.delete(filePath, false); }
Discards the state by deleting the file that stores the state. @throws Exception Thrown, if the file deletion fails.
@Internal public static <T, F> FieldAccessor<T, F> getAccessor(TypeInformation<T> typeInfo, int pos, ExecutionConfig config){ // In case of arrays if (typeInfo instanceof BasicArrayTypeInfo || typeInfo instanceof PrimitiveArrayTypeInfo) { return new FieldAccessor.ArrayFieldAccessor<>(pos, typeInfo); // In case of basic types } else if (typeInfo instanceof BasicTypeInfo) { if (pos != 0) { throw new CompositeType.InvalidFieldReferenceException("The " + ((Integer) pos).toString() + ". field selected on a " + "basic type (" + typeInfo.toString() + "). A field expression on a basic type can only select " + "the 0th field (which means selecting the entire basic type)."); } @SuppressWarnings("unchecked") FieldAccessor<T, F> result = (FieldAccessor<T, F>) new FieldAccessor.SimpleFieldAccessor<>(typeInfo); return result; // In case of case classes } else if (typeInfo.isTupleType() && ((TupleTypeInfoBase) typeInfo).isCaseClass()) { TupleTypeInfoBase tupleTypeInfo = (TupleTypeInfoBase) typeInfo; @SuppressWarnings("unchecked") TypeInformation<F> fieldTypeInfo = (TypeInformation<F>) tupleTypeInfo.getTypeAt(pos); return new FieldAccessor.RecursiveProductFieldAccessor<>( pos, typeInfo, new FieldAccessor.SimpleFieldAccessor<>(fieldTypeInfo), config); // In case of tuples } else if (typeInfo.isTupleType()) { @SuppressWarnings("unchecked") FieldAccessor<T, F> result = new FieldAccessor.SimpleTupleFieldAccessor(pos, typeInfo); return result; // Default case, PojoType is directed to this statement } else { throw new CompositeType.InvalidFieldReferenceException("Cannot reference field by position on " + typeInfo.toString() + "Referencing a field by position is supported on tuples, case classes, and arrays. " + "Additionally, you can select the 0th field of a primitive/basic type (e.g. int)."); } }
Creates a {@link FieldAccessor} for the given field position, which can be used to get and set the specified field on instances of this type. @param typeInfo The type information of the type to access @param pos The field position (zero-based) @param config Configuration object @param <F> The type of the field to access @return The created FieldAccessor
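A hedged sketch of a position-based accessor on a tuple type (the factory is marked @Internal, so this is for illustration only; values are made up):

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.TupleTypeInfo;
import org.apache.flink.streaming.util.typeutils.FieldAccessor;
import org.apache.flink.streaming.util.typeutils.FieldAccessorFactory;

TupleTypeInfo<Tuple2<Integer, String>> typeInfo =
    TupleTypeInfo.getBasicTupleTypeInfo(Integer.class, String.class);
FieldAccessor<Tuple2<Integer, String>, Integer> accessor =
    FieldAccessorFactory.getAccessor(typeInfo, 0, new ExecutionConfig());
Tuple2<Integer, String> t = Tuple2.of(7, "seven");
Integer value = accessor.get(t); // 7
t = accessor.set(t, 42);         // t is now (42, "seven")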
@Internal public static <T, F> FieldAccessor<T, F> getAccessor(TypeInformation<T> typeInfo, String field, ExecutionConfig config) { // In case of arrays if (typeInfo instanceof BasicArrayTypeInfo || typeInfo instanceof PrimitiveArrayTypeInfo) { try { return new FieldAccessor.ArrayFieldAccessor<>(Integer.parseInt(field), typeInfo); } catch (NumberFormatException ex) { throw new CompositeType.InvalidFieldReferenceException ("A field expression on an array must be an integer index (that might be given as a string)."); } // In case of basic types } else if (typeInfo instanceof BasicTypeInfo) { try { int pos = field.equals(Keys.ExpressionKeys.SELECT_ALL_CHAR) ? 0 : Integer.parseInt(field); return FieldAccessorFactory.getAccessor(typeInfo, pos, config); } catch (NumberFormatException ex) { throw new CompositeType.InvalidFieldReferenceException("You tried to select the field \"" + field + "\" on a " + typeInfo.toString() + ". A field expression on a basic type can only be \"*\" or \"0\"" + " (both of which mean selecting the entire basic type)."); } // In case of Pojos } else if (typeInfo instanceof PojoTypeInfo) { FieldExpression decomp = decomposeFieldExpression(field); PojoTypeInfo<?> pojoTypeInfo = (PojoTypeInfo) typeInfo; int fieldIndex = pojoTypeInfo.getFieldIndex(decomp.head); if (fieldIndex == -1) { throw new CompositeType.InvalidFieldReferenceException( "Unable to find field \"" + decomp.head + "\" in type " + typeInfo + "."); } else { PojoField pojoField = pojoTypeInfo.getPojoFieldAt(fieldIndex); TypeInformation<?> fieldType = pojoTypeInfo.getTypeAt(fieldIndex); if (decomp.tail == null) { @SuppressWarnings("unchecked") FieldAccessor<F, F> innerAccessor = new FieldAccessor.SimpleFieldAccessor<>((TypeInformation<F>) fieldType); return new FieldAccessor.PojoFieldAccessor<>(pojoField.getField(), innerAccessor); } else { @SuppressWarnings("unchecked") FieldAccessor<Object, F> innerAccessor = FieldAccessorFactory .getAccessor((TypeInformation<Object>) fieldType, decomp.tail, config); return new FieldAccessor.PojoFieldAccessor<>(pojoField.getField(), innerAccessor); } } // In case of case classes } else if (typeInfo.isTupleType() && ((TupleTypeInfoBase) typeInfo).isCaseClass()) { TupleTypeInfoBase tupleTypeInfo = (TupleTypeInfoBase) typeInfo; FieldExpression decomp = decomposeFieldExpression(field); int fieldPos = tupleTypeInfo.getFieldIndex(decomp.head); if (fieldPos < 0) { throw new CompositeType.InvalidFieldReferenceException("Invalid field selected: " + field); } if (decomp.tail == null){ return new FieldAccessor.SimpleProductFieldAccessor<>(fieldPos, typeInfo, config); } else { @SuppressWarnings("unchecked") FieldAccessor<Object, F> innerAccessor = getAccessor(tupleTypeInfo.getTypeAt(fieldPos), decomp.tail, config); return new FieldAccessor.RecursiveProductFieldAccessor<>(fieldPos, typeInfo, innerAccessor, config); } // In case of tuples } else if (typeInfo.isTupleType()) { TupleTypeInfo tupleTypeInfo = (TupleTypeInfo) typeInfo; FieldExpression decomp = decomposeFieldExpression(field); int fieldPos = tupleTypeInfo.getFieldIndex(decomp.head); if (fieldPos == -1) { try { fieldPos = Integer.parseInt(decomp.head); } catch (NumberFormatException ex) { throw new CompositeType.InvalidFieldReferenceException("Tried to select field \"" + decomp.head + "\" on " + typeInfo.toString() + " . Only integer values are allowed here."); } } if (decomp.tail == null) { @SuppressWarnings("unchecked") FieldAccessor<T, F> result = new FieldAccessor.SimpleTupleFieldAccessor(fieldPos, tupleTypeInfo); return result; } else { @SuppressWarnings("unchecked") FieldAccessor<?, F> innerAccessor = getAccessor(tupleTypeInfo.getTypeAt(fieldPos), decomp.tail, config); @SuppressWarnings("unchecked") FieldAccessor<T, F> result = new FieldAccessor.RecursiveTupleFieldAccessor(fieldPos, innerAccessor, tupleTypeInfo); return result; } // Default statement } else { throw new CompositeType.InvalidFieldReferenceException("Cannot reference field by field expression on " + typeInfo.toString() + "Field expressions are only supported on POJO types, tuples, and case classes. " + "(See the Flink documentation on what is considered a POJO.)"); } }
Creates a {@link FieldAccessor} for the field that is given by a field expression, which can be used to get and set the specified field on instances of this type. @param typeInfo The type information of the type to access @param field The field expression @param config Configuration object @param <F> The type of the field to access @return The created FieldAccessor
public final Iterable<Edge<K, EV>> getEdges() { verifyEdgeUsage(); this.edgeIterator.set(edges); return this.edgeIterator; }
Gets an {@link java.lang.Iterable} with all out-going edges. This method is mutually exclusive with {@link #sendMessageToAllNeighbors(Object)} and may be called only once. @return An {@link java.lang.Iterable} with all out-going edges.
public final void sendMessageToAllNeighbors(Message m) { verifyEdgeUsage(); outMsg.f1 = m; while (edges.hasNext()) { Tuple next = edges.next(); outMsg.f0 = next.getField(1); out.collect(Either.Right(outMsg)); } }
Sends the given message to all vertices that are adjacent to the changed vertex. This method is mutually exclusive with {@link #getEdges()} and may be called only once. @param m The message to send.
public final void sendMessageTo(K target, Message m) { outMsg.f0 = target; outMsg.f1 = m; out.collect(Either.Right(outMsg)); }
Sends the given message to the vertex identified by the given key. If the target vertex does not exist, the next superstep will cause an exception due to a non-deliverable message. @param target The key (id) of the target vertex to message. @param m The message.
public final void setNewVertexValue(VV newValue) { if (setNewVertexValueCalled) { throw new IllegalStateException("setNewVertexValue should only be called at most once per updateVertex"); } setNewVertexValueCalled = true; outVertex.f1 = newValue; out.collect(Either.Left(outVertex)); }
Sets the new value of this vertex. <p>This should be called at most once per ComputeFunction. @param newValue The new vertex value.
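To tie the vertex-centric methods above together, a hedged sketch of a shortest-paths style compute function; the graph key, value, and edge types are illustrative:

import org.apache.flink.graph.Edge;
import org.apache.flink.graph.Vertex;
import org.apache.flink.graph.pregel.ComputeFunction;
import org.apache.flink.graph.pregel.MessageIterator;

public class MinDistanceCompute extends ComputeFunction<Long, Double, Double, Double> {

    @Override
    public void compute(Vertex<Long, Double> vertex, MessageIterator<Double> messages) {
        double minDistance = Double.MAX_VALUE;
        for (Double msg : messages) {
            minDistance = Math.min(minDistance, msg);
        }
        if (minDistance < vertex.getValue()) {
            setNewVertexValue(minDistance);              // at most once per compute invocation
            for (Edge<Long, Double> edge : getEdges()) { // getEdges may be used only once
                sendMessageTo(edge.getTarget(), minDistance + edge.getValue());
            }
        }
    }
}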
public static FlinkJoinType toFlinkJoinType(JoinRelType joinRelType) { switch (joinRelType) { case INNER: return FlinkJoinType.INNER; case LEFT: return FlinkJoinType.LEFT; case RIGHT: return FlinkJoinType.RIGHT; case FULL: return FlinkJoinType.FULL; default: throw new IllegalArgumentException("invalid: " + joinRelType); } }
Converts {@link JoinRelType} to {@link FlinkJoinType}.
public static FlinkJoinType getFlinkJoinType(Join join) { if (join instanceof SemiJoin) { // TODO supports ANTI return FlinkJoinType.SEMI; } else { return toFlinkJoinType(join.getJoinType()); } }
Gets {@link FlinkJoinType} of the input Join RelNode.
public static JoinRelType toJoinRelType(FlinkJoinType joinType) { switch (joinType) { case INNER: return JoinRelType.INNER; case LEFT: return JoinRelType.LEFT; case RIGHT: return JoinRelType.RIGHT; case FULL: return JoinRelType.FULL; default: throw new IllegalArgumentException("invalid: " + joinType); } }
Converts {@link FlinkJoinType} to {@link JoinRelType}.
public void setNewVertexValue(VV newValue) { if (setNewVertexValueCalled) { throw new IllegalStateException("setNewVertexValue should only be called at most once per updateVertex"); } setNewVertexValueCalled = true; if (isOptDegrees()) { outValWithDegrees.f1.f0 = newValue; outWithDegrees.collect(outValWithDegrees); } else { outVal.setValue(newValue); out.collect(outVal); } }
Sets the new value of this vertex. Setting a new value triggers the sending of outgoing messages from this vertex. <p>This should be called at most once per updateVertex. @param newValue The new vertex value.
@SuppressWarnings("unchecked") <VertexWithDegree> void updateVertexFromScatterGatherIteration(Vertex<K, VertexWithDegree> vertexState, MessageIterator<Message> inMessages) throws Exception { Vertex<K, VV> vertex = new Vertex<>(vertexState.f0, ((Tuple3<VV, Long, Long>) vertexState.getValue()).f0); updateVertex(vertex, inMessages); }
In order to hide the Tuple3(actualValue, inDegree, outDegree) vertex value from the user, another function will be called from {@link org.apache.flink.graph.spargel.ScatterGatherIteration}. <p>This function retrieves the vertex from the vertexState, sets its degrees, and then calls the regular updateVertex function. @param vertexState The vertex state carrying the actual value together with its in- and out-degree. @param inMessages The incoming messages for this vertex. @throws Exception
private static Path validatePath(Path path) { final URI uri = path.toUri(); final String scheme = uri.getScheme(); final String pathPart = uri.getPath(); // some validity checks if (scheme == null) { throw new IllegalArgumentException("The scheme (hdfs://, file://, etc) is null. " + "Please specify the file system scheme explicitly in the URI."); } if (pathPart == null) { throw new IllegalArgumentException("The path to store the checkpoint data in is null. " + "Please specify a directory path for the checkpoint data."); } if (pathPart.length() == 0 || pathPart.equals("/")) { throw new IllegalArgumentException("Cannot use the root directory for checkpoints."); } return path; }
Checks that the given path has a valid scheme and a non-empty, non-root path part. @param path The path to check. @return The checked path. @throws IllegalArgumentException Thrown, if the URI misses scheme or path.
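An illustration of the rules above with made-up locations: a checkpoint path needs an explicit scheme and a non-empty, non-root path part.

import java.net.URI;
import org.apache.flink.core.fs.Path;

Path accepted = new Path("hdfs://namenode:9000/flink/checkpoints");
URI uri = accepted.toUri();
// uri.getScheme() is "hdfs" and uri.getPath() is "/flink/checkpoints", so the check passes.

// These would be rejected by the check:
//   new Path("/flink/checkpoints")      -> no scheme
//   new Path("hdfs://namenode:9000/")   -> root directory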
public void transferAllStateDataToDirectory( IncrementalRemoteKeyedStateHandle restoreStateHandle, Path dest, CloseableRegistry closeableRegistry) throws Exception { final Map<StateHandleID, StreamStateHandle> sstFiles = restoreStateHandle.getSharedState(); final Map<StateHandleID, StreamStateHandle> miscFiles = restoreStateHandle.getPrivateState(); downloadDataForAllStateHandles(sstFiles, dest, closeableRegistry); downloadDataForAllStateHandles(miscFiles, dest, closeableRegistry); }
Transfers all state data to the target directory using the specified number of threads. @param restoreStateHandle Handles used to retrieve the state data. @param dest The target directory to which the state data will be stored. @throws Exception Thrown if the state data cannot be transferred.
private void downloadDataForAllStateHandles( Map<StateHandleID, StreamStateHandle> stateHandleMap, Path restoreInstancePath, CloseableRegistry closeableRegistry) throws Exception { try { List<Runnable> runnables = createDownloadRunnables(stateHandleMap, restoreInstancePath, closeableRegistry); List<CompletableFuture<Void>> futures = new ArrayList<>(runnables.size()); for (Runnable runnable : runnables) { futures.add(CompletableFuture.runAsync(runnable, executorService)); } FutureUtils.waitForAll(futures).get(); } catch (ExecutionException e) { Throwable throwable = ExceptionUtils.stripExecutionException(e); throwable = ExceptionUtils.stripException(throwable, RuntimeException.class); if (throwable instanceof IOException) { throw (IOException) throwable; } else { throw new FlinkRuntimeException("Failed to download data for state handles.", e); } } }
Copies all the files from the given stream state handles to the given path, renaming the files w.r.t. their {@link StateHandleID}.
private void downloadDataForStateHandle( Path restoreFilePath, StreamStateHandle remoteFileHandle, CloseableRegistry closeableRegistry) throws IOException { FSDataInputStream inputStream = null; FSDataOutputStream outputStream = null; try { FileSystem restoreFileSystem = restoreFilePath.getFileSystem(); inputStream = remoteFileHandle.openInputStream(); closeableRegistry.registerCloseable(inputStream); outputStream = restoreFileSystem.create(restoreFilePath, FileSystem.WriteMode.OVERWRITE); closeableRegistry.registerCloseable(outputStream); byte[] buffer = new byte[8 * 1024]; while (true) { int numBytes = inputStream.read(buffer); if (numBytes == -1) { break; } outputStream.write(buffer, 0, numBytes); } } finally { if (closeableRegistry.unregisterCloseable(inputStream)) { inputStream.close(); } if (closeableRegistry.unregisterCloseable(outputStream)) { outputStream.close(); } } }
Copies the file from a single state handle to the given path.
public File getFile(String name) { if (name == null) { throw new NullPointerException("name must not be null"); } Future<Path> future = cacheCopyTasks.get(name); if (future == null) { throw new IllegalArgumentException("File with name '" + name + "' is not available." + " Did you forget to register the file?"); } try { final Path path = future.get(); URI tmp = path.makeQualified(path.getFileSystem()).toUri(); return new File(tmp); } catch (ExecutionException e) { throw new RuntimeException("An error occurred while copying the file.", e.getCause()); } catch (Exception e) { throw new RuntimeException("Error while getting the file registered under '" + name + "' from the distributed cache", e); } }
------------------------------------------------------------------------
public static void writeFileInfoToConfig(String name, DistributedCacheEntry e, Configuration conf) { int num = conf.getInteger(CACHE_FILE_NUM, 0) + 1; conf.setInteger(CACHE_FILE_NUM, num); conf.setString(CACHE_FILE_NAME + num, name); conf.setString(CACHE_FILE_PATH + num, e.filePath); conf.setBoolean(CACHE_FILE_EXE + num, e.isExecutable || new File(e.filePath).canExecute()); conf.setBoolean(CACHE_FILE_DIR + num, e.isZipped || new File(e.filePath).isDirectory()); if (e.blobKey != null) { conf.setBytes(CACHE_FILE_BLOB_KEY + num, e.blobKey); } }
------------------------------------------------------------------------
@Override public void close() throws Exception { if (enterUnlessClosed()) { try { try { // this class' own cleanup logic resourceManagerLeaderElectionService.shutdown(); dispatcher.shutdownNow(); } finally { // in any case must we call the parent cleanup logic super.close(); } } finally { exit(); } } }
------------------------------------------------------------------------
@Override public void snapshotState(FunctionSnapshotContext context) throws Exception { Preconditions.checkState(this.checkpointedState != null, "The " + getClass().getSimpleName() + " has not been properly initialized."); this.checkpointedState.clear(); this.checkpointedState.add(this.numElementsEmitted); }
------------------------------------------------------------------------
public static <OUT> void checkCollection(Collection<OUT> elements, Class<OUT> viewedAs) { for (OUT elem : elements) { if (elem == null) { throw new IllegalArgumentException("The collection contains a null element"); } if (!viewedAs.isAssignableFrom(elem.getClass())) { throw new IllegalArgumentException("The elements in the collection are not all subclasses of " + viewedAs.getCanonicalName()); } } }
Verifies that all elements in the collection are non-null and of the given class, or a subclass thereof. @param elements The collection to check. @param viewedAs The class to which the elements must be assignable. @param <OUT> The generic type of the collection to be checked.
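A short sketch of how the check behaves, assuming the helper is reachable as a static method on the utility class it is defined in:

import java.util.Arrays;
import java.util.Collection;

Collection<Number> numbers = Arrays.asList(1, 2.0, 3L);
checkCollection(numbers, Number.class);     // passes: all elements are non-null Numbers

Collection<Number> withNull = Arrays.asList(1, null);
// checkCollection(withNull, Number.class); // would throw IllegalArgumentException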
public TableOperation createAggregate( List<Expression> groupings, List<Expression> aggregates, TableOperation child) { validateGroupings(groupings); validateAggregates(aggregates); List<PlannerExpression> convertedGroupings = bridge(groupings); List<PlannerExpression> convertedAggregates = bridge(aggregates); TypeInformation[] fieldTypes = Stream.concat( convertedGroupings.stream(), convertedAggregates.stream() ).map(PlannerExpression::resultType) .toArray(TypeInformation[]::new); String[] fieldNames = Stream.concat( groupings.stream(), aggregates.stream() ).map(expr -> extractName(expr).orElseGet(expr::toString)) .toArray(String[]::new); TableSchema tableSchema = new TableSchema(fieldNames, fieldTypes); return new AggregateTableOperation(groupings, aggregates, child, tableSchema); }
Creates a valid {@link AggregateTableOperation} operation. @param groupings expressions describing grouping key of aggregates @param aggregates expressions describing aggregation functions @param child relational operation on top of which to apply the aggregation @return valid aggregate operation
public TableOperation createWindowAggregate( List<Expression> groupings, List<Expression> aggregates, List<Expression> windowProperties, ResolvedGroupWindow window, TableOperation child) { validateGroupings(groupings); validateAggregates(aggregates); validateWindowProperties(windowProperties, window); List<PlannerExpression> convertedGroupings = bridge(groupings); List<PlannerExpression> convertedAggregates = bridge(aggregates); List<PlannerExpression> convertedWindowProperties = bridge(windowProperties); TypeInformation[] fieldTypes = concat( convertedGroupings.stream(), convertedAggregates.stream(), convertedWindowProperties.stream() ).map(PlannerExpression::resultType) .toArray(TypeInformation[]::new); String[] fieldNames = concat( groupings.stream(), aggregates.stream(), windowProperties.stream() ).map(expr -> extractName(expr).orElseGet(expr::toString)) .toArray(String[]::new); TableSchema tableSchema = new TableSchema(fieldNames, fieldTypes); return new WindowAggregateTableOperation( groupings, aggregates, windowProperties, window, child, tableSchema); }
Creates a valid {@link WindowAggregateTableOperation} operation. @param groupings expressions describing grouping key of aggregates @param aggregates expressions describing aggregation functions @param windowProperties expressions describing window properties @param window grouping window of this aggregation @param child relational operation on top of which to apply the aggregation @return valid window aggregate operation
public ResolvedGroupWindow createResolvedWindow(GroupWindow window, ExpressionResolver resolver) { Expression alias = window.getAlias(); if (!(alias instanceof UnresolvedReferenceExpression)) { throw new ValidationException("Only unresolved reference supported for alias of a group window."); } final String windowName = ((UnresolvedReferenceExpression) alias).getName(); FieldReferenceExpression timeField = getValidatedTimeAttribute(window, resolver); if (window instanceof TumbleWithSizeOnTimeWithAlias) { return validateAndCreateTumbleWindow( (TumbleWithSizeOnTimeWithAlias) window, windowName, timeField); } else if (window instanceof SlideWithSizeAndSlideOnTimeWithAlias) { return validateAndCreateSlideWindow( (SlideWithSizeAndSlideOnTimeWithAlias) window, windowName, timeField); } else if (window instanceof SessionWithGapOnTimeWithAlias) { return validateAndCreateSessionWindow( (SessionWithGapOnTimeWithAlias) window, windowName, timeField); } else { throw new TableException("Unknown window type: " + window); } }
Converts an API class to a resolved window for planning with expressions already resolved. It performs following validations: <ul> <li>The alias is represented with an unresolved reference</li> <li>The time attribute is a single field reference of a {@link TimeIndicatorTypeInfo}(stream), {@link SqlTimeTypeInfo}(batch), or {@link BasicTypeInfo#LONG_TYPE_INFO}(batch) type</li> <li>The size & slide are value literals of either {@link RowIntervalTypeInfo#INTERVAL_ROWS}, or {@link TimeIntervalTypeInfo} type</li> <li>The size & slide are of the same type</li> <li>The gap is a value literal of a {@link TimeIntervalTypeInfo} type</li> </ul> @param window window to resolve @param resolver resolver to resolve potential unresolved field references @return window with expressions resolved
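For reference, a hedged sketch of the kind of API object this method receives: a tumbling group window defined with the String-based Table API syntax of this Flink version (the attribute name and size are made up).

import org.apache.flink.table.api.GroupWindow;
import org.apache.flink.table.api.Tumble;

// A 10-minute tumbling window over the "rowtime" attribute, aliased as "w";
// the alias stays an unresolved reference until the resolver runs.
GroupWindow window = Tumble.over("10.minutes").on("rowtime").as("w");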
public void initializeCache(Object key) throws Exception { this.sortedWindows = cachedSortedWindows.get(key); if (sortedWindows == null) { this.sortedWindows = new TreeSet<>(); Iterator<Map.Entry<W, W>> keyValues = mapping.iterator(); if (keyValues != null) { while (keyValues.hasNext()) { Map.Entry<W, W> keyValue = keyValues.next(); this.sortedWindows.add(keyValue.getKey()); } } cachedSortedWindows.put(key, sortedWindows); } }
Sets the current key context of this window set. <p>Note: {@code initializeCache(Object)} must be called before {@link #addWindow(Window, MergeFunction)} and {@link #retireWindow(Window)}. @param key the current access key
public void retireWindow(W window) throws Exception { this.mapping.remove(window); boolean removed = this.sortedWindows.remove(window); if (!removed) { throw new IllegalStateException("Window " + window + " is not in in-flight window set."); } }
Removes the given window from the set of in-flight windows. @param window The {@code Window} to remove.
public W addWindow(W newWindow, MergeFunction<W> mergeFunction) throws Exception { MergeResultCollector collector = new MergeResultCollector(); windowAssigner.mergeWindows(newWindow, sortedWindows, collector); W resultWindow = newWindow; boolean isNewWindowMerged = false; // perform the merge for (Map.Entry<W, Collection<W>> c : collector.mergeResults.entrySet()) { W mergeResult = c.getKey(); Collection<W> mergedWindows = c.getValue(); // if our new window is in the merged windows make the merge result the // result window if (mergedWindows.remove(newWindow)) { isNewWindowMerged = true; resultWindow = mergeResult; } // if our new window is the same as a pre-exising window, nothing to do if (mergedWindows.isEmpty()) { continue; } // pick any of the merged windows and choose that window's state window // as the state window for the merge result W mergedStateNamespace = this.mapping.get(mergedWindows.iterator().next()); // figure out the state windows that we are merging List<W> mergedStateWindows = new ArrayList<>(); for (W mergedWindow : mergedWindows) { W res = this.mapping.get(mergedWindow); if (res != null) { this.mapping.remove(mergedWindow); this.sortedWindows.remove(mergedWindow); // don't put the target state window into the merged windows if (!res.equals(mergedStateNamespace)) { mergedStateWindows.add(res); } } } this.mapping.put(mergeResult, mergedStateNamespace); this.sortedWindows.add(mergeResult); // don't merge the new window itself, it never had any state associated with it // i.e. if we are only merging one pre-existing window into itself // without extending the pre-exising window if (!(mergedWindows.contains(mergeResult) && mergedWindows.size() == 1)) { mergeFunction.merge(mergeResult, mergedWindows, mergedStateNamespace, mergedStateWindows); } } // the new window created a new, self-contained window without merging if (collector.mergeResults.isEmpty() || (resultWindow.equals(newWindow) && !isNewWindowMerged)) { this.mapping.put(resultWindow, resultWindow); this.sortedWindows.add(resultWindow); } return resultWindow; }
Adds a new {@code Window} to the set of in-flight windows. It might happen that this triggers merging of previously in-flight windows. In that case, the provided {@link MergeFunction} is called. <p>This returns the window that is the representative of the added window after adding. This can either be the new window itself, if no merge occurred, or the newly merged window. Adding an element to a window or calling trigger functions should only happen on the returned representative. This way, we never have to deal with a new window that is immediately swallowed up by another window. <p>If the new window is merged, the {@code MergeFunction} callback arguments also don't contain the new window as part of the list of merged windows. @param newWindow The new {@code Window} to add. @param mergeFunction The callback to be invoked in case a merge occurs. @return The {@code Window} that the new {@code Window} ended up in. This can also be the new {@code Window} itself in case no merge occurred. @throws Exception
public final boolean isResolved() { return getPathParameters().stream().filter(MessageParameter::isMandatory).allMatch(MessageParameter::isResolved) && getQueryParameters().stream().filter(MessageParameter::isMandatory).allMatch(MessageParameter::isResolved); }
Returns whether all mandatory parameters have been resolved. @return true, if all mandatory parameters have been resolved, false otherwise
public static String resolveUrl(String genericUrl, MessageParameters parameters) { Preconditions.checkState(parameters.isResolved(), "Not all mandatory message parameters were resolved."); StringBuilder path = new StringBuilder(genericUrl); StringBuilder queryParameters = new StringBuilder(); for (MessageParameter<?> pathParameter : parameters.getPathParameters()) { if (pathParameter.isResolved()) { int start = path.indexOf(':' + pathParameter.getKey()); final String pathValue = Preconditions.checkNotNull(pathParameter.getValueAsString()); // only replace path parameters if they are present if (start != -1) { path.replace(start, start + pathParameter.getKey().length() + 1, pathValue); } } } boolean isFirstQueryParameter = true; for (MessageQueryParameter<?> queryParameter : parameters.getQueryParameters()) { if (queryParameter.isResolved()) { if (isFirstQueryParameter) { queryParameters.append('?'); isFirstQueryParameter = false; } else { queryParameters.append('&'); } queryParameters.append(queryParameter.getKey()); queryParameters.append('='); queryParameters.append(queryParameter.getValueAsString()); } } path.append(queryParameters); return path.toString(); }
Resolves the given URL (e.g. "jobs/:jobid") using the given path/query parameters. <p>This method will fail with an {@link IllegalStateException} if any mandatory parameter was not resolved. <p>Unresolved optional parameters will be ignored. @param genericUrl URL to resolve @param parameters message parameters @return resolved url, e.g. "/jobs/1234?state=running" @throws IllegalStateException if any mandatory parameter was not resolved
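A hedged sketch using the JobMessageParameters class from flink-runtime; the job id is made up for illustration.

import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.rest.messages.JobMessageParameters;
import org.apache.flink.runtime.rest.messages.MessageParameters;

JobMessageParameters parameters = new JobMessageParameters();
parameters.jobPathParameter.resolve(JobID.fromHexString("1b5af3e1b6f9bdb88d6a6ee7eef31f6b"));
String url = MessageParameters.resolveUrl("/jobs/:jobid", parameters);
// url is "/jobs/1b5af3e1b6f9bdb88d6a6ee7eef31f6b"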
public static Database createHiveDatabase(String dbName, CatalogDatabase db) { Map<String, String> props = db.getProperties(); return new Database( dbName, db.getDescription().isPresent() ? db.getDescription().get() : null, null, props); }
Creates a Hive database from CatalogDatabase.
@Override @SuppressWarnings("unchecked") protected GroupReduceOperatorBase<?, OUT, ?> translateToDataFlow(Operator<IN> input) { String name = getName() != null ? getName() : "GroupReduce at " + defaultName; // wrap CombineFunction in GroupCombineFunction if combinable if (combinable && function instanceof CombineFunction<?, ?>) { this.function = function instanceof RichGroupReduceFunction<?, ?> ? new RichCombineToGroupCombineWrapper((RichGroupReduceFunction<?, ?>) function) : new CombineToGroupCombineWrapper((CombineFunction<?, ?>) function); } // distinguish between grouped reduce and non-grouped reduce if (grouper == null) { // non grouped reduce UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType()); GroupReduceOperatorBase<IN, OUT, GroupReduceFunction<IN, OUT>> po = new GroupReduceOperatorBase<>(function, operatorInfo, new int[0], name); po.setCombinable(combinable); po.setInput(input); // the parallelism for a non grouped reduce can only be 1 po.setParallelism(1); return po; } if (grouper.getKeys() instanceof SelectorFunctionKeys) { @SuppressWarnings("unchecked") SelectorFunctionKeys<IN, ?> selectorKeys = (SelectorFunctionKeys<IN, ?>) grouper.getKeys(); if (grouper instanceof SortedGrouping) { SortedGrouping<IN> sortedGrouping = (SortedGrouping<IN>) grouper; SelectorFunctionKeys<IN, ?> sortKeys = sortedGrouping.getSortSelectionFunctionKey(); Ordering groupOrder = sortedGrouping.getGroupOrdering(); PlanUnwrappingSortedReduceGroupOperator<IN, OUT, ?, ?> po = translateSelectorFunctionSortedReducer( selectorKeys, sortKeys, groupOrder, function, getResultType(), name, input, isCombinable() ); po.setParallelism(this.getParallelism()); po.setCustomPartitioner(grouper.getCustomPartitioner()); return po; } else { PlanUnwrappingReduceGroupOperator<IN, OUT, ?> po = translateSelectorFunctionReducer( selectorKeys, function, getResultType(), name, input, isCombinable()); po.setParallelism(this.getParallelism()); po.setCustomPartitioner(grouper.getCustomPartitioner()); return po; } } else if (grouper.getKeys() instanceof ExpressionKeys) { int[] logicalKeyPositions = grouper.getKeys().computeLogicalKeyPositions(); UnaryOperatorInformation<IN, OUT> operatorInfo = new UnaryOperatorInformation<>(getInputType(), getResultType()); GroupReduceOperatorBase<IN, OUT, GroupReduceFunction<IN, OUT>> po = new GroupReduceOperatorBase<>(function, operatorInfo, logicalKeyPositions, name); po.setCombinable(combinable); po.setInput(input); po.setParallelism(getParallelism()); po.setCustomPartitioner(grouper.getCustomPartitioner()); // set group order if (grouper instanceof SortedGrouping) { SortedGrouping<IN> sortedGrouper = (SortedGrouping<IN>) grouper; int[] sortKeyPositions = sortedGrouper.getGroupSortKeyPositions(); Order[] sortOrders = sortedGrouper.getGroupSortOrders(); Ordering o = new Ordering(); for (int i = 0; i < sortKeyPositions.length; i++) { o.appendOrdering(sortKeyPositions[i], null, sortOrders[i]); } po.setGroupOrder(o); } return po; } else { throw new UnsupportedOperationException("Unrecognized key type."); } }
--------------------------------------------------------------------------------------------