name | code_snippet | score
---|---|---
streampipes_OutputStrategies_keep_rdh
|
/**
* Creates a {@link org.apache.streampipes.model.output.KeepOutputStrategy}. Keep output strategies do not change the
* schema of an input event, i.e., the output schema matches the input schema.
*
* @return KeepOutputStrategy
*/
public static KeepOutputStrategy keep() {
return new KeepOutputStrategy();
}
| 3.26 |
streampipes_OutputStrategies_fixed_rdh
|
/**
* Creates a {@link org.apache.streampipes.model.output.FixedOutputStrategy}.
* Fixed output strategies always output the schema defined by the pipeline element itself.
*
* @param fixedProperties
* An arbitrary number of event properties that form the output event schema
* @return FixedOutputStrategy
*/
public static FixedOutputStrategy fixed(EventProperty... fixedProperties) {
return new FixedOutputStrategy(Arrays.asList(fixedProperties));
}
| 3.26 |
streampipes_OutputStrategies_userDefined_rdh
|
/**
* Creates a {@link org.apache.streampipes.model.output.UserDefinedOutputStrategy}. User-defined output strategies are
* fully flexible output strategies which are created by users at pipeline development time.
*
* @return UserDefinedOutputStrategy
*/
public static UserDefinedOutputStrategy userDefined() {
  return new UserDefinedOutputStrategy();
}
| 3.26 |
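A minimal usage sketch for these factory methods, assuming a processor description is being declared; the element id, stream requirement, and label id are illustrative (the builder chain mirrors the `declareModel` entry further down):

```java
// Hedged sketch: choosing an output strategy when declaring a data processor.
DataProcessorDescription description = ProcessingElementBuilder
    .create("org.example.my-processor") // illustrative element id
    .requiredStream(StreamRequirementsBuilder.create()
        .requiredPropertyWithUnaryMapping(EpRequirements.anyProperty(),
            Labels.withId("FIELD-MAPPING"), PropertyScope.NONE) // illustrative label id
        .build())
    .outputStrategy(OutputStrategies.keep()) // or: OutputStrategies.fixed(...), OutputStrategies.userDefined()
    .build();
```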
streampipes_SpOpcUaClient_getClient_rdh
|
/**
 * @return the current {@link org.eclipse.milo.opcua.sdk.client.OpcUaClient}
 */
public OpcUaClient getClient() {
return this.client;
}
| 3.26 |
streampipes_SpOpcUaClient_createListSubscription_rdh
|
/**
 * Registers subscriptions for the given OPC UA nodes.
 *
 * @param nodes
 *          List of {@link org.eclipse.milo.opcua.stack.core.types.builtin.NodeId}
 * @param opcUaAdapter
 *          current instance of {@link OpcUaAdapter}
 * @throws Exception
 */
public void createListSubscription(List<NodeId> nodes, OpcUaAdapter opcUaAdapter) throws Exception {
  client.getSubscriptionManager().addSubscriptionListener(new UaSubscriptionManager.SubscriptionListener() {
    @Override
    public void onSubscriptionTransferFailed(UaSubscription subscription, StatusCode statusCode) {
      LOG.warn("Transfer for subscriptionId={} failed: {}", subscription.getSubscriptionId(), statusCode);
      try {
        initSubscription(nodes, opcUaAdapter);
      } catch (Exception e) {
        LOG.error("Re-creating the subscription failed", e);
      }
    }
  });
  initSubscription(nodes, opcUaAdapter);
}
| 3.26 |
streampipes_SpOpcUaClient_connect_rdh
|
/**
 * Establishes a connection to the OPC UA endpoint configured for this {@link SpOpcUaClient} instance.
 *
 * @throws UaException
 *           An exception occurring during OPC connection
 */
public void connect() throws UaException, ExecutionException, InterruptedException, SpConfigurationException, URISyntaxException {
OpcUaClientConfig clientConfig = new MiloOpcUaConfigurationProvider().makeClientConfig(spOpcConfig);
this.client = OpcUaClient.create(clientConfig);
client.connect().get();
}
| 3.26 |
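Taken together, the three `SpOpcUaClient` methods above suggest the following flow; this is a hedged sketch, and the parameters stand in for objects created elsewhere in the adapter lifecycle:

```java
// Hedged sketch of the connect-then-subscribe flow (parameters are assumptions).
void startMonitoring(SpOpcUaClient spOpcUaClient, List<NodeId> nodes, OpcUaAdapter opcUaAdapter) throws Exception {
  spOpcUaClient.connect();                                   // blocks until the session is established
  spOpcUaClient.createListSubscription(nodes, opcUaAdapter); // re-subscribes if a transfer fails
  OpcUaClient miloClient = spOpcUaClient.getClient();        // underlying Milo client for further calls
}
```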
streampipes_StatementHandler_m0_rdh
|
/**
 * Fills a prepared statement with the actual values based on {@link StatementHandler#eventParameterMap}. If
 * {@link StatementHandler#eventParameterMap} is empty or incomplete (which should only happen once in the
 * beginning), it calls
 * {@link StatementHandler#generatePreparedStatement(DbDescription, TableDescription, Connection, Map)}
 * to generate a new one.
 *
 * @param event
 * @param pre
 * @throws SQLException
 * @throws SpRuntimeException
 */
private void m0(DbDescription dbDescription, TableDescription tableDescription, Connection connection, final Map<String, Object> event, String pre) throws SQLException, SpRuntimeException {
  // TODO: Possible error: when the event does not contain all objects of the parameter list
  for (Map.Entry<String, Object> pair : event.entrySet()) {
    String newKey = pre + pair.getKey();
    if (pair.getValue() instanceof Map) {
      // recursively extracts nested values
      m0(dbDescription, tableDescription, connection, ((Map<String, Object>) (pair.getValue())), newKey + "_");
    } else {
      if (!eventParameterMap.containsKey(newKey)) {
        // TODO: start the for loop all over again
        generatePreparedStatement(dbDescription, tableDescription, connection, event);
      }
      ParameterInformation p = eventParameterMap.get(newKey);
      StatementUtils.setValue(p, pair.getValue(), this.getPreparedStatement());
    }
  }
}
| 3.26 |
streampipes_StatementHandler_executePreparedStatement_rdh
|
/**
 * Clears, fills, and executes the saved prepared statement {@code ps} with the data found in
 * the event. To fill in the values it calls
 * {@link StatementHandler#fillPreparedStatement(DbDescription, TableDescription, Connection, Map, String)}.
 *
 * @param event
 *          Data to be saved in the SQL table
 * @throws SQLException
 *           When the statement cannot be executed
 * @throws SpRuntimeException
 *           When the table name is not allowed, or when thrown by
 *           {@link org.apache.streampipes.sinks.databases.jvm.jdbcclient.utils.StatementUtils#setValue(ParameterInformation, Object, PreparedStatement)}
 */
public void executePreparedStatement(DbDescription dbDescription, TableDescription tableDescription, Connection connection, final Map<String, Object> event) throws SQLException, SpRuntimeException {
  if (this.getPreparedStatement() != null) {
    this.preparedStatement.clearParameters();
  }
  m0(dbDescription, tableDescription, connection, event, "");
  this.preparedStatement.executeUpdate();
}
| 3.26 |
streampipes_StatementHandler_extendPreparedStatement_rdh
|
/**
 * @param event
 * @param s1
 * @param s2
 * @param index
 * @param preProperty
 * @param prefix
 * @return the next free parameter index
 */
public int extendPreparedStatement(DbDescription dbDescription, final Map<String, Object> event, StringBuilder s1, StringBuilder s2, int index, String preProperty, String prefix) throws SpRuntimeException {
  for (Map.Entry<String, Object> pair : event.entrySet()) {
    if (pair.getValue() instanceof Map) {
      index = extendPreparedStatement(dbDescription, ((Map<String, Object>) (pair.getValue())), s1, s2, index, pair.getKey() + "_", prefix);
    } else {
      SQLStatementUtils.checkRegEx(pair.getKey(), "Columnname", dbDescription);
      eventParameterMap.put(pair.getKey(), new ParameterInformation(index, DbDataTypeFactory.getFromObject(pair.getValue(), dbDescription.getEngine())));
      if (dbDescription.isColumnNameQuoted()) {
        s1.append(prefix).append("\"").append(preProperty).append(pair.getKey()).append("\"");
      } else {
        s1.append(prefix).append(preProperty).append(pair.getKey());
      }
      s2.append(prefix).append("?");
      index++;
    }
    prefix = ", ";
  }
  return index;
}
| 3.26 |
streampipes_StatementHandler_generatePreparedStatement_rdh
|
/**
 * Initializes the variables {@link StatementHandler#eventParameterMap} and {@link StatementHandler#preparedStatement}
 * according to the parameter event.
 *
 * @param event
 *          The event which is getting analyzed
 * @throws SpRuntimeException
 *           When the table name is not allowed
 * @throws SQLException
 *           When the prepared statement cannot be created
 */
public void generatePreparedStatement(DbDescription dbDescription, TableDescription tableDescription, Connection connection, final Map<String, Object> event) throws SQLException, SpRuntimeException {
  // input: event
  // wanted: INSERT INTO test4321 ( randomString, randomValue ) VALUES ( ?,? );
  eventParameterMap.clear();
  StringBuilder statement1 = new StringBuilder("INSERT INTO ");
  StringBuilder statement2 = new StringBuilder("VALUES ( ");
  SQLStatementUtils.checkRegEx(tableDescription.getName(), "Tablename", dbDescription);
  statement1.append(tableDescription.getName()).append(" ( ");
  // Starts index at 1, since the parameterIndex in the PreparedStatement starts at 1 as well
  extendPreparedStatement(dbDescription, event, statement1, statement2, 1, "", "");
  statement1.append(" ) ");
  statement2.append(" );");
  String finalStatement = statement1.append(statement2).toString();
  this.preparedStatement = connection.prepareStatement(finalStatement);
}
| 3.26 |
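To illustrate what `generatePreparedStatement` together with `extendPreparedStatement` produces, consider a hypothetical nested event (field names are illustrative); nested maps are flattened with an underscore separator:

```java
// Hypothetical input event (names illustrative).
Map<String, Object> event = new HashMap<>();
event.put("randomString", "abc");
Map<String, Object> nested = new HashMap<>();
nested.put("randomValue", 42);
event.put("input1", nested);
// For a table named test4321, the generated statement is equivalent to:
// INSERT INTO test4321 ( randomString, input1_randomValue ) VALUES ( ?,? );
```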
streampipes_AssetLinkBuilder_withQueryHint_rdh
|
/**
* Sets the query hint for the AssetLink being built.
*
* @param queryHint
* The query hint to set.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withQueryHint(String queryHint) {
  this.assetLink.setQueryHint(queryHint);
  return this;
}
| 3.26 |
streampipes_AssetLinkBuilder_create_rdh
|
/**
* Static method to create a new instance of AssetLinkBuilder.
*
* @return A new instance of AssetLinkBuilder.
*/
public static AssetLinkBuilder create() {
return new AssetLinkBuilder();
}
| 3.26 |
streampipes_AssetLinkBuilder_withLinkLabel_rdh
|
/**
* Sets the link label for the AssetLink being built.
*
* @param linkLabel
* The link label to set.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withLinkLabel(String linkLabel) {
this.assetLink.setLinkLabel(linkLabel);
return this;
}
| 3.26 |
streampipes_AssetLinkBuilder_withResourceId_rdh
|
/**
* Sets the resource ID for the AssetLink being built.
*
* @param resourceId
* The resource ID to set.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withResourceId(String resourceId) {
this.assetLink.setResourceId(resourceId);
return this;
}
| 3.26 |
streampipes_AssetLinkBuilder_withEditingDisabled_rdh
|
/**
* Sets whether editing is disabled for the AssetLink being built.
*
* @param editingDisabled
* Whether editing is disabled.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withEditingDisabled(boolean editingDisabled) {
this.assetLink.setEditingDisabled(editingDisabled);
return this;
}
| 3.26 |
streampipes_AssetLinkBuilder_build_rdh
|
/**
* Builds and returns the final instance of AssetLink.
*
* @return The constructed AssetLink instance.
*/
public AssetLink build() {
return this.assetLink;
}
| 3.26 |
streampipes_AssetLinkBuilder_withLinkType_rdh
|
/**
* Sets the link type for the AssetLink being built.
*
* @param linkType
* The link type to set.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withLinkType(String linkType) {
this.assetLink.setLinkType(linkType);
return this;
}
| 3.26 |
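The `AssetLinkBuilder` methods above chain into a single expression; a hedged usage sketch with illustrative values:

```java
AssetLink assetLink = AssetLinkBuilder.create()
    .withResourceId("resource-123")  // illustrative
    .withLinkType("data-view")       // illustrative
    .withLinkLabel("My data view")   // illustrative
    .withQueryHint("data-view")      // illustrative
    .withEditingDisabled(false)
    .build();
```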
streampipes_Sequence_processElement1_rdh
|
// @Override
// public void open(Configuration parameters) throws Exception {
// TODO: add RuntimeContext
// state = getRuntimeContext().getState(new ValueStateDescriptor<>("sequence-event-storage",
// EventStorage.class));
// }
@Override
public void processElement1(Event value, Context ctx, Collector<Event> out) throws Exception {
  state.update(new EventStorage(System.currentTimeMillis(), value));
}
| 3.26 |
streampipes_StreamPipesClient_create_rdh
|
/**
* Create a new StreamPipes API client with custom port and HTTPS settings
*
* @param streamPipesHost
* The hostname of the StreamPipes instance without scheme
* @param streamPipesPort
* The port of the StreamPipes instance
* @param credentials
* The credentials object
* @param httpsDisabled
* Set true if the instance is not served over HTTPS
*/
public static StreamPipesClient create(String streamPipesHost, Integer streamPipesPort, CredentialsProvider credentials, boolean httpsDisabled) {
return new StreamPipesClient(streamPipesHost, streamPipesPort, credentials, httpsDisabled);
}
| 3.26 |
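Combined with the credentials factory shown in the `StreamPipesCredentials.withApiKey` entry further down, client creation might look like this; host, port, and credentials are illustrative:

```java
// Hedged sketch: client for a local, non-HTTPS instance (values illustrative).
CredentialsProvider credentials = StreamPipesCredentials.withApiKey("user@example.org", "MY_API_KEY");
StreamPipesClient client = StreamPipesClient.create("localhost", 8082, credentials, true);
```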
streampipes_StreamPipesClient_pipelineElementTemplates_rdh
|
/**
 * Get API to work with pipeline element templates
*
* @return {@link org.apache.streampipes.client.api.PipelineElementTemplateApi}
*/
@Override
public IPipelineElementTemplateApi pipelineElementTemplates() {
return new PipelineElementTemplateApi(config);
}
| 3.26 |
streampipes_StreamPipesClient_registerDataFormat_rdh
|
/**
* Register a new data format that is used by the live API
*
* @param spDataFormatFactory
* The data format factory
*/
@Override
public void registerDataFormat(SpDataFormatFactory spDataFormatFactory) {
this.config.addDataFormat(spDataFormatFactory);
}
| 3.26 |
streampipes_StreamPipesClient_processors_rdh
|
/**
* Get API to work with data processors
*
* @return {@link DataProcessorApi}
*/
@Override
public DataProcessorApi processors() {
return new DataProcessorApi(config);
}
| 3.26 |
streampipes_StreamPipesClient_sinks_rdh
|
/**
* Get API to work with data sinks
*
* @return {@link org.apache.streampipes.client.api.DataSinkApi}
 */
@Override
public DataSinkApi sinks() {
return new DataSinkApi(config);
}
/**
* Get API to work with data streams
*
* @return {@link org.apache.streampipes.client.api.DataStreamApi}
| 3.26 |
streampipes_StreamPipesClient_pipelines_rdh
|
/**
* Get API to work with pipelines
*
* @return {@link org.apache.streampipes.client.api.PipelineApi}
*/
@Override
public PipelineApi pipelines() {
return new PipelineApi(config);
}
| 3.26 |
streampipes_PipelineManager_startPipeline_rdh
|
/**
* Starts all processing elements of the pipeline with the pipelineId
*
* @param pipelineId
* of pipeline to be started
* @return pipeline status of the start operation
*/
public static PipelineOperationStatus startPipeline(String pipelineId) {
Pipeline pipeline = getPipeline(pipelineId);
return Operations.startPipeline(pipeline);
}
| 3.26 |
streampipes_PipelineManager_getPipeline_rdh
|
/**
* Returns the stored pipeline with the given pipeline id
*
* @param pipelineId
* id of pipeline
 * @return the pipeline with the given id
*/
public static Pipeline getPipeline(String pipelineId) {
return getPipelineStorage().getPipeline(pipelineId);
}
| 3.26 |
streampipes_PipelineManager_getPipelinesContainingElements_rdh
|
/**
* Checks for the pipelines that contain the processing element
*
* @param elementId
* the id of the processing Element
* @return all pipelines containing the element
*/
public static List<Pipeline> getPipelinesContainingElements(String elementId) {
return PipelineManager.getAllPipelines().stream().filter(pipeline -> mergePipelineElement(pipeline).anyMatch(el -> el.getElementId().equals(elementId))).collect(Collectors.toList());
}
| 3.26 |
streampipes_PipelineManager_getAllPipelines_rdh
|
/**
* Returns all pipelines
*
* @return all pipelines
*/
public static List<Pipeline> getAllPipelines() {
return StorageDispatcher.INSTANCE.getNoSqlStore().getPipelineStorageAPI().getAllPipelines();
}
| 3.26 |
streampipes_PipelineManager_stopPipeline_rdh
|
/**
* Stops all processing elements of the pipeline
*
* @param pipelineId
* of pipeline to be stopped
* @param forceStop
* when it is true, the pipeline is stopped, even if not all processing element
* containers could be reached
 * @return pipeline status of the stop operation
 */
public static PipelineOperationStatus stopPipeline(String pipelineId, boolean forceStop) {
Pipeline pipeline = getPipeline(pipelineId);
return Operations.stopPipeline(pipeline, forceStop);
}
| 3.26 |
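A short sketch of the start/stop lifecycle using the two methods above; the pipeline id is illustrative:

```java
PipelineOperationStatus started = PipelineManager.startPipeline("my-pipeline-id");
// ... later: force-stop even if some processing element containers are unreachable
PipelineOperationStatus stopped = PipelineManager.stopPipeline("my-pipeline-id", true);
```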
streampipes_PipelineManager_addPipeline_rdh
|
/**
* Adds a new pipeline for the user with the username to the storage
*
* @param principalSid
* the ID of the owner principal
* @param pipeline
* to be added
* @return pipelineId of the stored pipeline
*/
public static String addPipeline(String principalSid, Pipeline pipeline) {
String pipelineId = (Objects.isNull(pipeline.getPipelineId())) ? UUIDGenerator.generateUuid() : pipeline.getPipelineId();
  preparePipelineBasics(principalSid, pipeline, pipelineId);
  Operations.storePipeline(pipeline);
  Permission permission = new PermissionManager().makePermission(pipeline, principalSid);
getPermissionStorage().addPermission(permission);
return pipelineId;
}
| 3.26 |
streampipes_PipelineManager_deletePipeline_rdh
|
/**
* Deletes the pipeline with the pipeline Id
*
* @param pipelineId
* of pipeline to be deleted
*/
public static void deletePipeline(String pipelineId) {
var pipeline = getPipeline(pipelineId);
if (Objects.nonNull(pipeline)) {
getPipelineStorage().deletePipeline(pipelineId);
new NotificationsResourceManager().deleteNotificationsForPipeline(pipeline);
}
}
| 3.26 |
streampipes_ExpandTitleToContentFilter_getInstance_rdh
|
/**
* Returns the singleton instance for ExpandTitleToContentFilter.
*/
public static ExpandTitleToContentFilter getInstance() {
return INSTANCE;
}
| 3.26 |
streampipes_SpGeometryBuilder_getPrecisionModel_rdh
|
/**
 * Creates a {@link org.locationtech.jts.geom.PrecisionModel} with a specific precision.
 * For WGS84 (EPSG:4326), a {@link org.locationtech.jts.geom.PrecisionModel#FIXED} model with
 * 7 decimal positions (scale 1000000) is created. Any other EPSG code creates a
 * {@link org.locationtech.jts.geom.PrecisionModel#FLOATING} precision model.
 *
 * @param epsg
 *          EPSG code representing the SRID
 * @return {@link org.locationtech.jts.geom.PrecisionModel}
 */
protected static PrecisionModel getPrecisionModel(Integer epsg) {
  PrecisionModel precisionModel;
  if (epsg == 4326) {
    // use scale precision with 7 decimal positions like default OSM
    precisionModel = new PrecisionModel(1000000);
  } else {
    // use default constructor
    precisionModel = new PrecisionModel();
  }
  return precisionModel;
}
| 3.26 |
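Illustrative behavior of `getPrecisionModel` (it is protected, so this sketch assumes subclass or same-package access; the second EPSG code is an arbitrary example):

```java
PrecisionModel wgs84 = SpGeometryBuilder.getPrecisionModel(4326);  // FIXED, scale 1000000
PrecisionModel other = SpGeometryBuilder.getPrecisionModel(32632); // FLOATING (illustrative EPSG code)
```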
streampipes_SpGeometryBuilder_isInWGSCoordinateRange_rdh
|
/**
 * Checks whether a value lies within a WGS coordinate range.
 *
 * @param valueToCheck
 *          Any value
 * @param min
 *          Min value to check
 * @param max
 *          Max value to check
 * @return true if the value is in the (min, max) range
 */
private static boolean isInWGSCoordinateRange(double valueToCheck, double min, double max) {
return (valueToCheck > min) && (valueToCheck < max);
}
| 3.26 |
streampipes_SpServiceDefinition_addMigrators_rdh
|
/**
* Add a list of migrations to the service definition.
 * This inherently checks for duplicates and sorts the migrations such that
 * migrations affecting lower versions always come first.
*
* @param migrators
* migrators to add
*/
public void addMigrators(List<IModelMigrator<?, ?>> migrators) {
for (var migratorToAdd : migrators) {
if (this.migrators.stream().noneMatch(migrator -> MigrationComparison.isEqual(migrator, migratorToAdd))) {
this.migrators.add(migratorToAdd);
}
}
Collections.sort(this.migrators);
}
| 3.26 |
streampipes_InvocablePipelineElementResource_detach_rdh
|
// TODO move endpoint to /elementId/instances/runningInstanceId
@DELETE
@Path("{elementId}/{runningInstanceId}")
@Produces(MediaType.APPLICATION_JSON)
public Response detach(@PathParam("elementId") String elementId,
                       @PathParam("runningInstanceId") String runningInstanceId) {
  IStreamPipesRuntime<?, ?> v7 = RunningInstances.INSTANCE.getInvocation(runningInstanceId);
  if (v7 != null) {
    Response resp = v7.onRuntimeDetached(runningInstanceId);
    if (resp.isSuccess()) {
      RunningInstances.INSTANCE.remove(runningInstanceId);
    }
    return ok(resp);
  }
  return ok(new Response(elementId, false, "Could not find the running instance with id: " + runningInstanceId));
}
| 3.26 |
streampipes_Labels_withId_rdh
|
/**
* Creates a label with the string value of an enum.
* Static properties require a fully-specified label, see {@link #from(String, String, String)}
*
* @param internalId
* The internal identifier of the element, e.g., "LATITUDE-FIELD-MAPPING"
 * @return the created {@link Label}
 */
public static Label withId(Enum<?> internalId) {
return new Label(internalId.name(), "", "");
}
/**
*
* @deprecated Externalize labels by using
{@link org.apache.streampipes.sdk.builder.AbstractProcessingElementBuilder#withLocales(Locales...)}
| 3.26 |
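A minimal sketch of `withId` with an enum constant; the enum is hypothetical:

```java
// Hypothetical enum of internal ids (illustrative).
enum InternalIds { LATITUDE_FIELD_MAPPING }

// Creates a Label whose internal id is the constant's name, with empty title and description.
Label label = Labels.withId(InternalIds.LATITUDE_FIELD_MAPPING);
```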
streampipes_PipelineVerificationUtils_getRootNode_rdh
|
/**
 * Returns the root node of a partial pipeline (a pipeline without an action).
*
* @param pipeline
* @return {@link org.apache.streampipes.model.base.InvocableStreamPipesEntity}
*/
public static InvocableStreamPipesEntity getRootNode(Pipeline pipeline) throws NoSepaInPipelineException {
List<InvocableStreamPipesEntity> elements = new ArrayList<>();
elements.addAll(pipeline.getSepas());
elements.addAll(pipeline.getActions());
List<InvocableStreamPipesEntity> unconfiguredElements = elements.stream().filter(e -> !e.isConfigured()).collect(Collectors.toList());
if (unconfiguredElements.size() != 1) {
throw new NoSepaInPipelineException();
} else {
return unconfiguredElements.get(0);
}
}
| 3.26 |
streampipes_AbstractPipelineElementBuilder_iconUrl_rdh
|
/**
*
 * @deprecated Use {@link #withAssets(String...)} instead
*/
@Deprecated
public X iconUrl(String iconUrl) {
elementDescription.setIconUrl(iconUrl);
return me();
}
| 3.26 |
streampipes_AbstractPipelineElementBuilder_providesAssets_rdh
|
/**
*
 * @deprecated Use {@link #withAssets(String...)} instead
*/
@Deprecated
public X providesAssets(String... assets) {
return withAssets(assets);
}
| 3.26 |
streampipes_MqttClient_connect_rdh
|
/**
* Start blocking connection to MQTT broker.
*/
public void connect() {
  try {
    this.conn = mqtt.blockingConnection();
    this.conn.connect();
  } catch (Exception e) {
    throw new SpRuntimeException("Could not connect to MQTT broker: " + uri.toString() + ", " + e.getMessage(), e);
  }
}
| 3.26 |
streampipes_MqttClient_publish_rdh
|
/**
* Publish received event to MQTT broker.
*
* @param event
* event to be published
*/
public void publish(Event event) {
  JsonDataFormatDefinition dataFormatDefinition = new JsonDataFormatDefinition();
  byte[] payload = new String(dataFormatDefinition.fromMap(event.getRaw())).getBytes();
  try {
    this.conn.publish(options.getTopic(), payload, options.getQos(), options.isRetain());
  } catch (Exception e) {
    throw new SpRuntimeException("Could not publish to MQTT broker: " + uri.toString() + ", " + e.getMessage(), e);
  }
}
| 3.26 |
streampipes_MqttClient_createMqttClient_rdh
|
/**
* Create new MQTT client
*/
public void createMqttClient() {
this.mqtt = new MQTT();
this.uri = MqttUtils.makeMqttServerUri(options.getProtocol(), options.getHost(), options.getPort());
try {
/**
* Sets the url for connecting to the MQTT broker, e.g. {@code: tcp://localhost:1883}.
*/
mqtt.setHost(uri);
// authentication
if (options.isBasicAuth()) {
/**
* The username for authenticated sessions.
*/
mqtt.setUserName(options.getUsername());
/**
* The password for authenticated sessions.
*/
mqtt.setPassword(options.getPassword());
}
/**
* The client id used when connecting to the MQTT broker.
*/
mqtt.setClientId(options.getClientId());
/**
* Set to false if you want the MQTT server to persist topic subscriptions and ack positions across
* client sessions. Defaults to true.
*/
mqtt.setCleanSession(options.isCleanSession());
/**
* The maximum amount of time in ms to wait between reconnect attempts. Defaults to 30,000.
*/
mqtt.setReconnectDelayMax(options.getReconnectDelayMaxInMs());
/**
* Configures the Keep Alive timer in seconds. Defines the maximum time interval between messages
* received from a client. It enables the server to detect that the network connection to a client has
* dropped, without having to wait for the long TCP/IP timeout.
*/
mqtt.setKeepAlive(options.getKeepAliveInSec());
/**
* Set to "3.1.1" to use MQTT version 3.1.1. Otherwise defaults to the 3.1 protocol version.
*/
mqtt.setVersion(options.getMqttProtocolVersion());
// last will and testament options
if (options.isLastWill()) {
/**
* If set the server will publish the client's Will message to the specified topics if the client has
* an unexpected disconnection.
*/
mqtt.setWillTopic(options.getWillTopic());
/**
* Sets the quality of service to use for the Will message. Defaults to QoS.AT_MOST_ONCE.
*/
mqtt.setWillQos(options.getWillQoS());
/**
* The Will message to send. Defaults to a zero length message.
*/
mqtt.setWillMessage(options.getWillMessage());
/**
* Set to true if you want the Will to be published with the retain option.
*/
mqtt.setWillRetain(options.getWillRetain());
}
} catch (Exception e) {
throw new SpRuntimeException("Failed to initialize MQTT Client: " + e.getMessage(), e);
}
}
| 3.26 |
streampipes_MqttClient_disconnect_rdh
|
/**
* Disconnect from MQTT broker.
*/
public void disconnect() {
  try {
    if (this.conn.isConnected()) {
      this.conn.disconnect();
    }
  } catch (Exception e) {
    throw new SpRuntimeException("Could not disconnect from MQTT broker: " + uri.toString() + ", " + e.getMessage(), e);
  }
}
| 3.26 |
streampipes_DataSinkApi_subscribe_rdh
|
/**
* Subscribe to the input stream of the sink
*
* @param sink
* The data sink to subscribe to
* @param brokerConfigOverride
* Additional kafka settings which will override the default value (see docs)
* @param callback
* The callback where events will be received
*/
@Override
public ISubscription subscribe(DataSinkInvocation sink, IBrokerConfigOverride brokerConfigOverride, EventProcessor callback) {
  return new SubscriptionManager(brokerConfigOverride, sink.getInputStreams().get(0).getEventGrounding(), callback).subscribe();
}
| 3.26 |
streampipes_TextDocumentStatistics_getNumWords_rdh
|
/**
* Returns the overall number of words in all blocks.
*
* @return Sum
*/
public int getNumWords() {
return numWords;
}
| 3.26 |
streampipes_TextDocumentStatistics_avgNumWords_rdh
|
/**
* Returns the average number of words at block-level (= overall number of words divided by the
* number of blocks).
*
* @return Average
*/
public float avgNumWords() {
return numWords / ((float) (numBlocks));
}
| 3.26 |
streampipes_StreamPipesCredentials_withApiKey_rdh
|
/**
* Create new credentials settings
*
* @param username
* The username of the authenticated user -
* note that this is currently the email address of the registered user!
* @param apiKey
* The API key as generated by StreamPipes
*/
public static CredentialsProvider withApiKey(String username, String apiKey) {
return new StreamPipesApiKeyCredentials(username, apiKey);
}
| 3.26 |
streampipes_InfluxRequests_databaseExists_rdh
|
/**
* Checks whether the given database exists.
*
* @param influxDb
* The InfluxDB client instance
* @param dbName
 *          The name of the database the method should look for
 * @return True if the database exists, false otherwise
 */
public static boolean databaseExists(InfluxDB influxDb, String dbName) {
QueryResult queryResult = influxDb.query(new Query("SHOW DATABASES", ""));
for (List<Object> a : queryResult.getResults().get(0).getSeries().get(0).getValues()) {
if (a.get(0).equals(dbName)) {
return true;
}
}
return false;
}
| 3.26 |
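A hedged usage sketch; the connection URL and credentials are illustrative, and `InfluxDBFactory.connect` is the standard influxdb-java entry point:

```java
InfluxDB influxDb = InfluxDBFactory.connect("http://localhost:8086", "admin", "password"); // illustrative
if (!InfluxRequests.databaseExists(influxDb, "streampipes")) {
  // database is missing; create it before writing (creation call not shown here)
}
```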
streampipes_DataStreamResourceManager_update_rdh
|
/**
 * Takes a data stream {@link SpDataStream} as an input and updates it in the database.
*/
public void update(SpDataStream dataStream) {
db.updateElement(dataStream);
}
| 3.26 |
streampipes_UnicodeTokenizer_tokenize_rdh
|
/**
* Tokenizes the text and returns an array of tokens.
*
* @param text
* The text
* @return The tokens
*/
public static String[] tokenize(final CharSequence text) {
return PAT_NOT_WORD_BOUNDARY.matcher(PAT_WORD_BOUNDARY.matcher(text).replaceAll("")).replaceAll("$1").replaceAll("[ ]+", " ").trim().split("[ ]+");
}
| 3.26 |
streampipes_Image_getArea_rdh
|
/**
* Returns the image's area (specified by width * height), or -1 if width/height weren't both
* specified or could not be parsed.
*
 * @return the image's area, or -1
 */
public int getArea() {
return area;
}
| 3.26 |
streampipes_SQLStatementUtils_extractEventProperties_rdh
|
/**
* Creates a SQL-Query with the given Properties (SQL-Injection safe). For nested properties it
* recursively extracts the information. EventPropertyList are getting converted to a string (so
* in SQL to a VARCHAR(255)). For each type it uses {@link DbDataTypeFactory#getFromUri(String, SupportedDbEngines)}
* internally to identify the SQL-type from the runtimeType.
*
* @param properties
* The list of properties which should be included in the query
* @param preProperty
* A string which gets prepended to all property runtimeNames
* @return A StringBuilder with the query which needs to be executed in order to create the table
* @throws SpRuntimeException
* If the runtimeName of any property is not allowed
*/
public static StringBuilder extractEventProperties(List<EventProperty> properties, String preProperty, DbDescription dbDescription) throws SpRuntimeException {
  // output: "randomString VARCHAR(255), randomValue INT"
  StringBuilder v0 = new StringBuilder();
  String separator = "";
for (EventProperty property : properties) {
// Protection against SqlInjection
checkRegEx(property.getRuntimeName(), "Column name", dbDescription);
if (property instanceof EventPropertyNested) {
// if it is a nested property, recursively extract the needed properties
StringBuilder tmp = extractEventProperties(((EventPropertyNested) (property)).getEventProperties(), (preProperty + property.getRuntimeName()) + "_", dbDescription);
if (tmp.length() > 0) {
v0.append(separator).append(tmp);
}
} else {
// Adding the name of the property (e.g. "randomString")
// Or for properties in a nested structure: input1_randomValue
// "separator" is there for the ", " part
if (dbDescription.isColumnNameQuoted()) {
v0.append(separator).append("\"").append(preProperty).append(property.getRuntimeName()).append("\" ");
} else {
v0.append(separator).append(preProperty).append(property.getRuntimeName()).append(" ");
}
// adding the type of the property (e.g. "VARCHAR(255)")
if (property instanceof EventPropertyPrimitive) {
v0.append(DbDataTypeFactory.getFromUri(((EventPropertyPrimitive) (property)).getRuntimeType(), dbDescription.getEngine()));
} else {
// Must be an EventPropertyList then
v0.append(DbDataTypeFactory.getFromUri(XSD.STRING.toString(), dbDescription.getEngine()));
}
}
    separator = ", ";
}
return v0;
}
| 3.26 |
streampipes_SQLStatementUtils_checkRegEx_rdh
|
/**
* Checks if the input string is allowed (regEx match and length > 0)
*
* @param input
* String which is getting matched with the regEx
* @param regExIdentifier
* Information about the use of the input. Gets included in the exception message
* @throws SpRuntimeException
* If {@code input} does not match with {@link DbDescription#getAllowedRegEx()}
* or if the length of {@code input} is 0
*/
public static final void checkRegEx(String input, String regExIdentifier, DbDescription dbDescription) throws SpRuntimeException {
if ((!input.matches(dbDescription.getAllowedRegEx())) || (input.length() == 0)) {
      throw new SpRuntimeException(regExIdentifier + " '" + input + "' not allowed (allowed: '"
          + dbDescription.getAllowedRegEx() + "') with a min length of 1");
}
}
| 3.26 |
streampipes_ConnectWorkerDescriptionProvider_getRegisteredAdapters_rdh
|
/**
* This is a helper method to mock the Declarer Singleton in unit tests
*
* @return the registered adapters from the DeclarerSingleton
*/
public Collection<StreamPipesAdapter> getRegisteredAdapters() {
return DeclarersSingleton.getInstance().getAdapters();
}
| 3.26 |
streampipes_ConnectWorkerDescriptionProvider_getAdapterDescriptions_rdh
|
/**
* Retrieves a list of all adapter descriptions that are currently registered.
*
* @return a list of {@link AdapterDescription} objects representing the registered adapters
*/
public List<AdapterDescription> getAdapterDescriptions() {
return getRegisteredAdapters().stream().map(adapter -> applyLocales(adapter.declareConfig().getAdapterDescription())).toList();
}
| 3.26 |
streampipes_MigrateExtensionsResource_getMigrator_rdh
|
/**
* Find and return the corresponding {@link IModelMigrator} instance within the registered migrators.
* This allows to pass the corresponding model migrator to a {@link ModelMigratorConfig} which is exchanged
* between Core and Extensions service.
*
* @param modelMigratorConfig
* config that describes the model migrator to be returned
* @return Optional model migrator which is empty in case no appropriate migrator is found among the registered.
*/
public Optional<MmT> getMigrator(ModelMigratorConfig modelMigratorConfig) {
  return DeclarersSingleton.getInstance().getServiceDefinition().getMigrators().stream()
      .filter(modelMigrator -> modelMigrator.config().equals(modelMigratorConfig))
      .map(modelMigrator -> ((MmT) (modelMigrator)))
      .findFirst();
}
| 3.26 |
streampipes_MigrateExtensionsResource_executeMigration_rdh
|
/**
* Executes the migration for the given pipeline element based on the given migrator.
*
* @param migrator
* migrator that executes the migration
* @param pipelineElementDescription
* pipeline element to be migrated
* @return the migration result containing either the migrated element or the original one in case of a failure
*/
protected MigrationResult<T> executeMigration(MmT migrator, T pipelineElementDescription) {
var extractor = getPropertyExtractor(pipelineElementDescription);
try {
    var result = migrator.migrate(pipelineElementDescription, extractor);
if (result.success()) {
LOG.info("Migration successfully finished.");
// Since adapter migration was successful, version can be adapted to the target version.
// this step is explicitly performed here and not left to the migration itself to
// prevent leaving this step out
var migratedProcessor = result.element();
migratedProcessor.setVersion(migrator.config().toVersion());
return MigrationResult.success(migratedProcessor);
} else {
LOG.error("Migration failed with the following reason: {}", result.message());
// The failed migration is documented in the MigrationResult
// The core is expected to handle the response accordingly, so we can safely return a positive status code
return result;
    }
  } catch (RuntimeException e) {
LOG.error("An unexpected exception caused the migration to fail - " + "sending exception report in migration result");
return MigrationResult.failure(pipelineElementDescription, String.format("Migration failed due to an unexpected exception: %s", StringUtils.join(e.getStackTrace(), "\n")));
}
}
| 3.26 |
streampipes_MigrateExtensionsResource_handleMigration_rdh
|
/**
* Migrates a pipeline element instance based on the provided {@link MigrationRequest}.
* The outcome of the migration is described in {@link MigrationResult}.
* The result is always part of the response.
* Independent, of the migration outcome, the returned response always has OK as status code.
* It is the responsibility of the recipient to interpret the migration result and act accordingly.
*
* @param migrationRequest
* Request that contains both the pipeline element to be migrated and the migration config.
* @return A response with status code ok, that contains a migration result reflecting the outcome of the operation.
*/
protected MigrationResult<T> handleMigration(MigrationRequest<T> migrationRequest) {
var pipelineElementDescription = migrationRequest.migrationElement();
var migrationConfig = migrationRequest.modelMigratorConfig();
LOG.info("Received migration request for pipeline element '{}' to migrate from version {} to {}", pipelineElementDescription.getElementId(), migrationConfig.fromVersion(), migrationConfig.toVersion());
var migratorOptional = getMigrator(migrationConfig);
if (migratorOptional.isPresent()) {
LOG.info("Migrator found for request, starting migration...");
return executeMigration(migratorOptional.get(), pipelineElementDescription);
}
LOG.error("Migrator for migration config {} could not be found. Migration is cancelled.", migrationConfig);
return MigrationResult.failure(pipelineElementDescription, String.format("The given migration config '%s' could not be mapped to a registered migrator.", migrationConfig));
}
| 3.26 |
streampipes_LargestContentExtractor_getInstance_rdh
|
/**
* Returns the singleton instance for {@link LargestContentExtractor}.
*/
public static LargestContentExtractor getInstance() {
return INSTANCE;
}
| 3.26 |
streampipes_DensityRulesClassifier_getInstance_rdh
|
/**
 * Returns the singleton instance for DensityRulesClassifier.
*/
public static DensityRulesClassifier getInstance() {
return INSTANCE;
}
| 3.26 |
streampipes_ChangedValueDetectionProcessor_declareModel_rdh
|
// TODO: Change Icon
@Override
public DataProcessorDescription declareModel() {
  return ProcessingElementBuilder.create("org.apache.streampipes.processors.transformation.jvm.changed-value")
      .category(DataProcessorType.VALUE_OBSERVER)
      .withLocales(Locales.EN)
      .withAssets(Assets.DOCUMENTATION)
      .requiredStream(StreamRequirementsBuilder.create()
          .requiredPropertyWithUnaryMapping(EpRequirements.anyProperty(), Labels.withId(COMPARE_FIELD_ID), PropertyScope.NONE)
          .build())
      .outputStrategy(OutputStrategies.append(EpProperties.timestampProperty(CHANGE_FIELD_NAME)))
      .build();
}
| 3.26 |
streampipes_ResetManagement_reset_rdh
|
/**
* Remove all configurations for this user. This includes:
* [pipeline assembly cache, pipelines, adapters, files]
*
 * @param username
 *          The user whose configurations are removed
 */
public static void reset(String username) {
  logger.info("Start resetting the system");
  // Reset the hide-tutorial flag for the user
  UserResourceManager.setHideTutorial(username, true);
  // Clear pipeline assembly cache
  PipelineCacheManager.removeCachedPipeline(username);
  PipelineCanvasMetadataCacheManager.removeCanvasMetadataFromCache(username);
  // Stop and delete all pipelines
  List<Pipeline> allPipelines = PipelineManager.getAllPipelines();
  allPipelines.forEach(pipeline -> {
    PipelineManager.stopPipeline(pipeline.getPipelineId(), true);
    PipelineManager.deletePipeline(pipeline.getPipelineId());
  });
  // Stop and delete all adapters
  AdapterMasterManagement adapterMasterManagement = new AdapterMasterManagement();
  try {
    List<AdapterDescription> allAdapters = adapterMasterManagement.getAllAdapterInstances();
    allAdapters.forEach(adapterDescription -> {
      try {
        adapterMasterManagement.deleteAdapter(adapterDescription.getElementId());
      } catch (AdapterException e) {
        logger.error("Failed to delete adapter with id: " + adapterDescription.getElementId(), e);
      }
    });
  } catch (AdapterException e) {
    logger.error("Failed to load all adapter descriptions", e);
  }
  // Delete all files
  List<FileMetadata> allFiles = FileManager.getAllFiles();
  allFiles.forEach(fileMetadata -> FileManager.deleteFile(fileMetadata.getFileId()));
  // Remove all data in the data lake
  IDataExplorerSchemaManagement dataLakeMeasureManagement = new DataExplorerSchemaManagement();
  DataExplorerQueryManagement dataExplorerQueryManagement = new DataExplorerQueryManagement(dataLakeMeasureManagement);
  List<DataLakeMeasure> allMeasurements = dataLakeMeasureManagement.getAllMeasurements();
  allMeasurements.forEach(measurement -> {
    boolean isSuccessDataLake = dataExplorerQueryManagement.deleteData(measurement.getMeasureName());
    if (isSuccessDataLake) {
      dataLakeMeasureManagement.deleteMeasurementByName(measurement.getMeasureName());
    }
  });
  // Remove all data view widgets
  IDataExplorerWidgetStorage widgetStorage = StorageDispatcher.INSTANCE.getNoSqlStore().getDataExplorerWidgetStorage();
  widgetStorage.getAllDataExplorerWidgets().forEach(widget -> widgetStorage.deleteDataExplorerWidget(widget.getId()));
  // Remove all data views
  IDashboardStorage dataLakeDashboardStorage = StorageDispatcher.INSTANCE.getNoSqlStore().getDataExplorerDashboardStorage();
  dataLakeDashboardStorage.getAllDashboards().forEach(dashboard -> dataLakeDashboardStorage.deleteDashboard(dashboard.getCouchDbId()));
  // Remove all dashboard widgets
  IDashboardWidgetStorage dashboardWidgetStorage = StorageDispatcher.INSTANCE.getNoSqlStore().getDashboardWidgetStorage();
  dashboardWidgetStorage.getAllDashboardWidgets().forEach(widget -> dashboardWidgetStorage.deleteDashboardWidget(widget.getId()));
  // Remove all dashboards
  IDashboardStorage dashboardStorage = StorageDispatcher.INSTANCE.getNoSqlStore().getDashboardStorage();
  dashboardStorage.getAllDashboards().forEach(dashboard -> dashboardStorage.deleteDashboard(dashboard.getCouchDbId()));
  logger.info("Resetting the system was completed");
}
| 3.26 |
streampipes_Operations_validatePipeline_rdh
|
/**
*
* @param pipeline
* the pipeline to validate
* @return PipelineModificationMessage a message containing desired pipeline modifications
*/
public static PipelineModificationMessage validatePipeline(Pipeline pipeline) throws Exception {
return new PipelineVerificationHandlerV2(pipeline).verifyPipeline();
}
| 3.26 |
streampipes_ImageZipAdapter_start_rdh
|
/**
* First extracts the user input and then starts a thread publishing events with images in the zip file
*
* @param collector
* is used to pre-process and publish events on message broker
* @param extractor
* to extract configurations
* @param infinite
* Describes if the replay should be restarted when it is finished or not
*/
public void start(IEventCollector collector, IStaticPropertyExtractor extractor, boolean infinite) throws AdapterException {
  Integer timeBetweenReplay = extractor.singleValueParameter(ImageZipUtils.INTERVAL_KEY, Integer.class);
  String zipFileUrl = extractor.selectedFilename(ImageZipUtils.ZIP_FILE_KEY);
  ZipFileImageIterator zipFileImageIterator;
  try {
    zipFileImageIterator = new ZipFileImageIterator(zipFileUrl, infinite);
  } catch (IOException e) {
    throw new AdapterException("Error while reading images in the zip file");
  }
  running = true;
  task = new Thread(() -> {
    while (running && zipFileImageIterator.hasNext()) {
      try {
        String image = zipFileImageIterator.next();
        Map<String, Object> result = new HashMap<>();
        result.put(ImageZipUtils.TIMESTAMP, System.currentTimeMillis());
        result.put(ImageZipUtils.IMAGE, image);
        collector.collect(result);
      } catch (IOException e) {
        LOG.error("Error while reading an image from the zip file " + e.getMessage());
      }
      try {
        TimeUnit.MILLISECONDS.sleep(timeBetweenReplay);
      } catch (InterruptedException e) {
        LOG.error("Error while waiting for next replay round" + e.getMessage());
      }
    }
  });
  task.start();
}
| 3.26 |
streampipes_ImageZipAdapter_stop_rdh
|
/**
* Stops the running thread that publishes the images
*/
public void stop() {
task.interrupt();
running = false;
}
| 3.26 |
streampipes_TreeUtils_findSEPAElement_rdh
|
/**
*
* @param id
* the DOM ID
* @param sepas
* list of sepas in model-client format
* @param streams
* list of streams in model-client format
* @return a SEPA-client element
*/
public static NamedStreamPipesEntity findSEPAElement(String id, List<DataProcessorInvocation> sepas, List<SpDataStream> streams) {
List<NamedStreamPipesEntity> allElements = new ArrayList<>();
allElements.addAll(sepas);
allElements.addAll(streams);
for (NamedStreamPipesEntity element : allElements) {
if (id.equals(element.getDom())) {
return element;
}
}
// TODO
return null;
}
| 3.26 |
streampipes_TreeUtils_findByDomId_rdh
|
/**
*
* @param id
* the DOM ID
* @param graphs
* list of invocation graphs
* @return an invocation graph with a given DOM Id
*/
public static InvocableStreamPipesEntity findByDomId(String id, List<InvocableStreamPipesEntity> graphs) {
for (InvocableStreamPipesEntity graph : graphs) {
if (graph.getDom().equals(id)) {
return graph;
}
}
// TODO
return null;
}
| 3.26 |
streampipes_TagActionMap_setTagAction_rdh
|
/**
* Sets a particular {@link TagAction} for a given tag. Any existing TagAction for that tag will
* be removed and overwritten.
*
* @param tag
* The tag (will be stored internally 1. as it is, 2. lower-case, 3. upper-case)
* @param action
* The {@link TagAction}
*/
protected void setTagAction(final String tag, final TagAction action) {
put(tag.toUpperCase(), action);
put(tag.toLowerCase(), action);
put(tag, action);
}
| 3.26 |
streampipes_TagActionMap_m0_rdh
|
/**
* Adds a particular {@link TagAction} for a given tag. If a TagAction already exists for that
* tag, a chained action, consisting of the previous and the new {@link TagAction} is created.
*
* @param tag
* The tag (will be stored internally 1. as it is, 2. lower-case, 3. upper-case)
* @param action
* The {@link TagAction}
*/
protected void m0(final String tag, final TagAction action) {
TagAction previousAction = get(tag);
if (previousAction == null) {
setTagAction(tag, action);
} else {
setTagAction(tag, new CommonTagActions.Chained(previousAction, action));
}
}
| 3.26 |
streampipes_ElasticsearchSinkBase_buildBulkProcessor_rdh
|
/**
* Build the {@link BulkProcessor}.
*
* <p>Note: this is exposed for testing purposes.
*/
@VisibleForTesting
protected BulkProcessor buildBulkProcessor(BulkProcessor.Listener listener) {
  checkNotNull(listener);
  BulkProcessor.Builder bulkProcessorBuilder = callBridge.createBulkProcessorBuilder(client, listener);
  // This makes flush() blocking
  bulkProcessorBuilder.setConcurrentRequests(0);
  if (bulkProcessorFlushMaxActions != null) {
    bulkProcessorBuilder.setBulkActions(bulkProcessorFlushMaxActions);
  }
if (bulkProcessorFlushMaxSizeMb != null) {
bulkProcessorBuilder.setBulkSize(new ByteSizeValue(bulkProcessorFlushMaxSizeMb, ByteSizeUnit.MB));
}
if (bulkProcessorFlushIntervalMillis != null) {
bulkProcessorBuilder.setFlushInterval(TimeValue.timeValueMillis(bulkProcessorFlushIntervalMillis));
}
// if backoff retrying is disabled, bulkProcessorFlushBackoffPolicy will be null
callBridge.configureBulkProcessorBackoff(bulkProcessorBuilder, bulkProcessorFlushBackoffPolicy);
return bulkProcessorBuilder.build();
}
| 3.26 |
streampipes_ElasticsearchSinkBase_disableFlushOnCheckpoint_rdh
|
/**
* Disable flushing on checkpoint. When disabled, the sink will not wait for all
* pending action requests to be acknowledged by Elasticsearch on checkpoints.
*
* <p>NOTE: If flushing on checkpoint is disabled, the Flink Elasticsearch Sink does NOT
* provide any strong guarantees for at-least-once delivery of action requests.
*/
public void disableFlushOnCheckpoint() {
this.flushOnCheckpoint = false;
}
| 3.26 |
streampipes_TrailingHeadlineToBoilerplateFilter_getInstance_rdh
|
/**
 * Returns the singleton instance for TrailingHeadlineToBoilerplateFilter.
*/
public static TrailingHeadlineToBoilerplateFilter getInstance() {
return INSTANCE;
}
| 3.26 |
streampipes_Formats_cborFormat_rdh
|
/**
* Defines the transport format CBOR used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type CBOR.
*/
public static TransportFormat cborFormat() {
return new TransportFormat(MessageFormat.CBOR);
}
/**
* Defines the transport format Fast-Serializer used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat}
| 3.26 |
streampipes_Formats_smileFormat_rdh
|
/**
* Defines the transport format SMILE used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type SMILE.
*/
public static TransportFormat smileFormat() {
return new TransportFormat(MessageFormat.SMILE);
}
| 3.26 |
streampipes_Formats_thriftFormat_rdh
|
/**
* Defines the transport format Apache Thrift used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type Thrift.
*/
public static TransportFormat thriftFormat() {
return new TransportFormat(MessageFormat.THRIFT);
}
| 3.26 |
streampipes_Formats_jsonFormat_rdh
|
/**
* Defines the transport format JSON used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type JSON.
*/
public static TransportFormat jsonFormat() {
return new TransportFormat(MessageFormat.JSON);
}
| 3.26 |
streampipes_TerminatingBlocksFinder_process_rdh
|
// public static long timeSpent = 0;
public boolean process(TextDocument doc) throws BoilerpipeProcessingException {
  boolean changes = false;
  // long t = System.currentTimeMillis();
  for (TextBlock tb : doc.getTextBlocks()) {
    final int numWords = tb.getNumWords();
    if (numWords < 15) {
      final String text = tb.getText().trim();
      final int len = text.length();
      if (len >= 8) {
        final String textLC = text.toLowerCase();
        if (textLC.startsWith("comments")
            || startsWithNumber(textLC, len, " comments", " users responded in")
            || textLC.startsWith("© reuters")
            || textLC.startsWith("please rate this")
            || textLC.startsWith("post a comment")
            || textLC.contains("what you think...")
            || textLC.contains("add your comment")
            || textLC.contains("add comment")
            || textLC.contains("reader views")
            || textLC.contains("have your say")
            || textLC.contains("reader comments")
            || textLC.contains("rätta artikeln")
            || textLC.equals("thanks for your comments - this feedback is now closed")) {
          tb.addLabel(DefaultLabels.INDICATES_END_OF_TEXT);
          changes = true;
        }
      } else if (tb.getLinkDensity() == 1.0) {
        if (text.equals("Comment")) {
          tb.addLabel(DefaultLabels.INDICATES_END_OF_TEXT);
        }
      }
    }
  }
  // timeSpent += System.currentTimeMillis() - t;
  return changes;
}
| 3.26 |
streampipes_TerminatingBlocksFinder_startsWithNumber_rdh
|
/**
* Checks whether the given text t starts with a sequence of digits, followed by one of the given
* strings.
*
* @param t
* The text to examine
* @param len
* The length of the text to examine
* @param str
* Any strings that may follow the digits.
* @return true if at least one combination matches
*/
private static boolean startsWithNumber(final String t, final int len, final String... str) {
  int j = 0;
  while ((j < len) && isDigit(t.charAt(j))) {
    j++;
  }
  if (j != 0) {
    for (String v7 : str) {
      if (t.startsWith(v7, j)) {
        return true;
      }
    }
  }
  return false;
}
| 3.26 |
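For example (inputs illustrative; the method is private, so these calls are conceptual): a block like "15 comments" matches because the digits 15 are consumed first and the remainder starts with " comments":

```java
startsWithNumber("15 comments", "15 comments".length(), " comments");               // true
startsWithNumber("comments", "comments".length(), " comments");                    // false: no leading digits
startsWithNumber("15 users responded in", 21, " comments", " users responded in"); // true
```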
streampipes_TerminatingBlocksFinder_getInstance_rdh
|
/**
* Returns the singleton instance for TerminatingBlocksFinder.
*/
public static TerminatingBlocksFinder getInstance() {
return INSTANCE;
}
| 3.26 |
streampipes_DataProcessorApi_m1_rdh
|
/**
 * Subscribe to an input stream of the processor
*
* @param processor
* The data processor to subscribe to
* @param index
* The index of the input stream
* @param brokerConfigOverride
* Additional kafka settings which will override the default value (see docs)
* @param callback
* The callback where events will be received
*/
@Override
public ISubscription m1(DataProcessorInvocation processor, InputStreamIndex index, IBrokerConfigOverride brokerConfigOverride, EventProcessor callback) {
return new SubscriptionManager(brokerConfigOverride, processor.getInputStreams().get(index.toIndex()).getEventGrounding(), callback).subscribe();
}
| 3.26 |
streampipes_DataProcessorApi_m0_rdh
|
/**
* Subscribe to the output stream of the processor
*
* @param processor
* The data processor to subscribe to
* @param brokerConfigOverride
* Additional broker settings which will override the default value (see docs)
* @param callback
* The callback where events will be received
*/
@Override
public ISubscription m0(DataProcessorInvocation processor, IBrokerConfigOverride brokerConfigOverride, EventProcessor callback) {
return new SubscriptionManager(brokerConfigOverride, processor.getOutputStream().getEventGrounding(), callback).subscribe();
}
| 3.26 |
streampipes_DataProcessorApi_subscribe_rdh
|
/**
* Subscribe to the input stream of the processor
*
* @param processor
* The data processor to subscribe to
* @param index
* The index of the input stream
* @param callback
* The callback where events will be received
*/
@Override
public ISubscription subscribe(DataProcessorInvocation processor, InputStreamIndex index, EventProcessor callback) {
return new SubscriptionManager(processor.getInputStreams().get(index.toIndex()).getEventGrounding(), callback).subscribe();
}
| 3.26 |
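Combined with the client shown earlier, subscribing to a processor's first input stream might look like this; the `processor` instance is assumed to come from the pipeline model, and `EventProcessor` is assumed to be a functional interface:

```java
// Hedged sketch: print every event arriving at the processor's first input stream.
ISubscription subscription = client.processors()
    .subscribe(processor, new InputStreamIndex(0), event -> System.out.println(event.getRaw()));
```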
streampipes_DeclarersSingleton_add_rdh
|
/**
 * @deprecated Use ServiceDefinitionBuilder instead
 */
@Deprecated
public DeclarersSingleton add(IStreamPipesPipelineElement<?> d) {
  if (d instanceof IStreamPipesDataProcessor) {
    addDataProcessor(((IStreamPipesDataProcessor) (d)));
  } else if (d instanceof IStreamPipesDataStream) {
    addDataStream(((IStreamPipesDataStream) (d)));
  } else if (d instanceof IStreamPipesDataSink) {
    addDataSink(((IStreamPipesDataSink) (d)));
  }
  return getInstance();
}
| 3.26 |
streampipes_SupportedFormats_smileFormat_rdh
|
/**
 * Defines that a pipeline element (data processor or data sink) supports processing messages
 * arriving in SMILE format
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat smileFormat() {
return new TransportFormat(MessageFormat.SMILE);
}
| 3.26 |
streampipes_SupportedFormats_thriftFormat_rdh
|
/**
 * Defines that a pipeline element (data processor or data sink) supports processing messages
 * arriving in Thrift format
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat thriftFormat() {
return new TransportFormat(MessageFormat.THRIFT);
}
| 3.26 |
streampipes_SupportedFormats_jsonFormat_rdh
|
/**
 * Defines that a pipeline element (data processor or data sink) supports processing messages arriving in JSON format
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat jsonFormat() {
return new TransportFormat(MessageFormat.JSON);
}
| 3.26 |
streampipes_SupportedFormats_fstFormat_rdh
|
/**
 * Defines that a pipeline element (data processor or data sink) supports processing messages
 * arriving in fast-serialization format
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat fstFormat() {
return new TransportFormat(MessageFormat.FST);
}
| 3.26 |
streampipes_SupportedFormats_cborFormat_rdh
|
/**
 * Defines that a pipeline element (data processor or data sink) supports processing messages
 * arriving in CBOR format
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat cborFormat() {
return new TransportFormat(MessageFormat.CBOR);
}
| 3.26 |
streampipes_SwingingDoorTrendingFilter_filter_rdh
|
/**
* input a newly arrived event and output whether a new characteristic event is filtered
*
* @param time
* the timestamp extracted from the newly arrived event
* @param value
* the value extracted from the newly arrived event
* @param event
* the newly arrived event
* @return true if a new characteristic event is filtered
*/
public boolean filter(long time, double value, Event event) {
  // store the first time and value pair
  if (isFirstValue) {
    isFirstValue = false;
    lastReadTimestamp = time;
    lastReadDouble = value;
    lastReadEvent = event;
    lastStoredTimestamp = time;
    lastStoredDouble = value;
    lastStoredEvent = event;
    return true;
  }
  // if the time distance between the current point and the last stored point is within
  // compressionMinTimeInterval, neither check the two doors nor store any point in that range
  if ((time - lastStoredTimestamp) <= f1) {
    return false;
  }
  // if the time distance between the current point and the last stored point is larger than
  // compressionMaxTimeInterval, reset the two doors and store the current point
  if ((time - lastStoredTimestamp) >= compressionMaxTimeInterval) {
    reset(time, value, event);
    return true;
  }
  final double currentUpperSlope = ((value - lastStoredDouble) - f0) / (time - lastStoredTimestamp);
  if (currentUpperSlope > upperDoor) {
    upperDoor = currentUpperSlope;
  }
  final double currentLowerSlope = ((value - lastStoredDouble) + f0) / (time - lastStoredTimestamp);
  if (currentLowerSlope < lowerDoor) {
    lowerDoor = currentLowerSlope;
  }
  // if the current point's value deviation from the last stored point exceeds compDev,
  // store lastReadPair and update the two doors
  if (upperDoor >= lowerDoor) {
    lastStoredTimestamp = lastReadTimestamp;
    lastStoredDouble = lastReadDouble;
    lastStoredEvent = lastReadEvent;
    upperDoor = ((value - lastStoredDouble) - f0) / (time - lastStoredTimestamp);
    lowerDoor = ((value - lastStoredDouble) + f0) / (time - lastStoredTimestamp);
    lastReadDouble = value;
    lastReadTimestamp = time;
    lastReadEvent = event;
    return true;
  }
  lastReadDouble = value;
  lastReadTimestamp = time;
  lastReadEvent = event;
  return false;
}
| 3.26 |
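A hedged sketch of how `filter` combines with `forward` (two entries below) per incoming event; the field selectors used to extract timestamp and value are illustrative:

```java
void onEvent(Event event, SpOutputCollector collector, SwingingDoorTrendingFilter sdtFilter) {
  long time = event.getFieldBySelector("s0::timestamp").getAsPrimitive().getAsLong();  // illustrative selector
  double value = event.getFieldBySelector("s0::value").getAsPrimitive().getAsDouble(); // illustrative selector
  if (sdtFilter.filter(time, value, event)) {
    sdtFilter.forward(collector); // emits the most recently stored characteristic event
  }
}
```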
streampipes_SwingingDoorTrendingFilter_reset_rdh
|
/**
* if current point to the last stored point's time distance >= compressionMaxTimeInterval, will store current
* point and reset upperDoor and lowerDoor
*
* @param time
* current time
* @param value
* current value
* @param event
* current event
*/
private void reset(long time, double value, Event event) {
lastStoredTimestamp = time;
lastStoredDouble = value;
lastStoredEvent = event;
upperDoor = Integer.MIN_VALUE;
lowerDoor = Integer.MAX_VALUE;
}
| 3.26 |
streampipes_SwingingDoorTrendingFilter_forward_rdh
|
/**
* output the recently filtered characteristic event to the collector
*
* @param collector
* the event collector
 */
public void forward(SpOutputCollector collector) {
collector.collect(lastStoredEvent);
}
| 3.26 |
streampipes_PropertyRequirementsBuilder_create_rdh
|
/**
* Creates new requirements for a data processor or a data sink at a property level. A matching event property
* needs to provide all requirements assigned by this class.
*
* @return {@link PropertyRequirementsBuilder}
*/
public static PropertyRequirementsBuilder create(Datatypes propertyDatatype) {
  return new PropertyRequirementsBuilder(propertyDatatype);
}
| 3.26 |
streampipes_AdapterHealthCheck_m1_rdh
|
/**
 * Checks which adapters are currently running, then calls all workers to validate whether each
 * adapter instance is still running as expected. If an adapter is no longer running, a new
 * worker instance is invoked.
*/
public void m1() {
// Get all adapters
Map<String, AdapterDescription> allRunningInstancesAdapterDescriptions = this.getAllRunningInstancesAdapterDescriptions();
// Get all worker containers that run adapters
Map<String, List<AdapterDescription>> groupByWorker = this.getAllWorkersWithAdapters(allRunningInstancesAdapterDescriptions);
// Get adapters that are not running anymore
Map<String, AdapterDescription> allAdaptersToRecover = this.getAdaptersToRecover(groupByWorker, allRunningInstancesAdapterDescriptions);
// Recover Adapters
this.recoverAdapters(allAdaptersToRecover);
}
| 3.26 |
streampipes_InfluxDbStreamAdapter_getNewestTimestamp_rdh
|
// Returns the newest timestamp in the measurement as unix timestamp in Nanoseconds.
// If no entry is found, a SpRuntimeException is thrown
String getNewestTimestamp() throws SpRuntimeException {
  List<List<Object>> queryResult = influxDbClient.query("SELECT * FROM " + influxDbClient.getMeasurement() + " ORDER BY time DESC LIMIT 1");
  if (queryResult.size() > 0) {
    return InfluxDbClient.getTimestamp((String) (queryResult.get(0).get(0)));
  } else {
    throw new SpRuntimeException("No entry found in query");
  }
}
| 3.26 |
streampipes_ElasticsearchApiCallBridge_cleanup_rdh
|
/**
* Perform any necessary state cleanup.
*/
public void cleanup() {
  // nothing to cleanup
}
| 3.26 |
streampipes_ElasticsearchApiCallBridge_createRequestIndex_rdh
|
/**
* Creates an RequestIndexer instance.
*
* @param bulkProcessor
* The instance of BulkProcessor
* @param flushOnCheckpoint
* If true, the producer will wait until all outstanding action requests have been
* sent to Elasticsearch.
* @param numPendingRequests
* Number of pending action requests not yet acknowledged by Elasticsearch.
* @return The created RequestIndexer.
*/
public RequestIndexer createRequestIndex(BulkProcessor bulkProcessor, boolean flushOnCheckpoint, AtomicLong numPendingRequests) {
  return new BulkProcessorIndexer(bulkProcessor, flushOnCheckpoint, numPendingRequests);
}
| 3.26 |
streampipes_FlinkRuntime_addSource_rdh
|
// TODO
private DataStream<Event> addSource(SourceFunction<Map<String, Object>> sourceFunction, Integer sourceIndex) {
  return env.addSource(sourceFunction)
      .flatMap(new MapToEventConverter<>(runtimeParameters.getInputSourceInfo(sourceIndex).getSourceId(), runtimeParameters))
      .flatMap(new StatisticLogger(null));
}
| 3.26 |
streampipes_FlinkRuntime_getStreamSource_rdh
|
/**
 * This method takes the i-th input stream and creates a source for the Flink graph.
 * Kafka, JMS, and MQTT are supported as transport protocols.
 *
 * @param i
 * @return
 */
private SourceFunction<Map<String, Object>> getStreamSource(int i) {
  if ((runtimeParameters.getModel().getInputStreams().size() - 1) >= i) {
    SpDataStream v0 = runtimeParameters.getModel().getInputStreams().get(i);
    if (v0 != null) {
      TransportProtocol protocol = v0.getEventGrounding().getTransportProtocol();
      TransportFormat format = v0.getEventGrounding().getTransportFormats().get(0);
      SpDataFormatDefinition dataFormatDefinition = getDataFormatDefinition(format);
      if (protocol instanceof KafkaTransportProtocol) {
        return getKafkaConsumer(((KafkaTransportProtocol) (protocol)), dataFormatDefinition);
      } else if (protocol instanceof JmsTransportProtocol) {
        return getJmsConsumer(((JmsTransportProtocol) (protocol)), dataFormatDefinition);
      } else if (protocol instanceof MqttTransportProtocol) {
        return getMqttConsumer(((MqttTransportProtocol) (protocol)), dataFormatDefinition);
      } else {
        return null;
      }
    } else {
      return null;
    }
  } else {
    return null;
  }
}
| 3.26 |
streampipes_FlinkRuntime_appendEnvironmentConfig_rdh
|
/**
* This method can be called in case additional environment settings should be applied to the runtime.
*
* @param env
* The Stream Execution environment
*/
public void appendEnvironmentConfig(StreamExecutionEnvironment env) {
// This sets the stream time characteristics
// The default value is TimeCharacteristic.ProcessingTime
if (this.streamTimeCharacteristic != null) {
env.setStreamTimeCharacteristic(this.streamTimeCharacteristic);
env.setParallelism(1);
}
}
| 3.26 |