name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
rocketmq-connect_WorkerConnector_awaitShutdown_rdh | /**
* Wait for this connector to finish shutting down.
*
* @param timeoutMs
* time in milliseconds to await shutdown
* @return true if successful, false if the timeout was reached
*/
public boolean awaitShutdown(long timeoutMs) {
try {
return shutdownLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
return false;
}
} | 3.26 |
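A standalone sketch of the same await-with-timeout contract (the class name and the simulated countdown thread are illustrative, not part of the snippet above):

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class AwaitShutdownSketch {
    private final CountDownLatch shutdownLatch = new CountDownLatch(1);

    // Mirrors the snippet: true if the latch is released in time, false on timeout or interrupt.
    public boolean awaitShutdown(long timeoutMs) {
        try {
            return shutdownLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            return false;
        }
    }

    public static void main(String[] args) {
        AwaitShutdownSketch sketch = new AwaitShutdownSketch();
        new Thread(sketch.shutdownLatch::countDown).start(); // simulate the connector finishing shutdown
        System.out.println(sketch.awaitShutdown(5_000L));    // typically prints true
    }
}
```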
rocketmq-connect_WorkerConnector_m0_rdh | /**
* initialize connector
*/
public void m0() {
try {
if ((!isSourceConnector()) && (!isSinkConnector())) {
throw new ConnectException("Connector implementations must be a subclass of either SourceConnector or SinkConnector");
}
log.debug("{} Initializing connector {}", this, connector);
connector.validate(keyValue);
connector.init(context);
}
catch (Throwable t) {
log.error("{} Error initializing connector", this, t);
onFailure(t);
}
} | 3.26 |
rocketmq-connect_WorkerConnector_getKeyValue_rdh | /**
* connector config
*
* @return */
public ConnectKeyValue getKeyValue() {
return keyValue;
} | 3.26 |
rocketmq-connect_WorkerConnector_reconfigure_rdh | /**
* reconfigure
*
* @param keyValue
*/
public void reconfigure(ConnectKeyValue keyValue) {
try {
this.keyValue = keyValue;
m0();
connector.stop();
connector.start(keyValue);
} catch (Throwable throwable) {
throw new ConnectException(throwable);
}
} | 3.26 |
rocketmq-connect_RocketMQScheduledReporter_reportTimers_rdh | /**
* report timers
*
* @param timers
*/
private void reportTimers(SortedMap<MetricName, Timer> timers) {
timers.forEach((name, timer) -> {
send(name, timer.getMeanRate());
});
} | 3.26 |
rocketmq-connect_RocketMQScheduledReporter_reportHistograms_rdh | /**
* report histograms
*
* @param histograms
*/
private void reportHistograms(SortedMap<MetricName, Double> histograms) {
histograms.forEach((name, value) -> {
send(name, value);
});
} | 3.26 |
rocketmq-connect_RocketMQScheduledReporter_reportCounters_rdh | /**
* report counters
*
* @param counters
*/
private void reportCounters(SortedMap<MetricName, Long> counters) {
counters.forEach((name, value) -> {
send(name, Double.parseDouble(value.toString()));
});
} | 3.26 |
rocketmq-connect_RocketMQScheduledReporter_reportGauges_rdh | /**
* report gauges
*
* @param gauges
*/
private void reportGauges(SortedMap<MetricName, Object> gauges) {
gauges.forEach((name, value) -> {
send(name, Double.parseDouble(value.toString()));
});
} | 3.26 |
rocketmq-connect_WorkerSourceTask_convertTransformedRecord_rdh | /**
* Convert the source record into a producer record.
*/
protected Message convertTransformedRecord(final String topic, ConnectRecord record) {
if (record == null) {
return null;
}
Message sourceMessage = new Message();
sourceMessage.setTopic(topic);
byte[] key = retryWithToleranceOperator.execute(() ->
keyConverter.fromConnectData(topic, record.getKeySchema(), record.getKey()), Stage.CONVERTER, keyConverter.getClass());
byte[] value = retryWithToleranceOperator.execute(() -> valueConverter.fromConnectData(topic, record.getSchema(), record.getData()), Stage.CONVERTER, valueConverter.getClass());
if (value.length > ConnectorConfig.MAX_MESSAGE_SIZE) {
log.error("Send record, message size is greater than {} bytes, record: {}", ConnectorConfig.MAX_MESSAGE_SIZE, JSON.toJSONString(record));
}
if (key != null) {
sourceMessage.setKeys(new String(key));
}
sourceMessage.setBody(value);
if (retryWithToleranceOperator.failed()) {
return null;
}
// put extend msg property
putExtendMsgProperty(record, sourceMessage, topic);
return sourceMessage;
} | 3.26 |
rocketmq-connect_WorkerSourceTask_execute_rdh | /**
* execute poll and send record
*/
@Override
protected void execute() {
while (isRunning()) {
updateCommittableOffsets();
if (shouldPause()) {
onPause();
try {
// wait unpause
if (awaitUnpause()) {
onResume();
}
continue;
} catch (InterruptedException e) {
// do exception
}
}
if (CollectionUtils.isEmpty(toSendRecord)) {
try {
prepareToPollTask();
long start = System.currentTimeMillis();
toSendRecord = poll();
if ((null != toSendRecord) && (toSendRecord.size() > 0)) {
recordPollReturned(toSendRecord.size(), System.currentTimeMillis() - start);
}
if (toSendRecord == null) {
continue;
}
log.trace("{} About to send {} records to RocketMQ", this, toSendRecord.size());
if (!sendRecord()) {
stopRequestedLatch.await(SEND_FAILED_BACKOFF_MS, TimeUnit.MILLISECONDS);
}
} catch (InterruptedException e) {
// Ignore and allow to exit.
} catch (Exception e) {
try {
finalOffsetCommit(true);
} catch (Exception offsetException) {
log.error("Failed to commit offsets for already-failing task", offsetException);
}
throw e;
} finally {
finalOffsetCommit(false);
// record source poll times
connectStatsManager.incSourceRecordPollTotalTimes();
}
}
AtomicLong atomicLong = connectStatsService.singleSourceTaskTimesTotal(id().toString());
if (null != atomicLong) {
atomicLong.addAndGet(toSendRecord == null ? 0 : toSendRecord.size());
}
}
} | 3.26 |
rocketmq-connect_WorkerSourceTask_initializeAndStart_rdh | /**
* initialize and start
*/
@Override
protected void initializeAndStart() {
try {
producer.start();
} catch (MQClientException e) {
log.error("{} Source task producer start failed!!", this);
throw new ConnectException(e);
}
sourceTask.init(sourceTaskContext);
sourceTask.start(taskConfig);
log.info("{} Source task finished initialization and start", this);
} | 3.26 |
rocketmq-connect_WorkerSourceTask_sendRecord_rdh | /**
* Send list of sourceDataEntries to MQ.
*/
private Boolean sendRecord() throws InterruptedException {
int processed = 0;
final CalcSourceRecordWrite counter = new CalcSourceRecordWrite(toSendRecord.size(), sourceTaskMetricsGroup);
for (ConnectRecord preTransformRecord : toSendRecord) {
retryWithToleranceOperator.sourceRecord(preTransformRecord);
ConnectRecord record = transformChain.doTransforms(preTransformRecord);
String topic = maybeCreateAndGetTopic(record);
Message sourceMessage = convertTransformedRecord(topic, record);
if ((sourceMessage == null) || retryWithToleranceOperator.failed()) {
// commit record
recordFailed(preTransformRecord);
counter.skipRecord();
continue;
}
log.trace("{} Appending record to the topic {} , value {}", this, topic, record.getData());
/**
* prepare to send record
*/
Optional<RecordOffsetManagement.SubmittedPosition> submittedRecordPosition = prepareToSendRecord(preTransformRecord);
try {
SendCallback callback = new SendCallback() {
@Override
public void onSuccess(SendResult result) {
log.info("Successful send message to RocketMQ:{}, Topic {}", result.getMsgId(), result.getMessageQueue().getTopic());
// complete record
counter.completeRecord();
// commit record for custom
recordSent(preTransformRecord, sourceMessage, result);
// ack record position
submittedRecordPosition.ifPresent(RecordOffsetManagement.SubmittedPosition::ack);
}
@Override
public void onException(Throwable throwable) {
log.error("Source task send record failed ,error msg {}. message {}", throwable.getMessage(), JSON.toJSONString(sourceMessage), throwable);
// skip record
counter.skipRecord();
// record send failed
recordSendFailed(false, sourceMessage, preTransformRecord, throwable);
}
};
if (StringUtils.isEmpty(sourceMessage.getKeys())) {
// Round robin
producer.send(sourceMessage, callback);
} else {
// Partition message ordering,
// At the same time, ensure that the data is pulled in an orderly manner, which needs to be guaranteed by sourceTask in the business
producer.send(sourceMessage, new SelectMessageQueueByHash(), sourceMessage.getKeys(), callback);
}
} catch (RetriableException e) {
log.warn("{} Failed to send record to topic '{}'. Backing off before retrying: ", this, sourceMessage.getTopic(), e);
// Intercepted as successfully sent, used to continue sending next time
toSendRecord = toSendRecord.subList(processed, toSendRecord.size());
// remove pre submit position, for retry
submittedRecordPosition.ifPresent(RecordOffsetManagement.SubmittedPosition::remove);
// retry metrics
counter.retryRemaining();
return false;
} catch (InterruptedException e) {
log.error("Send message InterruptedException. message: {}, error info: {}.", sourceMessage, e);
// throw e and stop task
throw e;
} catch (Exception e) {
log.error("Send message MQClientException. message: {}, error info: {}.", sourceMessage, e);
recordSendFailed(true, sourceMessage, preTransformRecord, e);
}
processed++;
}
toSendRecord = null;
return true;
} | 3.26 |
rocketmq-connect_WorkerSourceTask_maybeCreateAndGetTopic_rdh | /**
* maybe create and get topic
*
* @param record
* @return */
private String maybeCreateAndGetTopic(ConnectRecord record) {
String topic = overwriteTopicFromRecord(record);
if (StringUtils.isBlank(topic)) {
// topic from config
topic = taskConfig.getString(SourceConnectorConfig.CONNECT_TOPICNAME);
}
if (StringUtils.isBlank(topic)) {
throw new ConnectException("source connect lack of topic config");
}
if ((!workerConfig.isAutoCreateTopicEnable()) || topicCache.contains(topic)) {
return topic;
}
if (!ConnectUtil.isTopicExist(workerConfig, topic)) {
ConnectUtil.createTopic(workerConfig, new TopicConfig(topic));
}
topicCache.add(topic);
return topic;
} | 3.26 |
rocketmq-connect_WorkerSourceTask_recordSent_rdh | /**
* send success record
*
* @param preTransformRecord
* @param sourceMessage
* @param result
*/
private void recordSent(ConnectRecord preTransformRecord, Message sourceMessage, SendResult result) {
commitTaskRecord(preTransformRecord, result);
} | 3.26 |
rocketmq-connect_TopicNameStrategy_subjectName_rdh | /**
* generate subject name
*
* @param topic
* @param isKey
* @return */
public static String subjectName(String topic, boolean isKey) {
return isKey ? topic + "-key" : topic + "-value";
} | 3.26 |
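A usage sketch (the topic name is hypothetical) showing the two subjects derived from one topic by the method above:

```java
String keySubject = TopicNameStrategy.subjectName("orders", true);    // "orders-key"
String valueSubject = TopicNameStrategy.subjectName("orders", false); // "orders-value"
```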
rocketmq-connect_AvroDatumReaderFactory_get_rdh | /**
* Get avro datum factory
*
* @return */
public static AvroDatumReaderFactory get(boolean useSchemaReflection, boolean avroUseLogicalTypeConverters,
boolean useSpecificAvroReader, boolean avroReflectionAllowNull) {
return new AvroDatumReaderFactory(useSchemaReflection, avroUseLogicalTypeConverters, useSpecificAvroReader, avroReflectionAllowNull);
} | 3.26 |
rocketmq-connect_MemoryStateManagementServiceImpl_stop_rdh | /**
* Stop dependent services (if needed)
*/
@Override
public void stop() {
} | 3.26 |
rocketmq-connect_MemoryStateManagementServiceImpl_get_rdh | /**
* Get the current state of the connector.
*
* @param connector
* the connector name
* @return the state or null if there is none
*/
@Override
public synchronized ConnectorStatus get(String connector) {
return connectors.get(connector);
} | 3.26 |
rocketmq-connect_MemoryStateManagementServiceImpl_putSafe_rdh | /**
* Safely set the state of the connector to the given value. What is
* considered "safe" depends on the implementation, but basically it
* means that the store can provide higher assurance that another worker
* hasn't concurrently written any conflicting data.
*
* @param status
* the status of the connector
*/
@Override
public synchronized void putSafe(ConnectorStatus status) {
put(status);
} | 3.26 |
rocketmq-connect_MemoryStateManagementServiceImpl_getAll_rdh | /**
* Get the states of all tasks for the given connector.
*
* @param connector
* the connector name
* @return a map from task ids to their respective status
*/
@Override
public synchronized Collection<TaskStatus> getAll(String connector) {
return new HashSet<>(tasks.row(connector).values());
} | 3.26 |
rocketmq-connect_MemoryStateManagementServiceImpl_initialize_rdh | /**
* initialize cb config
*
* @param config
*/
@Override
public void initialize(WorkerConfig config, RecordConverter converter) {
this.tasks = new Table<>();
this.connectors = new ConcurrentHashMap<>();
} | 3.26 |
rocketmq-connect_MemoryStateManagementServiceImpl_put_rdh | /**
* Set the state of the connector to the given value.
*
* @param status
* the status of the task
*/
@Override
public synchronized void put(TaskStatus status) {
if (status.getState() == State.DESTROYED) {
tasks.remove(status.getId().connector(), status.getId().task());
} else {
tasks.put(status.getId().connector(), status.getId().task(), status);
}
} | 3.26 |
rocketmq-connect_DebeziumTimeTypes_maybeBindDebeziumLogical_rdh | /**
* maybe bind debezium logical
*
* @param statement
* @param index
* @param schema
* @param value
* @param timeZone
* @return
* @throws SQLException
*/
public static boolean maybeBindDebeziumLogical(PreparedStatement statement, int index, Schema schema, Object value, TimeZone timeZone) throws SQLException {
if (schema.getName() != null) {
switch (schema.getName()) {
case Date.SCHEMA_NAME :
statement.setDate(index, new Date(((long) (DebeziumTimeTypes.toMillsTimestamp(Date.SCHEMA_NAME, value)))), DateTimeUtils.getTimeZoneCalendar(timeZone));
return true;
case Timestamp.SCHEMA_NAME :
statement.setTimestamp(index, new Timestamp(((long) (DebeziumTimeTypes.toMillsTimestamp(Timestamp.SCHEMA_NAME, value)))), DateTimeUtils.getTimeZoneCalendar(timeZone));
return true;
case ZonedTimestamp.SCHEMA_NAME :
statement.setTimestamp(index, new Timestamp(((long) (toMillsTimestamp(ZonedTimestamp.SCHEMA_NAME, value)))), DateTimeUtils.getTimeZoneCalendar(timeZone));
return true;
default :
return false;
}
}
return false;
} | 3.26 |
rocketmq-connect_RebalanceImpl_doRebalance_rdh | /**
* Distribute connectors and tasks according to the {@link RebalanceImpl#allocateConnAndTaskStrategy}.
*/
public void doRebalance() {
List<String> curAliveWorkers = clusterManagementService.getAllAliveWorkers();
if (curAliveWorkers != null) {
if (clusterManagementService instanceof ClusterManagementServiceImpl) {
log.info("Current Alive workers : " + curAliveWorkers.size());
} else if (clusterManagementService instanceof MemoryClusterManagementServiceImpl) {
log.info("Current alive worker : " + curAliveWorkers.iterator().next());
}
}
// exclude deleted connectors
Map<String, ConnectKeyValue> curConnectorConfigs = configManagementService.getConnectorConfigs();
log.trace("Current ConnectorConfigs : " + curConnectorConfigs);
Map<String, List<ConnectKeyValue>> curTaskConfigs = configManagementService.getTaskConfigs();
log.trace("Current TaskConfigs : " + curTaskConfigs);
ConnAndTaskConfigs allocateResult = allocateConnAndTaskStrategy.allocate(curAliveWorkers, clusterManagementService.getCurrentWorker(), curConnectorConfigs, curTaskConfigs);
log.trace("Allocated connector:{}", allocateResult.getConnectorConfigs());
log.trace("Allocated task:{}", allocateResult.getTaskConfigs());
updateProcessConfigsInRebalance(allocateResult);
} | 3.26 |
rocketmq-connect_RebalanceImpl_updateProcessConfigsInRebalance_rdh | /**
* Start all the connectors and tasks allocated to current process.
*
* @param allocateResult
*/
private void updateProcessConfigsInRebalance(ConnAndTaskConfigs allocateResult) {
try {
worker.startConnectors(allocateResult.getConnectorConfigs(), connectController);
} catch (Throwable e) {
log.error("RebalanceImpl#updateProcessConfigsInRebalance start connector failed", e);
}
try {
worker.startTasks(allocateResult.getTaskConfigs());
} catch (Throwable e) {
log.error("RebalanceImpl#updateProcessConfigsInRebalance start task failed", e);
}
} | 3.26 |
rocketmq-connect_CassandraSinkTask_start_rdh | /**
* Remember always close the CqlSession according to
* https://docs.datastax.com/en/developer/java-driver/4.5/manual/core/
*
* @param props
*/
@Override
public void start(KeyValue props) {
try {
ConfigUtil.load(props, this.config);
cqlSession = DBUtils.initCqlSession(config);
log.info("init data source success");
} catch (Exception e) {
log.error("Cannot start Cassandra Sink Task because of configuration error{}", e);
}
String mode = config.getMode();
if (mode.equals("bulk")) {
Updater updater = new Updater(config, cqlSession);
try {
updater.start();
tableQueue.add(updater);
} catch (Exception e) {
log.error("fail to start updater{}", e);
}
}
} | 3.26 |
rocketmq-connect_JsonSchemaData_nonOptionalSchema_rdh | /**
* non-optional schema
*
* @param schema
* @return */
private static Schema nonOptionalSchema(Schema schema) {
return new Schema(schema.getName(), schema.getFieldType(), false, schema.getDefaultValue(), schema.getVersion(), schema.getDoc(), FieldType.STRUCT.equals(schema.getFieldType()) ? schema.getFields() : null, FieldType.MAP.equals(schema.getFieldType()) ? schema.getKeySchema() : null, FieldType.MAP.equals(schema.getFieldType()) || FieldType.ARRAY.equals(schema.getFieldType()) ? schema.getValueSchema() : null, schema.getParameters());
} | 3.26 |
rocketmq-connect_JsonSchemaData_toConnectSchema_rdh | /**
* to connect schema
*
* @param jsonSchema
* @param version
* @param forceOptional
* @return */
private Schema toConnectSchema(Schema jsonSchema, Integer version, boolean forceOptional) {
if (jsonSchema == null) {
return null;
}
final SchemaBuilder builder;
if (jsonSchema instanceof BooleanSchema) {
builder = SchemaBuilder.bool();
} else if (jsonSchema instanceof NumberSchema) {
NumberSchema numberSchema = ((NumberSchema) (jsonSchema));
String type = ((String) (numberSchema.getUnprocessedProperties().get(CONNECT_TYPE_PROP)));
if (type == null) {
builder = (numberSchema.requiresInteger()) ? SchemaBuilder.int64() : SchemaBuilder.float64();
} else {
switch (type) {
case CONNECT_TYPE_INT8 :
builder = SchemaBuilder.int8();
break;
case CONNECT_TYPE_INT16 :
builder = SchemaBuilder.int16();
break;
case CONNECT_TYPE_INT32 :
builder = SchemaBuilder.int32();
break;
case CONNECT_TYPE_INT64 :
builder = SchemaBuilder.int64();
break;
case CONNECT_TYPE_FLOAT32 :
builder = SchemaBuilder.float32();
break;
case CONNECT_TYPE_FLOAT64 :
builder = SchemaBuilder.float64();
break;
case CONNECT_TYPE_BYTES :
builder = SchemaBuilder.bytes();
break;
default :
throw new IllegalArgumentException("Unsupported type " + type);
}
}
} else if (jsonSchema instanceof StringSchema) {
String v48 = ((String) (jsonSchema.getUnprocessedProperties().get(CONNECT_TYPE_PROP)));
builder = (CONNECT_TYPE_BYTES.equals(v48)) ? SchemaBuilder.bytes() : SchemaBuilder.string();
} else if (jsonSchema instanceof EnumSchema) {
EnumSchema enumSchema = ((EnumSchema) (jsonSchema));
builder = SchemaBuilder.string();
builder.parameter(JSON_TYPE_ENUM, "");// JSON enums have no name, use empty string as placeholder
for (Object enumObj : enumSchema.getPossibleValuesAsList()) {
String enumSymbol = enumObj.toString();
builder.parameter(JSON_TYPE_ENUM_PREFIX + enumSymbol, enumSymbol);
}
} else if (jsonSchema instanceof CombinedSchema) {
CombinedSchema combinedSchema = ((CombinedSchema) (jsonSchema));
CombinedSchema.ValidationCriterion v53 = combinedSchema.getCriterion();
String name = null;
if ((v53 == CombinedSchema.ONE_CRITERION) || (v53 == CombinedSchema.ANY_CRITERION)) {
name = JSON_TYPE_ONE_OF;
} else if (v53 == CombinedSchema.ALL_CRITERION) {
return allOfToConnectSchema(combinedSchema, version, forceOptional);
} else {
throw new IllegalArgumentException("Unsupported criterion: " + v53);
}
if (combinedSchema.getSubschemas().size() == 2) {
boolean foundNullSchema = false;
Schema nonNullSchema = null;
for (Schema subSchema : combinedSchema.getSubschemas()) {
if (subSchema instanceof NullSchema) {
foundNullSchema = true;
} else {
nonNullSchema = subSchema;
}
}
if (foundNullSchema) {
return toConnectSchema(nonNullSchema, version, true);
}
}
int index = 0;
builder = SchemaBuilder.struct().name(name);
for (Schema subSchema : combinedSchema.getSubschemas()) {
if (subSchema instanceof NullSchema) {
builder.optional();
} else {
String subFieldName = (name + ".field.") + (index++);
builder.field(subFieldName, toConnectSchema(subSchema, null, true));
}
}
} else if (jsonSchema instanceof ArraySchema) {
ArraySchema arraySchema = ((ArraySchema) (jsonSchema));
Schema itemsSchema = arraySchema.getAllItemSchema();
if (itemsSchema == null) {
throw new ConnectException("Array schema did not specify the items type");
}
String type = ((String) (arraySchema.getUnprocessedProperties().get(CONNECT_TYPE_PROP)));
if (f0.equals(type) && (itemsSchema instanceof ObjectSchema)) {
ObjectSchema objectSchema = ((ObjectSchema) (itemsSchema));
builder = SchemaBuilder.map(toConnectSchema(objectSchema.getPropertySchemas().get(KEY_FIELD)), toConnectSchema(objectSchema.getPropertySchemas().get(VALUE_FIELD)));
} else {
builder = SchemaBuilder.array(toConnectSchema(itemsSchema));
}
} else if (jsonSchema instanceof ObjectSchema) {
ObjectSchema objectSchema = ((ObjectSchema) (jsonSchema));
String type = ((String) (objectSchema.getUnprocessedProperties().get(CONNECT_TYPE_PROP)));
if (f0.equals(type)) {
builder = SchemaBuilder.map(SchemaBuilder.string().build(), toConnectSchema(objectSchema.getSchemaOfAdditionalProperties()));
} else {
builder = SchemaBuilder.struct();
Map<String, Schema> properties = objectSchema.getPropertySchemas();
SortedMap<Integer, Map.Entry<String, Schema>> sortedMap = new TreeMap<>();
for (Map.Entry<String, Schema> property : properties.entrySet()) {
Schema subSchema = property.getValue();
Integer index = ((Integer) (subSchema.getUnprocessedProperties().get(CONNECT_INDEX_PROP)));
if (index == null) {
index = sortedMap.size();
}
sortedMap.put(index, property);
}
for (Map.Entry<String, Schema> property : sortedMap.values()) {
String subFieldName = property.getKey();
Schema subSchema = property.getValue();
boolean isFieldOptional = config.useOptionalForNonRequiredProperties() && (!objectSchema.getRequiredProperties().contains(subFieldName));
builder.field(subFieldName, toConnectSchema(subSchema, null, isFieldOptional));
}
}
} else if (jsonSchema instanceof ReferenceSchema) {
ReferenceSchema refSchema = ((ReferenceSchema) (jsonSchema));
return toConnectSchema(refSchema.getReferredSchema(), version, forceOptional);
} else {
throw new ConnectException("Unsupported schema type " + jsonSchema.getClass().getName());
}
String v77 = jsonSchema.getTitle();
if (v77 != null) {
builder.name(v77);
}
Integer connectVersion = ((Integer) (jsonSchema.getUnprocessedProperties().get(CONNECT_VERSION_PROP)));
if (connectVersion != null) {
builder.version(connectVersion);
} else if (version != null) {
builder.version(version);
}
String description = jsonSchema.getDescription();
if (description != null) {
builder.doc(description);
}
Map<String, String> parameters = ((Map<String, String>) (jsonSchema.getUnprocessedProperties().get(CONNECT_PARAMETERS_PROP)));
if (parameters != null) {
builder.parameters(parameters);
}
if (jsonSchema.hasDefaultValue()) {
JsonNode jsonNode = OBJECT_MAPPER.convertValue(jsonSchema.getDefaultValue(), JsonNode.class);
builder.defaultValue(toConnectData(builder.build(), jsonNode));
}
if (!forceOptional) {
builder.required();
}
Schema result = builder.build();
return result;
} | 3.26 |
rocketmq-connect_JsonSchemaData_m0_rdh | /**
* Convert connect data to json schema
*
* @param schema
* @param logicalValue
* @return */
public JsonNode m0(Schema schema, Object logicalValue) {
if (logicalValue == null) {
if (schema == null) {
// Any schema is valid and we don't have a default, so treat this as an optional schema
return null;
}
if (schema.getDefaultValue() != null) {
return m0(schema, schema.getDefaultValue());
}
if (schema.isOptional()) {
return JSON_NODE_FACTORY.nullNode();
}
return null;
}
Object value = logicalValue;
if ((schema != null) && (schema.getName() != null)) {
ConnectToJsonLogicalTypeConverter logicalConverter = TO_JSON_LOGICAL_CONVERTERS.get(schema.getName());
if (logicalConverter != null) {
return logicalConverter.convert(schema, logicalValue, config);
}
}
try {
final FieldType schemaType;
if (schema == null) {
schemaType = Schema.schemaType(value.getClass());
if (schemaType == null) {
throw new ConnectException(("Java class " + value.getClass()) + " does not have corresponding schema type.");
}
} else {
schemaType = schema.getFieldType();
}
switch (schemaType) {
case INT8 :
// Use shortValue to create a ShortNode, otherwise an IntNode will be created
return JSON_NODE_FACTORY.numberNode(((Byte) (value)).shortValue());
case INT16 :
return JSON_NODE_FACTORY.numberNode(((Short) (value)));
case INT32 :
return JSON_NODE_FACTORY.numberNode(((Integer) (value)));
case INT64 :
return JSON_NODE_FACTORY.numberNode(((Long) (value)));
case FLOAT32 :
return JSON_NODE_FACTORY.numberNode(((Float) (value)));
case FLOAT64 :
return JSON_NODE_FACTORY.numberNode(((Double) (value)));
case BOOLEAN :
return JSON_NODE_FACTORY.booleanNode(((Boolean) (value)));
case STRING :
CharSequence charSeq = ((CharSequence) (value));
return JSON_NODE_FACTORY.textNode(charSeq.toString());
case BYTES :
if (value instanceof byte[]) {
return JSON_NODE_FACTORY.binaryNode(((byte[]) (value)));
} else if (value instanceof ByteBuffer) {
return JSON_NODE_FACTORY.binaryNode(((ByteBuffer) (value)).array());
} else if (value instanceof BigDecimal) {
return JSON_NODE_FACTORY.numberNode(((BigDecimal) (value)));
} else {
throw new ConnectException("Invalid type for bytes type: " + value.getClass());
}
case ARRAY :
{
Collection v92 = ((Collection) (value));
ArrayNode list = JSON_NODE_FACTORY.arrayNode();
for (Object elem : v92) {
Schema valueSchema = (schema == null) ? null : schema.getValueSchema();
JsonNode fieldValue = m0(valueSchema, elem);
list.add(fieldValue);
}
return list;
}
case MAP :
{
Map<?, ?> map = ((Map<?, ?>) (value));
// If true, using string keys and JSON object; if false, using non-string keys and
// Array-encoding
boolean objectMode;
if (schema == null) {
objectMode = true;
for (Map.Entry<?, ?> entry : map.entrySet()) {
if (!(entry.getKey() instanceof String)) {
objectMode = false;
break;
}
}
} else {
objectMode = (schema.getKeySchema().getFieldType() == FieldType.STRING) && (!schema.getKeySchema().isOptional());
}
ObjectNode obj = null;
ArrayNode list = null;
if (objectMode) {
obj = JSON_NODE_FACTORY.objectNode();
} else {
list = JSON_NODE_FACTORY.arrayNode();
}
for (Map.Entry<?, ?> entry : map.entrySet()) {
Schema keySchema = (schema == null) ? null : schema.getKeySchema();
Schema valueSchema = (schema == null) ? null : schema.getValueSchema();
JsonNode v105 = m0(keySchema, entry.getKey());
JsonNode mapValue = m0(valueSchema, entry.getValue());
if (objectMode) {
obj.set(v105.asText(), mapValue);
} else {
ObjectNode o = JSON_NODE_FACTORY.objectNode();
o.set(KEY_FIELD, v105);
o.set(VALUE_FIELD, mapValue);
list.add(o);
}
}
return objectMode ? obj : list;
}
case STRUCT :
{
Struct struct = ((Struct) (value));
if (!struct.schema().equals(schema)) {
throw new ConnectException("Mismatching schema.");
}
// This handles the inverting of a union which is held as a struct, where each field is
// one of the union types.
if (JSON_TYPE_ONE_OF.equals(schema.getName())) {
for (Field field : schema.getFields()) {
Object object = struct.get(field);
if (object != null) {
return m0(field.getSchema(), object);
}
}
return m0(schema, null);
} else {
ObjectNode obj = JSON_NODE_FACTORY.objectNode();
for (Field field : schema.getFields()) {
JsonNode jsonNode = m0(field.getSchema(), struct.get(field));
if (jsonNode != null) {
obj.set(field.getName(), jsonNode);
}
}
return obj;
}
}
default :
break;
}
throw new ConnectException(("Couldn't convert " +
value) + " to JSON.");
} catch (ClassCastException e) {
String schemaTypeStr = (schema != null) ? schema.getFieldType().toString() : "unknown schema";
throw new ConnectException((("Invalid type for " + schemaTypeStr) + ": ") + value.getClass());
}
} | 3.26 |
rocketmq-connect_JsonSchemaData_toConnectData_rdh | /**
* to connect data
*
* @param schema
* @param jsonValue
* @return */
public static Object toConnectData(Schema schema, JsonNode jsonValue) {
final FieldType schemaType;
if (schema != null) {
schemaType = schema.getFieldType();
if ((jsonValue == null) || jsonValue.isNull()) {
if (schema.getDefaultValue() != null) {
// any logical type conversions should already have been applied
return schema.getDefaultValue();
}
if ((jsonValue == null) || schema.isOptional()) {
return null;
}
throw new ConnectException(("Invalid null value for required " + schemaType) + " field");
}
} else {
if (jsonValue == null) {
return null;
}
switch (jsonValue.getNodeType()) {
case NULL :
return null;
case BOOLEAN :
schemaType = FieldType.BOOLEAN;
break;
case NUMBER :
if (jsonValue.isIntegralNumber()) {
schemaType = FieldType.INT64;
} else {
schemaType = FieldType.FLOAT64;
}
break;
case ARRAY :
schemaType = FieldType.ARRAY;
break;
case OBJECT :
schemaType = FieldType.MAP;
break;
case STRING :
schemaType = FieldType.STRING;
break;
case BINARY :
case MISSING :
case POJO :
default :
schemaType = null;
break;
}
}
final JsonToConnectTypeConverter typeConverter = TO_CONNECT_CONVERTERS.get(schemaType);
if (typeConverter == null) {
throw new ConnectException("Unknown schema type: " + schemaType);
}
if ((schema != null) && (schema.getName() != null)) {
JsonToConnectLogicalTypeConverter logicalConverter = TO_CONNECT_LOGICAL_CONVERTERS.get(schema.getName());
if (logicalConverter != null) {
return logicalConverter.convert(schema, jsonValue);
}
}
return typeConverter.convert(schema, jsonValue);
} | 3.26 |
rocketmq-connect_JsonSchemaData_fromJsonSchema_rdh | /**
* from json schema
*
* @param schema
* @return */
public Schema fromJsonSchema(Schema schema) {
return rawSchemaFromConnectSchema(schema);
} | 3.26 |
rocketmq-connect_JsonSchemaData_allOfToConnectSchema_rdh | /**
* all of to connect schema
*
* @param combinedSchema
* @param version
* @param forceOptional
* @return */
private Schema allOfToConnectSchema(CombinedSchema combinedSchema, Integer version, boolean forceOptional) {
ConstSchema constSchema = null;
EnumSchema enumSchema = null;
NumberSchema numberSchema = null;
StringSchema stringSchema = null;
for (Schema subSchema : combinedSchema.getSubschemas()) {
if (subSchema instanceof ConstSchema) {
constSchema = ((ConstSchema) (subSchema));
} else if (subSchema instanceof EnumSchema) {
enumSchema = ((EnumSchema) (subSchema));
} else if (subSchema instanceof NumberSchema) {
numberSchema = ((NumberSchema) (subSchema));
} else if (subSchema instanceof StringSchema) {
stringSchema = ((StringSchema) (subSchema));
}
}
if ((constSchema != null) && (stringSchema != null)) {
return toConnectSchema(stringSchema, version, forceOptional);
} else if ((constSchema != null) && (numberSchema != null)) {
return toConnectSchema(numberSchema, version, forceOptional);
} else if ((enumSchema != null) && (stringSchema != null)) {
return toConnectSchema(enumSchema, version, forceOptional);
} else if (((numberSchema != null) && (stringSchema != null)) && (stringSchema.getFormatValidator() != null)) {
return toConnectSchema(numberSchema, version, forceOptional);
} else {
throw new IllegalArgumentException((("Unsupported criterion " + combinedSchema.getCriterion()) + " for ") + combinedSchema);}
} | 3.26 |
rocketmq-connect_ChangeCaseConfig_to_rdh | /**
* to
*
* @return */
public CaseFormat to() {
return this.to;
} | 3.26 |
rocketmq-connect_ChangeCaseConfig_from_rdh | /**
* from
*
* @return */
public CaseFormat from() {
return this.f0;
} | 3.26 |
rocketmq-connect_MemoryConfigManagementServiceImpl_resumeConnector_rdh | /**
* resume connector
*
* @param connectorName
*/
@Override
public void resumeConnector(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException(("Connector [" + connectorName) + "] does not exist");
}
ConnectKeyValue config = connectorKeyValueStore.get(connectorName);
config.setEpoch(System.currentTimeMillis());
config.setTargetState(TargetState.STARTED);
connectorKeyValueStore.put(connectorName, config.nextGeneration());
triggerListener();
} | 3.26 |
rocketmq-connect_MemoryConfigManagementServiceImpl_getConnectorConfigs_rdh | /**
* get all connector configs enabled
*
* @return */
@Override
public Map<String, ConnectKeyValue> getConnectorConfigs() {
return connectorKeyValueStore.getKVMap();
} | 3.26 |
rocketmq-connect_MemoryConfigManagementServiceImpl_pauseConnector_rdh | /**
* pause connector
*
* @param connectorName
*/
@Override
public void pauseConnector(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException(("Connector [" + connectorName) + "] does not exist");
}
ConnectKeyValue config = connectorKeyValueStore.get(connectorName);
config.setTargetState(TargetState.PAUSED);
connectorKeyValueStore.put(connectorName, config.nextGeneration());
triggerListener();
} | 3.26 |
rocketmq-connect_DelegatingClassLoader_pluginClassLoader_rdh | /**
* Retrieve the PluginClassLoader associated with a plugin class
*
* @param name
* @return */
public PluginClassLoader pluginClassLoader(String name) {
if (StringUtils.isEmpty(name) || StringUtils.isBlank(name)) {
return null;
}
if (!PluginUtils.shouldLoadInIsolation(name)) {
return null;
}
SortedMap<PluginWrapper<?>, ClassLoader> inner = pluginLoaders.get(name);
if (inner == null) {
return null;
}
ClassLoader pluginLoader = inner.get(inner.lastKey());
return pluginLoader instanceof PluginClassLoader ? ((PluginClassLoader) (pluginLoader)) : null;
} | 3.26 |
rocketmq-connect_BrokerBasedLog_send_rdh | /**
* send data to all workers
*
* @param key
* @param value
* @param callback
*/
@Override
public void send(K key, V value, Callback callback) {
try {
Map.Entry<byte[], byte[]> encode = encode(key, value);
byte[] body = encode.getValue();
if (body.length > MAX_MESSAGE_SIZE) {
log.error("Message size is greater than {} bytes, key: {}, value {}", MAX_MESSAGE_SIZE, key, value);
return;
}
String encodeKey = Base64Util.base64Encode(encode.getKey());
Message message = new Message(topicName, null, encodeKey, body);
producer.send(message, new SelectMessageQueueByHash(), encodeKey, new SendCallback() {
@Override
public void onSuccess(SendResult result) {
log.info("Send async message OK, msgId: {},topic:{}", result.getMsgId(), topicName);
callback.onCompletion(null, value);
}
@Override
public void onException(Throwable throwable) {
if (null != throwable) {
log.error("Send async message Failed, error: {}", throwable);
// Keep sending until success
send(key, value, callback);
}
}
});
} catch (Exception e) {
log.error("BrokerBaseLog send async message Failed.", e);
}
} | 3.26 |
rocketmq-connect_BrokerBasedLog_prepare_rdh | /**
* Preparation before startup
*/
private void prepare() {
Set<String> consumerGroupSet = ConnectUtil.fetchAllConsumerGroupList(workerConfig);
if (!consumerGroupSet.contains(groupName)) {
log.info("Try to create group: {}!", groupName);
ConnectUtil.createSubGroup(workerConfig, groupName);
}
if (!ConnectUtil.isTopicExist(workerConfig, topicName)) {
log.info("Try to create store topic: {}!", topicName);TopicConfig topicConfig = new TopicConfig(topicName, 1, 1, PermName.PERM_READ | PermName.PERM_WRITE);
ConnectUtil.createTopic(workerConfig, topicConfig);
}
} | 3.26 |
rocketmq-connect_BrokerBasedLog_readToLogEnd_rdh | /**
* read to log end
*/
private void readToLogEnd() throws MQClientException, MQBrokerException, RemotingException, InterruptedException {
if (!enabledCompactTopic) {
return;
}
Map<MessageQueue, TopicOffset> minAndMaxOffsets = ConnectUtil.offsetTopics(workerConfig, Lists.newArrayList(topicName)).get(topicName);
while (!minAndMaxOffsets.isEmpty()) {
Iterator<Map.Entry<MessageQueue, TopicOffset>> it = minAndMaxOffsets.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<MessageQueue, TopicOffset> offsetEntry = it.next();
long lastConsumedOffset = this.consumer.getOffsetStore().readOffset(offsetEntry.getKey(), ReadOffsetType.READ_FROM_MEMORY);
if ((lastConsumedOffset + 1) >= offsetEntry.getValue().getMaxOffset()) {
log.trace("Read to end offset {} for {}",
offsetEntry.getValue().getMaxOffset(), offsetEntry.getKey().getQueueId());
it.remove();
} else {
log.trace("Behind end offset {} for {}; last-read offset is {}", offsetEntry.getValue().getMaxOffset(), offsetEntry.getKey().getQueueId(), lastConsumedOffset);
poll(5000);
break;
}
}
}
} | 3.26 |
rocketmq-connect_DefaultJdbcRecordBinder_getSqlTypeForSchema_rdh | /**
* Dialects not supporting `setObject(index, null)` can override this method
* to provide a specific sqlType, as per the JDBC documentation
*
* @param schema
* the schema
* @return the SQL type
*/
protected Integer getSqlTypeForSchema(Schema schema) {
return null;
} | 3.26 |
rocketmq-connect_JdbcSinkConnector_taskConfigs_rdh | /**
* Returns a set of configurations for Tasks based on the current configuration,
* producing at most count configurations.
*
* @param maxTasks
* maximum number of configurations to generate
* @return configurations for Tasks
*/
@Override
public List<KeyValue> taskConfigs(int maxTasks) {
log.info("Starting task config !!! ");
List<KeyValue> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
configs.add(this.connectConfig);
}
return configs;
} | 3.26 |
rocketmq-connect_JdbcSinkConnector_m0_rdh | /**
* Should invoke before start the connector.
*
* @param config
* @return error message
*/
@Override
public void m0(KeyValue config) {
// do validate config
} | 3.26 |
rocketmq-connect_LocalPositionManagementServiceImpl_replicaOffsets_rdh | /**
* send change position
*/
private void replicaOffsets() {
while (true) {
// wait for the last send to complete
if (committing.get()) {
try {
sleep(1000);
continue;
} catch (InterruptedException e) {
}
}
synchronize(false);
break;
}
} | 3.26 |
rocketmq-connect_LocalPositionManagementServiceImpl_restorePosition_rdh | /**
* restore position
*/
protected void restorePosition() {
set(PositionChange.ONLINE, new ExtendRecordPartition(null, new HashMap<>()), new RecordOffset(new HashMap<>()));
} | 3.26 |
rocketmq-connect_SourceOffsetCompute_sourcePartitions_rdh | /**
* source partitions
*
* @param tableId
* @param offsetSuffix
* @return */
public static Map<String, String> sourcePartitions(String prefix, TableId tableId, String offsetSuffix) {
String fqn = ExpressionBuilder.create().append(tableId, QuoteMethod.NEVER).toString();
Map<String, String> partition = new HashMap<>();
partition.put(JdbcSourceConfigConstants.TABLE_NAME_KEY(offsetSuffix), fqn);
if (StringUtils.isNotEmpty(prefix)) {
partition.put(TOPIC, prefix.concat(tableId.tableName()));
} else {
partition.put(TOPIC, tableId.tableName());
}
return partition;
} | 3.26 |
rocketmq-connect_SourceOffsetCompute_buildTablePartitions_rdh | /**
* build table partitions
*
* @param tableLoadMode
* @param queryMode
* @param tables
* @param dialect
* @return */
private static Map<String, RecordPartition> buildTablePartitions(TableLoadMode tableLoadMode, QueryMode queryMode, List<String> tables, DatabaseDialect dialect, String offsetSuffix, String topicPrefix) {
Map<String, RecordPartition> partitionsByTableFqn = new HashMap<>();
if (((tableLoadMode == TableLoadMode.MODE_INCREMENTING) || (tableLoadMode == TableLoadMode.MODE_TIMESTAMP)) || (tableLoadMode == TableLoadMode.MODE_TIMESTAMP_INCREMENTING)) {
switch (queryMode) {
case TABLE :
for (String table : tables) {
// Find possible partition maps for different offset protocols
// We need to search by all offset protocol partition keys to support compatibility
TableId tableId = dialect.parseTableNameToTableId(table);
RecordPartition tablePartition = new RecordPartition(SourceOffsetCompute.sourcePartitions(topicPrefix, tableId, offsetSuffix));
partitionsByTableFqn.put(table, tablePartition);
}
break;
case QUERY :
partitionsByTableFqn.put(JdbcSourceConfigConstants.QUERY_NAME_VALUE, new RecordPartition(sourceQueryPartitions(topicPrefix, offsetSuffix)));
break;
default :
throw new ConnectException("Unknown query mode: " + queryMode);
}
}
return partitionsByTableFqn;
} | 3.26 |
rocketmq-connect_SourceOffsetCompute_initOffset_rdh | /**
* init and compute offset
*
* @return */
public static Map<String, Map<String, Object>> initOffset(JdbcSourceTaskConfig config, SourceTaskContext context, DatabaseDialect dialect, CachedConnectionProvider cachedConnectionProvider) {
List<String> tables = config.getTables();
String query = config.getQuery();
TableLoadMode mode = TableLoadMode.findTableLoadModeByName(config.getMode());
QueryMode queryMode = (!StringUtils.isEmpty(query)) ? QueryMode.QUERY : QueryMode.TABLE;
// step 1 ----- compute partitions
Map<String, RecordPartition> partitionsByTableFqn = buildTablePartitions(mode, queryMode, tables, dialect, config.getOffsetSuffix(), config.getTopicPrefix());
// step 2 ----- get last time offset
Map<RecordPartition, RecordOffset> offsets = null;
if (partitionsByTableFqn != null) {
offsets = context.offsetStorageReader().readOffsets(partitionsByTableFqn.values());
}
// step 3 ----- compute offset init value
List<String> tablesOrQuery = (queryMode == QueryMode.QUERY) ? Collections.singletonList(query) : tables;
return initOffsetValues(cachedConnectionProvider, dialect, queryMode, partitionsByTableFqn, offsets, config, tablesOrQuery);
} | 3.26 |
rocketmq-connect_KafkaSinkValueConverter_convertKafkaValue_rdh | /**
* convert value
*
* @param targetSchema
* @param originalValue
* @return */
private Object convertKafkaValue(Schema targetSchema, Object originalValue) {
if (targetSchema == null) {
if (originalValue == null) {
return null;
}
return originalValue;
}
switch (targetSchema.type()) {
case INT8 :
case INT16 :
case INT32 :
case INT64 :
case FLOAT32 :
case FLOAT64 :
case BOOLEAN :
case STRING :
case BYTES :
return originalValue;
case STRUCT :
Struct toStruct = new Struct(targetSchema);
if (originalValue != null) {
convertStructValue(toStruct, ((Struct) (originalValue)));
}
return toStruct;
case ARRAY :
List<Object> array = ((List<Object>) (originalValue));
List<Object> newArray = new ArrayList<>();
array.forEach(item -> {
newArray.add(convertKafkaValue(targetSchema.valueSchema(), item));
});
return newArray;
case MAP :
Map mapData = ((Map) (originalValue));
Map newMapData = new ConcurrentHashMap();
mapData.forEach((k, v) -> {
newMapData.put(convertKafkaValue(targetSchema.keySchema(), k), convertKafkaValue(targetSchema.valueSchema(), v));
});
return newMapData;
default :
throw new RuntimeException(" Type not supported: {}" + targetSchema.type());
}
} | 3.26 |
rocketmq-connect_KafkaSinkValueConverter_convertStructValue_rdh | /**
* convert struct value
*
* @param toStruct
* @param originalStruct
*/
private void convertStructValue(Struct toStruct, Struct originalStruct) {
for (Field field : toStruct.schema().fields()) {
try {
Schema.Type type = field.schema().type();
Object value = originalStruct.get(field.name());
switch (type) {
case INT8 :
case INT16 :
case INT32 :
case INT64 :
case FLOAT32 :
case FLOAT64 :
case BOOLEAN :
case STRING :
case BYTES :
toStruct.put(field.name(), value);
break;
case STRUCT :
case ARRAY :
case MAP :
toStruct.put(field.name(), convertKafkaValue(toStruct.schema().field(field.name()).schema(), value));
break;
}
} catch (Exception ex) {
logger.error("Convert to kafka schema failure, {}", ex);
throw new ConnectException(ex);
}
}
} | 3.26 |
rocketmq-connect_JsonSchemaDeserializer_deserialize_rdh | /**
* deserialize
*
* @param topic
* @param isKey
* @param payload
* @return */
@Override
public JsonSchemaAndValue deserialize(String topic, boolean isKey, byte[] payload) {
if (payload == null) {
return null;
}
ByteBuffer buffer = ByteBuffer.wrap(payload);
long v1 = buffer.getLong();
GetSchemaResponse response = schemaRegistryClient.getSchemaByRecordId(JsonSchemaData.NAMESPACE, topic, v1);
int length = buffer.limit() - ID_SIZE;
int start = buffer.position() + buffer.arrayOffset();
// Return JsonNode if type is null
JsonNode value = null;
try {
value = OBJECT_MAPPER.readTree(new ByteArrayInputStream(buffer.array(), start, length));
} catch (IOException e) {
throw new RuntimeException(e);
}
// load json schema
SchemaLoader.SchemaLoaderBuilder schemaLoaderBuilder = SchemaLoader.builder().useDefaults(true).draftV7Support();
JSONObject jsonObject = new JSONObject(response.getIdl());
schemaLoaderBuilder.schemaJson(jsonObject);
Schema schema = schemaLoaderBuilder.build().load().build();
// validate schema
if (jsonSchemaConverterConfig.validate()) {
try {
JsonSchemaUtils.validate(schema, value);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
}
return new JsonSchemaAndValue(new JsonSchema(schema), value);
} | 3.26 |
rocketmq-connect_FilePositionManagementServiceImpl_call_rdh | /**
* Computes a result, or throws an exception if unable to do so.
*
* @return computed result
* @throws Exception
* if unable to compute a result
*/
@Override
public Void call() {
try {
positionStore.persist();
if (callback != null) {
callback.onCompletion(null, null, null);
}
} catch (Exception error) {
callback.onCompletion(error, null, null);
}
return null;
} | 3.26 |
rocketmq-connect_MySqlDatabaseDialect_getSqlType_rdh | /**
* get sql type
*
* @param field
* @return */
@Override
protected String getSqlType(SinkRecordField field) {
switch (field.schemaType()) {
case INT8 :
return "TINYINT";
case INT32 :
return "INT";
case INT64 :
return "BIGINT";
case FLOAT32 :
return "FLOAT";
case FLOAT64 :
return "DOUBLE";
case BOOLEAN :
return "TINYINT";
case STRING :
return "TEXT";
case BYTES :
return "VARBINARY(1024)";
default :
return super.getSqlType(field);
}
} | 3.26 |
rocketmq-connect_ConfigManagementService_configure_rdh | /**
* Configure class with the given key-value pairs
*
* @param config
* can be DistributedConfig or StandaloneConfig
*/
default void configure(WorkerConfig config) {
} | 3.26 |
rocketmq-connect_StateManagementService_persist_rdh | /**
* Persist all the configs in a store.
*/
default void persist() {
} | 3.26 |
rocketmq-connect_KafkaConnectAdaptorSource_transforms_rdh | /**
* convert transform
*
* @param record
*/
@Override
protected SourceRecord transforms(SourceRecord record) {
List<Transformation> transformations = transformationWrapper.transformations();
Iterator transformationIterator = transformations.iterator();
while (transformationIterator.hasNext()) {
Transformation<SourceRecord> v2 = ((Transformation) (transformationIterator.next()));
log.trace("applying transformation {} to {}", v2.getClass().getName(), record);
record = v2.apply(record);
if (record == null) {
break;
}
}
return record;
} | 3.26 |
rocketmq-connect_SRemParser_m0_rdh | /**
* SREM key member [member ...]
*/
public class SRemParser extends AbstractCommandParser {
@Override
public KVEntry m0() {
return RedisEntry.newEntry(FieldType.ARRAY);
} | 3.26 |
rocketmq-connect_AbstractKafkaSourceConnector_taskConfigs_rdh | /**
* Returns a set of configurations for Tasks based on the current configuration,
* producing at most count configurations.
*
* @param maxTasks
* maximum number of configurations to generate
* @return configurations for Tasks
*/
@Override
public List<KeyValue> taskConfigs(int maxTasks) {
List<Map<String, String>> groupConnectors = sourceConnector.taskConfigs(maxTasks);
List<KeyValue> configs = new ArrayList<>();
for (Map<String, String> configMaps : groupConnectors) {
KeyValue keyValue = new DefaultKeyValue();
configMaps.forEach((k, v) -> {
keyValue.put(k, v);
});
configs.add(keyValue);
}
return configs;
} | 3.26 |
rocketmq-connect_AbstractKafkaSourceConnector_start_rdh | /**
* Start the component
*
* @param config
* component context
*/
@Override
public void start(KeyValue config) {
this.configValue = new ConnectKeyValue();
config.keySet().forEach(key -> {
this.configValue.put(key, config.getString(key));
});
setConnectorClass(configValue);
taskConfig = new HashMap<>(configValue.config());
// get the source class name from config and create source task from reflection
try {
sourceConnector = Class.forName(taskConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG)).asSubclass(SourceConnector.class).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new ConnectException("Load task class failed, " + taskConfig.get(TaskConfig.TASK_CLASS_CONFIG));
}
} | 3.26 |
rocketmq-connect_AbstractKafkaSourceConnector_originalSinkConnector_rdh | /**
* try override start and stop
*
* @return */
protected SourceConnector originalSinkConnector() {
return sourceConnector;
} | 3.26 |
rocketmq-connect_ConverterConfig_type_rdh | /**
* Get the type of converter as defined by the {@link #TYPE_CONFIG} configuration.
*
* @return the converter type; never null
*/
public ConverterType type(KeyValue config) {
return ConverterType.withName(config.getString(TYPE_CONFIG));
} | 3.26 |
rocketmq-connect_DebeziumSqlServerConnector_taskClass_rdh | /**
* Return the current connector class
*
* @return task implement class
*/
@Override
public Class<? extends Task> taskClass() {
return DebeziumSqlServerSource.class;
} | 3.26 |
rocketmq-connect_AbstractKafkaSinkConnector_taskConfigs_rdh | /**
* Returns a set of configurations for Tasks based on the current configuration,
* producing at most count configurations.
*
* @param maxTasks
* maximum number of configurations to generate
* @return configurations for Tasks
*/
@Override
public List<KeyValue> taskConfigs(int maxTasks) {
List<Map<String, String>> groupConnectors = sinkConnector.taskConfigs(maxTasks);
List<KeyValue> configs = new ArrayList<>();
for (Map<String, String> configMaps : groupConnectors) {
KeyValue keyValue = new DefaultKeyValue();
configMaps.forEach((k, v) -> {
keyValue.put(k, v);
});
configs.add(keyValue);
}
return configs;
} | 3.26 |
rocketmq-connect_AbstractKafkaSinkConnector_originalSinkConnector_rdh | /**
* try override start and stop
*
* @return */
protected SinkConnector originalSinkConnector() {
return sinkConnector;
} | 3.26 |
rocketmq-connect_AbstractKafkaSinkConnector_stop_rdh | /**
* Stop the component.
*/
@Override
public void stop() {
if (sinkConnector != null) {
sinkConnector = null;
configValue = null;
taskConfig = null;
}
} | 3.26 |
rocketmq-connect_AbstractKafkaSinkConnector_start_rdh | /**
* Start the component
*
* @param config
* component context
*/
@Override
public void start(KeyValue config) {
this.configValue = new ConnectKeyValue();
config.keySet().forEach(key -> {
this.configValue.put(key, config.getString(key));
});
setConnectorClass(configValue);
taskConfig = new HashMap<>(configValue.config());
// get the source class name from config and create source task from reflection
try {
sinkConnector = Class.forName(taskConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG)).asSubclass(SinkConnector.class).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new ConnectException("Load task class failed, " + taskConfig.get(TaskConfig.TASK_CLASS_CONFIG));
}
} | 3.26 |
rocketmq-connect_Serde_configure_rdh | /**
* configs in key/value pairs
*
* @param configs
*/
default void configure(Map<String, ?> configs) {
// intentionally left blank
} | 3.26 |
rocketmq-connect_TimestampIncrementingQuerier_beginTimestampValue_rdh | /**
* get begin timestamp from offset topic
*
* @return */
@Override
public Timestamp beginTimestampValue() {
return offset.getTimestampOffset();
} | 3.26 |
rocketmq-connect_TimestampIncrementingQuerier_endTimestampValue_rdh | // Get end timestamp from db
@Override
public Timestamp endTimestampValue(Timestamp beginTime) throws SQLException {
long endTimestamp;
final long currentDbTime = dialect.currentTimeOnDB(stmt.getConnection(), DateTimeUtils.getTimeZoneCalendar(timeZone)).getTime();
endTimestamp = currentDbTime - timestampDelay;
return new Timestamp(endTimestamp);
} | 3.26 |
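A worked example of the delay arithmetic above, with hypothetical values for the DB clock and the configured delay:

```java
import java.sql.Timestamp;

public class EndTimestampSketch {
    public static void main(String[] args) {
        long currentDbTime = 1_700_000_000_000L; // hypothetical DB server time in epoch millis
        long timestampDelay = 2_000L;            // hypothetical configured delay in millis
        Timestamp end = new Timestamp(currentDbTime - timestampDelay);
        System.out.println(end.getTime());       // 1699999998000: rows newer than this wait for the next poll
    }
}
```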
rocketmq-connect_PatternFilter_filter_rdh | /**
* filter map
*
* @param record
* @param map
* @return */
R filter(R record, Map map) {
for (Object field : map.keySet()) {
if (!this.fields.contains(field)) {
continue;
}
Object value = map.get(field);
if (value instanceof String) {
String input = ((String) (value));
if (this.pattern.matcher(input).matches()) {
return null;
}
}
}
return record;
} | 3.26 |
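A self-contained sketch of the same drop-on-match check using plain java.util.regex (the field set and pattern are hypothetical, and the record is just a String here):

```java
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

public class PatternFilterSketch {
    private static final Set<String> FIELDS = Set.of("email");
    private static final Pattern PATTERN = Pattern.compile(".*@example\\.com");

    // Returns null (record dropped) when a configured field holds a String matching the pattern.
    static <R> R filter(R record, Map<String, Object> map) {
        for (Map.Entry<String, Object> entry : map.entrySet()) {
            if (FIELDS.contains(entry.getKey())
                    && entry.getValue() instanceof String
                    && PATTERN.matcher((String) entry.getValue()).matches()) {
                return null;
            }
        }
        return record;
    }

    public static void main(String[] args) {
        System.out.println(filter("record-1", Map.of("email", "a@example.com"))); // null
        System.out.println(filter("record-2", Map.of("email", "a@other.com")));   // record-2
    }
}
```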
rocketmq-connect_MemoryClusterManagementServiceImpl_stop_rdh | /**
* Stop the cluster manager.
*/
@Override
public void stop() {
} | 3.26 |
rocketmq-connect_MemoryClusterManagementServiceImpl_configure_rdh | /**
* Configure class with the given key-value pairs
*
* @param config
* can be DistributedConfig or StandaloneConfig
*/
@Override
public void configure(WorkerConfig config) {
this.config = ((StandaloneConfig) (config));
} | 3.26 |
rocketmq-connect_MemoryClusterManagementServiceImpl_getAllAliveWorkers_rdh | /**
* Get all alive workers in the cluster.
*
* @return */
@Override
public List<String> getAllAliveWorkers() {
return Collections.singletonList(this.config.getWorkerId());
} | 3.26 |
rocketmq-connect_MemoryClusterManagementServiceImpl_start_rdh | /**
* Start the cluster manager.
*/
@Override
public void start() {
} | 3.26 |
rocketmq-connect_HudiSinkTask_start_rdh | /**
* Remember always close the CqlSession according to
* https://docs.datastax.com/en/developer/java-driver/4.5/manual/core/
*
* @param props
*/
@Override
public void start(KeyValue props) {
try {
ConfigUtil.load(props, this.hudiConnectConfig);
log.info("init data source success");
} catch (Exception e) {
log.error("Cannot start Hudi Sink Task because of configuration error{}", e);
}
try {
updater = new Updater(hudiConnectConfig);
updater.start();
} catch (Throwable e) {
log.error("fail to start updater{}", e);
}
} | 3.26 |
rocketmq-connect_MqttSinkTask_start_rdh | /**
*
* @param props
*/
@Override
public void start(KeyValue props) {
try {
ConfigUtil.load(props, this.sinkConnectConfig);
log.info("init data source success");
} catch (Exception e) {
log.error("Cannot start MQTT Sink Task because of configuration error{}", e);
}
try {
updater = new Updater(sinkConnectConfig);
updater.start();
} catch (Throwable e) {
log.error("fail to start updater{}", e);
}
} | 3.26 |
rocketmq-connect_RocketMQKafkaSinkTaskContext_convertToRecordPartition_rdh | /**
* convert to rocketmq record partition
*
* @param topicPartition
* @return */
public RecordPartition convertToRecordPartition(TopicPartition topicPartition) {
if (topicPartition != null) {
return new RecordPartition(Collections.singletonMap(topicPartition.topic(), topicPartition.partition()));
}
return null;
} | 3.26 |
rocketmq-connect_RocketMQKafkaSinkTaskContext_convertToTopicPartition_rdh | /**
* convert to kafka topic partition
*
* @param partitionMap
* @return */
public TopicPartition convertToTopicPartition(Map<String, ?> partitionMap) {
if (partitionMap.containsKey(TOPIC) && partitionMap.containsKey(QUEUE_ID)) {
return new TopicPartition(partitionMap.get(TOPIC).toString(), Integer.valueOf(partitionMap.get(QUEUE_ID).toString()));
}
return null;
} | 3.26 |
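A usage fragment for the two conversions above, assuming a task context instance `context` and the TOPIC/QUEUE_ID map keys defined by the class (the topic name and queue id are hypothetical):

```java
Map<String, Object> partitionMap = new HashMap<>();
partitionMap.put(TOPIC, "orders");   // key constant used by the snippet for the topic name
partitionMap.put(QUEUE_ID, 2);       // key constant used by the snippet for the queue id
TopicPartition tp = context.convertToTopicPartition(partitionMap); // orders-2
RecordPartition rp = context.convertToRecordPartition(tp);         // partition map of {"orders": 2}
```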
rocketmq-connect_RecordOffsetManagement_ack_rdh | /**
* Acknowledge this record; signals that its offset may be safely committed.
*/
public void ack() {
if (this.acked.compareAndSet(false, true)) {
messageAcked();
}
} | 3.26 |
rocketmq-connect_RecordOffsetManagement_pollOffsetWhile_rdh | /**
*
* @param submittedPositions
* @return */
private RecordOffset pollOffsetWhile(Deque<SubmittedPosition> submittedPositions) {
RecordOffset offset = null;
// Stop pulling if there is an uncommitted breakpoint
while (canCommitHead(submittedPositions)) {
offset = submittedPositions.poll().getPosition().getOffset();
}
return offset;
} | 3.26 |
rocketmq-connect_RecordOffsetManagement_awaitAllMessages_rdh | /**
* await all messages
*
* @param timeout
* @param timeUnit
* @return */
public boolean awaitAllMessages(long timeout, TimeUnit timeUnit) {
// Create a new message drain latch as a local variable to avoid SpotBugs warnings about inconsistent synchronization
// on an instance variable when invoking CountDownLatch::await outside a synchronized block
CountDownLatch messageDrainLatch;
synchronized(this) {
messageDrainLatch = new CountDownLatch(f0.get());
this.messageDrainLatch = messageDrainLatch;
}
try {
return messageDrainLatch.await(timeout, timeUnit);
} catch (InterruptedException e) {
return false;
}
} | 3.26 |
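The drain above is a snapshot-then-await on a CountDownLatch; a standalone sketch of that pattern with hypothetical counts:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class MessageDrainSketch {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger inFlight = new AtomicInteger(2);              // messages submitted but not yet acked
        CountDownLatch drain = new CountDownLatch(inFlight.get());  // snapshot the count into a fresh latch
        new Thread(drain::countDown).start();                       // each ack counts the latch down
        new Thread(drain::countDown).start();
        boolean allAcked = drain.await(30, TimeUnit.SECONDS);       // false if the timeout elapses first
        System.out.println(allAcked);
    }
}
```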
rocketmq-connect_RecordOffsetManagement_submitRecord_rdh | /**
* submit record
*
* @param position
* @return */
public SubmittedPosition submitRecord(RecordPosition position) {
SubmittedPosition submittedPosition = new SubmittedPosition(position);
records.computeIfAbsent(position.getPartition(), e -> new LinkedList<>()).add(submittedPosition);
// ensure thread safety in operation
synchronized(this) {
f0.incrementAndGet();
}
return submittedPosition;
} | 3.26 |
rocketmq-connect_RecordOffsetManagement_remove_rdh | /**
* remove record
*
* @return */
public boolean remove() {
Deque<SubmittedPosition> deque = records.get(position.getPartition());
if (deque == null) {
return false;
}
boolean result = deque.removeLastOccurrence(this);
if (deque.isEmpty()) {
records.remove(position.getPartition());
}
if (result) {
messageAcked();
} else {
log.warn("Attempted to remove record from submitted queue for partition {}, but the record has not been submitted or has already been removed", position.getPartition());
}
return result;
} | 3.26 |
rocketmq-connect_Worker_allocatedTasks_rdh | /**
* get allocated task configs
*
* @return */
public Map<String, List<ConnectKeyValue>> allocatedTasks() {
return latestTaskConfigs;
} | 3.26 |
rocketmq-connect_Worker_checkAndReconfigureConnectors_rdh | /**
* check and reconfigure connectors
*
* @param assigns
*/
private void checkAndReconfigureConnectors(Map<String, ConnectKeyValue> assigns) {
if ((assigns == null) || assigns.isEmpty()) {
return;
}
for (String connectName : assigns.keySet()) {
if (!connectors.containsKey(connectName)) {
// new
continue;
}
WorkerConnector connector = connectors.get(connectName);
ConnectKeyValue oldConfig = connector.getKeyValue();
ConnectKeyValue newConfig = assigns.get(connectName);
if (!oldConfig.equals(newConfig)) {
connector.reconfigure(newConfig);
}
}
} | 3.26 |
rocketmq-connect_Worker_checkRunningTasks_rdh | /**
* check running task
*
* @param connectorConfig
*/
private void checkRunningTasks(Map<String, List<ConnectKeyValue>> connectorConfig) {
// STEP 1: check running tasks and put to error status
for (Runnable runnable : runningTasks) {
WorkerTask workerTask = ((WorkerTask) (runnable));
String connectorName = workerTask.id().connector();
ConnectKeyValue taskConfig = workerTask.currentTaskConfig();
List<ConnectKeyValue> taskConfigs = connectorConfig.get(connectorName);
WorkerTaskState state = ((WorkerTask) (runnable)).getState();
switch (state) {
case ERROR :
errorTasks.add(runnable);
runningTasks.remove(runnable);
break;
case RUNNING :
if (isNeedStop(taskConfig, taskConfigs)) {
try {
// remove committer offset
sourceTaskOffsetCommitter.ifPresent(commiter -> commiter.remove(workerTask.id()));
workerTask.doClose();
} catch (Exception e) {
log.error("workerTask stop exception, workerTask: " + workerTask.currentTaskConfig(), e);
}
log.info("Task stopping, connector name {}, config {}", workerTask.id().connector(), workerTask.currentTaskConfig());
runningTasks.remove(runnable);
stoppingTasks.put(runnable, System.currentTimeMillis());
} else {
// status redress
redressRunningStatus(workerTask);
// set target state
TargetState targetState =
configManagementService.snapshot().targetState(connectorName);
if (targetState != null) {
workerTask.transitionTo(targetState);
}
}
break;
default :
log.error("[BUG] Illegal State in when checking running tasks, {} is in {} state", ((WorkerTask) (runnable)).id().connector(), state);
break;
}
}
} | 3.26 |
rocketmq-connect_Worker_checkStoppedTasks_rdh | /**
* check stopped tasks
*/
private void checkStoppedTasks() {
for (Runnable runnable : stoppedTasks) {
WorkerTask workerTask = ((WorkerTask) (runnable));
Future future = taskToFutureMap.get(runnable);
try {
if (null != future) {
future.get(workerConfig.getMaxStartTimeoutMills(), TimeUnit.MILLISECONDS);
} else {
log.error("[BUG] stopped Tasks reference not found in taskFutureMap");
}
} catch (ExecutionException e) {
Throwable t = e.getCause();
log.info("[BUG] Stopped Tasks should not throw any exception");
t.printStackTrace();
} catch (CancellationException e) {
log.info("[BUG] Stopped Tasks throws PrintStackTrace");
e.printStackTrace();
} catch (TimeoutException e) {
    log.info("[BUG] Stopped Tasks should not throw any exception");
    e.printStackTrace();
} catch (InterruptedException e) {
log.info("[BUG] Stopped Tasks should not throw any exception");
e.printStackTrace();
} finally {
// remove committer offset
sourceTaskOffsetCommitter.ifPresent(commiter -> commiter.remove(workerTask.id()));
workerTask.cleanup();
if (future != null) {
    future.cancel(true);
}
taskToFutureMap.remove(runnable);
stoppedTasks.remove(runnable);
cleanedStoppedTasks.add(runnable);
}
}
} | 3.26 |
rocketmq-connect_Worker_startTasks_rdh | /**
* Start a collection of tasks with the given configs. If a task is already started with the same configs, it will
* not start again. If a task is already started but not contained in the new configs, it will stop.
*
* @param taskConfigs
* @throws Exception
*/
public void startTasks(Map<String, List<ConnectKeyValue>> taskConfigs) {
synchronized(latestTaskConfigs) {
this.latestTaskConfigs = taskConfigs;
}
} | 3.26 |
rocketmq-connect_Worker_stopConnector_rdh | /**
* Stop a connector managed by this worker.
*
* @param connName
* the connector name.
*/
private void stopConnector(String connName) {
WorkerConnector workerConnector = connectors.get(connName);
log.info("Stopping connector {}", connName);if (workerConnector == null) {
log.warn("Ignoring stop request for unowned connector {}", connName);
return;
}
workerConnector.shutdown();
} | 3.26 |
rocketmq-connect_Worker_startTask_rdh | /**
* start task
*
* @param newTasks
* @throws Exception
*/
private void startTask(Map<String, List<ConnectKeyValue>> newTasks) throws Exception {
for (String connectorName : newTasks.keySet()) {
for (ConnectKeyValue v62 : newTasks.get(connectorName)) {
int taskId = v62.getInt(ConnectorConfig.TASK_ID);
ConnectorTaskId id = new ConnectorTaskId(connectorName, taskId);
ErrorMetricsGroup errorMetricsGroup = new ErrorMetricsGroup(id, this.connectMetrics);
String taskType = v62.getString(ConnectorConfig.TASK_TYPE);
if (TaskType.DIRECT.name().equalsIgnoreCase(taskType)) {
createDirectTask(id, v62);
continue;
}
ClassLoader savedLoader = plugin.currentThreadLoader();
try {
String connType = v62.getString(ConnectorConfig.CONNECTOR_CLASS);
ClassLoader connectorLoader = plugin.delegatingLoader().connectorLoader(connType);
savedLoader = Plugin.compareAndSwapLoaders(connectorLoader);
// new task
final Class<? extends Task> taskClass = plugin.currentThreadLoader().loadClass(v62.getString(ConnectorConfig.TASK_CLASS)).asSubclass(Task.class);
final Task task = plugin.newTask(taskClass);
/**
* create key/value converter
*/
RecordConverter valueConverter = plugin.newConverter(v62, false, ConnectorConfig.VALUE_CONVERTER, workerConfig.getValueConverter(), ClassLoaderUsage.CURRENT_CLASSLOADER);
RecordConverter keyConverter = plugin.newConverter(v62, true, ConnectorConfig.KEY_CONVERTER, workerConfig.getKeyConverter(), ClassLoaderUsage.CURRENT_CLASSLOADER);
if (keyConverter == null) {
keyConverter = plugin.newConverter(v62, true, ConnectorConfig.KEY_CONVERTER, workerConfig.getValueConverter(), ClassLoaderUsage.PLUGINS);
log.info("Set up the key converter {} for task {} using the worker config", keyConverter.getClass(), id);
} else {
log.info("Set up the key converter {} for task {} using the connector config", keyConverter.getClass(), id);
}
if (valueConverter == null) {
valueConverter = plugin.newConverter(v62, false, ConnectorConfig.VALUE_CONVERTER, workerConfig.getKeyConverter(), ClassLoaderUsage.PLUGINS);
log.info("Set up the value converter {} for task {} using the worker config", valueConverter.getClass(), id);
} else {
log.info("Set up the value converter {} for task {} using the connector config", valueConverter.getClass(), id);
}
if (task instanceof SourceTask) {
DefaultMQProducer v74 = ConnectUtil.initDefaultMQProducer(workerConfig);
TransformChain<ConnectRecord> transformChain = new TransformChain<>(v62, plugin);
// create retry operator
RetryWithToleranceOperator retryWithToleranceOperator = ReporterManagerUtil.createRetryWithToleranceOperator(v62, errorMetricsGroup);
retryWithToleranceOperator.reporters(ReporterManagerUtil.sourceTaskReporters(id, v62, errorMetricsGroup));
WorkerSourceTask workerSourceTask = new WorkerSourceTask(workerConfig, id, ((SourceTask) (task)), savedLoader, v62,
        positionManagementService, keyConverter, valueConverter, v74, workerState, connectStatsManager,
        connectStatsService, transformChain, retryWithToleranceOperator, statusListener, this.connectMetrics);
Future future = taskExecutor.submit(workerSourceTask);
// schedule offset committer
sourceTaskOffsetCommitter.ifPresent(committer -> committer.schedule(id, workerSourceTask));
taskToFutureMap.put(workerSourceTask, future);
this.pendingTasks.put(workerSourceTask, System.currentTimeMillis());
} else if (task instanceof SinkTask) {
log.info("sink task config keyValue is {}", v62.getProperties());
DefaultLitePullConsumer consumer = ConnectUtil.initDefaultLitePullConsumer(workerConfig, false);
// set consumer groupId
String groupId = v62.getString(SinkConnectorConfig.TASK_GROUP_ID);
if (StringUtils.isBlank(groupId)) {
groupId = ConnectUtil.SYS_TASK_CG_PREFIX + id.connector();
}
consumer.setConsumerGroup(groupId);
Set<String> consumerGroupSet = ConnectUtil.fetchAllConsumerGroupList(workerConfig);
if (!consumerGroupSet.contains(consumer.getConsumerGroup())) {
ConnectUtil.createSubGroup(workerConfig, consumer.getConsumerGroup());
}
TransformChain<ConnectRecord> transformChain = new TransformChain<>(v62, plugin);
// create retry operator
RetryWithToleranceOperator retryWithToleranceOperator = ReporterManagerUtil.createRetryWithToleranceOperator(v62, errorMetricsGroup);
retryWithToleranceOperator.reporters(ReporterManagerUtil.sinkTaskReporters(id, v62,
workerConfig, errorMetricsGroup));
WorkerSinkTask workerSinkTask = new WorkerSinkTask(workerConfig, id, ((SinkTask) (task)), savedLoader, v62, keyConverter, valueConverter,
        consumer, workerState, connectStatsManager, connectStatsService, transformChain, retryWithToleranceOperator,
        ReporterManagerUtil.createWorkerErrorRecordReporter(v62, retryWithToleranceOperator, valueConverter), statusListener, this.connectMetrics);
Future future = taskExecutor.submit(workerSinkTask);
taskToFutureMap.put(workerSinkTask, future);
this.pendingTasks.put(workerSinkTask, System.currentTimeMillis());
}
Plugin.compareAndSwapLoaders(savedLoader);
} catch (Exception e) {
log.error("start worker task exception. config {}" + JSON.toJSONString(v62), e);
Plugin.compareAndSwapLoaders(savedLoader);
}
}
}
} | 3.26 |
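The key/value converter resolution in startTask above follows a two-level fallback: the converter named in the task config wins, otherwise the worker-level default is instantiated from the plugin class loaders. A condensed, hedged sketch of that pattern (this helper does not exist in the source; it pairs each converter with its matching worker default):
// Hedged sketch of the converter fallback used in startTask; names are illustrative.
private RecordConverter resolveConverter(ConnectKeyValue taskConfig, boolean isKey) {
    String configKey = isKey ? ConnectorConfig.KEY_CONVERTER : ConnectorConfig.VALUE_CONVERTER;
    String workerDefault = isKey ? workerConfig.getKeyConverter() : workerConfig.getValueConverter();
    // first try the converter declared on the task/connector config
    RecordConverter converter = plugin.newConverter(taskConfig, isKey, configKey, workerDefault, ClassLoaderUsage.CURRENT_CLASSLOADER);
    if (converter == null) {
        // fall back to the worker default, loaded via the plugin class loaders
        converter = plugin.newConverter(taskConfig, isKey, configKey, workerDefault, ClassLoaderUsage.PLUGINS);
    }
    return converter;
}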
rocketmq-connect_Worker_checkAndStopConnectors_rdh | /**
* check and stop connectors
*
* @param assigns
*/
private void checkAndStopConnectors(Collection<String> assigns) {
Set<String> connectors = this.connectors.keySet();
if (CollectionUtils.isEmpty(assigns)) {
// delete all
for (String connector : connectors) {
    log.info("Load balancing may have assigned this connector to another node, stopping connector {}", connector);
stopAndAwaitConnector(connector);
}
return;
}
for (String connectorName : connectors) {
if (!assigns.contains(connectorName)) {
log.info("It may be that the load balancing assigns this connector to other nodes,connector {}", connectorName);
stopAndAwaitConnector(connectorName);
}
}
} | 3.26 |
rocketmq-connect_Worker_stopConnectors_rdh | /**
* stop connectors
*
* @param ids
*/
private void stopConnectors(Collection<String> ids) {
for (String connectorName : ids) {
stopConnector(connectorName);
}
} | 3.26 |
rocketmq-connect_Worker_stopAndAwaitConnector_rdh | /**
* Stop a connector that belongs to this worker and await its termination.
*
* @param connName
* the name of the connector to be stopped.
*/
public void stopAndAwaitConnector(String connName) {
stopConnector(connName);
awaitStopConnectors(Collections.singletonList(connName));
} | 3.26 |
rocketmq-connect_Worker_allocatedConnectors_rdh | /**
* get connectors
*
* @return */
public Set<String> allocatedConnectors() {
return new HashSet<>(connectors.keySet());
} | 3.26 |
rocketmq-connect_Worker_getWorkingTasks_rdh | /**
* Be aware that we are not creating a defensive copy of these tasks,
* so developers should only use these references for read-only purposes.
* These references should be treated as immutable.
*
* @return */
public Set<Runnable> getWorkingTasks() {
return runningTasks;
} | 3.26 |
rocketmq-connect_Worker_isNeedStop_rdh | /**
* check is need stop
*
* @param taskConfig
* @param keyValues
* @return */
private boolean isNeedStop(ConnectKeyValue taskConfig, List<ConnectKeyValue> keyValues) {
if (CollectionUtils.isEmpty(keyValues)) {
return true;
}
for (ConnectKeyValue keyValue : keyValues) {
if (keyValue.equals(taskConfig)) {
// not stop
return false;
}
}
return true;
} | 3.26 |
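The check above can be expressed more compactly; a behaviour-preserving, hedged stream-based equivalent:
// Equivalent formulation: stop unless the task's current config still appears in the assignment.
private boolean isNeedStop(ConnectKeyValue taskConfig, List<ConnectKeyValue> keyValues) {
    return CollectionUtils.isEmpty(keyValues) || keyValues.stream().noneMatch(kv -> kv.equals(taskConfig));
}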
rocketmq-connect_Worker_startConnectors_rdh | /**
* assign connector
* <p>
* Start a collection of connectors with the given configs. If a connector is already started with the same configs,
* it will not start again. If a connector is already started but not contained in the new configs, it will stop.
*
* @param connectorConfigs
* @param connectController
* @throws Exception
*/
public synchronized void startConnectors(Map<String, ConnectKeyValue> connectorConfigs, AbstractConnectController connectController) throws Exception {
// Step 1: Check and stop connectors
checkAndStopConnectors(connectorConfigs.keySet());
// Step 2: Check config update
checkAndReconfigureConnectors(connectorConfigs);
// Step 3: check new
Map<String, ConnectKeyValue> newConnectors = checkAndNewConnectors(connectorConfigs);
// Step 4: start connectors
for (String connectorName : newConnectors.keySet()) {
ClassLoader savedLoader = plugin.currentThreadLoader();
try {
ConnectKeyValue keyValue = newConnectors.get(connectorName);
String connectorClass = keyValue.getString(ConnectorConfig.CONNECTOR_CLASS);
ClassLoader v24 = plugin.delegatingLoader().pluginClassLoader(connectorClass);
savedLoader = Plugin.compareAndSwapLoaders(v24);
// instance connector
final Connector connector = plugin.newConnector(connectorClass);
WorkerConnector workerConnector = new WorkerConnector(connectorName, connector, connectorConfigs.get(connectorName), new DefaultConnectorContext(connectorName, connectController), statusListener, savedLoader);
// initial target state
executor.submit(workerConnector);
workerConnector.transitionTo(keyValue.getTargetState(), new Callback<TargetState>() {
@Override
public void onCompletion(Throwable error, TargetState result) {
    if (error != null) {
        log.error(error.getMessage());
} else {
log.info("Start connector {} and set target state {} successed!!", connectorName, result);
}
}
});
log.info("Connector {} start", workerConnector.getConnectorName());Plugin.compareAndSwapLoaders(savedLoader);
this.connectors.put(connectorName, workerConnector);
} catch (Exception e) {
Plugin.compareAndSwapLoaders(savedLoader);
log.error("worker connector start exception. workerName: " + connectorName, e);
} finally {
// compare and swap
Plugin.compareAndSwapLoaders(savedLoader);
}
}
// Step 5: check and transition connectors
checkAndTransitionToConnectors(connectorConfigs);
} | 3.26 |
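startConnectors relies on a swap/restore discipline for the thread context class loader around plugin instantiation; a minimal hedged sketch of that pattern, using the same Plugin calls as above:
// Hedged sketch: always restore the previous class loader, even if instantiation fails.
ClassLoader savedLoader = plugin.currentThreadLoader();
try {
    ClassLoader pluginLoader = plugin.delegatingLoader().pluginClassLoader(connectorClass);
    savedLoader = Plugin.compareAndSwapLoaders(pluginLoader);
    // ... instantiate and start the connector while the plugin's loader is active ...
} finally {
    Plugin.compareAndSwapLoaders(savedLoader);
}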
rocketmq-connect_Worker_maintainTaskState_rdh | /**
* maintain task state
*
* @throws Exception
*/
public void maintainTaskState() throws Exception {
Map<String, List<ConnectKeyValue>> connectorConfig = new HashMap<>();
synchronized(latestTaskConfigs) {
connectorConfig.putAll(latestTaskConfigs);
}
// STEP 0: clean up error and stopped tasks
clearErrorOrStopedTask();
// STEP 1: check running tasks and put to error status
checkRunningTasks(connectorConfig);
// get new Tasks
Map<String, List<ConnectKeyValue>> newTasks = newTasks(connectorConfig);
// STEP 2: try to create new tasks
startTask(newTasks);
// STEP 3: check all pending state
checkPendingTask();
// STEP 4 check stopping tasks
checkStoppingTasks();
// STEP 5 check error tasks
checkErrorTasks();
// STEP 6 check errorTasks and stopped tasks
checkStoppedTasks();
} | 3.26 |
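maintainTaskState is intended to be driven periodically; a hedged sketch of wiring it to a single-threaded scheduler (the one-second period is illustrative, not the project's configured value):
// Hedged sketch: run the task state machine above on a fixed schedule.
ScheduledExecutorService stateMachineExecutor = Executors.newSingleThreadScheduledExecutor();
stateMachineExecutor.scheduleAtFixedRate(() -> {
    try {
        maintainTaskState();
    } catch (Exception e) {
        log.error("Error while maintaining task state", e);
    }
}, 1, 1, TimeUnit.SECONDS);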
rocketmq-connect_Worker_checkAndNewConnectors_rdh | /**
* check and new connectors
*
* @param assigns
*/
private Map<String, ConnectKeyValue> checkAndNewConnectors(Map<String, ConnectKeyValue> assigns) {
if ((assigns == null) || assigns.isEmpty()) {
    return new HashMap<>();
}
Map<String, ConnectKeyValue> newConnectors = new HashMap<>();
for (String connectName : assigns.keySet()) {
if (!connectors.containsKey(connectName)) {
newConnectors.put(connectName, assigns.get(connectName));
}
}
return newConnectors;
} | 3.26 |