name | code_snippet | score
---|---|---
rocketmq-connect_Worker_stop_rdh | /**
* We can choose to persist in-memory task status
* so we can view history tasks
*/
public void stop() {
// stop and await connectors
if (!connectors.isEmpty()) {
log.warn("Shutting down connectors {} uncleanly; herder should have shut down connectors before the Worker is stopped", connectors.keySet());
stopAndAwaitConnectors();
}
executor.shutdown();
// mark the worker state as terminated
if (workerState != null) {
workerState.set(WorkerState.TERMINATED);
}
// stop and await running tasks
Set<Runnable> runningTasks = this.runningTasks;
for (Runnable task : runningTasks) {
awaitStopTask((WorkerTask) task, 5000);
}
taskExecutor.shutdown();
// close offset committer
sourceTaskOffsetCommitter.ifPresent(committer -> committer.close(5000));
stateMachineService.shutdown();
try {
// close metrics
connectMetrics.close();
} catch (Exception e) {
log.warn("Failed to close connect metrics", e);
}
} | 3.26 |
rocketmq-connect_Worker_checkAndTransitionToConnectors_rdh | /**
* check and transition to connectors
*
* @param assigns
*/
private void checkAndTransitionToConnectors(Map<String, ConnectKeyValue> assigns) {
if ((assigns == null) || assigns.isEmpty()) {
return;
}
for (String connectName : assigns.keySet()) {
if (!connectors.containsKey(connectName)) {
// new
continue;
}
WorkerConnector connector = connectors.get(connectName);
ConnectKeyValue newConfig = assigns.get(connectName);
connector.transitionTo(newConfig.getTargetState(), new Callback<TargetState>() {
@Override
public void onCompletion(Throwable error, TargetState result) {
if (error != null) {
log.error(error.getMessage());
} else if (newConfig.getTargetState() != result) {
log.info("Connector {} set target state {} succeeded!", connectName, result);
}
}
});
}
} | 3.26 |
rocketmq-connect_RetryUtil_createThreadPoolExecutor_rdh | /**
* Create thread pool
*
* @return
*/
public static ThreadPoolExecutor createThreadPoolExecutor() {
return new ThreadPoolExecutor(0, 5, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
} | 3.26 |
rocketmq-connect_RetryUtil_asyncExecuteWithRetry_rdh | /**
* Async execute with retry
*
* @param callable
* @param retryTimes
* @param sleepTimeInMilliSecond
* @param exponential
* @param timeoutMs
* @param executor
* @param <T>
* @return
* @throws Exception
*/
public static <T> T asyncExecuteWithRetry(Callable<T> callable, int retryTimes, long sleepTimeInMilliSecond, boolean exponential, long timeoutMs, ThreadPoolExecutor executor) throws Exception {
Retry retry = new AsyncRetry(timeoutMs, executor);
return retry.doRetry(callable, retryTimes, sleepTimeInMilliSecond, exponential, null);
} | 3.26 |
rocketmq-connect_RetryUtil_executeWithRetry_rdh | /**
* execute retry with exception
*
* @param callable
* @param retryTimes
* @param sleepTimeInMilliSecond
* @param exponential
* @param retryExceptionClasses
* @param <T>
* @return
* @throws Exception
*/
public static <T> T executeWithRetry(Callable<T> callable, int retryTimes, long sleepTimeInMilliSecond, boolean exponential, List<Class<?>> retryExceptionClasses) throws Exception {
Retry retry = new Retry();
return retry.doRetry(callable, retryTimes, sleepTimeInMilliSecond, exponential, retryExceptionClasses);
} | 3.26 |
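Taken together, the three RetryUtil snippets above form a small facade: a bounded pool, an async retry with an overall timeout, and a synchronous retry filtered by exception type. A minimal usage sketch against the signatures shown; the callable, retry counts, and exception list are illustrative values only:

```java
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ThreadPoolExecutor;

public class RetryUtilUsage {
    public static void main(String[] args) throws Exception {
        // A flaky operation to retry; purely illustrative.
        Callable<String> flaky = () -> "ok";

        // Synchronous: up to 3 attempts, 100 ms base sleep, exponential
        // backoff, retrying only when an IllegalStateException is thrown.
        List<Class<?>> retryOn = Collections.singletonList(IllegalStateException.class);
        String result = RetryUtil.executeWithRetry(flaky, 3, 100L, true, retryOn);

        // Asynchronous: same retry policy plus a 5 s overall timeout,
        // running on the pool from createThreadPoolExecutor().
        ThreadPoolExecutor executor = RetryUtil.createThreadPoolExecutor();
        String asyncResult = RetryUtil.asyncExecuteWithRetry(flaky, 3, 100L, true, 5000L, executor);
        executor.shutdown();
        System.out.println(result + " / " + asyncResult);
    }
}
```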
rocketmq-connect_PositionManagementService_m1_rdh | /**
* Register a listener.
*
* @param listener
*/
default void m1(PositionUpdateListener listener) {
// No-op
} | 3.26 |
rocketmq-connect_PositionManagementService_configure_rdh | /**
* Configure class with the given key-value pairs
*
* @param config
* can be DistributedConfig or StandaloneConfig
*/
default void configure(WorkerConfig config) {
} | 3.26 |
rocketmq-connect_AvroSerializer_serialize_rdh | /**
* avro serialize
*
* @param topic
* @param isKey
* @param schema
* @param data
* @return
*/
public byte[] serialize(String topic, boolean isKey, AvroSchema schema, Object data) {
if (data == null) {
return null;
}
String subjectName = TopicNameStrategy.subjectName(topic, isKey);
Schema avroSchema = schema.rawSchema();
try {
RegisterSchemaRequest registerSchemaRequest = RegisterSchemaRequest.builder().schemaType(schema.schemaType()).compatibility(Compatibility.BACKWARD).schemaIdl(avroSchema.toString()).build();
SchemaResponse schemaResponse = schemaRegistryClient.autoRegisterOrGetSchema(AvroData.NAMESPACE, topic, subjectName, registerSchemaRequest, schema);
long recordId = schemaResponse.getRecordId();
// parse idl
if (StringUtils.isNotEmpty(schemaResponse.getIdl())) {
schema = new AvroSchema(schemaResponse.getIdl());
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
// Add record id in the header
out.write(ByteBuffer.allocate(idSize).putLong(recordId).array());
Object value = (data instanceof NonRecordContainer) ? ((NonRecordContainer) data).getValue() : data;
Schema rawSchema = schema.rawSchema();
// bytes
if (rawSchema.getType().equals(Type.BYTES)) {
if (value instanceof byte[]) {
out.write(((byte[]) (value)));
} else if (value instanceof ByteBuffer) {
out.write(((ByteBuffer) (value)).array());
} else {
throw new SerializationException("Unrecognized bytes object of type: " + value.getClass().getName());
}
} else {
// not bytes
this.avroDatumWriterFactory.writeDatum(out, value, rawSchema);
}
byte[] bytes = out.toByteArray();
out.close();
return bytes;
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
rocketmq-connect_SchemaMapping_schema_rdh | /**
* schema
*
* @return
*/
public Schema schema() {
return f0;
} | 3.26 |
rocketmq-connect_DebeziumMysqlSource_getTaskClass_rdh | /**
* get task class
*/
@Override
public String getTaskClass() {
return DEFAULT_TASK;
}
rocketmq-connect_AvroConverter_fromConnectData_rdh | /**
* from connect data
*
* @param topic
* the topic associated with the data
* @param schema
* record schema
* @param value
* record value
* @return
*/
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (value == null) {
return null;
}
Schema avroSchema = avroData.fromConnectSchema(schema);
Object object = avroData.fromConnectData(schema, value);
return serializer.serialize(topic, isKey, new AvroSchema(avroSchema), object);
} | 3.26 |
rocketmq-connect_AvroData_splitName_rdh | /**
* Split a full dotted-syntax name into a namespace and a single-component name.
*/
private static String[] splitName(String fullName) {
String[] result = new String[2];
int indexLastDot = fullName.lastIndexOf('.');
if (indexLastDot >= 0) {
result[0] = fullName.substring(0, indexLastDot);
result[1] = fullName.substring(indexLastDot + 1);
} else {
result[0] = null;
result[1] = fullName;
}
return result;
} | 3.26 |
rocketmq-connect_AvroData_defaultValueFromConnect_rdh | // Convert default values from Connect data format to Avro's format, which is an
// org.codehaus.jackson.JsonNode. The default value is provided as an argument because even
// though you can get a default value from the schema, default values for complex structures need
// to perform the same translation but those defaults will be part of the original top-level
// (complex type) default value, not part of the child schema.
private static JsonNode defaultValueFromConnect(Schema schema, Object value) {
try {
// If this is a logical type, convert it from the convenient Java type to the underlying
// serializable format
Object defaultVal = toAvroLogical(schema, value);
switch (schema.getFieldType()) {
case INT8 :
return JsonNodeFactory.instance.numberNode(((Byte) (defaultVal)).intValue());
case INT16 :
return JsonNodeFactory.instance.numberNode(((Short) (defaultVal)).intValue());
case INT32 :
return JsonNodeFactory.instance.numberNode(((Integer) (defaultVal)));
case INT64 :
return JsonNodeFactory.instance.numberNode(((Long) (defaultVal)));
case FLOAT32 :
return JsonNodeFactory.instance.numberNode(((Float) (defaultVal)));
case FLOAT64 :
return JsonNodeFactory.instance.numberNode(((Double) (defaultVal)));
case BOOLEAN :
return JsonNodeFactory.instance.booleanNode(((Boolean) (defaultVal)));
case STRING :
return JsonNodeFactory.instance.textNode(((String) (defaultVal)));
case BYTES :
if (defaultVal instanceof byte[]) {
return JsonNodeFactory.instance.textNode(new String(((byte[]) (defaultVal)), StandardCharsets.ISO_8859_1));
} else {
return JsonNodeFactory.instance.textNode(new String(((ByteBuffer) (defaultVal)).array(), StandardCharsets.ISO_8859_1));
}
case ARRAY :
{
ArrayNode array = JsonNodeFactory.instance.arrayNode();
for (Object elem : ((Collection<Object>) (defaultVal))) {
array.add(defaultValueFromConnect(schema.getValueSchema(), elem));
}
return array;
}
case MAP :
if ((schema.getKeySchema().getFieldType() == FieldType.STRING) && (!schema.getKeySchema().isOptional())) {
ObjectNode mapNode = JsonNodeFactory.instance.objectNode();
for (Map.Entry<String, Object> entry : ((Map<String, Object>) (defaultVal)).entrySet()) {
JsonNode entryDef = defaultValueFromConnect(schema.getValueSchema(), entry.getValue());
mapNode.put(entry.getKey(), entryDef);
}
return mapNode;
} else {
ArrayNode array = JsonNodeFactory.instance.arrayNode();
for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) (defaultVal)).entrySet()) {
JsonNode keyDefault = defaultValueFromConnect(schema.getKeySchema(), entry.getKey());
JsonNode valDefault = defaultValueFromConnect(schema.getValueSchema(), entry.getValue());
ArrayNode jsonEntry = JsonNodeFactory.instance.arrayNode();
jsonEntry.add(keyDefault);
jsonEntry.add(valDefault);
array.add(jsonEntry);
}
return array;
}
case STRUCT :
{
ObjectNode node = JsonNodeFactory.instance.objectNode();
Struct struct = ((Struct) (defaultVal));
for (Field field : schema.getFields()) {
JsonNode fieldDef = defaultValueFromConnect(field.getSchema(), struct.get(field));
node.set(field.getName(), fieldDef);
}
return node;
}
default :
throw new ConnectException("Unknown schema type:" + schema.getFieldType());
}
} catch (ClassCastException e) {
throw new ConnectException((("Invalid type used for default value of " + schema.getFieldType()) + " field: ") + schema.getDefaultValue().getClass());
}
} | 3.26 |
rocketmq-connect_AvroData_toConnectData_rdh | /**
* Convert the given object, in Avro format, into a Connect data object.
*
* @param avroSchema
* the Avro schema
* @param value
* the value to convert into a Connect data object
* @param version
* the version to set on the Connect schema if the avroSchema does not have a
* property named "connect.version", may be null
* @return the Connect schema and value
*/
public SchemaAndValue toConnectData(Schema avroSchema, Object value, Integer version) {
if (value == null) {
return null;
}
ToConnectContext toConnectContext = new ToConnectContext();
Schema schema = avroSchema.equals(ANYTHING_SCHEMA) ? null : toConnectSchema(avroSchema, version, toConnectContext);
return new SchemaAndValue(schema, toConnectData(schema, value, toConnectContext));
} | 3.26 |
rocketmq-connect_AvroData_avroSchemaForUnderlyingTypeIfOptional_rdh | /**
* Connect optional fields are represented as a union (null & type) in Avro.
* Return the Avro schema of the actual type in the Union (instead of the union itself)
*/
private static Schema avroSchemaForUnderlyingTypeIfOptional(Schema schema, Schema avroSchema) {
if ((schema != null) && schema.isOptional()) {
if (avroSchema.getType() == Type.UNION) {
for (Schema typeSchema : avroSchema.getTypes()) {
if ((!typeSchema.getType().equals(Type.NULL)) && crossReferenceSchemaNames(schema, typeSchema)) {
return typeSchema;
}
}
} else {
throw new ConnectException("An optional schema should have an Avro Union type, not " + schema.getFieldType());
}
}
return avroSchema;
} | 3.26 |
rocketmq-connect_AvroData_fixedValueSizeMatch_rdh | /**
* Returns true if the fixed value size of the value matches the expected size
*/
private static boolean fixedValueSizeMatch(Schema fieldSchema, Object value, int size, boolean enhancedSchemaSupport) {
if (value instanceof byte[]) {
return ((byte[]) (value)).length == size;
} else if (value instanceof ByteBuffer) {
return ((ByteBuffer) (value)).remaining() == size;
} else if (value instanceof GenericFixed) {
return unionMemberFieldName(((GenericFixed) (value)).getSchema(), enhancedSchemaSupport).equals(fieldSchema.getName());
} else {
throw new ConnectException(("Invalid class for fixed, expecting GenericFixed, byte[]" + " or ByteBuffer but found ") + value.getClass());
}
} | 3.26 |
rocketmq-connect_AvroData_toAvroLogical_rdh | /**
* to avro logical
*
* @param schema
* @param value
* @return
*/
private static Object toAvroLogical(Schema schema, Object value) {
if ((schema != null) && (schema.getName() != null)) {
LogicalTypeConverter logicalConverter = TO_AVRO_LOGICAL_CONVERTERS.get(schema.getName());
if ((logicalConverter != null) && (value != null)) {
return logicalConverter.convert(schema, value);
}
}
return value;
} | 3.26 |
rocketmq-connect_AvroData_avroSchemaForUnderlyingMapEntryType_rdh | /**
* MapEntry types in connect Schemas are represented as Arrays of record.
* Return the array type from the union instead of the union itself.
*/
private static Schema avroSchemaForUnderlyingMapEntryType(Schema schema, Schema avroSchema) {
if ((schema != null) && schema.isOptional()) {
if (avroSchema.getType() == Type.UNION) {
for (Schema typeSchema : avroSchema.getTypes()) {
if ((!typeSchema.getType().equals(Type.NULL)) && Type.ARRAY.getName().equals(typeSchema.getType().getName())) {
return typeSchema;
}
}
} else {
throw new ConnectException("An optional schema should have an Avro Union type, not " + schema.getFieldType());
}
}
return avroSchema;
} | 3.26 |
rocketmq-connect_AvroData_fromConnectData_rdh | /**
* Convert this object, in Connect data format, into an Avro object.
*/
public Object fromConnectData(Schema schema, Object value) {
Schema avroSchema = fromConnectSchema(schema);
return fromConnectData(schema, avroSchema, value);
} | 3.26 |
rocketmq-connect_AvroData_toConnectSchema_rdh | /**
* Convert to connect schema
*
* @param schema
* @param forceOptional
* @param fieldDefaultVal
* @param docDefaultVal
* @param toConnectContext
* @return
*/
private Schema toConnectSchema(Schema schema, boolean forceOptional, Object fieldDefaultVal, String docDefaultVal, ToConnectContext toConnectContext) {
return toConnectSchema(schema, forceOptional, fieldDefaultVal, docDefaultVal, null, toConnectContext);
} | 3.26 |
rocketmq-connect_JdbcSinkConfig_filterWhiteTable_rdh | /**
* filter white table
*
* @param dbDialect
* @param tableId
* @return
*/
public boolean filterWhiteTable(DatabaseDialect dbDialect, TableId tableId) {
// not filter table
if (tableWhitelist.isEmpty()) {
return true;
}
for (String tableName : tableWhitelist) {
TableId table = dbDialect.parseTableNameToTableId(tableName);
if ((table.catalogName() != null) && table.catalogName().equals(tableId.catalogName())) {
return true;
}
if (table.tableName().equals(tableId.tableName())) {
return true;
}
}
return false;
} | 3.26 |
rocketmq-connect_AbstractLocalSchemaRegistryClient_autoRegisterSchema_rdh | /**
* auto register schema
*
* @param subject
* @param schemaName
* @param request
*/
protected SchemaResponse autoRegisterSchema(String namespace, String subject, String schemaName, RegisterSchemaRequest request, ParsedSchema schema) {
try {
if (checkSubjectExists(namespace, subject)) {
// Get all version schema record
List<SchemaRecordDto> schemaRecordAllVersion = this.schemaRegistryClient.getSchemaListBySubject(cluster, namespace, subject);
return compareAndGet(namespace, subject, schemaName, request, schemaRecordAllVersion, schema);
} else {
RegisterSchemaResponse registerSchemaResponse = this.schemaRegistryClient.registerSchema(cluster, namespace, subject, schemaName, request);
return SchemaResponse.builder().subjectName(subject).schemaName(schemaName).recordId(registerSchemaResponse.getRecordId()).build();
}
} catch (RestClientException | IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
rocketmq-connect_AbstractLocalSchemaRegistryClient_compareAndGet_rdh | /**
* compare and get
*
* @param subject
* @param schemaName
* @param request
* @param schemaRecordAllVersion
* @param schema
* @return
*/
protected SchemaResponse compareAndGet(String namespace, String subject, String schemaName, RegisterSchemaRequest request, List<SchemaRecordDto> schemaRecordAllVersion, ParsedSchema schema) {
SchemaRecordDto matched = compareAndGet(schemaRecordAllVersion, schemaName, schema);
if (matched != null) {
GetSchemaResponse getSchemaResponse = new GetSchemaResponse();
getSchemaResponse.setRecordId(matched.getRecordId());
return SchemaResponse.builder().subjectName(getSchemaResponse.getSubjectFullName()).schemaName(getSchemaResponse.getSchemaFullName()).recordId(getSchemaResponse.getRecordId()).idl(request.getSchemaIdl()).build();
}
// match is null
UpdateSchemaRequest updateSchemaRequest = UpdateSchemaRequest.builder().schemaIdl(request.getSchemaIdl()).desc(request.getDesc()).owner(request.getOwner()).build();
try {
UpdateSchemaResponse updateSchemaResponse = schemaRegistryClient.updateSchema(cluster, namespace, subject, schemaName, updateSchemaRequest);
GetSchemaResponse getSchemaResponse = new GetSchemaResponse();
getSchemaResponse.setRecordId(updateSchemaResponse.getRecordId());
return SchemaResponse.builder().subjectName(subject).schemaName(schemaName).recordId(updateSchemaResponse.getRecordId()).build();
} catch (RestClientException | IOException e) {
throw new RuntimeException(e);
}
} | 3.26 |
rocketmq-connect_AbstractLocalSchemaRegistryClient_checkSubjectExists_rdh | /**
* check subject exists
*
* @param subject
* @return
*/
public Boolean checkSubjectExists(String namespace, String subject) {
try {
schemaRegistryClient.getSchemaBySubject(cluster, namespace, subject);
return Boolean.TRUE;
} catch (RestClientException | IOException e) {
if (e instanceof RestClientException) {
return Boolean.FALSE;
} else {
throw new RuntimeException(e);
}
}
} | 3.26 |
rocketmq-connect_AbstractLocalSchemaRegistryClient_autoRegisterOrGetSchema_rdh | /**
* Get registry schema by specify policy
*
* @param subject
* @param schemaName
* @param request
* @return
*/
public SchemaResponse autoRegisterOrGetSchema(String namespace, String subject, String schemaName, RegisterSchemaRequest request, ParsedSchema schema) {
if (autoRegisterSchemas) {
return this.autoRegisterSchema(namespace, subject, schemaName, request, schema);
} else if (serdeSchemaRegistryId != null) {
throw new RuntimeException("Getting schema based on ID is not supported temporarily");
} else {
GetSchemaResponse response = getSchemaLatestVersion(namespace, subject);
return SchemaResponse.builder().subjectName(response.getSubjectFullName()).schemaName(response.getSchemaFullName()).recordId(response.getRecordId()).idl(response.getIdl()).build();
}
} | 3.26 |
rocketmq-connect_AbstractLocalSchemaRegistryClient_getSchemaLatestVersion_rdh | /**
* Get schema latest version
*
* @param subject
* @return
*/
public GetSchemaResponse getSchemaLatestVersion(String namespace, String subject) {
try {
return schemaRegistryClient.getSchemaBySubject(cluster, namespace, subject);
} catch (RestClientException | IOException e) {
if (e instanceof RestClientException) {
return null;
} else {
throw new RuntimeException(e);
}
}
} | 3.26 |
rocketmq-connect_FileAndPropertyUtil_string2File_rdh | /**
* Store the string to a file.
*
* @param str
* @param fileName
* @throws IOException
*/
public static void string2File(final String str, final String fileName) throws IOException {
synchronized(fileName) {
String tmpFile = fileName + ".tmp";
string2FileNotSafe(str, tmpFile);
String bakFile = fileName + ".bak";
String prevContent = file2String(fileName);
if (prevContent != null) {
string2FileNotSafe(prevContent, bakFile);
}
File file = new File(fileName);
file.delete();
file = new File(tmpFile);
file.renameTo(new File(fileName));
}
} | 3.26 |
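The write path above follows the classic tmp-then-rename pattern: the new content goes to `<file>.tmp`, the previous content is copied to `<file>.bak`, and only then is the original replaced, so a crash at any point leaves at least one readable copy. A minimal usage sketch, assuming the enclosing FileAndPropertyUtil class; the path and payload are illustrative:

```java
import java.io.IOException;

public class SafeWriteSketch {
    public static void main(String[] args) throws IOException {
        // Persist a small piece of state; after a crash mid-write, either the
        // target file, the ".tmp" file, or the ".bak" copy remains recoverable.
        FileAndPropertyUtil.string2File("nextPosition=42", "/tmp/connect-demo/position.properties");
    }
}
```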
rocketmq-connect_AbstractConfigManagementService_pauseConnector_rdh | /**
* pause connector
*
* @param connectorName
*/
@Override
public void pauseConnector(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException(("Connector [" + connectorName) + "] does not exist");
}
Struct connectTargetState = new Struct(TARGET_STATE_V0);
connectTargetState.put(FIELD_STATE, TargetState.PAUSED.name());
connectTargetState.put(FIELD_EPOCH, System.currentTimeMillis());
byte[] serializedTargetState = converter.fromConnectData(topic, TARGET_STATE_V0, connectTargetState);
log.debug("Writing target state {} for connector {}", TargetState.PAUSED.name(), connectorName);notify(TARGET_STATE_KEY(connectorName), serializedTargetState);
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_triggerListener_rdh | /**
* trigger listener
*/
@Override
public void triggerListener() {
if (null == this.connectorConfigUpdateListener) {
return;
}
for (ConnectorConfigUpdateListener listener : this.connectorConfigUpdateListener) {
listener.onConfigUpdate();
}
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_m0_rdh | /**
* remove and add
*
* @param connectorName
* @param configs
*/
protected void m0(String connectorName, List<ConnectKeyValue> configs) {
List<ConnectKeyValue> exist = taskKeyValueStore.get(connectorName);
if ((null != exist) && (exist.size() > 0)) {
taskKeyValueStore.remove(connectorName);
}
taskKeyValueStore.put(connectorName, configs);
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_mergeConnectConfig_rdh | /**
* Merge new received configs with the configs in memory.
*
* @param connectName
* @param schemaAndValue
* @return
*/
private boolean mergeConnectConfig(String connectName, SchemaAndValue schemaAndValue) {
Struct value = ((Struct) (schemaAndValue.value()));
Object targetState = value.get(FIELD_STATE);
if (!(targetState instanceof String)) {
// target state
log.error("Invalid data for target state for connector '{}': 'state' field should be a String but is {}", connectName, className(targetState));
return false;
}
Object epoch = value.get(FIELD_EPOCH);
if (!(epoch instanceof Long)) {
// epoch
log.error("Invalid data for epoch for connector '{}': 'epoch' field should be a Long but is {}", connectName, className(epoch));
return false;
}
Object props = value.get(FIELD_PROPS);
if (!(props instanceof Map)) {
// properties
log.error("Invalid data for properties for connector '{}': 'state' field should be a Map but is {}", connectName, className(props));
return false;
}
// new configs
ConnectKeyValue newConfig = new ConnectKeyValue();
newConfig.setEpoch(((Long) (epoch)));
newConfig.setTargetState(TargetState.valueOf(((String) (targetState))));
newConfig.setProperties(((Map<String, String>) (props)));
// not exist
if (!connectorKeyValueStore.containsKey(connectName)) {
connectorKeyValueStore.put(connectName, newConfig);
recomputeTaskConfigs(connectName, newConfig);
return true;
}
// exist and update config
ConnectKeyValue oldConfig = connectorKeyValueStore.get(connectName);
if (!newConfig.equals(oldConfig)) {
// compare and swap
if (newConfig.getEpoch() > oldConfig.getEpoch()) {
connectorKeyValueStore.put(connectName, newConfig);
recomputeTaskConfigs(connectName, newConfig);
}
return true;
}
return false;
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_processTargetStateRecord_rdh | /**
* process target state record
*
* @param connectorName
* @param schemaAndValue
*/
private void processTargetStateRecord(String connectorName, SchemaAndValue schemaAndValue) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
return;
}
Struct struct = ((Struct) (schemaAndValue.value()));
Object targetState = struct.get(FIELD_STATE);
if (!(targetState instanceof String)) {
// target state
log.error("Invalid data for target state for connector '{}': 'state' field should be a String but is {}", connectorName, className(targetState));
return;
}
Object epoch = struct.get(FIELD_EPOCH);
if (!(epoch instanceof Long)) {
// epoch
log.error("Invalid data for epoch for connector '{}': 'epoch' field should be a Long but is {}", connectorName, className(epoch));
return;
}
ConnectKeyValue oldConfig = connectorKeyValueStore.get(connectorName);
// config update
if (((Long) (epoch)) > oldConfig.getEpoch()) {
TargetState newTargetState = TargetState.valueOf(targetState.toString());
oldConfig.setTargetState(newTargetState);
// rebalance
triggerListener();
}
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_processDeleteConnectorRecord_rdh | /**
* process deleted
*
* @param connectorName
* @param schemaAndValue
*/
private void processDeleteConnectorRecord(String connectorName, SchemaAndValue schemaAndValue) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
return;
}
Struct value = ((Struct) (schemaAndValue.value()));
Object epoch = value.get(FIELD_EPOCH);
// validate
ConnectKeyValue oldConfig = connectorKeyValueStore.get(connectorName);
// config update
if (((Long) (epoch)) > oldConfig.getEpoch()) {
// remove
connectorKeyValueStore.remove(connectorName);
taskKeyValueStore.remove(connectorName);
// rebalance
triggerListener();
}
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_processConnectorConfigRecord_rdh | /**
* process connector config record
*
* @param connectorName
* @param schemaAndValue
*/
private void processConnectorConfigRecord(String connectorName, SchemaAndValue schemaAndValue) {
if (mergeConnectConfig(connectorName, schemaAndValue)) {
// rebalance for connector
triggerListener();
}
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_processTaskConfigRecord_rdh | /**
* process task config record
*
* @param taskId
* @param schemaAndValue
*/
private void processTaskConfigRecord(ConnectorTaskId taskId, SchemaAndValue schemaAndValue) {
// No-op [Wait for implementation]
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_deleteConnectorConfig_rdh | /**
* delete config
*
* @param connectorName
*/
@Override
public void deleteConnectorConfig(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException(("Connector [" + connectorName) + "] does not exist");
}
// new struct
Struct struct = new Struct(CONNECTOR_DELETE_CONFIGURATION_V1);
struct.put(FIELD_EPOCH, System.currentTimeMillis());
struct.put(FIELD_DELETED, true);
byte[] config = converter.fromConnectData(topic, CONNECTOR_DELETE_CONFIGURATION_V1, struct);
notify(TARGET_STATE_KEY(connectorName), config);
} | 3.26 |
rocketmq-connect_AbstractConfigManagementService_process_rdh | // ======= Start receives the config message and transforms the storage ======
protected void process(String key, SchemaAndValue schemaAndValue) {
if (key.startsWith(TARGET_STATE_PREFIX)) {
// target state listener
String connectorName = key.substring(TARGET_STATE_PREFIX.length());
if (schemaAndValue.schema().equals(CONNECTOR_DELETE_CONFIGURATION_V1)) {
processDeleteConnectorRecord(connectorName, schemaAndValue);
} else {
processTargetStateRecord(connectorName, schemaAndValue);
}
} else if (key.startsWith(CONNECTOR_PREFIX)) {
// connector config update
String connectorName = key.substring(CONNECTOR_PREFIX.length());
processConnectorConfigRecord(connectorName, schemaAndValue);
} else if (key.startsWith(TASK_PREFIX)) {
// task config update
ConnectorTaskId taskId = parseTaskId(key);
if (taskId == null) {
log.error("Ignoring task configuration because {} couldn't be parsed as a task config key", key);
return;
}
processTaskConfigRecord(taskId, schemaAndValue);
} else if (key.startsWith(DELETE_CONNECTOR_PREFIX)) {
// delete connector[ Compatible with V0 ]
String connectorName = key.substring(DELETE_CONNECTOR_PREFIX.length());
processDeleteConnectorRecord(connectorName, schemaAndValue);
} else {
log.warn("Discarding config update record with invalid key: {}", key);
}
} | 3.26 |
rocketmq-connect_WrapperStatusListener_onStartup_rdh | /**
* Invoked after successful startup of the task.
*
* @param id
* The id of the task
*/
@Override
public void onStartup(ConnectorTaskId id) {
managementService.put(new TaskStatus(id, State.RUNNING, workerId, generation()));
} | 3.26 |
rocketmq-connect_WrapperStatusListener_onPause_rdh | /**
* Invoked after the task has been paused.
*
* @param id
* The id of the task
*/
@Override
public void onPause(ConnectorTaskId id) {
managementService.put(new TaskStatus(id, State.PAUSED, workerId, generation()));
} | 3.26 |
rocketmq-connect_WrapperStatusListener_onResume_rdh | /**
* Invoked after the task has been resumed.
*
* @param id
* The id of the task
*/
@Override
public void onResume(ConnectorTaskId id) {
managementService.put(new TaskStatus(id, State.RUNNING, workerId, generation()));
} | 3.26 |
rocketmq-connect_WrapperStatusListener_onFailure_rdh | /**
* Invoked if the task raises an error. No shutdown event will follow.
*
* @param id
* The id of the task
* @param cause
* The error raised by the task.
*/
@Override
public void onFailure(ConnectorTaskId id, Throwable cause) {
managementService.putSafe(new TaskStatus(id, State.FAILED, workerId, generation(), trace(cause)));
} | 3.26 |
rocketmq-connect_WrapperStatusListener_onShutdown_rdh | /**
* Invoked after successful shutdown of the task.
*
* @param id
* The id of the task
*/
@Override
public void onShutdown(ConnectorTaskId id) {
managementService.putSafe(new TaskStatus(id, State.UNASSIGNED, workerId, generation()));
} | 3.26 |
rocketmq-connect_WrapperStatusListener_onDeletion_rdh | /**
* Invoked after the task has been deleted. Can be called if the
* connector tasks have been reduced, or if the connector itself has
* been deleted.
*
* @param id
* The id of the task
*/
@Override
public void onDeletion(ConnectorTaskId id) {
managementService.put(new TaskStatus(id, State.DESTROYED, workerId, generation()));
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_get_rdh | /**
* Get the current state of the connector.
*
* @param connector
* the connector name
* @return the state or null if there is none
*/
@Override
public ConnectorStatus get(String connector) {
ConnAndTaskStatus.CacheEntry<ConnectorStatus> cacheEntry = f1.getConnectors().get(connector);
if (cacheEntry == null) {
return null;
}
return cacheEntry.get();
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_initialize_rdh | /**
* initialize cb config
*
* @param config
*/
@Override
public void initialize(WorkerConfig config, RecordConverter converter) {
// set config
this.converter = converter;
this.converter.configure(new HashMap<>());
this.statusTopic = config.getConnectStatusTopic();
this.dataSynchronizer = initializationDataSynchronizer(config);
new BrokerBasedLog(config, this.statusTopic, ConnectUtil.createGroupName(statusManagePrefix, config.getWorkerId()), new StatusChangeCallback(), Serdes.serdeFrom(String.class), Serdes.serdeFrom(byte[].class), enabledCompactTopic());
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_readTaskStatus_rdh | /**
* read task status
*
* @param key
* @param value
*/
protected void readTaskStatus(String key, byte[] value) {
ConnectorTaskId id = parseConnectorTaskId(key);
if (id == null) {
log.warn("Received record with invalid task status key {}", key);
return;
}
TaskStatus status = parseTaskStatus(id, value);
if ((status == null) || (State.DESTROYED == status.getState())) {
log.trace("Removing task status for {}", id);
remove(id.connector());
return;
}
synchronized(this) {
log.trace("Received task {} status update {}", id, status);
ConnAndTaskStatus.CacheEntry<TaskStatus> entry = f1.getOrAdd(id);
if (entry.get() != null) {
if (status.getGeneration() > entry.get().getGeneration()) {
entry.put(status);
}
} else {
entry.put(status);
}
}
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_getAll_rdh | /**
* Get the states of all tasks for the given connector.
*
* @param connector
* the connector name
* @return a map from task ids to their respective status
*/
@Override
public Collection<TaskStatus> getAll(String connector) {
List<TaskStatus> res = new ArrayList<>();
for (ConnAndTaskStatus.CacheEntry<TaskStatus> statusEntry : f1.getTasks().row(connector).values()) {
TaskStatus status = statusEntry.get();
if (status != null) {
res.add(status);
}
}
return res;
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_readConnectorStatus_rdh | /**
* read connector status
*
* @param key
* @param value
*/
protected void readConnectorStatus(String key, byte[] value) {
String connector = parseConnectorStatusKey(key);
if (connector.isEmpty()) {
log.warn("Discarding record with invalid connector status key {}", key);
return;
}
ConnectorStatus status = parseConnectorStatus(connector, value);
if ((status == null) || (State.DESTROYED == status.getState())) {
log.trace("Removing connector status for {}", connector);
remove(connector);
return;
}
synchronized(this) {
log.trace("Received connector {} status update {}", connector, status);
ConnAndTaskStatus.CacheEntry<ConnectorStatus> entry = f1.getOrAdd(connector);
if (entry.get() != null) {
if (status.getGeneration() > entry.get().getGeneration()) {
entry.put(status);
}
} else {
entry.put(status);
}
}
} | 3.26 |
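Both readTaskStatus and readConnectorStatus above resolve races between workers the same way: an incoming status replaces the cached entry only when its generation is strictly newer. A standalone sketch of that last-writer-wins rule, with hypothetical names (StatusCell is not part of the codebase):

```java
// Last-writer-wins by generation, mirroring readTaskStatus/readConnectorStatus.
final class StatusCell<T> {
    private T value;
    private long generation = Long.MIN_VALUE;

    // Accept only strictly newer generations; stale updates are dropped.
    synchronized void offer(T candidate, long candidateGeneration) {
        if (value == null || candidateGeneration > generation) {
            value = candidate;
            generation = candidateGeneration;
        }
    }

    synchronized T get() {
        return value;
    }
}
```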
rocketmq-connect_AbstractStateManagementService_putSafe_rdh | /**
* Safely set the state of the task to the given value. What is considered "safe" depends on the implementation, but
* basically it means that the store can provide higher assurance that another worker hasn't concurrently written
* any conflicting data.
*
* @param status
* the status of the task
*/
@Override
public void putSafe(TaskStatus status) {
sendTaskStatus(status, true);
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_connectors_rdh | /**
* Get all cached connectors.
*
* @return the set of connector names
*/
@Override
public Set<String> connectors() {
return new HashSet<>(f1.getConnectors().keySet());
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_put_rdh | /**
* Set the state of the task to the given value.
*
* @param status
* the status of the task
*/
@Override
public void put(TaskStatus status) {
sendTaskStatus(status, false);
} | 3.26 |
rocketmq-connect_AbstractStateManagementService_remove_rdh | /**
* remove connector
*
* @param connector
*/
private synchronized void remove(String connector) {
ConnAndTaskStatus.CacheEntry<ConnectorStatus> removed = f1.getConnectors().remove(connector);
if (removed != null) {
removed.delete();
}
Map<Integer, ConnAndTaskStatus.CacheEntry<TaskStatus>> tasks = f1.getTasks().remove(connector);
if (tasks != null) {
for (ConnAndTaskStatus.CacheEntry<TaskStatus> taskEntry : tasks.values()) {
taskEntry.delete();
}
}
} | 3.26 |
rocketmq-connect_RocketMqStateManagementServiceImpl_replicaTargetState_rdh | /**
* sync send online config
*/
@Override
protected void replicaTargetState() {
// No-op
} | 3.26 |
rocketmq-connect_RocketMqStateManagementServiceImpl_initialize_rdh | /**
* initialize cb config
*
* @param config
*/
@Override
public void initialize(WorkerConfig config, RecordConverter converter) {
super.initialize(config, converter);
/**
* connector status store
*/
this.connectorStatusStore = new MemoryBasedKeyValueStore<>();
/**
* task status store
*/
this.taskStatusStore = new MemoryBasedKeyValueStore<>();
} | 3.26 |
rocketmq-connect_RocketMQSourceValueConverter_convertStructValue_rdh | /**
* convert struct value
*
* @param toStruct
* @param originalStruct
*/
private void convertStructValue(Struct toStruct, Struct originalStruct) {
for (Field field : toStruct.schema().getFields()) {
try {
FieldType type = field.getSchema().getFieldType();
Object value = originalStruct.get(field.getName());
switch (type) {
case INT8 :
case INT16 :
case INT32 :
case INT64 :
case FLOAT32 :
case FLOAT64 :
case BOOLEAN :
case STRING :
case BYTES :
toStruct.put(field.getName(), value);
break;
case STRUCT :
case ARRAY :
case MAP :
toStruct.put(field.getName(), convertKafkaValue(toStruct.schema().getField(field.getName()).getSchema(), value));
break;
}
} catch (Exception ex) {
logger.error("Convert schema failure! ex {}", ex);
throw
new ConnectException(ex);
}
}
} | 3.26 |
rocketmq-connect_RocketMQSourceValueConverter_convertKafkaValue_rdh | /**
* convert value
*
* @param targetSchema
* @param originalValue
* @return
*/
private Object convertKafkaValue(Schema targetSchema, Object originalValue) {
if (targetSchema == null) {
if (originalValue == null) {
return null;
}
return originalValue;
}
switch (targetSchema.getFieldType()) {
case INT8 :
case INT16 :
case INT32 :
case INT64 :
case FLOAT32 :
case FLOAT64 :
case BOOLEAN :
case STRING :
case BYTES :
return originalValue;
case STRUCT :
Struct toStruct = new Struct(targetSchema);
if (originalValue != null) {
convertStructValue(toStruct, ((Struct) (originalValue)));
}
return toStruct;
case ARRAY :
List<Object> array = ((List<Object>) (originalValue));
List<Object> newArray = new ArrayList<>();
array.forEach(item -> {
newArray.add(convertKafkaValue(targetSchema.getValueSchema(), item));
});
return newArray;
case MAP :
Map mapData = ((Map) (originalValue));
Map newMapData = new ConcurrentHashMap();
mapData.forEach((k, v) -> {
newMapData.put(convertKafkaValue(targetSchema.getKeySchema(), k), convertKafkaValue(targetSchema.getValueSchema(), v));
});
return newMapData;
default :
throw new RuntimeException(" Type not supported: {}" + targetSchema.getFieldType());
}
} | 3.26 |
rocketmq-connect_FileSinkConnector_start_rdh | /**
* Start the component
*
* @param config
* component context
*/
@Override
public void start(KeyValue config) {
this.config = config;
} | 3.26 |
rocketmq-connect_JdbcSinkTask_start_rdh | /**
* Start the component
*
* @param keyValue
*/
@Override
public void start(KeyValue keyValue) {
originalConfig = keyValue;
config = new JdbcSinkConfig(keyValue);
remainingRetries = config.getMaxRetries();
this.dialect = DatabaseDialectLoader.getDatabaseDialect(config);
log.info("Initializing writer using SQL dialect: {}", dialect.getClass().getSimpleName());
this.jdbcWriter = new JdbcWriter(config, dialect);
} | 3.26 |
rocketmq-connect_ConnAndTaskStatus_delete_rdh | /**
* if it has been deleted, it is meaningless to send it again
*/
public void delete() {
this.deleted = true;
} | 3.26 |
rocketmq-connect_ConnectUtil_initDefaultLitePullConsumer_rdh | /**
* init default lite pull consumer
*
* @param connectConfig
* @return
* @throws MQClientException
*/
public static DefaultLitePullConsumer initDefaultLitePullConsumer(WorkerConfig connectConfig, boolean autoCommit) {
DefaultLitePullConsumer consumer;
if (StringUtils.isBlank(connectConfig.getAccessKey()) && StringUtils.isBlank(connectConfig.getSecretKey())) {
consumer = new DefaultLitePullConsumer();
} else {
consumer = new DefaultLitePullConsumer(getAclRPCHook(connectConfig.getAccessKey(), connectConfig.getSecretKey()));
}
consumer.setNamesrvAddr(connectConfig.getNamesrvAddr());
String uniqueName = (Thread.currentThread().getName() + "-") + (System.currentTimeMillis() % 1000);
consumer.setInstanceName(uniqueName);
consumer.setUnitName(uniqueName);
consumer.setAutoCommit(autoCommit);
return consumer;
} | 3.26 |
rocketmq-connect_ConnectUtil_m0_rdh | /**
* Flat topics offsets
*/
public static Map<MessageQueue, TopicOffset> m0(WorkerConfig config, List<String> topics) {
Map<MessageQueue, TopicOffset> messageQueueTopicOffsets = Maps.newConcurrentMap();
offsetTopics(config, topics).values().forEach(offsetTopic -> {
messageQueueTopicOffsets.putAll(offsetTopic);
});
return messageQueueTopicOffsets;
} | 3.26 |
rocketmq-connect_ConnectUtil_currentOffsets_rdh | /**
* Get consumer group offset
*/
public static Map<MessageQueue, Long> currentOffsets(WorkerConfig config, String groupName, List<String> topics, Set<MessageQueue> messageQueues) {
// Get consumer group offset
DefaultMQAdminExt adminClient = null;
try {
adminClient = startMQAdminTool(config);
Map<MessageQueue, OffsetWrapper> consumerOffsets = Maps.newConcurrentMap();
for (String topic : topics) {
ConsumeStats consumeStats = adminClient.examineConsumeStats(groupName, topic);
consumerOffsets.putAll(consumeStats.getOffsetTable());
}
return consumerOffsets.keySet().stream().filter(messageQueue -> messageQueues.contains(messageQueue)).collect(Collectors.toMap(messageQueue -> messageQueue, messageQueue -> consumerOffsets.get(messageQueue).getConsumerOffset()));
} catch (MQClientException | MQBrokerException | RemotingException | InterruptedException e) {
if (e instanceof MQClientException) {
if (((MQClientException) (e)).getResponseCode() == ResponseCode.TOPIC_NOT_EXIST) {
return Collections.emptyMap();
} else {
throw new RuntimeException(e);
}
} else {
throw new RuntimeException(e);
}
} finally {
if (adminClient != null) {
adminClient.shutdown();
}
}
} | 3.26 |
rocketmq-connect_ConnectUtil_convertToMessageQueue_rdh | /**
* convert to message queue
*
* @param recordPartition
* @return
*/
public static MessageQueue convertToMessageQueue(RecordPartition recordPartition) {
Map<String, ?> partition = recordPartition.getPartition();
String topic = partition.get("topic").toString();
String brokerName = partition.get("brokerName").toString();
int queueId = partition.containsKey("queueId") ? Integer.parseInt(partition.get("queueId").toString()) : 0;
return new MessageQueue(topic, brokerName, queueId);
} | 3.26 |
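A quick illustration of the partition map convertToMessageQueue expects; the "topic", "brokerName", and "queueId" keys come straight from the method body, while the values and the RecordPartition constructor usage are illustrative assumptions:

```java
import io.openmessaging.connector.api.data.RecordPartition;
import org.apache.rocketmq.common.message.MessageQueue;

import java.util.HashMap;
import java.util.Map;

public class PartitionMapSketch {
    public static void main(String[] args) {
        // Build the partition map the converter reads from (values made up).
        Map<String, String> partition = new HashMap<>();
        partition.put("topic", "TEST_TOPIC");
        partition.put("brokerName", "broker-a");
        partition.put("queueId", "3");

        // Assumption: RecordPartition wraps this map and exposes it via getPartition().
        // A missing "queueId" would default to queue 0.
        MessageQueue mq = ConnectUtil.convertToMessageQueue(new RecordPartition(partition));
        System.out.println(mq); // MessageQueue [topic=TEST_TOPIC, brokerName=broker-a, queueId=3]
    }
}
```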
rocketmq-connect_ConnectUtil_offsetTopics_rdh | /**
* Get topic offsets
*/
public static Map<String, Map<MessageQueue, TopicOffset>> offsetTopics(WorkerConfig config, List<String> topics) {
Map<String, Map<MessageQueue, TopicOffset>> offsets = Maps.newConcurrentMap();
DefaultMQAdminExt adminClient = null;
try {
adminClient = startMQAdminTool(config);
for (String topic : topics) {
TopicStatsTable topicStatsTable = adminClient.examineTopicStats(topic);
offsets.put(topic, topicStatsTable.getOffsetTable());
}
return offsets;
} catch (MQClientException | MQBrokerException | RemotingException | InterruptedException e) {
throw new RuntimeException(e);
} finally {
if (adminClient != null) {
adminClient.shutdown();
}
}
} | 3.26 |
rocketmq-connect_ConnectUtil_searchOffsetsByTimestamp_rdh | /**
* Search offsets by timestamp
*/
public static Map<MessageQueue, Long> searchOffsetsByTimestamp(WorkerConfig config, Collection<MessageQueue> messageQueues, Long timestamp) {
Map<MessageQueue, Long> offsets = Maps.newConcurrentMap();
DefaultMQAdminExt adminClient = null;
try {
adminClient = startMQAdminTool(config);
for (MessageQueue messageQueue : messageQueues) {
long offset = adminClient.searchOffset(messageQueue, timestamp);
offsets.put(messageQueue, offset);
}
return offsets;
} catch (MQClientException e) {
throw new RuntimeException(e);
} finally {
if (adminClient != null) {
adminClient.shutdown();
}
}
} | 3.26 |
rocketmq-connect_ConnectMetrics_templates_rdh | /**
* get connect metrics template
*
* @return
*/
public ConnectMetricsTemplates templates() {
return templates;
} | 3.26 |
rocketmq-connect_ConnectMetrics_registry_rdh | /**
* get metric registry
*
* @return
*/
public MetricRegistry registry() {
return metricRegistry;
} | 3.26 |
rocketmq-connect_ConnectMetrics_group_rdh | /**
* get metrics group
*
* @param tagKeyValues
* @return
*/
public MetricGroup group(String... tagKeyValues) {
return new MetricGroup(getTags(tagKeyValues));
} | 3.26 |
rocketmq-connect_RocketMqAdminUtil_offsets_rdh | /**
* Get topic offsets
*
* @param config
* @param topic
* @return
*/
public static Map<MessageQueue, TopicOffset> offsets(RocketMqConfig config, String topic) {
// Get db schema topic min and max offset
DefaultMQAdminExt adminClient = null;
try {
adminClient = RocketMqAdminUtil.startMQAdminTool(config);
TopicStatsTable topicStatsTable = adminClient.examineTopicStats(topic);
return topicStatsTable.getOffsetTable();
} catch (MQClientException | MQBrokerException | RemotingException | InterruptedException e) {
throw new RuntimeException(e);
} finally {
if (adminClient != null) {
adminClient.shutdown();
}
}
} | 3.26 |
rocketmq-connect_RocketMqAdminUtil_createTopic_rdh | /**
* Create rocketMq topic
*
* @param config
* @param topicConfig
*/
public static void createTopic(RocketMqConfig config, TopicConfig topicConfig) {
DefaultMQAdminExt defaultMQAdminExt = null;
try {
defaultMQAdminExt = startMQAdminTool(config);
ClusterInfo clusterInfo = defaultMQAdminExt.examineBrokerClusterInfo();
HashMap<String, Set<String>> clusterAddrTable = clusterInfo.getClusterAddrTable();
Set<String> clusterNameSet = clusterAddrTable.keySet();
for (String clusterName : clusterNameSet) {
Set<String> masterSet = CommandUtil.fetchMasterAddrByClusterName(defaultMQAdminExt, clusterName);
for (String masterAddr : masterSet) {
defaultMQAdminExt.createAndUpdateTopicConfig(masterAddr, topicConfig);
}
}
} catch (Exception e) {
throw new RuntimeException((("RocketMq create schema history topic: " + topicConfig.getTopicName()) + " ") + " failed", e);
} finally {
if (defaultMQAdminExt != null) {
defaultMQAdminExt.shutdown();
}
}
} | 3.26 |
rocketmq-connect_RocketMqAdminUtil_topicExist_rdh | /**
* check topic exist
*
* @param config
* @param topic
* @return
*/
public static boolean topicExist(RocketMqConfig config, String topic) {
DefaultMQAdminExt defaultMQAdminExt = null;
boolean foundTopicRouteInfo = false;
try {
defaultMQAdminExt = startMQAdminTool(config);
TopicRouteData topicRouteData = defaultMQAdminExt.examineTopicRouteInfo(topic);
if (topicRouteData != null) {
foundTopicRouteInfo = true;
}
} catch (Exception e) {
foundTopicRouteInfo = false;
} finally {
if (defaultMQAdminExt != null) {
defaultMQAdminExt.shutdown();
}
}
return foundTopicRouteInfo;
} | 3.26 |
rocketmq-connect_RocketMqAdminUtil_initDefaultLitePullConsumer_rdh | /**
* init default lite pull consumer
*
* @param config
* @param autoCommit
* @return
* @throws MQClientException
*/
public static DefaultLitePullConsumer initDefaultLitePullConsumer(RocketMqConfig config, boolean autoCommit) throws MQClientException {
DefaultLitePullConsumer consumer;
if (StringUtils.isBlank(config.getAccessKey()) && StringUtils.isBlank(config.getSecretKey())) {
consumer = new DefaultLitePullConsumer(config.getGroupId());
} else {
consumer = new DefaultLitePullConsumer(config.getGroupId(), getAclRPCHook(config.getAccessKey(), config.getSecretKey()));
}
consumer.setNamesrvAddr(config.getNamesrvAddr());
String uniqueName = createUniqInstance(config.getNamesrvAddr());
consumer.setInstanceName(uniqueName);
consumer.setUnitName(uniqueName);
consumer.setAutoCommit(autoCommit);
consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
return consumer;
} | 3.26 |
rocketmq-connect_AbstractConfig_getList_rdh | /**
* get list
*
* @param config
* @param key
* @return */
protected List<String> getList(KeyValue config, String key, String defaultValue) {
if (!config.containsKey(key) || Objects.isNull(config.getString(key))) {
return Collections.singletonList(defaultValue);
}
return Arrays.asList(COMMA_WITH_WHITESPACE.split(config.getString(key), -1));
} | 3.26 |
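With the containsKey condition fixed above, the helper's contract is: a missing key (or null value) yields a singleton list of the default, otherwise the value is split on commas with surrounding whitespace. A small sketch of the splitting half, assuming COMMA_WITH_WHITESPACE is a pattern along the lines of `\s*,\s*`:

```java
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class GetListSketch {
    // Assumption: mirrors the COMMA_WITH_WHITESPACE pattern used by getList.
    private static final Pattern COMMA_WITH_WHITESPACE = Pattern.compile("\\s*,\\s*");

    public static void main(String[] args) {
        List<String> topics = Arrays.asList(COMMA_WITH_WHITESPACE.split("topicA, topicB ,topicC", -1));
        System.out.println(topics); // [topicA, topicB, topicC]
        // A missing key would instead yield Collections.singletonList(defaultValue).
    }
}
```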
rocketmq-connect_WorkerTask_initialize_rdh | /**
* Initialize the task for execution.
*
* @param taskConfig
* initial configuration
*/
protected void initialize(ConnectKeyValue taskConfig) {
// NO-op
} | 3.26 |
rocketmq-connect_WorkerTask_transitionTo_rdh | /**
* change task target state
*
* @param state
*/
public void transitionTo(TargetState state) {
synchronized(this) {
// ignore the state change if we are stopping
if (isStopping()) {
return;
}
// not equal set
if (this.targetState != state) {
this.targetState = state;
// notify thread continue run
this.notifyAll();
}
}
} | 3.26 |
rocketmq-connect_WorkerTask_recordMultiple_rdh | /**
* batch record
*
* @param size
*/
protected void recordMultiple(int size) {
taskMetricsGroup.recordMultiple(size);
} | 3.26 |
rocketmq-connect_WorkerTask_cleanup_rdh | /**
* clean up
*/
public void cleanup() {
log.info("Cleaning a task, current state {}, destination state {}", state.get().name(), WorkerTaskState.TERMINATED.name());
if (state.compareAndSet(WorkerTaskState.STOPPED, WorkerTaskState.TERMINATED) || state.compareAndSet(WorkerTaskState.ERROR, WorkerTaskState.TERMINATED)) {
log.info("Cleaning a task success");
} else {
log.error("[BUG] cleaning a task but it's not in STOPPED or ERROR state");
}
} | 3.26 |
rocketmq-connect_WorkerTask_currentTaskState_rdh | /**
* current task state
*
* @return
*/
public CurrentTaskState currentTaskState() {
return new CurrentTaskState(id().connector(), taskConfig, state.get());
} | 3.26 |
rocketmq-connect_WorkerTask_getState_rdh | /**
* get state
*
* @return
*/
public WorkerTaskState getState() {
return this.state.get();
} | 3.26 |
rocketmq-connect_WorkerTask_run_rdh | /**
* do execute data
*/
@Override
public void run() {
ClassLoader savedLoader = Plugin.compareAndSwapLoaders(loader);
String savedName = Thread.currentThread().getName();
try {
Thread.currentThread().setName(THREAD_NAME_PREFIX + id);
doRun();
} catch (InterruptedException e) {
// set interrupted flag for the caller
Thread.currentThread().interrupt();
} catch (Throwable t) {
onFailure(t);
throw t;
} finally {
Thread.currentThread().setName(savedName);
Plugin.compareAndSwapLoaders(savedLoader);
shutdownLatch.countDown();
}
} | 3.26 |
rocketmq-connect_WorkerTask_awaitUnpause_rdh | /**
* Await task resumption.
*
* @return true if the task's target state is not paused, false if the task is shutdown before resumption
* @throws InterruptedException
*/
protected boolean awaitUnpause() throws InterruptedException {
synchronized(this) {
while (targetState == TargetState.PAUSED) {
if (isStopping()) {
return false;
}
this.wait();
}
return true;
}
} | 3.26 |
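transitionTo and awaitUnpause above together form a classic monitor-based pause gate: the worker thread blocks in wait() while the target state is PAUSED, and a state change notifies it. A stripped-down sketch of the same pattern, with hypothetical names (PauseGate is not part of the codebase):

```java
// Minimal pause gate mirroring the transitionTo/awaitUnpause pattern.
public class PauseGate {
    private boolean paused = false;
    private boolean stopping = false;

    // Called from a control thread, like transitionTo(TargetState).
    public synchronized void setPaused(boolean value) {
        if (stopping) {
            return; // ignore state changes once stopping
        }
        if (paused != value) {
            paused = value;
            notifyAll(); // wake the worker thread blocked in awaitUnpause()
        }
    }

    public synchronized void stop() {
        stopping = true;
        notifyAll();
    }

    // Called from the worker thread, like awaitUnpause().
    // Returns true when unpaused, false when shut down while paused.
    public synchronized boolean awaitUnpause() throws InterruptedException {
        while (paused) {
            if (stopping) {
                return false;
            }
            wait();
        }
        return true;
    }
}
```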
rocketmq-connect_TableDefinitions_get_rdh | /**
* Get the {@link TableDefinition} for the given table.
*
* @param connection
* the JDBC connection to use; may not be null
* @param tableId
* the table identifier; may not be null
* @return the cached {@link TableDefinition}, or null if there is no such table
* @throws SQLException
* if there is any problem using the connection
*/
public TableDefinition get(Connection connection, final TableId tableId) throws SQLException {
TableDefinition dbTable = cache.get(tableId);
if (dbTable == null) {
if (dialect.tableExists(connection, tableId)) {
dbTable = dialect.describeTable(connection, tableId);
if (dbTable != null) {
log.info("Setting metadata for table {} to {}", tableId, dbTable);cache.put(tableId, dbTable);
}}
}
return dbTable;
} | 3.26 |
rocketmq-connect_TableDefinitions_refresh_rdh | /**
* Refresh the cached {@link TableDefinition} for the given table.
*
* @param connection
* the JDBC connection to use; may not be null
* @param tableId
* the table identifier; may not be null
* @return the refreshed {@link TableDefinition}, or null if there is no such table
* @throws SQLException
* if there is any problem using the connection
*/
public TableDefinition refresh(Connection connection, TableId tableId) throws SQLException {
TableDefinition dbTable = dialect.describeTable(connection, tableId);
if (dbTable != null) {
log.info("Refreshing metadata for table {} to {}", tableId, dbTable);
cache.put(dbTable.id(), dbTable);
} else {
log.warn("Failed to refresh metadata for table {}", tableId);
}
return dbTable;
} | 3.26 |
rocketmq-connect_Sensor_record_rdh | /**
* record value
*
* @param value
*/
public void record(long value) {
recordInternal(value);
} | 3.26 |
rocketmq-connect_AbstractKafkaConnectSink_put_rdh | /**
* Put the records to the sink
*
* @param records
* sink records
*/
@Override
public void put(List<ConnectRecord> records) {
// convert sink data
List<SinkRecord> sinkRecords = new ArrayList<>();
records.forEach(connectRecord -> {
SinkRecord record = this.processSinkRecord(connectRecord);
sinkRecords.add(this.transforms(record));
});
sinkTask.put(sinkRecords);
} | 3.26 |
rocketmq-connect_RedisEventListener_isUsefulEvent_rdh | /**
* Check whether event is an event to be processed
*/
private boolean isUsefulEvent(Event event) {
if (event instanceof AuxField) {
logger.warn("skip AuxField event: {} - {}", ((AuxField) (event)).getAuxKey(), ((AuxField) (event)).getAuxValue());
return false;
}
if (event instanceof PreRdbSyncEvent) {
logger.warn("skip PreRdbSync event: {}", event.getClass());
return false;
}
if (event instanceof PreCommandSyncEvent) {
logger.warn("skip PreCommandSync event: {}", event.getClass());
return false;
}
if (event instanceof PostRdbSyncEvent) {
logger.warn("skip PostRdbSync event: {}", event.getClass());
return false;
}
return true;
} | 3.26 |
rocketmq-connect_Serializer_close_rdh | /**
* Close this serializer.
*/
@Override
default void close() {
// intentionally left blank
} | 3.26 |
rocketmq-connect_Serializer_serialize_rdh | /**
* Convert data into a byte array.
*
* @return serialized bytes
*/
default byte[] serialize(String topic, KeyValue extensions, T data) {
return serialize(topic, data);
} | 3.26 |
rocketmq-connect_Serializer_configure_rdh | /**
* An interface for converting objects to bytes.
*/
public interface Serializer<T> extends Closeable {
/**
* Configure this class.
*
* @param configs
* configs in key/value pairs
*/
default void configure(Map<String, ?> configs) {
// intentionally left blank
} | 3.26 |
rocketmq-connect_DbStructure_createOrAmendIfNecessary_rdh | /**
* Create or amend table.
*
* @param config
* the connector configuration
* // * @param connection the database connection handle
* @param tableId
* the table ID
* @param fieldsMetadata
* the fields metadata
* @return whether a DDL operation was performed
* @throws SQLException
* if a DDL operation was deemed necessary but failed
*/
public boolean createOrAmendIfNecessary(final DorisSinkConfig config, final TableId tableId, final FieldsMetadata fieldsMetadata) throws SQLException {
// Doris doesn't seem to support creating or amending tables via stream load, so do nothing
return false;
} | 3.26 |
rocketmq-connect_AvroDatumWriterFactory_getDatumWriter_rdh | /**
* get datum writer
*
* @param value
* @param schema
* @return
*/
private DatumWriter<?> getDatumWriter(Object value, Schema schema) {
if (value instanceof SpecificRecord) {
return new SpecificDatumWriter<>(schema);
} else if (useSchemaReflection) {
return new ReflectDatumWriter<>(schema);
} else {
GenericData genericData = new GenericData();
if (avroUseLogicalTypeConverters) {
addLogicalTypeConversion(genericData);
}
return new GenericDatumWriter<>(schema, genericData);
}
} | 3.26 |
rocketmq-connect_AvroDatumWriterFactory_get_rdh | /**
* Get avro datum factory
*
* @return
*/
public static AvroDatumWriterFactory get(boolean useSchemaReflection, boolean avroUseLogicalTypeConverters) {
return new AvroDatumWriterFactory(useSchemaReflection, avroUseLogicalTypeConverters);
} | 3.26 |
rocketmq-connect_DeadLetterQueueConfig_isDlqContextHeadersEnabled_rdh | /**
* get dlq context headers enabled
*
* @return
*/
public Boolean isDlqContextHeadersEnabled() {
return config.getProperties().containsKey(DLQ_CONTEXT_PROPERTIES_ENABLE_CONFIG) ? Boolean.valueOf(config.getProperties().get(DLQ_CONTEXT_PROPERTIES_ENABLE_CONFIG)) : DLQ_CONTEXT_PROPERTIES_ENABLE_DEFAULT;
} | 3.26 |
rocketmq-connect_DeadLetterQueueConfig_dlqTopicWriteQueueNums_rdh | /**
* get dlq topic write queue nums
*
* @return
*/
public Integer dlqTopicWriteQueueNums() {
return config.getInt(DLQ_TOPIC_WRITE_QUEUE_NUMS, DLQ_TOPIC_WRITE_QUEUE_NUMS_DEFAULT);
} | 3.26 |
rocketmq-connect_DeadLetterQueueConfig_m0_rdh | /**
* include error log
*
* @return
*/
public boolean m0() {
return config.getProperties().containsKey(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG) ? Boolean.valueOf(config.getProperties().get(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG)) : ERRORS_LOG_INCLUDE_MESSAGES_DEFAULT;
} | 3.26 |
rocketmq-connect_DeadLetterQueueConfig_dlqTopicName_rdh | /**
* get dlq topic name
*
* @return
*/
public String dlqTopicName() {
return config.getString(DLQ_TOPIC_NAME_CONFIG, "");
} | 3.26 |
rocketmq-connect_DeadLetterQueueConfig_dlqTopicReadQueueNums_rdh | /**
* get dlq topic read queue nums
*
* @return
*/
public Integer dlqTopicReadQueueNums() {
return config.getInt(DLQ_TOPIC_READ_QUEUE_NUMS, DLQ_TOPIC_READ_QUEUE_NUMS_DEFAULT);
} | 3.26 |
rocketmq-connect_RebalanceService_onConfigUpdate_rdh | /**
* When config change.
*/
@Override
public void onConfigUpdate() {
RebalanceService.this.wakeup();
} | 3.26 |
rocketmq-connect_DebeziumMongoDBConnector_getConnectorClass_rdh | /**
* get connector class
*/
@Override
public String getConnectorClass() {
return DEFAULT_CONNECTOR;
} | 3.26 |
rocketmq-connect_DebeziumMongoDBConnector_taskClass_rdh | /**
* Return the current connector class
*
* @return task implement class
*/
@Override
public Class<? extends Task> taskClass() {
return DebeziumMongoDBSource.class;
} | 3.26 |