name | code_snippet | score |
---|---|---|
rocketmq-connect_ProcessingContext_currentContext_rdh | /**
* A helper method to set both the stage and the class.
*
* @param stage
* the stage
* @param klass
* the class which will execute the operation in this stage.
*/
public void currentContext(ErrorReporter.Stage stage, Class<?> klass) {
stage(stage);
executingClass(klass);
} | 3.26 |
rocketmq-connect_ProcessingContext_attempt_rdh | /**
*
* @param attempt
* the number of attempts made to execute the current operation.
*/
public void attempt(int attempt) {
this.attempt = attempt;
} | 3.26 |
rocketmq-connect_DebeziumMongoDBSource_getTaskClass_rdh | /**
* get task class
*/
@Override
public String getTaskClass() {
return DEFAULT_TASK;
} | 3.26 |
rocketmq-connect_JsonConverterConfig_cacheSize_rdh | /**
 * Return the cache size.
 *
 * @return the cache size
 */
public int cacheSize() {
return cacheSize;
} | 3.26 |
rocketmq-connect_JsonConverterConfig_m0_rdh | /**
* Return whether schemas are enabled.
*
* @return true if enabled, or false otherwise
*/
public boolean m0() {
return schemasEnabled;
} | 3.26 |
rocketmq-connect_JsonConverterConfig_decimalFormat_rdh | /**
* Get the serialization format for decimal types.
*
* @return the decimal serialization format
*/
public DecimalFormat decimalFormat() {
    return decimalFormat;
} | 3.26 |
rocketmq-connect_KafkaConnectAdaptorSink_transforms_rdh | /**
* convert by kafka sink transform
*
 * @param record
 *          the sink record to transform
 */
@Override
protected SinkRecord transforms(SinkRecord record) {
List<Transformation> transformations = transformationWrapper.transformations();
        Iterator transformationIterator = transformations.iterator();
        while (transformationIterator.hasNext()) {
Transformation<SinkRecord> transformation = ((Transformation) (transformationIterator.next()));
log.trace("applying transformation {} to {}", transformation.getClass().getName(), record);
record = transformation.apply(record);
if (record == null) {
break;
}
}
return record;
} | 3.26 |
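The loop above is the standard chain-of-transforms pattern: each transform may return null to drop the record and short-circuit the rest of the chain. A minimal self-contained sketch of the same pattern, using `UnaryOperator` in place of the project's `Transformation` type:

```java
import java.util.List;
import java.util.function.UnaryOperator;

public class TransformChainSketch {
    // Applies each transform in order; a null result drops the record and stops the chain.
    static <R> R applyAll(R record, List<UnaryOperator<R>> transforms) {
        for (UnaryOperator<R> transform : transforms) {
            record = transform.apply(record);
            if (record == null) {
                break;
            }
        }
        return record;
    }

    public static void main(String[] args) {
        List<UnaryOperator<String>> chain = List.of(
                String::trim,
                s -> s.isEmpty() ? null : s, // filter step: drop blank records
                String::toUpperCase);
        System.out.println(applyAll("  hello  ", chain)); // HELLO
        System.out.println(applyAll("   ", chain));       // null (record dropped)
    }
}
```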
rocketmq-connect_KafkaConnectAdaptorSink_processSinkRecord_rdh | /**
* convert ConnectRecord to SinkRecord
*
 * @param record
 *          the connect record to convert
 * @return the converted and transformed sink record
 */
@Override
public SinkRecord processSinkRecord(ConnectRecord record) {
SinkRecord sinkRecord = Converters.fromConnectRecord(record);
return transforms(sinkRecord);
} | 3.26 |
rocketmq-connect_DorisSinkConnector_m0_rdh | /**
 * Should be invoked before starting the connector.
 *
 * @param config
 *          the connector configuration to validate
*/
@Override
public void m0(KeyValue config) {
// do validate config
} | 3.26 |
rocketmq-connect_DorisSinkConnector_taskConfigs_rdh | /**
* Returns a set of configurations for Tasks based on the current configuration,
* producing at most count configurations.
*
* @param maxTasks
* maximum number of configurations to generate
* @return configurations for Tasks
*/
@Override
public List<KeyValue> taskConfigs(int maxTasks) {
log.info("Starting task config !!! ");
List<KeyValue> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
configs.add(this.connectConfig);
}
return configs;
} | 3.26 |
rocketmq-connect_DistributedConnectStartup_createConnectController_rdh | /**
* Read configs from command line and create connect controller.
*
 * @param args
 *          the command line arguments
 * @return the connect controller, or null if creation failed
 */
private static DistributedConnectController createConnectController(String[] args) {
try {
// Build the command line options.
            Options options = ServerUtil.buildCommandlineOptions(new Options());
commandLine = ServerUtil.parseCmdLine("connect", args, buildCommandlineOptions(options), new PosixParser());
if (null == commandLine) {
System.exit(-1);
}
// Load configs from command line.
DistributedConfig config = new DistributedConfig();
if (commandLine.hasOption('c')) {
String file = commandLine.getOptionValue('c').trim();
if (file != null) {
configFile = file;
InputStream v4 = new BufferedInputStream(new FileInputStream(file));
properties = new Properties();
properties.load(v4);
FileAndPropertyUtil.properties2Object(properties, config);
v4.close();
}
}
if (StringUtils.isNotEmpty(config.getMetricsConfigPath())) {
String file = config.getMetricsConfigPath();
InputStream in = new BufferedInputStream(new FileInputStream(file));
properties = new Properties();
properties.load(in);
Map<String, String> metricsConfig = new ConcurrentHashMap<>();
                if (!properties.containsKey(WorkerConfig.METRIC_CLASS)) {
throw new IllegalArgumentException("[metrics.reporter] is empty");
}
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
if (entry.getKey().equals(WorkerConfig.METRIC_CLASS)) {
continue;
}
metricsConfig.put(entry.getKey().toString(), entry.getValue().toString());
}
config.getMetricsConfig().put(properties.getProperty(WorkerConfig.METRIC_CLASS), metricsConfig);
in.close();
}
if (null == config.getConnectHome()) {
System.out.printf("Please set the %s variable in your environment to match the location of the Connect installation", WorkerConfig.CONNECT_HOME_ENV);
System.exit(-2);
}
LoggerContext lc = ((LoggerContext) (LoggerFactory.getILoggerFactory()));
JoranConfigurator configurator = new JoranConfigurator();
configurator.setContext(lc);
lc.reset();
configurator.doConfigure(config.getConnectHome() + "/conf/logback.xml");
List<String> pluginPaths = new ArrayList<>(16);
if (StringUtils.isNotEmpty(config.getPluginPaths())) {
String[] strArr = config.getPluginPaths().split(",");
for (String path : strArr) {
if (StringUtils.isNotEmpty(path)) {
pluginPaths.add(path);
}
}
}
Plugin plugin = new Plugin(pluginPaths);
// Create controller and initialize.
ClusterManagementService clusterManagementService = ServiceProviderUtil.getClusterManagementService(config.getClusterManagementService());
clusterManagementService.initialize(config);
// config
ConfigManagementService configManagementService = ServiceProviderUtil.getConfigManagementService(config.getConfigManagementService());
configManagementService.initialize(config, new JsonConverter(), plugin);
// position
            PositionManagementService positionManagementService = ServiceProviderUtil.getPositionManagementService(config.getPositionManagementService());
            positionManagementService.initialize(config, new JsonConverter(), new JsonConverter());
// state
StateManagementService stateManagementService = ServiceProviderUtil.getStateManagementService(config.getStateManagementService());
stateManagementService.initialize(config, new JsonConverter());
DistributedConnectController controller = new DistributedConnectController(plugin, config, clusterManagementService, configManagementService, positionManagementService, stateManagementService);
// Invoked when shutdown.
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
private volatile boolean hasShutdown = false;
private AtomicInteger shutdownTimes = new AtomicInteger(0);
@Override
public void run() {
synchronized(this) {
log.info("Shutdown hook was invoked, {}", this.shutdownTimes.incrementAndGet());
if (!this.hasShutdown) {
this.hasShutdown = true;
long beginTime = System.currentTimeMillis();
controller.shutdown();
long consumingTimeTotal = System.currentTimeMillis() - beginTime;
log.info("Shutdown hook over, consuming total time(ms): {}", consumingTimeTotal);
}
}
}
}, "ShutdownHook"));return controller;
} catch (Throwable e) {
e.printStackTrace();
System.exit(-1);
}
return null;
} | 3.26 |
rocketmq-connect_ClusterConfigState_targetState_rdh | /**
* Get the target state of the connector
*
 * @param connector
 *          name of the connector
 * @return the connector's target state
 */
public TargetState targetState(String connector) {
return connectorTargetStates.get(connector);
} | 3.26 |
rocketmq-connect_ClusterConfigState_taskCount_rdh | /**
* Get the number of tasks assigned for the given connector.
*
* @param connectorName
* name of the connector to look up tasks for
* @return the number of tasks
*/
public int taskCount(String connectorName) {
Integer count = connectorTaskCounts.get(connectorName);
return count == null ? 0 : count;
} | 3.26 |
rocketmq-connect_ClusterConfigState_tasks_rdh | /**
* Get the current set of task IDs for the specified connector.
*
* @param connectorName
* the name of the connector to look up task configs for
* @return the current set of connector task IDs
*/
public List<ConnectorTaskId> tasks(String connectorName) {
Integer numTasks = connectorTaskCounts.get(connectorName);
if (numTasks == null) {
return Collections.emptyList();
}
List<ConnectorTaskId> taskIds = new ArrayList<>(numTasks);
for (int taskIndex = 0; taskIndex < numTasks; taskIndex++) {
ConnectorTaskId taskId = new ConnectorTaskId(connectorName, taskIndex);
taskIds.add(taskId);
}
return Collections.unmodifiableList(taskIds);
} | 3.26 |
rocketmq-connect_ClusterConfigState_taskConfig_rdh | /**
 * Get the configuration for the given task.
 *
 * @param task
 *          the task ID
 * @return the task's configuration
 */
public Map<String, String> taskConfig(ConnectorTaskId task) {
return taskConfigs.get(task);
} | 3.26 |
rocketmq-connect_ClusterConfigState_allTaskConfigs_rdh | /**
 * Get all task configs for the given connector, ordered by task index.
 *
 * @param connector
 *          name of the connector
 * @return the task configurations
 */
public List<Map<String, String>> allTaskConfigs(String connector) {
Map<Integer, Map<String, String>> taskConfigs = new TreeMap<>();
for (Map.Entry<ConnectorTaskId, Map<String, String>> taskConfigEntry : this.taskConfigs.entrySet()) {
if (taskConfigEntry.getKey().connector().equals(connector)) {
Map<String,
String> configs = taskConfigEntry.getValue();
taskConfigs.put(taskConfigEntry.getKey().task(), configs);
}
}
return Collections.unmodifiableList(new ArrayList<>(taskConfigs.values()));
} | 3.26 |
rocketmq-connect_ClusterConfigState_contains_rdh | /**
* Check whether this snapshot contains configuration for a connector.
*
* @param connector
* name of the connector
* @return true if this state contains configuration for the connector, false otherwise
*/
public boolean contains(String connector) {
return connectorConfigs.containsKey(connector);
} | 3.26 |
rocketmq-connect_TimestampIncrementingCriteria_extractOffsetTimestamp_rdh | /**
* Extract the timestamp from the row.
*
* @param schema
* the record's schema; never null
* @param record
* the record's struct; never null
 * @return the timestamp for this row; may be null if no timestamp column has a value
*/
protected Timestamp extractOffsetTimestamp(Schema schema, Struct record) {
        for (ColumnId timestampColumn : timestampColumns) {
            Field v9 = schema.getField(timestampColumn.name());
            Timestamp ts = (Timestamp) record.get(v9);
            if (ts != null) {
                return ts;
            }
}
return null;
} | 3.26 |
rocketmq-connect_TimestampIncrementingCriteria_extractOffsetIncrementedId_rdh | /**
* Extract the incrementing column value from the row.
*
* @param schema
* the record's schema; never null
* @param record
* the record's struct; never null
* @return the incrementing ID for this row; may not be null
*/
protected Long extractOffsetIncrementedId(Schema schema, Struct record) {
final Long extractedId;
        final Field field = schema.getField(incrementingColumn.name());
if (field == null) {
throw new ConnectException((("Incrementing column " + incrementingColumn.name()) + " not found in ") + schema.getFields().stream().map(f -> f.getName()).collect(Collectors.joining(",")));
}
final Schema incrementingColumnSchema = field.getSchema();
final Object incrementingColumnValue = record.get(field);
if (incrementingColumnValue == null) {
throw new ConnectException("Null value for incrementing column of type: " + incrementingColumnSchema.getFieldType());
} else if (isIntegralPrimitiveType(incrementingColumnValue)) {
extractedId = ((Number) (incrementingColumnValue)).longValue();
        } else if (isLongFromString(incrementingColumnValue)) {
            extractedId = Long.parseLong((String) incrementingColumnValue);
} else if ((incrementingColumnSchema.getName() != null) && incrementingColumnSchema.getName().equals(Decimal.LOGICAL_NAME)) {
extractedId = extractDecimalId(incrementingColumnValue);
} else {
throw new ConnectException("Invalid type for incrementing column: " + incrementingColumnSchema.getFieldType());
}
log.trace("Extracted incrementing column value: {}", extractedId);
return extractedId;
} | 3.26 |
rocketmq-connect_TimestampIncrementingCriteria_extractValues_rdh | /**
* Extract the offset values from the row.
*
* @param schema
* the record's schema; never null
* @param record
* the record's struct; never null
* @param previousOffset
* a previous timestamp offset if the table has timestamp columns
 * @return the extracted offset for this row; never null
*/
public TimestampIncrementingOffset extractValues(Schema schema, Struct record, TimestampIncrementingOffset previousOffset) {
Timestamp extractedTimestamp = null;
if (hasTimestampColumns()) {
extractedTimestamp = extractOffsetTimestamp(schema, record);
assert (previousOffset == null) || ((previousOffset.getTimestampOffset() != null) && (previousOffset.getTimestampOffset().compareTo(extractedTimestamp) <= 0));
}
Long extractedId = null;
if (hasIncrementedColumn()) {
extractedId = extractOffsetIncrementedId(schema, record);
// If we are only using an incrementing column, then this must be incrementing.
// If we are also using a timestamp, then we may see updates to older rows.
assert (((previousOffset == null) || (previousOffset.getIncrementingOffset() == (-1L))) || (extractedId > previousOffset.getIncrementingOffset())) || hasTimestampColumns();
}
return new TimestampIncrementingOffset(extractedTimestamp, extractedId);
} | 3.26 |
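A worked example of the offset semantics above: the assertions encode that timestamps never move backwards, and that in pure incrementing mode the ID must strictly grow (with -1L acting as "no previous offset"). The sketch below mimics that comparison logic with plain values in place of the project's Schema/Struct types (run with `-ea` to enable assertions):

```java
import java.sql.Timestamp;

public class OffsetSketch {
    // Simplified stand-in for TimestampIncrementingOffset.
    record Offset(Timestamp ts, Long id) {}

    // Mirrors the assertions in extractValues: in timestamp mode the new row's timestamp
    // must be >= the previous offset; in pure incrementing mode the id must strictly grow.
    static Offset extract(Timestamp rowTs, long rowId, Offset previous, boolean hasTimestamp) {
        if (hasTimestamp && previous != null && previous.ts() != null) {
            assert previous.ts().compareTo(rowTs) <= 0 : "timestamp moved backwards";
        }
        if (!hasTimestamp && previous != null && previous.id() != -1L) {
            assert rowId > previous.id() : "incrementing id did not grow";
        }
        return new Offset(rowTs, rowId);
    }

    public static void main(String[] args) {
        Offset prev = new Offset(Timestamp.valueOf("2024-01-01 00:00:00"), 41L);
        Offset next = extract(Timestamp.valueOf("2024-01-01 00:00:05"), 42L, prev, true);
        System.out.println(next);
    }
}
```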
rocketmq-connect_ConnectorPluginsResource_listConnectorPlugins_rdh | /**
* list connector plugins
*
 * @param context
 *          the HTTP request context
 */
public void listConnectorPlugins(Context context) {
synchronized(this) {
List<PluginInfo> pluginInfos = Collections.unmodifiableList(connectorPlugins.stream().filter(p -> PluginType.SINK.equals(p.getType()) || PluginType.SOURCE.equals(p.getType())).collect(Collectors.toList()));
context.json(new HttpResponse<>(context.status(), pluginInfos));
}
} | 3.26 |
rocketmq-connect_ConnectorPluginsResource_getConnectorConfigDef_rdh | /**
* Get connector config def
*
 * @param context
 *          the HTTP request context
 */
public void getConnectorConfigDef(Context context) {
// No-op
context.json(new HttpResponse<>(HttpStatus.BAD_REQUEST_400, "This function has not been implemented yet"));
} | 3.26 |
rocketmq-connect_ConnectorPluginsResource_listPlugins_rdh | /**
 * List all plugins.
 *
 * @param context
 *          the HTTP request context
 */
public void listPlugins(Context context) {
synchronized(this) {
context.json(new HttpResponse<>(context.status(), Collections.unmodifiableList(connectorPlugins)));
}
} | 3.26 |
rocketmq-connect_ConnectorPluginsResource_reloadPlugins_rdh | /**
* reload plugins
*
* @param context
*/
public void reloadPlugins(Context context) {
try {
connectController.reloadPlugins();
context.json(new HttpResponse<>(context.status(), "Plugin reload succeeded"));
} catch (Exception ex) {
log.error("Reload plugin failed .", ex); context.json(new ErrorMessage(HttpStatus.INTERNAL_SERVER_ERROR_500, ex.getMessage()));
}
} | 3.26 |
rocketmq-connect_FieldsMetadata_extract_rdh | /**
* extract metadata info
*
 * @param tableName
 * @param pkMode
 * @param configuredPkFields
 * @param fieldsWhitelist
 * @param keySchema
 * @param schema
 * @param headers
 * @return the extracted fields metadata
 */
public static FieldsMetadata extract(final String tableName, final JdbcSinkConfig.PrimaryKeyMode pkMode, final List<String> configuredPkFields, final Set<String> fieldsWhitelist, final Schema keySchema, final Schema schema, final KeyValue headers) {
if ((schema != null) && (schema.getFieldType() != FieldType.STRUCT)) {
throw new ConnectException("Value schema must be of type Struct");
}
final Map<String, SinkRecordField> allFields = new HashMap<>();
final Set<String> keyFieldNames = new LinkedHashSet<>();
switch (pkMode) {
case NONE :
break;
case RECORD_KEY :
extractRecordKeyPk(tableName, configuredPkFields, keySchema, allFields, keyFieldNames);
break;
case RECORD_VALUE :
                extractRecordValuePk(tableName, configuredPkFields, schema, headers, allFields, keyFieldNames);
break;
default :
throw new ConnectException("Unknown primary key mode: " + pkMode);
}
final Set<String> nonKeyFieldNames = new LinkedHashSet<>();
if (schema != null) {
for (Field field : schema.getFields()) {
if (keyFieldNames.contains(field.getName())) {
continue;
}
if ((!fieldsWhitelist.isEmpty()) && (!fieldsWhitelist.contains(field.getName()))) {
continue;
}
nonKeyFieldNames.add(field.getName());
                final Schema fieldSchema = field.getSchema();
allFields.put(field.getName(), new SinkRecordField(fieldSchema, field.getName(), false));
}
}
if (allFields.isEmpty()) {
throw new ConnectException("No fields found using key and value schemas for table: " + tableName);
}
final Map<String, SinkRecordField>
allFieldsOrdered = new LinkedHashMap<>();
if (schema != null) {for (Field field :
schema.getFields()) {
String fieldName
= field.getName();
if (allFields.containsKey(fieldName)) {
allFieldsOrdered.put(fieldName, allFields.get(fieldName));
}
}
}
if (allFieldsOrdered.size() < allFields.size()) {
ArrayList<String> fieldKeys = new ArrayList<>(allFields.keySet());
Collections.sort(fieldKeys);
for (String fieldName : fieldKeys) {
if (!allFieldsOrdered.containsKey(fieldName)) {
allFieldsOrdered.put(fieldName, allFields.get(fieldName));
}
}
}
return new FieldsMetadata(keyFieldNames, nonKeyFieldNames, allFieldsOrdered);
} | 3.26 |
rocketmq-connect_FieldsMetadata_extractRecordValuePk_rdh | /**
 * Extract primary key fields from the record value.
*
* @param tableName
* @param configuredPkFields
* @param valueSchema
* @param headers
* @param allFields
* @param keyFieldNames
*/
private static void extractRecordValuePk(final String tableName, final List<String> configuredPkFields, final Schema valueSchema, final KeyValue headers, final Map<String, SinkRecordField> allFields, final Set<String> keyFieldNames) {
if (valueSchema == null) {
throw new ConnectException(String.format("PK mode for table '%s' is %s, but record value schema is missing", tableName, PrimaryKeyMode.RECORD_VALUE));
}
List<String> pkFields = new ArrayList<>(configuredPkFields);
if (pkFields.isEmpty()) {
for (Field keyField : valueSchema.getFields()) {
keyFieldNames.add(keyField.getName());
}
        } else {
            Set<String> valueFieldNames = new LinkedHashSet<>();
            for (Field valueField : valueSchema.getFields()) {
                valueFieldNames.add(valueField.getName());
            }
            for (String fieldName : pkFields) {
                if (!valueFieldNames.contains(fieldName)) {
                    throw new ConnectException(String.format("PK mode for table '%s' is %s with configured PK fields %s, but record value " + "schema does not contain field: %s", tableName, PrimaryKeyMode.RECORD_VALUE, pkFields, fieldName));
                }
            }
            // only the configured PK fields become key fields, not every value field
            keyFieldNames.addAll(pkFields);
        }
for (String fieldName : keyFieldNames) {
final Schema fieldSchema = valueSchema.getField(fieldName).getSchema();
allFields.put(fieldName, new SinkRecordField(fieldSchema, fieldName, true));
}
} | 3.26 |
rocketmq-connect_MetricUtils_getMeterValue_rdh | /**
* get meter value
*
* @param name
* @param meter
* @return */
public static Double getMeterValue(MetricName name, Meter meter) {
if (name.getType().equals(NoneType.none.name())) {
throw new IllegalArgumentException("Meter type configuration error");
}
Stat.RateType rateType = Stat.RateType.valueOf(name.getType());
switch (rateType) {
case MeanRate :
return meter.getMeanRate();
case OneMinuteRate :
return meter.getOneMinuteRate();
            case FiveMinuteRate :
return meter.getFiveMinuteRate();
case FifteenMinuteRate :
return meter.getFifteenMinuteRate();
default :
return 0.0;
}
} | 3.26 |
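`Meter` here is the Dropwizard Metrics class, so the rate accessors the switch dispatches on are the standard Dropwizard ones. A small usage sketch, assuming the `metrics-core` dependency is on the classpath (the metric name is made up):

```java
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;

public class MeterSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        Meter meter = registry.meter("sink-task.records"); // hypothetical metric name
        for (int i = 0; i < 100; i++) {
            meter.mark(); // record one event
        }
        // The same accessors getMeterValue() selects between:
        System.out.println("mean rate:      " + meter.getMeanRate());
        System.out.println("1-minute rate:  " + meter.getOneMinuteRate());
        System.out.println("5-minute rate:  " + meter.getFiveMinuteRate());
        System.out.println("15-minute rate: " + meter.getFifteenMinuteRate());
    }
}
```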
rocketmq-connect_MetricUtils_metricNameToString_rdh | /**
* MetricName to string
*
* @param name
* @return */
public static String metricNameToString(MetricName name) {
if (StringUtils.isEmpty(name.getType())) {
name.setType("none");
}
StringBuilder sb = new StringBuilder(ROCKETMQ_CONNECT).append(name.getGroup()).append(SPLIT_COMMA).append(name.getName()).append(SPLIT_COMMA).append(name.getType());
for (Map.Entry<String, String> entry : name.getTags().entrySet()) {
sb.append(SPLIT_COMMA).append(entry.getKey()).append(SPLIT_KV).append(entry.getValue());
}
return sb.toString();
} | 3.26 |
rocketmq-connect_MetricUtils_stringToMetricName_rdh | /**
* string to MetricName
*
* @param name
* @return */
public static MetricName stringToMetricName(String name) {
        if (StringUtils.isEmpty(name)) {
            throw new IllegalArgumentException("Metric name str is empty");
        }
        String[] splits = name.replace(ROCKETMQ_CONNECT, "").replace(SPLIT_KV, SPLIT_COMMA).split(SPLIT_COMMA);
        return new MetricName(splits[0], splits[1], splits[2], getTags(Arrays.copyOfRange(splits, 3, splits.length)));
} | 3.26 |
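The two methods above define a round-trippable string format: prefix, then group, name, and type separated by commas, then `key=value` tag pairs. A self-contained sketch of that format; the constant values are assumptions, since `ROCKETMQ_CONNECT`, `SPLIT_COMMA`, and `SPLIT_KV` are defined elsewhere in `MetricUtils`:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class MetricNameFormatSketch {
    // Assumed constant values; the real ones live in MetricUtils.
    static final String PREFIX = "rocketmq.connect:";
    static final String COMMA = ",";
    static final String KV = "=";

    static String encode(String group, String name, String type, Map<String, String> tags) {
        StringBuilder sb = new StringBuilder(PREFIX).append(group).append(COMMA).append(name).append(COMMA).append(type);
        tags.forEach((k, v) -> sb.append(COMMA).append(k).append(KV).append(v));
        return sb.toString();
    }

    static String[] decode(String s) {
        // Mirrors stringToMetricName: strip the prefix, normalize '=' to ',', then split.
        return s.replace(PREFIX, "").replace(KV, COMMA).split(COMMA);
    }

    public static void main(String[] args) {
        Map<String, String> tags = new LinkedHashMap<>();
        tags.put("task", "0");
        String encoded = encode("sink", "records", "OneMinuteRate", tags);
        System.out.println(encoded);
        // splits[0]=group, splits[1]=name, splits[2]=type, remainder = flattened tag pairs
        System.out.println(String.join(" | ", decode(encoded)));
    }
}
```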
rocketmq-connect_DorisSinkTask_start_rdh | /**
* Start the component
*
* @param keyValue
*/
@Override
public void start(KeyValue keyValue) {
originalConfig = keyValue;
config = new DorisSinkConfig(keyValue);
remainingRetries = config.getMaxRetries();
log.info("Initializing doris writer");
        this.updater = new Updater(config);
} | 3.26 |
rocketmq-connect_DorisSinkTask_put_rdh | /**
* Put the records to the sink
*
* @param records
*/
@Override
public void put(List<ConnectRecord> records) throws ConnectException {
if (records.isEmpty()) {
return;
}
        final int recordsCount = records.size();
log.debug("Received {} records.", recordsCount);
try {
updater.write(records);
} catch (TableAlterOrCreateException tace) {
throw tace;
} catch (SQLException sqle) {
SQLException sqlAllMessagesException = getAllMessagesException(sqle);
if (remainingRetries > 0) {
// updater.closeQuietly();
start(originalConfig);
remainingRetries--;
throw new RetriableException(sqlAllMessagesException);
}
}
remainingRetries = config.getMaxRetries();
} | 3.26 |
rocketmq-connect_DeadLetterQueueReporter_populateContextHeaders_rdh | /**
* pop context property
*
* @param producerRecord
* @param context
*/
void populateContextHeaders(Message producerRecord, ProcessingContext context) {
Map<String, String>
headers = producerRecord.getProperties();
if (context.consumerRecord() != null) {
producerRecord.putUserProperty(ERROR_HEADER_ORIG_TOPIC, context.consumerRecord().getTopic());
producerRecord.putUserProperty(ERROR_HEADER_ORIG_PARTITION, String.valueOf(context.consumerRecord().getQueueId()));
producerRecord.putUserProperty(f0, String.valueOf(context.consumerRecord().getQueueOffset()));
}
if (workerId != null) {
producerRecord.putUserProperty(ERROR_HEADER_CLUSTER_ID, workerId);
        }
        producerRecord.putUserProperty(ERROR_HEADER_STAGE, context.stage().name());
producerRecord.putUserProperty(ERROR_HEADER_EXECUTING_CLASS, context.executingClass().getName());
producerRecord.putUserProperty(ERROR_HEADER_CONNECTOR_NAME, connectorTaskId.connector());
producerRecord.putUserProperty(ERROR_HEADER_TASK_ID, connectorTaskId.task() + "");
if (context.error() != null) {
Throwable error = context.error();
headers.put(ERROR_HEADER_EXCEPTION, error.getClass().getName());
headers.put(ERROR_HEADER_EXCEPTION_MESSAGE, error.getMessage());
byte[] trace;
if ((trace = stacktrace(context.error())) != null) {
headers.put(ERROR_HEADER_EXCEPTION_STACK_TRACE, new String(trace));
}
}
} | 3.26 |
rocketmq-connect_DeadLetterQueueReporter_report_rdh | /**
* Write the raw records into a topic
*/
@Override
public void report(ProcessingContext context) {
if (this.f1.dlqTopicName().trim().isEmpty()) {
return;
}
f2.recordDeadLetterQueueProduceRequest();
        MessageExt originalMessage = context.consumerRecord();
if (originalMessage == null) {
f2.recordDeadLetterQueueProduceFailed();
return;
}
Message producerRecord = new Message();
producerRecord.setTopic(f1.dlqTopicName());
producerRecord.setBody(originalMessage.getBody());
if (f1.isDlqContextHeadersEnabled()) {
            populateContextHeaders(producerRecord, context);
}
try {
            producer.send(producerRecord, new SendCallback() {
@Override
public void onSuccess(SendResult result) {
log.info("Successful send error message to RocketMQ:{}, Topic {}", result.getMsgId(), result.getMessageQueue().getTopic());
}
@Override
public void onException(Throwable throwable) {
f2.recordDeadLetterQueueProduceFailed();
log.error("Source task send record failed ,error msg {}. message {}", throwable.getMessage(),
JSON.toJSONString(originalMessage), throwable);
}
});
} catch (MQClientException e) {
log.error("Send message MQClientException. message: {}, error info: {}.", producerRecord, e);
} catch (RemotingException e) {
log.error("Send message RemotingException. message: {}, error info: {}.", producerRecord, e);
} catch (InterruptedException e) {
log.error("Send message InterruptedException. message: {}, error info: {}.", producerRecord,
e);
throw new ConnectException(e);
}
} | 3.26 |
rocketmq-connect_DeadLetterQueueReporter_build_rdh | /**
* build reporter
*
 * @param connectorTaskId
 * @param sinkConfig
 * @param workerConfig
 * @param errorMetricsGroup
 * @return the reporter, or null if no dead letter queue topic is configured
 */
public static DeadLetterQueueReporter build(ConnectorTaskId connectorTaskId, ConnectKeyValue sinkConfig, WorkerConfig workerConfig, ErrorMetricsGroup errorMetricsGroup) {
DeadLetterQueueConfig deadLetterQueueConfig = new DeadLetterQueueConfig(sinkConfig);
        String v1 = deadLetterQueueConfig.dlqTopicName();
if (v1.isEmpty()) {
return null;
}
if (!ConnectUtil.isTopicExist(workerConfig, v1)) {
TopicConfig v2 = new TopicConfig(v1);
v2.setReadQueueNums(deadLetterQueueConfig.dlqTopicReadQueueNums());
v2.setWriteQueueNums(deadLetterQueueConfig.dlqTopicWriteQueueNums());
ConnectUtil.createTopic(workerConfig, v2);
}
DefaultMQProducer dlqProducer = ConnectUtil.initDefaultMQProducer(workerConfig);
return new DeadLetterQueueReporter(dlqProducer, sinkConfig, connectorTaskId, errorMetricsGroup);
} | 3.26 |
rocketmq-connect_ToleranceType_value_rdh | /**
* Tolerate all errors.
*/
    ALL;

    public String value() {
return name().toLowerCase(Locale.ROOT);
} | 3.26 |
rocketmq-connect_BufferedRecords_executeUpdates_rdh | /**
*
* @return an optional count of all updated rows or an empty optional if no info is available
*/
private Optional<Long> executeUpdates() throws DorisException {
Optional<Long> count = Optional.empty();
if (updatePreparedRecords.isEmpty()) {
return count;
}
for (ConnectRecord record : updatePreparedRecords) {
String jsonData = DorisDialect.convertToUpdateJsonString(record);
try {
log.info("[executeUpdates]"
+ jsonData);
loader.loadJson(jsonData, record.getSchema().getName());
} catch (DorisException e) {
log.error("executeUpdates failed");
throw e;
} catch (Exception e) {
throw new DorisException("doris error");}
count = (count.isPresent()) ? count.map(total -> total + 1) : Optional.of(1L);
}
return count;
} | 3.26 |
rocketmq-connect_BufferedRecords_add_rdh | /**
* add record
*
* @param record
 * @return records flushed as a result of adding this record
 * @throws SQLException
*/
public List<ConnectRecord> add(ConnectRecord record) throws SQLException {
recordValidator.validate(record);
final List<ConnectRecord> flushed = new ArrayList<>();
boolean schemaChanged = false;
if (!Objects.equals(keySchema, record.getKeySchema())) {
keySchema = record.getKeySchema();
schemaChanged = true;
}
if (isNull(record.getSchema())) {
// For deletes, value and optionally value schema come in as null.
        // We don't want to treat this as a schema change if the key schema is the same,
// otherwise we flush unnecessarily.
            if (config.isDeleteEnabled()) {
deletesInBatch = true;
}
} else if (Objects.equals(f0, record.getSchema())) {
if (config.isDeleteEnabled() && deletesInBatch) {
// flush so an insert after a delete of same record isn't lost
flushed.addAll(flush());
}
} else {
// value schema is not null and has changed. This is a real schema change.
f0 = record.getSchema();
schemaChanged = true;
}
if (schemaChanged) {
// Each batch needs to have the same schemas, so get the buffered records out
flushed.addAll(flush());
// re-initialize everything that depends on the record schema
final SchemaPair schemaPair = new SchemaPair(record.getKeySchema(), record.getSchema(), record.getExtensions());
// extract field
fieldsMetadata = FieldsMetadata.extract(tableId.tableName(), config.pkMode, config.getPkFields(), config.getFieldsWhitelist(), schemaPair);
}
// set deletesInBatch if schema value is not null
if (isNull(record.getData()) &&
config.isDeleteEnabled()) {
deletesInBatch = true;
}
records.add(record);
if (records.size() >= config.getBatchSize()) {
flushed.addAll(flush());
}
return flushed;
} | 3.26 |
rocketmq-connect_TransformChain_retryWithToleranceOperator_rdh | /**
* set retryWithToleranceOperator
*/
public void retryWithToleranceOperator(RetryWithToleranceOperator retryWithToleranceOperator) {
this.retryWithToleranceOperator = retryWithToleranceOperator;
} | 3.26 |
rocketmq-connect_TransformChain_close_rdh | /**
* close transforms
*
* @throws Exception
* if this resource cannot be closed
*/
@Override
public void close() throws Exception {
for (Transform transform : transformList) {
transform.stop();
        }
    }
rocketmq-connect_LRUCache_get_rdh | /**
 * Get a value from the cache.
 *
 * @param key
 * @return the cached value, or null if not present
 */
@Override
public V get(K key) {
return cache.get(key);
} | 3.26 |
rocketmq-connect_LRUCache_put_rdh | /**
 * Put a value into the cache.
 *
 * @param key
 * @param value
 */
@Override
public void put(K key, V value) {
cache.put(key, value);
} | 3.26 |
rocketmq-connect_LRUCache_remove_rdh | /**
 * Remove a value from the cache.
 *
 * @param key
 * @return true if the key was present and removed
 */
@Override
public boolean remove(K key) {
return cache.remove(key) != null;
} | 3.26 |
rocketmq-connect_LRUCache_size_rdh | /**
 * Cache size.
 *
 * @return the number of entries in the cache
 */
@Override
public long size() {
return cache.size();
} | 3.26 |
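The `LRUCache` methods above delegate to an internal map. For readers unfamiliar with the eviction behavior, here is a minimal self-contained LRU built on `LinkedHashMap` in access order; this is an illustration of the idea, not the project's implementation:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class LruSketch<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;

    public LruSketch(int capacity) {
        super(16, 0.75f, true); // true = access order, which gives LRU behavior
        this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > capacity; // evict the least-recently-used entry past capacity
    }

    public static void main(String[] args) {
        LruSketch<String, Integer> cache = new LruSketch<>(2);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");    // touch "a" so "b" becomes least recently used
        cache.put("c", 3); // evicts "b"
        System.out.println(cache.keySet()); // [a, c]
    }
}
```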
rocketmq-connect_JdbcDriverInfo_jdbcMajorVersion_rdh | /**
* Get the major version of the JDBC specification supported by the driver.
*
* @return the major version number
*/
public int jdbcMajorVersion() {
return jdbcMajorVersion;
} | 3.26 |
rocketmq-connect_DorisStreamLoader_loadJson_rdh | /**
* JSON import
*
 * @param jsonData
 * @param table
 * @throws Exception
*/
public void loadJson(String jsonData, String table) throws Exception {
try (CloseableHttpClient client = httpClientBuilder.build()) {
HttpPut put = new HttpPut(getLoadURL(table));
put.removeHeaders(HttpHeaders.CONTENT_LENGTH);
            put.removeHeaders(HttpHeaders.TRANSFER_ENCODING);
            put.setHeader(HttpHeaders.EXPECT, "100-continue");
put.setHeader(HttpHeaders.AUTHORIZATION, basicAuthHeader(user, passwd));
// You can set stream load related properties in the Header, here we set label and column_separator.
put.setHeader("label", UUID.randomUUID().toString());
put.setHeader("column_separator", ",");
put.setHeader("format", "json");
// Set up the import file. Here you can also use StringEntity to transfer arbitrary data.
StringEntity entity = new StringEntity(jsonData);
put.setEntity(entity);
log.info(put.toString());
try (CloseableHttpResponse response = client.execute(put)) {
String loadResult = "";
if (response.getEntity() != null) {
loadResult = EntityUtils.toString(response.getEntity());
}
final int statusCode = response.getStatusLine().getStatusCode();
if (statusCode != 200) {
throw new IOException(String.format("Stream load failed. status: %s load result: %s", statusCode, loadResult));
}
log.info("Get load result: " + loadResult);
}
}
} | 3.26 |
rocketmq-connect_DorisStreamLoader_basicAuthHeader_rdh | /**
* Construct authentication information, the authentication method used by doris here is Basic Auth
*
* @param username
* @param password
* @return */
private String basicAuthHeader(String username, String password) {
final String tobeEncode = (username + ":") + password;
byte[] encoded = Base64.encodeBase64(tobeEncode.getBytes(StandardCharsets.UTF_8));
return "Basic " + new String(encoded);
} | 3.26 |
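Basic Auth is simply `Base64("user:password")` with a `Basic ` prefix. The snippet uses Commons Codec; the same header can be built with the JDK's `java.util.Base64`, as this sketch shows (credentials are placeholders):

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class BasicAuthSketch {
    static String basicAuthHeader(String username, String password) {
        String toEncode = username + ":" + password;
        return "Basic " + Base64.getEncoder().encodeToString(toEncode.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        // "root"/"secret" are placeholder credentials.
        System.out.println(basicAuthHeader("root", "secret")); // Basic cm9vdDpzZWNyZXQ=
    }
}
```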
rocketmq-connect_AbstractConnectController_taskConfigs_rdh | /**
* task configs
*
 * @param connName
 *          name of the connector
 * @return the task configurations
 */
public List<TaskInfo> taskConfigs(final String connName) {
ClusterConfigState configState = configManagementService.snapshot();
List<TaskInfo> result = new ArrayList<>();
for (int i = 0; i < configState.taskCount(connName); i++) {
ConnectorTaskId id = new ConnectorTaskId(connName, i);
result.add(new TaskInfo(id, configState.rawTaskConfig(id)));
}
return result;
} | 3.26 |
rocketmq-connect_AbstractConnectController_putConnectorConfig_rdh | /**
* add connector
*
* @param connectorName
* @param configs
 * @return the connector name
 * @throws Exception
*/
public String putConnectorConfig(String connectorName, ConnectKeyValue configs) throws Exception {
return configManagementService.putConnectorConfig(connectorName, configs);
} | 3.26 |
rocketmq-connect_AbstractConnectController_connectors_rdh | /**
* Get a list of connectors currently running in this cluster.
*
* @return A list of connector names
*/
public Collection<String> connectors() {
return configManagementService.snapshot().connectors();
} | 3.26 |
rocketmq-connect_AbstractConnectController_connectorTypeForClass_rdh | /**
* Retrieves ConnectorType for the corresponding connector class
*
* @param connClass
* class of the connector
*/
public ConnectorType connectorTypeForClass(String connClass) {
return ConnectorType.from(plugin.newConnector(connClass).getClass());
} | 3.26 |
rocketmq-connect_AbstractConnectController_connectorInfo_rdh | /**
* Get the definition and status of a connector.
*
* @param connector
* name of the connector
*/
public ConnectorInfo connectorInfo(String connector) {
final ClusterConfigState configState = configManagementService.snapshot();
if (!configState.contains(connector)) {
throw new ConnectException(("Connector[" + connector) + "] does not exist");
}
Map<String, String> config = configState.rawConnectorConfig(connector);
return new ConnectorInfo(connector, config, configState.tasks(connector), connectorTypeForClass(config.get(ConnectorConfig.CONNECTOR_CLASS)));
} | 3.26 |
rocketmq-connect_AbstractConnectController_pauseConnector_rdh | /**
* Pause the connector. This call will asynchronously suspend processing by the connector and all
* of its tasks.
*
* @param connector
* name of the connector
*/
public void pauseConnector(String connector) {
configManagementService.pauseConnector(connector);
} | 3.26 |
rocketmq-connect_AbstractConnectController_reloadPlugins_rdh | /**
* reload plugins
*/
public void reloadPlugins() {
configManagementService.getPlugin().initLoaders();
} | 3.26 |
rocketmq-connect_AbstractConnectController_deleteConnectorConfig_rdh | /**
* Remove the connector with the specified connector name in the cluster.
*
* @param connectorName
*/
public void deleteConnectorConfig(String connectorName) {
configManagementService.deleteConnectorConfig(connectorName);
} | 3.26 |
rocketmq-connect_AbstractConnectController_resumeConnector_rdh | /**
* Resume the connector. This call will asynchronously start the connector and its tasks (if
* not started already).
*
* @param connector
* name of the connector
*/
public void resumeConnector(String connector) {
configManagementService.resumeConnector(connector);
} | 3.26 |
rocketmq-connect_CountDownLatch2_await_rdh | /**
* Causes the current thread to wait until the latch has counted down to zero, unless the thread is {@linkplain Thread#interrupt interrupted}.
*
* <p>If the current count is zero then this method returns immediately.
*
* <p>If the current count is greater than zero then the current
* thread becomes disabled for thread scheduling purposes and lies dormant until one of two things happen:
* <ul>
* <li>The count reaches zero due to invocations of the
* {@link #countDown} method; or
* <li>Some other thread {@linkplain Thread#interrupt interrupts}
* the current thread.
* </ul>
*
* <p>If the current thread:
* <ul>
* <li>has its interrupted status set on entry to this method; or
* <li>is {@linkplain Thread#interrupt interrupted} while waiting,
* </ul>
* then {@link InterruptedException} is thrown and the current thread's interrupted status is cleared.
*
* @throws InterruptedException
* if the current thread is interrupted while waiting
*/
public void await() throws InterruptedException {
sync.acquireSharedInterruptibly(1);
}
/**
* Causes the current thread to wait until the latch has counted down to zero, unless the thread is {@linkplain Thread#interrupt interrupted}, or the specified waiting time elapses.
*
* <p>If the current count is zero then this method returns immediately
* with the value {@code true}.
*
* <p>If the current count is greater than zero then the current
* thread becomes disabled for thread scheduling purposes and lies dormant until one of three things happen:
* <ul>
* <li>The count reaches zero due to invocations of the
* {@link #countDown} method; or
* <li>Some other thread {@linkplain Thread#interrupt interrupts}
* the current thread; or
* <li>The specified waiting time elapses.
* </ul>
*
* <p>If the count reaches zero then the method returns with the
* value {@code true}.
*
* <p>If the current thread:
* <ul>
* <li>has its interrupted status set on entry to this method; or
* <li>is {@linkplain Thread#interrupt interrupted} while waiting,
* </ul>
* then {@link InterruptedException} is thrown and the current thread's interrupted status is cleared.
*
* <p>If the specified waiting time elapses then the value {@code false}
* is returned. If the time is less than or equal to zero, the method will not wait at all.
*
* @param timeout
* the maximum time to wait
* @param unit
* the time unit of the {@code timeout} argument
 * @return {@code true} if the count reached zero and {@code false}
 *         if the waiting time elapsed before the count reached zero
 * @throws InterruptedException
 *          if the current thread is interrupted while waiting
 */
public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
    return sync.tryAcquireSharedNanos(1, unit.toNanos(timeout));
} | 3.26 |
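`CountDownLatch2` follows the JDK `CountDownLatch` contract (the RocketMQ variant exists mainly to add a reset capability), so the timed await can be exercised the same way against the JDK class:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        new Thread(() -> {
            try {
                Thread.sleep(100); // simulate work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            latch.countDown();
        }).start();
        // Returns true if the count reached zero within the timeout, false otherwise.
        boolean completed = latch.await(1, TimeUnit.SECONDS);
        System.out.println("completed in time: " + completed);
    }
}
```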
rocketmq-connect_CountDownLatch2_toString_rdh | /**
* Returns a string identifying this latch, as well as its state. The state, in brackets, includes the String {@code "Count ="} followed by the current count.
*
* @return a string identifying this latch, as well as its state
*/public String toString() {
        return ((super.toString() + "[Count = ") + sync.getCount()) + "]";
} | 3.26 |
rocketmq-connect_ClusterManagementServiceImpl_prepare_rdh | /**
* Preparation before startup
*
* @param connectConfig
*/
private void prepare(WorkerConfig connectConfig) {
String consumerGroup = this.defaultMQPullConsumer.getConsumerGroup();
Set<String> consumerGroupSet = ConnectUtil.fetchAllConsumerGroupList(connectConfig);
if (!consumerGroupSet.contains(consumerGroup)) {
log.info("try to create consumerGroup: {}!", consumerGroup);
ConnectUtil.createSubGroup(connectConfig, consumerGroup);
}
String clusterStoreTopic = connectConfig.getClusterStoreTopic();
if (!ConnectUtil.isTopicExist(connectConfig, clusterStoreTopic)) {
log.info("try to create cluster store topic: {}!", clusterStoreTopic);
TopicConfig topicConfig = new TopicConfig(clusterStoreTopic, 1, 1, 6);
ConnectUtil.createTopic(connectConfig, topicConfig);
}
} | 3.26 |
rocketmq-connect_AbstractPositionManagementService_mergeOffset_rdh | /**
* Merge new received position info with local store.
*
* @param partition
* @param offset
* @return */
private boolean mergeOffset(ExtendRecordPartition partition, RecordOffset offset) {
if ((null == partition) || partition.getPartition().isEmpty()) {
return false;
}
        if (positionStore.getKVMap().containsKey(partition)) {
            RecordOffset existedOffset = positionStore.getKVMap().get(partition);
// update
if (!offset.equals(existedOffset)) {
positionStore.put(partition, offset);
return true;
}
} else {
// add new position
positionStore.put(partition, offset);
return true;
}
return false;
} | 3.26 |
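The merge rule above is: ignore empty partitions, write when the partition is new or its offset changed, and report whether anything was written. A map-based sketch of the same rule, with strings and longs standing in for the partition and offset types:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class MergeOffsetSketch {
    static final Map<String, Long> store = new HashMap<>();

    // Returns true only when the store actually changed.
    static boolean mergeOffset(String partition, long offset) {
        if (partition == null || partition.isEmpty()) {
            return false;
        }
        Long existing = store.get(partition);
        if (Objects.equals(existing, offset)) {
            return false; // unchanged, nothing to persist
        }
        store.put(partition, offset);
        return true;
    }

    public static void main(String[] args) {
        System.out.println(mergeOffset("queue-0", 42L)); // true  (new partition)
        System.out.println(mergeOffset("queue-0", 42L)); // false (same offset)
        System.out.println(mergeOffset("queue-0", 43L)); // true  (updated)
    }
}
```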
rocketmq-connect_AbstractPositionManagementService_set_rdh | /**
* send position
*
* @param partition
* @param position
*/
protected synchronized void set(PositionChange change, ExtendRecordPartition partition, RecordOffset position) {
        String v5 = partition.getNamespace();
        // When serializing the key, we add in the namespace information so the key is [namespace, real key]
        byte[] key = keyConverter.fromConnectData(v5, null, Arrays.asList(change.name(), v5, partition != null ? partition.getPartition() : new HashMap<>()));
        ByteBuffer keyBuffer = (key != null) ? ByteBuffer.wrap(key) : null;
        byte[] value = valueConverter.fromConnectData(v5, null, position != null ? position.getOffset() : new HashMap<>());
        ByteBuffer v9 = (value != null) ? ByteBuffer.wrap(value) : null;
        notify(keyBuffer, v9);
} | 3.26 |
rocketmq-connect_DebeziumPostgresConnector_taskClass_rdh | /**
* Return the current connector class
*
* @return task implement class
*/
@Override
public Class<? extends Task> taskClass() {
return DebeziumPostgresSource.class;
} | 3.26 |
rocketmq-connect_DatabaseDialect_buildDropTableStatement_rdh | // drop table
default String buildDropTableStatement(TableId table, boolean ifExists, boolean cascade) {
ExpressionBuilder builder = expressionBuilder();
builder.append("DROP TABLE ");
builder.append(table);
if (ifExists) {
builder.append(" IF EXISTS");
}
if (cascade) {
builder.append(" CASCADE");
}
return builder.toString();
} | 3.26 |
rocketmq-connect_DatabaseDialect_parseTableNameToTableId_rdh | /**
* parse to Table Id
*
 * @param fqn
 *          the fully qualified table name
 * @return the parsed table ID
 */
default TableId parseTableNameToTableId(String fqn) {
List<String> parts = identifierRules().parseQualifiedIdentifier(fqn);
if (parts.isEmpty()) {
throw new IllegalArgumentException(("Invalid fully qualified name: '" + fqn) + "'");
}
if (parts.size() == 1) {
return new TableId(null, null, parts.get(0));
}
        if (parts.size() == 3) {
return new TableId(parts.get(0), parts.get(1), parts.get(2));
}
if (useCatalog()) {
return new TableId(parts.get(0), null, parts.get(1));
}
return new TableId(null, parts.get(0), parts.get(1));
} | 3.26 |
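The branching above resolves one-, two-, and three-part names; the two-part case depends on whether the dialect treats the qualifier as a catalog or a schema. A plain-split sketch of the same resolution (the real code delegates quoting rules to `identifierRules()`, which this ignores):

```java
public class TableIdSketch {
    record TableId(String catalog, String schema, String table) {}

    static TableId parse(String fqn, boolean useCatalog) {
        String[] parts = fqn.split("\\."); // real code also handles quoted identifiers
        return switch (parts.length) {
            case 1 -> new TableId(null, null, parts[0]);
            case 3 -> new TableId(parts[0], parts[1], parts[2]);
            case 2 -> useCatalog ? new TableId(parts[0], null, parts[1])
                                 : new TableId(null, parts[0], parts[1]);
            default -> throw new IllegalArgumentException("Invalid fully qualified name: '" + fqn + "'");
        };
    }

    public static void main(String[] args) {
        System.out.println(parse("orders", true));
        System.out.println(parse("mydb.orders", true));     // catalog-style dialect (e.g. MySQL)
        System.out.println(parse("public.orders", false));  // schema-style dialect (e.g. PostgreSQL)
        System.out.println(parse("mydb.public.orders", true));
    }
}
```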
rocketmq-connect_AvroSerdeFactory_addLogicalTypeConversion_rdh | /**
* add logical type conversion
*
* @param avroData
*/
public void addLogicalTypeConversion(GenericData avroData) {
avroData.addLogicalTypeConversion(new Conversions.DecimalConversion());
avroData.addLogicalTypeConversion(new TimeConversions.DateConversion());
avroData.addLogicalTypeConversion(new TimeConversions.TimeMillisConversion());
avroData.addLogicalTypeConversion(new TimeConversions.TimeMicrosConversion());
avroData.addLogicalTypeConversion(new TimeConversions.TimestampMillisConversion());
avroData.addLogicalTypeConversion(new TimeConversions.TimestampMicrosConversion());
avroData.addLogicalTypeConversion(new TimeConversions.LocalTimestampMillisConversion());
avroData.addLogicalTypeConversion(new TimeConversions.LocalTimestampMicrosConversion());
} | 3.26 |
rocketmq-connect_Serdes_Integer_rdh | /**
* A serde for nullable {@code Integer} type.
*/
public static Serde<Integer> Integer() {
return new IntegerSerde();
} | 3.26 |
rocketmq-connect_Serdes_Float_rdh | /**
* A serde for nullable {@code Float} type.
*/
public static Serde<Float> Float() {
return new FloatSerde();
} | 3.26 |
rocketmq-connect_Serdes_Long_rdh | /**
* A serde for nullable {@code Long} type.
*/
public static Serde<Long> Long() {
return new LongSerde();
} | 3.26 |
rocketmq-connect_Serdes_Short_rdh | /**
* A serde for nullable {@code Short} type.
*/public static Serde<Short> Short() {
return new ShortSerde();
} | 3.26 |
rocketmq-connect_Serdes_ByteArray_rdh | /**
* A serde for nullable {@code byte[]} type.
*/
public static Serde<byte[]> ByteArray() {
return new ByteArraySerde();
} | 3.26 |
rocketmq-connect_Serdes_Double_rdh | /**
* A serde for nullable {@code Double} type.
*/
public static Serde<Double> Double() {
return new DoubleSerde();
} | 3.26 |
rocketmq-connect_Serdes_String_rdh | /**
* A serde for nullable {@code String} type.
*/public static Serde<String> String() {
return new StringSerde();
} | 3.26 |
rocketmq-connect_Serdes_ByteBuffer_rdh | /**
* A serde for nullable {@code ByteBuffer} type.
*/
public static Serde<ByteBuffer> ByteBuffer() {
return new ByteBufferSerde();
} | 3.26 |
rocketmq-connect_Serdes_serdeFrom_rdh | /**
* Construct a serde object from separate serializer and deserializer
*
* @param serializer
* must not be null.
* @param deserializer
* must not be null.
*/
public static <T> Serde<T> serdeFrom(final Serializer<T> serializer, final Deserializer<T> deserializer) {
        if (serializer == null) {
            throw new IllegalArgumentException("serializer must not be null");
}
if (deserializer == null) {
throw new IllegalArgumentException("deserializer must not be null");
}
        return new WrapperSerde<>(serializer, deserializer);
} | 3.26 |
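`serdeFrom` is the usual null-checked wrapper pattern for pairing a serializer with its deserializer. A self-contained sketch with simplified single-method interfaces; the project's `Serializer`/`Deserializer` may carry extra context arguments, which this omits:

```java
public class SerdeSketch {
    // Simplified interfaces for illustration only.
    interface Serializer<T> { byte[] serialize(T data); }
    interface Deserializer<T> { T deserialize(byte[] data); }
    interface Serde<T> { Serializer<T> serializer(); Deserializer<T> deserializer(); }

    static <T> Serde<T> serdeFrom(Serializer<T> serializer, Deserializer<T> deserializer) {
        if (serializer == null) {
            throw new IllegalArgumentException("serializer must not be null");
        }
        if (deserializer == null) {
            throw new IllegalArgumentException("deserializer must not be null");
        }
        return new Serde<T>() { // the wrapper simply exposes the supplied pair
            public Serializer<T> serializer() { return serializer; }
            public Deserializer<T> deserializer() { return deserializer; }
        };
    }

    public static void main(String[] args) {
        Serde<String> strings = serdeFrom(String::getBytes, String::new);
        byte[] bytes = strings.serializer().serialize("hello");
        System.out.println(strings.deserializer().deserialize(bytes)); // hello
    }
}
```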
rocketmq-connect_MetricsReporter_onCounterAdded_rdh | /**
* Called when a {@link Counter} is added to the registry.
*
* @param name
* the counter's name
* @param counter
* the counter
 */
public void onCounterAdded(String name, Counter counter) {
this.onCounterAdded(MetricUtils.stringToMetricName(name), counter.getCount());
} | 3.26 |
rocketmq-connect_MetricsReporter_m0_rdh | /**
* Called when a {@link Meter} is removed from the registry.
*
* @param name
* the meter's name
*/
public void m0(String name) {
    this.m0(MetricUtils.stringToMetricName(name));
} | 3.26 |
rocketmq-connect_MetricsReporter_onHistogramRemoved_rdh | /**
* Called when a {@link Histogram} is removed from the registry.
*
* @param name
* the histogram's name
*/
public void onHistogramRemoved(String name) {
    this.onHistogramRemoved(MetricUtils.stringToMetricName(name));
} | 3.26 |
rocketmq-connect_MetricsReporter_onTimerAdded_rdh | /**
* Called when a {@link Timer} is added to the registry.
*
* @param name
* the timer's name
* @param timer
* the timer
 */
public void onTimerAdded(String name, Timer timer) {
this.onTimerAdded(MetricUtils.stringToMetricName(name), timer);
} | 3.26 |
rocketmq-connect_MetricsReporter_onHistogramAdded_rdh | /**
* Called when a {@link Histogram} is added to the registry.
*
* @param name
* the histogram's name
* @param histogram
* the histogram
*/
public void onHistogramAdded(String name, Histogram histogram) {
MetricName metricName = MetricUtils.stringToMetricName(name);
this.onHistogramAdded(metricName, MetricUtils.getHistogramValue(metricName, histogram));
} | 3.26 |
rocketmq-connect_MetricsReporter_onGaugeRemoved_rdh | /**
* Called when a {@link Gauge} is removed from the registry.
*
* @param name
* the gauge's name
*/
public void onGaugeRemoved(String name) {
this.onGaugeRemoved(MetricUtils.stringToMetricName(name));
} | 3.26 |
rocketmq-connect_MetricsReporter_onMeterAdded_rdh | /**
* Called when a {@link Meter} is added to the registry.
*
* @param name
* the meter's name
* @param meter
* the meter
*/
public void onMeterAdded(String name, Meter meter) {
MetricName metricName = MetricUtils.stringToMetricName(name);
onMeterAdded(metricName, MetricUtils.getMeterValue(metricName, meter));
} | 3.26 |
rocketmq-connect_MetricsReporter_onGaugeAdded_rdh | /**
* Called when a {@link Gauge} is added to the registry.
*
* @param name
* the gauge's name
* @param gauge
* the gauge
*/
public void onGaugeAdded(String name, Gauge<?> gauge) {
this.onGaugeAdded(MetricUtils.stringToMetricName(name), gauge.getValue());
} | 3.26 |
rocketmq-connect_MetricsReporter_onCounterRemoved_rdh | /**
* Called when a {@link Counter} is removed from the registry.
*
* @param name
* the counter's name
*/
public void onCounterRemoved(String name) {
this.onCounterRemoved(MetricUtils.stringToMetricName(name));
} | 3.26 |
rocketmq-connect_ConnectKeyValueSerde_serde_rdh | /**
 * Create a serde pairing the ConnectKeyValue serializer and deserializer.
 *
 * @return the serde
 */
public static ConnectKeyValueSerde serde() {
return new ConnectKeyValueSerde(new ConnectKeyValueSerializer(), new ConnectKeyValueDeserializer());
} | 3.26 |
rocketmq-connect_RestHandler_getAllocatedConnectors_rdh | // old rest api
private void getAllocatedConnectors(Context context) {
try {
ConcurrentMap<String, WorkerConnector> workerConnectors = connectController.getWorker().getConnectors();
Map<String, Map<String, String>> connectors = new HashMap<>();
for (Map.Entry<String, WorkerConnector> entry : workerConnectors.entrySet()) {
connectors.put(entry.getKey(), entry.getValue().getKeyValue().getProperties());
}
context.json(new HttpResponse<>(context.status(), connectors));
} catch (Exception ex) {
context.json(new ErrorMessage(HttpStatus.INTERNAL_SERVER_ERROR_500, ex.getMessage()));
}
} | 3.26 |
rocketmq-connect_RestHandler_listConnectors_rdh | /**
* list all connectors
*
* @param context
*/
private void listConnectors(Context context) {
try {
Map<String, Map<String, Object>> out = new HashMap<>();
for (String connector : connectController.connectors()) {
Map<String, Object> connectorExpansions = new HashMap<>();
connectorExpansions.put("status", connectController.connectorStatus(connector));
connectorExpansions.put("info", connectController.connectorInfo(connector));
out.put(connector, connectorExpansions);
}
context.json(new HttpResponse<>(context.status(), out));
        } catch (Exception ex) {
log.error("List all connectors failed. ", ex);
            context.json(new ErrorMessage(HttpStatus.INTERNAL_SERVER_ERROR_500, ex.getMessage()));
        }
} | 3.26 |
rocketmq-connect_Stat_type_rdh | /**
 * The stat type.
 *
 * @return the type name
 */
default String type() {
return NoneType.none.name();
} | 3.26 |
rocketmq-connect_JsonSchemaConverterConfig_decimalFormat_rdh | /**
 * Get the serialization format for decimal types.
 *
 * @return the decimal format
 */
public DecimalFormat decimalFormat() {
return props.containsKey(DECIMAL_FORMAT_CONFIG) ? DecimalFormat.valueOf(props.get(DECIMAL_FORMAT_CONFIG).toString().toUpperCase(Locale.ROOT)) : DECIMAL_FORMAT_DEFAULT;
} | 3.26 |
rocketmq-connect_RedisSourceConnector_validate_rdh | /**
 * Should be invoked before starting the connector.
 *
 * @param config
 *          the connector configuration to validate
*/
@Override
public void validate(KeyValue config) {
this.redisConfig.load(config);
} | 3.26 |
rocketmq-connect_StandaloneConnectStartup_createConnectController_rdh | /**
* Read configs from command line and create connect controller.
*
 * @param args
 *          the command line arguments
 * @return the connect controller, or null if creation failed
 */
private static StandaloneConnectController createConnectController(String[] args) {
try {
// Build the command line options.
Options options = ServerUtil.buildCommandlineOptions(new Options());
commandLine = ServerUtil.parseCmdLine("connect", args, buildCommandlineOptions(options), new PosixParser());
if (null == commandLine) {
System.exit(-1);
}
// Load configs from command line.
            StandaloneConfig config = new StandaloneConfig();
            if (commandLine.hasOption('c')) {
String file = commandLine.getOptionValue('c').trim();
                if (file != null) {
configFile = file;
InputStream in = new BufferedInputStream(new FileInputStream(file));
properties = new Properties();
properties.load(in);
FileAndPropertyUtil.properties2Object(properties, config);
in.close();
}
}
if (StringUtils.isNotEmpty(config.getMetricsConfigPath())) {
String file = config.getMetricsConfigPath();
InputStream in = new BufferedInputStream(new FileInputStream(file));
properties = new Properties();
properties.load(in);
Map<String, String> metricsConfig =
new ConcurrentHashMap<>();
                if (!properties.containsKey(WorkerConfig.METRIC_CLASS)) {
throw new IllegalArgumentException("[metrics.reporter] is empty");
}
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
                    if (entry.getKey().equals(WorkerConfig.METRIC_CLASS)) {
continue;
}
                    metricsConfig.put(entry.getKey().toString(), entry.getValue().toString());
                }
config.getMetricsConfig().put(properties.getProperty(WorkerConfig.METRIC_CLASS), metricsConfig);
                in.close();
            }
if (null == config.getConnectHome()) {
System.out.printf("Please set the %s variable in your environment to match the location of the Connect installation", WorkerConfig.CONNECT_HOME_ENV);
System.exit(-2);
}
LoggerContext lc = ((LoggerContext) (LoggerFactory.getILoggerFactory()));
JoranConfigurator configurator = new JoranConfigurator();
configurator.setContext(lc);
lc.reset();
configurator.doConfigure(config.getConnectHome() + "/conf/logback.xml");
List<String> pluginPaths = new ArrayList<>(16);
if (StringUtils.isNotEmpty(config.getPluginPaths())) {
String[] strArr = config.getPluginPaths().split(",");
for (String path : strArr) {
if (StringUtils.isNotEmpty(path)) {
pluginPaths.add(path);
}
}
}
Plugin plugin = new Plugin(pluginPaths);
ClusterManagementService clusterManagementService = new MemoryClusterManagementServiceImpl();
clusterManagementService.initialize(config);
            ConfigManagementService configManagementService = new MemoryConfigManagementServiceImpl();
            configManagementService.initialize(config, null, plugin);
PositionManagementService positionManagementServices = new FilePositionManagementServiceImpl();
positionManagementServices.initialize(config, null, null);
StateManagementService stateManagementService = new MemoryStateManagementServiceImpl();
stateManagementService.initialize(config, null);
            StandaloneConnectController controller = new StandaloneConnectController(plugin, config, clusterManagementService, configManagementService, positionManagementServices, stateManagementService);
// Invoked when shutdown.
            Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
private volatile boolean hasShutdown = false;
private AtomicInteger shutdownTimes = new AtomicInteger(0);
@Override
                public void run() {
synchronized(this) {
log.info("Shutdown hook was invoked, {}", this.shutdownTimes.incrementAndGet());
if (!this.hasShutdown) {
this.hasShutdown = true;
long beginTime = System.currentTimeMillis();
controller.shutdown();
long consumingTimeTotal = System.currentTimeMillis() - beginTime;
log.info("Shutdown hook over, consuming total time(ms): {}", consumingTimeTotal);}
}
}
}, "ShutdownHook"));
return controller;
} catch (Throwable e) {
e.printStackTrace();
System.exit(-1);
}
return null;
} | 3.26 |
rocketmq-connect_KafkaSinkAdaptorConnector_start_rdh | /**
* Start the component
*
* @param config
* component context
*/
@Override
public void start(KeyValue config) {
super.start(config);
sinkConnector.validate(taskConfig);
sinkConnector.initialize(new KafkaConnectorContext(connectorContext));
sinkConnector.start(taskConfig);
} | 3.26 |
rocketmq-connect_DebeziumOracleConnector_taskClass_rdh | /**
* Return the current connector class
*
* @return task implement class
*/
@Override
public Class<? extends Task> taskClass() {
return DebeziumOracleSource.class;
} | 3.26 |
rocketmq-connect_DebeziumOracleConnector_getConnectorClass_rdh | /**
* get connector class
 */
@Override
public String getConnectorClass() {
return DEFAULT_CONNECTOR;
} | 3.26 |
rocketmq-connect_SetMaximumPrecision_m0_rdh | /**
 * Transform the record key.
 */
public static class Key extends SetMaximumPrecision<ConnectRecord> {
@Override
public ConnectRecord m0(ConnectRecord r) {
SchemaAndValue v13 = this.process(r, r.getKeySchema(), r.getKey());
ConnectRecord record = new ConnectRecord(r.getPosition().getPartition(), r.getPosition().getOffset(), r.getTimestamp(), v13.schema(), v13.value(), r.getSchema(), r.getData());
record.setExtensions(r.getExtensions());
return record;
} | 3.26 |
rocketmq-connect_LogReporter_report_rdh | /**
* Log error context.
*
* @param context
* the processing context.
*/
@Override
public void report(ProcessingContext context) {
errorMetricsGroup.recordErrorLogged();
log.error(message(context), context.error());
} | 3.26 |
rocketmq-connect_LogReporter_message_rdh | /**
* format error message
*
 * @param context
 *          the processing context
 * @return the formatted error message
 */
String message(ProcessingContext context) {
return String.format("Error encountered in task %s. %s", id.toString(), context.toString(deadLetterQueueConfig.includeRecordDetailsInErrorLog()));
} | 3.26 |
rocketmq-connect_ParsedSchema_validate_rdh | /**
* validate data
*/
default void validate() {
} | 3.26 |
rocketmq-connect_ParsedSchema_deepEquals_rdh | /**
* deep equals
*
 * @param schema
 *          the schema to compare against
 * @return true if the raw schemas are equal
 */
default boolean deepEquals(ParsedSchema schema) {
return Objects.equals(rawSchema(), schema.rawSchema());
} | 3.26 |
rocketmq-connect_WorkerConnector_getConnector_rdh | /**
 * The connector object.
 *
 * @return the connector
 */
public Connector getConnector() {
return connector;
} | 3.26 |
rocketmq-connect_WorkerConnector_getConnectorName_rdh | /**
 * The connector name.
 *
 * @return the connector name
 */
public String getConnectorName() {
    return connectorName;
} | 3.26 |
rocketmq-connect_WorkerConnector_shutdown_rdh | /**
* Stop this connector. This method does not block, it only triggers shutdown. Use
* #{@link #awaitShutdown} to block until completion.
*/
public synchronized void shutdown() {
log.info("Scheduled shutdown for {}",
this);
stopping = true;
notify();
} | 3.26 |