Columns: name (string, length 12–178), code_snippet (string, length 8–36.5k), score (float64, range 3.26–3.68)
zilla_ManyToOneRingBuffer_producerPosition_rdh
/** * {@inheritDoc } */ public long producerPosition() { return buffer.getLongVolatile(tailPositionIndex); }
3.26
zilla_ManyToOneRingBuffer_consumerPosition_rdh
/** * {@inheritDoc } */ public long consumerPosition() { return buffer.getLongVolatile(headPositionIndex); }
3.26
zilla_ManyToOneRingBuffer_consumerHeartbeatTime_rdh
/** * {@inheritDoc } */ public long consumerHeartbeatTime() { return buffer.getLongVolatile(consumerHeartbeatIndex); }
3.26
zilla_ManyToOneRingBuffer_write_rdh
/** * {@inheritDoc } */ public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int srcIndex, final int length) { checkTypeId(msgTypeId); checkMsgLength(length); boolean isSuccessful = false; final AtomicBuffer buffer = this.buffer; final int recordLength = length + HEADER_LENGTH; final int recordIndex = claimCapacity(buffer, recordLength); if (INSUFFICIENT_CAPACITY != recordIndex) { buffer.putInt(typeOffset(recordIndex), msgTypeId); buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, srcIndex, length); buffer.putIntOrdered(lengthOffset(recordIndex), recordLength); isSuccessful = true; } return isSuccessful; }
3.26
zilla_ManyToOneRingBuffer_maxMsgLength_rdh
/** * {@inheritDoc } */ public int maxMsgLength() { return maxMsgLength; }
3.26
zilla_ManyToOneRingBuffer_tryClaim_rdh
/** * {@inheritDoc } */ public int tryClaim(final int msgTypeId, final int length) {checkTypeId(msgTypeId); checkMsgLength(length); final AtomicBuffer buffer = this.buffer; final int recordLength = length + HEADER_LENGTH; final int recordIndex = claimCapacity(buffer, recordLength); if (INSUFFICIENT_CAPACITY == recordIndex) { return recordIndex; } buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength); UnsafeAccess.UNSAFE.storeFence(); buffer.putInt(typeOffset(recordIndex), msgTypeId); return encodedMsgOffset(recordIndex); }
3.26
zilla_ManyToOneRingBuffer_buffer_rdh
/** * {@inheritDoc } */ public AtomicBuffer buffer() { return buffer;}
3.26
zilla_WsClientFactory_assembleHeader_rdh
// @return number of bytes consumed to assemble the websocket header private int assembleHeader(DirectBuffer buffer, int offset, int length) { int remaining = Math.min(length, MAXIMUM_HEADER_SIZE - headerLength); // may copy more than actual header length (up to max header length), but will adjust at the end header.putBytes(headerLength, buffer, offset, remaining); int consumed = remaining; if ((headerLength + remaining) >= 2) { int wsHeaderLength = wsHeaderLength(header); // eventual headerLength must not be more than wsHeaderLength if ((headerLength + remaining) > wsHeaderLength) { consumed = wsHeaderLength - headerLength; } } headerLength += consumed; return consumed; }
3.26
rocketmq-connect_WorkerSinkTask_pauseAll_rdh
// pause all consumer topic queue private void pauseAll() { consumer.pause(f1); }
3.26
rocketmq-connect_WorkerSinkTask_consumeFromOffset_rdh
/** * consume from offset * * @param messageQueue * @param taskConfig */ public long consumeFromOffset(MessageQueue messageQueue, ConnectKeyValue taskConfig) { // -1 when started long offset = consumer.getOffsetStore().readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY); if (0 > offset) { // query from broker offset = consumer.getOffsetStore().readOffset(messageQueue, ReadOffsetType.READ_FROM_STORE); } if (offset < 0) { String consumeFromWhere = taskConfig.getString(ConnectorConfig.CONSUME_FROM_WHERE); if (StringUtils.isBlank(consumeFromWhere)) { consumeFromWhere = ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET.name(); } try { switch (ConsumeFromWhere.valueOf(consumeFromWhere)) { case CONSUME_FROM_LAST_OFFSET : consumer.seekToEnd(messageQueue); break; case CONSUME_FROM_FIRST_OFFSET : consumer.seekToBegin(messageQueue); break; default : break; } } catch (MQClientException e) { throw new ConnectException(e); } } log.info("Consume {} from {}", messageQueue, offset); return offset < 0 ? 0 : offset; }
3.26
rocketmq-connect_WorkerSinkTask_errorRecordReporter_rdh
/** * error record reporter * * @return */ public WorkerErrorRecordReporter errorRecordReporter() { return errorRecordReporter; }
3.26
rocketmq-connect_WorkerSinkTask_execute_rdh
/** * execute poll and send record */ @Override protected void execute() { while (isRunning()) { try { long startTimeStamp = System.currentTimeMillis(); log.info("START pullMessageFromQueues, time started : {}", startTimeStamp); if (f1.size() == 0) { log.info("messageQueuesOffsetMap is null, : {}", startTimeStamp); stopPullMsgLatch.await(PULL_MSG_ERROR_BACKOFF_MS, TimeUnit.MILLISECONDS);continue; } if (shouldPause()) { // pause pauseAll(); onPause(); try { // wait unpause if (awaitUnpause()) { // check paused for retry if (!pausedForRetry) { resumeAll(); onResume(); } } continue; } catch (InterruptedException e) { // do exception } } iteration(); } catch (RetriableException e) { log.error(" Sink task {}, pull message RetriableException, Error {} ", this, e.getMessage(), e); } catch (InterruptedException interruptedException) { // NO-op } catch (Throwable e) { log.error(" Sink task {}, pull message Throwable, Error {} ", this, e.getMessage(), e); throw e;} } }
3.26
rocketmq-connect_WorkerSinkTask_initializeAndStart_rdh
/** * initialize and start */ @Override protected void initializeAndStart() { Set<String> topics = new SinkConnectorConfig(taskConfig).parseTopicList(); if (org.apache.commons.collections4.CollectionUtils.isEmpty(topics)) { throw new ConnectException("Sink connector topics config can not be null, please check sink connector config info"); } // sub topics try { for (String topic : topics) { consumer.setPullBatchSize(MAX_MESSAGE_NUM); consumer.subscribe(topic, "*"); } if (messageQueueListener == null) { messageQueueListener = consumer.getMessageQueueListener(); } consumer.setMessageQueueListener(new MessageQueueListener() { @Override public void messageQueueChanged(String subTopic, Set<MessageQueue> mqAll, Set<MessageQueue> mqDivided) { // update assigned message queues messageQueueListener.messageQueueChanged(subTopic, mqAll, mqDivided); // listener message queue changed log.info("Message queue changed start, old message queues offset {}", JSON.toJSONString(f1)); if (isStopping()) { log.trace("Skipping partition revocation callback as task has already been stopped"); return; } // remove and close message queue log.info("Task {}, MessageQueueChanged, old messageQueuesOffsetMap {}", id.toString(), JSON.toJSONString(f1)); removeAndCloseMessageQueue(subTopic, mqDivided); // add new message queue assignMessageQueue(mqDivided); log.info("Task {}, Message queue changed end, new message queues offset {}", id, JSON.toJSONString(f1)); preCommit(); log.info("Message queue changed start, new message queues offset {}", JSON.toJSONString(f1)); } }); consumer.start(); } catch (MQClientException e) { log.error("Task {}, InitializeAndStart MQClientException", id.toString(), e); throw new ConnectException(e); } log.info("Sink task consumer start. taskConfig {}", JSON.toJSONString(taskConfig)); sinkTask.init(sinkTaskContext); sinkTask.start(taskConfig); log.info("{} Sink task finished initialization and start", this); }
3.26
rocketmq-connect_WorkerSinkTask_pollConsumer_rdh
/** * poll consumer * * @param timeoutMs * @return */ private List<MessageExt> pollConsumer(long timeoutMs) {List<MessageExt> msgs = consumer.poll(timeoutMs); // metrics recordReadSuccess(msgs.size()); return msgs; }
3.26
rocketmq-connect_WorkerSinkTask_m0_rdh
/** * do commit * * @param offsets * @param seqno */ private void m0(Map<MessageQueue, Long> offsets, int seqno) { log.debug("{} Committing offsets synchronously using sequence number {}: {}", this, seqno, offsets); try { for (Map.Entry<MessageQueue, Long> offsetEntry : offsets.entrySet()) { consumer.getOffsetStore().updateOffset(offsetEntry.getKey(), offsetEntry.getValue(), true); // consumer.getOffsetStore().updateConsumeOffsetToBroker(offsetEntry.getKey(), offsetEntry.getValue(), false); } onCommitCompleted(null, seqno, offsets);} catch (Exception e) { onCommitCompleted(e, seqno, offsets); } }
3.26
rocketmq-connect_WorkerSinkTask_resumeAll_rdh
// resume all consumer topic queue private void resumeAll() { for (MessageQueue queue : f1) { if (!sinkTaskContext.getPausedQueues().contains(queue)) { consumer.resume(singleton(queue)); } }}
3.26
rocketmq-connect_WorkerSinkTask_commitOffsets_rdh
/** * commit offset * * @param now * @param closing */ private void commitOffsets(long now, boolean closing) { commitOffsets(now, closing, f1); }
3.26
rocketmq-connect_WorkerSinkTask_compareAndCommit_rdh
/** * compare and commit * * @param offsetsToCommit * @param lastCommittedQueuesOffsets * @param taskProvidedOffsets */ private void compareAndCommit(Map<MessageQueue, Long> offsetsToCommit, Map<MessageQueue, Long> lastCommittedQueuesOffsets, Map<MessageQueue, Long> taskProvidedOffsets) { // Get all assigned topic message queues Collection<MessageQueue> assignedTopicQueues = this.f1; // committable offsets final Map<MessageQueue, Long> committableOffsets = new HashMap<>(lastCommittedQueuesOffsets); for (Map.Entry<MessageQueue, Long> taskProvidedOffsetsEntry : taskProvidedOffsets.entrySet()) { // task provided offset final MessageQueue queue = taskProvidedOffsetsEntry.getKey(); final Long taskProvidedOffset = taskProvidedOffsetsEntry.getValue(); // check whether the queue was removed by rebalance if (!assignedTopicQueues.contains(queue)) { log.warn("{} After rebalancing, the MessageQueue is removed from the current consumer {}/{} , assignment={}", this, queue, taskProvidedOffset, assignedTopicQueues); continue; } if (!committableOffsets.containsKey(queue)) { log.debug("{} The MessageQueue provided by the task is not subscribed {}/{} , requested={}", this, queue, taskProvidedOffset, committableOffsets.keySet()); continue; } if (committableOffsets.containsKey(queue)) { // current offset long currentOffset = offsetsToCommit.get(queue); // compare and set if (currentOffset >= taskProvidedOffset) { committableOffsets.put(queue, taskProvidedOffset); } } } if (committableOffsets.equals(lastCommittedQueuesOffsets)) { log.debug("{} Skipping offset commit, no change since last commit", this); onCommitCompleted(null, commitSeqno, null); return; } m0(committableOffsets, commitSeqno); }
3.26
rocketmq-connect_WorkerSinkTask_preCommit_rdh
/** * reset offset by custom */ private void preCommit() { Map<MessageQueue, Long> offsets = sinkTaskContext.queuesOffsets();if (offsets.isEmpty()) {return; } for (Map.Entry<MessageQueue, Long> entry : offsets.entrySet()) { MessageQueue queue = entry.getKey(); Long offset = entry.getValue(); if (offset != null) {log.trace("{} Rewind {} to offset {}", this, queue, offset); try { consumer.seek(queue, offset); lastCommittedOffsets.put(queue, offset); f0.put(queue, offset); } catch (MQClientException e) { // NO-op } }} sinkTaskContext.cleanQueuesOffsets(); }
3.26
rocketmq-connect_WorkerSinkTask_closeMessageQueues_rdh
/** * remove offset from currentOffsets/lastCommittedOffsets * remove message from messageBatch * * @param queues * @param lost */ private void closeMessageQueues(Set<MessageQueue> queues, boolean lost) { if (!lost) { commitOffsets(System.currentTimeMillis(), true, queues); } else { log.trace("{} Closing the task as partitions have been lost: {}", this, queues); f0.keySet().removeAll(queues); } lastCommittedOffsets.keySet().removeAll(queues); messageBatch.removeIf(record -> { MessageQueue messageQueue = ConnectUtil.convertToMessageQueue(record.getPosition().getPartition()); return queues.contains(messageQueue); }); }
3.26
rocketmq-connect_WorkerSinkTask_onCommitCompleted_rdh
/** * on commit completed * * @param error * @param seqno * @param committedOffsets */ private void onCommitCompleted(Throwable error, long seqno, Map<MessageQueue, Long> committedOffsets) { if (commitSeqno != seqno) { // skip this commit sinkTaskMetricsGroup.recordOffsetCommitSkip(); return; } if (error != null) { log.error("{} An exception was thrown when committing offsets, sequence number {}: {}", this, seqno, committedOffsets, error); recordCommitFailure(System.currentTimeMillis() - commitStarted); } else { log.debug("{} Finished offset commit successfully in {} ms for sequence number {}: {}", this, System.currentTimeMillis() - commitStarted, seqno, committedOffsets); if (committedOffsets != null) { lastCommittedOffsets.putAll(committedOffsets); log.debug("{} Last committed offsets are now {}", this, committedOffsets); } sinkTaskMetricsGroup.recordOffsetCommitSuccess(); } committing = false; }
3.26
rocketmq-connect_WorkerSinkTask_m1_rdh
/** * receive message from MQ. * * @param messages */ private void m1(List<MessageExt> messages) { if (messageBatch.isEmpty()) { originalOffsets.clear(); } for (MessageExt message : messages) { this.retryWithToleranceOperator.consumerRecord(message); ConnectRecord connectRecord = convertMessages(message); originalOffsets.put(new MessageQueue(message.getTopic(), message.getBrokerName(), message.getQueueId()), message.getQueueOffset() + 1); if ((connectRecord != null) && (!this.retryWithToleranceOperator.failed())) { messageBatch.add(connectRecord); } log.info("Received one message successfully: msgId {}", message.getMsgId()); } try { long start = System.currentTimeMillis(); sinkTask.put(new ArrayList<>(messageBatch)); // metrics recordMultiple(messageBatch.size()); sinkTaskMetricsGroup.recordPut(System.currentTimeMillis() - start); f0.putAll(originalOffsets); messageBatch.clear(); if (!shouldPause()) { if (pausedForRetry) { resumeAll(); pausedForRetry = false; } } } catch (RetriableException e) { log.error("task {} put sink record RetriableException, error {}", this, e.getMessage(), e); // pause all consumers and wait before retrying the put pausedForRetry = true; pauseAll(); throw e; } catch (Throwable t) { log.error("task {} put sink record Throwable, error {}", this, t.getMessage(), t); throw t; } }
3.26
rocketmq-connect_WorkerSinkTask_removeAndCloseMessageQueue_rdh
/** * remove and close message queue * * @param topic * @param queues */ public void removeAndCloseMessageQueue(String topic, Set<MessageQueue> queues) { Set<MessageQueue> removeMessageQueues; if (queues == null) { removeMessageQueues = new HashSet<>(); for (MessageQueue messageQueue : f1) { if (messageQueue.getTopic().equals(topic)) { removeMessageQueues.add(messageQueue); } } } else { // select queues of this topic that are not contained in the new assignment removeMessageQueues = f1.stream().filter(messageQueue -> topic.equals(messageQueue.getTopic()) && (!queues.contains(messageQueue))).collect(Collectors.toSet()); } if ((removeMessageQueues == null) || removeMessageQueues.isEmpty()) { return; } // clean message queues offset closeMessageQueues(removeMessageQueues, false); // remove record partitions Set<RecordPartition> waitRemoveQueueMetaDatas = new HashSet<>(); for (MessageQueue messageQueue : removeMessageQueues) { recordPartitions.forEach(key -> { if ((key.getPartition().get(TOPIC).equals(messageQueue.getTopic()) && key.getPartition().get(BROKER_NAME).equals(messageQueue.getBrokerName())) && Integer.valueOf(String.valueOf(key.getPartition().get(QUEUE_ID))).equals(messageQueue.getQueueId())) { waitRemoveQueueMetaDatas.add(key); } }); } recordPartitions.removeAll(waitRemoveQueueMetaDatas); // start remove f1.removeAll(removeMessageQueues); }
3.26
rocketmq-connect_JdbcSourceConnector_taskConfigs_rdh
/** * Returns a set of configurations for Tasks based on the current configuration, * producing at most count configurations. * * @param maxTasks * maximum number of configurations to generate * @return configurations for Tasks */ @Override public List<KeyValue> taskConfigs(int maxTasks) { log.info(("Connector task config divide[" + maxTasks) + "]"); List<KeyValue> v0 = Lists.newArrayList(); List<String> v1 = Lists.newArrayList(); log.info(("Connector table white list[" + jdbcSourceConfig.getTableWhitelist()) + "]"); jdbcSourceConfig.getTableWhitelist().forEach(table -> { v1.add(table); }); maxTasks = (v1.size() > maxTasks) ? maxTasks : v1.size(); List<List<String>> tablesGrouped = ConnectorGroupUtils.groupPartitions(v1, maxTasks); for (List<String> tableGroup : tablesGrouped) { KeyValue keyValue = new DefaultKeyValue(); for (String key : originalConfig.keySet()) { keyValue.put(key, originalConfig.getString(key)); } keyValue.put(JdbcSourceTaskConfig.TABLES_CONFIG, StringUtils.join(tableGroup, ",")); v0.add(keyValue);} return v0; }
3.26
rocketmq-connect_JdbcSourceConnector_start_rdh
/** * Start the component * * @param config * component context */@Override public void start(KeyValue config) { originalConfig = config; }
3.26
rocketmq-connect_JdbcSourceConnector_validate_rdh
/** * Should be invoked before starting the connector. * * @param config */ @Override public void validate(KeyValue config) { jdbcSourceConfig = new JdbcSourceConfig(config); // validate config }
3.26
rocketmq-connect_ClusterManagementService_configure_rdh
/** * Configure class with the given key-value pairs * * @param config * can be DistributedConfig or StandaloneConfig */ default void configure(WorkerConfig config) { }
3.26
rocketmq-connect_PositionStorageWriter_beginFlush_rdh
/** * begin flush offset * * @return */ public synchronized boolean beginFlush() { if (isFlushing()) { throw new ConnectException("PositionStorageWriter is already flushing");} if (data.isEmpty()) { return false; } this.toFlush = this.data; this.data = new HashMap<>(); return true; }
3.26
rocketmq-connect_PositionStorageWriter_call_rdh
/** * Computes a result, or throws an exception if unable to do so. * * @return computed result * @throws Exception * if unable to compute a result */ @Override public Void call() { try { // the flush has been canceled if (flushId != currentFlushId) { return null; } positionManagementService.putPosition(toFlush); log.debug("Submitting {} entries to backing store. The offsets are: {}", toFlush.size(), toFlush); positionManagementService.persist(); positionManagementService.synchronize(true); // persist finished toFlush = null; currentFlushId++; } catch (Throwable throwable) { // rollback cancelFlush(); this.callback.onCompletion(throwable, null, null); } return null; }
3.26
rocketmq-connect_PositionStorageWriter_writeOffset_rdh
/** * write offsets * * @param positions * positions */ @Override public void writeOffset(Map<RecordPartition, RecordOffset> positions) { for (Map.Entry<RecordPartition, RecordOffset> offset : positions.entrySet()) { writeOffset(offset.getKey(), offset.getValue()); } }
3.26
rocketmq-connect_PositionStorageWriter_close_rdh
/** * Closes this stream and releases any system resources associated * with it. If the stream is already closed then invoking this * method has no effect. * * @throws IOException * if an I/O error occurs */ @Override public void close() throws IOException {if (executorService != null) { executorService.shutdown(); } }
3.26
rocketmq-connect_IdentifierRules_trailingQuoteString_rdh
/** * Get the string used as a trailing quote. * * @return the trailing quote string; never null */ public String trailingQuoteString() { return trailingQuoteString; }
3.26
rocketmq-connect_IdentifierRules_leadingQuoteString_rdh
/** * Get the string used as a leading quote. * * @return the leading quote string; never null */ public String leadingQuoteString() { return leadingQuoteString; }
3.26
rocketmq-connect_IdentifierRules_identifierDelimiter_rdh
/** * Get the delimiter that is used to delineate segments within fully-qualified identifiers. * * @return the identifier delimiter; never null */ public String identifierDelimiter() { return identifierDelimiter; }
3.26
rocketmq-connect_ColumnDefinition_scale_rdh
/** * Gets the column's number of digits to right of the decimal point. 0 is returned for data types * where the scale is not applicable. * * @return scale */ public int scale() { return scale;}
3.26
rocketmq-connect_ColumnDefinition_displaySize_rdh
/** * Indicates the column's normal maximum width in characters. * * @return the normal maximum number of characters allowed as the width of the designated column */ public int displaySize() { return displaySize; }
3.26
rocketmq-connect_ColumnDefinition_nullability_rdh
/** * Indicates the nullability of values in the column. * * @return the nullability status of the given column; never null */ public Nullability nullability() { return f2; }
3.26
rocketmq-connect_ColumnDefinition_isCurrency_rdh
/** * Indicates whether the column is a cash value. * * @return <code>true</code> if so; <code>false</code> otherwise */ public boolean isCurrency() { return currency; }
3.26
rocketmq-connect_ColumnDefinition_mutability_rdh
/** * Indicates whether the designated column is mutable. * * @return the mutability; never null */ public Mutability mutability() { return mutability; } /** * Returns the fully-qualified name of the Java class whose instances are manufactured if the * method {@link java.sql.ResultSet#getObject(int)} is called to retrieve a value from the column. * {@link java.sql.ResultSet#getObject(int)}
3.26
rocketmq-connect_ColumnDefinition_isOptional_rdh
/** * Indicates whether values in the column are optional. This is equivalent to calling: * <pre> * nullability() == Nullability.NULL || nullability() == Nullability.UNKNOWN * </pre> * * @return <code>true</code> if so; <code>false</code> otherwise */ public boolean isOptional() { return (f2 == Nullability.f0) || (f2 == Nullability.UNKNOWN); }
3.26
rocketmq-connect_ColumnDefinition_id_rdh
/** * Get the column's identifier. * * @return column identifier; never null */ public ColumnId id() { return f1; }
3.26
rocketmq-connect_ColumnDefinition_isSignedNumber_rdh
/** * Indicates whether values in the column are signed numbers. * * @return <code>true</code> if so; <code>false</code> otherwise */ public boolean isSignedNumber() { return signedNumbers; }
3.26
rocketmq-connect_ColumnDefinition_m0_rdh
/** * Indicates whether the column is automatically numbered. * * @return <code>true</code> if so; <code>false</code> otherwise */ public boolean m0() { return autoIncremented; }
3.26
rocketmq-connect_ColumnDefinition_precision_rdh
/** * Get the column's table identifier. * * @return the table identifier; never null */ // public TableId tableId() { // return id.tableId(); // } /** * Get the column's specified column size. For numeric data, this is the maximum precision. For * character data, this is the length in characters. For datetime datatypes, this is the length in * characters of the String representation (assuming the maximum allowed precision of the * fractional seconds component). For binary data, this is the length in bytes. For the ROWID * datatype, this is the length in bytes. 0 is returned for data types where the column size is * not applicable. * * @return precision */ public int precision() { return precision; }
3.26
rocketmq-connect_ServiceProviderUtil_getConfigManagementService_rdh
/** * Get config management service by class name * * @param configManagementServiceClazz * @return */ @NotNull public static ConfigManagementService getConfigManagementService(String configManagementServiceClazz) { if (StringUtils.isEmpty(configManagementServiceClazz)) { configManagementServiceClazz = LocalConfigManagementServiceImpl.class.getName(); } ConfigManagementService configManagementService = null; ServiceLoader<ConfigManagementService> configManagementServiceServiceLoader = ServiceLoader.load(ConfigManagementService.class); Iterator<ConfigManagementService> configManagementServiceIterator = configManagementServiceServiceLoader.iterator(); while (configManagementServiceIterator.hasNext()) { ConfigManagementService v7 = configManagementServiceIterator.next(); if (v7.getClass().getName().equals(configManagementServiceClazz)) { configManagementService = v7; break; } } if (null == configManagementService) { throw new ConnectException((("ConfigManagementService class " + configManagementServiceClazz) + " not ") + "found"); } return configManagementService; }
3.26
rocketmq-connect_ServiceProviderUtil_getStateManagementService_rdh
/** * Get state management service by class name * * @param stateManagementServiceClazz * @return */ @NotNull public static StateManagementService getStateManagementService(String stateManagementServiceClazz) { if (StringUtils.isEmpty(stateManagementServiceClazz)) { stateManagementServiceClazz = LocalStateManagementServiceImpl.class.getName(); } StateManagementService stateManagementService = null; ServiceLoader<StateManagementService> stateManagementServices = ServiceLoader.load(StateManagementService.class); Iterator<StateManagementService> stateManagementServiceIterator = stateManagementServices.iterator(); while (stateManagementServiceIterator.hasNext()) { StateManagementService currentStateManagementService = stateManagementServiceIterator.next(); if (currentStateManagementService.getClass().getName().equals(stateManagementServiceClazz)) { stateManagementService = currentStateManagementService; break; } } if (null == stateManagementService) { throw new ConnectException((("StateManagementService class " + stateManagementServiceClazz) + " not ") + "found"); } return stateManagementService; }
3.26
rocketmq-connect_ServiceProviderUtil_getPositionManagementService_rdh
/** * Get position management service by class name * * @param positionManagementServiceClazz * @return */ @NotNull public static PositionManagementService getPositionManagementService(String positionManagementServiceClazz) { if (StringUtils.isEmpty(positionManagementServiceClazz)) { positionManagementServiceClazz = LocalPositionManagementServiceImpl.class.getName(); } PositionManagementService positionManagementService = null; ServiceLoader<PositionManagementService> positionManagementServiceServiceLoader = ServiceLoader.load(PositionManagementService.class); Iterator<PositionManagementService> positionManagementServiceIterator = positionManagementServiceServiceLoader.iterator(); while (positionManagementServiceIterator.hasNext()) { PositionManagementService currentPositionManagementService = positionManagementServiceIterator.next(); if (currentPositionManagementService.getClass().getName().equals(positionManagementServiceClazz)) { positionManagementService = currentPositionManagementService; break; } } if (null == positionManagementService) { throw new ConnectException((("PositionManagementService class " + positionManagementServiceClazz) + " not ") + "found"); } return positionManagementService; }
3.26
rocketmq-connect_ServiceProviderUtil_getClusterManagementService_rdh
/** * Get cluster management service by class name * * @param clusterManagementServiceClazz * @return */ @NotNull public static ClusterManagementService getClusterManagementService(String clusterManagementServiceClazz) { if (StringUtils.isEmpty(clusterManagementServiceClazz)) { clusterManagementServiceClazz = ClusterManagementServiceImpl.class.getName(); } ClusterManagementService clusterManagementService = null; ServiceLoader<ClusterManagementService> clusterManagementServiceServiceLoader = ServiceLoader.load(ClusterManagementService.class); Iterator<ClusterManagementService> clusterManagementServiceIterator = clusterManagementServiceServiceLoader.iterator(); while (clusterManagementServiceIterator.hasNext()) { ClusterManagementService v3 = clusterManagementServiceIterator.next(); if (v3.getClass().getName().equals(clusterManagementServiceClazz)) { clusterManagementService = v3; break; } } if (null == clusterManagementService) { throw new ConnectException((("ClusterManagementService class " + clusterManagementServiceClazz) + " not ") + "found"); } return clusterManagementService; }
3.26
rocketmq-connect_JsonSchemaSerializer_serialize_rdh
/** * serialize * * @param topic * @param isKey * @param value * @param schema * @return */ public byte[] serialize(String topic, boolean isKey, JsonSchema schema, Object value) { if (value == null) { return null; } String subjectName = TopicNameStrategy.subjectName(topic, isKey); try { RegisterSchemaRequest schemaRequest = RegisterSchemaRequest.builder().schemaType(schema.schemaType()).compatibility(Compatibility.BACKWARD).schemaIdl(schema.toString()).desc(schema.name()).build(); SchemaResponse schemaResponse = registryClient.autoRegisterOrGetSchema(JsonSchemaData.NAMESPACE, topic, subjectName, schemaRequest, schema); long schemaId = schemaResponse.getRecordId(); // parse idl if (StringUtils.isNotEmpty(schemaResponse.getIdl())) { schema = new JsonSchema(schemaResponse.getIdl()); } // validate json value if (converterConfig.validate()) { JsonSchemaUtils.validate(schema.rawSchema(), value); } // serialize value ByteArrayOutputStream out = new ByteArrayOutputStream(); out.write(ByteBuffer.allocate(ID_SIZE).putLong(schemaId).array()); out.write(OBJECT_MAPPER.writeValueAsBytes(value)); byte[] bytes = out.toByteArray(); out.close(); return bytes; } catch (IOException e) { throw new SerializationException("Error serializing JSON message", e); } }
3.26
rocketmq-connect_LocalStateManagementServiceImpl_start_rdh
/** * Start dependent services (if needed) */ @Override public void start() { connectorStatusStore.load(); taskStatusStore.load(); dataSynchronizer.start(); startSignal(); }
3.26
rocketmq-connect_LocalStateManagementServiceImpl_prePersist_rdh
/** * pre persist */ private void prePersist() { Map<String, ConnAndTaskStatus.CacheEntry<ConnectorStatus>> connectors = connAndTaskStatus.getConnectors(); if (connectors.isEmpty()) { return; } connectors.forEach((connectName, connectorStatus) -> { connectorStatusStore.put(connectName, connectorStatus.get()); Map<Integer, ConnAndTaskStatus.CacheEntry<TaskStatus>> cacheTaskStatus = connAndTaskStatus.getTasks().row(connectName); if (cacheTaskStatus == null) { return; } taskStatusStore.put(connectName, new ArrayList<>()); cacheTaskStatus.forEach((taskId, taskStatus) -> { if (taskStatus != null) { taskStatusStore.get(connectName).add(taskStatus.get()); } }); }); }
3.26
rocketmq-connect_LocalStateManagementServiceImpl_replicaTargetState_rdh
/** * sync send online config */ @Override public void replicaTargetState() { /** * connector status store */ Map<String, ConnectorStatus> connectorStatusMap = connectorStatusStore.getKVMap(); connectorStatusMap.forEach((connectorName, connectorStatus) -> { if (connectorStatus == null) { return; } // send status put(connectorStatus); });/** * task status store */ Map<String, List<TaskStatus>> v2 = taskStatusStore.getKVMap(); if (v2.isEmpty()) { return;} v2.forEach((connectorName, taskStatusList) -> { if ((taskStatusList == null) || taskStatusList.isEmpty()) { return; } taskStatusList.forEach(taskStatus -> { // send status put(taskStatus); }); }); }
3.26
rocketmq-connect_LocalStateManagementServiceImpl_initialize_rdh
/** * initialize cb config * * @param config */ @Override public void initialize(WorkerConfig config, RecordConverter converter) { super.initialize(config, converter);/** * connector status store */ this.connectorStatusStore = new FileBaseKeyValueStore<>(FilePathConfigUtil.getConnectorStatusConfigPath(config.getStorePathRootDir()), new Serdes.StringSerde(), new JsonSerde(ConnectorStatus.class)); /** * task status store */ this.taskStatusStore = new FileBaseKeyValueStore<>(FilePathConfigUtil.getTaskStatusConfigPath(config.getStorePathRootDir()), new Serdes.StringSerde(), new ListSerde(TaskStatus.class)); }
3.26
rocketmq-connect_ExtendKeyValue_getList_rdh
/** * get list by class * * @param s * @param clazz * @param <T> * @return */ public <T> List<T> getList(String s, Class<T> clazz) { List configs = getList(s); List<T> castConfigs = new ArrayList<>(); configs.forEach(config -> { castConfigs.add(clazz.cast(config)); }); return castConfigs; }
3.26
rocketmq-connect_Deserializer_close_rdh
/** * Close this deserializer. * <p> * This method must be idempotent as it may be called multiple times. */ @Override default void close() { // intentionally left blank }
3.26
rocketmq-connect_Deserializer_configure_rdh
/** * Configure this class. * * @param configs * configs in key/value pairs */ default void configure(Map<String, ?> configs) { // intentionally left blank }
3.26
rocketmq-connect_Deserializer_deserialize_rdh
/** * Deserialize a record value from a byte array into a value or object. */ default T deserialize(String topic, KeyValue extensions, byte[] data) { return m0(topic, data); }
3.26
rocketmq-connect_WorkerErrorRecordReporter_report_rdh
/** * report record * * @param record * @param error */ @Override public void report(ConnectRecord record, Throwable error) { RecordPartition partition = record.getPosition().getPartition(); String topic = (partition.getPartition().containsKey("topic")) ? String.valueOf(partition.getPartition().get("topic")) : null; Integer queueId = (partition.getPartition().containsKey("queueId")) ? ((Integer) (partition.getPartition().get("queueId"))) : null; Long queueOffset = (partition.getPartition().containsKey("queueOffset")) ? ((Long) (partition.getPartition().get("queueOffset"))) : null; String brokerName = (partition.getPartition().containsKey("brokerName")) ? String.valueOf(partition.getPartition().get("brokerName")) : null; MessageExt consumerRecord = new MessageExt(); if ((converter != null) && (converter instanceof RecordConverter)) { byte[] value = converter.fromConnectData(topic, record.getSchema(), record.getData()); consumerRecord.setBody(value); consumerRecord.setBrokerName(brokerName); consumerRecord.setQueueId(queueId); consumerRecord.setQueueOffset(queueOffset); } else { byte[] messageBody = JSON.toJSONString(record).getBytes(); consumerRecord.setBody(messageBody); } // add extensions record.getExtensions().keySet().forEach(key -> { consumerRecord.putUserProperty(key, record.getExtensions().getString(key)); }); retryWithToleranceOperator.executeFailed(Stage.TASK_PUT, SinkTask.class, consumerRecord, error); }
3.26
rocketmq-connect_RetryWithToleranceOperator_error_rdh
/** * error * * @return */ public Throwable error() { return this.context.error(); }
3.26
rocketmq-connect_RetryWithToleranceOperator_sourceRecord_rdh
/** * Set the source record being processed in the connect pipeline. * * @param preTransformRecord * the source record */ public void sourceRecord(ConnectRecord preTransformRecord) { this.context.sourceRecord(preTransformRecord); }
3.26
rocketmq-connect_RetryWithToleranceOperator_consumerRecord_rdh
/** * Set the record consumed rocketmq in a sink * * @param messageExt */ public void consumerRecord(MessageExt messageExt) { this.context.consumerRecord(messageExt); }
3.26
rocketmq-connect_RetryWithToleranceOperator_execAndRetry_rdh
/** * Attempt to execute an operation. */ protected <V> V execAndRetry(Operation<V> operation) throws Exception { int v1 = 0; long startTime = System.currentTimeMillis(); long deadline = startTime + retryTimeout; do { try { v1++; return operation.call(); } catch (RetriableException e) { log.trace("Caught a retriable exception while executing {} operation with {}", context.stage(), context.executingClass()); errorMetricsGroup.recordFailure(); if (checkRetry(startTime)) { backoff(v1, deadline); if (Thread.currentThread().isInterrupted()) { log.trace("Thread was interrupted. Marking operation as failed."); context.error(e); return null; } errorMetricsGroup.recordRetry(); } else { log.trace("Can't retry. start={}, attempt={}, deadline={}", startTime, v1, deadline); context.error(e); return null; } } finally { context.attempt(v1); } } while (true ); }
3.26
rocketmq-connect_RetryWithToleranceOperator_execAndHandleError_rdh
/** * Execute a given operation multiple times (if needed), and tolerate certain exceptions. */ protected <V> V execAndHandleError(Operation<V> operation, Class<? extends Exception> tolerated) { try { V result = execAndRetry(operation); if (context.failed()) { markAsFailed(); errorMetricsGroup.recordSkipped(); } return result; } catch (Exception e) { errorMetricsGroup.recordFailure(); markAsFailed(); context.error(e); if (!tolerated.isAssignableFrom(e.getClass())) { throw new ConnectException("Unhandled exception in error handler", e);} if (!withinToleranceLimits()) { throw new ConnectException("Tolerance exceeded in error handler", e); } errorMetricsGroup.recordSkipped(); return null; } }
3.26
rocketmq-connect_RetryWithToleranceOperator_execute_rdh
/** * Execute the recoverable operation. If the operation is already in a failed state, then simply return * with the existing failure. */public <V> V execute(Operation<V> operation, ErrorReporter.Stage stage, Class<?> executingClass) { context.currentContext(stage, executingClass); if (context.failed()) {log.debug("ProcessingContext is already in failed state. Ignoring requested operation."); return null; } try { Class<? extends Exception> ex = TOLERABLE_EXCEPTIONS.getOrDefault(context.stage(), RetriableException.class); return execAndHandleError(operation, ex); } finally { if (context.failed()) { errorMetricsGroup.recordError(); context.report(); } } }
3.26
rocketmq-connect_RetryWithToleranceOperator_failed_rdh
/** * failed * * @return */ public boolean failed() { return this.context.failed(); }
3.26
rocketmq-connect_RetryWithToleranceOperator_reporters_rdh
/** * Set the error reporters for this connector. * * @param reporters * the error reporters (should not be null). */ public void reporters(List<ErrorReporter> reporters) { this.context.reporters(reporters); }
3.26
rocketmq-connect_ExpressionBuilder_appendTableName_rdh
/** * Append to this builder's expression the specified Column identifier, possibly surrounded by * the leading and trailing quotes based upon {@link #setQuoteIdentifiers(QuoteMethod)}. * * @param name * the name to be appended * @param quote * the quote method to be used * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendTableName(String name, QuoteMethod quote) { appendLeadingQuote(quote); sb.append(name); appendTrailingQuote(quote); return this; }
3.26
rocketmq-connect_ExpressionBuilder_of_rdh
/** * Append to this list all of the items in the specified {@link Iterable} objects. * * @param objects1 * the first collection of objects to be added to the list * @param objects2 * a second collection of objects to be added to the list * @return this builder to enable methods to be chained; never null */ default ExpressionBuilder of(Iterable<? extends T> objects1, Iterable<? extends T> objects2) { of(objects1); return of(objects2); } /** * Append to this list all of the items in the specified {@link Iterable}
3.26
rocketmq-connect_ExpressionBuilder_appendStringQuoted_rdh
/** * Append to this builder's expression a string surrounded by single quote characters ({@code '}). * Use {@link #appendIdentifier(String, QuoteMethod)} for identifiers, * {@link #appendColumnName(String, QuoteMethod)} for column names, or * {@link #appendTableName(String, QuoteMethod)} for table names. * * @param name * the object whose string representation is to be appended * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendStringQuoted(Object name) { appendStringQuote(); sb.append(name); appendStringQuote(); return this; }
3.26
rocketmq-connect_ExpressionBuilder_m0_rdh
/** * Get a {@link Transform} that will quote just the column names. * * @return the transform; never null */ // public static Transform<ColumnId> columnNames() { // return (builder, input) -> builder.appendColumnName(input.name()); // } /** * Get a {@link Transform} that will quote just the column names and append the given string. * * @param appended * the string to append after the quoted column names * @return the transform; never null */ // public static Transform<ColumnId> columnNamesWith(final String appended) { // return (builder, input) -> { // builder.appendColumnName(input.name()); // builder.append(appended); // }; // } /** * Get a {@link Transform} that will append a placeholder rather than each of the column names. * * @param str * the string to output instead the each column name * @return the transform; never null */ // public static Transform<ColumnId> placeholderInsteadOfColumnNames(final String str) { // return (builder, input) -> builder.append(str); // } /** * Get a {@link Transform} that will append the prefix and then the quoted column name. * * @param prefix * the string to output before the quoted column names * @return the transform; never null */ // public static Transform<ColumnId> columnNamesWithPrefix(final String prefix) { // return (builder, input) -> { // builder.append(prefix); // builder.appendColumnName(input.name()); // }; // } /** * Create a new ExpressionBuilder using the default {@link IdentifierRules}. * * @return the expression builder */ public static ExpressionBuilder m0() { return new ExpressionBuilder(); }
3.26
rocketmq-connect_ExpressionBuilder_escapeQuotesWith_rdh
/** * Return a new ExpressionBuilder that escapes quotes with the specified prefix. * This builder remains unaffected. * * @param prefix * the prefix * @return the new ExpressionBuilder, or this builder if the prefix is null or empty */ public ExpressionBuilder escapeQuotesWith(String prefix) { if ((prefix == null) || prefix.isEmpty()) { return this; } return new ExpressionBuilder(this.rules.escapeQuotesWith(prefix)); }
3.26
rocketmq-connect_ExpressionBuilder_appendBinaryLiteral_rdh
/** * Append to this builder's expression the binary value as a hex string, prefixed and * suffixed by a single quote character. * * @param value * the value to be appended * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendBinaryLiteral(byte[] value) { return append("x'").append(BytesUtil.toHex(value)).append("'"); }
3.26
rocketmq-connect_ExpressionBuilder_quote_rdh
/** * Get a {@link Transform} that will surround the inputs with quotes. * * @return the transform; never null */ public static Transform<String> quote() { return (builder, input) -> builder.appendColumnName(input); }
3.26
rocketmq-connect_ExpressionBuilder_appendTo_rdh
/** * Append this object to the specified builder. * * @param builder * the builder to use; may not be null * @param useQuotes * whether quotes should be used for this object */ default void appendTo(ExpressionBuilder builder, QuoteMethod useQuotes) { switch (useQuotes) { case ALWAYS :appendTo(builder, true); break; case NEVER :default : // do nothing break; } }
3.26
rocketmq-connect_ExpressionBuilder_appendIdentifierDelimiter_rdh
/** * Append to this builder's expression the delimiter defined by this builder's * {@link IdentifierRules}. * * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendIdentifierDelimiter() { sb.append(rules.identifierDelimiter()); return this; }
3.26
rocketmq-connect_ExpressionBuilder_appendIdentifier_rdh
/** * Append to this builder's expression the identifier. * * @param name * the name to be appended * @param quoted * true if the name should be quoted, or false otherwise * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendIdentifier(String name, QuoteMethod quoted) { appendLeadingQuote(quoted); sb.append(name); appendTrailingQuote(quoted); return this; }
3.26
rocketmq-connect_ExpressionBuilder_appendLeadingQuote_rdh
/** * Always append to this builder's expression the leading quote character(s) defined by this * builder's {@link IdentifierRules}. * * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendLeadingQuote() {return appendLeadingQuote(QuoteMethod.ALWAYS); }
3.26
rocketmq-connect_ExpressionBuilder_append_rdh
/** * Append to this builder's expression the specified object surrounded by quotes. If the object * is {@link Expressable}, then this builder delegates to the object's * {@link Expressable#appendTo(ExpressionBuilder, boolean)} method. Otherwise, the string * representation of the object is appended to the expression. * * @param obj * the object to be appended * @param transform * the transform that should be used on the supplied object to obtain the * representation that is appended to the expression; may be null * @param <T> * the type of object to transform before appending. * @return this builder to enable methods to be chained; never null */ public <T> ExpressionBuilder append(T obj, Transform<T> transform) { if (transform != null) { transform.apply(this, obj); } else { append(obj); } return this; }
3.26
rocketmq-connect_ExpressionBuilder_appendColumnName_rdh
/** * Append to this builder's expression the specified Column identifier, possibly surrounded by * the leading and trailing quotes based upon {@link #setQuoteIdentifiers(QuoteMethod)}. * * @param name * the name to be appended * @param quote * whether to quote the column name; may not be null * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendColumnName(String name, QuoteMethod quote) { appendLeadingQuote(quote); sb.append(name); appendTrailingQuote(quote); return this; }
3.26
rocketmq-connect_ExpressionBuilder_setQuoteIdentifiers_rdh
/** * Set when this expression builder should quote identifiers, such as table and column names. * * @param method * the quoting method; may be null if the default method * ({@link QuoteMethod#ALWAYS always}) should be used * @return this expression builder; never null */ public ExpressionBuilder setQuoteIdentifiers(QuoteMethod method) { this.quoteSqlIdentifiers = (method != null) ? method : DEFAULT_QUOTE_METHOD; return this; }
3.26
rocketmq-connect_ExpressionBuilder_appendIdentifierQuoted_rdh
/** * Append to this builder's expression the specified identifier, surrounded by the leading and * trailing quotes. * * @param name * the name to be appended * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendIdentifierQuoted(String name) { appendLeadingQuote(); sb.append(name); appendTrailingQuote(); return this; }
3.26
rocketmq-connect_RocketMqDatabaseHistory_recoverRecords_rdh
/** * Recover records * * @param records */ @Override protected void recoverRecords(Consumer<HistoryRecord> records) { DefaultLitePullConsumer consumer = null; try { consumer = RocketMqAdminUtil.initDefaultLitePullConsumer(rocketMqConfig, false); consumer.start(); // Select message queue MessageQueue messageQueue = new ZeroMessageQueueSelector().select(new ArrayList<>(consumer.fetchMessageQueues(topicName)), null, null); consumer.assign(Collections.singleton(messageQueue)); consumer.seekToBegin(messageQueue); // Read all messages in the topic ... long lastProcessedOffset = UNLIMITED_VALUE; Long maxOffset = null; int recoveryAttempts = 0; do { if (recoveryAttempts > maxRecoveryAttempts) { throw new IllegalStateException("The database schema history couldn't be recovered."); } // Get db schema history topic end offset maxOffset = getMaxOffsetOfSchemaHistoryTopic(maxOffset, messageQueue); log.debug("End offset of database schema history topic is {}", maxOffset); // Poll record from db schema history topic List<MessageExt> recoveredRecords = consumer.poll(pollInterval); int numRecordsProcessed = 0; for (MessageExt message : recoveredRecords) { if (message.getQueueOffset() > lastProcessedOffset) { HistoryRecord recordObj = new HistoryRecord(reader.read(message.getBody())); log.trace("Recovering database history: {}", recordObj); if ((recordObj == null) || (!recordObj.isValid())) { log.warn("Skipping invalid database history record '{}'. " + "This is often not an issue, but if it happens repeatedly please check the '{}' topic.", recordObj, topicName); } else { records.accept(recordObj); log.trace("Recovered database history: {}", recordObj); } lastProcessedOffset = message.getQueueOffset(); ++numRecordsProcessed; } } if (numRecordsProcessed == 0) { log.debug("No new records found in the database schema history; will retry"); recoveryAttempts++; } else { log.debug("Processed {} records from database schema history", numRecordsProcessed); } } while (lastProcessedOffset < (maxOffset - 1) ); } catch (MQClientException | MQBrokerException | IOException | RemotingException | InterruptedException e) { throw new DatabaseHistoryException(e); } finally { if (consumer != null) { consumer.shutdown(); } } }
3.26
rocketmq-connect_JsonSerializer_serialize_rdh
/** * Convert {@code data} into a byte array. * * @param topic * topic associated with data * @param data * typed data * @return serialized bytes */ @Override public byte[] serialize(String topic, Object data) { if (Objects.isNull(data)) {return null; } try { return JSON.toJSONString(data, SerializerFeature.DisableCircularReferenceDetect, SerializerFeature.WriteMapNullValue).getBytes(StandardCharsets.UTF_8); } catch (Exception e) { throw new ConnectException("Error serializing JSON message", e); } }
3.26
rocketmq-connect_PluginUtils_shouldLoadInIsolation_rdh
/** * Return whether the class with the given name should be loaded in isolation using a plugin * classloader. * * @param name * the fully qualified name of the class. * @return true if this class should be loaded in isolation, false otherwise. */ public static boolean shouldLoadInIsolation(String name) { return !(BLACKLIST.matcher(name).matches() && (!INCLUDE.matcher(name).matches())); }
3.26
rocketmq-connect_PluginUtils_prunedName_rdh
/** * Remove the plugin type name at the end of a plugin class name, if such suffix is present. * This method is meant to be used to extract plugin aliases. */ public static String prunedName(PluginWrapper<?> plugin) { switch (plugin.type()) { case SOURCE : case SINK :case CONNECTOR : return prunePluginName(plugin, "Connector"); default : return prunePluginName(plugin, plugin.type().simpleName()); } }
3.26
rocketmq-connect_PluginUtils_isAliasUnique_rdh
/** * Verify whether a given plugin's alias matches another alias in a collection of plugins. * * @param alias * the plugin descriptor to test for alias matching. * @param plugins * the collection of plugins to test against. * @param <U> * the plugin type. * @return false if a match was found in the collection, otherwise true. */ public static <U> boolean isAliasUnique(PluginWrapper<U> alias, Collection<PluginWrapper<U>> plugins) { boolean matched = false;for (PluginWrapper<U> plugin : plugins) { if (simpleName(alias).equals(simpleName(plugin)) || prunedName(alias).equals(prunedName(plugin))) {if (matched) { return false; } matched = true; } } return true; }
3.26
rocketmq-connect_PluginUtils_isConcrete_rdh
/** * Verify the given class corresponds to a concrete class and not to an abstract class or interface. */ public static boolean isConcrete(Class<?> klass) { int mod = klass.getModifiers(); return (!Modifier.isAbstract(mod)) && (!Modifier.isInterface(mod)); }
3.26
rocketmq-connect_PluginUtils_simpleName_rdh
/** * Return the simple class name of a plugin as {@code String}. * * @param plugin * the plugin descriptor. * @return the plugin's simple class name. */ public static String simpleName(PluginWrapper<?> plugin) { return plugin.pluginClass().getSimpleName(); }
3.26
rocketmq-connect_FilterTransform_start_rdh
/** * Start the component * * @param config * component context */ @Override public void start(KeyValue config) { this.keyValue = config;log.info("transform config {}", this.keyValue); }
3.26
rocketmq-connect_FilterTransform_stop_rdh
/** * Stop the component. */ @Override public void stop() { }
3.26
rocketmq-connect_KafkaSourceAdaptorConnector_start_rdh
/** * Start the component * * @param config * component context */ @Override public void start(KeyValue config) { super.start(config); sourceConnector.validate(taskConfig);sourceConnector.initialize(new KafkaConnectorContext(connectorContext));sourceConnector.start(taskConfig); }
3.26
rocketmq-connect_StringConverter_fromConnectData_rdh
/** * Convert a rocketmq Connect data object to a native object for serialization. * * @param topic * the topic associated with the data * @param schema * the schema for the value * @param value * the value to convert * @return the serialized value */ @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (Exception e) { throw new ConnectException("Failed to serialize to a string: ", e); } }
3.26
rocketmq-connect_StringConverter_toConnectData_rdh
/** * Convert a native object to a Rocketmq Connect data object. */ @Overridepublic SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(SchemaBuilder.string().build(), deserializer.deserialize(topic, value)); } catch (Exception e) { throw new ConnectException("Failed to deserialize string: ", e); }}
3.26
rocketmq-connect_StringConverter_configure_rdh
/** * Configure this class. * * @param configs * configs in key/value pairs */ @Override public void configure(Map<String, ?> configs) { serializer.configure(configs); deserializer.configure(configs); }
3.26
rocketmq-connect_ProcessingContext_sourceRecord_rdh
/** * Set the source record being processed in the connect pipeline. * * @param record * the source record */ public void sourceRecord(ConnectRecord record) { this.sourceRecord = record; reset(); }
3.26
rocketmq-connect_ProcessingContext_executingClass_rdh
/** * * @param klass * set the class which is currently executing. */ public void executingClass(Class<?> klass) { this.klass = klass; }
3.26
rocketmq-connect_ProcessingContext_report_rdh
/** * report errors */ public void report() { if (reporters.size() == 1) { reporters.iterator().next().report(this); return; } reporters.stream().forEach(r -> r.report(this)); }
3.26
rocketmq-connect_ProcessingContext_consumerRecord_rdh
/** * * @param consumedMessage * the record */ public void consumerRecord(MessageExt consumedMessage) { this.consumedMessage = consumedMessage; reset(); }
3.26
rocketmq-connect_ProcessingContext_failed_rdh
/** * * @return */ public boolean failed() { return error() != null; }
3.26
rocketmq-connect_ProcessingContext_stage_rdh
/** * * @return the stage in the connector pipeline which is currently executing. */ public Stage stage() { return stage; }
3.26