name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
pulsar_ManagedCursorImpl_filterReadEntries_rdh | /**
* Given a list of entries, filter out the entries that have already been individually deleted.
*
* @param entries
* a list of entries
* @return a list of entries not containing deleted messages
 */
List<Entry> filterReadEntries(List<Entry> entries) {
f2.readLock().lock();
try {
Range<PositionImpl> entriesRange = Range.closed(((PositionImpl) (entries.get(0).getPosition())), ((PositionImpl) (entries.get(entries.size() - 1).getPosition())));
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Filtering entries {} - alreadyDeleted: {}", ledger.getName(), name, entriesRange, individualDeletedMessages);
}
Range<PositionImpl> span = (individualDeletedMessages.isEmpty()) ? null : individualDeletedMessages.span();
if ((span == null) || (!entriesRange.isConnected(span))) {
// There are no individually deleted messages in this entry list, no need to perform filtering
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] No filtering needed for entries {}", ledger.getName(), name, entriesRange);
                }
                return entries;
} else {
// Remove from the entry list all the entries that were already marked for deletion
return Lists.newArrayList(Collections2.filter(entries, entry -> {
boolean keep = !individualDeletedMessages.contains(entry.getLedgerId(), entry.getEntryId());
if (!keep) {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Filtering entry at {} - already deleted", ledger.getName(), name, entry.getPosition());
}
entry.release();
}
return keep;
}));
}
} finally {
f2.readLock().unlock();
}
} | 3.26 |
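The early return above hinges on two Guava primitives: `RangeSet.span()` (the smallest range enclosing every deleted range) and `Range.isConnected()`. A minimal standalone sketch, assuming Guava is on the classpath and with integer positions standing in for `PositionImpl`:

```java
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

public class SpanCheckDemo {
    public static void main(String[] args) {
        // Individually deleted "positions", modeled as integers for simplicity.
        RangeSet<Integer> deleted = TreeRangeSet.create();
        deleted.add(Range.closed(10, 15));
        deleted.add(Range.closed(40, 42));

        // span() is the smallest range enclosing all deleted ranges: [10..42].
        Range<Integer> span = deleted.span();

        // A batch of entries entirely outside the span needs no filtering.
        Range<Integer> batch = Range.closed(50, 60);
        System.out.println(batch.isConnected(span)); // false -> return entries as-is

        // A batch overlapping the span must be filtered entry by entry.
        Range<Integer> overlapping = Range.closed(14, 20);
        System.out.println(overlapping.isConnected(span)); // true -> filter
    }
}
```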
pulsar_ManagedCursorImpl_persistPositionWhenClosing_rdh | /**
 * Persist the given markDelete position to the cursor ledger or to the zk metastore, depending on the maximum
 * number of unack ranges that may be persisted in the zk metastore. If the current number of unack ranges is
 * higher than the configured threshold, the broker persists the mark-delete into the cursor ledger; otherwise
 * it goes into the zk metastore.
*
* @param position
* @param properties
* @param callback
* @param ctx
*/
void persistPositionWhenClosing(PositionImpl position, Map<String, Long> properties,
        final AsyncCallbacks.CloseCallback callback, final Object ctx) {
if (shouldPersistUnackRangesToLedger()) {
persistPositionToLedger(cursorLedger, new MarkDeleteEntry(position, properties, null, null), new VoidCallback() {
@Override
public void operationComplete() {
log.info("[{}][{}] Updated md-position={} into cursor-ledger {}", ledger.getName(), name, markDeletePosition, cursorLedger.getId());
asyncCloseCursorLedger(callback, ctx);
}
@Override
public void operationFailed(ManagedLedgerException e) {
    log.warn("[{}][{}] Failed to persist mark-delete position into cursor-ledger {}: {}", ledger.getName(), name, cursorLedger.getId(), e.getMessage());
callback.closeFailed(e, ctx);
}
});
} else {
persistPositionMetaStore(-1, position, properties, new MetaStoreCallback<Void>() {
@Override
public void operationComplete(Void result, Stat stat) {
log.info("[{}][{}] Closed cursor at md-position={}", ledger.getName(), name, markDeletePosition);
// At this point the position had already been safely stored in the cursor z-node
callback.closeComplete(ctx);
asyncDeleteLedger(cursorLedger);
}
@Override
public void operationFailed(MetaStoreException e) {
log.warn("[{}][{}] Failed to update cursor info when closing: {}",
ledger.getName(), name, e.getMessage());
callback.closeFailed(e, ctx);
}
}, true);
}
} | 3.26 |
pulsar_ManagedCursorImpl_trySetStateToClosing_rdh | /**
 * Try to set {@link #state} to {@link State#Closing}.
 *
 * @return false if the {@link #state} is already {@link State#Closing} or {@link State#Closed}.
*/
private boolean trySetStateToClosing() {
final AtomicBoolean notClosing = new AtomicBoolean(false);
STATE_UPDATER.updateAndGet(this, state -> {
switch (state) {
case Closing :
case Closed :
{
notClosing.set(false);
return state;
                }
                default :
{
notClosing.set(true);
return State.Closing;
}
}
});
return notClosing.get();
} | 3.26 |
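The same idempotent-transition pattern can be sketched with a plain `AtomicReference` instead of Pulsar's `STATE_UPDATER`; the class and state names here are illustrative. As in the original, the update function may be re-applied under contention, which is why the flag is written inside the lambda on every attempt:

```java
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class ClosingStateDemo {
    enum State { Open, Closing, Closed }

    private final AtomicReference<State> state = new AtomicReference<>(State.Open);

    /** Returns false if the state was already Closing or Closed. */
    boolean trySetStateToClosing() {
        AtomicBoolean transitioned = new AtomicBoolean(false);
        state.updateAndGet(s -> {
            switch (s) {
                case Closing:
                case Closed:
                    transitioned.set(false);
                    return s; // keep the terminal state
                default:
                    transitioned.set(true);
                    return State.Closing;
            }
        });
        return transitioned.get();
    }

    public static void main(String[] args) {
        ClosingStateDemo d = new ClosingStateDemo();
        System.out.println(d.trySetStateToClosing()); // true  (Open -> Closing)
        System.out.println(d.trySetStateToClosing()); // false (already Closing)
    }
}
```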
pulsar_ManagedCursorImpl_recover_rdh | /**
 * Performs the initial recovery, reading the mark-deleted position from the ledger and then calling initialize
 * to open a new ledger.
*/
void recover(final VoidCallback callback) {
// Read the meta-data ledgerId from the store
log.info("[{}] Recovering from bookkeeper ledger cursor: {}", ledger.getName(), name);
ledger.getStore().asyncGetCursorInfo(ledger.getName(), name, new MetaStoreCallback<ManagedCursorInfo>() {
@Override
public void operationComplete(ManagedCursorInfo info, Stat stat) {
updateCursorLedgerStat(info, stat);
f3 = (info.getLastActive() != 0) ? info.getLastActive() : f3;
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Recover cursor last active to [{}]", ledger.getName(), name, f3);
}
Map<String, String> recoveredCursorProperties = Collections.emptyMap();
if (info.getCursorPropertiesCount() > 0) {
// Recover properties map
recoveredCursorProperties = new HashMap<>();
for (int i = 0; i < info.getCursorPropertiesCount(); i++) {
StringProperty property = info.getCursorProperties(i);
recoveredCursorProperties.put(property.getName(), property.getValue());
}
}
cursorProperties = recoveredCursorProperties;
if (info.getCursorsLedgerId() == (-1L)) {
// There is no cursor ledger to read the last position from. It means the cursor has been properly
// closed and the last mark-delete position is stored in the ManagedCursorInfo itself.
PositionImpl recoveredPosition = new PositionImpl(info.getMarkDeleteLedgerId(), info.getMarkDeleteEntryId());
if (info.getIndividualDeletedMessagesCount() > 0) {
recoverIndividualDeletedMessages(info.getIndividualDeletedMessagesList());
}
Map<String, Long> recoveredProperties = Collections.emptyMap();
if (info.getPropertiesCount() > 0) {
// Recover properties map
recoveredProperties = new HashMap<>();
for (int i = 0; i < info.getPropertiesCount(); i++) {
    LongProperty property = info.getProperties(i);
recoveredProperties.put(property.getName(), property.getValue());
}
}
recoveredCursor(recoveredPosition, recoveredProperties, recoveredCursorProperties, null);
callback.operationComplete();
} else {
// Need to proceed and read the last entry in the specified ledger to find out the last position
log.info("[{}] Cursor {} meta-data recover from ledger {}", ledger.getName(), name, info.getCursorsLedgerId());recoverFromLedger(info, callback);
}
}
@Override
public void operationFailed(MetaStoreException e) {
callback.operationFailed(e);
}
});
} | 3.26 |
pulsar_ManagedCursorImpl_startCreatingNewMetadataLedger_rdh | // //////////////////////////////////////////////////
void startCreatingNewMetadataLedger() {
// Change the state so that new mark-delete ops will be queued and not immediately submitted
State oldState = STATE_UPDATER.getAndSet(this, State.SwitchingLedger);
if (oldState == State.SwitchingLedger) {
// Ignore double request
return;
}
// Check if we can immediately switch to a new metadata ledger
if (PENDING_MARK_DELETED_SUBMITTED_COUNT_UPDATER.get(this) == 0) {
createNewMetadataLedger();
}
} | 3.26 |
pulsar_ManagedCursorImpl_getPendingReadOpsCount_rdh | // Expose internal values for debugging purposes
public int getPendingReadOpsCount() {
return PENDING_READ_OPS_UPDATER.get(this);
} | 3.26 |
pulsar_ManagedCursorImpl_notifyEntriesAvailable_rdh | /**
*
 * Notifies the cursor that new entries are available, triggering a pending read operation if one is waiting.
*/
void notifyEntriesAvailable() {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Received ml notification", ledger.getName(), name);
}
OpReadEntry opReadEntry = WAITING_READ_OP_UPDATER.getAndSet(this, null);
if (opReadEntry != null) {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Received notification of new messages persisted, reading at {} -- last: {}", ledger.getName(), name, opReadEntry.readPosition, ledger.lastConfirmedEntry);
log.debug("[{}] Consumer {} cursor notification: other counters: consumed {} mdPos {} rdPos {}", ledger.getName(), name, f0, markDeletePosition, readPosition);
}
PENDING_READ_OPS_UPDATER.incrementAndGet(this);
opReadEntry.readPosition = ((PositionImpl) (getReadPosition()));
ledger.asyncReadEntries(opReadEntry);
} else if (log.isDebugEnabled()) {
    // No one is waiting to be notified. Ignore.
log.debug("[{}] [{}] Received notification but had no pending read operation", ledger.getName(), name);
}
} | 3.26 |
pulsar_ManagedCursorImpl_asyncReplayEntries_rdh | /**
 * Async replays the given positions:
 * a. before reading, filters out already-acked messages;
 * b. reads the remaining entries asynchronously and hands them to the given ReadEntriesCallback;
 * c. returns all already-acked messages that were not replayed, so the caller (Dispatcher) can remove them
 *    from its replay list and will not try to replay them again.
*/
@Override
public Set<? extends Position> asyncReplayEntries(final Set<? extends Position> positions, ReadEntriesCallback callback, Object ctx) {
return asyncReplayEntries(positions, callback, ctx, false);
} | 3.26 |
pulsar_ManagedCursorImpl_m4_rdh | /**
 * Checks whether the given position falls within a deleted range and, if so, returns the position after the
 * range's upper endpoint, since all messages up to that point are deleted.
*
* @param position
* @return next available position
*/
public PositionImpl m4(PositionImpl position) {
Range<PositionImpl> range = individualDeletedMessages.rangeContaining(position.getLedgerId(), position.getEntryId());
if (range != null) {
PositionImpl nextPosition = range.upperEndpoint().getNext();
return (nextPosition != null) && (nextPosition.compareTo(position) > 0) ? nextPosition : position.getNext();
}
return position.getNext();
} | 3.26 |
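The lookup above is essentially Guava's `RangeSet.rangeContaining`. A hedged standalone sketch with integer positions, where `getNext()` on `PositionImpl` becomes `+ 1`:

```java
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

public class NextAvailableDemo {
    // Returns the next position not covered by a deleted range.
    static int nextAvailable(RangeSet<Integer> deleted, int position) {
        Range<Integer> range = deleted.rangeContaining(position);
        if (range != null) {
            // Skip past the whole deleted range.
            return range.upperEndpoint() + 1;
        }
        return position + 1;
    }

    public static void main(String[] args) {
        RangeSet<Integer> deleted = TreeRangeSet.create();
        deleted.add(Range.closed(10, 15));
        System.out.println(nextAvailable(deleted, 12)); // 16: jumps past [10..15]
        System.out.println(nextAvailable(deleted, 20)); // 21: plain increment
    }
}
```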
pulsar_ManagedCursorImpl_getBatchPositionAckSet_rdh | // this method will return a copy of the position's ack set
public long[] getBatchPositionAckSet(Position position) {
if (!(position instanceof PositionImpl)) {
return null;
}
if (f1 != null) {
BitSetRecyclable bitSetRecyclable = f1.get(position);
if (bitSetRecyclable == null) {
return null;
} else {
return bitSetRecyclable.toLongArray();
}
} else {
return null;
}
} | 3.26 |
pulsar_ManagedCursorImpl_getRollbackPosition_rdh | /**
 * If we fail to recover the cursor ledger, we still want to open the ML and roll back.
*
* @param info
*/
private PositionImpl getRollbackPosition(ManagedCursorInfo info) {
PositionImpl firstPosition = ledger.getFirstPosition();
PositionImpl snapshottedPosition = new PositionImpl(info.getMarkDeleteLedgerId(), info.getMarkDeleteEntryId());
if (firstPosition == null) {
// There are no ledgers in the ML, any position is good
return snapshottedPosition;
} else if (snapshottedPosition.compareTo(firstPosition) < 0) {
// The snapshotted position might be pointing to a ledger that was already deleted
return firstPosition;
} else {
return snapshottedPosition;
}
} | 3.26 |
pulsar_ManagedCursorImpl_setReadPosition_rdh | /**
* Internal version of seek that doesn't do the validation check.
*
* @param newReadPositionInt
*/
void setReadPosition(Position newReadPositionInt) {
checkArgument(newReadPositionInt instanceof PositionImpl);
if ((this.markDeletePosition == null) || (((PositionImpl) (newReadPositionInt)).compareTo(this.markDeletePosition) > 0)) {
this.readPosition = ((PositionImpl) (newReadPositionInt));
ledger.onCursorReadPositionUpdated(this, newReadPositionInt);
}
} | 3.26 |
pulsar_ManagedCursorImpl_updateLastMarkDeleteEntryToLatest_rdh | // update lastMarkDeleteEntry field if newPosition is later than the current lastMarkDeleteEntry.newPosition
private void updateLastMarkDeleteEntryToLatest(final PositionImpl newPosition, final Map<String, Long> properties) {
LAST_MARK_DELETE_ENTRY_UPDATER.updateAndGet(this, last -> {
    if ((last != null) && (last.newPosition.compareTo(newPosition) > 0)) {
// keep current value, don't update
return last;
} else {
// use given properties or when missing, use the properties from the previous field value
Map<String, Long> propertiesToUse = (properties != null) ? properties : (last != null ? last.properties : Collections.emptyMap());
return new MarkDeleteEntry(newPosition, propertiesToUse, null, null);
}
});
} | 3.26 |
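The monotonic-update idiom above (only replace the stored value if the candidate is newer) works with any `AtomicReference`. A small sketch, assuming positions are plain longs:

```java
import java.util.concurrent.atomic.AtomicReference;

public class MonotonicUpdateDemo {
    private final AtomicReference<Long> lastPosition = new AtomicReference<>();

    void updateToLatest(long candidate) {
        lastPosition.updateAndGet(last -> {
            if (last != null && last > candidate) {
                return last; // keep the newer value already stored
            }
            return candidate;
        });
    }

    public static void main(String[] args) {
        MonotonicUpdateDemo d = new MonotonicUpdateDemo();
        d.updateToLatest(5);
        d.updateToLatest(3); // ignored: older than 5
        d.updateToLatest(9);
        System.out.println(d.lastPosition.get()); // 9
    }
}
```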
pulsar_ManagedCursorImpl_isBkErrorNotRecoverable_rdh | /**
 * Returns true for BK error codes that are considered unlikely to be recoverable.
*/
public static boolean isBkErrorNotRecoverable(int rc) {
switch (rc) {
case Code.NoSuchLedgerExistsException :
case Code.NoSuchLedgerExistsOnMetadataServerException :
case Code.ReadException :
case Code.LedgerRecoveryException :
case Code.NoSuchEntryException :
return true;
default :
return false;
    }
} | 3.26 |
pulsar_ManagedCursorImpl_setAcknowledgedPosition_rdh | /**
*
* @param newMarkDeletePosition
* the new acknowledged position
* @return the previous acknowledged position
*/
PositionImpl setAcknowledgedPosition(PositionImpl newMarkDeletePosition) {
if (newMarkDeletePosition.compareTo(markDeletePosition) < 0) {
throw new MarkDeletingMarkedPosition((("Mark deleting an already mark-deleted position. Current mark-delete: " + markDeletePosition) + " -- attempted mark delete: ") + newMarkDeletePosition);
}
PositionImpl oldMarkDeletePosition = markDeletePosition;
if (!newMarkDeletePosition.equals(oldMarkDeletePosition)) {
long skippedEntries = 0;
if ((newMarkDeletePosition.getLedgerId() == oldMarkDeletePosition.getLedgerId())
        && (newMarkDeletePosition.getEntryId() == (oldMarkDeletePosition.getEntryId() + 1))) {
// Mark-deleting the position next to current one
skippedEntries = (individualDeletedMessages.contains(newMarkDeletePosition.getLedgerId(), newMarkDeletePosition.getEntryId())) ? 0 : 1;
} else {
skippedEntries = getNumberOfEntries(Range.openClosed(oldMarkDeletePosition, newMarkDeletePosition));
}
PositionImpl positionAfterNewMarkDelete = ledger.getNextValidPosition(newMarkDeletePosition);
// Sometimes ranges are connected but belong to different ledgers, so they are placed sequentially,
// e.g. (2:10..3:15] can be returned as (2:10..2:15],[3:0..3:15]. So iterate over the connected ranges and
// find the last non-connected range, which gives the new markDeletePosition.
while (positionAfterNewMarkDelete.compareTo(ledger.lastConfirmedEntry) <= 0) {
if (individualDeletedMessages.contains(positionAfterNewMarkDelete.getLedgerId(), positionAfterNewMarkDelete.getEntryId())) {
Range<PositionImpl> rangeToBeMarkDeleted = individualDeletedMessages.rangeContaining(positionAfterNewMarkDelete.getLedgerId(), positionAfterNewMarkDelete.getEntryId());
newMarkDeletePosition = rangeToBeMarkDeleted.upperEndpoint();
positionAfterNewMarkDelete = ledger.getNextValidPosition(newMarkDeletePosition);
// check if next valid position is also deleted and part of the deleted-range
continue;
}
break;
}
if (log.isDebugEnabled()) {
log.debug("[{}] Moved ack position from: {} to: {} -- skipped: {}", ledger.getName(), oldMarkDeletePosition, newMarkDeletePosition, skippedEntries);
}
MSG_CONSUMED_COUNTER_UPDATER.addAndGet(this, skippedEntries);
}
// Update the markDelete-position and clear out the deleted-message set up to it
markDeletePosition = newMarkDeletePosition;
individualDeletedMessages.removeAtMost(markDeletePosition.getLedgerId(), markDeletePosition.getEntryId());
READ_POSITION_UPDATER.updateAndGet(this, currentReadPosition -> {
if (currentReadPosition.compareTo(markDeletePosition) <= 0) {
// If the position that is mark-deleted is past the read position, it
// means that the client has skipped some entries. We need to move
// read position forward
PositionImpl newReadPosition = ledger.getNextValidPosition(markDeletePosition);
if (log.isDebugEnabled()) {
log.debug("[{}] Moved read position from: {} to: {}, and new mark-delete position {}", ledger.getName(), currentReadPosition, newReadPosition, markDeletePosition);
}
ledger.onCursorReadPositionUpdated(this, newReadPosition);
return newReadPosition;
} else {
return currentReadPosition;
}
});
return newMarkDeletePosition;
} | 3.26 |
pulsar_GenericRecord_getSchemaType_rdh | /**
 * Return the schema type.
*
* @return the schema type
* @throws UnsupportedOperationException
* if this feature is not implemented
* @see SchemaType#AVRO
* @see SchemaType#PROTOBUF_NATIVE
* @see SchemaType#JSON
*/
@Override
default SchemaType getSchemaType() {
throw new UnsupportedOperationException();
} | 3.26 |
pulsar_GenericRecord_getField_rdh | /**
* Retrieve the value of the provided <tt>field</tt>.
*
* @param field
* the field to retrieve the value
* @return the value object
*/
default Object getField(Field field) {
return getField(field.getName());
} | 3.26 |
pulsar_GenericRecord_getNativeObject_rdh | /**
* Return the internal native representation of the Record,
 * like an Avro GenericRecord.
*
* @return the internal representation of the record
* @throws UnsupportedOperationException
* if the operation is not supported
*/
@Override
default Object getNativeObject() {
throw new UnsupportedOperationException();
} | 3.26 |
pulsar_ShadedJCloudsUtils_addStandardModules_rdh | /**
* Setup standard modules.
*
* @param builder
 * the builder
*/
public static void addStandardModules(ContextBuilder builder) {
List<AbstractModule> modules = new ArrayList<>();
modules.add(new SLF4JLoggingModule());
if (ENABLE_OKHTTP_MODULE) {
modules.add(new OkHttpCommandExecutorServiceModule());
} else if (ENABLE_APACHE_HC_MODULE) {
modules.add(new ApacheHCHttpCommandExecutorServiceModule());
}
builder.modules(modules);
} | 3.26 |
pulsar_SingleSnapshotAbortedTxnProcessorImpl_trimExpiredAbortedTxns_rdh | // In this implementation we clear the invalid aborted txn ID one by one.
@Override
public void trimExpiredAbortedTxns() {
while ((!aborts.isEmpty()) && (!((ManagedLedgerImpl) (topic.getManagedLedger())).ledgerExists(aborts.get(aborts.firstKey()).getLedgerId()))) {
if (log.isDebugEnabled()) {
log.debug("[{}] Topic transaction buffer clear aborted transaction, TxnId : {}, Position : {}", topic.getName(), aborts.firstKey(), aborts.get(aborts.firstKey()));
}
aborts.remove(aborts.firstKey());
}
} | 3.26 |
pulsar_DLOutputStream_writeAsync_rdh | /**
 * Write all input stream data to the distributed log.
*
* @param inputStream
* the data we need to write
 * @return a future that completes with this stream once the data has been written
 */
CompletableFuture<DLOutputStream> writeAsync(InputStream inputStream) {
return getRecords(inputStream).thenCompose(this::writeAsync);
} | 3.26 |
pulsar_DLOutputStream_m0_rdh | /**
 * Every package is written as its own stream, so we need to mark the stream as EndOfStream when the
 * write is done.
*
 * @return a future that completes once the stream and the log manager are closed
 */
CompletableFuture<Void> m0() {
    return writer.markEndOfStream()
            .thenCompose(ignore -> writer.asyncClose())
            .thenCompose(ignore -> distributedLogManager.asyncClose());
} | 3.26 |
pulsar_CompactedTopicImpl_getCompactedTopicContext_rdh | /**
* Getter for CompactedTopicContext.
*
* @return CompactedTopicContext
*/
public Optional<CompactedTopicContext> getCompactedTopicContext() throws ExecutionException, InterruptedException {
return compactedTopicContext == null ? Optional.empty() : Optional.of(compactedTopicContext.get());
} | 3.26 |
pulsar_EtcdSessionWatcher_checkConnectionStatus_rdh | // task that runs every TICK_TIME to check Etcd connection
private synchronized void checkConnectionStatus() {
try {
CompletableFuture<SessionEvent> future = new CompletableFuture<>();
client.getKVClient().get(ByteSequence.from("/".getBytes(StandardCharsets.UTF_8))).thenRun(() -> {
future.complete(SessionEvent.Reconnected);
}).exceptionally(ex -> {
future.complete(SessionEvent.ConnectionLost);
return null;
});
SessionEvent etcdClientState;
try {
    etcdClientState = future.get(tickTimeMillis, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
    // Consider etcd disconnected if the etcd operation takes more than TICK_TIME
    etcdClientState = SessionEvent.ConnectionLost;
}
checkState(etcdClientState);
} catch (RejectedExecutionException | InterruptedException e) {
task.cancel(true);
} catch (Throwable t) {
log.warn("Error while checking Etcd connection status", t);}
} | 3.26 |
pulsar_AbstractPushSource_notifyError_rdh | /**
* Allows the source to notify errors asynchronously.
*
* @param ex
*/
public void notifyError(Exception ex) {
consume(new ErrorNotifierRecord(ex));
} | 3.26 |
pulsar_AbstractPushSource_consume_rdh | /**
* Send this message to be written to Pulsar.
 * Pass null when you are done with this task.
*
* @param record
* next message from source which should be sent to a Pulsar topic
*/
public void consume(Record<T> record) {
    try {
        if (record != null) {
queue.put(record);
} else {
queue.put(nullRecord);
}
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} | 3.26 |
pulsar_AbstractPushSource_getQueueLength_rdh | /**
 * Get the length of the queue that records are pushed onto.
 * Users can override this method to customize the queue length.
*
* @return queue length
*/
public int getQueueLength() {
return DEFAULT_QUEUE_LENGTH;
} | 3.26 |
pulsar_WorkerApiV2Resource_clientAppId_rdh | /**
*
* @deprecated use {@link #authParams()} instead
*/
@Deprecated
public String clientAppId() {
return httpRequest != null ? ((String) (httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName))) : null;
} | 3.26 |
pulsar_RawReader_create_rdh | /**
* Topic reader which receives raw messages (i.e. as they are stored in the managed ledger).
 */
public interface RawReader {
/**
* Create a raw reader for a topic.
*/
static CompletableFuture<RawReader> create(PulsarClient client, String topic, String subscription) {
CompletableFuture<Consumer<byte[]>> future = new CompletableFuture<>();
RawReader r = new RawReaderImpl(((PulsarClientImpl) (client)), topic, subscription, future);
return future.thenApply(__ -> r);
} | 3.26 |
pulsar_MetadataStoreFactoryImpl_removeIdentifierFromMetadataURL_rdh | /**
* Removes the identifier from the full metadata url.
*
* zk:my-zk:3000 -> my-zk:3000
* etcd:my-etcd:3000 -> my-etcd:3000
* my-default-zk:3000 -> my-default-zk:3000
*
* @param metadataURL
 * @return the metadata URL without the identifier prefix
 */
public static String removeIdentifierFromMetadataURL(String metadataURL) {
    MetadataStoreProvider provider = findProvider(metadataURL);
if (metadataURL.startsWith(provider.urlScheme() + ":")) {
return metadataURL.substring(provider.urlScheme().length() + 1);
}
return metadataURL;
} | 3.26 |
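A standalone sketch of the same prefix-stripping logic, with a hard-coded scheme list standing in for Pulsar's provider registry (`findProvider` and the provider classes are not reproduced here):

```java
public class MetadataUrlDemo {
    private static final String[] SCHEMES = {"zk", "etcd"};

    static String removeIdentifier(String metadataURL) {
        for (String scheme : SCHEMES) {
            if (metadataURL.startsWith(scheme + ":")) {
                return metadataURL.substring(scheme.length() + 1);
            }
        }
        return metadataURL; // no recognized identifier, return as-is
    }

    public static void main(String[] args) {
        System.out.println(removeIdentifier("zk:my-zk:3000"));      // my-zk:3000
        System.out.println(removeIdentifier("etcd:my-etcd:3000"));  // my-etcd:3000
        System.out.println(removeIdentifier("my-default-zk:3000")); // my-default-zk:3000
    }
}
```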
pulsar_MathUtils_ceilDiv_rdh | /**
* Ceil version of Math.floorDiv().
*
* @param x
* the dividend
* @param y
* the divisor
* @return the smallest value that is larger than or equal to the algebraic quotient.
*/
public static int ceilDiv(int x, int y) {
return -Math.floorDiv(-x, y);
} | 3.26 |
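The trick is the identity ceil(x/y) = -floor(-x/y). A quick standalone check:

```java
public class CeilDivDemo {
    static int ceilDiv(int x, int y) {
        return -Math.floorDiv(-x, y);
    }

    public static void main(String[] args) {
        System.out.println(ceilDiv(7, 2));  // 4
        System.out.println(ceilDiv(8, 2));  // 4
        System.out.println(ceilDiv(-7, 2)); // -3 (ceil of -3.5)
    }
}
```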
pulsar_MathUtils_m0_rdh | /**
* Compute sign safe mod.
*
* @param dividend
* @param divisor
 * @return the non-negative remainder of dividend modulo divisor
 */
public static int m0(long dividend, int divisor) {
int mod = ((int) (dividend % divisor));
if (mod < 0) {
mod += divisor;
}
return mod;
} | 3.26 |
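Unlike Java's `%`, which can return a negative remainder, the sign-safe variant always lands in `[0, divisor)`, which is useful for mapping a hash to a partition index. A quick check:

```java
public class SignSafeModDemo {
    static int signSafeMod(long dividend, int divisor) {
        int mod = (int) (dividend % divisor);
        if (mod < 0) {
            mod += divisor;
        }
        return mod;
    }

    public static void main(String[] args) {
        System.out.println(-3 % 5);             // -3 with plain %
        System.out.println(signSafeMod(-3, 5)); // 2
        System.out.println(signSafeMod(13, 5)); // 3
    }
}
```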
pulsar_PulsarClientImpl_newTransaction_rdh | //
// Transaction related API
//
// This method should be exposed in the PulsarClient interface. Only expose it when all the transaction features
// are completed.
// @Override
public TransactionBuilder newTransaction() {
return new TransactionBuilderImpl(this, tcClient);
} | 3.26 |
pulsar_PulsarClientImpl_newTableViewBuilder_rdh | /**
*
* @deprecated use {@link #newTableView(Schema)} instead.
*/
@Override
@Deprecated
public <T> TableViewBuilder<T> newTableViewBuilder(Schema<T> schema) {
return new TableViewBuilderImpl<>(this, schema);
} | 3.26 |
pulsar_PulsarClientImpl_getConnection_rdh | /**
* Only for test.
*/
@VisibleForTesting
public CompletableFuture<ClientCnx> getConnection(final String topic) {
TopicName topicName = TopicName.get(topic);
return lookup.getBroker(topicName).thenCompose(pair -> getConnection(pair.getLeft(), pair.getRight(), cnxPool.genRandomKeyToSelectCon()));
} | 3.26 |
pulsar_PulsarClientImpl_newPartitionedProducerImpl_rdh | /**
* Factory method for creating PartitionedProducerImpl instance.
*
* Allows overriding the PartitionedProducerImpl instance in tests.
*
* @param topic
* topic name
* @param conf
* producer configuration
* @param schema
* topic schema
* @param interceptors
* producer interceptors
* @param producerCreatedFuture
* future for signaling completion of async producer creation
* @param metadata
* partitioned topic metadata
* @param <T>
* message type class
* @return new PartitionedProducerImpl instance
*/
protected <T> PartitionedProducerImpl<T> newPartitionedProducerImpl(String topic, ProducerConfigurationData conf, Schema<T> schema, ProducerInterceptors interceptors, CompletableFuture<Producer<T>> producerCreatedFuture, PartitionedTopicMetadata metadata) {
return new PartitionedProducerImpl<>(this, topic, conf, metadata.partitions, producerCreatedFuture, schema, interceptors);
} | 3.26 |
pulsar_PulsarClientImpl_newProducerImpl_rdh | /**
* Factory method for creating ProducerImpl instance.
*
* Allows overriding the ProducerImpl instance in tests.
*
* @param topic
* topic name
* @param partitionIndex
* partition index of a partitioned topic. the value -1 is used for non-partitioned topics.
* @param conf
* producer configuration
* @param schema
* topic schema
* @param interceptors
* producer interceptors
* @param producerCreatedFuture
* future for signaling completion of async producer creation
 * @param overrideProducerName
 *            optional producer name that overrides the generated one
 * @param <T>
 *            message type class
 * @return a producer instance
 */
protected <T> ProducerImpl<T> newProducerImpl(String topic, int partitionIndex, ProducerConfigurationData conf,
        Schema<T> schema, ProducerInterceptors interceptors, CompletableFuture<Producer<T>> producerCreatedFuture,
        Optional<String> overrideProducerName) {
return new ProducerImpl<>(this, topic, conf, producerCreatedFuture, partitionIndex, schema, interceptors, overrideProducerName);
} | 3.26 |
pulsar_PulsarClientImpl_getSchema_rdh | /**
* Read the schema information for a given topic.
*
* If the topic does not exist or it has no schema associated, it will return an empty response
*/
public CompletableFuture<Optional<SchemaInfo>> getSchema(String topic) {
TopicName topicName;
try {
topicName = TopicName.get(topic);
} catch (Throwable t) {
return FutureUtil.failedFuture(new PulsarClientException.InvalidTopicNameException(("Invalid topic name: '" + topic) + "'"));
}
return lookup.getSchema(topicName);
} | 3.26 |
pulsar_PulsarClientImpl_timer_rdh | /**
 * Visible for pulsar-functions.
*/
public Timer timer() {
return timer;
} | 3.26 |
pulsar_HandlerState_changeToReadyState_rdh | // moves the state to ready if it wasn't closed
protected boolean changeToReadyState() {
if (STATE_UPDATER.get(this) == State.Ready) {
    return true;
}
return STATE_UPDATER.compareAndSet(this, State.Uninitialized, State.Ready)
        || STATE_UPDATER.compareAndSet(this, State.Connecting, State.Ready)
        || STATE_UPDATER.compareAndSet(this, State.RegisteringSchema, State.Ready);
} | 3.26 |
pulsar_InetAddressUtils_isIPv6HexCompressedAddress_rdh | /**
* Checks whether the parameter is a valid compressed IPv6 address.
*
* @param input
* the address string to check for validity
* @return true if the input parameter is a valid compressed IPv6 address
*/
public static boolean isIPv6HexCompressedAddress(final String input) {
int colonCount = 0;
for (int i = 0; i < input.length(); i++) {
if (input.charAt(i) == COLON_CHAR) {
colonCount++;
}
}
return (colonCount <= MAX_COLON_COUNT)
&& IPV6_HEX_COMPRESSED_PATTERN.matcher(input).matches();
} | 3.26 |
pulsar_InetAddressUtils_isIPv6StdAddress_rdh | /**
* Checks whether the parameter is a valid standard (non-compressed) IPv6 address.
*
* @param input
* the address string to check for validity
* @return true if the input parameter is a valid standard (non-compressed) IPv6 address
*/
public static boolean isIPv6StdAddress(final String input) {
return IPV6_STD_PATTERN.matcher(input).matches();
} | 3.26 |
pulsar_InetAddressUtils_isIPv6Address_rdh | /**
* Checks whether the parameter is a valid IPv6 address (including compressed).
*
* @param input
* the address string to check for validity
* @return true if the input parameter is a valid standard or compressed IPv6 address
*/
public static boolean isIPv6Address(final String input) {
return isIPv6StdAddress(input) || isIPv6HexCompressedAddress(input);
} | 3.26 |
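A hypothetical usage sketch, assuming the `InetAddressUtils` class shown above is on the classpath; the sample addresses exercise both the standard and compressed paths:

```java
// Hypothetical usage; InetAddressUtils is the utility class above.
public class Ipv6CheckDemo {
    public static void main(String[] args) {
        String[] candidates = {
            "2001:0db8:0000:0000:0000:8a2e:0370:7334", // standard, uncompressed form
            "2001:db8::8a2e:370:7334",                 // compressed form ("::")
            "not-an-address"
        };
        for (String c : candidates) {
            System.out.printf("%-42s std=%b any=%b%n", c,
                    InetAddressUtils.isIPv6StdAddress(c),
                    InetAddressUtils.isIPv6Address(c));
        }
    }
}
```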
pulsar_IOUtils_confirmPrompt_rdh | /**
* Confirm prompt for the console operations.
*
* @param prompt
* Prompt message to be displayed on console
* @return Returns true if confirmed as 'Y', returns false if confirmed as 'N'
* @throws IOException
*/
public static boolean confirmPrompt(String prompt) throws IOException {
while (true) {
System.out.print(prompt + " (Y or N) ");
StringBuilder responseBuilder = new StringBuilder();
while (true) {
int c = System.in.read();
if (((c == (-1)) || (c == '\r')) || (c == '\n')) {
break;
}
responseBuilder.append(((char) (c)));
}
String response = responseBuilder.toString();
if (response.equalsIgnoreCase("y") || response.equalsIgnoreCase("yes")) {
return true;
} else if (response.equalsIgnoreCase("n") || response.equalsIgnoreCase("no")) {
return false;
}
System.out.println("Invalid input: " + response);
// else ask them again
}
} | 3.26 |
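A hedged usage sketch, e.g. guarding a destructive CLI operation; the prompt text is illustrative and `IOUtils` is assumed to be the class above:

```java
import java.io.IOException;

public class ConfirmDemo {
    public static void main(String[] args) throws IOException {
        // Illustrative call site for the confirmPrompt method above.
        if (IOUtils.confirmPrompt("Delete all cluster metadata?")) {
            System.out.println("proceeding...");
        } else {
            System.out.println("aborted.");
        }
    }
}
```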
pulsar_MetaStoreImpl_m2_rdh | // Update the timestamp if it is missing or 0.
// 3 cases - timestamp does not exist for ledgers serialized earlier
//         - timestamp is 0 for a ledger in recovery
//         - ledger has a timestamp, which is the normal case now
private static ManagedLedgerInfo m2(ManagedLedgerInfo info) {
List<ManagedLedgerInfo.LedgerInfo> infoList = new ArrayList<>(info.getLedgerInfoCount());
long currentTime = System.currentTimeMillis();
for (ManagedLedgerInfo.LedgerInfo ledgerInfo : info.getLedgerInfoList()) {
if ((!ledgerInfo.hasTimestamp()) || (ledgerInfo.getTimestamp() == 0)) {
ManagedLedgerInfo.LedgerInfo.Builder singleInfoBuilder = ledgerInfo.toBuilder();
singleInfoBuilder.setTimestamp(currentTime);
infoList.add(singleInfoBuilder.build());
} else {
infoList.add(ledgerInfo);
}
}
ManagedLedgerInfo.Builder mlInfo = ManagedLedgerInfo.newBuilder();
mlInfo.addAllLedgerInfo(infoList);
if (info.hasTerminatedPosition()) {
mlInfo.setTerminatedPosition(info.getTerminatedPosition());
}
mlInfo.addAllProperties(info.getPropertiesList());
    return mlInfo.build();
} | 3.26 |
pulsar_MetaStoreImpl_compressManagedInfo_rdh | /**
* Compress Managed Info data such as LedgerInfo, CursorInfo.
*
* compression data structure
* [MAGIC_NUMBER](2) + [METADATA_SIZE](4) + [METADATA_PAYLOAD] + [MANAGED_LEDGER_INFO_PAYLOAD]
*/
private byte[] compressManagedInfo(byte[] info, byte[] metadata, int metadataSerializedSize, MLDataFormats.CompressionType compressionType) {
if ((compressionType == null) || compressionType.equals(CompressionType.NONE)) {
return info;
}
ByteBuf metadataByteBuf = null;
ByteBuf encodeByteBuf = null;
try {
    metadataByteBuf = PulsarByteBufAllocator.DEFAULT.buffer(metadataSerializedSize + 6, metadataSerializedSize + 6);
metadataByteBuf.writeShort(MAGIC_MANAGED_INFO_METADATA);
metadataByteBuf.writeInt(metadataSerializedSize);
metadataByteBuf.writeBytes(metadata);
encodeByteBuf = getCompressionCodec(compressionType).encode(Unpooled.wrappedBuffer(info));
CompositeByteBuf compositeByteBuf = PulsarByteBufAllocator.DEFAULT.compositeBuffer();
compositeByteBuf.addComponent(true, metadataByteBuf);
compositeByteBuf.addComponent(true, encodeByteBuf);
byte[] dataBytes = new byte[compositeByteBuf.readableBytes()];
compositeByteBuf.readBytes(dataBytes);
return dataBytes;
} finally {
if (metadataByteBuf != null) {
metadataByteBuf.release();
}
if (encodeByteBuf != null) {
encodeByteBuf.release();
}
}
} | 3.26 |
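The wire layout is plain length-prefixed framing. A standalone sketch of the same header using `java.nio.ByteBuffer`; the magic value is illustrative and compression is elided (the real code uses Netty buffers and a pluggable codec):

```java
import java.nio.ByteBuffer;

public class FramingDemo {
    static final short MAGIC = (short) 0x0b9f; // illustrative magic number

    // [MAGIC](2) + [METADATA_SIZE](4) + [METADATA] + [PAYLOAD]
    static byte[] frame(byte[] metadata, byte[] payload) {
        ByteBuffer buf = ByteBuffer.allocate(2 + 4 + metadata.length + payload.length);
        buf.putShort(MAGIC);
        buf.putInt(metadata.length);
        buf.put(metadata);
        buf.put(payload);
        return buf.array();
    }

    public static void main(String[] args) {
        byte[] framed = frame("meta".getBytes(), "compressed-info".getBytes());
        ByteBuffer rd = ByteBuffer.wrap(framed);
        System.out.println(Integer.toHexString(rd.getShort() & 0xffff)); // b9f
        byte[] meta = new byte[rd.getInt()];
        rd.get(meta);
        System.out.println(new String(meta)); // meta
    }
}
```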
pulsar_FunctionMetaDataManager_initialize_rdh | /**
 * Public methods. Please use these methods when referencing FunctionMetaDataManager from an external class.
*/
/**
* Initializes the FunctionMetaDataManager.
* We create a new reader
*/
public synchronized void initialize() {
try (Reader reader = FunctionMetaDataTopicTailer.createReader(workerConfig, pulsarClient.newReader(), MessageId.earliest)) {
// read all existing messages
while (reader.hasMessageAvailable()) {
    processMetaDataTopicMessage(reader.readNext());
}
this.isInitialized.complete(null);
} catch (Exception e) {
log.error("Failed to initialize meta data store", e);
throw new RuntimeException("Failed to initialize Metadata Manager", e);
}
log.info("FunctionMetaData Manager initialization complete");
} | 3.26 |
pulsar_FunctionMetaDataManager_giveupLeadership_rdh | /**
* called by the leader service when we lose leadership. We close the exclusive producer
* and start the tailer.
*/
public synchronized void giveupLeadership() {
log.info("FunctionMetaDataManager giving up leadership by closing exclusive producer");
try {
exclusiveLeaderProducer.close();
exclusiveLeaderProducer = null;
initializeTailer();
} catch (PulsarClientException e) {
log.error("Error closing exclusive producer", e);
errorNotifier.triggerError(e);
}
} | 3.26 |
pulsar_FunctionMetaDataManager_getFunctionMetaData_rdh | /**
* Get the function metadata for a function.
*
* @param tenant
* the tenant the function belongs to
* @param namespace
* the namespace the function belongs to
* @param functionName
* the function name
* @return FunctionMetaData that contains the function metadata
*/
public synchronized FunctionMetaData getFunctionMetaData(String tenant, String namespace, String functionName) {
return this.functionMetaDataMap.get(tenant).get(namespace).get(functionName);
} | 3.26 |
pulsar_FunctionMetaDataManager_acquireExclusiveWrite_rdh | /**
 * Acquires an exclusive producer. This method cannot return null. It can only return a valid exclusive producer
* or throw NotLeaderAnymore exception.
*
* @param isLeader
* if the worker is still the leader
* @return A valid exclusive producer
* @throws WorkerUtils.NotLeaderAnymore
* if the worker is no longer the leader.
*/
public Producer<byte[]> acquireExclusiveWrite(Supplier<Boolean> isLeader) throws NotLeaderAnymore {
// creates exclusive producer for metadata topic
return WorkerUtils.createExclusiveProducerWithRetry(pulsarClient, workerConfig.getFunctionMetadataTopic(), workerConfig.getWorkerId() + "-leader", isLeader, 1000);
} | 3.26 |
pulsar_FunctionMetaDataManager_start_rdh | // Starts the tailer if we are in non-leader mode
public synchronized void start() {
if (exclusiveLeaderProducer == null) {
try {
// This means that we are in non-leader mode. start function metadata tailer
initializeTailer();
} catch (PulsarClientException e) {
    throw new RuntimeException("Could not start MetaData topic tailer", e);
}
}
} | 3.26 |
pulsar_FunctionMetaDataManager_getAllFunctionMetaData_rdh | /**
 * Get a list of the metadata for every function.
*
* @return list of function metadata
*/
public synchronized List<FunctionMetaData> getAllFunctionMetaData() {
List<FunctionMetaData> ret = new LinkedList<>();
for (Map<String, Map<String, FunctionMetaData>> i : this.functionMetaDataMap.values()) {
for (Map<String, FunctionMetaData> j : i.values()) {
ret.addAll(j.values());
}
}
return ret;
} | 3.26 |
pulsar_FunctionMetaDataManager_listFunctions_rdh | /**
* List all the functions in a namespace.
*
* @param tenant
* the tenant the namespace belongs to
* @param namespace
* the namespace
* @return a list of function names
 */
public synchronized Collection<FunctionMetaData> listFunctions(String tenant, String namespace) {
List<FunctionMetaData> ret = new LinkedList<>();
if (!this.functionMetaDataMap.containsKey(tenant)) {
return ret;
}
if (!this.functionMetaDataMap.get(tenant).containsKey(namespace)) {
return ret;
}
for (FunctionMetaData functionMetaData : this.functionMetaDataMap.get(tenant).get(namespace).values()) {
ret.add(functionMetaData);
}
return ret;
} | 3.26 |
pulsar_FunctionMetaDataManager_processMetaDataTopicMessage_rdh | /**
* This is called by the MetaData tailer. It updates the in-memory cache.
* It eats up any exception thrown by processUpdate/processDeregister since
* that's just part of the state machine
*
* @param message
* The message read from metadata topic that needs to be processed
*/
public void processMetaDataTopicMessage(Message<byte[]> message) throws IOException {
try {
if (workerConfig.getUseCompactedMetadataTopic()) {
    processCompactedMetaDataTopicMessage(message);
} else {
processUncompactedMetaDataTopicMessage(message);
}
} catch (IllegalArgumentException e) {
// It's ok. Nothing much we can do about it
}
lastMessageSeen = message.getMessageId();
} | 3.26 |
pulsar_FunctionMetaDataManager_containsFunctionMetaData_rdh | /**
* Private methods for internal use. Should not be used outside of this class
*/
private boolean containsFunctionMetaData(FunctionMetaData functionMetaData) {
return containsFunctionMetaData(functionMetaData.getFunctionDetails());
} | 3.26 |
pulsar_FunctionMetaDataManager_containsFunction_rdh | /**
* Check if the function exists.
*
* @param tenant
* tenant that the function belongs to
* @param namespace
* namespace that the function belongs to
* @param functionName
* name of function
* @return true if function exists and false if it does not
*/
public synchronized boolean containsFunction(String tenant, String namespace, String functionName) {
return containsFunctionMetaData(tenant, namespace, functionName);
} | 3.26 |
pulsar_FunctionMetaDataManager_updateFunctionOnLeader_rdh | /**
* Called by the worker when we are in the leader mode. In this state, we update our in-memory
* data structures and then write to the metadata topic.
*
* @param functionMetaData
* The function metadata in question
* @param delete
* Is this a delete operation
* @throws IllegalStateException
* if we are not the leader
* @throws IllegalArgumentException
* if the request is out of date.
*/
public synchronized void updateFunctionOnLeader(FunctionMetaData functionMetaData, boolean delete) throws IllegalStateException, IllegalArgumentException {
boolean needsScheduling;
if (exclusiveLeaderProducer == null) {
throw new IllegalStateException("Not the leader");
}
// Check first to avoid local cache update failure
m0(functionMetaData, delete);
byte[] toWrite;
if (workerConfig.getUseCompactedMetadataTopic()) {
    if (delete) {
        toWrite = "".getBytes();
    } else {
        toWrite = functionMetaData.toByteArray();
    }
} else {
    Request.ServiceRequest serviceRequest = Request.ServiceRequest.newBuilder().setServiceRequestType(delete ? ServiceRequestType.DELETE : ServiceRequestType.UPDATE).setFunctionMetaData(functionMetaData).setWorkerId(workerConfig.getWorkerId()).setRequestId(UUID.randomUUID().toString()).build();
    toWrite = serviceRequest.toByteArray();
}
try {
    TypedMessageBuilder builder = exclusiveLeaderProducer.newMessage().value(toWrite).property(f0, Long.toString(functionMetaData.getVersion()));
if (workerConfig.getUseCompactedMetadataTopic()) {
builder = builder.key(FunctionCommon.getFullyQualifiedName(functionMetaData.getFunctionDetails()));
}
lastMessageSeen = builder.send();
if (delete) {
needsScheduling = processDeregister(functionMetaData);
} else {
needsScheduling = processUpdate(functionMetaData);
}
} catch (Exception e) {
log.error("Could not write into Function Metadata topic", e);
throw new IllegalStateException("Internal Error updating function at the leader", e);
}
if (needsScheduling) {
    this.schedulerManager.schedule();
}
} | 3.26 |
pulsar_FunctionMetaDataManager_acquireLeadership_rdh | /**
* Called by the leader service when this worker becomes the leader.
* We first get exclusive producer on the metadata topic. Next we drain the tailer
* to ensure that we have caught up to metadata topic. After which we close the tailer.
 * Note that this method cannot be synchronized because the tailer might still be processing messages.
*/
public void acquireLeadership(Producer<byte[]> exclusiveProducer) {
log.info("FunctionMetaDataManager becoming leader by creating exclusive producer");if (exclusiveLeaderProducer != null) {
log.error("FunctionMetaData Manager entered invalid state");
errorNotifier.triggerError(new IllegalStateException());
}
this.exclusiveLeaderProducer = exclusiveProducer;
FunctionMetaDataTopicTailer tailer = this.functionMetaDataTopicTailer;
this.functionMetaDataTopicTailer = null;
// Now that we have created the exclusive producer, wait for reader to get over
if (tailer != null) {
try {
tailer.stopWhenNoMoreMessages().get();
} catch (Exception e) {
log.error("Error while waiting for metadata tailer thread to finish", e);
errorNotifier.triggerError(e);
}
tailer.close();
}
log.info("FunctionMetaDataManager done becoming leader");
} | 3.26 |
pulsar_MessageDeduplication_checkStatus_rdh | /**
* Check the status of deduplication. If the configuration has changed, it will enable/disable deduplication,
 * returning a future to track the completion of the task.
*/
public CompletableFuture<Void> checkStatus() {
boolean shouldBeEnabled = isDeduplicationEnabled();
synchronized(this) {
if ((status == Status.Recovering) || (status == Status.Removing)) {
// If there's already a transition happening, check later for status
pulsar.getExecutor().schedule(this::checkStatus, 1, TimeUnit.MINUTES);
return CompletableFuture.completedFuture(null);
}
if ((status == Status.Initialized) && (!shouldBeEnabled)) {
status = Status.Removing;
managedLedger.asyncDeleteCursor(PersistentTopic.DEDUPLICATION_CURSOR_NAME, new DeleteCursorCallback() {
@Override
public void deleteCursorComplete(Object ctx) {
status = Status.Disabled;
log.info("[{}] Deleted deduplication cursor", f0.getName());
}
@Override
public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) {
if (exception instanceof ManagedLedgerException.CursorNotFoundException) {
status = Status.Disabled;
} else {
log.error("[{}] Deleted deduplication cursor error", f0.getName(), exception);
}
}
}, null);
}
if ((status == Status.Enabled) && (!shouldBeEnabled)) {
// Disabled deduping
CompletableFuture<Void> future = new CompletableFuture<>();
status = Status.Removing;
managedLedger.asyncDeleteCursor(PersistentTopic.DEDUPLICATION_CURSOR_NAME, new DeleteCursorCallback() {
@Override
public void deleteCursorComplete(Object ctx) {
status = Status.Disabled;
managedCursor = null;
highestSequencedPushed.clear();
highestSequencedPersisted.clear();
future.complete(null);
log.info("[{}] Disabled deduplication", f0.getName());
}
@Override
public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) {
// It's ok for disable message deduplication.
if (exception instanceof ManagedLedgerException.CursorNotFoundException) {
status = Status.Disabled;
managedCursor = null;
highestSequencedPushed.clear();
highestSequencedPersisted.clear();
future.complete(null);
} else {
log.warn("[{}] Failed to disable deduplication: {}", f0.getName(), exception.getMessage());
status = Status.f1;
future.completeExceptionally(exception);
}
}
}, null);
return future;
} else if (((status == Status.Disabled) || (status == Status.Initialized)) && shouldBeEnabled) {
// Enable deduping
CompletableFuture<Void> future = new CompletableFuture<>();
managedLedger.asyncOpenCursor(PersistentTopic.DEDUPLICATION_CURSOR_NAME, new OpenCursorCallback() {
@Override
public void openCursorComplete(ManagedCursor cursor, Object ctx) {
// We don't want to retain cache for this cursor
cursor.setAlwaysInactive();
managedCursor = cursor;
m0().thenRun(() -> {
status = Status.Enabled;
future.complete(null);
log.info("[{}] Enabled deduplication", f0.getName());}).exceptionally(ex -> {
status = Status.f1;
log.warn("[{}] Failed to enable deduplication: {}", f0.getName(), ex.getMessage());
future.completeExceptionally(ex);
return null;
});
}
@Override
public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
log.warn("[{}] Failed to enable deduplication: {}", f0.getName(), exception.getMessage());
future.completeExceptionally(exception);
}
}, null);
return future;
} else {
// Nothing to do, we are in the correct state
return CompletableFuture.completedFuture(null);
}
}
} | 3.26 |
pulsar_MessageDeduplication_replayCursor_rdh | /**
* Read all the entries published from the cursor position until the most recent and update the highest sequence id
* from each producer.
*
* @param future
* future to trigger when the replay is complete
*/
private void replayCursor(CompletableFuture<Void> future) {
managedCursor.asyncReadEntries(100, new ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
for (Entry entry : entries) {
ByteBuf messageMetadataAndPayload = entry.getDataBuffer();
MessageMetadata md = Commands.parseMessageMetadata(messageMetadataAndPayload);
String producerName = md.getProducerName();
long sequenceId = Math.max(md.getHighestSequenceId(), md.getSequenceId());
highestSequencedPushed.put(producerName, sequenceId);
highestSequencedPersisted.put(producerName, sequenceId);
producerRemoved(producerName);
entry.release();
}
if (managedCursor.hasMoreEntries()) {
// Read next batch of entries
pulsar.getExecutor().execute(() -> replayCursor(future));
} else {
// Done replaying
future.complete(null);
}
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
future.completeExceptionally(exception);
}
}, null, PositionImpl.LATEST);
} | 3.26 |
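The replay loop is a common async-pagination pattern: read a batch, fold it into in-memory state, and either re-schedule yourself on the executor or complete the future. A standalone sketch with a fake entry source; the executor, batch size, and counters are illustrative:

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ReplayLoopDemo {
    static final ExecutorService executor = Executors.newSingleThreadExecutor();
    static final int TOTAL = 250, BATCH = 100;
    static int cursor = 0;

    // Stand-in for managedCursor.asyncReadEntries(100, callback, ...)
    static List<Integer> readBatch() {
        int to = Math.min(cursor + BATCH, TOTAL);
        List<Integer> batch = IntStream.range(cursor, to).boxed().collect(Collectors.toList());
        cursor = to;
        return batch;
    }

    static void replay(CompletableFuture<Void> done) {
        for (Integer entry : readBatch()) {
            // update in-memory state per entry (per-producer sequence ids in the original)
        }
        if (cursor < TOTAL) {
            executor.execute(() -> replay(done)); // schedule the next batch off-stack
        } else {
            done.complete(null); // done replaying
        }
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<Void> done = new CompletableFuture<>();
        replay(done);
        done.get();
        System.out.println("replayed " + TOTAL + " entries");
        executor.shutdown();
    }
}
```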
pulsar_MessageDeduplication_isDuplicate_rdh | /**
* Assess whether the message was already stored in the topic.
*
* @return true if the message should be published or false if it was recognized as a duplicate
*/
public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf headersAndPayload) {
if ((!isEnabled()) || publishContext.isMarkerMessage()) {
return MessageDupStatus.NotDup;
}
String producerName = publishContext.getProducerName();
long sequenceId = publishContext.getSequenceId();
long highestSequenceId = Math.max(publishContext.getHighestSequenceId(), sequenceId);
MessageMetadata md = null;
if (producerName.startsWith(replicatorPrefix)) {
// Message is coming from replication, we need to use the original producer name and sequence id
// for the purpose of deduplication and not rely on the "replicator" name.
int readerIndex = headersAndPayload.readerIndex();
md = Commands.parseMessageMetadata(headersAndPayload);
producerName = md.getProducerName();
sequenceId = md.getSequenceId();
highestSequenceId = Math.max(md.getHighestSequenceId(), sequenceId);
publishContext.setOriginalProducerName(producerName);
publishContext.setOriginalSequenceId(sequenceId);
publishContext.setOriginalHighestSequenceId(highestSequenceId);
headersAndPayload.readerIndex(readerIndex);
}
long chunkID = -1;
long totalChunk = -1;
if (publishContext.isChunked()) {
if (md == null) {
int readerIndex = headersAndPayload.readerIndex();
md = Commands.parseMessageMetadata(headersAndPayload);
headersAndPayload.readerIndex(readerIndex);
}
chunkID = md.getChunkId();
totalChunk = md.getNumChunksFromMsg();
}
// All chunks of a message use the same message metadata and sequence ID,
// so we only need to check the sequence ID for the last chunk in a chunk message.
if ((chunkID != (-1)) && (chunkID != (totalChunk - 1))) {
publishContext.setProperty(IS_LAST_CHUNK, Boolean.FALSE);
return MessageDupStatus.NotDup;
}
// Synchronize the get() and subsequent put() on the map. This would only be relevant if the producer
// disconnects and re-connects very quickly. At that point the call can be coming from a different thread
synchronized(highestSequencedPushed) {
Long lastSequenceIdPushed = highestSequencedPushed.get(producerName);
if ((lastSequenceIdPushed != null) && (sequenceId <= lastSequenceIdPushed)) {
if (log.isDebugEnabled()) {
log.debug("[{}] Message identified as duplicated producer={} seq-id={} -- highest-seq-id={}", f0.getName(), producerName, sequenceId, lastSequenceIdPushed);
}
// Also need to check sequence ids that have been persisted.
// If the current message's seq id is smaller than or equal to the
// lastSequenceIdPersisted, then it's definitely a dup.
// If the current message's seq id is between lastSequenceIdPersisted and
// lastSequenceIdPushed, then we cannot be sure whether the message is a dup or not;
// we should return an error to the producer for the latter case so that it can retry at a future time.
Long lastSequenceIdPersisted = highestSequencedPersisted.get(producerName);
if ((lastSequenceIdPersisted != null) && (sequenceId <= lastSequenceIdPersisted)) {
return MessageDupStatus.Dup;
} else {
return MessageDupStatus.Unknown;
}
}
highestSequencedPushed.put(producerName, highestSequenceId);
}
// Only put sequence ID into highestSequencedPushed and
// highestSequencedPersisted until receive and persistent the last chunk.
if ((chunkID != (-1)) && (chunkID == (totalChunk - 1))) {
    publishContext.setProperty(IS_LAST_CHUNK, Boolean.TRUE);
}
return MessageDupStatus.NotDup;
} | 3.26 |
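Stripped of chunking and replication, the core dedup check is a synchronized compare-and-record against a per-producer high-water mark, with a second map of persisted sequence ids deciding between "definitely dup" and "unknown". A minimal sketch; the status values and names are illustrative:

```java
import java.util.HashMap;
import java.util.Map;

public class DedupDemo {
    enum DupStatus { NOT_DUP, DUP, UNKNOWN }

    private final Map<String, Long> highestPushed = new HashMap<>();
    private final Map<String, Long> highestPersisted = new HashMap<>();

    synchronized DupStatus isDuplicate(String producer, long seqId) {
        Long pushed = highestPushed.get(producer);
        if (pushed != null && seqId <= pushed) {
            Long persisted = highestPersisted.get(producer);
            // <= persisted: definitely stored already; otherwise still in flight -> unknown
            return (persisted != null && seqId <= persisted) ? DupStatus.DUP : DupStatus.UNKNOWN;
        }
        highestPushed.put(producer, seqId);
        return DupStatus.NOT_DUP;
    }

    public static void main(String[] args) {
        DedupDemo d = new DedupDemo();
        System.out.println(d.isDuplicate("p1", 1)); // NOT_DUP
        d.highestPersisted.put("p1", 1L);           // simulate persistence ack
        System.out.println(d.isDuplicate("p1", 1)); // DUP
        System.out.println(d.isDuplicate("p1", 2)); // NOT_DUP
    }
}
```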
pulsar_MessageDeduplication_purgeInactiveProducers_rdh | /**
* Remove from hash maps all the producers that were inactive for more than the configured amount of time.
*/
public synchronized void purgeInactiveProducers() {
long minimumActiveTimestamp = System.currentTimeMillis() - TimeUnit.MINUTES.toMillis(pulsar.getConfiguration().getBrokerDeduplicationProducerInactivityTimeoutMinutes());
Iterator<Map.Entry<String, Long>> mapIterator = inactiveProducers.entrySet().iterator();
boolean hasInactive = false;
while (mapIterator.hasNext()) {
    Map.Entry<String, Long> entry = mapIterator.next();
String producerName = entry.getKey();
long lastActiveTimestamp = entry.getValue();
if (lastActiveTimestamp < minimumActiveTimestamp) {
log.info("[{}] Purging dedup information for producer {}",
f0.getName(), producerName);
mapIterator.remove();
highestSequencedPushed.remove(producerName);
highestSequencedPersisted.remove(producerName);
hasInactive = true;
}
}
if (hasInactive && isEnabled()) {
takeSnapshot(getManagedCursor().getMarkDeletedPosition());
}
} | 3.26 |
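The purge is the standard `Iterator.remove()` sweep over a timestamp map. A standalone sketch; the timeout value is illustrative:

```java
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class PurgeDemo {
    public static void main(String[] args) {
        Map<String, Long> lastActive = new HashMap<>();
        long now = System.currentTimeMillis();
        lastActive.put("old-producer", now - 10 * 60_000); // inactive for 10 minutes
        lastActive.put("live-producer", now);

        long cutoff = now - 5 * 60_000; // 5-minute inactivity timeout
        Iterator<Map.Entry<String, Long>> it = lastActive.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, Long> e = it.next();
            if (e.getValue() < cutoff) {
                it.remove(); // safe removal during iteration
            }
        }
        System.out.println(lastActive.keySet()); // [live-producer]
    }
}
```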
pulsar_MessageDeduplication_producerRemoved_rdh | /**
* Topic will call this method whenever a producer disconnects.
*/
public void producerRemoved(String producerName) {
// Producer is no longer active
inactiveProducers.put(producerName, System.currentTimeMillis());
} | 3.26 |
pulsar_MessageDeduplication_recordMessagePersisted_rdh | /**
* Call this method whenever a message is persisted to get the chance to trigger a snapshot.
*/
public void recordMessagePersisted(PublishContext publishContext, PositionImpl position) {
if ((!isEnabled()) || publishContext.isMarkerMessage()) {
return;
}
String producerName = publishContext.getProducerName();
long sequenceId = publishContext.getSequenceId();
long highestSequenceId = publishContext.getHighestSequenceId();
if (publishContext.getOriginalProducerName() != null) {
// In case of replicated messages, this will be different from the current replicator producer name
producerName = publishContext.getOriginalProducerName();
sequenceId = publishContext.getOriginalSequenceId();
highestSequenceId = publishContext.getOriginalHighestSequenceId();
}
Boolean isLastChunk = ((Boolean) (publishContext.getProperty(IS_LAST_CHUNK)));
if ((isLastChunk == null) || isLastChunk) {
highestSequencedPersisted.put(producerName, Math.max(highestSequenceId, sequenceId));
}
if ((++snapshotCounter) >= snapshotInterval) {
snapshotCounter = 0;
takeSnapshot(position);
}
} | 3.26 |
pulsar_RangeEntryCacheImpl_readFromStorage_rdh | /**
* Reads the entries from Storage.
*
* @param lh
* the handle
* @param firstEntry
* the first entry
* @param lastEntry
* the last entry
* @param shouldCacheEntry
* if we should put the entry into the cache
* @return a handle to the operation
*/
CompletableFuture<List<EntryImpl>> readFromStorage(ReadHandle lh, long firstEntry, long lastEntry, boolean shouldCacheEntry) {
final int entriesToRead = ((int) (lastEntry - firstEntry)) + 1;
CompletableFuture<List<EntryImpl>> readResult = lh.readAsync(firstEntry, lastEntry).thenApply(ledgerEntries -> {
requireNonNull(ml.getName());
requireNonNull(ml.getExecutor());
try {
// We got the entries, we need to transform them to a List<> type
long totalSize = 0;
final List<EntryImpl> entriesToReturn = Lists.newArrayListWithExpectedSize(entriesToRead);
for (LedgerEntry e : ledgerEntries) {
EntryImpl entry = RangeEntryCacheManagerImpl.create(e, interceptor);
entriesToReturn.add(entry);
totalSize += entry.getLength();
if (shouldCacheEntry) {
EntryImpl cacheEntry = EntryImpl.create(entry);
insert(cacheEntry);
cacheEntry.release();
}
}
ml.getMbean().recordReadEntriesOpsCacheMisses(entriesToReturn.size(), totalSize);
manager.mlFactoryMBean.recordCacheMiss(entriesToReturn.size(), totalSize);
ml.getMbean().addReadEntriesSample(entriesToReturn.size(), totalSize);
return entriesToReturn;
} finally {
ledgerEntries.close();
}
});
// handle LH invalidation
readResult.exceptionally(exception -> {
if ((exception instanceof BKException)
        && (((BKException) (exception)).getCode() == BKException.Code.TooManyRequestsException)) {
    // Do not invalidate the ledger handle on a transient overload error
} else {
ml.invalidateLedgerHandle(lh);
pendingReadsManager.invalidateLedger(lh.getId());
}
return null;
});
return readResult;
} | 3.26 |
pulsar_SingletonCleanerListener_objectMapperFactoryClearCaches_rdh | // Call ObjectMapperFactory.clearCaches() using reflection to clear up classes held in
// the singleton Jackson ObjectMapper instances
private static void objectMapperFactoryClearCaches() {
if (OBJECTMAPPERFACTORY_CLEARCACHES_METHOD != null) {
try {
OBJECTMAPPERFACTORY_CLEARCACHES_METHOD.invoke(null);
} catch (IllegalAccessException | InvocationTargetException e) {LOG.warn("Cannot clean singleton ObjectMapper caches", e);
}
    }
}
pulsar_SingletonCleanerListener_jsonSchemaClearCaches_rdh | // Call JSONSchema.clearCaches() using reflection to clear up classes held in
// the singleton Jackson ObjectMapper instance of JSONSchema class
private static void jsonSchemaClearCaches() {
if (JSONSCHEMA_CLEARCACHES_METHOD != null) {
try {
JSONSCHEMA_CLEARCACHES_METHOD.invoke(null);
} catch (IllegalAccessException | InvocationTargetException e) {
LOG.warn("Cannot clean singleton JSONSchema caches", e);
    }
}
} | 3.26 |
pulsar_BrokerInterceptors_load_rdh | /**
* Load the broker event interceptor for the given <tt>interceptor</tt> list.
*
* @param conf
* the pulsar broker service configuration
* @return the collection of broker event interceptor
*/
public static BrokerInterceptor load(ServiceConfiguration conf) throws IOException {
BrokerInterceptorDefinitions definitions = BrokerInterceptorUtils.searchForInterceptors(conf.getBrokerInterceptorsDirectory(), conf.getNarExtractionDirectory());
ImmutableMap.Builder<String, BrokerInterceptorWithClassLoader> builder = ImmutableMap.builder();
conf.getBrokerInterceptors().forEach(interceptorName -> {
BrokerInterceptorMetadata definition = definitions.interceptors().get(interceptorName);
if (null == definition) {
throw new RuntimeException((("No broker interceptor is found for name `" + interceptorName) + "`. Available broker interceptors are : ") + definitions.interceptors());
}
BrokerInterceptorWithClassLoader interceptor;
try {
interceptor = BrokerInterceptorUtils.load(definition, conf.getNarExtractionDirectory());
if (interceptor != null) {
builder.put(interceptorName, interceptor);
}
log.info("Successfully loaded broker interceptor for name `{}`", interceptorName);
} catch (IOException e) {
log.error(("Failed to load the broker interceptor for name `" + interceptorName) + "`", e);
throw new RuntimeException(("Failed to load the broker interceptor for name `" + interceptorName) + "`");
}
});
Map<String, BrokerInterceptorWithClassLoader> interceptors = builder.build();
if ((interceptors != null) && (!interceptors.isEmpty())) {
    return new BrokerInterceptors(interceptors);
} else {
    return null;
}
} | 3.26 |
pulsar_NoStrictCacheSizeAllocator_release_rdh | /**
 * This method is used to release used cache size and add it back to the available cache size.
 * In the normal case, the available size shouldn't exceed the max cache size.
*
* @param size
* release size
*/
public void release(long size) {
    lock.lock();
try {
availableCacheSize.add(size);
if (availableCacheSize.longValue() > maxCacheSize) {
availableCacheSize.reset();
availableCacheSize.add(maxCacheSize);
}
} finally {
lock.unlock();
    }
}
pulsar_NoStrictCacheSizeAllocator_allocate_rdh | /**
 * This operation consumes available cache size.
 * If the requested size exceeds the available size, the allocation is still allowed, because a single
 * entry may be larger than the remaining budget and the query must still be able to finish; in that case
 * the available size temporarily goes negative.
*
* @param size
* allocate size
*/
public void allocate(long size) {
lock.lock();
try {
availableCacheSize.add(-size);
} finally {
lock.unlock();
}
} | 3.26 |
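A standalone sketch of the allocator's over-allocate and clamp-on-release behavior; the `LongAdder` plus lock combination mirrors the fields implied above, and the budget value is illustrative:

```java
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantLock;

public class CacheSizeAllocatorDemo {
    private final long maxCacheSize = 100;
    private final LongAdder availableCacheSize = new LongAdder();
    private final ReentrantLock lock = new ReentrantLock();

    CacheSizeAllocatorDemo() {
        availableCacheSize.add(maxCacheSize);
    }

    void allocate(long size) {
        lock.lock();
        try {
            availableCacheSize.add(-size); // may go negative: over-allocation is allowed
        } finally {
            lock.unlock();
        }
    }

    void release(long size) {
        lock.lock();
        try {
            availableCacheSize.add(size);
            if (availableCacheSize.longValue() > maxCacheSize) {
                availableCacheSize.reset();           // clamp back to the maximum
                availableCacheSize.add(maxCacheSize);
            }
        } finally {
            lock.unlock();
        }
    }

    public static void main(String[] args) {
        CacheSizeAllocatorDemo a = new CacheSizeAllocatorDemo();
        a.allocate(150);                                // exceeds the budget, still allowed
        System.out.println(a.availableCacheSize.sum()); // -50
        a.release(200);
        System.out.println(a.availableCacheSize.sum()); // 100 (clamped from 150)
    }
}
```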
pulsar_PulsarClusterMetadataSetup_createMetadataNode_rdh | /**
 * A wrapper for creating a persistent node with store.put, ignoring the exception when the node already exists.
*/
private static void createMetadataNode(MetadataStore store, String path, byte[] data) throws InterruptedException, ExecutionException {
try {
    store.put(path, data, Optional.of(-1L)).get();
} catch (ExecutionException e) {
if (!(e.getCause() instanceof MetadataStoreException.BadVersionException)) {
throw e;
}
// Ignore
}
} | 3.26 |
pulsar_Subscription_isCumulativeAckMode_rdh | // Subscription utils
static boolean isCumulativeAckMode(SubType subType) {
return SubType.Exclusive.equals(subType) || SubType.Failover.equals(subType);
} | 3.26 |
pulsar_InstanceConfig_getInstanceName_rdh | /**
* Get the string representation of {@link #getInstanceId()}.
*
* @return the string representation of {@link #getInstanceId()}.
*/
public String getInstanceName() {
return "" + instanceId;
} | 3.26 |
pulsar_SchemasImpl_convertGetSchemaResponseToSchemaInfo_rdh | // the util function converts `GetSchemaResponse` to `SchemaInfo`
static SchemaInfo convertGetSchemaResponseToSchemaInfo(TopicName tn, GetSchemaResponse response) {
byte[] schema;
if (response.getType() == SchemaType.KEY_VALUE) {
try {
schema = DefaultImplementation.getDefaultImplementation().convertKeyValueDataStringToSchemaInfoSchema(response.getData().getBytes(UTF_8));
} catch (IOException conversionError) {
throw new RuntimeException(conversionError);
}
} else {
schema = response.getData().getBytes(UTF_8);
}
return SchemaInfo.builder().schema(schema).type(response.getType()).timestamp(response.getTimestamp()).properties(response.getProperties()).name(tn.getLocalName()).build();
} | 3.26 |
pulsar_SchemasImpl_convertSchemaDataToStringLegacy_rdh | // this util function exists for backward-compatibility reasons
static String convertSchemaDataToStringLegacy(SchemaInfo schemaInfo) throws IOException {
byte[] schemaData = schemaInfo.getSchema();
if (schemaData == null) {
    return "";
}
if (schemaInfo.getType() == SchemaType.KEY_VALUE) {
    return DefaultImplementation.getDefaultImplementation().convertKeyValueSchemaInfoDataToString(DefaultImplementation.getDefaultImplementation().decodeKeyValueSchemaInfo(schemaInfo));
}
return new String(schemaData, UTF_8);
} | 3.26 |
pulsar_PulsarKafkaSinkTaskContext_currentOffset_rdh | // for tests
private Long currentOffset(TopicPartition topicPartition) {
Long offset = currentOffsets.computeIfAbsent(topicPartition, kv -> {
List<ByteBuffer> req = Lists.newLinkedList();
ByteBuffer key = topicPartitionAsKey(topicPartition);
req.add(key);
try {
Map<ByteBuffer, ByteBuffer> result = offsetStore.get(req).get();
if ((result != null) && (result.size() != 0)) {
Optional<ByteBuffer> val = result.entrySet().stream().filter(entry -> entry.getKey().equals(key)).findFirst().map(entry -> entry.getValue());
if (val.isPresent()) {
long received = val.get().getLong();
if (log.isDebugEnabled()) {
log.debug("read initial offset for {} == {}", topicPartition, received);}
return received;
}
}
return -1L;
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    log.error("error getting initial state of {}", topicPartition, e);
    throw new RuntimeException("error getting initial state of " + topicPartition, e);
} catch (ExecutionException e) {
    log.error("error getting initial state of {}", topicPartition, e);
    throw new RuntimeException("error getting initial state of " + topicPartition, e);
}
});
return offset;
} | 3.26 |
pulsar_AuthenticationFactoryOAuth2_clientCredentials_rdh | /**
* Authenticate with client credentials.
*
* @param issuerUrl
* the issuer URL
* @param credentialsUrl
* the credentials URL
* @param audience
* An optional field. The audience identifier used by some Identity Providers, like Auth0.
* @param scope
* An optional field. The value of the scope parameter is expressed as a list of space-delimited,
* case-sensitive strings. The strings are defined by the authorization server.
* If the value contains multiple space-delimited strings, their order does not matter,
* and each string adds an additional access range to the requested scope.
* From here: https://datatracker.ietf.org/doc/html/rfc6749#section-4.4.2
* @return an Authentication object
*/
public static Authentication clientCredentials(URL issuerUrl, URL credentialsUrl, String audience, String scope) {
ClientCredentialsFlow flow = ClientCredentialsFlow.builder().issuerUrl(issuerUrl).privateKey(credentialsUrl.toExternalForm()).audience(audience).scope(scope).build();
return new AuthenticationOAuth2(flow, Clock.systemDefaultZone());
} | 3.26 |
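Typical usage wires the returned Authentication into a Pulsar client. A sketch with placeholder URLs, audience, and scope (the surrounding method would declare MalformedURLException and PulsarClientException):

Authentication auth = AuthenticationFactoryOAuth2.clientCredentials(
        new URL("https://auth.example.com/"),           // issuer URL (placeholder)
        new URL("file:///etc/pulsar/credentials.json"), // credentials file (placeholder)
        "urn:example:pulsar",                           // audience (placeholder)
        "read write");                                  // scope (placeholder)
PulsarClient client = PulsarClient.builder()
        .serviceUrl("pulsar+ssl://broker.example.com:6651")
        .authentication(auth)
        .build();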
pulsar_RawBatchMessageContainerImpl_setCryptoKeyReader_rdh | /**
* Sets a CryptoKeyReader instance to encrypt batched messages during serialization, `toByteBuf()`.
*
* @param cryptoKeyReader
* a CryptoKeyReader instance
*/
public void setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) {
this.cryptoKeyReader = cryptoKeyReader;
} | 3.26 |
pulsar_SubscriptionPolicies_checkEmpty_rdh | /**
* Check if this SubscriptionPolicies is empty. Empty SubscriptionPolicies can be auto removed from TopicPolicies.
*
* @return true if this SubscriptionPolicies is empty.
*/
public boolean checkEmpty() {
return dispatchRate == null;
} | 3.26 |
pulsar_FastThreadLocalStateCleaner_cleanupAllFastThreadLocals_rdh | // cleanup all fast thread local state on all active threads
public void cleanupAllFastThreadLocals(BiConsumer<Thread, Object> cleanedValueListener) {
for (Thread thread : ThreadUtils.getAllThreads()) {
cleanupAllFastThreadLocals(thread, cleanedValueListener);
}
} | 3.26 |
pulsar_SaslAuthenticationState_authenticate_rdh | /**
 * Returns null if authentication has completed and no auth data needs to be sent back to the client.
 * If authentication has not completed, performs the auth step and returns the auth data to send back to the client.
*/
@Override
public AuthData authenticate(AuthData authData) throws AuthenticationException {
return pulsarSaslServer.response(authData);
} | 3.26 |
pulsar_RelativeTimeUtil_nsToSeconds_rdh | /**
* Convert nanoseconds to seconds and keep three decimal places.
*
* @param ns
* @return seconds
*/
public static double nsToSeconds(long ns) {
double seconds = ((double) ns) / 1000000000;
BigDecimal bd = new BigDecimal(seconds);
return bd.setScale(3, RoundingMode.HALF_UP).doubleValue();
} | 3.26 |
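For example:

RelativeTimeUtil.nsToSeconds(1_234_567_890L); // -> 1.235 (1.23456789 s rounded HALF_UP to three decimals)
RelativeTimeUtil.nsToSeconds(1_000_000L);     // -> 0.001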
pulsar_ManagedCursor_asyncReadEntriesWithSkipOrWait_rdh | /**
 * Asynchronously read entries from the ManagedLedger, up to the specified number of entries.
 *
 * <p/>If no entries are available, the callback will not be triggered. Instead it will be registered to wait until
 * a new message is persisted into the managed ledger.
 *
 * @see #readEntriesOrWait(int, long)
 * @param maxEntries
 * maximum number of entries to return
 * @param callback
 * callback object
 * @param ctx
 * opaque context
 * @param maxPosition
 * the max position that can be read
 * @param skipCondition
 * predicate used to filter out (skip) positions while reading
 */
default void asyncReadEntriesWithSkipOrWait(int maxEntries, ReadEntriesCallback callback, Object ctx, PositionImpl maxPosition, Predicate<PositionImpl> skipCondition) {
    // The default implementation ignores the skip condition and performs a plain read.
    asyncReadEntriesOrWait(maxEntries, callback, ctx, maxPosition);
} | 3.26 |
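Since this default falls back to a plain read, only implementations that override it actually honor the predicate. A hypothetical invocation, with the aborted-position set, callback, and ctx assumed to be in scope:

Set<PositionImpl> abortedPositions = ConcurrentHashMap.newKeySet(); // maintained elsewhere (hypothetical)
cursor.asyncReadEntriesWithSkipOrWait(100, callback, ctx, PositionImpl.LATEST, abortedPositions::contains);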
pulsar_ManagedCursor_skipNonRecoverableLedger_rdh | /**
 * If a ledger is lost, it will be skipped when "autoSkipNonRecoverableData" is enabled; this method deletes
 * the information about that ledger held by the ManagedCursor.
*/
default void skipNonRecoverableLedger(long ledgerId) {} | 3.26 |
pulsar_ManagedCursor_seek_rdh | /**
* Move the cursor to a different read position.
*
* <p/>If the new position happens to be before the already mark deleted position, it will be set to the mark
* deleted position instead.
*
* @param newReadPosition
* the position where to move the cursor
*/
default void seek(Position newReadPosition) {
    seek(newReadPosition, false);
} | 3.26 |
pulsar_ManagedCursor_m2_rdh | /**
* Asynchronously read entries from the ManagedLedger, up to the specified number and size.
*
 * <p/>If no entries are available, the callback will not be triggered. Instead it will be registered to wait until
 * a new message is persisted into the managed ledger.
 *
 * @see #readEntriesOrWait(int, long)
 * @param maxEntries
 * maximum number of entries to return
 * @param maxSizeBytes
 * max size in bytes of the entries to return
 * @param callback
 * callback object
 * @param ctx
 * opaque context
 * @param maxPosition
 * the max position that can be read
 * @param skipCondition
 * predicate used to filter out (skip) positions while reading
 */
default void m2(int maxEntries, long maxSizeBytes, ReadEntriesCallback callback, Object ctx, PositionImpl maxPosition, Predicate<PositionImpl> skipCondition) {
    // The default implementation ignores the skip condition and performs a plain read.
    asyncReadEntriesOrWait(maxEntries, maxSizeBytes, callback, ctx, maxPosition);
} | 3.26 |
pulsar_ManagedCursor_scan_rdh | /**
* Scan the cursor from the current position up to the end.
 * Please note that this is an expensive operation.
*
* @param startingPosition
* the position to start from, if not provided the scan will start from
* the lastDeleteMarkPosition
* @param condition
* a condition to continue the scan, the condition can access the entry
* @param batchSize
* number of entries to process at each read
* @param maxEntries
* maximum number of entries to scan
* @param timeOutMs
* maximum time to spend on this operation
* @throws InterruptedException
* @throws ManagedLedgerException
*/
default CompletableFuture<ScanOutcome> scan(Optional<Position> startingPosition, Predicate<Entry> condition, int batchSize, long maxEntries, long timeOutMs) {
return CompletableFuture.failedFuture(new UnsupportedOperationException());
} | 3.26 |
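A sketch of driving scan from the default starting point, counting entries until the first one larger than 1 KiB; the cursor variable and all numbers are illustrative:

AtomicLong seen = new AtomicLong();
cursor.scan(
        Optional.empty(),                     // start from the last delete-mark position
        entry -> {
            seen.incrementAndGet();
            return entry.getLength() <= 1024; // returning false stops the scan
        },
        100,     // entries per read batch
        10_000,  // max entries to scan
        30_000   // overall timeout in ms
).thenAccept(outcome -> System.out.println("scan outcome " + outcome + " after " + seen.get() + " entries"));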
pulsar_MessageId_fromByteArray_rdh | /**
* De-serialize a message id from a byte array.
*
* @param data
* byte array containing the serialized message id
* @return the de-serialized messageId object
* @throws IOException
* if the de-serialization fails
*/
static MessageId fromByteArray(byte[] data) throws IOException {
    return DefaultImplementation.getDefaultImplementation().newMessageIdFromByteArray(data);
}
/**
* De-serialize a message id from a byte array with its topic
* information attached.
*
* <p>The topic information is needed when acknowledging a {@link MessageId} on
* a consumer that is consuming from multiple topics.
*
* @param data
* the byte array with the serialized message id
* @param topicName
* the topic name
 * @return a {@link MessageId} instance | 3.26 |
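A round-trip sketch: persist a message id externally, then restore it to reposition a consumer (the message and consumer variables are assumed to be in scope):

byte[] stored = message.getMessageId().toByteArray(); // serialize for external storage
MessageId restored = MessageId.fromByteArray(stored); // de-serialize later
consumer.seek(restored);                              // reposition the consumer at that id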
pulsar_ModularLoadManagerImpl_m0_rdh | /**
* As the leader broker, attempt to automatically detect and split hot namespace bundles.
*/
@Override
public void m0() {
if ((((!conf.isLoadBalancerAutoBundleSplitEnabled()) || (pulsar.getLeaderElectionService() == null)) || (!pulsar.getLeaderElectionService().isLeader())) || (knownBrokers.size() <= 1)) {
return;
}
final boolean unloadSplitBundles = pulsar.getConfiguration().isLoadBalancerAutoUnloadSplitBundlesEnabled();
synchronized(bundleSplitStrategy) {
final Map<String, String> bundlesToBeSplit = bundleSplitStrategy.findBundlesToSplit(loadData, pulsar);
NamespaceBundleFactory namespaceBundleFactory = pulsar.getNamespaceService().getNamespaceBundleFactory();
int splitCount = 0;
for (String bundleName : bundlesToBeSplit.keySet()) {
try {
final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundleName);
final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundleName);
if (!namespaceBundleFactory.canSplitBundle(namespaceBundleFactory.getBundle(namespaceName, bundleRange))) {
continue;
}
// Make sure the same bundle is not selected again.
loadData.getBundleData().remove(bundleName);
localData.getLastStats().remove(bundleName);
// Clear namespace bundle-cache
this.pulsar.getNamespaceService().getNamespaceBundleFactory().invalidateBundleCache(NamespaceName.get(namespaceName));
deleteBundleDataFromMetadataStore(bundleName);
// Check NamespacePolicies and AntiAffinityNamespace support unload bundle.
boolean isUnload = false;
String broker = bundlesToBeSplit.get(bundleName);
if ((unloadSplitBundles && shouldNamespacePoliciesUnload(namespaceName, bundleRange, broker)) && shouldAntiAffinityNamespaceUnload(namespaceName, bundleRange, broker)) {
isUnload = true;
}
log.info("Load-manager splitting bundle {} and unloading {}", bundleName, isUnload);
pulsar.getAdminClient().namespaces().splitNamespaceBundle(namespaceName, bundleRange, isUnload, null);
splitCount++;
log.info("Successfully split namespace bundle {}", bundleName);
} catch (Exception e) {
log.error("Failed to split namespace bundle {}", bundleName, e);
}
}
updateBundleSplitMetrics(splitCount);
}
} | 3.26 |
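The split itself goes through the admin API, so an operator can issue the same four-argument call shown in the loop above. A sketch with placeholder names (a null algorithm name selects the broker's default):

PulsarAdmin admin = PulsarAdmin.builder()
        .serviceHttpUrl("http://broker.example.com:8080") // placeholder admin endpoint
        .build();
admin.namespaces().splitNamespaceBundle(
        "public/default",        // namespace (placeholder)
        "0x00000000_0x40000000", // bundle range (placeholder)
        false,                   // do not unload the split bundles
        null);                   // default split algorithm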
pulsar_ModularLoadManagerImpl_writeBrokerDataOnZooKeeper_rdh | /**
* As any broker, write the local broker data to metadata store.
*/
@Override
public void writeBrokerDataOnZooKeeper() {
writeBrokerDataOnZooKeeper(false);
} | 3.26 |
pulsar_ModularLoadManagerImpl_selectBrokerForAssignment_rdh | /**
* As the leader broker, find a suitable broker for the assignment of the given bundle.
*
* @param serviceUnit
* ServiceUnitId for the bundle.
* @return The name of the selected broker, as it appears on metadata store.
*/
@Override
public Optional<String> selectBrokerForAssignment(final ServiceUnitId serviceUnit) {
// Use brokerCandidateCache as a lock to reduce synchronization.
long startTime = System.nanoTime();
try {
synchronized(brokerCandidateCache) {
final String bundle = serviceUnit.toString();
if (preallocatedBundleToBroker.containsKey(bundle)) {
// If the given bundle is already in preallocated, return the selected broker.
return Optional.of(preallocatedBundleToBroker.get(bundle));
}
Optional<String> broker = selectBroker(serviceUnit);
if (!broker.isPresent()) {
// If no broker is selected, return empty.
return broker;
}
// Add new bundle to preallocated.
preallocateBundle(bundle, broker.get());
return broker;
}
} finally {
selectBrokerForAssignment.observe(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
}
} | 3.26 |
pulsar_ModularLoadManagerImpl_updateBundleUnloadingMetrics_rdh | /**
* As leader broker, update bundle unloading metrics.
*
* @param bundlesToUnload
*/
private void updateBundleUnloadingMetrics(Multimap<String, String> bundlesToUnload) {
unloadBrokerCount += bundlesToUnload.keySet().size();
unloadBundleCount += bundlesToUnload.values().size();
List<Metrics> metrics = new ArrayList<>();
Map<String, String> dimensions = new HashMap<>();
dimensions.put("metric", "bundleUnloading");
Metrics m = Metrics.create(dimensions);
m.put("brk_lb_unload_broker_total", unloadBrokerCount);
m.put("brk_lb_unload_bundle_total", unloadBundleCount);
metrics.add(m);
this.bundleUnloadMetrics.set(metrics);
} | 3.26 |
pulsar_ModularLoadManagerImpl_disableBroker_rdh | /**
* As any broker, disable the broker this manager is running on.
*
* @throws PulsarServerException
* If there's a failure when disabling broker on metadata store.
*/
@Override
public void disableBroker() throws PulsarServerException {
if (StringUtils.isNotEmpty(brokerZnodePath)) {
try {
brokerDataLock.release().join();
} catch (CompletionException e) {
if (e.getCause() instanceof NotFoundException) {
throw new PulsarServerException.NotFoundException(MetadataStoreException.unwrap(e));
} else {
throw new PulsarServerException(MetadataStoreException.unwrap(e));
}
}
}
} | 3.26 |
pulsar_ModularLoadManagerImpl_getBundleDataOrDefault_rdh | // Attempt to locate the data for the given bundle in the metadata store.
// If it cannot be found, return the default bundle data.
@Override
public BundleData getBundleDataOrDefault(final String bundle) {
BundleData bundleData = null;
try {
Optional<BundleData> optBundleData = pulsarResources.getLoadBalanceResources().getBundleDataResources().getBundleData(bundle).join();
if (optBundleData.isPresent()) {
return optBundleData.get();
}
Optional<ResourceQuota> optQuota = resourceQuotaCache.get(String.format("%s/%s", RESOURCE_QUOTA_ZPATH, bundle)).join();
if (optQuota.isPresent()) {
ResourceQuota quota = optQuota.get();
bundleData = new BundleData(NUM_SHORT_SAMPLES, NUM_LONG_SAMPLES);
// Initialize from existing resource quotas if new API ZNodes do not exist.
final TimeAverageMessageData shortTermData = bundleData.getShortTermData();
final TimeAverageMessageData longTermData = bundleData.getLongTermData();
shortTermData.setMsgRateIn(quota.getMsgRateIn());
shortTermData.setMsgRateOut(quota.getMsgRateOut());
shortTermData.setMsgThroughputIn(quota.getBandwidthIn());
shortTermData.setMsgThroughputOut(quota.getBandwidthOut());
longTermData.setMsgRateIn(quota.getMsgRateIn());
longTermData.setMsgRateOut(quota.getMsgRateOut());
longTermData.setMsgThroughputIn(quota.getBandwidthIn());
longTermData.setMsgThroughputOut(quota.getBandwidthOut());
// Assume ample history.
shortTermData.setNumSamples(NUM_SHORT_SAMPLES);
longTermData.setNumSamples(NUM_LONG_SAMPLES);
}
} catch (Exception e) {
log.warn("Error when trying to find bundle {} on metadata store: {}", bundle, e);
}
if (bundleData == null) {
bundleData = new BundleData(NUM_SHORT_SAMPLES, NUM_LONG_SAMPLES, defaultStats);
}
return bundleData;
} | 3.26 |
pulsar_ModularLoadManagerImpl_updateBundleData_rdh | // As the leader broker, use the local broker data saved on metadata store to update the bundle stats so that better
// load management decisions may be made.
private void updateBundleData() {
final Map<String, BundleData> bundleData = loadData.getBundleData();
final Set<String> activeBundles = new HashSet<>();
// Iterate over the broker data.
for (Map.Entry<String, BrokerData> brokerEntry : loadData.getBrokerData().entrySet()) {
final String broker = brokerEntry.getKey();
final BrokerData brokerData = brokerEntry.getValue();
final Map<String, NamespaceBundleStats> statsMap = brokerData.getLocalData().getLastStats();
// Iterate over the last bundle stats available to the current
// broker to update the bundle data.
for (Map.Entry<String, NamespaceBundleStats> entry : statsMap.entrySet()) {
final String bundle = entry.getKey();
final NamespaceBundleStats stats = entry.getValue();
activeBundles.add(bundle);
if (bundleData.containsKey(bundle)) {
// If we recognize the bundle, add these stats as a new sample.
bundleData.get(bundle).update(stats);
} else {
// Otherwise, attempt to find the bundle data on metadata store.
// If it cannot be found, use the latest stats as the first sample.
BundleData currentBundleData = getBundleDataOrDefault(bundle);
currentBundleData.update(stats);
bundleData.put(bundle, currentBundleData);
}
}
// Remove all loaded bundles from the preallocated maps.
final Map<String, BundleData> preallocatedBundleData = brokerData.getPreallocatedBundleData();
Set<String> ownedNsBundles = pulsar.getNamespaceService().getOwnedServiceUnits().stream().map(NamespaceBundle::toString).collect(Collectors.toSet());
synchronized(preallocatedBundleData) {
preallocatedBundleToBroker.keySet().removeAll(preallocatedBundleData.keySet());
final Iterator<Map.Entry<String, BundleData>> preallocatedIterator = preallocatedBundleData.entrySet().iterator();
while (preallocatedIterator.hasNext()) {
final String bundle = preallocatedIterator.next().getKey();
if ((!ownedNsBundles.contains(bundle)) || (brokerData.getLocalData().getBundles().contains(bundle) && bundleData.containsKey(bundle))) {
preallocatedIterator.remove();
}
}
}
// Using the newest data, update the aggregated time-average data for the current broker.
TimeAverageBrokerData timeAverageData = new TimeAverageBrokerData();
timeAverageData.reset(statsMap.keySet(), bundleData, defaultStats);
brokerData.setTimeAverageData(timeAverageData);
final ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> namespaceToBundleRange = brokerToNamespaceToBundleRange.computeIfAbsent(broker, k -> ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<String>>newBuilder().build());
synchronized(namespaceToBundleRange) {
namespaceToBundleRange.clear();
LoadManagerShared.fillNamespaceToBundlesMap(statsMap.keySet(), namespaceToBundleRange);
LoadManagerShared.fillNamespaceToBundlesMap(preallocatedBundleData.keySet(), namespaceToBundleRange);
}
}
// Remove not active bundle from loadData
for (String bundle : bundleData.keySet()) {
if (!activeBundles.contains(bundle)) {
bundleData.remove(bundle);
if (pulsar.getLeaderElectionService().isLeader()) {
deleteBundleDataFromMetadataStore(bundle);
}
}
}
} | 3.26 |
pulsar_ModularLoadManagerImpl_m1_rdh | /**
* As any broker, retrieve the namespace bundle stats and system resource usage to update data local to this broker.
*
 * @return the updated local broker data
 */
@Override
public LocalBrokerData m1() {
lock.lock();
try {
final SystemResourceUsage systemResourceUsage = LoadManagerShared.getSystemResourceUsage(brokerHostUsage);
localData.update(systemResourceUsage, getBundleStats());
updateLoadBalancingMetrics(systemResourceUsage);
if (conf.isExposeBundlesMetricsInPrometheus()) {
updateLoadBalancingBundlesMetrics(getBundleStats());
}
} catch (Exception e) {
log.warn("Error when attempting to update local broker data", e);if (e instanceof ConcurrentModificationException) {
throw ((ConcurrentModificationException) (e));
}
} finally {
lock.unlock();
}
return localData;
} | 3.26 |
pulsar_ModularLoadManagerImpl_getBundleStats_rdh | // Use the Pulsar client to acquire the namespace bundle stats.
private Map<String, NamespaceBundleStats> getBundleStats() {
return pulsar.getBrokerService().getBundleStats();
} | 3.26 |
pulsar_ModularLoadManagerImpl_updateAll_rdh | // Update both the broker data and the bundle data.
public void updateAll() {
if (log.isDebugEnabled()) {
log.debug("Updating broker and bundle data for loadreport");
}
cleanupDeadBrokersData();
updateAllBrokerData();
updateBundleData();
// broker has latest load-report: check if any bundle requires split
m0();
} | 3.26 |
pulsar_ModularLoadManagerImpl_updateLoadBalancingBundlesMetrics_rdh | /**
* As any broker, update its bundle metrics.
*
* @param bundlesData
*/
private void updateLoadBalancingBundlesMetrics(Map<String, NamespaceBundleStats> bundlesData) {
List<Metrics> metrics = new ArrayList<>();
for (Map.Entry<String, NamespaceBundleStats> entry : bundlesData.entrySet()) {
final String bundle = entry.getKey();
final NamespaceBundleStats stats = entry.getValue();
Map<String, String> dimensions = new HashMap<>();
dimensions.put("broker", pulsar.getAdvertisedAddress());
dimensions.put("bundle", bundle);
dimensions.put("metric", "bundle");
Metrics m = Metrics.create(dimensions);
m.put("brk_bundle_msg_rate_in", stats.msgRateIn);
m.put("brk_bundle_msg_rate_out", stats.msgRateOut);
m.put("brk_bundle_topics_count", stats.topics);
m.put("brk_bundle_consumer_count", stats.consumerCount);
m.put("brk_bundle_producer_count", stats.producerCount);
m.put("brk_bundle_msg_throughput_in", stats.msgThroughputIn);
m.put("brk_bundle_msg_throughput_out", stats.msgThroughputOut);
metrics.add(m);
}
this.bundleMetrics.set(metrics);
} | 3.26 |
pulsar_ModularLoadManagerImpl_stop_rdh | /**
* As any broker, stop the load manager.
*
* @throws PulsarServerException
* If an unexpected error occurred when attempting to stop the load manager.
*/
@Override
public void stop() throws PulsarServerException {
executors.shutdownNow();
try {
brokersData.close();
} catch (Exception e) {
log.warn("Failed to release broker lock: {}", e.getMessage());
}
} | 3.26 |