Dataset columns: name — string (length 12 to 178); code_snippet — string (length 8 to 36.5k); score — float64 (range 3.26 to 3.68)
pulsar_NarUnpacker_makeFile_rdh
/** * Creates the specified file, whose contents will come from the <tt>InputStream</tt>. * * @param inputStream * the contents of the file to create. * @param file * the file to create. * @throws IOException * if the file could not be created. */ private static void makeFile(final InputStream inputStream, final File file) throws IOException { try (final InputStream in = inputStream;final FileOutputStream fos = new FileOutputStream(file)) { byte[] bytes = new byte[65536];int numRead; while ((numRead = in.read(bytes)) != (-1)) { fos.write(bytes, 0, numRead); } } }
3.26
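For illustration, a minimal, self-contained sketch of the same 64 KiB buffered-copy pattern that makeFile uses; the class name and sample data here are made up for the demo.

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;

public class CopyDemo {
    public static void main(String[] args) throws IOException {
        File out = File.createTempFile("nar-demo", ".bin");
        // Copy an in-memory stream to the file with the same buffered loop as makeFile.
        try (InputStream in = new ByteArrayInputStream("hello".getBytes());
             FileOutputStream fos = new FileOutputStream(out)) {
            byte[] bytes = new byte[65536];
            int numRead;
            while ((numRead = in.read(bytes)) != -1) {
                fos.write(bytes, 0, numRead);
            }
        }
        System.out.println("Wrote " + out.length() + " bytes to " + out);
    }
}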
pulsar_HierarchicalLedgerUtils_ledgerListToSet_rdh
/** * Get all ledger ids in the given zk path. * * @param ledgerNodes * List of ledgers in the given path * example:- {L1652, L1653, L1650} * @param ledgerRootPath * The root path of the ledgers in zookeeper * @param path * The zookeeper path of the ledger ids. The path should start with ledgerRootPath * example (with ledgerRootPath = /ledgers):- /ledgers/00/0053 * @return the set of ledger ids found under the given path */ NavigableSet<Long> ledgerListToSet(List<String> ledgerNodes, String ledgerRootPath, String path) { NavigableSet<Long> zkActiveLedgers = new TreeSet<>(); if (!path.startsWith(ledgerRootPath)) { log.warn("Ledger path [{}] is not a valid path name, it should start with {}", path, ledgerRootPath); return zkActiveLedgers; } long ledgerIdPrefix = 0; char ch; for (int i = ledgerRootPath.length() + 1; i < path.length(); i++) { ch = path.charAt(i); if ((ch < '0') || (ch > '9')) { continue; } ledgerIdPrefix = (ledgerIdPrefix * 10) + (ch - '0'); } for (String ledgerNode : ledgerNodes) { if (AbstractZkLedgerManager.isSpecialZnode(ledgerNode)) { continue; } long ledgerId = ledgerIdPrefix; for (int i = 0; i < ledgerNode.length(); i++) { ch = ledgerNode.charAt(i); if ((ch < '0') || (ch > '9')) { continue; } ledgerId = (ledgerId * 10) + (ch - '0'); } zkActiveLedgers.add(ledgerId); } return zkActiveLedgers; }
3.26
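As a worked example of the digit-concatenation decoding above, restated as a standalone class for illustration (not the original): the path /ledgers/00/0053 yields the prefix 53, and the znode L1652 extends it to ledger id 531652.

public class LedgerIdDecodeDemo {
    // Standalone restatement of the decoding loops in ledgerListToSet.
    static long decode(String ledgerRootPath, String path, String node) {
        long id = 0;
        // Digits from the path below the root form the prefix: "00/0053" -> 53.
        for (int i = ledgerRootPath.length() + 1; i < path.length(); i++) {
            char ch = path.charAt(i);
            if (ch >= '0' && ch <= '9') {
                id = (id * 10) + (ch - '0');
            }
        }
        // Digits from the znode name are appended: "L1652" -> 531652.
        for (int i = 0; i < node.length(); i++) {
            char ch = node.charAt(i);
            if (ch >= '0' && ch <= '9') {
                id = (id * 10) + (ch - '0');
            }
        }
        return id;
    }

    public static void main(String[] args) {
        System.out.println(decode("/ledgers", "/ledgers/00/0053", "L1652")); // 531652
    }
}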
pulsar_NamespaceBundleStatsComparator_compare_rdh
// sort in reverse order, maximum loaded should be on top; return 0 on ties to respect the comparator contract public int compare(String a, String b) { int result = 0; if (this.f0 == ResourceType.CPU) { result = map.get(a).compareByMsgRate(map.get(b)); } else if (this.f0 == ResourceType.Memory) { result = map.get(a).compareByTopicConnections(map.get(b)); } else if (this.f0 == ResourceType.BandwidthIn) {result = map.get(a).compareByBandwidthIn(map.get(b)); } else if (this.f0 == ResourceType.BandwidthOut) { result = map.get(a).compareByBandwidthOut(map.get(b));} else { result = map.get(a).compareTo(map.get(b)); } if (result > 0) { return -1; } else if (result < 0) { return 1; } else { return 0; }}
3.26
pulsar_BookKeeperTestClient_waitForBookieInSet_rdh
/** * Wait for bookie to appear in either the writable set of bookies, * or the read only set of bookies. Also ensure that it doesn't exist * in the other set before completing. */ private Future<?> waitForBookieInSet(BookieId b, boolean writable) throws Exception { log.info("Wait for {} to become {}", b, writable ? "writable" : "readonly"); CompletableFuture<Void> readOnlyFuture = new CompletableFuture<>(); CompletableFuture<Void> writableFuture = new CompletableFuture<>(); RegistrationListener readOnlyListener = bookies -> { boolean contains = bookies.getValue().contains(b); if (((!writable) && contains) || (writable && (!contains))) { readOnlyFuture.complete(null); } }; RegistrationListener writableListener = bookies -> { boolean contains = bookies.getValue().contains(b); if ((writable && contains) || ((!writable) && (!contains))) { writableFuture.complete(null); } }; getMetadataClientDriver().getRegistrationClient().watchWritableBookies(writableListener); getMetadataClientDriver().getRegistrationClient().watchReadOnlyBookies(readOnlyListener); if (writable) { return writableFuture.thenCompose(ignored -> getMetadataClientDriver().getRegistrationClient().getReadOnlyBookies()).thenCompose(readonlyBookies -> { if (readonlyBookies.getValue().contains(b)) { // if the bookie still shows up at readonly path, wait for it to disappear return readOnlyFuture; } else { return FutureUtils.Void(); } }); } else { return readOnlyFuture.thenCompose(ignored -> getMetadataClientDriver().getRegistrationClient().getWritableBookies()).thenCompose(writableBookies -> {if (writableBookies.getValue().contains(b)) {// if the bookie still shows up at writable path, wait for it to disappear return writableFuture; } else { return FutureUtils.Void();}}); } }
3.26
pulsar_BookKeeperTestClient_readBookiesBlocking_rdh
/** * Force a read to zookeeper to get list of bookies. * * @throws InterruptedException * @throws BKException */public void readBookiesBlocking() throws InterruptedException, BKException { bookieWatcher.initialBlockingBookieRead(); }
3.26
pulsar_SslContextAutoRefreshBuilder_get_rdh
/** * It updates the SSLContext at every configured refresh time and returns the updated SSLContext. * * @return the up-to-date SSL context */ public T get() { T ctx = getSslContext(); if (ctx == null) { try { update(); lastRefreshTime = System.currentTimeMillis(); return getSslContext(); } catch (GeneralSecurityException | IOException e) { log.error("Exception while trying to refresh ssl Context {}", e.getMessage(), e); } } else { long now = System.currentTimeMillis(); if ((refreshTime <= 0) || (now > (lastRefreshTime + refreshTime))) { if (needUpdate()) { try { ctx = update(); lastRefreshTime = now; } catch (GeneralSecurityException | IOException e) { log.error("Exception while trying to refresh ssl Context {} ", e.getMessage(), e); } } } } return ctx; }
3.26
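The time-based refresh logic in get() can be restated as a small generic sketch; all names below are hypothetical, and only the refresh-interval check mirrors the snippet.

import java.time.Clock;
import java.util.function.Supplier;

// Minimal sketch of the refresh-on-interval pattern used by get().
class RefreshingSupplier<T> implements Supplier<T> {
    private final Supplier<T> loader;  // rebuilds the value, e.g. an SSLContext
    private final long refreshMillis;  // <= 0 means refresh on every call
    private final Clock clock;
    private T value;
    private long lastRefresh;

    RefreshingSupplier(Supplier<T> loader, long refreshMillis, Clock clock) {
        this.loader = loader;
        this.refreshMillis = refreshMillis;
        this.clock = clock;
    }

    @Override
    public synchronized T get() {
        long now = clock.millis();
        // Rebuild on first use, when refreshing is unconditional, or when the interval elapsed.
        if (value == null || refreshMillis <= 0 || now > lastRefresh + refreshMillis) {
            value = loader.get();
            lastRefresh = now;
        }
        return value;
    }
}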
pulsar_DeviationShedder_findBundlesForUnloading_rdh
/** * Recommend that all of the returned bundles be unloaded based on observing excessive standard deviations according * to some metric. * * @param loadData * The load data used to make the unloading decision. * @param conf * The service configuration. * @return A map from all selected bundles to the brokers on which they reside. */ @Override public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {final Multimap<String, String> result = ArrayListMultimap.create();bundleTreeSetCache.clear(); metricTreeSetCache.clear(); double sum = 0;double squareSum = 0; final Map<String, BrokerData> brokerDataMap = loadData.getBrokerData(); // Treating each broker as a data point, calculate the sum and squared // sum of the evaluated broker metrics. // These may be used to calculate the standard deviation. for (Map.Entry<String, BrokerData> entry : brokerDataMap.entrySet()) { final double value = brokerValue(entry.getValue(), conf); sum += value; squareSum += value * value; metricTreeSetCache.add(new ImmutablePair<>(value, entry.getKey())); } // Mean cannot change by just moving around bundles. final double mean = sum / brokerDataMap.size(); double v7 = Math.sqrt((squareSum / brokerDataMap.size()) - (mean * mean)); final double deviationThreshold = getDeviationThreshold(conf);String lastMostOverloaded = null; // While the most loaded broker is above the standard deviation // threshold, continue to move bundles. while (((metricTreeSetCache.last().getKey() - mean) / v7) > deviationThreshold) { final Pair<Double, String> mostLoadedPair = metricTreeSetCache.last(); final double v11 = mostLoadedPair.getKey(); final String mostLoaded = mostLoadedPair.getValue(); final Pair<Double, String> leastLoadedPair = metricTreeSetCache.first(); final double leastValue = leastLoadedPair.getKey(); final String leastLoaded = metricTreeSetCache.first().getValue(); if (!mostLoaded.equals(lastMostOverloaded)) { // Reset the bundle tree set now that a different broker is // being considered. bundleTreeSetCache.clear(); for (String bundle : brokerDataMap.get(mostLoaded).getLocalData().getBundles()) { if (!result.containsKey(bundle)) {// Don't consider bundles that are already going to be // moved. bundleTreeSetCache.add(new ImmutablePair<>(bundleValue(bundle, brokerDataMap.get(mostLoaded), conf), bundle)); } } lastMostOverloaded = mostLoaded; } boolean selected = false; while (!(bundleTreeSetCache.isEmpty() || selected)) { Pair<Double, String> mostExpensivePair = bundleTreeSetCache.pollLast(); double loadIncurred = mostExpensivePair.getKey(); // When the bundle is moved, we want the newly least loaded server // to still have a lower overall load than the most loaded server. // Thus, we will only consider moving the bundle if this condition // holds, and otherwise we will try the next bundle. if ((loadIncurred + leastValue) < v11) { // Update the standard deviation and replace the old load // values in the broker tree set with the // load values assuming this move took place.
final String bundleToMove = mostExpensivePair.getValue(); result.put(bundleToMove, mostLoaded); metricTreeSetCache.remove(mostLoadedPair); metricTreeSetCache.remove(leastLoadedPair); final double newHighLoad = v11 - loadIncurred; // the moved load is added to the least loaded broker final double newLowLoad = leastValue + loadIncurred; squareSum -= (v11 * v11) + (leastValue * leastValue); squareSum += (newHighLoad * newHighLoad) + (newLowLoad * newLowLoad); v7 = Math.sqrt((squareSum / brokerDataMap.size()) - (mean * mean)); metricTreeSetCache.add(new ImmutablePair<>(newLowLoad, leastLoaded)); metricTreeSetCache.add(new ImmutablePair<>(newHighLoad, mostLoaded)); selected = true; } } if (!selected) { // Move on to the next broker if no bundle could be moved. metricTreeSetCache.pollLast(); } } return result; }
3.26
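The incremental standard-deviation bookkeeping above (with the corrected sign on the receiving broker's load) can be checked in isolation; the broker loads and the moved load below are made-up numbers.

public class StdDevUpdateDemo {
    public static void main(String[] args) {
        double[] loads = {10, 4, 7};            // hypothetical broker loads
        double sum = 0, squareSum = 0;
        for (double v : loads) { sum += v; squareSum += v * v; }
        double mean = sum / loads.length;        // mean is unchanged by moving bundles
        double stdDev = Math.sqrt(squareSum / loads.length - mean * mean);
        System.out.println("before: stdDev=" + stdDev); // ~2.449

        // Move load 2 from the most loaded broker (10) to the least loaded broker (4):
        // only their two terms of the squared sum change.
        double h = 10, l = 4, x = 2;
        squareSum -= h * h + l * l;
        squareSum += (h - x) * (h - x) + (l + x) * (l + x);
        stdDev = Math.sqrt(squareSum / loads.length - mean * mean);
        System.out.println("after:  stdDev=" + stdDev); // ~0.816, the deviation shrinks
    }
}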
pulsar_PulsarShell_computeDefaultPulsarShellRootDirectory_rdh
/** * Compute the default Pulsar shell root directory. * If the system property "user.home" returns an invalid value, the default value will be the current directory. * * @return the default Pulsar shell root directory */ private static String computeDefaultPulsarShellRootDirectory() { final String userHome = System.getProperty("user.home");if ((!StringUtils.isBlank(userHome)) && (!"?".equals(userHome))) { return userHome; } return System.getProperty("user.dir"); }
3.26
pulsar_SystemTopicClient_deleteAsync_rdh
/** * Async delete event in the system topic. * * @param key * the key of the event * @param t * pulsar event * @return message id future */ default CompletableFuture<MessageId> deleteAsync(String key, T t) { throw new UnsupportedOperationException("Unsupported operation"); }
3.26
pulsar_SystemTopicClient_delete_rdh
/** * Delete event in the system topic. * * @param key * the key of the event * @param t * pulsar event * @return message id * @throws PulsarClientException * if an error occurs while deleting the event */ default MessageId delete(String key, T t) throws PulsarClientException { throw new UnsupportedOperationException("Unsupported operation"); }
3.26
pulsar_ManagedLedgerImpl_getNumberOfEntries_rdh
/** * Get the number of entries between a contiguous range of two positions. * * @param range * the position range * @return the count of entries */ long getNumberOfEntries(Range<PositionImpl> range) { PositionImpl fromPosition = range.lowerEndpoint(); boolean fromIncluded = range.lowerBoundType() == BoundType.CLOSED; PositionImpl toPosition = range.upperEndpoint(); boolean toIncluded = range.upperBoundType() == BoundType.CLOSED; if (fromPosition.getLedgerId() == toPosition.getLedgerId()) { // If the 2 positions are in the same ledger long count = (toPosition.getEntryId() - fromPosition.getEntryId()) - 1; count += (fromIncluded) ? 1 : 0; count += (toIncluded) ? 1 : 0;return count; } else { long count = 0; // If the from & to are pointing to different ledgers, then we need to : // 1. Add the entries in the ledger pointed by toPosition count += toPosition.getEntryId(); count += (toIncluded) ? 1 : 0; // 2. Add the entries in the ledger pointed by fromPosition LedgerInfo li = ledgers.get(fromPosition.getLedgerId()); if (li != null) { count += li.getEntries() - (fromPosition.getEntryId() + 1); count += (fromIncluded) ? 1 : 0; } // 3. Add the whole ledgers entries in between for (LedgerInfo ls : ledgers.subMap(fromPosition.getLedgerId(), false, toPosition.getLedgerId(), false).values()) { count += ls.getEntries(); } return count; } }
3.26
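Worked example for getNumberOfEntries (positions are hypothetical): for two positions in the same ledger, Range.openClosed((5, 2), (5, 7)) gives count = (7 - 2) - 1 = 4, plus 0 for the excluded lower bound and 1 for the included upper bound, i.e. 5 entries (3, 4, 5, 6 and 7).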
pulsar_ManagedLedgerImpl_isFenced_rdh
// The state that is transitioned to when a BK write failure happens // After handling the BK write failure, managed ledger will get signalled to create a new ledger public boolean isFenced() { return false; }
3.26
pulsar_ManagedLedgerImpl_m2_rdh
// ////////////////////////////////////////////////////////////////////// // Private helpers synchronized void m2(final LedgerHandle lh) { final State state = STATE_UPDATER.get(this); LedgerHandle currentLedger = this.currentLedger; if ((currentLedger == lh) && ((state == State.ClosingLedger) || (state == State.LedgerOpened))) { STATE_UPDATER.set(this, State.ClosedLedger);} else if (state == State.Closed) { // The managed ledger was closed during the write operation clearPendingAddEntries(new ManagedLedgerAlreadyClosedException("Managed ledger was already closed")); return;} else { // In case we get multiple write errors for different outstanding write requests, we should close the ledger // just once return; } long entriesInLedger = lh.getLastAddConfirmed() + 1; if (log.isDebugEnabled()) { log.debug("[{}] Ledger has been closed id={} entries={}", f0, lh.getId(), entriesInLedger); } if (entriesInLedger > 0) { LedgerInfo info = LedgerInfo.newBuilder().setLedgerId(lh.getId()).setEntries(entriesInLedger).setSize(lh.getLength()).setTimestamp(clock.millis()).build(); ledgers.put(lh.getId(), info); } else { // The last ledger was empty, so we can discard it ledgers.remove(lh.getId()); mbean.startDataLedgerDeleteOp(); } trimConsumedLedgersInBackground(); maybeOffloadInBackground(NULL_OFFLOAD_PROMISE); if (!pendingAddEntries.isEmpty()) { // Need to create a new ledger to write pending entries createLedgerAfterClosed(); }}
3.26
pulsar_ManagedLedgerImpl_advanceCursorsIfNecessary_rdh
/** * Non-durable cursors have to be moved forward when data is trimmed since they do not retain that data. * This is to make sure that the `consumedEntries` counter is correctly updated with the number of skipped * entries and the stats are reported correctly. */ @VisibleForTesting void advanceCursorsIfNecessary(List<LedgerInfo> ledgersToDelete) throws LedgerNotExistException { if (ledgersToDelete.isEmpty()) { return; } // Just ack messages like a consumer. Normally, consumers will not confirm a position that does not exist, so // find the latest existing position to ack. PositionImpl v191 = calculateLastEntryInLedgerList(ledgersToDelete); if (v191 == null) { log.warn("[{}] The ledgers to be trimmed are all empty, skip to advance non-durable cursors: {}", f0, ledgersToDelete); return; }cursors.forEach(cursor -> { // move the mark delete position to the highestPositionToDelete only if it is smaller than the add confirmed // to prevent the edge case where the cursor is caught up to the latest and highestPositionToDelete may be // larger than the last add confirmed if (((v191.compareTo(((PositionImpl) (cursor.getMarkDeletedPosition()))) > 0) && (v191.compareTo(((PositionImpl) (cursor.getManagedLedger().getLastConfirmedEntry()))) <= 0)) && (!(((!cursor.isDurable()) && (cursor instanceof NonDurableCursorImpl)) && ((NonDurableCursorImpl) (cursor)).isReadCompacted()))) { cursor.asyncMarkDelete(v191, cursor.getProperties(), new MarkDeleteCallback() { @Override public void m7(Object ctx) { } @Override public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { ManagedLedgerImpl.log.warn("[{}] Failed to mark delete while trimming data ledgers: {}", f0, exception.getMessage()); } }, null); } }); }
3.26
pulsar_ManagedLedgerImpl_delete_rdh
/** * Delete this ManagedLedger completely from the system. * * @throws InterruptedException * @throws ManagedLedgerException */ @Overridepublic void delete() throws InterruptedException, ManagedLedgerException { final CountDownLatch counter = new CountDownLatch(1); final AtomicReference<ManagedLedgerException> exception = new AtomicReference<>(); asyncDelete(new DeleteLedgerCallback() { @Override public void deleteLedgerComplete(Object ctx) { counter.countDown(); } @Override public void deleteLedgerFailed(ManagedLedgerException e, Object ctx) { exception.set(e); counter.countDown(); } }, null); if (!counter.await(AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) { throw new ManagedLedgerException("Timeout during managed ledger delete operation");} if (exception.get() != null) { log.error("[{}] Error deleting managed ledger", f0, exception.get()); throw exception.get(); } }
3.26
pulsar_ManagedLedgerImpl_getPositionAfterN_rdh
/** * Get the entry position at a given distance from a given position. * * @param startPosition * starting position * @param n * number of entries to skip ahead * @param startRange * specifies whether to include the start position in calculating the distance * @return the new position that is n entries ahead */ public PositionImpl getPositionAfterN(final PositionImpl startPosition, long n, PositionBound startRange) { long entriesToSkip = n; long currentLedgerId; long currentEntryId; if (startRange == PositionBound.startIncluded) { currentLedgerId = startPosition.getLedgerId();currentEntryId = startPosition.getEntryId(); } else { PositionImpl nextValidPosition = getNextValidPosition(startPosition); currentLedgerId = nextValidPosition.getLedgerId(); currentEntryId = nextValidPosition.getEntryId(); } boolean lastLedger = false; long totalEntriesInCurrentLedger; while (entriesToSkip >= 0) { // for the current ledger, the number of entries written is deduced from the lastConfirmedEntry // for previous ledgers, LedgerInfo in ZK has the number of entries if ((currentLedger != null) && (currentLedgerId == currentLedger.getId())) { lastLedger = true; if (currentLedgerEntries > 0) { totalEntriesInCurrentLedger = lastConfirmedEntry.getEntryId() + 1; } else { totalEntriesInCurrentLedger = 0; } } else { LedgerInfo ledgerInfo = ledgers.get(currentLedgerId); totalEntriesInCurrentLedger = (ledgerInfo != null) ? ledgerInfo.getEntries() : 0; } long unreadEntriesInCurrentLedger = (totalEntriesInCurrentLedger > 0) ? totalEntriesInCurrentLedger - currentEntryId : 0; if (unreadEntriesInCurrentLedger >= entriesToSkip) {// if the current ledger has more entries than what we need to skip // then the return position is in the same ledger currentEntryId += entriesToSkip; break; } else { // skip remaining entry from the next ledger entriesToSkip -= unreadEntriesInCurrentLedger; if (lastLedger) { // there are no more ledgers, return the last position currentEntryId = totalEntriesInCurrentLedger; break; } Long lid = ledgers.ceilingKey(currentLedgerId + 1); currentLedgerId = (lid != null) ? lid : ledgers.lastKey(); currentEntryId = 0; } } PositionImpl positionToReturn = getPreviousPosition(PositionImpl.get(currentLedgerId, currentEntryId)); if (positionToReturn.compareTo(lastConfirmedEntry) > 0) { positionToReturn = lastConfirmedEntry; } if (log.isDebugEnabled()) { log.debug("getPositionAfterN: Start position {}:{}, startIncluded: {}, Return position {}:{}", startPosition.getLedgerId(), startPosition.getEntryId(), startRange, positionToReturn.getLedgerId(), positionToReturn.getEntryId()); } return positionToReturn; }
3.26
pulsar_ManagedLedgerImpl_invalidateEntriesUpToSlowestReaderPosition_rdh
// slowest reader position is earliest mark delete position when cacheEvictionByMarkDeletedPosition=true // it is the earliest read position when cacheEvictionByMarkDeletedPosition=false private void invalidateEntriesUpToSlowestReaderPosition() { if (entryCache.getSize() <= 0) { return; } if (!activeCursors.isEmpty()) { PositionImpl evictionPos = activeCursors.getSlowestReaderPosition();if (evictionPos != null) { entryCache.invalidateEntries(evictionPos); } } else { entryCache.clear();} }
3.26
pulsar_ManagedLedgerImpl_checkFenced_rdh
/** * Throws an exception if the managed ledger has been previously fenced. * * @throws ManagedLedgerException */ private void checkFenced() throws ManagedLedgerException { if (STATE_UPDATER.get(this).isFenced()) { log.error("[{}] Attempted to use a fenced managed ledger", f0); throw new ManagedLedgerFencedException(); } }
3.26
pulsar_ManagedLedgerImpl_isBkErrorNotRecoverable_rdh
/** * Return true for BK error codes that are considered not likely to be recoverable. */ private static boolean isBkErrorNotRecoverable(int rc) { switch (rc) { case Code.NoSuchLedgerExistsException : case Code.NoSuchLedgerExistsOnMetadataServerException :case Code.NoSuchEntryException : return true; default : return false; } }
3.26
pulsar_ManagedLedgerImpl_getFirstPositionAndCounter_rdh
/** * Get the first position written in the managed ledger, along with the associated counter. */ Pair<PositionImpl, Long> getFirstPositionAndCounter() { PositionImpl pos; long count; Pair<PositionImpl, Long> lastPositionAndCounter;do { pos = getFirstPosition(); lastPositionAndCounter = getLastPositionAndCounter(); count = lastPositionAndCounter.getRight() - getNumberOfEntries(Range.openClosed(pos, lastPositionAndCounter.getLeft())); } while ((pos.compareTo(getFirstPosition()) != 0) || (lastPositionAndCounter.getLeft().compareTo(getLastPosition()) != 0) ); return Pair.of(pos, count); }
3.26
pulsar_ManagedLedgerImpl_hasActiveCursors_rdh
/** * Tells whether the managed ledger has any active-cursor registered. * * @return true if at least one cursor exists */ public boolean hasActiveCursors() { // Use hasCursors instead of isEmpty because isEmpty does not take into account non-durable cursors return !activeCursors.isEmpty(); }
3.26
pulsar_ManagedLedgerImpl_releaseReadHandleIfNoLongerRead_rdh
/** * * @param ledgerId * the id of the ledger whose cached read handle may be released. * @param slowestNonDurationLedgerId * the slowest ledger id that still needs to be read. * @return true if the read handle was released. */ private boolean releaseReadHandleIfNoLongerRead(long ledgerId, long slowestNonDurationLedgerId) {if (ledgerId < slowestNonDurationLedgerId) { if (log.isDebugEnabled()) { log.debug("[{}] Ledger {} no longer needs to be read, close the cached readHandle", f0, ledgerId); } invalidateReadHandle(ledgerId); return true; } return false; }
3.26
pulsar_ManagedLedgerImpl_createComplete_rdh
// ////////////////////////////////////////////////////////////////////// // Callbacks @Override public synchronized void createComplete(int rc, final LedgerHandle lh, Object ctx) { if (STATE_UPDATER.get(this) == State.Closed) { if (lh != null) { log.warn("[{}] ledger create completed after the managed ledger is closed rc={} ledger={}, so just" + " close this ledger handle.", f0, rc, lh != null ? lh.getId() : -1); lh.closeAsync(); } return; } if (log.isDebugEnabled()) { log.debug("[{}] createComplete rc={} ledger={}", f0, rc, lh != null ? lh.getId() : -1);} if (checkAndCompleteLedgerOpTask(rc, lh, ctx)) { return; } mbean.endDataLedgerCreateOp(); if (rc != Code.OK) { log.error("[{}] Error creating ledger rc={} {}", f0, rc, BKException.getMessage(rc)); ManagedLedgerException status = createManagedLedgerException(rc); // no pending entries means that creating this new ledger is NOT caused by write failure if (pendingAddEntries.isEmpty()) { STATE_UPDATER.set(this, State.ClosedLedger); } else { STATE_UPDATER.set(this, State.WriteFailed); } // Empty the list of pending requests and make all of them fail clearPendingAddEntries(status); f1 = clock.millis(); } else { log.info("[{}] Created new ledger {}", f0, lh.getId()); LedgerInfo newLedger = LedgerInfo.newBuilder().setLedgerId(lh.getId()).setTimestamp(0).build(); final MetaStoreCallback<Void> cb = new MetaStoreCallback<Void>() { @Override public void operationComplete(Void v, Stat stat) { if (log.isDebugEnabled()) { log.debug("[{}] Updating of ledgers list after create complete. version={}", f0, stat); } ledgersStat = stat; synchronized(ManagedLedgerImpl.this) { LedgerHandle originalCurrentLedger = currentLedger; ledgers.put(lh.getId(), newLedger); currentLedger = lh;currentLedgerEntries = 0; currentLedgerSize = 0; updateLedgersIdsComplete(originalCurrentLedger); mbean.addLedgerSwitchLatencySample(System.currentTimeMillis() - lastLedgerCreationInitiationTimestamp, TimeUnit.MILLISECONDS); } metadataMutex.unlock(); // May need to update the cursor position maybeUpdateCursorBeforeTrimmingConsumedLedger(); } @Override public void operationFailed(MetaStoreException e) { log.warn("[{}] Error updating meta data with the new list of ledgers: {}", f0, e.getMessage()); handleBadVersion(e); mbean.startDataLedgerDeleteOp();bookKeeper.asyncDeleteLedger(lh.getId(), (rc1, ctx1) -> { mbean.endDataLedgerDeleteOp(); if (rc1 != BKException.Code.OK) { ManagedLedgerImpl.log.warn("[{}] Failed to delete ledger {}: {}", f0, lh.getId(), BKException.getMessage(rc1)); } }, null); if (e instanceof BadVersionException) { synchronized(ManagedLedgerImpl.this) { log.error("[{}] Failed to update ledger list. z-node version mismatch. Closing managed ledger", f0); f1 = clock.millis(); // Return ManagedLedgerFencedException to addFailed callback // to indicate that the ledger is now fenced and topic needs to be closed clearPendingAddEntries(new ManagedLedgerFencedException(e)); // Do not need to unlock metadataMutex here because we are going to close to topic // anyways return; } } metadataMutex.unlock(); synchronized(ManagedLedgerImpl.this) {f1 = clock.millis(); STATE_UPDATER.set(ManagedLedgerImpl.this, State.ClosedLedger); clearPendingAddEntries(e); } } }; updateLedgersListAfterRollover(cb, newLedger); } }
3.26
pulsar_ManagedLedgerImpl_checkAndCompleteLedgerOpTask_rdh
/** * check if ledger-op task is already completed by timeout-task. If completed then delete the created ledger * * @return true if the ledger-op task was already completed by the timeout-task */ protected boolean checkAndCompleteLedgerOpTask(int rc, LedgerHandle lh, Object ctx) { if (ctx instanceof CompletableFuture) { // ledger-creation is already timed out and callback is already completed so, delete this ledger and return. if (((CompletableFuture) (ctx)).complete(lh)) { return false; } else { if (rc == Code.OK) { log.warn("[{}]-{} ledger creation timed-out, deleting ledger", this.f0, lh.getId()); asyncDeleteLedger(lh.getId(), DEFAULT_LEDGER_DELETE_RETRIES); } return true; }} return false; }
3.26
pulsar_ManagedLedgerImpl_calculateLastEntryInLedgerList_rdh
/** * * @return null if all the ledgers are empty. */ private PositionImpl calculateLastEntryInLedgerList(List<LedgerInfo> ledgersToDelete) { for (int i = ledgersToDelete.size() - 1; i >= 0; i--) { LedgerInfo ledgerInfo = ledgersToDelete.get(i); if (((ledgerInfo != null) && ledgerInfo.hasEntries()) && (ledgerInfo.getEntries() > 0)) { return PositionImpl.get(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1); } } return null;}
3.26
pulsar_ManagedLedgerImpl_getLastPositionAndCounter_rdh
/** * Get the last position written in the managed ledger, along with the associated counter. */ Pair<PositionImpl, Long> getLastPositionAndCounter() { PositionImpl pos; long count; do {pos = lastConfirmedEntry;count = ENTRIES_ADDED_COUNTER_UPDATER.get(this);// Ensure no entry was written while reading the two values } while (pos.compareTo(lastConfirmedEntry) != 0 ); return Pair.of(pos, count); }
3.26
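The do/while above is an optimistic read of a value pair: read both values, then re-check the first and retry if a writer slipped in between. A generic sketch of the same pattern (class and field names are hypothetical):

import java.util.concurrent.atomic.AtomicLong;

// Optimistic "read a consistent pair" loop, as used by getLastPositionAndCounter().
class PairSnapshot {
    volatile long lastPosition;                 // updated together with the counter by writers
    final AtomicLong entriesAdded = new AtomicLong();

    long[] snapshot() {
        long pos, count;
        do {
            pos = lastPosition;
            count = entriesAdded.get();
            // retry if lastPosition moved between the two reads
        } while (pos != lastPosition);
        return new long[]{pos, count};
    }
}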
pulsar_ManagedLedgerImpl_getPreviousPosition_rdh
/** * Get the entry position that comes before the specified position in the message stream, using information from the * ledger list and each ledger entries count. * * @param position * the current position * @return the previous position */ public PositionImpl getPreviousPosition(PositionImpl position) {if (position.getEntryId() > 0) { return PositionImpl.get(position.getLedgerId(), position.getEntryId() - 1); } // The previous position will be the last position of an earlier ledger NavigableMap<Long, LedgerInfo> headMap = ledgers.headMap(position.getLedgerId(), false); final Map.Entry<Long, LedgerInfo> firstEntry = headMap.firstEntry(); if (firstEntry == null) { // There is no previous ledger, return an invalid position in the current ledger return PositionImpl.get(position.getLedgerId(), -1); } // We need to find the most recent non-empty ledger for (long ledgerId : headMap.descendingKeySet()) { LedgerInfo li = headMap.get(ledgerId); if ((li != null) && (li.getEntries() > 0)) { return PositionImpl.get(li.getLedgerId(), li.getEntries() - 1); } } // in case there are only empty ledgers, we return a position in the first one return PositionImpl.get(firstEntry.getKey(), -1); }
3.26
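Worked example for getPreviousPosition (ledger ids are hypothetical): with ledgers {3: 5 entries, 4: empty, 5: current}, getPreviousPosition((5, 3)) takes the fast path and returns (5, 2), while getPreviousPosition((5, 0)) scans earlier ledgers in descending order, skips the empty ledger 4, and returns (3, 4), the last entry of the most recent non-empty ledger.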
pulsar_ManagedLedgerImpl_isValidPosition_rdh
/** * Validate whether a specified position is valid for the current managed ledger. * * @param position * the position to validate * @return true if the position is valid, false otherwise */ public boolean isValidPosition(PositionImpl position) { PositionImpl lac = lastConfirmedEntry; if (log.isDebugEnabled()) { log.debug("IsValid position: {} -- last: {}", position, lac); } if (!ledgers.containsKey(position.getLedgerId())) { return false; } else if (position.getEntryId() < 0) { return false; } else if ((currentLedger != null) && (position.getLedgerId() == currentLedger.getId())) { // If current ledger is empty, the largest read position can be "{current_ledger: 0}". // Else, the read position can be set to "{LAC + 1}" when subscribe at LATEST, return ((position.getLedgerId() == lac.getLedgerId()) && (position.getEntryId() <= (lac.getEntryId() + 1))) || (position.getEntryId() == 0); } else if (position.getLedgerId() == lac.getLedgerId()) { // The ledger which maintains the LAC was closed, and there is an empty current ledger. // If entry id is larger than LAC, it should be "{current_ledger: 0}". return position.getEntryId() <= lac.getEntryId(); } else { // Look in the ledgers map LedgerInfo ls = ledgers.get(position.getLedgerId()); if (ls == null) { if (position.getLedgerId() < lac.getLedgerId()) { // Pointing to a non-existing ledger that is older than the current ledger is invalid return false; } else { // Pointing to a non-existing ledger is only legitimate if the ledger was empty return position.getEntryId() == 0; } } return position.getEntryId() < ls.getEntries(); } }
3.26
pulsar_ManagedLedgerImpl_getEstimatedBacklogSize_rdh
/** * Get estimated backlog size from a specific position. */ public long getEstimatedBacklogSize(PositionImpl pos) { if (pos == null) {return 0; } return estimateBacklogFromPosition(pos); }
3.26
pulsar_ManagedLedgerImpl_asyncCreateLedger_rdh
/** * Create a ledger asynchronously and schedule a timeout task to check whether the ledger creation has completed; * otherwise it fails the callback with a TimeoutException. * * @param bookKeeper * @param config * @param digestType * @param cb * @param metadata */ protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType, CreateCallback cb, Map<String, byte[]> metadata) { CompletableFuture<LedgerHandle> ledgerFutureHook = new CompletableFuture<>(); Map<String, byte[]> finalMetadata = new HashMap<>(); finalMetadata.putAll(ledgerMetadata); finalMetadata.putAll(metadata); if ((config.getBookKeeperEnsemblePlacementPolicyClassName() != null) && (config.getBookKeeperEnsemblePlacementPolicyProperties() != null)) { try { finalMetadata.putAll(LedgerMetadataUtils.buildMetadataForPlacementPolicyConfig(config.getBookKeeperEnsemblePlacementPolicyClassName(), config.getBookKeeperEnsemblePlacementPolicyProperties())); } catch (EnsemblePlacementPolicyConfig.ParseEnsemblePlacementPolicyConfigException e) { log.error("[{}] Serialize the placement configuration failed", f0, e); cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook); return; } } createdLedgerCustomMetadata = finalMetadata; try { bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(), config.getAckQuorumSize(), digestType, config.getPassword(), cb, ledgerFutureHook, finalMetadata); } catch (Throwable cause) {log.error("[{}] Encountered unexpected error when creating ledger", f0, cause); ledgerFutureHook.completeExceptionally(cause); cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook); return;} ScheduledFuture timeoutChecker = scheduledExecutor.schedule(() -> { if ((!ledgerFutureHook.isDone()) && ledgerFutureHook.completeExceptionally(new TimeoutException(f0 + " Create ledger timeout"))) { if (log.isDebugEnabled()) { log.debug("[{}] Timeout creating ledger", f0); } cb.createComplete(BKException.Code.TimeoutException, null, ledgerFutureHook); } else if (log.isDebugEnabled()) { log.debug("[{}] Ledger already created when timeout task is triggered", f0); } }, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS); ledgerFutureHook.whenComplete((ignore, ex) -> { timeoutChecker.cancel(false); }); }
3.26
pulsar_MultiTopicsConsumerImpl_getPartitionsOfTheTopicMap_rdh
// get the total number of partitions in the topics map int getPartitionsOfTheTopicMap() { return partitionedTopics.values().stream().mapToInt(Integer::intValue).sum(); }
3.26
pulsar_MultiTopicsConsumerImpl_getPartitionedTopics_rdh
// get topic names public List<String> getPartitionedTopics() { return partitionedTopics.keySet().stream().collect(Collectors.toList()); }
3.26
pulsar_MultiTopicsConsumerImpl_messageReceived_rdh
// Must be called from the internalPinnedExecutor thread private void messageReceived(ConsumerImpl<T> consumer, Message<T> message) { checkArgument(message instanceof MessageImpl); TopicMessageImpl<T> topicMessage = new TopicMessageImpl<>(consumer.getTopic(), message, consumer); if (log.isDebugEnabled()) { log.debug("[{}][{}] Received message from topics-consumer {}", topic, subscription, message.getMessageId()); } // if asyncReceive is waiting : return message to callback without adding to incomingMessages queue CompletableFuture<Message<T>> receivedFuture = nextPendingReceive(); if (receivedFuture != null) { unAckedMessageTracker.add(topicMessage.getMessageId(), topicMessage.getRedeliveryCount()); completePendingReceive(receivedFuture, topicMessage); } else if (enqueueMessageAndCheckBatchReceive(topicMessage) && hasPendingBatchReceive()) { notifyPendingBatchReceivedCallBack(); } tryTriggerListener(); }
3.26
pulsar_MultiTopicsConsumerImpl_removeConsumerAsync_rdh
// Remove a consumer for a topic public CompletableFuture<Void> removeConsumerAsync(String topicName) { checkArgument(TopicName.isValid(topicName), "Invalid topic name:" + topicName); if ((getState() == State.Closing) || (getState() == State.Closed)) { return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Topics Consumer was already closed")); } CompletableFuture<Void> unsubscribeFuture = new CompletableFuture<>(); String topicPartName = TopicName.get(topicName).getPartitionedTopicName(); List<ConsumerImpl<T>> v90 = consumers.values().stream().filter(consumer -> { String consumerTopicName = consumer.getTopic(); return TopicName.get(consumerTopicName).getPartitionedTopicName().equals(topicPartName); }).collect(Collectors.toList()); List<CompletableFuture<Void>> futureList = v90.stream().map(ConsumerImpl::closeAsync).collect(Collectors.toList()); FutureUtil.waitForAll(futureList).whenComplete((r, ex) -> { if (ex == null) { v90.forEach(consumer1 -> { consumers.remove(consumer1.getTopic()); pausedConsumers.remove(consumer1); allTopicPartitionsNumber.decrementAndGet(); }); removeTopic(topicName); if (unAckedMessageTracker instanceof UnAckedTopicMessageTracker) { ((UnAckedTopicMessageTracker) (unAckedMessageTracker)).removeTopicMessages(topicName); } unsubscribeFuture.complete(null); log.info("[{}] [{}] [{}] Removed Topics Consumer, allTopicPartitionsNumber: {}", topicName, subscription, consumerName, allTopicPartitionsNumber); } else { unsubscribeFuture.completeExceptionally(ex); setState(State.Failed); log.error("[{}] [{}] [{}] Could not remove Topics Consumer", topicName, subscription, consumerName, ex.getCause()); } }); return unsubscribeFuture; }
3.26
pulsar_MultiTopicsConsumerImpl_unsubscribeAsync_rdh
// un-subscribe a given topic public CompletableFuture<Void> unsubscribeAsync(String topicName) { checkArgument(TopicName.isValid(topicName), "Invalid topic name:" + topicName); if ((getState() == State.Closing) || (getState() == State.Closed)) { return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Topics Consumer was already closed")); } if (partitionsAutoUpdateTimeout != null) { partitionsAutoUpdateTimeout.cancel(); partitionsAutoUpdateTimeout = null; } CompletableFuture<Void> unsubscribeFuture = new CompletableFuture<>(); String v84 = TopicName.get(topicName).getPartitionedTopicName(); List<ConsumerImpl<T>> consumersToUnsub = consumers.values().stream().filter(consumer -> { String consumerTopicName = consumer.getTopic(); return TopicName.get(consumerTopicName).getPartitionedTopicName().equals(v84); }).collect(Collectors.toList()); List<CompletableFuture<Void>> futureList = consumersToUnsub.stream().map(ConsumerImpl::unsubscribeAsync).collect(Collectors.toList()); FutureUtil.waitForAll(futureList).whenComplete((r, ex) -> { if (ex == null) { consumersToUnsub.forEach(consumer1 -> { consumers.remove(consumer1.getTopic()); pausedConsumers.remove(consumer1); allTopicPartitionsNumber.decrementAndGet(); }); removeTopic(topicName); if (unAckedMessageTracker instanceof UnAckedTopicMessageTracker) { ((UnAckedTopicMessageTracker) (unAckedMessageTracker)).removeTopicMessages(topicName); } unsubscribeFuture.complete(null); log.info("[{}] [{}] [{}] Unsubscribed Topics Consumer, allTopicPartitionsNumber: {}", topicName, subscription, consumerName, allTopicPartitionsNumber); } else { unsubscribeFuture.completeExceptionally(ex); setState(State.Failed); log.error("[{}] [{}] [{}] Could not unsubscribe Topics Consumer", topicName, subscription, consumerName, ex.getCause()); } }); return unsubscribeFuture; }
3.26
pulsar_MultiTopicsConsumerImpl_topicNamesValid_rdh
// Check topics are valid. // - each topic is valid, // - topic names are unique. private static boolean topicNamesValid(Collection<String> topics) { checkState((topics != null) && (topics.size() >= 1), "topics should contain at least 1 topic"); Optional<String> result = topics.stream().filter(topic -> !TopicName.isValid(topic)).findFirst(); if (result.isPresent()) { log.warn("Received invalid topic name: {}", result.get()); return false; } // check topic names are unique HashSet<String> set = new HashSet<>(topics); if (set.size() == topics.size()) { return true; } else { log.warn("Topic names not unique. unique/all : {}/{}", set.size(), topics.size()); return false; } }
3.26
pulsar_MultiTopicsConsumerImpl_createPartitionedConsumer_rdh
// create consumer for a single topic with already known partitions. // first create a consumer with no topic, then do subscription for already known partitionedTopic. public static <T> MultiTopicsConsumerImpl<T> createPartitionedConsumer(PulsarClientImpl client, ConsumerConfigurationData<T> conf, ExecutorProvider executorProvider, CompletableFuture<Consumer<T>> subscribeFuture, int numPartitions, Schema<T> schema, ConsumerInterceptors<T> interceptors) { checkArgument(conf.getTopicNames().size() == 1, "Should have only 1 topic for partitioned consumer"); // get topic name, then remove it from conf, so constructor will create a consumer with no topic. ConsumerConfigurationData<T> cloneConf = conf.clone(); String topicName = cloneConf.getSingleTopic(); cloneConf.getTopicNames().remove(topicName); CompletableFuture<Consumer<T>> future = new CompletableFuture<>(); MultiTopicsConsumerImpl<T> consumer = /* createTopicIfDoesNotExist */ new MultiTopicsConsumerImpl<T>(client, topicName, cloneConf, executorProvider, future, schema, interceptors, true); future.thenCompose(c -> ((MultiTopicsConsumerImpl<T>) (c)).subscribeAsync(topicName, numPartitions)).thenRun(() -> subscribeFuture.complete(consumer)).exceptionally(e -> { log.warn("Failed subscription for createPartitionedConsumer: {} {}, e:{}", topicName, numPartitions, e); consumer.cleanupMultiConsumer(); subscribeFuture.completeExceptionally(PulsarClientException.wrap(((Throwable) (e)).getCause(), String.format("Failed to subscribe %s with %d partitions", topicName, numPartitions))); return null; }); return consumer; }
3.26
pulsar_MultiTopicsConsumerImpl_m2_rdh
// get partitioned consumers public List<ConsumerImpl<T>> m2() { return consumers.values().stream().collect(Collectors.toList()); }
3.26
pulsar_MultiTopicsConsumerImpl_onTopicsExtended_rdh
// Check partitions changes of passed in topics, and subscribe new added partitions. @Override public CompletableFuture<Void> onTopicsExtended(Collection<String> topicsExtended) { CompletableFuture<Void> future = new CompletableFuture<>(); if (topicsExtended.isEmpty()) { future.complete(null); return future; } if (log.isDebugEnabled()) { log.debug("[{}] run onTopicsExtended: {}, size: {}", topic, topicsExtended.toString(), topicsExtended.size()); } List<CompletableFuture<Void>> futureList = Lists.newArrayListWithExpectedSize(topicsExtended.size()); topicsExtended.forEach(topic -> futureList.add(subscribeIncreasedTopicPartitions(topic))); FutureUtil.waitForAll(futureList).thenAccept(finalFuture -> future.complete(null)).exceptionally(ex -> { log.warn("[{}] Failed to subscribe increased topics partitions: {}", topic, ex.getMessage()); future.completeExceptionally(ex); return null; }); return future; }
3.26
pulsar_MultiTopicsConsumerImpl_subscribeAsync_rdh
// subscribe to one more topic whose number of partitions is already known CompletableFuture<Void> subscribeAsync(String topicName, int numberPartitions) { TopicName topicNameInstance = getTopicName(topicName); if (topicNameInstance == null) { return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Topic name not valid")); } String fullTopicName = topicNameInstance.toString(); if (consumers.containsKey(fullTopicName) || partitionedTopics.containsKey(topicNameInstance.getPartitionedTopicName())) { return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Already subscribed to " + topicName)); } if ((getState() == State.Closing) || (getState() == State.Closed)) { return FutureUtil.failedFuture(new PulsarClientException.AlreadyClosedException("Topics Consumer was already closed")); } CompletableFuture<Void> subscribeResult = new CompletableFuture<>(); /* createTopicIfDoesNotExist */ subscribeTopicPartitions(subscribeResult, fullTopicName, numberPartitions, true); return subscribeResult; }
3.26
pulsar_MultiTopicsConsumerImpl_handleSubscribeOneTopicError_rdh
// handle failure during subscribing to a new topic: close the partition consumers that were successfully created private void handleSubscribeOneTopicError(String topicName, Throwable error, CompletableFuture<Void> subscribeFuture) { log.warn("[{}] Failed to subscribe for topic [{}] in topics consumer {}", topic, topicName, error.getMessage()); client.externalExecutorProvider().getExecutor().execute(() -> {AtomicInteger toCloseNum = new AtomicInteger(0); List<ConsumerImpl> filterConsumers = consumers.values().stream().filter(consumer1 -> { String consumerTopicName = consumer1.getTopic(); if (TopicName.get(consumerTopicName).getPartitionedTopicName().equals(TopicName.get(topicName).getPartitionedTopicName())) { toCloseNum.incrementAndGet(); return true; } else { return false; } }).collect(Collectors.toList()); if (filterConsumers.isEmpty()) { subscribeFuture.completeExceptionally(error); return; } filterConsumers.forEach(consumer2 -> { consumer2.closeAsync().whenComplete((r, ex) -> { consumer2.subscribeFuture().completeExceptionally(error); allTopicPartitionsNumber.decrementAndGet(); consumers.remove(consumer2.getTopic()); if (toCloseNum.decrementAndGet() == 0) { log.warn("[{}] Failed to subscribe for topic [{}] in topics consumer, subscribe error: {}", topic, topicName, error.getMessage()); removeTopic(topicName); subscribeFuture.completeExceptionally(error); } return; }); }); }); }
3.26
pulsar_MultiTopicsConsumerImpl_subscribeIncreasedTopicPartitions_rdh
// subscribe increased partitions for a given topic private CompletableFuture<Void> subscribeIncreasedTopicPartitions(String topicName) { int oldPartitionNumber = partitionedTopics.get(topicName); return client.getPartitionsForTopic(topicName).thenCompose(list -> { int currentPartitionNumber = Long.valueOf(list.stream().filter(t -> TopicName.get(t).isPartitioned()).count()).intValue(); if (log.isDebugEnabled()) { log.debug("[{}] partitions number. old: {}, new: {}", topicName, oldPartitionNumber, currentPartitionNumber); } if (oldPartitionNumber == currentPartitionNumber) { // topic partition number not changed return CompletableFuture.completedFuture(null); } else if (currentPartitionNumber == PartitionedTopicMetadata.NON_PARTITIONED) { // The topic was initially partitioned but then it was deleted. We keep it in the topics map with 0 partitions. partitionedTopics.put(topicName, 0); allTopicPartitionsNumber.addAndGet(-oldPartitionNumber); List<CompletableFuture<Void>> futures = new ArrayList<>(); for (Iterator<Map.Entry<String, ConsumerImpl<T>>> it = consumers.entrySet().iterator(); it.hasNext();) { Map.Entry<String, ConsumerImpl<T>> e = it.next(); String partitionedTopicName = TopicName.get(e.getKey()).getPartitionedTopicName(); // Remove the consumers that belong to the deleted partitioned topic if (partitionedTopicName.equals(topicName)) { futures.add(e.getValue().closeAsync()); consumers.remove(e.getKey());} } return FutureUtil.waitForAll(futures); } else if (oldPartitionNumber < currentPartitionNumber) { allTopicPartitionsNumber.addAndGet(currentPartitionNumber - oldPartitionNumber); partitionedTopics.put(topicName, currentPartitionNumber); List<String> newPartitions = list.subList(oldPartitionNumber, currentPartitionNumber); // subscribe newly added partitions List<CompletableFuture<Consumer<T>>> futureList = newPartitions.stream().map(partitionName -> { int partitionIndex = TopicName.getPartitionIndex(partitionName); CompletableFuture<Consumer<T>> subFuture = new CompletableFuture<>(); ConsumerConfigurationData<T> configurationData = getInternalConsumerConfig(); configurationData.setStartPaused(paused); ConsumerImpl<T> newConsumer = createInternalConsumer(configurationData, partitionName, partitionIndex, subFuture, true, schema); synchronized(pauseMutex) { if (paused) { newConsumer.pause(); } else { newConsumer.resume(); } consumers.putIfAbsent(newConsumer.getTopic(), newConsumer); } if (log.isDebugEnabled()) { log.debug("[{}] create consumer {} for partitionName: {}", topicName, newConsumer.getTopic(), partitionName); } return subFuture; }).collect(Collectors.toList()); // call interceptor onPartitionsChange(topicName, currentPartitionNumber); // wait for all partitions subscribe future complete, then startReceivingMessages return FutureUtil.waitForAll(futureList).thenAccept(finalFuture -> { List<ConsumerImpl<T>> newConsumerList = newPartitions.stream().map(partitionTopic -> consumers.get(partitionTopic)).collect(Collectors.toList()); startReceivingMessages(newConsumerList); }); } else { log.error("[{}] not support shrink topic partitions. old: {}, new: {}", topicName, oldPartitionNumber, currentPartitionNumber); return FutureUtil.failedFuture(new NotSupportedException("not support shrink topic partitions")); } }).exceptionally(throwable -> { log.warn("Failed to get partitions for topic to determine if new partitions are added", throwable); return null; }); }
3.26
pulsar_GracefulExecutorServicesShutdown_terminationTimeout_rdh
/** * Sets the timeout for waiting for executors to complete in forceful termination. * * @param terminationTimeout * duration for the timeout * @return the current instance for controlling graceful shutdown */ public GracefulExecutorServicesShutdown terminationTimeout(Duration terminationTimeout) { this.terminationTimeout = terminationTimeout; return this; }
3.26
pulsar_GracefulExecutorServicesShutdown_handle_rdh
/** * Starts the handler for polling frequently for the completed termination of enlisted executors. * * If the termination times out or the future is cancelled, all active executors will be forcefully * terminated by calling {@link ExecutorService#shutdownNow()}. * * @return a future which completes when all executors have terminated */ public CompletableFuture<Void> handle() { // if termination timeout isn't provided, calculate a termination timeout based on the shutdown timeout if (terminationTimeout == null) { terminationTimeout = Duration.ofNanos(((long) (timeout.toNanos() * DEFAULT_TERMINATION_TIMEOUT_RATIO))); } return new GracefulExecutorServicesTerminationHandler(timeout, terminationTimeout, executorServices).getFuture(); }
3.26
pulsar_GracefulExecutorServicesShutdown_timeout_rdh
/** * Sets the timeout for graceful shutdown. * * @param timeout * duration for the timeout * @return the current instance for controlling graceful shutdown */ public GracefulExecutorServicesShutdown timeout(Duration timeout) { this.timeout = timeout; return this;}
3.26
pulsar_GracefulExecutorServicesShutdown_shutdown_rdh
/** * Calls {@link ExecutorService#shutdown()} and enlists the executor as part of the * shutdown handling. * * @param executorServices * one or many executors to shutdown * @return the current instance for controlling graceful shutdown */ public GracefulExecutorServicesShutdown shutdown(ExecutorService... executorServices) { for (ExecutorService executorService : executorServices) { if (executorService != null) { executorService.shutdown(); this.executorServices.add(executorService);} } return this; }
3.26
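Putting the three builder methods together, a hypothetical usage could look like the sketch below. The initiate() factory is an assumption, since construction is not shown in these snippets; only timeout(), terminationTimeout(), shutdown(), and handle() appear above.

import java.time.Duration;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ShutdownDemo {
    public static void main(String[] args) {
        ExecutorService worker = Executors.newSingleThreadExecutor();
        GracefulExecutorServicesShutdown.initiate() // hypothetical factory method
                .timeout(Duration.ofSeconds(10))           // graceful shutdown phase
                .terminationTimeout(Duration.ofSeconds(2)) // forceful termination phase
                .shutdown(worker) // calls worker.shutdown() and enlists it
                .handle()         // future completes when all executors terminate
                .join();
    }
}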
pulsar_AbstractMetadataStore_isValidPath_rdh
/** * A valid path in the metadata store should: * 1. not be blank * 2. start with '/' * 3. not end with '/', except for the root path "/" */ static boolean isValidPath(String path) { return StringUtils.equals(path, "/") || ((StringUtils.isNotBlank(path) && path.startsWith("/")) && (!path.endsWith("/"))); }
3.26
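The predicate can be exercised with a plain-Java restatement (the original uses StringUtils from commons-lang3; this demo avoids the dependency):

public class PathCheckDemo {
    // Plain-Java restatement of the isValidPath predicate above, for illustration.
    static boolean isValidPath(String path) {
        return "/".equals(path)
                || (path != null && !path.trim().isEmpty()
                    && path.startsWith("/") && !path.endsWith("/"));
    }

    public static void main(String[] args) {
        System.out.println(isValidPath("/"));     // true  (root path)
        System.out.println(isValidPath("/a/b"));  // true
        System.out.println(isValidPath(""));      // false (blank)
        System.out.println(isValidPath("a/b"));   // false (missing leading '/')
        System.out.println(isValidPath("/a/b/")); // false (trailing '/')
    }
}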
pulsar_AbstractMetadataStore_execute_rdh
/** * Run the task in the executor thread and fail the future if the executor is shutting down. */ @VisibleForTesting public void execute(Runnable task, Supplier<List<CompletableFuture<?>>> futures) { try { executor.execute(task); } catch (final Throwable t) { futures.get().forEach(f -> f.completeExceptionally(t)); } }
3.26
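A minimal, self-contained illustration of the fail-fast submit pattern used by execute(): if the executor rejects the task (for example, because it is shutting down), the pending futures are failed instead of being left to hang. The class and method names in the demo are made up.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

public class SubmitOrFailDemo {
    static void execute(ExecutorService executor, Runnable task,
                        Supplier<List<CompletableFuture<?>>> futures) {
        try {
            executor.execute(task);
        } catch (Throwable t) { // e.g. RejectedExecutionException after shutdown
            futures.get().forEach(f -> f.completeExceptionally(t));
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.shutdown(); // force the next submission to be rejected
        CompletableFuture<Void> f = new CompletableFuture<>();
        execute(pool, () -> f.complete(null), () -> List.of(f));
        System.out.println(f.isCompletedExceptionally()); // true
    }
}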
pulsar_ThreadRuntime_start_rdh
/** * The core logic that initialize the thread container and executes the function. */ @Override public void start() throws Exception { // extract class loader for function ClassLoader functionClassLoader = getFunctionClassLoader(instanceConfig, instanceConfig.getFunctionId(), jarFile, narExtractionDirectory, fnCache, connectorsManager, functionsManager, InstanceUtils.calculateSubjectType(instanceConfig.getFunctionDetails())); ClassLoader transformFunctionClassLoader = (transformFunctionFile == null) ? null : getFunctionClassLoader(instanceConfig, instanceConfig.getTransformFunctionId(), transformFunctionFile, narExtractionDirectory, fnCache, connectorsManager, functionsManager, ComponentType.FUNCTION); // re-initialize JavaInstanceRunnable so that variables in constructor can be re-initialized this.javaInstanceRunnable = new JavaInstanceRunnable(instanceConfig, clientBuilder, pulsarClient, pulsarAdmin, stateStorageImplClass, stateStorageServiceUrl, secretsProvider, collectorRegistry, functionClassLoader, transformFunctionClassLoader); log.info("ThreadContainer starting function with instanceId {} functionId {} namespace {}", instanceConfig.getInstanceId(), instanceConfig.getFunctionId(), instanceConfig.getFunctionDetails().getNamespace()); this.fnThread = new Thread(threadGroup, javaInstanceRunnable, String.format("%s-%s", FunctionCommon.getFullyQualifiedName(instanceConfig.getFunctionDetails()), instanceConfig.getInstanceId())); this.fnThread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { @Override public void uncaughtException(Thread t, Throwable e) { log.error("Uncaught exception in thread {}", t, e); } }); this.fnThread.start(); }
3.26
pulsar_TopicsBase_lookUpBrokerForTopic_rdh
// Look up topic owner for non-partitioned topic or single topic partition. private CompletableFuture<Void> lookUpBrokerForTopic(TopicName partitionedTopicName, boolean authoritative, List<String> redirectAddresses) { CompletableFuture<Void> future = new CompletableFuture<>(); if (!pulsar().getBrokerService().getLookupRequestSemaphore().tryAcquire()) { if (log.isDebugEnabled()) { log.debug("Too many concurrent lookup request."); } future.completeExceptionally(new BrokerServiceException.TooManyRequestsException("Too many " + "concurrent lookup request")); return future; } CompletableFuture<Optional<LookupResult>> lookupFuture = pulsar().getNamespaceService().getBrokerServiceUrlAsync(partitionedTopicName, LookupOptions.builder().authoritative(authoritative).loadTopicsInBundle(false).build()); lookupFuture.thenAccept(optionalResult -> { if ((optionalResult == null) || (!optionalResult.isPresent())) { if (log.isDebugEnabled()) { log.debug("Fail to lookup topic for rest produce message request for topic {}.", partitionedTopicName); } completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future); return; } LookupResult result = optionalResult.get(); String httpUrl = result.getLookupData().getHttpUrl(); String httpUrlTls = result.getLookupData().getHttpUrlTls(); if ((StringUtils.isNotBlank(httpUrl) && httpUrl.equals(pulsar().getWebServiceAddress())) || (StringUtils.isNotBlank(httpUrlTls) && httpUrlTls.equals(pulsar().getWebServiceAddressTls()))) { // Current broker owns the topic, add to owning topic. if (log.isDebugEnabled()) { log.debug("Complete topic look up for rest produce message request for topic {}, " + "current broker is owner broker: {}", partitionedTopicName, result.getLookupData()); }pulsar().getBrokerService().getOwningTopics().computeIfAbsent(partitionedTopicName.getPartitionedTopicName(), key -> ConcurrentOpenHashSet.<Integer>newBuilder().build()).add(partitionedTopicName.getPartitionIndex()); completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future); } else { // Current broker doesn't own the topic or doesn't know who own the topic. if (log.isDebugEnabled()) {log.debug("Complete topic look up for rest produce message request for topic {}, " + "current broker is not owner broker: {}", partitionedTopicName, result.getLookupData()); } if (result.isRedirect()) {// Redirect lookup. completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), false), redirectAddresses, future); } else { // Found owner for topic. completeLookup(Pair.of(Arrays.asList(httpUrl, httpUrlTls), true), redirectAddresses, future); } } }).exceptionally(exception -> { if (log.isDebugEnabled()) { log.debug("Fail to lookup broker with rest produce message request for topic {}: {}", partitionedTopicName, exception.getMessage()); } completeLookup(Pair.of(Collections.emptyList(), false), redirectAddresses, future); return null; }); return future; }
3.26
pulsar_TopicsBase_findOwnerBrokerForTopic_rdh
// Look up the topic owner for the given topic. Returns whether asyncResponse has been completed, // which indicates a redirect or an exception. private boolean findOwnerBrokerForTopic(boolean authoritative, AsyncResponse asyncResponse) { PartitionedTopicMetadata metadata = internalGetPartitionedMetadata(authoritative, false); List<String> redirectAddresses = Collections.synchronizedList(new ArrayList<>()); CompletableFuture<Boolean> future = new CompletableFuture<>(); List<CompletableFuture<Void>> lookupFutures = new ArrayList<>(); if ((!topicName.isPartitioned()) && (metadata.partitions > 1)) { // Partitioned topic with multiple partitions, need to do look up for each partition. for (int index = 0; index < metadata.partitions; index++) { lookupFutures.add(lookUpBrokerForTopic(topicName.getPartition(index), authoritative, redirectAddresses)); } } else { // Non-partitioned topic or specific topic partition. lookupFutures.add(lookUpBrokerForTopic(topicName, authoritative, redirectAddresses)); } FutureUtil.waitForAll(lookupFutures).thenRun(() -> { processLookUpResult(redirectAddresses, asyncResponse, future); }).exceptionally(e -> { processLookUpResult(redirectAddresses, asyncResponse, future); return null; }); try { return future.get(); } catch (Exception e) { if (log.isDebugEnabled()) { log.debug("Fail to lookup topic for rest produce message request for topic {}.", topicName.toString()); } if (!asyncResponse.isDone()) { asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR, "Internal error: " + e.getMessage())); } return true; } }
3.26
pulsar_TopicsBase_completeLookup_rdh
// Release lookup semaphore and add result to redirectAddresses if current broker doesn't own the topic. private synchronized void completeLookup(Pair<List<String>, Boolean> result, List<String> redirectAddresses, CompletableFuture<Void> future) {pulsar().getBrokerService().getLookupRequestSemaphore().release(); // Left is the lookup result (insecure/secure addresses) if the lookup succeeded; Right indicates whether the // address is the owner's address or an address to redirect the lookup to. if (!result.getLeft().isEmpty()) { if (result.getRight()) { // If the address is for the owner of the topic partition, add it to the head so it has higher priority // compared to brokers used for lookup redirects. redirectAddresses.add(0, isRequestHttps() ? result.getLeft().get(1) : result.getLeft().get(0)); } else { redirectAddresses.add(redirectAddresses.size(), isRequestHttps() ? result.getLeft().get(1) : result.getLeft().get(0)); } } future.complete(null); }
3.26
pulsar_TopicsBase_messageToByteBuf_rdh
// Convert a message to a ByteBuf.
public ByteBuf messageToByteBuf(Message message) {
    checkArgument(message instanceof MessageImpl, "Message must be of type MessageImpl.");
    MessageImpl msg = (MessageImpl) message;
    MessageMetadata messageMetadata = msg.getMessageBuilder();
    ByteBuf payload = msg.getDataBuffer();
    messageMetadata.setCompression(CompressionCodecProvider.convertToWireProtocol(CompressionType.NONE));
    messageMetadata.setUncompressedSize(payload.readableBytes());
    return Commands.serializeMetadataAndPayload(ChecksumType.Crc32c, messageMetadata, payload);
}
3.26
pulsar_TopicsBase_extractException_rdh
// Set the error code based on the exception, indicating whether the client should retry with the
// same broker. Note: the original condition combined the two instanceof checks with &&, which can
// never hold for a single exception; || matches the apparent intent.
private void extractException(Exception e, ProducerAck produceMessageResult) {
    if (!(e instanceof BrokerServiceException.TopicFencedException || e instanceof ManagedLedgerException)) {
        produceMessageResult.setErrorCode(2);
    } else {
        produceMessageResult.setErrorCode(1);
    }
    produceMessageResult.setErrorMsg(e.getMessage());
}
3.26
pulsar_TopicsBase_publishMessages_rdh
// Publish messages to a topic, which can be partitioned or non-partitioned.
protected void publishMessages(AsyncResponse asyncResponse, ProducerMessages request, boolean authoritative) {
    String topic = topicName.getPartitionedTopicName();
    try {
        if (pulsar().getBrokerService().getOwningTopics().containsKey(topic)
                || !findOwnerBrokerForTopic(authoritative, asyncResponse)) {
            // If we've already done the lookup, or after the lookup this broker owns some of the
            // partitions, proceed to publish the messages; otherwise asyncResponse is completed
            // by the lookup.
            addOrGetSchemaForTopic(getSchemaData(request.getKeySchema(), request.getValueSchema()),
                    request.getSchemaVersion() == -1 ? null : new LongSchemaVersion(request.getSchemaVersion()))
                    .thenAccept(schemaMeta -> {
                        // Both schema version and schema data are necessary.
                        if (schemaMeta.getLeft() != null && schemaMeta.getRight() != null) {
                            internalPublishMessages(topicName, request,
                                    pulsar().getBrokerService().getOwningTopics().get(topic).values(),
                                    asyncResponse,
                                    AutoConsumeSchema.getSchema(schemaMeta.getLeft().toSchemaInfo()),
                                    schemaMeta.getRight());
                        } else {
                            asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
                                    "Failed to add or retrieve schema."));
                        }
                    }).exceptionally(e -> {
                        if (log.isDebugEnabled()) {
                            log.debug("Failed to publish message: " + e.getMessage());
                        }
                        asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
                                "Failed to publish message: " + e.getMessage()));
                        return null;
                    });
        }
    } catch (Exception e) {
        asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
                "Failed to publish message: " + e.getMessage()));
    }
}
3.26
pulsar_TopicsBase_buildMessage_rdh
// Build Pulsar messages from the REST request.
private List<Message> buildMessage(ProducerMessages producerMessages, Schema schema, String producerName,
                                   TopicName topicName) {
    List<ProducerMessage> messages = producerMessages.getMessages();
    List<Message> pulsarMessages = new ArrayList<>();
    for (ProducerMessage message : messages) {
        MessageMetadata messageMetadata = new MessageMetadata();
        messageMetadata.setProducerName(producerName);
        messageMetadata.setPublishTime(System.currentTimeMillis());
        messageMetadata.setSequenceId(message.getSequenceId());
        if (null != message.getReplicationClusters()) {
            messageMetadata.addAllReplicateTos(message.getReplicationClusters());
        }
        if (null != message.getProperties()) {
            messageMetadata.addAllProperties(message.getProperties().entrySet().stream().map(entry -> {
                KeyValue keyValue = new KeyValue();
                keyValue.setKey(entry.getKey());
                keyValue.setValue(entry.getValue());
                return keyValue;
            }).collect(Collectors.toList()));
        }
        if (null != message.getKey()) {
            // If there is a key schema, encode the partition key; otherwise use plain text.
            if (schema.getSchemaInfo().getType() == SchemaType.KEY_VALUE) {
                KeyValueSchemaImpl kvSchema = (KeyValueSchemaImpl) schema;
                messageMetadata.setPartitionKey(Base64.getEncoder()
                        .encodeToString(encodeWithSchema(message.getKey(), kvSchema.getKeySchema())));
                messageMetadata.setPartitionKeyB64Encoded(true);
            } else {
                messageMetadata.setPartitionKey(message.getKey());
                messageMetadata.setPartitionKeyB64Encoded(false);
            }
        }
        if (null != message.getEventTime() && !message.getEventTime().isEmpty()) {
            messageMetadata.setEventTime(Long.parseLong(message.getEventTime()));
        }
        if (message.isDisableReplication()) {
            messageMetadata.clearReplicateTo();
            messageMetadata.addReplicateTo("__local__");
        }
        if (message.getDeliverAt() != 0 && messageMetadata.hasEventTime()) {
            messageMetadata.setDeliverAtTime(message.getDeliverAt());
        } else if (message.getDeliverAfterMs() != 0) {
            messageMetadata.setDeliverAtTime(messageMetadata.getEventTime() + message.getDeliverAfterMs());
        }
        if (schema.getSchemaInfo().getType() == SchemaType.KEY_VALUE) {
            KeyValueSchemaImpl kvSchema = (KeyValueSchemaImpl) schema;
            pulsarMessages.add(MessageImpl.create(messageMetadata,
                    ByteBuffer.wrap(encodeWithSchema(message.getPayload(), kvSchema.getValueSchema())),
                    schema, topicName.toString()));
        } else {
            pulsarMessages.add(MessageImpl.create(messageMetadata,
                    ByteBuffer.wrap(encodeWithSchema(message.getPayload(), schema)),
                    schema, topicName.toString()));
        }
    }
    return pulsarMessages;
}
3.26
pulsar_TopicsBase_getSchemaData_rdh
// Build SchemaData from the passed-in schema strings.
private SchemaData getSchemaData(String keySchema, String valueSchema) {
    try {
        SchemaInfoImpl valueSchemaInfo = (valueSchema == null || valueSchema.isEmpty())
                ? (SchemaInfoImpl) StringSchema.utf8().getSchemaInfo()
                : SCHEMA_INFO_READER.readValue(valueSchema);
        if (null == valueSchemaInfo.getName()) {
            valueSchemaInfo.setName(valueSchemaInfo.getType().toString());
        }
        // Value schema only.
        if (keySchema == null || keySchema.isEmpty()) {
            return SchemaData.builder().data(valueSchemaInfo.getSchema())
                    .isDeleted(false)
                    .user("Rest Producer")
                    .timestamp(System.currentTimeMillis())
                    .type(valueSchemaInfo.getType())
                    .props(valueSchemaInfo.getProperties())
                    .build();
        } else {
            // Key_Value schema.
            SchemaInfoImpl keySchemaInfo = SCHEMA_INFO_READER.readValue(keySchema);
            if (null == keySchemaInfo.getName()) {
                keySchemaInfo.setName(keySchemaInfo.getType().toString());
            }
            SchemaInfo schemaInfo = KeyValueSchemaInfo.encodeKeyValueSchemaInfo(
                    "KVSchema-" + topicName.getPartitionedTopicName(),
                    keySchemaInfo, valueSchemaInfo, KeyValueEncodingType.SEPARATED);
            return SchemaData.builder().data(schemaInfo.getSchema())
                    .isDeleted(false)
                    .user("Rest Producer")
                    .timestamp(System.currentTimeMillis())
                    .type(schemaInfo.getType())
                    .props(schemaInfo.getProperties())
                    .build();
        }
    } catch (IOException e) {
        if (log.isDebugEnabled()) {
            log.debug("Failed to parse schema info for rest produce request with key schema {} "
                    + "and value schema {}", keySchema, valueSchema);
        }
        return null;
    }
}
3.26
pulsar_TopicsBase_encodeWithSchema_rdh
// Encode the message with the corresponding schema, doing any necessary conversion before encoding.
private byte[] encodeWithSchema(String input, Schema schema) {
    try {
        switch (schema.getSchemaInfo().getType()) {
            case INT8:
                return schema.encode(Byte.parseByte(input));
            case INT16:
                return schema.encode(Short.parseShort(input));
            case INT32:
                return schema.encode(Integer.parseInt(input));
            case INT64:
                return schema.encode(Long.parseLong(input));
            case STRING:
                return schema.encode(input);
            case FLOAT:
                return schema.encode(Float.parseFloat(input));
            case DOUBLE:
                return schema.encode(Double.parseDouble(input));
            case BOOLEAN:
                return schema.encode(Boolean.parseBoolean(input));
            case BYTES:
                return schema.encode(input.getBytes());
            case DATE:
                return schema.encode(DateFormat.getDateInstance().parse(input));
            case TIME:
                return schema.encode(new Time(Long.parseLong(input)));
            case TIMESTAMP:
                return schema.encode(new Timestamp(Long.parseLong(input)));
            case INSTANT:
                return schema.encode(Instant.parse(input));
            case LOCAL_DATE:
                return schema.encode(LocalDate.parse(input));
            case LOCAL_TIME:
                return schema.encode(LocalTime.parse(input));
            case LOCAL_DATE_TIME:
                return schema.encode(LocalDateTime.parse(input));
            case JSON:
                GenericJsonWriter jsonWriter = new GenericJsonWriter();
                return jsonWriter.write(new GenericJsonRecord(null, null,
                        ObjectMapperFactory.getMapper().reader().readTree(input), schema.getSchemaInfo()));
            case AVRO:
                AvroBaseStructSchema avroSchema = (AvroBaseStructSchema) schema;
                Decoder decoder = DecoderFactory.get().jsonDecoder(avroSchema.getAvroSchema(), input);
                DatumReader<GenericData.Record> datumReader = new GenericDatumReader(avroSchema.getAvroSchema());
                GenericRecord genericRecord = datumReader.read(null, decoder);
                GenericAvroWriter avroWriter = new GenericAvroWriter(avroSchema.getAvroSchema());
                return avroWriter.write(new GenericAvroRecord(null, avroSchema.getAvroSchema(), null,
                        genericRecord));
            case PROTOBUF_NATIVE:
            case KEY_VALUE:
            default:
                throw new PulsarClientException.InvalidMessageException(
                        "Unsupported schema type: " + schema.getSchemaInfo().getType());
        }
    } catch (Exception e) {
        if (log.isDebugEnabled()) {
            log.debug("Failed to encode value {} with schema {} for rest produce request", input,
                    new String(schema.getSchemaInfo().getSchema()));
        }
        return new byte[0];
    }
}
3.26
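encodeWithSchema above parses the REST payload string and delegates to the matching Pulsar schema. A small round-trip sketch of the underlying client API (only standard pulsar-client primitive schemas are used):

import org.apache.pulsar.client.api.Schema;

class SchemaEncodeDemo {
    public static void main(String[] args) {
        // Primitive schemas round-trip simple values to and from byte[].
        byte[] intBytes = Schema.INT32.encode(42);
        int decodedInt = Schema.INT32.decode(intBytes);
        byte[] strBytes = Schema.STRING.encode("hello");
        String decodedStr = Schema.STRING.decode(strBytes);
        System.out.println(decodedInt + " " + decodedStr);
    }
}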
pulsar_TopicsBase_publishMessagesToPartition_rdh
// Publish messages to a single partition of a partitioned topic.
protected void publishMessagesToPartition(AsyncResponse asyncResponse, ProducerMessages request,
                                          boolean authoritative, int partition) {
    if (topicName.isPartitioned()) {
        asyncResponse.resume(new RestException(Status.BAD_REQUEST,
                "Topic name can't contain '-partition-' suffix."));
        // Stop here instead of falling through to publish after rejecting the request.
        return;
    }
    String topic = topicName.getPartitionedTopicName();
    try {
        // If this broker owns the partition, proceed to publish the messages; otherwise do a lookup.
        if ((pulsar().getBrokerService().getOwningTopics().containsKey(topic)
                && pulsar().getBrokerService().getOwningTopics().get(topic).contains(partition))
                || !findOwnerBrokerForTopic(authoritative, asyncResponse)) {
            addOrGetSchemaForTopic(getSchemaData(request.getKeySchema(), request.getValueSchema()),
                    request.getSchemaVersion() == -1 ? null : new LongSchemaVersion(request.getSchemaVersion()))
                    .thenAccept(schemaMeta -> {
                        // Both schema version and schema data are necessary.
                        if (schemaMeta.getLeft() != null && schemaMeta.getRight() != null) {
                            internalPublishMessagesToPartition(topicName, request, partition, asyncResponse,
                                    AutoConsumeSchema.getSchema(schemaMeta.getLeft().toSchemaInfo()),
                                    schemaMeta.getRight());
                        } else {
                            asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
                                    "Failed to add or retrieve schema."));
                        }
                    }).exceptionally(e -> {
                        if (log.isDebugEnabled()) {
                            log.debug("Failed to publish message to single partition: "
                                    + e.getLocalizedMessage());
                        }
                        asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
                                "Failed to publish message to single partition: " + e.getMessage()));
                        return null;
                    });
        }
    } catch (Exception e) {
        asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
                "Failed to publish message: " + e.getMessage()));
    }
}
3.26
pulsar_TopicsBase_processPublishMessageResults_rdh
// Process the results of all message publishing attempts.
private void processPublishMessageResults(List<ProducerAck> produceMessageResults,
                                          List<CompletableFuture<PositionImpl>> publishResults) {
    for (int index = 0; index < publishResults.size(); index++) {
        try {
            PositionImpl position = publishResults.get(index).get();
            MessageId messageId = new MessageIdImpl(position.getLedgerId(), position.getEntryId(),
                    Integer.parseInt(produceMessageResults.get(index).getMessageId()));
            produceMessageResults.get(index).setMessageId(messageId.toString());
        } catch (Exception e) {
            if (log.isDebugEnabled()) {
                log.debug("Failed to publish message [{}] with rest produce message request for topic {}",
                        index, topicName);
            }
            // future.get() wraps failures in ExecutionException, so inspect the cause.
            if (e.getCause() instanceof BrokerServiceException.TopicNotFoundException) {
                // Topic ownership might have changed; force a new lookup.
                pulsar().getBrokerService().getOwningTopics().remove(topicName.getPartitionedTopicName());
            }
            extractException(e, produceMessageResults.get(index));
        }
    }
}
3.26
pulsar_TopicsBase_addSchema_rdh
// Add a new schema to the schema registry for a topic.
private CompletableFuture<SchemaVersion> addSchema(SchemaData schemaData) {
    // Only need to add the schema to the first partition this broker owns, since the schema id in
    // the schema registry is the same for all partitions: the partitioned topic name.
    List<Integer> partitions = pulsar().getBrokerService().getOwningTopics()
            .get(topicName.getPartitionedTopicName()).values();
    CompletableFuture<SchemaVersion> result = new CompletableFuture<>();
    for (int index = 0; index < partitions.size(); index++) {
        CompletableFuture<SchemaVersion> future = new CompletableFuture<>();
        String topicPartitionName = topicName.getPartition(partitions.get(index)).toString();
        pulsar().getBrokerService().getTopic(topicPartitionName, false).thenAccept(topic -> {
            if (!topic.isPresent()) {
                future.completeExceptionally(new BrokerServiceException.TopicNotFoundException(
                        "Topic " + topicPartitionName + " not found"));
            } else {
                topic.get().addSchema(schemaData)
                        .thenAccept(schemaVersion -> future.complete(schemaVersion))
                        .exceptionally(exception -> {
                            future.completeExceptionally(exception);
                            return null;
                        });
            }
        });
        try {
            result.complete(future.get());
            break;
        } catch (Exception e) {
            if (log.isDebugEnabled()) {
                log.debug("Failed to add schema to topic " + topicName.getPartitionedTopicName()
                        + " for partition " + partitions.get(index) + " for REST produce request.");
            }
        }
    }
    // Not able to add the schema to any partition.
    if (!result.isDone()) {
        result.completeExceptionally(new SchemaException(
                "Unable to add schema " + schemaData + " to topic " + topicName.getPartitionedTopicName()));
    }
    return result;
}
3.26
pulsar_LocalBrokerData_updateSystemResourceUsage_rdh
// Update resource usage given each individual usage. private void updateSystemResourceUsage(final ResourceUsage cpu, final ResourceUsage memory, final ResourceUsage directMemory, final ResourceUsage bandwidthIn, final ResourceUsage bandwidthOut) { this.cpu = cpu; this.memory = memory; this.directMemory = directMemory; this.bandwidthIn = bandwidthIn; this.bandwidthOut = bandwidthOut; }
3.26
pulsar_LocalBrokerData_equals_rdh
/**
 * Since the broker data is also used as a lock for the broker, we need to have a stable comparison
 * operator that is not affected by the actual load on the broker.
 */
@Override
public boolean equals(Object o) {
    if (o instanceof LocalBrokerData) {
        LocalBrokerData other = (LocalBrokerData) o;
        return Objects.equals(webServiceUrl, other.webServiceUrl)
                && Objects.equals(webServiceUrlTls, other.webServiceUrlTls)
                && Objects.equals(pulsarServiceUrl, other.pulsarServiceUrl)
                && Objects.equals(pulsarServiceUrlTls, other.pulsarServiceUrlTls);
    }
    return false;
}
3.26
pulsar_LocalBrokerData_updateBundleData_rdh
// Aggregate message rates, throughput, topic count, bundle count, consumer count, and producer
// count across the given data. Also keep track of bundle gains and losses.
private void updateBundleData(final Map<String, NamespaceBundleStats> bundleStats) {
    msgRateIn = 0;
    msgRateOut = 0;
    msgThroughputIn = 0;
    msgThroughputOut = 0;
    int totalNumTopics = 0;
    int totalNumBundles = 0;
    int totalNumConsumers = 0;
    int totalNumProducers = 0;
    final Iterator<String> oldBundleIterator = bundles.iterator();
    while (oldBundleIterator.hasNext()) {
        final String bundle = oldBundleIterator.next();
        if (!bundleStats.containsKey(bundle)) {
            // If this bundle is in the old bundle set but not the new one, we lost it.
            lastBundleLosses.add(bundle);
            oldBundleIterator.remove();
        }
    }
    for (Map.Entry<String, NamespaceBundleStats> entry : bundleStats.entrySet()) {
        final String bundle = entry.getKey();
        final NamespaceBundleStats stats = entry.getValue();
        if (!bundles.contains(bundle)) {
            // If this bundle is in the new bundle set but not the old one, we gained it.
            lastBundleGains.add(bundle);
            bundles.add(bundle);
        }
        msgThroughputIn += stats.msgThroughputIn;
        msgThroughputOut += stats.msgThroughputOut;
        msgRateIn += stats.msgRateIn;
        msgRateOut += stats.msgRateOut;
        totalNumTopics += stats.topics;
        ++totalNumBundles;
        totalNumConsumers += stats.consumerCount;
        totalNumProducers += stats.producerCount;
    }
    numTopics = totalNumTopics;
    numBundles = totalNumBundles;
    numConsumers = totalNumConsumers;
    numProducers = totalNumProducers;
}
3.26
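The gain/loss bookkeeping in updateBundleData is plain set reconciliation: bundles in the old set but absent from the new stats map are losses, and vice versa. A standalone sketch of that reconciliation with made-up bundle names:

import java.util.HashSet;
import java.util.Set;

class BundleReconcileDemo {
    public static void main(String[] args) {
        Set<String> owned = new HashSet<>(Set.of("ns/0x00_0x40", "ns/0x40_0x80"));
        Set<String> reported = Set.of("ns/0x40_0x80", "ns/0x80_0xC0");

        Set<String> losses = new HashSet<>(owned);
        losses.removeAll(reported); // owned before, gone now
        Set<String> gains = new HashSet<>(reported);
        gains.removeAll(owned);     // newly reported

        owned.removeAll(losses);
        owned.addAll(gains);
        System.out.println("gains=" + gains + " losses=" + losses + " owned=" + owned);
    }
}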
pulsar_LocalBrokerData_update_rdh
/** * Using another LocalBrokerData, update this. * * @param other * LocalBrokerData to update from. */ public void update(final LocalBrokerData other) { updateSystemResourceUsage(other.cpu, other.memory, other.directMemory, other.bandwidthIn, other.bandwidthOut); updateBundleData(other.lastStats); lastStats = other.lastStats; }
3.26
pulsar_FileSystemManagedLedgerOffloader_offload_rdh
/* Ledger metadata is stored at index -1. */
@Override
public CompletableFuture<Void> offload(ReadHandle readHandle, UUID uuid, Map<String, String> extraMetadata) {
    CompletableFuture<Void> promise = new CompletableFuture<>();
    scheduler.chooseThread(readHandle.getId()).execute(new LedgerReader(readHandle, uuid, extraMetadata,
            promise, f1, configuration, assignmentScheduler,
            offloadPolicies.getManagedLedgerOffloadPrefetchRounds(), this.offloaderStats));
    return promise;
}
3.26
pulsar_TransactionMetadataStore_getLowWaterMark_rdh
/**
 * Get the low water mark of this transaction coordinator, used to delete useless transactions in
 * the transaction buffer and pending-ack store.
 *
 * @return the low water mark
 */
default long getLowWaterMark() {
    return Long.MIN_VALUE;
}
3.26
pulsar_StateChangeListeners_notifyOnCompletion_rdh
/** * Notify all currently added listeners on completion of the future. * * @return future of a new completion stage */ public <T> CompletableFuture<T> notifyOnCompletion(CompletableFuture<T> future, String serviceUnit, ServiceUnitStateData data) { return future.whenComplete((r, ex) -> notify(serviceUnit, data, ex)); }
3.26
pulsar_MultiRolesTokenAuthorizationProvider_canConsumeAsync_rdh
/** * Check if the specified role has permission to receive messages from the specified fully qualified topic * name. * * @param topicName * the fully qualified topic name associated with the topic. * @param role * the app id used to receive messages from the topic. * @param subscription * the subscription name defined by the client */ @Override public CompletableFuture<Boolean> canConsumeAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData, String subscription) { return authorize(role, authenticationData, r -> super.canConsumeAsync(topicName, r, authenticationData, subscription)); }
3.26
pulsar_MultiRolesTokenAuthorizationProvider_canLookupAsync_rdh
/** * Check whether the specified role can perform a lookup for the specified topic. * <p> * For that the caller needs to have producer or consumer permission. * * @param topicName * @param role * @return * @throws Exception */ @Override public CompletableFuture<Boolean> canLookupAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) { return authorize(role, authenticationData, r -> super.canLookupAsync(topicName, r, authenticationData)); }
3.26
pulsar_MultiRolesTokenAuthorizationProvider_canProduceAsync_rdh
/** * Check if the specified role has permission to send messages to the specified fully qualified topic name. * * @param topicName * the fully qualified topic name associated with the topic. * @param role * the app id used to send messages to the topic. */ @Override public CompletableFuture<Boolean> canProduceAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) { return authorize(role, authenticationData, r -> super.canProduceAsync(topicName, r, authenticationData)); }
3.26
pulsar_PulsarAdmin_builder_rdh
/**
 * Get a new builder instance that can be used to configure and build a {@link PulsarAdmin} instance.
 *
 * @return the {@link PulsarAdminBuilder}
 */
static PulsarAdminBuilder builder() {
    return DefaultImplementation.newAdminClientBuilder();
}
3.26
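Typical use of the builder (a sketch: the service URL is a placeholder, and serviceHttpUrl/clusters are standard PulsarAdmin APIs):

import org.apache.pulsar.client.admin.PulsarAdmin;

class AdminClientDemo {
    public static void main(String[] args) throws Exception {
        // Build an admin client against a broker's HTTP endpoint; close it when done.
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://localhost:8080") // placeholder address
                .build()) {
            System.out.println(admin.clusters().getClusters());
        }
    }
}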
pulsar_LoadManagerShared_fillNamespaceToBundlesMap_rdh
/** * Using the given bundles, populate the namespace to bundle range map. * * @param bundles * Bundles with which to populate. * @param target * Map to fill. */ public static void fillNamespaceToBundlesMap(final Set<String> bundles, final ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>> target) { bundles.forEach(bundleName -> { final String namespaceName = getNamespaceNameFromBundleName(bundleName); final String bundleRange = getBundleRangeFromBundleName(bundleName); target.computeIfAbsent(namespaceName, k -> ConcurrentOpenHashSet.<String>newBuilder().build()).add(bundleRange); }); }
3.26
pulsar_LoadManagerShared_shouldAntiAffinityNamespaceUnload_rdh
/**
 * Checks whether the given anti-affinity namespace should be unloaded by the broker due to
 * load-shedding. If all brokers own the same number of anti-affinity namespaces, unloading this
 * namespace would land it back on the broker it was unloaded from. So this utility approves the
 * unload only if the namespace can be loaded by a different broker.
 *
 * @param namespace
 * @param bundle
 * @param currentBroker
 * @param pulsar
 * @param brokerToNamespaceToBundleRange
 * @param candidateBrokers
 * @return
 * @throws Exception
 */
public static boolean shouldAntiAffinityNamespaceUnload(String namespace, String bundle, String currentBroker,
        final PulsarService pulsar,
        final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>
                brokerToNamespaceToBundleRange,
        Set<String> candidateBrokers) throws Exception {
    Map<String, Integer> brokerNamespaceCount = getAntiAffinityNamespaceOwnedBrokers(pulsar, namespace,
            brokerToNamespaceToBundleRange).get(10, TimeUnit.SECONDS);
    return shouldAntiAffinityNamespaceUnload(currentBroker, candidateBrokers, brokerNamespaceCount);
}
3.26
pulsar_LoadManagerShared_filterAntiAffinityGroupOwnedBrokers_rdh
/**
 * Tries to filter out brokers that own a namespace with the same anti-affinity group as the given
 * namespace. If every domain owns a namespace with the same anti-affinity group, it keeps brokers
 * in the domain that has the least number of such namespaces; within a domain, it likewise keeps
 * the brokers with the least number of such namespaces.
 * eg.
 * <pre>
 * Before:
 * Domain-count  BrokersBase-count
 * ____________  ____________
 * d1-3          b1-2,b2-1
 * d2-3          b3-2,b4-1
 * d3-4          b5-2,b6-2
 *
 * After filtering: "candidates" brokers
 * Domain-count  BrokersBase-count
 * ____________  ____________
 * d1-3          b2-1
 * d2-3          b4-1
 *
 * "candidate" broker to own anti-affinity-namespace = b2 or b4
 *
 * </pre>
 *
 * @param pulsar
 * @param assignedBundleName
 * @param candidates
 * @param brokerToNamespaceToBundleRange
 */
public static void filterAntiAffinityGroupOwnedBrokers(final PulsarService pulsar,
        final String assignedBundleName, final Set<String> candidates,
        final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>
                brokerToNamespaceToBundleRange,
        Map<String, String> brokerToDomainMap) {
    if (candidates.isEmpty()) {
        return;
    }
    final String namespaceName = getNamespaceNameFromBundleName(assignedBundleName);
    try {
        final Map<String, Integer> brokerToAntiAffinityNamespaceCount =
                getAntiAffinityNamespaceOwnedBrokers(pulsar, namespaceName, brokerToNamespaceToBundleRange)
                        .get(30, TimeUnit.SECONDS);
        filterAntiAffinityGroupOwnedBrokers(pulsar, candidates, brokerToDomainMap,
                brokerToAntiAffinityNamespaceCount);
    } catch (Exception e) {
        LOG.error("Failed to filter anti-affinity group namespace {}", e.getMessage());
    }
}
3.26
pulsar_LoadManagerShared_filterDomainsNotHavingLeastNumberAntiAffinityNamespaces_rdh
/**
 * Computes the least number of namespaces owned by any single domain, then removes from the
 * candidates every broker whose domain owns more namespaces than that minimum.
 *
 * @param brokerToAntiAffinityNamespaceCount
 * @param candidates
 * @param brokerToDomainMap
 */
private static void filterDomainsNotHavingLeastNumberAntiAffinityNamespaces(
        Map<String, Integer> brokerToAntiAffinityNamespaceCount, Set<String> candidates,
        Map<String, String> brokerToDomainMap) {
    if (brokerToDomainMap == null || brokerToDomainMap.isEmpty()) {
        return;
    }
    final Map<String, Integer> domainNamespaceCount = new HashMap<>();
    int leastNamespaceCount = Integer.MAX_VALUE;
    candidates.forEach(broker -> {
        final String domain = brokerToDomainMap.getOrDefault(broker, DEFAULT_DOMAIN);
        final int count = brokerToAntiAffinityNamespaceCount.getOrDefault(broker, 0);
        domainNamespaceCount.compute(domain, (domainName, nsCount) -> nsCount == null ? count : nsCount + count);
    });
    // Find the least namespace count across domains.
    for (Entry<String, Integer> domainNsCountEntry : domainNamespaceCount.entrySet()) {
        if (domainNsCountEntry.getValue() < leastNamespaceCount) {
            leastNamespaceCount = domainNsCountEntry.getValue();
        }
    }
    final int finalLeastNamespaceCount = leastNamespaceCount;
    // Only keep brokers whose domain has the least namespace count.
    candidates.removeIf(broker -> {
        Integer nsCount = domainNamespaceCount.get(brokerToDomainMap.getOrDefault(broker, DEFAULT_DOMAIN));
        return nsCount != null && nsCount != finalLeastNamespaceCount;
    });
}
3.26
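The domain filtering above reduces per-domain namespace counts and keeps only brokers in domains at the minimum. A compact standalone version of the same reduction over hypothetical data (d1 carries 3 namespaces, d2 carries 1, so only b3 survives):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

class DomainFilterDemo {
    public static void main(String[] args) {
        Map<String, String> brokerToDomain = Map.of("b1", "d1", "b2", "d1", "b3", "d2");
        Map<String, Integer> brokerNsCount = Map.of("b1", 2, "b2", 1, "b3", 1);

        // Sum anti-affinity namespace counts per domain: d1 -> 3, d2 -> 1.
        Map<String, Integer> domainCount = new HashMap<>();
        brokerToDomain.forEach((broker, domain) ->
                domainCount.merge(domain, brokerNsCount.getOrDefault(broker, 0), Integer::sum));
        int least = Collections.min(domainCount.values());

        // Keep only brokers whose domain carries the least count: [b3].
        Set<String> candidates = brokerToDomain.keySet().stream()
                .filter(b -> domainCount.get(brokerToDomain.get(b)) == least)
                .collect(Collectors.toSet());
        System.out.println(candidates);
    }
}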
pulsar_LoadManagerShared_applyNamespacePolicies_rdh
// Determines the brokers available for the given service unit according to the given policies.
// The brokers are put into brokerCandidateCache.
public static void applyNamespacePolicies(final ServiceUnitId serviceUnit,
        final SimpleResourceAllocationPolicies policies, final Set<String> brokerCandidateCache,
        final Set<String> availableBrokers, final BrokerTopicLoadingPredicate brokerTopicLoadingPredicate) {
    Set<String> primariesCache = localPrimariesCache.get();
    primariesCache.clear();
    Set<String> secondaryCache = localSecondaryCache.get();
    secondaryCache.clear();
    NamespaceName namespace = serviceUnit.getNamespaceObject();
    boolean isIsolationPoliciesPresent = policies.areIsolationPoliciesPresent(namespace);
    boolean isNonPersistentTopic = (serviceUnit instanceof NamespaceBundle)
            ? ((NamespaceBundle) serviceUnit).hasNonPersistentTopic() : false;
    if (isIsolationPoliciesPresent) {
        LOG.debug("Isolation policies present for namespace - [{}]", namespace.toString());
    }
    for (final String broker : availableBrokers) {
        final String brokerUrlString = String.format("http://%s", broker);
        URL brokerUrl;
        try {
            brokerUrl = new URL(brokerUrlString);
        } catch (MalformedURLException e) {
            LOG.error("Unable to parse brokerUrl from ResourceUnitId", e);
            continue;
        }
        // todo: in future, check if the resource unit has resources to take the namespace
        if (isIsolationPoliciesPresent) {
            // note: serviceUnitID is the namespace name and ResourceID is the broker name
            if (policies.isPrimaryBroker(namespace, brokerUrl.getHost())) {
                primariesCache.add(broker);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Added primary broker - [{}] as possible candidate for"
                            + " namespace - [{}] with policies", brokerUrl.getHost(), namespace.toString());
                }
            } else if (policies.isSecondaryBroker(namespace, brokerUrl.getHost())) {
                secondaryCache.add(broker);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Added shared broker - [{}] as possible "
                            + "candidate for namespace - [{}] with policies",
                            brokerUrl.getHost(), namespace.toString());
                }
            } else if (LOG.isDebugEnabled()) {
                LOG.debug("Skipping broker - [{}]: not a primary broker and not shared"
                        + " for namespace - [{}] ", brokerUrl.getHost(), namespace.toString());
            }
        } else if (isNonPersistentTopic
                && !brokerTopicLoadingPredicate.isEnableNonPersistentTopics(brokerUrlString)) {
            // Non-persistent topics can only be assigned to brokers that have them enabled.
            if (LOG.isDebugEnabled()) {
                LOG.debug("Filtering broker - [{}] because it doesn't support non-persistent namespace - [{}]",
                        brokerUrl.getHost(), namespace.toString());
            }
        } else if (!isNonPersistentTopic
                && !brokerTopicLoadingPredicate.isEnablePersistentTopics(brokerUrlString)) {
            // Persistent topics can only be assigned to brokers that have them enabled.
            if (LOG.isDebugEnabled()) {
                LOG.debug("Filtering broker - [{}] because it only supports non-persistent namespace - [{}]",
                        brokerUrl.getHost(), namespace.toString());
            }
        } else if (policies.isSharedBroker(brokerUrl.getHost())) {
            secondaryCache.add(broker);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Added shared broker - [{}] as possible candidate for namespace - [{}]",
                        brokerUrl.getHost(), namespace.toString());
            }
        }
    }
    if (isIsolationPoliciesPresent) {
        brokerCandidateCache.addAll(primariesCache);
        if (policies.shouldFailoverToSecondaries(namespace, primariesCache.size())) {
            LOG.debug("Not enough primaries [{}] available for namespace - [{}], "
                    + "adding shared [{}] as possible candidate owners",
                    primariesCache.size(), namespace.toString(), secondaryCache.size());
            brokerCandidateCache.addAll(secondaryCache);
        }
    } else {
        LOG.debug("Policies not present for namespace - [{}], so only "
                + "considering shared [{}] brokers as possible owners",
                namespace.toString(), secondaryCache.size());
        brokerCandidateCache.addAll(secondaryCache);
    }
}
3.26
pulsar_LoadManagerShared_getAntiAffinityNamespaceOwnedBrokers_rdh
/**
 * Returns a map from broker to the count of namespaces it owns that belong to the same
 * anti-affinity group as the given namespace.
 *
 * @param pulsar
 * @param namespaceName
 * @param brokerToNamespaceToBundleRange
 * @return
 */
public static CompletableFuture<Map<String, Integer>> getAntiAffinityNamespaceOwnedBrokers(
        final PulsarService pulsar, final String namespaceName,
        final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>
                brokerToNamespaceToBundleRange) {
    CompletableFuture<Map<String, Integer>> antiAffinityNsBrokersResult = new CompletableFuture<>();
    getNamespaceAntiAffinityGroupAsync(pulsar, namespaceName).thenAccept(antiAffinityGroupOptional -> {
        if (antiAffinityGroupOptional.isEmpty()) {
            antiAffinityNsBrokersResult.complete(null);
            return;
        }
        final String antiAffinityGroup = antiAffinityGroupOptional.get();
        final Map<String, Integer> brokerToAntiAffinityNamespaceCount = new ConcurrentHashMap<>();
        final List<CompletableFuture<Void>> futures = new ArrayList<>();
        brokerToNamespaceToBundleRange.forEach((broker, nsToBundleRange) -> {
            nsToBundleRange.forEach((ns, bundleRange) -> {
                if (bundleRange.isEmpty()) {
                    return;
                }
                CompletableFuture<Void> future = new CompletableFuture<>();
                futures.add(future);
                countAntiAffinityNamespaceOwnedBrokers(broker, ns, future, pulsar, antiAffinityGroup,
                        brokerToAntiAffinityNamespaceCount);
            });
        });
        FutureUtil.waitForAll(futures)
                .thenAccept(r -> antiAffinityNsBrokersResult.complete(brokerToAntiAffinityNamespaceCount));
    }).exceptionally(ex -> {
        // namespace-policies have not been created yet
        antiAffinityNsBrokersResult.complete(null);
        return null;
    });
    return antiAffinityNsBrokersResult;
}
3.26
pulsar_LoadManagerShared_getNamespaceNameFromBundleName_rdh
// From a full bundle name, extract the namespace name. public static String getNamespaceNameFromBundleName(String bundleName) { // the bundle format is property/cluster/namespace/0x00000000_0xFFFFFFFF int pos = bundleName.lastIndexOf('/'); checkArgument(pos != (-1)); return bundleName.substring(0, pos); }
3.26
pulsar_LoadManagerShared_getSystemResourceUsage_rdh
// Get the system resource usage for this broker.
public static SystemResourceUsage getSystemResourceUsage(final BrokerHostUsage brokerHostUsage) {
    SystemResourceUsage systemResourceUsage = brokerHostUsage.getBrokerHostUsage();
    // Override system memory usage and limit with JVM heap usage and limit.
    double maxHeapMemoryInBytes = Runtime.getRuntime().maxMemory();
    double memoryUsageInBytes = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
    double memoryUsage = memoryUsageInBytes / MIBI;
    double memoryLimit = maxHeapMemoryInBytes / MIBI;
    systemResourceUsage.setMemory(new ResourceUsage(memoryUsage, memoryLimit));
    // Collect JVM direct memory.
    systemResourceUsage.setDirectMemory(new ResourceUsage((double) (getJvmDirectMemoryUsed() / MIBI),
            (double) (DirectMemoryUtils.jvmMaxDirectMemory() / MIBI)));
    return systemResourceUsage;
}
3.26
pulsar_LoadManagerShared_getBundleRangeFromBundleName_rdh
// From a full bundle name, extract the bundle range.
public static String getBundleRangeFromBundleName(String bundleName) {
    // the bundle format is property/cluster/namespace/0x00000000_0xFFFFFFFF
    int pos = bundleName.lastIndexOf("/");
    checkArgument(pos != -1);
    return bundleName.substring(pos + 1);
}
3.26
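Both helpers above split a full bundle name on its last '/'. A quick usage sketch (the import path for LoadManagerShared is assumed to be the broker's loadbalance package):

import org.apache.pulsar.broker.loadbalance.LoadManagerShared;

class BundleNameDemo {
    public static void main(String[] args) {
        String bundle = "my-tenant/my-ns/0x00000000_0xFFFFFFFF";
        // Prints "my-tenant/my-ns"
        System.out.println(LoadManagerShared.getNamespaceNameFromBundleName(bundle));
        // Prints "0x00000000_0xFFFFFFFF"
        System.out.println(LoadManagerShared.getBundleRangeFromBundleName(bundle));
    }
}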
pulsar_LoadManagerShared_isLoadSheddingEnabled_rdh
/** * If load balancing is enabled, load shedding is enabled by default unless forced off by dynamic configuration. * * @return true by default */ public static boolean isLoadSheddingEnabled(final PulsarService pulsar) { return pulsar.getConfiguration().isLoadBalancerEnabled() && pulsar.getConfiguration().isLoadBalancerSheddingEnabled(); }
3.26
pulsar_LoadManagerShared_filterBrokersWithLargeTopicCount_rdh
/**
 * Filters out brokers whose topic count exceeds the threshold configured at
 * ServiceConfiguration.loadBalancerBrokerMaxTopics. <br/>
 * If every broker exceeds the threshold, the original broker candidate list is kept.
 *
 * @param brokerCandidateCache
 * @param loadData
 * @param loadBalancerBrokerMaxTopics
 */
public static void filterBrokersWithLargeTopicCount(Set<String> brokerCandidateCache, LoadData loadData,
        int loadBalancerBrokerMaxTopics) {
    Set<String> filteredBrokerCandidates = brokerCandidateCache.stream().filter(broker -> {
        BrokerData brokerData = loadData.getBrokerData().get(broker);
        long totalTopics = (brokerData != null && brokerData.getPreallocatedBundleData() != null)
                ? brokerData.getPreallocatedBundleData().values().stream()
                        .mapToLong(preAllocatedBundle -> preAllocatedBundle.getTopics()).sum()
                        + brokerData.getLocalData().getNumTopics()
                : 0;
        return totalTopics <= loadBalancerBrokerMaxTopics;
    }).collect(Collectors.toSet());
    if (!filteredBrokerCandidates.isEmpty()) {
        brokerCandidateCache.clear();
        brokerCandidateCache.addAll(filteredBrokerCandidates);
    }
}
3.26
pulsar_LoadManagerShared_removeMostServicingBrokersForNamespace_rdh
/**
 * Removes from consideration the brokers which have more bundles assigned to them in the same
 * namespace as the incoming bundle than at least one other available broker.
 *
 * @param assignedBundleName
 *            Name of bundle to be assigned.
 * @param candidates
 *            BrokersBase available for placement.
 * @param brokerToNamespaceToBundleRange
 *            Map from brokers to namespaces to bundle ranges.
 */
public static void removeMostServicingBrokersForNamespace(final String assignedBundleName,
        final Set<String> candidates,
        final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>
                brokerToNamespaceToBundleRange) {
    if (candidates.isEmpty()) {
        return;
    }
    final String namespaceName = getNamespaceNameFromBundleName(assignedBundleName);
    int leastBundles = Integer.MAX_VALUE;
    for (final String broker : candidates) {
        int bundles = (int) brokerToNamespaceToBundleRange
                .computeIfAbsent(broker,
                        k -> ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<String>>newBuilder().build())
                .computeIfAbsent(namespaceName, k -> ConcurrentOpenHashSet.<String>newBuilder().build())
                .size();
        leastBundles = Math.min(leastBundles, bundles);
        if (leastBundles == 0) {
            break;
        }
    }
    // Since `brokerToNamespaceToBundleRange` can be updated by other threads,
    // `leastBundles` may differ from the actual value.
    final int finalLeastBundles = leastBundles;
    candidates.removeIf(broker -> brokerToNamespaceToBundleRange
            .computeIfAbsent(broker,
                    k -> ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<String>>newBuilder().build())
            .computeIfAbsent(namespaceName, k -> ConcurrentOpenHashSet.<String>newBuilder().build())
            .size() > finalLeastBundles);
}
3.26
pulsar_AbstractSinkRecord_cumulativeAck_rdh
/**
 * Some sinks want to control the ack type themselves.
 */
public void cumulativeAck() {
    if (sourceRecord instanceof PulsarRecord) {
        PulsarRecord pulsarRecord = (PulsarRecord) sourceRecord;
        pulsarRecord.cumulativeAck();
    } else {
        throw new RuntimeException("SourceRecord class type must be PulsarRecord");
    }
}
3.26
pulsar_AbstractSinkRecord_individualAck_rdh
/**
 * Some sinks want to control the ack type themselves.
 */
public void individualAck() {
    if (sourceRecord instanceof PulsarRecord) {
        PulsarRecord pulsarRecord = (PulsarRecord) sourceRecord;
        pulsarRecord.individualAck();
    } else {
        throw new RuntimeException("SourceRecord class type must be PulsarRecord");
    }
}
3.26
pulsar_RestMessagePublishContext_get_rdh
// recycler public static RestMessagePublishContext get(CompletableFuture<PositionImpl> positionFuture, Topic topic, long startTimeNs) { RestMessagePublishContext callback = RECYCLER.get(); callback.positionFuture = positionFuture; callback.topic = topic; callback.startTimeNs = startTimeNs; return callback; }
3.26
pulsar_RestMessagePublishContext_completed_rdh
/**
 * Executed from the managed ledger thread when the message is persisted.
 */
@Override
public void completed(Exception exception, long ledgerId, long entryId) {
    if (exception != null) {
        positionFuture.completeExceptionally(exception);
        if (log.isInfoEnabled()) {
            log.info("Failed to write entry for rest produce request: ledgerId: {}, entryId: {}. "
                    + "Triggered send callback.", ledgerId, entryId);
        }
    } else {
        if (log.isInfoEnabled()) {
            log.info("Successfully wrote entry for rest produce request on topic {}: ledgerId: {}, "
                    + "entryId: {}. Triggered send callback.", topic.getName(), ledgerId, entryId);
        }
        topic.recordAddLatency(System.nanoTime() - startTimeNs, TimeUnit.NANOSECONDS);
        positionFuture.complete(PositionImpl.get(ledgerId, entryId));
    }
    recycle();
}
3.26
pulsar_NamespaceName_getTopicName_rdh
/** * Compose the topic name from namespace + topic. * * @param domain * @param topic * @return */ String getTopicName(TopicDomain domain, String topic) { if (domain == null) { throw new IllegalArgumentException("invalid null domain"); } NamedEntity.checkName(topic); return String.format("%s://%s/%s", domain.toString(), namespace, topic); }
3.26
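getTopicName composes domain://namespace/topic; the same structure is exposed publicly through TopicName in pulsar-common, as this sketch shows:

import org.apache.pulsar.common.naming.TopicName;

class TopicNameDemo {
    public static void main(String[] args) {
        // Full name follows domain://tenant/namespace/topic, as composed above.
        TopicName tn = TopicName.get("persistent://my-tenant/my-ns/orders");
        System.out.println(tn.getNamespace()); // my-tenant/my-ns
        System.out.println(tn.getLocalName()); // orders
    }
}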
pulsar_NamespaceName_isV2_rdh
/** * Returns true if this is a V2 namespace prop/namespace-name. * * @return true if v2 */ public boolean isV2() { return cluster == null; }
3.26
pulsar_PulsarConfigurationLoader_create_rdh
/**
 * Creates a PulsarConfiguration and populates its attributes from the provided Properties object.
 *
 * @param properties
 *            The properties to populate the attributes from
 * @throws IOException
 * @throws IllegalArgumentException
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static <T extends PulsarConfiguration> T create(Properties properties,
        Class<? extends PulsarConfiguration> clazz) throws IOException, IllegalArgumentException {
    requireNonNull(properties);
    T configuration;
    try {
        configuration = (T) clazz.getDeclaredConstructor().newInstance();
        configuration.setProperties(properties);
        update((Map) properties, configuration);
    } catch (InstantiationException | IllegalAccessException | NoSuchMethodException
            | InvocationTargetException e) {
        throw new IllegalArgumentException("Failed to instantiate " + clazz.getName(), e);
    }
    return configuration;
}
3.26
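Usage mirrors the signature above; a sketch (the property key is an example, and the import paths for PulsarConfigurationLoader and ServiceConfiguration are assumed from their usual Pulsar modules):

import java.util.Properties;
import org.apache.pulsar.broker.ServiceConfiguration;
import org.apache.pulsar.common.configuration.PulsarConfigurationLoader;

class ConfigLoadDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("clusterName", "standalone"); // example key
        ServiceConfiguration conf = PulsarConfigurationLoader.create(props, ServiceConfiguration.class);
        System.out.println(conf.getClusterName());
    }
}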
pulsar_PulsarConfigurationLoader_isComplete_rdh
/**
 * Validates the {@link FieldContext} annotation on each field of the class. If a field is marked
 * required and its value is null, or a numeric value falls outside the provided (min, max) range,
 * the object is considered incomplete and an exception listing the offending parameters is thrown.
 *
 * @param obj
 * @return
 * @throws IllegalArgumentException
 *             if the object's field values do not satisfy the {@link FieldContext} constraints.
 * @throws IllegalAccessException
 */
public static boolean isComplete(Object obj) throws IllegalArgumentException {
    requireNonNull(obj);
    Field[] fields = obj.getClass().getDeclaredFields();
    StringBuilder error = new StringBuilder();
    for (Field field : fields) {
        if (field.isAnnotationPresent(FieldContext.class)) {
            field.setAccessible(true);
            Object value;
            try {
                value = field.get(obj);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
            if (log.isDebugEnabled()) {
                log.debug("Validating configuration field '{}' = '{}'", field.getName(), value);
            }
            boolean isRequired = field.getAnnotation(FieldContext.class).required();
            long minValue = field.getAnnotation(FieldContext.class).minValue();
            long maxValue = field.getAnnotation(FieldContext.class).maxValue();
            if (isRequired && isEmpty(value)) {
                error.append(String.format("Required %s is null,", field.getName()));
            }
            if (value != null && Number.class.isAssignableFrom(value.getClass())) {
                long fieldVal = ((Number) value).longValue();
                boolean valid = fieldVal >= minValue && fieldVal <= maxValue;
                if (!valid) {
                    error.append(String.format("%s value %d doesn't fit in given range (%d, %d),",
                            field.getName(), fieldVal, minValue, maxValue));
                }
            }
        }
    }
    if (error.length() > 0) {
        throw new IllegalArgumentException(error.substring(0, error.length() - 1));
    }
    return true;
}
3.26
pulsar_PulsarConfigurationLoader_convertFrom_rdh
/**
 * Converts a PulsarConfiguration object to a ServiceConfiguration object.
 *
 * @param conf
 * @param ignoreNonExistMember
 * @return
 * @throws IllegalArgumentException
 *             if conf has a field whose name is not contained in ServiceConfiguration and
 *             ignoreNonExistMember is false.
 * @throws RuntimeException
 */
public static ServiceConfiguration convertFrom(PulsarConfiguration conf, boolean ignoreNonExistMember)
        throws RuntimeException {
    try {
        final ServiceConfiguration convertedConf =
                ServiceConfiguration.class.getDeclaredConstructor().newInstance();
        Field[] confFields = conf.getClass().getDeclaredFields();
        Properties sourceProperties = conf.getProperties();
        Properties targetProperties = convertedConf.getProperties();
        Arrays.stream(confFields).forEach(confField -> {
            try {
                confField.setAccessible(true);
                Field serviceField = ServiceConfiguration.class.getDeclaredField(confField.getName());
                if (!Modifier.isStatic(serviceField.getModifiers())
                        && serviceField.getDeclaredAnnotation(FieldContext.class) != null) {
                    serviceField.setAccessible(true);
                    serviceField.set(convertedConf, confField.get(conf));
                }
            } catch (NoSuchFieldException e) {
                if (!ignoreNonExistMember) {
                    throw new IllegalArgumentException(
                            "Exception caused while converting configuration: " + e.getMessage());
                }
                // Add unknown fields to properties.
                try {
                    String fieldName = confField.getName();
                    if (!sourceProperties.containsKey(fieldName) && confField.get(conf) != null) {
                        targetProperties.put(fieldName, confField.get(conf));
                    }
                } catch (Exception ignoreException) {
                    // should not happen
                }
            } catch (IllegalAccessException e) {
                throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage());
            }
        });
        // Put the rest of the properties into the new config.
        targetProperties.putAll(sourceProperties);
        return convertedConf;
    } catch (InstantiationException | IllegalAccessException | InvocationTargetException
            | NoSuchMethodException e) {
        throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage());
    }
}
3.26
pulsar_StickyKeyConsumerSelector_select_rdh
/** * Select a consumer by sticky key. * * @param stickyKey * sticky key * @return consumer */ default Consumer select(byte[] stickyKey) { return select(makeStickyKeyHash(stickyKey)); }
3.26
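How a dispatcher might use the interface (a sketch against the broker's StickyKeyConsumerSelector and Consumer types; the byte[] overload above hashes the key and delegates to select(int)):

import java.nio.charset.StandardCharsets;
import org.apache.pulsar.broker.service.Consumer;
import org.apache.pulsar.broker.service.StickyKeyConsumerSelector;

class StickySelectDemo {
    // Route a message to a consumer by its ordering key.
    static Consumer route(StickyKeyConsumerSelector selector, String key) {
        return selector.select(key.getBytes(StandardCharsets.UTF_8));
    }
}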
pulsar_ManagedLedgerStorage_create_rdh
/**
 * Initialize the {@link ManagedLedgerStorage} from the provided resources.
 *
 * @param conf
 *            service config
 * @param metadataStore
 *            metadata store
 * @param bkProvider
 *            bookkeeper client provider
 * @param eventLoopGroup
 *            event loop group for I/O
 * @return the initialized managed ledger storage.
 */
static ManagedLedgerStorage create(ServiceConfiguration conf, MetadataStoreExtended metadataStore,
        BookKeeperClientFactory bkProvider, EventLoopGroup eventLoopGroup) throws Exception {
    ManagedLedgerStorage storage = Reflections.createInstance(conf.getManagedLedgerStorageClassName(),
            ManagedLedgerStorage.class, Thread.currentThread().getContextClassLoader());
    storage.initialize(conf, metadataStore, bkProvider, eventLoopGroup);
    return storage;
}
3.26
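Reflections.createInstance above amounts to loading the named class and invoking its no-arg constructor. A standalone equivalent of that mechanic (generic helper with a hypothetical name):

class ReflectiveFactory {
    // Load `className` with `cl`, verify it implements `iface`, and instantiate it.
    static <T> T createInstance(String className, Class<T> iface, ClassLoader cl)
            throws ReflectiveOperationException {
        Class<?> clazz = Class.forName(className, true, cl);
        return iface.cast(clazz.asSubclass(iface).getDeclaredConstructor().newInstance());
    }
}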
pulsar_PrometheusTextFormat_write004_rdh
/**
 * Provide Prometheus text format for a collection of metrics, without the HELP string.
 */
public class PrometheusTextFormat {
    /**
     * Write out the text version 0.0.4 of the given MetricFamilySamples.
     */
    public static void write004(Writer writer, Enumeration<Collector.MetricFamilySamples> mfs) throws IOException {
        /* See http://prometheus.io/docs/instrumenting/exposition_formats/ for the output format specification. */
        while (mfs.hasMoreElements()) {
            Collector.MetricFamilySamples metricFamilySamples = mfs.nextElement();
            writer.write("# TYPE ");
            writer.write(metricFamilySamples.name);
            writer.write(' ');
            writer.write(metricFamilySamples.type.name().toLowerCase());
            writer.write('\n');
            for (Collector.MetricFamilySamples.Sample sample : metricFamilySamples.samples) {
                writer.write(sample.name);
                if (sample.labelNames.size() > 0) {
                    writer.write('{');
                    for (int i = 0; i < sample.labelNames.size(); ++i) {
                        writer.write(sample.labelNames.get(i));
                        writer.write("=\"");
                        writeEscapedLabelValue(writer, sample.labelValues.get(i));
                        writer.write("\",");
                    }
                    writer.write('}');
                }
                writer.write(' ');
                writer.write(Collector.doubleToGoString(sample.value));
                if (sample.timestampMs != null) {
                    writer.write(' ');
                    writer.write(sample.timestampMs.toString());
                }
                writer.write('\n');
            }
        }
    }
3.26
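A sketch of driving the writer from the Prometheus simpleclient registry (Gauge and CollectorRegistry are simpleclient classes; PrometheusTextFormat is the class above, with its import assumed):

import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Gauge;
import java.io.StringWriter;

class MetricsDumpDemo {
    public static void main(String[] args) throws Exception {
        Gauge gauge = Gauge.build().name("demo_value").help("demo").register();
        gauge.set(42);

        StringWriter out = new StringWriter();
        // Emits "# TYPE demo_value gauge" followed by "demo_value 42.0".
        PrometheusTextFormat.write004(out, CollectorRegistry.defaultRegistry.metricFamilySamples());
        System.out.println(out);
    }
}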
pulsar_OffloadIndexBlock_getStreamSize_rdh
/**
 * @return the number of bytes in the stream.
 */
public long getStreamSize() {
    return streamSize;
}
3.26
pulsar_ConcurrentOpenLongPairRangeSet_addOpenClosed_rdh
/**
 * Adds the specified range to this {@code RangeSet} (optional operation). That is, for equal range sets a and b,
 * the result of {@code a.add(range)} is that {@code a} will be the minimal range set for which both
 * {@code a.enclosesAll(b)} and {@code a.encloses(range)}.
 *
 * <p>Note that {@code range} will merge given {@code range} with any ranges in the range set that are
 * {@linkplain Range#isConnected(Range) connected} with it. Moreover, if {@code range} is empty, this is a no-op.
 */
@Override
public void addOpenClosed(long lowerKey, long lowerValueOpen, long upperKey, long upperValue) {
    long lowerValue = lowerValueOpen + 1;
    if (lowerKey != upperKey) {
        // (1) set lower to last in lowerRange.getKey()
        if (isValid(lowerKey, lowerValue)) {
            BitSet rangeBitSet = rangeBitSetMap.get(lowerKey);
            // If lower and upper have different keys/ledgers, then set ranges for the lower key only if
            // a. the bitset already exists and the given value is not the last value in the bitset.
            // This prevents setting values that are not actually expected to be set.
            // eg: (2:10..4:10] in this case, don't set any value for 2:10 and set [4:0..4:10]
            if (rangeBitSet != null && rangeBitSet.previousSetBit(rangeBitSet.size()) > lowerValueOpen) {
                int lastValue = rangeBitSet.previousSetBit(rangeBitSet.size());
                rangeBitSet.set((int) lowerValue, (int) Math.max(lastValue, lowerValue) + 1);
            }
        }
        // (2) set 0th-index to upper-index in upperRange.getKey()
        if (isValid(upperKey, upperValue)) {
            BitSet rangeBitSet = rangeBitSetMap.computeIfAbsent(upperKey, key -> createNewBitSet());
            if (rangeBitSet != null) {
                rangeBitSet.set(0, (int) upperValue + 1);
            }
        }
        // No-op if values are not valid, eg: if lower == LongPair.earliest or upper == LongPair.latest,
        // then there is nothing to set
    } else {
        long key = lowerKey;
        BitSet rangeBitSet = rangeBitSetMap.computeIfAbsent(key, k -> createNewBitSet());
        rangeBitSet.set((int) lowerValue, (int) upperValue + 1);
    }
    // Mark the set as updated so cached derived values are recomputed.
    f0 = true;
    f1 = true;
}
3.26
pulsar_ConcurrentOpenLongPairRangeSet_add_rdh
/**
 * Adds the specified range to this {@code RangeSet} (optional operation). That is, for equal range sets a and b,
 * the result of {@code a.add(range)} is that {@code a} will be the minimal range set for which both
 * {@code a.enclosesAll(b)} and {@code a.encloses(range)}.
 *
 * <p>Note that {@code range} will merge given {@code range} with any ranges in the range set that are
 * {@linkplain Range#isConnected(Range) connected} with it. Moreover, if {@code range} is empty/invalid, this is a
 * no-op.
 */
public void add(Range<LongPair> range) {
    LongPair lowerEndpoint = range.hasLowerBound() ? range.lowerEndpoint() : LongPair.earliest;
    LongPair upperEndpoint = range.hasUpperBound() ? range.upperEndpoint() : LongPair.latest;
    long lowerValueOpen = (range.hasLowerBound() && range.lowerBoundType().equals(BoundType.CLOSED))
            ? getSafeEntry(lowerEndpoint) - 1 : getSafeEntry(lowerEndpoint);
    long upperValueClosed = (range.hasUpperBound() && range.upperBoundType().equals(BoundType.CLOSED))
            ? getSafeEntry(upperEndpoint) : getSafeEntry(upperEndpoint) + 1;
    // #addOpenClosed doesn't create a bitset for the lower key, since it avoids setting values for
    // non-existent items in that ledger. So create and initialize the bitset here, so the lower
    // bound can't be ignored by #addOpenClosed.
    rangeBitSetMap.computeIfAbsent(lowerEndpoint.getKey(), key -> createNewBitSet())
            .set((int) lowerValueOpen + 1);
    this.addOpenClosed(lowerEndpoint.getKey(), lowerValueOpen, upperEndpoint.getKey(), upperValueClosed);
}
3.26
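The merge behavior both javadocs above describe matches Guava's RangeSet contract; a small Guava-only illustration of connected ranges coalescing (Guava types, not the Pulsar implementation above):

import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

class RangeMergeDemo {
    public static void main(String[] args) {
        RangeSet<Long> set = TreeRangeSet.create();
        set.add(Range.openClosed(0L, 10L));
        // Connected with the first range, so the two coalesce into (0..20].
        set.add(Range.openClosed(10L, 20L));
        System.out.println(set); // [(0..20]]
    }
}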
pulsar_FunctionRuntimeManager_getFunctionStats_rdh
/**
 * Get stats of all function instances.
 *
 * @param tenant
 *            the tenant the function belongs to
 * @param namespace
 *            the namespace the function belongs to
 * @param functionName
 *            the function name
 * @return the aggregated function stats
 * @throws PulsarAdminException
 */
public FunctionStatsImpl getFunctionStats(String tenant, String namespace, String functionName, URI uri)
        throws PulsarAdminException {
    Collection<Assignment> assignments = this.findFunctionAssignments(tenant, namespace, functionName);
    FunctionStatsImpl functionStats = new FunctionStatsImpl();
    if (assignments.isEmpty()) {
        return functionStats;
    }
    if (runtimeFactory.externallyManaged()) {
        Assignment assignment = assignments.iterator().next();
        boolean isOwner = this.workerConfig.getWorkerId().equals(assignment.getWorkerId());
        if (isOwner) {
            int parallelism = assignment.getInstance().getFunctionMetaData().getFunctionDetails().getParallelism();
            for (int i = 0; i < parallelism; ++i) {
                FunctionInstanceStatsDataImpl functionInstanceStatsData =
                        getFunctionInstanceStats(tenant, namespace, functionName, i, null);
                FunctionInstanceStatsImpl functionInstanceStats = new FunctionInstanceStatsImpl();
                functionInstanceStats.setInstanceId(i);
                functionInstanceStats.setMetrics(functionInstanceStatsData);
                functionStats.addInstance(functionInstanceStats);
            }
        } else {
            // Find the hostname/port of the worker who is the owner.
            List<WorkerInfo> workerInfoList = this.membershipManager.getCurrentMembership();
            WorkerInfo workerInfo = null;
            for (WorkerInfo entry : workerInfoList) {
                if (assignment.getWorkerId().equals(entry.getWorkerId())) {
                    workerInfo = entry;
                }
            }
            if (workerInfo == null) {
                return functionStats;
            }
            if (uri == null) {
                throw new WebApplicationException(
                        Response.serverError().status(Status.INTERNAL_SERVER_ERROR).build());
            } else {
                URI redirect = UriBuilder.fromUri(uri).host(workerInfo.getWorkerHostname())
                        .port(workerInfo.getPort()).build();
                throw new WebApplicationException(Response.temporaryRedirect(redirect).build());
            }
        }
    } else {
        for (Assignment assignment : assignments) {
            boolean isOwner = this.workerConfig.getWorkerId().equals(assignment.getWorkerId());
            FunctionInstanceStatsDataImpl functionInstanceStatsData;
            if (isOwner) {
                functionInstanceStatsData = getFunctionInstanceStats(tenant, namespace, functionName,
                        assignment.getInstance().getInstanceId(), null);
            } else {
                functionInstanceStatsData = (FunctionInstanceStatsDataImpl) this.functionAdmin.functions()
                        .getFunctionStats(
                                assignment.getInstance().getFunctionMetaData().getFunctionDetails().getTenant(),
                                assignment.getInstance().getFunctionMetaData().getFunctionDetails().getNamespace(),
                                assignment.getInstance().getFunctionMetaData().getFunctionDetails().getName(),
                                assignment.getInstance().getInstanceId());
            }
            FunctionInstanceStatsImpl functionInstanceStats = new FunctionInstanceStatsImpl();
            functionInstanceStats.setInstanceId(assignment.getInstance().getInstanceId());
            functionInstanceStats.setMetrics(functionInstanceStatsData);
            functionStats.addInstance(functionInstanceStats);
        }
    }
    return functionStats.calculateOverall();
}
3.26