name | code_snippet | score |
---|---|---|
pulsar_SimpleLoadManagerImpl_m2_rdh | /**
 * Rank brokers by available capacity, or load percentage, based on placement strategy:
 *
 * - Available capacity for weighted random selection (weightedRandomSelection): ranks ResourceUnits based on an
 * estimation of their capacity, which is essentially how many bundles each ResourceUnit is able to handle with its
 * available resources (CPU, memory, network, etc.);
 *
 * - Load percentage for least loaded server (leastLoadedServer): ranks ResourceUnits based on an estimation of
 * their load percentage, i.e. what percentage of resources is allocated, computed as
 * max(resource_actually_used, resource_quota)
 *
 * If we fail to collect the Load Reports OR fail to process them for the first time, the leader does not
 * have enough information to make a decision, so we only set it to ready once we collect and process the load
 * reports successfully for the first time.
 */
private synchronized void m2() {
    ResourceUnitRanking.setCpuUsageByMsgRate(this.realtimeCpuLoadFactor);
    String hostname = pulsar.getAdvertisedAddress();
    String strategy = this.getLoadBalancerPlacementStrategy();
    log.info("doLoadRanking - load balancing strategy: {}", strategy);
    if (!currentLoadReports.isEmpty()) {
        Map<Long, Set<ResourceUnit>> newSortedRankings = new TreeMap<>();
        Map<ResourceUnit, ResourceUnitRanking> newResourceUnitRankings = new HashMap<>();
        ResourceQuota defaultResourceQuota =
                pulsar.getBrokerService().getBundlesQuotas().getDefaultResourceQuota().join();
        for (Map.Entry<ResourceUnit, LoadReport> entry : currentLoadReports.entrySet()) {
            ResourceUnit resourceUnit = entry.getKey();
            LoadReport loadReport = entry.getValue();
            // calculate rankings
            Set<String> loadedBundles = loadReport.getBundles();
            Set<String> preAllocatedBundles;
            if (resourceUnitRankings.containsKey(resourceUnit)) {
                preAllocatedBundles = resourceUnitRankings.get(resourceUnit).getPreAllocatedBundles();
                preAllocatedBundles.removeAll(loadedBundles);
            } else {
                preAllocatedBundles = new HashSet<>();
            }
            ResourceQuota allocatedQuota = getTotalAllocatedQuota(loadedBundles);
            ResourceQuota preAllocatedQuota = getTotalAllocatedQuota(preAllocatedBundles);
            ResourceUnitRanking ranking = new ResourceUnitRanking(loadReport.getSystemResourceUsage(),
                    loadedBundles, allocatedQuota, preAllocatedBundles, preAllocatedQuota);
            newResourceUnitRankings.put(resourceUnit, ranking);
            // generate sorted ranking
            double loadPercentage = ranking.getEstimatedLoadPercentage();
            long maxCapacity = ranking.estimateMaxCapacity(defaultResourceQuota);
            long finalRank = 0;
            if (strategy.equals(LOADBALANCER_STRATEGY_LLS)) {
                finalRank = (long) loadPercentage;
            } else if (strategy.equals(LOADBALANCER_STRATEGY_LEAST_MSG)) {
                finalRank = (long) ranking.getEstimatedMessageRate();
            } else {
                double idleRatio = (100 - loadPercentage) / 100;
                finalRank = (long) (maxCapacity * idleRatio * idleRatio);
            }
            newSortedRankings.computeIfAbsent(finalRank, k -> new HashSet<>()).add(entry.getKey());
            if (log.isDebugEnabled()) {
                log.debug("Added Resource Unit [{}] with Rank [{}]", entry.getKey().getResourceId(), finalRank);
            }
            // update metrics
            if (resourceUnit.getResourceId().contains(hostname)) {
                updateLoadBalancingMetrics(hostname, finalRank, ranking);
            }
        }
        updateBrokerToNamespaceToBundle();
        this.sortedRankings.set(newSortedRankings);
        this.resourceUnitRankings = newResourceUnitRankings;
    } else {
        log.info("Leader broker[{}] No ResourceUnits to rank this run, Using Old Ranking",
                pulsar.getSafeWebServiceAddress());
    }
} | 3.26 |
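To make the default (weighted random selection) rank formula above concrete, here is a small self-contained sketch of the same arithmetic; the sample capacity and load numbers are invented for illustration.

public class RankFormulaExample {
    public static void main(String[] args) {
        // Invented sample values: a broker estimated to handle 1000 bundles at 40% load.
        long maxCapacity = 1000;
        double loadPercentage = 40.0;

        // Same formula as the default branch above: rank = maxCapacity * idleRatio^2.
        double idleRatio = (100 - loadPercentage) / 100;
        long finalRank = (long) (maxCapacity * idleRatio * idleRatio);

        // Prints 360: a broker at 40% load ranks well below a fully idle one (rank 1000).
        System.out.println(finalRank);
    }
}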
pulsar_NettyChannelUtil_writeAndFlushWithClosePromise_rdh | /**
 * Write and flush the message to the channel, and then close the channel.
 *
 * This method is particularly helpful when the connection is in an invalid state
 * and therefore a new connection must be created to continue.
 *
 * @param ctx
 * channel's context
 * @param msg
 * buffer to write in the channel
 */
public static void writeAndFlushWithClosePromise(ChannelOutboundInvoker ctx, ByteBuf msg) {
    ctx.writeAndFlush(msg).addListener(ChannelFutureListener.CLOSE);
} | 3.26 |
pulsar_NettyChannelUtil_writeAndFlushWithVoidPromise_rdh | /**
 * Write and flush the message to the channel.
 *
 * The promise is an instance of {@link VoidChannelPromise} that properly propagates exceptions up to the pipeline.
 * Netty has many ad-hoc optimizations that apply if the promise is an instance of {@link VoidChannelPromise}.
 * Lastly, it reduces pollution of useless {@link io.netty.channel.ChannelPromise} objects created
 * by the default write and flush method {@link ChannelOutboundInvoker#writeAndFlush(Object)}.
 * See https://stackoverflow.com/q/54169262 and https://stackoverflow.com/a/9030420 for more details.
 *
 * @param ctx
 * channel's context
 * @param msg
 * buffer to write in the channel
 */
public static void writeAndFlushWithVoidPromise(ChannelOutboundInvoker ctx, ByteBuf msg) {
    ctx.writeAndFlush(msg, ctx.voidPromise());
} | 3.26 |
pulsar_GrowableArrayBlockingQueue_terminate_rdh | /**
 * Make the queue stop accepting new items. If an item still tries to enter the queue afterwards, it will be
 * handled by {@code itemAfterTerminatedHandler}.
 */
public void terminate(@Nullable Consumer<T> itemAfterTerminatedHandler) {
    // Acquiring the write lock waits for in-flight enqueues to finish; after that, termination is complete.
    long stamp = tailLock.writeLock();
    try {
        terminated = true;
        if (itemAfterTerminatedHandler != null) {
            this.itemAfterTerminatedHandler = itemAfterTerminatedHandler;
        }
    } finally {
        tailLock.unlockWrite(stamp);
    }
} | 3.26 |
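A usage sketch for terminate(...), assuming the queue holds reference-counted buffers that must be released if they arrive after termination; the no-arg constructor and buffer type are illustrative assumptions.

import io.netty.buffer.ByteBuf;
import org.apache.pulsar.common.util.collections.GrowableArrayBlockingQueue;

public class QueueTerminateExample {
    public static void main(String[] args) {
        // Illustrative shutdown path: items that slip in after termination are
        // released by the handler instead of being silently dropped.
        GrowableArrayBlockingQueue<ByteBuf> queue = new GrowableArrayBlockingQueue<>();
        queue.terminate(lateBuf -> lateBuf.release());
    }
}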
pulsar_ConsumerInterceptors_onAcknowledge_rdh | /**
 * This is called when an acknowledge request returns from the broker.
 * <p>
 * This method calls {@link ConsumerInterceptor#onAcknowledge(Consumer, MessageId, Throwable)} method for each
 * interceptor.
 * <p>
 * This method does not throw exceptions. Exceptions thrown by any of the interceptors in the chain are logged,
 * but not propagated.
 *
 * @param consumer
 * the consumer which contains the interceptors
 * @param messageId
 * message to acknowledge.
 * @param exception
 * exception returned by broker.
 */
public void onAcknowledge(Consumer<T> consumer, MessageId messageId, Throwable exception) {
    for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
        try {
            interceptors.get(i).onAcknowledge(consumer, messageId, exception);
        } catch (Throwable e) {
            log.warn("Error executing interceptor onAcknowledge callback ", e);
        }
    }
} | 3.26 |
pulsar_ConsumerInterceptors_onNegativeAcksSend_rdh | /**
* This is called when a redelivery from a negative acknowledge occurs.
* <p>
* This method calls {@link ConsumerInterceptor#onNegativeAcksSend(Consumer, Set)
* onNegativeAcksSend(Consumer, Set<MessageId>)} method for each interceptor.
* <p>
* This method does not throw exceptions. Exceptions thrown by any of interceptors in the chain are logged, but not
* propagated.
*
* @param consumer
* the consumer which contains the interceptors.
* @param messageIds
 * set of message IDs being redelivered due to a negative acknowledgment.
*/
public void onNegativeAcksSend(Consumer<T> consumer, Set<MessageId> messageIds) {
for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
try {
interceptors.get(i).onNegativeAcksSend(consumer, messageIds);
} catch (Throwable e) {
log.warn("Error executing interceptor onNegativeAcksSend callback", e);
}
}
} | 3.26 |
pulsar_ConsumerInterceptors_onAcknowledgeCumulative_rdh | /**
 * This is called when a cumulative acknowledge request returns from the broker.
 * <p>
 * This method calls {@link ConsumerInterceptor#onAcknowledgeCumulative(Consumer, MessageId, Throwable)} method
 * for each interceptor.
 * <p>
 * This method does not throw exceptions. Exceptions thrown by any of the interceptors in the chain are logged,
 * but not propagated.
 *
 * @param consumer
 * the consumer which contains the interceptors
 * @param messageId
 * message to acknowledge cumulatively.
 * @param exception
 * exception returned by broker.
 */
public void onAcknowledgeCumulative(Consumer<T> consumer, MessageId messageId, Throwable exception) {
    for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
        try {
            interceptors.get(i).onAcknowledgeCumulative(consumer, messageId, exception);
        } catch (Throwable e) {
            log.warn("Error executing interceptor onAcknowledgeCumulative callback ", e);
        }
    }
} | 3.26 |
pulsar_ConsumerInterceptors_onAckTimeoutSend_rdh | /**
* This is called when a redelivery from an acknowledge timeout occurs.
* <p>
* This method calls {@link ConsumerInterceptor#onAckTimeoutSend(Consumer, Set)
* onAckTimeoutSend(Consumer, Set<MessageId>)} method for each interceptor.
* <p>
* This method does not throw exceptions. Exceptions thrown by any of interceptors in the chain are logged, but not
* propagated.
*
* @param consumer
* the consumer which contains the interceptors.
 * @param messageIds
 * set of message IDs being redelivered due to an acknowledge timeout.
 */
public void onAckTimeoutSend(Consumer<T> consumer, Set<MessageId> messageIds) {
    for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
        try {
            interceptors.get(i).onAckTimeoutSend(consumer, messageIds);
        } catch (Throwable e) {
            log.warn("Error executing interceptor onAckTimeoutSend callback", e);
        }
    }
} | 3.26 |
pulsar_ConsumerInterceptors_m0_rdh | /**
* This is called just before the message is returned by {@link Consumer#receive()},
* {@link MessageListener#received(Consumer, Message)} or the {@link java.util.concurrent.CompletableFuture}
* returned by {@link Consumer#receiveAsync()} completes.
* <p>
* This method calls {@link ConsumerInterceptor#beforeConsume(Consumer, Message)} for each interceptor. Messages
* returned from each interceptor get passed to beforeConsume() of the next interceptor in the chain of
* interceptors.
* <p>
 * This method does not throw exceptions. If any of the interceptors in the chain throws an exception, it gets
 * caught and logged, and the next interceptor in the chain is called with the message returned by the previous
 * successful interceptor's beforeConsume call.
 *
 * @param consumer
 * the consumer which contains the interceptors
 * @param message
 * message to be consumed by the client.
 * @return message that is either modified by interceptors or the same as the message passed to this method.
 */
public Message<T> m0(Consumer<T> consumer, Message<T> message) {
Message<T> interceptorMessage = message;
for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
try {
interceptorMessage = interceptors.get(i).beforeConsume(consumer, interceptorMessage);
} catch (Throwable e) {
if (consumer != null) {
log.warn("Error executing interceptor beforeConsume callback topic: {} consumerName: {}", consumer.getTopic(), consumer.getConsumerName(), e);
} else {
log.warn("Error executing interceptor beforeConsume callback", e);
}
}
}
return interceptorMessage;
} | 3.26 |
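For reference, a minimal interceptor that could sit in this chain; it counts messages and passes them through unchanged. The exact set of methods on ConsumerInterceptor may vary slightly across Pulsar versions, so treat this as a sketch.

import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.ConsumerInterceptor;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.MessageId;

// Pass-through interceptor: whatever beforeConsume returns is handed to the next
// interceptor in the chain, so returning the input leaves messages untouched.
public class CountingInterceptor<T> implements ConsumerInterceptor<T> {
    private final AtomicLong received = new AtomicLong();

    @Override
    public Message<T> beforeConsume(Consumer<T> consumer, Message<T> message) {
        received.incrementAndGet();
        return message;
    }

    @Override
    public void onAcknowledge(Consumer<T> consumer, MessageId messageId, Throwable exception) {
    }

    @Override
    public void onAcknowledgeCumulative(Consumer<T> consumer, MessageId messageId, Throwable exception) {
    }

    @Override
    public void onNegativeAcksSend(Consumer<T> consumer, Set<MessageId> messageIds) {
    }

    @Override
    public void onAckTimeoutSend(Consumer<T> consumer, Set<MessageId> messageIds) {
    }

    @Override
    public void close() {
    }
}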
pulsar_MessageIdImpl_m0_rdh | // batchIndex is -1 for a non-batched message; for a batch message it holds the index within the batch
protected byte[] m0(int batchIndex, int batchSize) {
MessageIdData msgId = writeMessageIdData(null, batchIndex, batchSize);
int size = msgId.getSerializedSize();
ByteBuf serialized = Unpooled.buffer(size, size);
msgId.writeTo(serialized);
return serialized.array();
} | 3.26 |
pulsar_AbstractReplicator_startProducer_rdh | // This method needs to be synchronized with disconnects; otherwise, if a disconnect is followed by
// startProducer, the end result can be a disconnected replicator.
public synchronized void startProducer() {
    if (STATE_UPDATER.get(this) == State.Stopping) {
        long waitTimeMs = backOff.next();
        if (log.isDebugEnabled()) {
            log.debug("[{}] waiting for producer to close before attempting to reconnect, retrying in {} s",
                    replicatorId, waitTimeMs / 1000.0);
        }
        // Back off before retrying
        brokerService.executor().schedule(this::checkTopicActiveAndRetryStartProducer, waitTimeMs,
                TimeUnit.MILLISECONDS);
        return;
    }
    State state = STATE_UPDATER.get(this);
    if (!STATE_UPDATER.compareAndSet(this, State.Stopped, State.Starting)) {
        if (state == State.Started) {
            // Already running
            if (log.isDebugEnabled()) {
                log.debug("[{}] Replicator was already running", replicatorId);
            }
        } else {
            log.info("[{}] Replicator already being started. Replicator state: {}", replicatorId, state);
        }
        return;
    }
    log.info("[{}] Starting replicator", replicatorId);
    producerBuilder.createAsync().thenAccept(producer -> {
        readEntries(producer);
    }).exceptionally(ex -> {
        if (STATE_UPDATER.compareAndSet(this, State.Starting, State.Stopped)) {
            long waitTimeMs = backOff.next();
            log.warn("[{}] Failed to create remote producer ({}), retrying in {} s", replicatorId,
                    ex.getMessage(), waitTimeMs / 1000.0);
            // Back off before retrying
            brokerService.executor().schedule(this::checkTopicActiveAndRetryStartProducer, waitTimeMs,
                    TimeUnit.MILLISECONDS);
        } else {
            log.warn("[{}] Failed to create remote producer. Replicator state: {}", replicatorId,
                    STATE_UPDATER.get(this), ex);
        }
        return null;
    });
} | 3.26 |
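The Stopped-to-Starting compare-and-set above is what makes concurrent start attempts safe. Below is a stripped-down sketch of the same guard pattern with a simplified state enum (not Pulsar's); only the thread that wins the transition performs the startup work.

import java.util.concurrent.atomic.AtomicReference;

// Minimal sketch of the CAS start guard used above.
public class StartGuardExample {
    enum State { Stopped, Starting, Started }

    private final AtomicReference<State> state = new AtomicReference<>(State.Stopped);

    public void start() {
        if (!state.compareAndSet(State.Stopped, State.Starting)) {
            return; // another thread is already starting, or we are already started
        }
        try {
            // ... create the producer / open resources here ...
            state.set(State.Started);
        } catch (Exception e) {
            state.set(State.Stopped); // roll back so a later attempt can retry
        }
    }
}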
pulsar_AbstractReplicator_validatePartitionedTopicAsync_rdh | /**
* Replication can't be started on root-partitioned-topic to avoid producer startup conflict.
*
* <pre>
* eg:
* if topic : persistent://prop/cluster/ns/my-topic is a partitioned topic with 2 partitions then
* broker explicitly creates replicator producer for: "my-topic-partition-1" and "my-topic-partition-2".
*
* However, if broker tries to start producer with root topic "my-topic" then client-lib internally
* creates individual producers for "my-topic-partition-1" and "my-topic-partition-2" which creates
* conflict with existing
* replicator producers.
* </pre>
*
* Therefore, replicator can't be started on root-partition topic which can internally create multiple partitioned
* producers.
*
* @param topic
* @param brokerService
*/
public static CompletableFuture<Void> validatePartitionedTopicAsync(String topic, BrokerService brokerService) {
TopicName topicName = TopicName.get(topic);
return brokerService.pulsar().getPulsarResources().getNamespaceResources().getPartitionedTopicResources().partitionedTopicExistsAsync(topicName).thenCompose(isPartitionedTopic -> {
if (isPartitionedTopic) {
String s = topicName + " is a partitioned-topic and replication can't be started for partitioned-producer ";
log.error(s);
return FutureUtil.failedFuture(new NamingException(s));
}
return CompletableFuture.completedFuture(null);
});
} | 3.26 |
pulsar_MessagePayload_release_rdh | /**
* Release the resources if necessary.
*
* NOTE: For a MessagePayload object that is created from {@link MessagePayloadFactory#DEFAULT}, this method must be
* called to avoid memory leak.
*/
default void release() {
// No ops
} | 3.26 |
pulsar_ManagedLedger_skipNonRecoverableLedger_rdh | /**
 * If a ledger is lost, it will be skipped once "autoSkipNonRecoverableData" is enabled; this method is
 * used to delete the information about that ledger from the ManagedCursor.
*/
default void skipNonRecoverableLedger(long ledgerId) {
} | 3.26 |
pulsar_ResourceLockImpl_acquireWithNoRevalidation_rdh | // Simple operation of acquiring the lock with no retries and no checking of the lock content
private CompletableFuture<Void> acquireWithNoRevalidation(T newValue) {
    if (log.isDebugEnabled()) {
        log.debug("acquireWithNoRevalidation,newValue={},version={}", newValue, version);
    }
    byte[] payload;
    try {
        payload = serde.serialize(path, newValue);
    } catch (Throwable t) {
        return FutureUtils.exception(t);
    }
    CompletableFuture<Void> result = new CompletableFuture<>();
    store.put(path, payload, Optional.of(version), EnumSet.of(CreateOption.Ephemeral)).thenAccept(stat -> {
        synchronized (this) {
            state = State.Valid;
            version = stat.getVersion();
            value = newValue;
        }
        log.info("Acquired resource lock on {}", path);
        result.complete(null);
    }).exceptionally(ex -> {
        if (ex.getCause() instanceof BadVersionException) {
            result.completeExceptionally(new LockBusyException("Resource at " + path + " is already locked"));
        } else {
            result.completeExceptionally(ex.getCause());
        }
        return null;
    });
    return result;
} | 3.26 |
pulsar_ResourceLockImpl_silentRevalidateOnce_rdh | /**
 * Revalidate the distributed lock if it has not been released.
 * This method is thread-safe; multiple revalidation operations are performed sequentially.
 */
synchronized CompletableFuture<Void> silentRevalidateOnce() {
    return sequencer.sequential(() -> revalidate(value))
            .thenRun(() -> log.info("Successfully revalidated the lock on {}", path))
            .exceptionally(ex -> {
                synchronized (this) {
                    Throwable realCause = FutureUtil.unwrapCompletionException(ex);
                    if ((realCause instanceof BadVersionException) || (realCause instanceof LockBusyException)) {
                        log.warn("Failed to revalidate the lock at {}. Marked as expired. {}", path,
                                realCause.getMessage());
                        state = State.Released;
                        expiredFuture.complete(null);
                    } else {
                        // We failed to revalidate the lock due to a connectivity issue.
                        // Continue assuming we hold the lock, until we can revalidate it, either
                        // on Reconnected or SessionReestablished events.
                        revalidateAfterReconnection = true;
                        log.warn("Failed to revalidate the lock at {}. Retrying later on reconnection {}", path,
                                realCause.getMessage());
                    }
                }
                return null;
            });
} | 3.26 |
pulsar_WorkerServiceLoader_load_rdh | /**
 * Load the worker service from the given nar package.
 *
 * @param wsNarPackage
 * worker service nar package
 * @param narExtractionDirectory
 * the directory into which the nar is extracted
 * @return the worker service
 */
static WorkerService load(String wsNarPackage, String narExtractionDirectory) {
    if (isEmpty(wsNarPackage)) {
        return new PulsarWorkerService();
    }
    WorkerServiceDefinition definition;
    try {
        definition = getWorkerServiceDefinition(wsNarPackage, narExtractionDirectory);
    } catch (IOException ioe) {
        log.error("Failed to get the worker service definition from {}", wsNarPackage, ioe);
        throw new RuntimeException("Failed to get the worker service definition from " + wsNarPackage, ioe);
    }
    WorkerServiceMetadata metadata = new WorkerServiceMetadata();
    Path narPath = Paths.get(wsNarPackage);
    metadata.setArchivePath(narPath);
    metadata.setDefinition(definition);
    WorkerServiceWithClassLoader service;
    try {
        service = load(metadata, narExtractionDirectory);
    } catch (IOException e) {
        log.error("Failed to load the worker service {}", metadata, e);
        throw new RuntimeException("Failed to load the worker service " + metadata, e);
    }
    log.info("Successfully loaded worker service {}", metadata);
    return service;
} | 3.26 |
pulsar_WorkerServiceLoader_getWorkerServiceDefinition_rdh | /**
 * Retrieve the functions worker service definition from the provided worker service nar package.
 *
 * @param narPath
 * the path to the worker service NAR package
 * @param narExtractionDirectory
 * the directory into which the nar is extracted
 * @return the worker service definition
 * @throws IOException
 * when failing to load the worker service or get the definition
 */
public static WorkerServiceDefinition getWorkerServiceDefinition(String narPath, String narExtractionDirectory)
        throws IOException {
    try (NarClassLoader ncl = NarClassLoaderBuilder.builder().narFile(new File(narPath))
            .extractionDirectory(narExtractionDirectory).build()) {
        return getWorkerServiceDefinition(ncl);
    }
} | 3.26 |
pulsar_TimeWindow_current_rdh | /**
 * Return the current time window data.
 *
 * @param function
 * function used to generate or update the window data.
 * @return the wrapper of the current time window.
 */
public synchronized WindowWrap<T> current(Function<T, T> function) {
    long millis = System.currentTimeMillis();
    if (millis < 0) {
        return null;
    }
    int idx = calculateTimeIdx(millis);
    long windowStart = calculateWindowStart(millis);
    while (true) {
        WindowWrap<T> old = array.get(idx);
        if (old == null) {
            WindowWrap<T> window = new WindowWrap<>(interval, windowStart, null);
            if (array.compareAndSet(idx, null, window)) {
                T value = (null == function) ? null : function.apply(null);
                window.value(value);
                return window;
            } else {
                Thread.yield();
            }
        } else if (windowStart == old.start()) {
            return old;
        } else if (windowStart > old.start()) {
            // The slot holds an expired window: reuse it in place for the new window.
            T value = (null == function) ? null : function.apply(old.value());
            old.value(value);
            old.resetWindowStart(windowStart);
            return old;
        } else {
            // it should never go here
            throw new IllegalStateException();
        }
    }
} | 3.26 |
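To make the bucket-reuse idea concrete, here is a minimal self-contained sketch of the same sliding-window scheme; the slot count, interval, and counter payload are illustrative, not Pulsar's.

import java.util.concurrent.atomic.AtomicReferenceArray;

// Illustrative ring of time buckets: a slot is lazily created the first time its
// window is touched, and reused in place once the previous window has expired.
public class SimpleTimeWindowCounter {
    private static final int SLOTS = 10;
    private static final long INTERVAL_MS = 1000;

    static final class Bucket {
        long windowStart;
        long count;
    }

    private final AtomicReferenceArray<Bucket> ring = new AtomicReferenceArray<>(SLOTS);

    public synchronized void record() {
        long now = System.currentTimeMillis();
        int idx = (int) ((now / INTERVAL_MS) % SLOTS);      // analogous to calculateTimeIdx
        long windowStart = now - (now % INTERVAL_MS);       // analogous to calculateWindowStart
        Bucket bucket = ring.get(idx);
        if (bucket == null) {
            bucket = new Bucket();
            bucket.windowStart = windowStart;
            ring.set(idx, bucket);
        } else if (windowStart > bucket.windowStart) {
            // Slot holds an expired window: reset it for the new window.
            bucket.windowStart = windowStart;
            bucket.count = 0;
        }
        bucket.count++;
    }
}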
pulsar_OpenIDProviderMetadataCache_verifyIssuer_rdh | /**
* Verify the issuer url, as required by the OpenID Connect spec:
*
* Per the OpenID Connect Discovery spec, the issuer value returned MUST be identical to the
* Issuer URL that was directly used to retrieve the configuration information. This MUST also
* be identical to the iss Claim value in ID Tokens issued from this Issuer.
* https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
*
* @param issuer
* - the issuer used to retrieve the metadata
* @param metadata
* - the OpenID Provider Metadata
* @param isK8s
* - whether the issuer is represented by the Kubernetes API server. This affects error reporting.
* @throws AuthenticationException
* if the issuer does not exactly match the metadata issuer
*/
private void verifyIssuer(@Nonnull String issuer, OpenIDProviderMetadata metadata, boolean isK8s)
        throws AuthenticationException {
if (!issuer.equals(metadata.getIssuer())) {
if (isK8s) {
incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER);
throw new AuthenticationException("Issuer not allowed: " + issuer);
} else {
incrementFailureMetric(AuthenticationExceptionCode.ISSUER_MISMATCH);
throw new AuthenticationException(String.format("Issuer URL mismatch: [%s] should match [%s]",
issuer, metadata.getIssuer()));
}
}
} | 3.26 |
pulsar_OpenIDProviderMetadataCache_getOpenIDProviderMetadataForKubernetesApiServer_rdh | /**
 * Retrieve the OpenID Provider Metadata for the Kubernetes API server. This method is used instead of
 * {@link #getOpenIDProviderMetadataForIssuer(String)} because different validations are done. The Kubernetes
 * API server does not technically implement the complete OIDC spec for discovery, but it does implement some of
 * it, so this method validates what it can. Specifically, it skips validation that the Discovery Document
 * provider's URI matches the issuer. It verifies that the issuer on the discovery document matches the issuer
 * claim.
 *
 * @return a future that completes with the provider metadata, or exceptionally on validation failure.
 */
CompletableFuture<OpenIDProviderMetadata> getOpenIDProviderMetadataForKubernetesApiServer(String issClaim) {
    return cache.get(Optional.empty()).thenCompose(openIDProviderMetadata -> {
        CompletableFuture<OpenIDProviderMetadata> future = new CompletableFuture<>();
        try {
            verifyIssuer(issClaim, openIDProviderMetadata, true);
            future.complete(openIDProviderMetadata);
        } catch (AuthenticationException e) {
            incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PROVIDER_METADATA);
            future.completeExceptionally(e);
        }
        return future;
    });
} | 3.26 |
pulsar_LegacyHierarchicalLedgerRangeIterator_getLedgerRangeByLevel_rdh | /**
 * Get the ledger range stored under a single node level1/level2.
 *
 * @param level1
 * 1st level node name
 * @param level2
 * 2nd level node name
 * @return the ledger range under the given node
 * @throws IOException
 */
LedgerRange getLedgerRangeByLevel(final String level1, final String level2) throws IOException {
    StringBuilder nodeBuilder = threadLocalNodeBuilder.get();
    nodeBuilder.setLength(0);
    nodeBuilder.append(ledgersRoot).append("/").append(level1).append("/").append(level2);
    String nodePath = nodeBuilder.toString();
    List<String> ledgerNodes = null;
    try {
        ledgerNodes = store.getChildren(nodePath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
    } catch (ExecutionException | TimeoutException e) {
        throw new IOException("Error when get child nodes from zk", e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Error when get child nodes from zk", e);
    }
    NavigableSet<Long> zkActiveLedgers = HierarchicalLedgerUtils.ledgerListToSet(ledgerNodes, ledgersRoot,
            nodePath);
    if (log.isDebugEnabled()) {
        log.debug("All active ledgers from ZK for hash node " + level1 + "/" + level2 + " : " + zkActiveLedgers);
    }
    return new LedgerManager.LedgerRange(zkActiveLedgers.subSet(getStartLedgerIdByLevel(level1, level2), true,
            getEndLedgerIdByLevel(level1, level2), true));
} | 3.26 |
pulsar_LegacyHierarchicalLedgerRangeIterator_nextL1Node_rdh | /**
 * Iterate to the next level1 znode.
 *
 * @return false if all level1 nodes have been visited
 * @throws ExecutionException/InterruptedException/TimeoutException
 * if an error occurs reading zookeeper children
 */
private boolean nextL1Node() throws ExecutionException, InterruptedException, TimeoutException {
    l2NodesIter = null;
    while (l2NodesIter == null) {
        if (l1NodesIter.hasNext()) {
            curL1Nodes = l1NodesIter.next();
        } else {
            return false;
        }
        // Top level nodes are always exactly 2 digits long. (Don't pick up long hierarchical top level nodes)
        if (!isLedgerParentNode(curL1Nodes)) {
            continue;
        }
        List<String> l2Nodes = store.getChildren(ledgersRoot + "/" + curL1Nodes)
                .get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
        l2NodesIter = l2Nodes.iterator();
        if (!l2NodesIter.hasNext()) {
            l2NodesIter = null;
            continue;
        }
    }
    return true;
} | 3.26 |
pulsar_LegacyHierarchicalLedgerRangeIterator_getEndLedgerIdByLevel_rdh | /**
 * Get the largest ledger id in a specified node /level1/level2.
*
* @param level1
* 1st level node name
* @param level2
* 2nd level node name
* @return the largest ledger id
*/
private long getEndLedgerIdByLevel(String level1, String level2) throws IOException {
return StringUtils.stringToHierarchicalLedgerId(level1, level2, MAX_ID_SUFFIX);
} | 3.26 |
pulsar_LedgerOffloader_scanLedgers_rdh | /**
* Scans all the ManagedLedgers stored on this Offloader (usually a Bucket).
* The callback should not modify/delete the ledgers.
*
* @param consumer
 * receives the metadata of each offloaded ledger found
* @param offloadDriverMetadata
* additional metadata
* @throws ManagedLedgerException
*/
default void scanLedgers(OffloadedLedgerMetadataConsumer consumer, Map<String, String> offloadDriverMetadata) throws ManagedLedgerException {
throw ManagedLedgerException.getManagedLedgerException(new UnsupportedOperationException());
} | 3.26 |
pulsar_TxnLogBufferedWriter_m0_rdh | /**
* If reach the thresholds {@link #batchedWriteMaxRecords} or {@link #batchedWriteMaxSize}, do flush.
*/
private void m0() {
    if (flushContext.asyncAddArgsList.size() >= batchedWriteMaxRecords) {
        metrics.triggerFlushByRecordsCount(flushContext.asyncAddArgsList.size(), bytesSize,
                System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime);
        doFlush();
        return;
    }
    if (bytesSize >= batchedWriteMaxSize) {
        metrics.triggerFlushByBytesSize(flushContext.asyncAddArgsList.size(), bytesSize,
                System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime);
        doFlush();
    }
} | 3.26 |
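The two thresholds above implement a classic size-or-count batching policy. A generic sketch of the same decision, detached from Pulsar's internals; the limits are illustrative.

import java.util.ArrayList;
import java.util.List;

// Generic sketch: buffer records and flush when either the record count or the
// accumulated byte size crosses its threshold, whichever comes first.
class BatchBuffer {
    private static final int MAX_RECORDS = 512;
    private static final int MAX_BYTES = 4 * 1024 * 1024;

    private final List<byte[]> pending = new ArrayList<>();
    private int pendingBytes = 0;

    void add(byte[] record) {
        pending.add(record);
        pendingBytes += record.length;
        if (pending.size() >= MAX_RECORDS || pendingBytes >= MAX_BYTES) {
            flush();
        }
    }

    private void flush() {
        // ... write the batch out (e.g., as one ledger entry), then reset ...
        pending.clear();
        pendingBytes = 0;
    }
}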
pulsar_TxnLogBufferedWriter_nextTimingTrigger_rdh | /**
 * Why not use {@link ScheduledExecutorService#scheduleAtFixedRate(Runnable, long, long, TimeUnit)}?
 * Because when the {@link #singleThreadExecutorForWrite} thread processes slowly, the scheduleAtFixedRate task
 * will continue to append tasks to the ledger thread; this burdens the ledger thread and leads to an avalanche.
 * See: https://github.com/apache/pulsar/pull/16679.
 */
private void nextTimingTrigger() {
    try {
        if ((state == State.CLOSED) || (state == State.CLOSING)) {
            return;
        }
        timeout = timer.newTimeout(timingFlushTask, batchedWriteMaxDelayInMillis, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        log.error("Start timing flush trigger failed. managedLedger: " + managedLedger.getName(), e);
    }
} | 3.26 |
pulsar_TxnLogBufferedWriter_asyncAddData_rdh | /**
 * Append a new entry to the end of a managed ledger. All writes will be performed in the same thread. Callbacks
 * are executed in strict write order, but after {@link #close()}, callbacks that fail the state check will
 * execute earlier, and successful callbacks will not be affected.
*
* @param data
* data entry to be persisted.
* @param callback
* Will call {@link AddDataCallback#addComplete(Position, Object)} when
* add complete.
* Will call {@link AddDataCallback#addFailed(ManagedLedgerException, Object)} when
* add failure.
* @throws ManagedLedgerException
*/
public void asyncAddData(T data, AddDataCallback callback, Object ctx) {
if (!batchEnabled) {
if ((state == State.CLOSING) || (state == State.CLOSED)) {
callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
return;
}
ByteBuf byteBuf = dataSerializer.serialize(data);
managedLedger.asyncAddEntry(byteBuf, DisabledBatchCallback.INSTANCE, AsyncAddArgs.newInstance(callback, ctx, System.currentTimeMillis(), byteBuf));
return;
}
CompletableFuture.runAsync(() -> internalAsyncAddData(data, callback, ctx), singleThreadExecutorForWrite).exceptionally(e -> {
log.warn("Execute 'internalAsyncAddData' fail", e);
return null;
});
} | 3.26 |
pulsar_TxnLogBufferedWriter_close_rdh | /**
 * Release resources and cancel pending tasks.
 */
public CompletableFuture<Void> close() {
    // If the batch feature is disabled, there is nothing to close, so only set the state.
    if (!batchEnabled) {
        STATE_UPDATER.compareAndSet(this, State.OPEN, State.CLOSED);
        return CompletableFuture.completedFuture(null);
    }
    // If another thread already called "close()", do nothing.
    if (!STATE_UPDATER.compareAndSet(this, State.OPEN, State.CLOSING)) {
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> closeFuture = new CompletableFuture<>();
    // Cancel pending tasks and release resources.
    FutureUtil.safeRunAsync(() -> {
        // If some requests were flushed, BK will trigger those callbacks; the remaining requests should fail.
        failureCallbackByContextAndRecycle(flushContext, new ManagedLedgerException.ManagedLedgerFencedException(
                new Exception("Transaction log buffered write has closed")));
        // Cancel the timing task.
        if (!timeout.isCancelled()) {
            this.timeout.cancel();
        }
        STATE_UPDATER.set(this, State.CLOSED);
        closeFuture.complete(null);
    }, singleThreadExecutorForWrite, closeFuture);
    return closeFuture;
} | 3.26 |
pulsar_TxnLogBufferedWriter_newInstance_rdh | /**
 * This constructor is used only when batch is disabled. Unlike
 * {@link AsyncAddArgs#newInstance(AddDataCallback, Object, long)}, this variant carries {@code byteBuf}. The
 * {@code byteBuf} generated by {@link DataSerializer#serialize(Object)} will be released during the callback
 * when {@link #recycle()} is executed.
 */
private static AsyncAddArgs newInstance(AddDataCallback callback, Object ctx, long addedTime, ByteBuf byteBuf) {
AsyncAddArgs asyncAddArgs = newInstance(callback, ctx, addedTime);
asyncAddArgs.byteBuf = byteBuf;
return asyncAddArgs;
} | 3.26 |
pulsar_TxnLogBufferedWriter_internalAsyncAddData_rdh | /**
 * Append data to the queue; if {@link #batchedWriteMaxRecords} or {@link #batchedWriteMaxSize} is reached, do a
 * flush. If a request arrives whose {@code data} is too large (larger than {@link #batchedWriteMaxSize}), then
 * two flushes are executed:
 * 1. Write the data cached in the queue to BK.
 * 2. Write the large data directly to BK; this flush event will not be recorded in the metrics.
 * This ensures the sequential nature of multiple writes to BK.
 */
private void internalAsyncAddData(T data, AddDataCallback callback, Object ctx) {
    // Avoid a missing callback: fail the callback when an error occurs before the data is added to the array.
    if ((state == State.CLOSING) || (state == State.CLOSED)) {
        callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
        return;
    }
    int dataLength;
    try {
        dataLength = dataSerializer.getSerializedSize(data);
    } catch (Exception e) {
        callback.addFailed(new ManagedLedgerInterceptException(e), ctx);
        return;
    }
    if (dataLength >= batchedWriteMaxSize) {
        trigFlushByLargeSingleData();
        ByteBuf byteBuf = null;
        try {
            byteBuf = dataSerializer.serialize(data);
        } catch (Exception e) {
            callback.addFailed(new ManagedLedgerInterceptException(e), ctx);
            return;
        }
        managedLedger.asyncAddEntry(byteBuf, DisabledBatchCallback.INSTANCE,
                AsyncAddArgs.newInstance(callback, ctx, System.currentTimeMillis(), byteBuf));
        return;
    }
    try {
        // Why try-catch here?
        // If the recycle mechanism is not executed as expected, an exception occurs.
        flushContext.addCallback(callback, ctx);
    } catch (Exception e) {
        callback.addFailed(new ManagedLedgerInterceptException(e), ctx);
        return;
    }
    dataArray.add(data);
    bytesSize += dataLength;
    m0();
} | 3.26 |
pulsar_WebServer_addRestResource_rdh | /**
* Add a REST resource to the servlet context.
*
* @param basePath
* The base path for the resource.
* @param attribute
* An attribute associated with the resource.
* @param attributeValue
* The value of the attribute.
* @param resourceClass
* The class representing the resource.
* @param requireAuthentication
* A boolean indicating whether authentication is required for this resource.
*/
public void addRestResource(String basePath, String attribute, Object attributeValue, Class<?> resourceClass, boolean requireAuthentication) {
ResourceConfig config = new ResourceConfig();
config.register(resourceClass);
config.register(JsonMapperProvider.class);
ServletHolder servletHolder = new ServletHolder(new ServletContainer(config));
servletHolder.setAsyncSupported(true);
// This method has not historically checked for existing paths, so we don't check here either. The
// method call is added to reduce code duplication.
addServlet(basePath, servletHolder, Collections.singletonList(Pair.of(attribute, attributeValue)), requireAuthentication, false);
} | 3.26 |
pulsar_LongHierarchicalLedgerRangeIterator_isLedgerParentNode_rdh | /**
* whether the child of ledgersRootPath is a top level parent znode for
* ledgers (in HierarchicalLedgerManager) or znode of a ledger (in
* FlatLedgerManager).
*/
public boolean isLedgerParentNode(String path) {
return path.matches(StringUtils.LONGHIERARCHICAL_LEDGER_PARENT_NODE_REGEX);
} | 3.26 |
pulsar_LongHierarchicalLedgerRangeIterator_getChildrenAt_rdh | /**
* Returns all children with path as a parent. If path is non-existent,
* returns an empty list anyway (after all, there are no children there).
* Maps all exceptions (other than NoNode) to IOException in keeping with
* LedgerRangeIterator.
*
* @param path
* @return Iterator into set of all children with path as a parent
* @throws IOException
*/
List<String> getChildrenAt(String path) throws IOException {
try {
return store.getChildren(path).get(AbstractMetadataDriver.BLOCKING_CALL_TIMEOUT, TimeUnit.MILLISECONDS);
} catch (ExecutionException | TimeoutException e) {
if (log.isDebugEnabled()) {
log.debug("Failed to get children at {}", path);
}
throw new IOException(e);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new IOException("Interrupted while reading ledgers at path " + path, ie);
}
} | 3.26 |
pulsar_LongHierarchicalLedgerRangeIterator_advance_rdh | /**
* Resolves the difference between cases 1 and 2 after nextLevelIterator is exhausted.
* Pre-condition: nextLevelIterator == null, thisLevelIterator != null
* Post-condition: nextLevelIterator == null && !thisLevelIterator.hasNext() OR
* nextLevelIterator.hasNext() == true and nextLevelIterator.next()
* yields the next result of next()
*
* @throws IOException
* Exception representing error
*/
void advance() throws IOException {
    while (thisLevelIterator.hasNext()) {
        String node = thisLevelIterator.next();
        if ((level == 0) && (!isLedgerParentNode(node))) {
            continue;
        }
        LedgerManager.LedgerRangeIterator nextIterator = (level < 3)
                ? new InnerIterator(path + "/" + node, level + 1)
                : new LeafIterator(path + "/" + node);
        if (nextIterator.hasNext()) {
            nextLevelIterator = nextIterator;
            break;
        }
    }
} | 3.26 |
pulsar_Function_close_rdh | /**
* Called once to properly close resources when function instance is stopped.
*
* @throws Exception
* if an error occurs
*/
default void close() throws Exception {
} | 3.26 |
pulsar_Function_initialize_rdh | /**
* Called once to initialize resources when function instance is started.
*
* @param context
* The Function context
* @throws Exception
* if an error occurs
*/
default void initialize(Context context) throws Exception {
} | 3.26 |
pulsar_AbstractAwsConnector_defaultCredentialProvider_rdh | /**
 * Creates a default credential provider which takes accessKey and secretKey from the configuration and creates
 * {@link AWSCredentials}.
 *
 * @param awsCredentialPluginParam
 * @return the credential provider plugin
 */
public AwsCredentialProviderPlugin defaultCredentialProvider(String awsCredentialPluginParam) {
    Map<String, String> credentialMap = new Gson().fromJson(awsCredentialPluginParam,
            new TypeToken<Map<String, String>>() {}.getType());
    String accessKey = credentialMap.get(ACCESS_KEY_NAME);
    String secretKey = credentialMap.get(SECRET_KEY_NAME);
    if (!(StringUtils.isNotBlank(accessKey) && StringUtils.isNotBlank(secretKey))) {
        throw new IllegalArgumentException(String.format("Default %s and %s must be present into json-map "
                + "if AwsCredentialProviderPlugin not provided", ACCESS_KEY_NAME, SECRET_KEY_NAME));
    }
    return new AwsCredentialProviderPlugin() {
        @Override
        public void init(String param) {
            // noop
        }

        @Override
        public AWSCredentialsProvider m0() {
            return defaultCredentialProvider(accessKey, secretKey);
        }

        @Override
        public void close() throws IOException {
        }
    };
} | 3.26 |
pulsar_FixedColumnLengthTableMaker_addSpace_rdh | // Helper function to pad with white space.
private void addSpace(final int amount, final StringBuilder builder) {
    for (int i = 0; i < amount; ++i) {
        builder.append(' ');
    }
} | 3.26 |
pulsar_FixedColumnLengthTableMaker_addHorizontalBorder_rdh | // Helper function to add top and bottom borders.
private void addHorizontalBorder(final int length, final StringBuilder builder, final char borderChar) {
for (int i = 0; i < length; ++i) {
builder.append(borderChar);
}
} | 3.26 |
pulsar_FixedColumnLengthTableMaker_make_rdh | /**
* Make a table using the specified settings.
*
* @param rows
* Rows to construct the table from.
* @return A String version of the table.
*/
public String make(final Object[][] rows) {
    final StringBuilder builder = new StringBuilder();
    int numColumns = 0;
    for (final Object[] row : rows) {
        // Take the largest number of columns out of any row to be the total.
        numColumns = Math.max(numColumns, row.length);
    }
    // Total length of the table in characters.
    int totalLength = (numColumns * (leftPadding + rightPadding + separator.length())) - separator.length()
            + leftBorder.length() + f0.length();
    for (int i = 0; i < numColumns; ++i) {
        totalLength += lengthFor(i);
    }
    addHorizontalBorder(totalLength, builder, topBorder);
    builder.append('\n');
    int i;
    for (final Object[] row : rows) {
        i = 0;
        builder.append(leftBorder);
        for (final Object element : row) {
            addSpace(leftPadding, builder);
            String elementString;
            if (((element instanceof Float) || (element instanceof Double)) && (decimalFormatter != null)) {
                elementString = String.format(decimalFormatter, element);
            } else {
                // Avoid throwing NPE
                elementString = Objects.toString(element, "");
            }
            if (elementString.length() > lengthFor(i)) {
                // Trim down to the maximum number of characters.
                elementString = elementString.substring(0, lengthFor(i));
            }
            builder.append(elementString);
            // Add the space due to remaining characters and the right padding.
            addSpace((lengthFor(i) - elementString.length()) + rightPadding, builder);
            if (i != (numColumns - 1)) {
                // Don't add separator for the last column.
                builder.append(separator);
            }
            i += 1;
        }
        // Put empty elements for remaining columns.
        for (; i < numColumns; ++i) {
            addSpace(leftPadding + rightPadding + lengthFor(i), builder);
            if (i != (numColumns - 1)) {
                builder.append(separator);
            }
        }
        builder.append(f0);
        builder.append('\n');
    }
    addHorizontalBorder(totalLength, builder, bottomBorder);
    return builder.toString();
} | 3.26 |
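A minimal usage sketch of make(Object[][]); the default constructor and preconfigured borders/padding are assumptions, since only make() appears in the snippet above.

public class TableMakerExample {
    public static void main(String[] args) {
        FixedColumnLengthTableMaker tableMaker = new FixedColumnLengthTableMaker();
        Object[][] rows = {
            { "topic", "rateIn", "rateOut" },
            { "persistent://prop/ns/t1", 10.5, 9.75 },
            { "persistent://prop/ns/t2", null, 3.0 }, // null prints as an empty cell
        };
        System.out.println(tableMaker.make(rows));
    }
}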
pulsar_TopicMessageIdImpl_getTopicPartitionName_rdh | /**
 * Get the topic name which contains the partition part for this message.
 *
 * @return the topic name which contains the partition part
*/
@Deprecated
public String getTopicPartitionName() {
return getOwnerTopic();
} | 3.26 |
pulsar_ProducerConfiguration_m2_rdh | /**
*
* @return the configured compression type for this producer
*/
public CompressionType m2() {
return conf.getCompressionType();
} | 3.26 |
pulsar_ProducerConfiguration_setMaxPendingMessagesAcrossPartitions_rdh | /**
* Set the number of max pending messages across all the partitions
* <p>
* This setting will be used to lower the max pending messages for each partition
* ({@link #setMaxPendingMessages(int)}), if the total exceeds the configured value.
*
* @param maxPendingMessagesAcrossPartitions
*/
public void setMaxPendingMessagesAcrossPartitions(int maxPendingMessagesAcrossPartitions) {
conf.setMaxPendingMessagesAcrossPartitions(maxPendingMessagesAcrossPartitions);
}
/**
*
* @return whether the producer will block {@link Producer#send} and {@link Producer#sendAsync} | 3.26 |
pulsar_ProducerConfiguration_getCryptoFailureAction_rdh | /**
*
* @return The ProducerCryptoFailureAction
*/
public ProducerCryptoFailureAction getCryptoFailureAction() {
return conf.getCryptoFailureAction();
} | 3.26 |
pulsar_ProducerConfiguration_setMaxPendingMessages_rdh | /**
* Set the max size of the queue holding the messages pending to receive an acknowledgment from the broker.
* <p>
* When the queue is full, by default, all calls to {@link Producer#send} and {@link Producer#sendAsync} will fail
* unless blockIfQueueFull is set to true. Use {@link #setBlockIfQueueFull} to change the blocking behavior.
*
 * @param maxPendingMessages
 * @return the producer configuration
 */
public ProducerConfiguration setMaxPendingMessages(int maxPendingMessages) {
conf.setMaxPendingMessages(maxPendingMessages);
return this;
} | 3.26 |
pulsar_ProducerConfiguration_setProperty_rdh | /**
 * Set a name/value property for this producer.
 *
 * @param key
 * @param value
 * @return the producer configuration
 */
public ProducerConfiguration setProperty(String key, String value) {
    checkArgument(key != null);
    checkArgument(value != null);
    conf.getProperties().put(key, value);
    return this;
} | 3.26 |
pulsar_ProducerConfiguration_setBlockIfQueueFull_rdh | /**
* Set whether the {@link Producer#send} and {@link Producer#sendAsync} operations should block when the outgoing
* message queue is full.
* <p>
* Default is <code>false</code>. If set to <code>false</code>, send operations will immediately fail with
* {@link PulsarClientException.ProducerQueueIsFullError} when there is no space left in pending queue.
*
* @param blockIfQueueFull
* whether to block {@link Producer#send} and {@link Producer#sendAsync} operations on queue full
 * @return the producer configuration
 */
public ProducerConfiguration setBlockIfQueueFull(boolean blockIfQueueFull) {
conf.setBlockIfQueueFull(blockIfQueueFull);
return this;
} | 3.26 |
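Taken together, the queue-related setters above compose like this; a usage sketch of the legacy ProducerConfiguration API, assuming a public no-arg constructor, with illustrative values.

import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.ProducerConfiguration;

public class ProducerConfigExample {
    public static void main(String[] args) {
        // Bounded pending queue, blocking sends on overflow, 10-second publish timeout.
        ProducerConfiguration producerConf = new ProducerConfiguration()
                .setMaxPendingMessages(1000)
                .setBlockIfQueueFull(true)
                .setSendTimeout(10, TimeUnit.SECONDS);
    }
}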
pulsar_ProducerConfiguration_isEncryptionEnabled_rdh | /**
* Returns true if encryption keys are added.
*/
public boolean isEncryptionEnabled() {
    return conf.isEncryptionEnabled();
} | 3.26 |
pulsar_ProducerConfiguration_setMessageRouter_rdh | /**
* Set a custom message routing policy by passing an implementation of MessageRouter.
*
* @param messageRouter
*/
public ProducerConfiguration setMessageRouter(MessageRouter messageRouter) {
Objects.requireNonNull(messageRouter);
setMessageRoutingMode(MessageRoutingMode.CustomPartition);
conf.setCustomMessageRouter(messageRouter);
return this;
}
/**
* Get the message router set by {@link #setMessageRouter(MessageRouter)}.
*
* @return message router.
* @deprecated since 1.22.0-incubating. <tt>numPartitions</tt> is already passed as parameter in
{@link MessageRouter#choosePartition(Message, TopicMetadata)} | 3.26 |
pulsar_ProducerConfiguration_getMaxPendingMessagesAcrossPartitions_rdh | /**
*
* @return the maximum number of pending messages allowed across all the partitions
*/
public int getMaxPendingMessagesAcrossPartitions() {
    return conf.getMaxPendingMessagesAcrossPartitions();
} | 3.26 |
pulsar_ProducerConfiguration_getSendTimeoutMs_rdh | /**
*
* @return the message send timeout in ms
*/
public long getSendTimeoutMs() {
return conf.getSendTimeoutMs();
} | 3.26 |
pulsar_ProducerConfiguration_getMessageRoutingMode_rdh | /**
* Get the message routing mode for the partitioned producer.
*
* @return message routing mode, default is round-robin routing.
* @see MessageRoutingMode#RoundRobinPartition
*/
public MessageRoutingMode getMessageRoutingMode() {
return MessageRoutingMode.valueOf(conf.getMessageRoutingMode().toString());
} | 3.26 |
pulsar_ProducerConfiguration_setProperties_rdh | /**
* Add all the properties in the provided map.
*
 * @param properties
 * @return the producer configuration
 */
public ProducerConfiguration setProperties(Map<String, String> properties) {
    conf.getProperties().putAll(properties);
    return this;
} | 3.26 |
pulsar_ProducerConfiguration_setMessageRoutingMode_rdh | /**
* Set the message routing mode for the partitioned producer.
*
* @param messageRouteMode
* message routing mode.
* @return producer configuration
* @see MessageRoutingMode
*/
public ProducerConfiguration setMessageRoutingMode(MessageRoutingMode messageRouteMode) {
Objects.requireNonNull(messageRouteMode);
conf.setMessageRoutingMode(MessageRoutingMode.valueOf(messageRouteMode.toString()));
return this;
} | 3.26 |
pulsar_ProducerConfiguration_setSendTimeout_rdh | /**
* Set the send timeout <i>(default: 30 seconds)</i>
* <p>
* If a message is not acknowledged by the server before the sendTimeout expires, an error will be reported.
*
* @param sendTimeout
* the send timeout
* @param unit
* the time unit of the {@code sendTimeout}
*/
public ProducerConfiguration setSendTimeout(int sendTimeout, TimeUnit unit) {
conf.setSendTimeoutMs(sendTimeout, unit);
return this;
} | 3.26 |
pulsar_ProducerConfiguration_setProducerName_rdh | /**
* Specify a name for the producer
* <p>
 * If not assigned, the system will generate a globally unique name which can be accessed with
 * {@link Producer#getProducerName()}.
 * <p>
 * When specifying a name, it is up to the user to ensure that, for a given topic, the producer name is unique
* across all Pulsar's clusters.
* <p>
* If a producer with the same name is already connected to a particular topic, the
* {@link PulsarClient#createProducer(String)} operation will fail with {@link ProducerBusyException}.
*
* @param producerName
* the custom name to use for the producer
* @since 1.20.0
*/
public void setProducerName(String producerName) {
conf.setProducerName(producerName);
} | 3.26 |
pulsar_ProducerConfiguration_setInitialSequenceId_rdh | /**
* Set the baseline for the sequence ids for messages published by the producer.
* <p>
* First message will be using (initialSequenceId + 1) as its sequence id and subsequent messages will be assigned
* incremental sequence ids, if not otherwise specified.
*
* @param initialSequenceId
 * @return the producer configuration
 */
public ProducerConfiguration setInitialSequenceId(long initialSequenceId) {
conf.setInitialSequenceId(initialSequenceId);
return this;
} | 3.26 |
pulsar_ProducerConfiguration_addEncryptionKey_rdh | /**
* Add public encryption key, used by producer to encrypt the data key.
*
* At the time of producer creation, Pulsar client checks if there are keys added to encryptionKeys. If keys are
* found, a callback getKey(String keyName) is invoked against each key to load the values of the key. Application
* should implement this callback to return the key in pkcs8 format. If compression is enabled, message is encrypted
* after compression. If batch messaging is enabled, the batched message is encrypted.
*/
public void addEncryptionKey(String key) {
conf.getEncryptionKeys().add(key);
} | 3.26 |
pulsar_ProducerConfiguration_getProducerName_rdh | /**
*
* @return the configured custom producer name or null if no custom name was specified
* @since 1.20.0
*/
public String getProducerName() {
return conf.getProducerName();
} | 3.26 |
pulsar_ProducerConfiguration_getMaxPendingMessages_rdh | /**
*
* @return the maximum number of messages allowed in the outstanding messages queue for the producer
*/
public int getMaxPendingMessages() {
return conf.getMaxPendingMessages();
} | 3.26 |
pulsar_ProducerConfiguration_getCryptoKeyReader_rdh | /**
*
* @return the CryptoKeyReader
*/
public CryptoKeyReader getCryptoKeyReader() {
return conf.getCryptoKeyReader();
} | 3.26 |
pulsar_ProducerConfiguration_getMessageRouter_rdh | /**
* Get the message router set by {@link #setMessageRouter(MessageRouter)}.
*
* @return message router set by {@link #setMessageRouter(MessageRouter)}.
*/
public MessageRouter getMessageRouter() {
return conf.getCustomMessageRouter();
} | 3.26 |
pulsar_ProducerConfiguration_getEncryptionKeys_rdh | /**
*
* @return encryptionKeys
*/
public Set<String> getEncryptionKeys() {
return conf.getEncryptionKeys();
} | 3.26 |
pulsar_ProducerConfiguration_setCryptoKeyReader_rdh | /**
* Sets a {@link CryptoKeyReader}.
*
* @param cryptoKeyReader
* CryptoKeyReader object
*/
public ProducerConfiguration setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) {
Objects.requireNonNull(cryptoKeyReader);
conf.setCryptoKeyReader(cryptoKeyReader);
return this;
} | 3.26 |
pulsar_ComponentImpl_isSuperUser_rdh | /**
*
* @deprecated use {@link #isSuperUser(AuthenticationParameters)}
*/
@Deprecated
public boolean isSuperUser(String clientRole, AuthenticationDataSource authenticationData) {
    AuthenticationParameters authParams = AuthenticationParameters.builder().clientRole(clientRole)
            .clientAuthenticationDataSource(authenticationData).build();
return isSuperUser(authParams);
} | 3.26 |
pulsar_ComponentImpl_isAuthorizedRole_rdh | /**
*
* @deprecated use {@link #isAuthorizedRole(String, String, AuthenticationParameters)} instead.
*/
@Deprecated
public boolean isAuthorizedRole(String tenant, String namespace, String clientRole, AuthenticationDataSource authenticationData) throws PulsarAdminException {
AuthenticationParameters authParams = AuthenticationParameters.builder().clientRole(clientRole).clientAuthenticationDataSource(authenticationData).build();
return isAuthorizedRole(tenant, namespace, authParams);
} | 3.26 |
pulsar_ComponentImpl_allowFunctionOps_rdh | /**
*
* @deprecated use {@link #isSuperUser(AuthenticationParameters)}
*/
@Deprecated
public boolean allowFunctionOps(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) {
AuthenticationParameters authParams = AuthenticationParameters.builder().clientRole(role).clientAuthenticationDataSource(authenticationData).build();
return allowFunctionOps(namespaceName, authParams);
} | 3.26 |
pulsar_AbstractHierarchicalLedgerManager_asyncProcessLevelNodes_rdh | /**
* Process hash nodes in a given path.
*/
void asyncProcessLevelNodes(final String path, final BookkeeperInternalCallbacks.Processor<String> processor, final AsyncCallback.VoidCallback finalCb, final Object context, final int successRc, final int failureRc) {
store.getChildren(path).thenAccept(levelNodes -> {
if (levelNodes.isEmpty()) {
finalCb.processResult(successRc, null, context);
return;
}
AsyncListProcessor<String> listProcessor = new AsyncListProcessor<>(scheduler);
// process its children
listProcessor.process(levelNodes, processor, finalCb, context, successRc, failureRc);
}).exceptionally(ex -> {
log.error("Error polling hash nodes of {}: {}", path, ex.getMessage());
finalCb.processResult(failureRc, null, context);
return null;
});
} | 3.26 |
pulsar_AbstractHierarchicalLedgerManager_isLedgerParentNode_rdh | /**
* whether the child of ledgersRootPath is a top level parent znode for
* ledgers (in HierarchicalLedgerManager) or znode of a ledger (in
* FlatLedgerManager).
*/
public boolean isLedgerParentNode(String path) {
    return path.matches(getLedgerParentNodeRegex());
} | 3.26 |
pulsar_AbstractHierarchicalLedgerManager_getLedgerId_rdh | // get ledger from all level nodes
long getLedgerId(String... levelNodes) throws IOException {
    return StringUtils.stringToHierarchicalLedgerId(levelNodes);
} | 3.26 |
pulsar_AbstractHierarchicalLedgerManager_asyncProcessLedgersInSingleNode_rdh | /**
* Process ledgers in a single zk node.
*
* <p>
 * for each ledger found in this zk node, processor#process(ledgerId) will be triggered
 * to process a specific ledger. After all ledgers have been processed, the finalCb will
 * be called with the provided context object. The RC passed to finalCb is decided by:
 * <ul>
 * <li> All ledgers are processed successfully: successRc will be passed.
 * <li> Any ledger fails to be processed: failureRc will be passed.
 * </ul>
* </p>
*
* @param path
* Zk node path to store ledgers
* @param processor
* Processor provided to process ledger
* @param finalCb
* Callback object when all ledgers are processed
* @param ctx
* Context object passed to finalCb
* @param successRc
* RC passed to finalCb when all ledgers are processed successfully
 * @param failureRc
 * RC passed to finalCb when any ledger fails to be processed
*/
protected void asyncProcessLedgersInSingleNode(final String path, final BookkeeperInternalCallbacks.Processor<Long> processor, final AsyncCallback.VoidCallback finalCb, final Object ctx, final int successRc, final int failureRc) {
store.getChildren(path).thenAccept(ledgerNodes -> {
Set<Long> activeLedgers = HierarchicalLedgerUtils.ledgerListToSet(ledgerNodes, ledgerRootPath, path);
if (log.isDebugEnabled()) {
log.debug("Processing ledgers: {}", activeLedgers);
}
// no ledgers found, return directly
if (activeLedgers.isEmpty()) {
finalCb.processResult(successRc, null, ctx);
return;
}
BookkeeperInternalCallbacks.MultiCallback mcb = new BookkeeperInternalCallbacks.MultiCallback(activeLedgers.size(), finalCb, ctx, successRc, failureRc);
// start loop over all ledgers
scheduler.submit(() -> {
for (Long ledger : activeLedgers) {
processor.process(ledger, mcb);
}
});
}).exceptionally(ex -> {
finalCb.processResult(failureRc, null, ctx);
return null;
});
} | 3.26 |
pulsar_AbstractHierarchicalLedgerManager_process_rdh | /**
* Process list of items.
*
* @param data
* List of data to process
* @param processor
* Callback to process element of list when success
* @param finalCb
* Final callback to be called after all elements in the list are processed
* @param context
* Context of final callback
* @param successRc
* RC passed to final callback on success
* @param failureRc
* RC passed to final callback on failure
*/
public void process(final List<T> data, final BookkeeperInternalCallbacks.Processor<T> processor,
        final AsyncCallback.VoidCallback finalCb, final Object context, final int successRc, final int failureRc) {
    if ((data == null) || (data.size() == 0)) {
        finalCb.processResult(successRc, null, context);
        return;
    }
    final int size = data.size();
    final AtomicInteger current = new AtomicInteger(0);
    T firstElement = data.get(0);
    processor.process(firstElement, new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (rc != successRc) {
                // terminate immediately
                finalCb.processResult(failureRc, null, context);
                return;
            }
            // process next element
            int next = current.incrementAndGet();
            if (next >= size) {
                // reached the end of the list
                finalCb.processResult(successRc, null, context);
                return;
            }
            final T dataToProcess = data.get(next);
            final AsyncCallback.VoidCallback stub = this;
            scheduler.execute(() -> processor.process(dataToProcess, stub));
        }
    });
} | 3.26 |
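The core idea above is a self-rescheduling callback that walks the list one element at a time. A generic, self-contained restatement of the same pattern (not the BookKeeper API; names and the inline executor are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;

// At most one item is ever being processed; the completion callback schedules the next.
public class SequentialProcessExample {
    public static void main(String[] args) {
        Executor executor = Runnable::run; // inline executor for the demo
        List<String> data = Arrays.asList("/a", "/b", "/c");
        AtomicInteger current = new AtomicInteger(0);
        BiConsumer<String, Runnable> processor = (item, done) -> {
            System.out.println("processed " + item);
            done.run();
        };
        Runnable[] step = new Runnable[1];
        step[0] = () -> {
            int next = current.getAndIncrement();
            if (next >= data.size()) {
                System.out.println("all done");
                return;
            }
            processor.accept(data.get(next), () -> executor.execute(step[0]));
        };
        step[0].run();
    }
}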
pulsar_SubscriptionStatsImpl_add_rdh | // if the stats are added for the 1st time, we will need to make a copy of these stats and add it to the current
// stats
public SubscriptionStatsImpl add(SubscriptionStatsImpl
stats) {
Objects.requireNonNull(stats);
this.msgRateOut +=
stats.msgRateOut;
this.msgThroughputOut += stats.msgThroughputOut;
this.bytesOutCounter += stats.bytesOutCounter;
this.msgOutCounter += stats.msgOutCounter;
this.msgRateRedeliver += stats.msgRateRedeliver;
this.messageAckRate += stats.messageAckRate;
this.chunkedMessageRate += stats.chunkedMessageRate;
this.msgBacklog
+= stats.msgBacklog;
this.backlogSize += stats.backlogSize;
this.msgBacklogNoDelayed += stats.msgBacklogNoDelayed;
this.msgDelayed += stats.msgDelayed;
this.unackedMessages += stats.unackedMessages;
this.type = stats.type;
this.msgRateExpired += stats.msgRateExpired;
this.totalMsgExpired += stats.totalMsgExpired;this.isReplicated |= stats.isReplicated;
this.isDurable |= stats.isDurable;
if (this.consumers.size() != stats.consumers.size()) {
for (int i = 0; i < stats.consumers.size(); i++) {
ConsumerStatsImpl consumerStats = new ConsumerStatsImpl();
this.consumers.add(consumerStats.add(stats.consumers.get(i)));
}
} else {
for (int v2 = 0; v2 < stats.consumers.size(); v2++) {
this.consumers.get(v2).add(stats.consumers.get(v2));
}
}
this.allowOutOfOrderDelivery |= stats.allowOutOfOrderDelivery;
this.consumersAfterMarkDeletePosition.putAll(stats.consumersAfterMarkDeletePosition);
this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges;
this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize;if ((this.earliestMsgPublishTimeInBacklog != 0) && (stats.earliestMsgPublishTimeInBacklog != 0)) {
this.earliestMsgPublishTimeInBacklog = Math.min(this.earliestMsgPublishTimeInBacklog, stats.earliestMsgPublishTimeInBacklog);
} else {
this.earliestMsgPublishTimeInBacklog = Math.max(this.earliestMsgPublishTimeInBacklog, stats.earliestMsgPublishTimeInBacklog);
}
this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes;
this.subscriptionProperties.putAll(stats.subscriptionProperties);
this.filterProcessedMsgCount += stats.filterProcessedMsgCount;
this.filterAcceptedMsgCount += stats.filterAcceptedMsgCount;
this.filterRejectedMsgCount += stats.filterRejectedMsgCount;
this.filterRescheduledMsgCount += stats.filterRescheduledMsgCount;
stats.bucketDelayedIndexStats.forEach((k, v) -> {
TopicMetricBean topicMetricBean = this.bucketDelayedIndexStats.computeIfAbsent(k, __ -> new TopicMetricBean());
            topicMetricBean.name = v.name;
            topicMetricBean.labelsAndValues = v.labelsAndValues;
topicMetricBean.value += v.value;
});
return this;
} | 3.26 |
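For context, a minimal sketch of how this accumulator is typically driven, e.g. when folding per-partition subscription stats into a single view (the partitionStatsList variable is assumed):

// Sketch: fold per-partition subscription stats into one aggregate.
SubscriptionStatsImpl aggregate = new SubscriptionStatsImpl();
for (SubscriptionStatsImpl partition : partitionStatsList) { // partitionStatsList is assumed
    aggregate.add(partition);                                // add() mutates and returns this
}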
pulsar_AuthorizationService_allowTopicOperation_rdh | /**
*
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
public Boolean allowTopicOperation(TopicName topicName, TopicOperation operation, String originalRole, String role, AuthenticationDataSource authData) throws Exception {
try {
return allowTopicOperationAsync(topicName, operation, originalRole, role, authData).get(conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
        } catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.26 |
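The deprecated synchronous wrappers in this class all share the same block-with-timeout shape; a hedged generic sketch of that pattern (the helper name is hypothetical, and it additionally restores the interrupt flag, which the originals do not):

// Hypothetical helper capturing the sync-over-async pattern used by the deprecated methods.
private <T> T blockOn(CompletableFuture<T> future) throws Exception {
    try {
        return future.get(conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // keep the interrupt visible to callers
        throw new RestException(e);
    } catch (ExecutionException e) {
        throw new RestException(e.getCause()); // unwrap to the underlying cause
    }
}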
pulsar_AuthorizationService_allowTopicPolicyOperationAsync_rdh | /**
 * Check whether the given role is allowed to execute the specified policy operation on a topic.
 *
 * @param topicName
 * @param policy
 * @param operation
 * @param role
 * @param authData
 *            additional authdata in json for targeted authorization provider
 * @return a future completing with true when the operation is authorized
 */
public CompletableFuture<Boolean> allowTopicPolicyOperationAsync(TopicName topicName, PolicyName policy, PolicyOperation operation, String role, AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.allowTopicPolicyOperationAsync(topicName, role, policy, operation, authData);
} | 3.26 |
pulsar_AuthorizationService_revokePermissionAsync_rdh | /**
 * Revoke authorization-action permission on a topic from the given client.
*
* @param topicName
* @param role
*/
    public CompletableFuture<Void> revokePermissionAsync(TopicName topicName, String role) {
return provider.revokePermissionAsync(topicName, role);
} | 3.26 |
pulsar_AuthorizationService_m1_rdh | /**
 * Check whether the given role is allowed to execute the specified operation on a namespace.
 *
 * @param namespaceName
 * @param operation
 * @param role
 * @param authData
 *            additional authdata in json for targeted authorization provider
 * @return a future completing with true when the operation is authorized
 */
public CompletableFuture<Boolean> m1(NamespaceName namespaceName, NamespaceOperation operation, String role, AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.allowNamespaceOperationAsync(namespaceName, role, operation, authData);
} | 3.26 |
pulsar_AuthorizationService_isSuperUserOrAdmin_rdh | /**
 * Functions, sources, and sinks each have their own method in this class. This method first checks for
 * super-user access, then for tenant admin access.
 */
    private CompletableFuture<Boolean> isSuperUserOrAdmin(NamespaceName namespaceName, String role, AuthenticationDataSource authenticationData) {
return isSuperUser(role, authenticationData).thenCompose(isSuperUserOrAdmin -> isSuperUserOrAdmin ? CompletableFuture.completedFuture(true) : isTenantAdmin(namespaceName.getTenant(), role, authenticationData));
} | 3.26 |
pulsar_AuthorizationService_allowTenantOperation_rdh | /**
*
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
    public boolean allowTenantOperation(String tenantName, TenantOperation operation, String originalRole, String role, AuthenticationDataSource authData) throws Exception {
try {
return allowTenantOperationAsync(tenantName, operation, originalRole, role, authData).get(conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
        } catch (InterruptedException e) {
            throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.26 |
pulsar_AuthorizationService_canLookupAsync_rdh | /**
* Check whether the specified role can perform a lookup for the specified topic.
*
* For that the caller needs to have producer or consumer permission.
*
* @param topicName
* @param role
 * @return a future completing with true when lookup is allowed
 */
public CompletableFuture<Boolean> canLookupAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.isSuperUser(role, authenticationData, conf).thenComposeAsync(isSuperUser -> {
if (isSuperUser) {
return CompletableFuture.completedFuture(true);
} else {
return provider.canLookupAsync(topicName, role, authenticationData);
}
});
} | 3.26 |
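A hedged caller-side sketch: gate a lookup on the async result (the exception type and message here are illustrative, not the broker's actual error handling):

// Illustrative: fail the calling pipeline when lookup is not permitted.
canLookupAsync(topicName, role, authenticationData).thenAccept(allowed -> {
    if (!allowed) {
        throw new CompletionException(
                new IllegalStateException(role + " may not look up " + topicName));
    }
});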
pulsar_AuthorizationService_allowTopicOperationAsync_rdh | /**
 * Check whether the given role is allowed to execute the specified operation on a topic.
 *
 * @param topicName
 * @param operation
 * @param role
 * @param authData
 *            additional authdata in json for targeted authorization provider
 * @return a future completing with true when the operation is authorized
 */
public CompletableFuture<Boolean> allowTopicOperationAsync(TopicName topicName, TopicOperation operation, String role, AuthenticationDataSource authData) {
if (log.isDebugEnabled()) {
log.debug("Check if role {} is allowed to execute topic operation {} on topic {}", role, operation, topicName);
}
        if (!this.conf.isAuthorizationEnabled()) {
            return CompletableFuture.completedFuture(true);
}
CompletableFuture<Boolean> allowFuture = provider.allowTopicOperationAsync(topicName, role, operation, authData);
if (log.isDebugEnabled()) {
            return allowFuture.whenComplete((allowed, exception) -> {
                if (exception == null) {
if (allowed) {
log.debug("Topic operation {} on topic {} is allowed: role = {}", operation, topicName, role);
} else {
log.debug("Topic operation {} on topic {} is NOT allowed: role = {}", operation, topicName, role);
}
} else {
log.debug("Failed to check if topic operation {} on topic {} is allowed:" + " role = {}", operation, topicName, role, exception);
}
});
} else {
return allowFuture;
}
} | 3.26 |
pulsar_AuthorizationService_allowTenantOperationAsync_rdh | /**
 * Check whether the given role is allowed to execute the specified operation on a tenant.
 *
 * @param tenantName
 *            tenant name
 * @param operation
 *            tenant operation
 * @param role
 *            role name
 * @param authData
 *            additional authdata in json for targeted authorization provider
 * @return a future completing with true when the operation is authorized
 */
public CompletableFuture<Boolean> allowTenantOperationAsync(String tenantName, TenantOperation operation, String role, AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
        return provider.allowTenantOperationAsync(tenantName, role, operation, authData);
} | 3.26 |
pulsar_AuthorizationService_canProduceAsync_rdh | /**
* Check if the specified role has permission to send messages to the specified fully qualified topic name.
*
* @param topicName
* the fully qualified topic name associated with the topic.
* @param role
* the app id used to send messages to the topic.
*/
    public CompletableFuture<Boolean> canProduceAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.isSuperUser(role, authenticationData, conf).thenComposeAsync(isSuperUser -> {
if (isSuperUser) {
return CompletableFuture.completedFuture(true);
} else {
return provider.canProduceAsync(topicName, role, authenticationData);
}
});
} | 3.26 |
pulsar_AuthorizationService_allowNamespacePolicyOperationAsync_rdh | /**
 * Check whether the given role is allowed to execute the specified policy operation on a namespace.
 *
 * @param namespaceName
 * @param policy
 * @param operation
 * @param role
 * @param authData
 *            additional authdata in json for targeted authorization provider
 * @return a future completing with true when the operation is authorized
 */
public CompletableFuture<Boolean> allowNamespacePolicyOperationAsync(NamespaceName namespaceName, PolicyName policy, PolicyOperation operation, String role, AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.allowNamespacePolicyOperationAsync(namespaceName, policy, operation, role, authData);
} | 3.26 |
pulsar_AuthorizationService_allowNamespacePolicyOperation_rdh | /**
*
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
public boolean allowNamespacePolicyOperation(NamespaceName namespaceName, PolicyName policy, PolicyOperation operation, String originalRole, String role, AuthenticationDataSource authData) throws Exception {
try {
return m3(namespaceName, policy, operation, originalRole, role, authData).get(conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
} catch (InterruptedException e) {
            throw new RestException(e);
        } catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.26 |
pulsar_AuthorizationService_allowTopicPolicyOperation_rdh | /**
*
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
public Boolean allowTopicPolicyOperation(TopicName topicName, PolicyName policy, PolicyOperation operation, String originalRole, String role, AuthenticationDataSource authData) throws Exception {
try {
return allowTopicPolicyOperationAsync(topicName, policy, operation, originalRole, role, authData).get(conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.26 |
pulsar_AuthorizationService_isValidOriginalPrincipal_rdh | /**
* Validates that the authenticatedPrincipal and the originalPrincipal are a valid combination.
* Valid combinations fulfill one of the following two rules:
* <p>
* 1. The authenticatedPrincipal is in {@link ServiceConfiguration#getProxyRoles()}, if, and only if,
* the originalPrincipal is set to a role that is not also in {@link ServiceConfiguration#getProxyRoles()}.
* <p>
* 2. The authenticatedPrincipal and the originalPrincipal are the same, but are not a proxyRole, when
* allowNonProxyPrincipalsToBeEqual is true.
*
* @return true when roles are a valid combination and false when roles are an invalid combination
*/
public boolean isValidOriginalPrincipal(String authenticatedPrincipal, String originalPrincipal, SocketAddress remoteAddress, boolean allowNonProxyPrincipalsToBeEqual) {
String errorMsg = null;
if (conf.getProxyRoles().contains(authenticatedPrincipal)) {
if (StringUtils.isBlank(originalPrincipal)) {
                errorMsg = "originalPrincipal must be provided when connecting with a proxy role.";
            } else if (conf.getProxyRoles().contains(originalPrincipal)) {
                errorMsg = "originalPrincipal cannot be a proxy role.";
}
} else if (StringUtils.isNotBlank(originalPrincipal) && (!(allowNonProxyPrincipalsToBeEqual && originalPrincipal.equals(authenticatedPrincipal)))) {
errorMsg = "cannot specify originalPrincipal when connecting without valid proxy role.";
}
if (errorMsg != null) {
log.warn("[{}] Illegal combination of role [{}] and originalPrincipal [{}]: {}", remoteAddress, authenticatedPrincipal, originalPrincipal, errorMsg);
return false;
} else {
return true;
}
} | 3.26 |
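Illustrative outcomes of the two rules, assuming proxyRoles contains only "proxy" and allowNonProxyPrincipalsToBeEqual is true (addr can be any SocketAddress; it is only used for logging):

isValidOriginalPrincipal("proxy",  "client", addr, true); // true: proxy forwards a non-proxy role
isValidOriginalPrincipal("proxy",  "proxy",  addr, true); // false: originalPrincipal is itself a proxy role
isValidOriginalPrincipal("proxy",  "",       addr, true); // false: a proxy must supply an originalPrincipal
isValidOriginalPrincipal("client", "client", addr, true); // true: equal non-proxy principals are permitted
isValidOriginalPrincipal("client", "other",  addr, true); // false: a non-proxy role cannot set a different originalPrincipal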
pulsar_AuthorizationService_canLookup_rdh | /**
* Check whether the specified role can perform a lookup for the specified topic.
*
* For that the caller needs to have producer or consumer permission.
*
* @param topicName
* @param role
 * @return true when the role can perform the lookup
 * @throws Exception
 */
public boolean canLookup(TopicName topicName, String role, AuthenticationDataSource authenticationData) throws Exception {
try {
return canLookupAsync(topicName, role, authenticationData).get(conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
} catch (TimeoutException e) {
log.warn("Time-out {} sec while checking authorization on {} ", conf.getMetadataStoreOperationTimeoutSeconds(), topicName);
            throw e;
        } catch (Exception e) {
log.warn("Role - {} failed to get lookup permissions for topic - {}. {}", role, topicName, e.getMessage());
throw e;
}
} | 3.26 |
pulsar_AuthorizationService_revokeSubscriptionPermissionAsync_rdh | /**
* Revoke subscription admin-api access for a role.
*
* @param namespace
* @param subscriptionName
* @param role
 * @return a future completing when the permission has been revoked
 */
public CompletableFuture<Void> revokeSubscriptionPermissionAsync(NamespaceName namespace, String subscriptionName, String role, String authDataJson) {
return provider.revokeSubscriptionPermissionAsync(namespace, subscriptionName, role, authDataJson);
} | 3.26 |
pulsar_AuthorizationService_grantPermissionAsync_rdh | /**
* Grant authorization-action permission on a topic to the given client.
*
* NOTE: used to complete with {@link IllegalArgumentException} when namespace not found or with
* {@link IllegalStateException} when failed to grant permission.
*
* @param topicName
 * @param actions
 * @param role
* @param authDataJson
* additional authdata in json for targeted authorization provider
* @completesWith null when the permissions are updated successfully.
* @completesWith {@link MetadataStoreException} when the MetadataStore is not updated.
*/
public CompletableFuture<Void> grantPermissionAsync(TopicName topicName, Set<AuthAction> actions, String role, String authDataJson) {
return provider.grantPermissionAsync(topicName, actions, role, authDataJson);
} | 3.26 |
pulsar_AuthorizationService_grantSubscriptionPermissionAsync_rdh | /**
* Grant permission to roles that can access subscription-admin api.
*
* @param namespace
* @param subscriptionName
* @param roles
* @param authDataJson
* additional authdata in json for targeted authorization provider
 * @return a future completing when the permissions have been granted
 */
public CompletableFuture<Void> grantSubscriptionPermissionAsync(NamespaceName namespace, String subscriptionName, Set<String> roles, String authDataJson) {
return provider.grantSubscriptionPermissionAsync(namespace, subscriptionName, roles, authDataJson);
} | 3.26 |
pulsar_JwksCache_getJwkAndMaybeReload_rdh | /**
 * Retrieve the JWK for the given key ID from the given JWKS URI. If the key ID is not found and failOnMissingKeyId
 * is false, the JWKS is reloaded from the JWKS URI and the key ID is searched for again.
*/
private CompletableFuture<Jwk> getJwkAndMaybeReload(Optional<String> maybeJwksUri, String keyId, boolean failOnMissingKeyId) {
return cache.get(maybeJwksUri).thenCompose(jwks -> {
try {
return CompletableFuture.completedFuture(getJwkForKID(maybeJwksUri, jwks, keyId));
} catch (IllegalArgumentException e) {
if (failOnMissingKeyId) {
throw e;
} else {
Long lastRefresh = jwksLastRefreshTime.get(maybeJwksUri);
if ((lastRefresh == null) || ((System.nanoTime() - lastRefresh) > keyIdCacheMissRefreshNanos)) {
// In this case, the key ID was not found, but we haven't refreshed the JWKS in a while,
// so it is possible the key ID was added. Refresh the JWKS and try again.
cache.synchronous().invalidate(maybeJwksUri);
}
// There is a small race condition where the JWKS could be refreshed by another thread,
// so we retry getting the JWK, even though we might not have invalidated the cache.
return getJwkAndMaybeReload(maybeJwksUri, keyId, true);
}
}
});
} | 3.26 |
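The refresh throttle above relies on bookkeeping elsewhere in the class; a hedged sketch of what that state plausibly looks like (the field shapes, the helper name recordJwksRefresh, and the five-minute default are assumptions, not the class's actual declarations):

// Assumed companion state for the key-id-miss throttle above.
private final ConcurrentHashMap<Optional<String>, Long> jwksLastRefreshTime = new ConcurrentHashMap<>();
private final long keyIdCacheMissRefreshNanos = TimeUnit.MINUTES.toNanos(5); // assumed default

// Recorded whenever the async cache (re)loads a JWKS, so a later key-id miss can
// decide whether forcing another reload is worthwhile.
private void recordJwksRefresh(Optional<String> jwksUri) {
    jwksLastRefreshTime.put(jwksUri, System.nanoTime());
}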
pulsar_JwksCache_convertToJwks_rdh | /**
 * The JWK Set is stored in the "keys" key; see https://www.rfc-editor.org/rfc/rfc7517#section-5.1.
*
* @param jwksUri
* - the URI used to retrieve the JWKS
* @param jwks
* - the JWKS to convert
* @return a list of {@link Jwk}
*/
private List<Jwk> convertToJwks(String jwksUri, Map<String, Object> jwks) throws AuthenticationException {
try {
@SuppressWarnings("unchecked")
List<Map<String, Object>> jwkList = ((List<Map<String, Object>>) (jwks.get("keys")));
final List<Jwk> result = new ArrayList<>();
for (Map<String, Object> jwk : jwkList) {
result.add(Jwk.fromValues(jwk));
}
return result;
} catch (ClassCastException e) {
throw new AuthenticationException("Malformed JWKS returned by: " + jwksUri);
}
} | 3.26 |
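For reference, a minimal sketch of the JWKS shape this method expects and how it might be fed in (key material abbreviated; the URI is illustrative):

// {"keys": [{"kty": "RSA", "kid": "key-1", "n": "...", "e": "AQAB"}]}
Map<String, Object> jwks = Map.of("keys", List.of(
        Map.of("kty", "RSA", "kid", "key-1", "n", "...", "e", "AQAB")));
List<Jwk> parsed = convertToJwks("https://issuer.example/jwks.json", jwks); // yields one Jwk with kid "key-1"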
pulsar_BookieServiceInfoSerde_extractBookiedIdFromPath_rdh | /**
 * Extract the BookieId.
 * The path should look like /ledgers/available/bookieId
 * or /ledgers/available/readonly/bookieId,
 * but the prefix depends on the configuration.
*
* @param path
* @return the bookieId
 */
private static String extractBookiedIdFromPath(String path) throws IOException {
// https://github.com/apache/bookkeeper/blob/
// 034ef8566ad037937a4d58a28f70631175744f53/bookkeeper-server/
// src/main/java/org/apache/bookkeeper/discover/ZKRegistrationClient.java#L258
if (path == null) {
path = "";
}
int last = path.lastIndexOf("/");
if (last >= 0) {
return path.substring(last + 1);
} else {
throw new IOException(("The path " + path) + " doesn't look like a valid path for a BookieServiceInfo node");
}
} | 3.26 |
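Both registration layouts mentioned in the javadoc resolve to the trailing path segment; a quick illustration (the bookie IDs are made up):

extractBookiedIdFromPath("/ledgers/available/bookie-1:3181");          // -> "bookie-1:3181"
extractBookiedIdFromPath("/ledgers/available/readonly/bookie-2:3181"); // -> "bookie-2:3181"
extractBookiedIdFromPath("bookie-without-slash");                      // -> throws IOException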
pulsar_KubernetesRuntime_start_rdh | /**
* The core logic that creates a service first followed by statefulset.
*/
@Override
public void start() throws Exception {
try {
            submitService();
            submitStatefulSet();
} catch (Exception e) {
log.error("Failed start function {}/{}/{} in Kubernetes", f1.getFunctionDetails().getTenant(), f1.getFunctionDetails().getNamespace(), f1.getFunctionDetails().getName(), e);
stop();
throw e;
}
setupGrpcChannelIfNeeded();
} | 3.26 |
pulsar_PulsarAuth_cleanSession_rdh | /**
* When the session is closed, this method needs to be called to clear the session's auth verification status.
*/
public void cleanSession(ConnectorSession session) {
authorizedQueryTopicsMap.remove(session.getQueryId());
} | 3.26 |
pulsar_PulsarAuth_checkTopicAuth_rdh | /**
* Check if the session has read access to the topic.
* It will try to subscribe to that topic using the Pulsar Reader to check the consumption privilege.
* The same topic will only be checked once during the same session.
*/
public void checkTopicAuth(ConnectorSession session, String topic) {
        Set<String> authorizedTopics = authorizedQueryTopicsMap.computeIfAbsent(session.getQueryId(), query -> new HashSet<>());
        if (authorizedTopics.contains(topic)) {
if (log.isDebugEnabled()) {
log.debug("The topic %s is already authorized.", topic);
}
return;
}
if (log.isDebugEnabled()) {
log.debug("Checking the authorization for the topic: %s", topic);
}
Map<String, String> extraCredentials = session.getIdentity().getExtraCredentials();
if (extraCredentials.isEmpty()) {
// the extraCredentials won't be null
throw new TrinoException(QUERY_REJECTED, String.format("Failed to check the authorization for topic %s: The credential information is empty.", topic));
}
String authMethod = extraCredentials.get(CREDENTIALS_AUTH_PLUGIN);
String authParams = extraCredentials.get(CREDENTIALS_AUTH_PARAMS);
if (StringUtils.isEmpty(authMethod) || StringUtils.isEmpty(authParams)) {
            throw new TrinoException(QUERY_REJECTED, String.format("Failed to check the authorization for topic %s: Required credential parameters are"
                    + " missing. Please specify the auth-method and auth-params in the extra credentials.", topic));
}
        try {
            @Cleanup
            PulsarClient client = PulsarClient.builder().serviceUrl(pulsarConnectorConfig.getBrokerBinaryServiceUrl()).authentication(authMethod, authParams).build();
client.newConsumer().topic(topic).subscriptionName("pulsar-sql-auth" + session.getQueryId()).subscriptionType(SubscriptionType.Exclusive).subscriptionMode(SubscriptionMode.NonDurable).startPaused(true).subscribe().close();
authorizedQueryTopicsMap.computeIfPresent(session.getQueryId(), (query, topics) -> {
topics.add(topic);
return topics;
});
if (log.isDebugEnabled()) {
log.debug("Check the authorization for the topic %s successfully.", topic); }
} catch (PulsarClientException | PulsarClientException e) {
throw new TrinoException(PERMISSION_DENIED, String.format("Failed to access topic %s: %s", topic, e.getLocalizedMessage()));
} catch (IOException e) {
throw new TrinoException(QUERY_REJECTED, String.format("Failed to check authorization for topic %s: %s", topic, e.getLocalizedMessage()));
}
} | 3.26 |
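Opening a paused, non-durable, exclusive subscription and closing it immediately delegates the permission decision to the broker without consuming any data. A hedged sketch of the extra credentials a session would need to carry for this check to pass (the literal key strings shown here are assumptions; the real values come from the CREDENTIALS_AUTH_PLUGIN and CREDENTIALS_AUTH_PARAMS constants referenced above):

// Assumed key names; the real keys are the constants in this class.
Map<String, String> extraCredentials = Map.of(
        "auth-plugin", "org.apache.pulsar.client.impl.auth.AuthenticationToken",
        "auth-params", "token:<JWT>");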
pulsar_CliCommand_getOneArgument_rdh | /**
*
* @param params
* List of positional arguments
* @param pos
 * Positional arguments start at index 1
* @param maxArguments
* Validate against max arguments
 * @return the positional argument at the given index
 */
static String getOneArgument(List<String> params, int pos, int maxArguments) {
if (params.size() != maxArguments) {
throw new ParameterException(String.format("Need to provide %s parameters", maxArguments));
}
return params.get(pos);
} | 3.26 |
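A quick illustration of the contract (the command word and topic are made up):

List<String> params = List.of("stats", "persistent://public/default/my-topic");
String topic = getOneArgument(params, 1, 2); // exactly two params expected; returns the one at index 1
getOneArgument(params, 1, 3);                // throws ParameterException: "Need to provide 3 parameters"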
pulsar_ThreadLocalStateCleaner_cleanupThreadLocal_rdh | // use reflection to clear the state of the given thread local and thread
public <T> void cleanupThreadLocal(ThreadLocal<?> threadLocal, Thread thread, BiConsumer<Thread, T> cleanedValueListener) {
        Objects.requireNonNull(threadLocal); // Objects.nonNull would be a no-op here: it only returns a boolean
        Objects.requireNonNull(thread);
try {
Object threadLocalMap = GET_THREADLOCAL_MAP_METHOD.invoke(threadLocal, thread);
            if (threadLocalMap != null) {
if (cleanedValueListener != null) {
callCleanedValueListener(threadLocal, thread, cleanedValueListener, threadLocalMap);
}
if (removeThreadlocalMethod == null) {
removeThreadlocalMethod = MethodUtils.getMatchingMethod(threadLocalMap.getClass(), "remove", ThreadLocal.class);
removeThreadlocalMethod.setAccessible(true);
}
removeThreadlocalMethod.invoke(threadLocalMap, threadLocal);
}
} catch (IllegalAccessException | InvocationTargetException e) {
LOG.warn("Cannot cleanup thread local", e);
}
} | 3.26 |
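The reflective handle used above is presumably initialized once per class; a hedged sketch of that setup (the field name mirrors the usage above, the initializer itself is an assumption, and on JDK 9+ this kind of access typically needs --add-opens java.base/java.lang=ALL-UNNAMED):

private static final Method GET_THREADLOCAL_MAP_METHOD;
static {
    try {
        // ThreadLocal#getMap(Thread) is package-private and returns the thread's ThreadLocalMap.
        GET_THREADLOCAL_MAP_METHOD = ThreadLocal.class.getDeclaredMethod("getMap", Thread.class);
        GET_THREADLOCAL_MAP_METHOD.setAccessible(true);
    } catch (NoSuchMethodException e) {
        throw new ExceptionInInitializerError(e);
    }
}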