name | code_snippet | score |
---|---|---|
pulsar_BacklogQuotaManager_dropBacklogForSizeLimit_rdh | /**
* Drop the backlog on the topic.
*
* @param persistentTopic
* The topic from which backlog should be dropped
* @param quota
* Backlog quota set for the topic
*/
private void dropBacklogForSizeLimit(PersistentTopic persistentTopic, BacklogQuota quota) {
// Set the reduction factor to 90%. The aim is to drop down the backlog to 90% of the quota limit.
double reductionFactor = 0.9;
double targetSize = reductionFactor * quota.getLimitSize();
// Get estimated unconsumed size for the managed ledger associated with this topic. Estimated size is more
// useful than the actual storage size. Actual storage size gets updated only when managed ledger is trimmed.
ManagedLedgerImpl mLedger = ((ManagedLedgerImpl) (persistentTopic.getManagedLedger()));
long backlogSize = mLedger.getEstimatedBacklogSize();
if (log.isDebugEnabled()) {
log.debug("[{}] target size is [{}] for quota limit [{}], backlog size is [{}]", persistentTopic.getName(), targetSize, targetSize / reductionFactor, backlogSize);
}
ManagedCursor previousSlowestConsumer = null;
while (backlogSize > targetSize) {
// Get the slowest consumer for this managed ledger and save the ledger id of the marked delete position of
// slowest consumer. Calculate the factor which is used in calculating number of messages to be skipped.
ManagedCursor slowestConsumer = mLedger.getSlowestConsumer();
if (slowestConsumer == null) {
if (log.isDebugEnabled()) {
log.debug("[{}] slowest consumer null.", persistentTopic.getName());
}
break;
}
double messageSkipFactor = (backlogSize - targetSize) / backlogSize;
if (slowestConsumer == previousSlowestConsumer) {
log.info("[{}] Cursors not progressing, target size is [{}] for quota limit [{}], backlog size is [{}]", persistentTopic.getName(), targetSize, targetSize / reductionFactor, backlogSize);
break;
}
// Calculate number of messages to be skipped using the current backlog and the skip factor.
long entriesInBacklog = slowestConsumer.getNumberOfEntriesInBacklog(false);
int messagesToSkip = (int) (messageSkipFactor * entriesInBacklog);
try {
// If there are no messages to skip, break out of the loop
if (messagesToSkip == 0) {
if (log.isDebugEnabled()) {
log.debug("no messages to skip for [{}]", slowestConsumer);
}
break;
}
// Skip messages on the slowest consumer
if (log.isDebugEnabled()) {
log.debug("[{}] Skipping [{}] messages on slowest consumer [{}] having backlog entries : [{}]", persistentTopic.getName(), messagesToSkip, slowestConsumer.getName(), entriesInBacklog);
}
slowestConsumer.skipEntries(messagesToSkip, IndividualDeletedEntries.Include);
} catch (Exception e) {
log.error("[{}] Error skipping [{}] messages from slowest consumer [{}]", persistentTopic.getName(), messagesToSkip, slowestConsumer.getName(), e);
}
// Make sure that unconsumed size is updated every time when we skip the messages.
backlogSize = mLedger.getEstimatedBacklogSize();
previousSlowestConsumer = slowestConsumer;
if (log.isDebugEnabled()) {
log.debug("[{}] Updated unconsumed size = [{}]. skipFactor: [{}]", persistentTopic.getName(), backlogSize, messageSkipFactor);
}
}
} | 3.26 |
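To make the eviction arithmetic above concrete, here is a minimal sketch with hypothetical numbers (none of these values come from the Pulsar code): a 1 MB quota yields a 900 KB target, and a 1.2 MB backlog gives a skip factor of 0.25, so a quarter of the slowest cursor's entries are skipped in that iteration.

```java
public class SkipFactorExample {
    public static void main(String[] args) {
        long limitSize = 1_000_000L;          // hypothetical quota limit in bytes
        double targetSize = 0.9 * limitSize;  // reductionFactor * limit = 900,000
        long backlogSize = 1_200_000L;        // hypothetical estimated unconsumed bytes
        long entriesInBacklog = 4_000L;       // entries behind the slowest cursor

        // Fraction of the backlog that must be dropped to reach the target.
        double messageSkipFactor = (backlogSize - targetSize) / backlogSize; // 0.25
        int messagesToSkip = (int) (messageSkipFactor * entriesInBacklog);   // 1,000

        System.out.println("skip " + messagesToSkip + " of " + entriesInBacklog + " entries");
    }
}
```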
pulsar_BacklogQuotaManager_m0_rdh | /**
* Disconnect all producers on the given topic.
*
* @param persistentTopic
* The topic on which all producers should be disconnected
*/
private void m0(PersistentTopic persistentTopic) {
List<CompletableFuture<Void>> futures = new ArrayList<>();
Map<String, Producer> producerMap = persistentTopic.getProducers();
producerMap.values().forEach(producer -> {
log.info("Producer [{}] has exceeded backlog quota on topic [{}]. Disconnecting producer", producer.getProducerName(), persistentTopic.getName());
futures.add(producer.disconnect());
});
FutureUtil.waitForAll(futures).thenRun(() -> {
log.info("All producers on topic [{}] are disconnected", persistentTopic.getName());
}).exceptionally(exception -> {
log.error("Error in disconnecting producers on topic [{}] [{}]", persistentTopic.getName(), exception);return null;
});
} | 3.26 |
pulsar_BacklogQuotaManager_dropBacklogForTimeLimit_rdh | /**
* Drop the backlog on the topic.
*
* @param persistentTopic
* The topic from which backlog should be dropped
* @param quota
* Backlog quota set for the topic
*/
private void dropBacklogForTimeLimit(PersistentTopic persistentTopic, BacklogQuota quota, boolean preciseTimeBasedBacklogQuotaCheck) {
// If the precise time-based backlog quota check is enabled, expire messages based on the time quota
if (preciseTimeBasedBacklogQuotaCheck) {
// Set the reduction factor to 90%. The aim is to drop down the backlog to 90% of the quota limit.
double reductionFactor = 0.9;
int target = (int) (reductionFactor * quota.getLimitTime());
if (log.isDebugEnabled()) {
log.debug("[{}] target backlog expire time is [{}]", persistentTopic.getName(), target);
}
persistentTopic.getSubscriptions().forEach((__, subscription) -> subscription.getExpiryMonitor().expireMessages(target));
} else {
// If the precise time-based backlog quota check is disabled, try to remove whole ledgers from the cursor's backlog
ManagedLedgerImpl mLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger();
long currentMillis = mLedger.getClock().millis();
try {
for (;;) {
ManagedCursor slowestConsumer = mLedger.getSlowestConsumer();
Position oldestPosition = slowestConsumer.getMarkDeletedPosition();
if (log.isDebugEnabled()) {
log.debug("[{}] slowest consumer mark delete position is [{}], read position is [{}]", slowestConsumer.getName(), oldestPosition, slowestConsumer.getReadPosition());
}
ManagedLedgerInfo.LedgerInfo ledgerInfo = mLedger.getLedgerInfo(oldestPosition.getLedgerId()).get();
if (ledgerInfo == null) {
PositionImpl nextPosition = PositionImpl.get(mLedger.getNextValidLedger(oldestPosition.getLedgerId()), -1);
slowestConsumer.markDelete(nextPosition);
continue;
}
// Timestamp only > 0 if ledger has been closed
// Timestamp only > 0 if ledger has been closed
if ((ledgerInfo.getTimestamp() > 0) && ((currentMillis - ledgerInfo.getTimestamp()) > (quota.getLimitTime() * 1000))) {
// skip whole ledger for the slowest cursor
PositionImpl nextPosition = PositionImpl.get(mLedger.getNextValidLedger(ledgerInfo.getLedgerId()), -1);
if (!nextPosition.equals(oldestPosition)) {
slowestConsumer.markDelete(nextPosition);
continue;
}
}
break;
}
} catch (Exception e) {
log.error("[{}] Error resetting cursor for slowest consumer [{}]", persistentTopic.getName(), mLedger.getSlowestConsumer().getName(), e);
}
}
} | 3.26 |
pulsar_BacklogQuotaManager_handleExceededBacklogQuota_rdh | /**
* Handle an exceeded backlog by applying the policy configured (in ZooKeeper) for the given topic.
*
* @param persistentTopic
* Topic on which backlog has been exceeded
*/
public void handleExceededBacklogQuota(PersistentTopic persistentTopic, BacklogQuotaType backlogQuotaType, boolean preciseTimeBasedBacklogQuotaCheck) {
BacklogQuota quota = persistentTopic.getBacklogQuota(backlogQuotaType);
log.info("Backlog quota type {} exceeded for topic [{}]. Applying [{}] policy", backlogQuotaType, persistentTopic.getName(), quota.getPolicy());
switch (quota.getPolicy()) {
case consumer_backlog_eviction :
switch (backlogQuotaType) {
case destination_storage :
dropBacklogForSizeLimit(persistentTopic, quota);
break;
case message_age :
dropBacklogForTimeLimit(persistentTopic, quota, preciseTimeBasedBacklogQuotaCheck);
break;
default :
break;
}
break;
case producer_exception :
case producer_request_hold :
if (!advanceSlowestSystemCursor(persistentTopic)) {
// The slowest cursor is not a system cursor. Disconnect producers to apply backpressure.
m0(persistentTopic);
}
break;
default :
break;
}
} | 3.26 |
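For context, the quota and policy this handler reacts to are typically set through the admin API. The sketch below is an illustration only: the builder methods and the three-argument `setBacklogQuota` overload follow recent Pulsar admin APIs and may differ across versions, and the URL and namespace are placeholders.

```java
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.common.policies.data.BacklogQuota;
import org.apache.pulsar.common.policies.data.BacklogQuota.BacklogQuotaType;
import org.apache.pulsar.common.policies.data.BacklogQuota.RetentionPolicy;

public class BacklogQuotaSetup {
    public static void main(String[] args) throws Exception {
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://localhost:8080") // placeholder broker URL
                .build()) {
            // Evict the oldest backlog once the namespace stores more than 10 GiB,
            // which drives handleExceededBacklogQuota into dropBacklogForSizeLimit.
            admin.namespaces().setBacklogQuota("public/default",
                    BacklogQuota.builder()
                            .limitSize(10L * 1024 * 1024 * 1024)
                            .retentionPolicy(RetentionPolicy.consumer_backlog_eviction)
                            .build(),
                    BacklogQuotaType.destination_storage);
        }
    }
}
```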
pulsar_NamespacesBase_internalSetReplicatorDispatchRate_rdh | /**
* Base method for setReplicatorDispatchRate v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalSetReplicatorDispatchRate(AsyncResponse asyncResponse, DispatchRateImpl dispatchRate) {
validateSuperUserAccessAsync().thenAccept(__ -> {
log.info("[{}] Set namespace replicator dispatch-rate {}/{}", clientAppId(), namespaceName, dispatchRate);
}).thenCompose(__ -> namespaceResources().setPoliciesAsync(namespaceName, policies -> {
String clusterName = pulsar().getConfiguration().getClusterName();
policies.replicatorDispatchRate.put(clusterName, dispatchRate);
return policies;
})).thenAccept(__ -> {
asyncResponse.resume(Response.noContent().build());
log.info("[{}] Successfully updated the replicatorDispatchRate for cluster on namespace {}", clientAppId(), namespaceName);
}).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to update the replicatorDispatchRate for cluster on namespace {}", clientAppId(), namespaceName, ex);
return null;
});
} | 3.26 |
pulsar_NamespacesBase_m1_rdh | // clear zk-node resources for deleting namespace
protected CompletableFuture<Void> m1() {
// clear resource of `/namespace/{namespaceName}` for zk-node
return namespaceResources().deleteNamespaceAsync(namespaceName)
.thenCompose(ignore -> namespaceResources().getPartitionedTopicResources().clearPartitionedTopicMetadataAsync(namespaceName))
// clear resource for managed-ledger z-node
.thenCompose(ignore -> pulsar().getPulsarResources().getTopicResources().clearDomainPersistence(namespaceName))
.thenCompose(ignore -> pulsar().getPulsarResources().getTopicResources().clearNamespacePersistence(namespaceName))
// we have successfully removed all the ownership for the namespace, the policies z-node can be deleted now
.thenCompose(ignore -> namespaceResources().deletePoliciesAsync(namespaceName))
// clear z-node of local policies
.thenCompose(ignore -> getLocalPolicies().deleteLocalPoliciesAsync(namespaceName))
// clear /loadbalance/bundle-data
.thenCompose(ignore -> loadBalanceResources().getBundleDataResources().deleteBundleDataAsync(namespaceName));
} | 3.26 |
pulsar_NamespacesBase_internalDeleteNamespaceAsync_rdh | /**
* Delete the namespace, retrying to resolve topics that were not created successfully (in metadata)
* during the deletion.
*/
@Nonnull
protected CompletableFuture<Void> internalDeleteNamespaceAsync(boolean force) {
final CompletableFuture<Void> future = new CompletableFuture<>();
m0(force, 5, future);
return future;
} | 3.26 |
pulsar_NamespacesBase_internalGetBacklogQuotaMap_rdh | /**
* Base method for getBackLogQuotaMap v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalGetBacklogQuotaMap(AsyncResponse asyncResponse) {
validateNamespacePolicyOperationAsync(namespaceName, PolicyName.BACKLOG, PolicyOperation.READ).thenCompose(__ -> namespaceResources().getPoliciesAsync(namespaceName)).thenAccept(policiesOpt -> {
Map<BacklogQuotaType, BacklogQuota> backlogQuotaMap = policiesOpt.orElseThrow(() -> new RestException(Response.Status.NOT_FOUND, "Namespace does not exist")).backlog_quota_map;
asyncResponse.resume(backlogQuotaMap);
}).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to get backlog quota map on namespace {}", clientAppId(), namespaceName, ex);
return null;
});
} | 3.26 |
pulsar_NamespacesBase_internalRemoveReplicatorDispatchRate_rdh | /**
* Base method for removeReplicatorDispatchRate v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalRemoveReplicatorDispatchRate(AsyncResponse asyncResponse) {
validateSuperUserAccessAsync().thenCompose(__ -> namespaceResources().setPoliciesAsync(namespaceName, policies -> {
String clusterName = pulsar().getConfiguration().getClusterName();
policies.replicatorDispatchRate.remove(clusterName);
return policies;
})).thenAccept(__ -> {
asyncResponse.resume(Response.noContent().build());
log.info("[{}] Successfully deleted the replicatorDispatchRate for cluster on namespace {}", clientAppId(), namespaceName);
}).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to delete the replicatorDispatchRate for cluster on namespace {}", clientAppId(),
namespaceName, ex);
return null;});
} | 3.26 |
pulsar_NamespacesBase_internalGetReplicatorDispatchRate_rdh | /**
* Base method for getReplicatorDispatchRate v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalGetReplicatorDispatchRate(AsyncResponse asyncResponse) {
validateNamespacePolicyOperationAsync(namespaceName, PolicyName.REPLICATION_RATE, PolicyOperation.READ).thenCompose(__ -> namespaceResources().getPoliciesAsync(namespaceName)).thenApply(policiesOpt -> {
if (!policiesOpt.isPresent()) {
throw new RestException(Response.Status.NOT_FOUND, "Namespace policies does not exist");
}
String clusterName = pulsar().getConfiguration().getClusterName();
return policiesOpt.get().replicatorDispatchRate.get(clusterName);
}).thenAccept(asyncResponse::resume).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to get replicator dispatch-rate configured for the namespace {}", clientAppId(), namespaceName,
ex);
return null;
});
} | 3.26 |
pulsar_NamespacesBase_internalSetBacklogQuota_rdh | /**
* Base method for setBacklogQuota v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalSetBacklogQuota(AsyncResponse asyncResponse, BacklogQuotaType backlogQuotaType, BacklogQuota backlogQuota) {
validateNamespacePolicyOperationAsync(namespaceName, PolicyName.BACKLOG, PolicyOperation.WRITE).thenCompose(__ -> validatePoliciesReadOnlyAccessAsync()).thenCompose(__ -> setBacklogQuotaAsync(backlogQuotaType, backlogQuota)).thenAccept(__ -> {
asyncResponse.resume(Response.noContent().build());
log.info("[{}] Successfully updated backlog quota map: namespace={}, map={}", clientAppId(), namespaceName, backlogQuota);
}).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to update backlog quota map for namespace {}", clientAppId(), namespaceName, ex);
return null;
});
} | 3.26 |
pulsar_ClientConfiguration_isTlsAllowInsecureConnection_rdh | /**
*
* @return whether the Pulsar client accepts untrusted TLS certificates from the broker
*/
public boolean isTlsAllowInsecureConnection() {
return confData.isTlsAllowInsecureConnection();
} | 3.26 |
pulsar_ClientConfiguration_setStatsInterval_rdh | /**
* Set the interval between each stats update <i>(default: 60 seconds)</i>. Stats are activated with a positive
* statsInterval; it should be set to at least 1 second.
*
* @param statsInterval
* the interval between each stat info
* @param unit
* time unit for {@code statsInterval}
*/
public void setStatsInterval(long statsInterval, TimeUnit unit) {
confData.setStatsIntervalSeconds(unit.toSeconds(statsInterval));
} | 3.26 |
pulsar_ClientConfiguration_isUseTcpNoDelay_rdh | /**
*
* @return whether TCP no-delay should be set on the connections
*/
public boolean isUseTcpNoDelay() {
return confData.isUseTcpNoDelay();
} | 3.26 |
pulsar_ClientConfiguration_getConnectionsPerBroker_rdh | /**
*
* @return the max number of connections per single broker
*/
public int getConnectionsPerBroker() {
return confData.getConnectionsPerBroker();
} | 3.26 |
pulsar_ClientConfiguration_setAuthentication_rdh | /**
* Set the authentication provider to use in the Pulsar client instance.
* <p>
* Example:
* <p>
*
* <pre>
* <code>
* ClientConfiguration confData = new ClientConfiguration();
* String authPluginClassName = "org.apache.pulsar.client.impl.auth.MyAuthentication";
* Map<String, String> authParams = new HashMap<String, String>();
* authParams.put("key1", "val1");
* confData.setAuthentication(authPluginClassName, authParams);
* PulsarClient client = PulsarClient.create(serviceUrl, confData);
* ....
* </code>
* </pre>
*
* @param authPluginClassName
* name of the Authentication-Plugin you want to use
* @param authParams
* map which represents parameters for the Authentication-Plugin
* @throws UnsupportedAuthenticationException
* failed to instantiate specified Authentication-Plugin
*/
public void setAuthentication(String authPluginClassName, Map<String, String> authParams) throws UnsupportedAuthenticationException {
confData.setAuthentication(AuthenticationFactory.create(authPluginClassName, authParams));
} | 3.26 |
pulsar_ClientConfiguration_setUseTcpNoDelay_rdh | /**
* Configure whether to use TCP no-delay flag on the connection, to disable Nagle algorithm.
* <p>
* The no-delay feature makes sure packets are sent out on the network as soon as possible, which is critical to
* achieving low-latency publishes. On the other hand, sending out a huge number of small packets might limit the
* overall throughput, so if latency is not a concern, it's advisable to set the <code>useTcpNoDelay</code> flag to false.
* <p>
* Default value is true
*
* @param useTcpNoDelay
*/
public void setUseTcpNoDelay(boolean useTcpNoDelay) {
confData.setUseTcpNoDelay(useTcpNoDelay);
} | 3.26 |
pulsar_ClientConfiguration_getIoThreads_rdh | /**
*
* @return the number of threads to use for handling connections
*/
public int getIoThreads() {
return confData.getNumIoThreads();
} | 3.26 |
pulsar_ClientConfiguration_setTlsTrustCertsFilePath_rdh | /**
* Set the path to the trusted TLS certificate file.
*
* @param tlsTrustCertsFilePath
*/
public void setTlsTrustCertsFilePath(String tlsTrustCertsFilePath) {
confData.setTlsTrustCertsFilePath(tlsTrustCertsFilePath);
} | 3.26 |
pulsar_ClientConfiguration_getOperationTimeoutMs_rdh | /**
*
* @return the operation timeout in ms
*/
public long getOperationTimeoutMs() {
return confData.getOperationTimeoutMs();
} | 3.26 |
pulsar_ClientConfiguration_getMaxNumberOfRejectedRequestPerConnection_rdh | /**
* Get configured max number of reject-request in a time-frame (60 seconds) after which connection will be closed.
*
* @return the configured maximum number of rejected requests per connection
*/
public int getMaxNumberOfRejectedRequestPerConnection() {
return confData.getMaxNumberOfRejectedRequestPerConnection();
} | 3.26 |
pulsar_ClientConfiguration_getTlsTrustCertsFilePath_rdh | /**
*
* @return path to the trusted TLS certificate file
*/
public String getTlsTrustCertsFilePath() {
return confData.getTlsTrustCertsFilePath();
} | 3.26 |
pulsar_ClientConfiguration_setTlsHostnameVerificationEnable_rdh | /**
* Enables hostname verification when the client connects to the broker over TLS. It validates the incoming x509
* certificate and matches the provided hostname (CN/SAN) with the expected broker's host name. It follows RFC 2818,
* 3.1. Server Identity hostname verification.
*
* @see <a href="https://tools.ietf.org/html/rfc2818">rfc2818</a>
* @param tlsHostnameVerificationEnable
*/
public void setTlsHostnameVerificationEnable(boolean tlsHostnameVerificationEnable) {
confData.setTlsHostnameVerificationEnable(tlsHostnameVerificationEnable);
} | 3.26 |
pulsar_ClientConfiguration_setConnectionsPerBroker_rdh | /**
* Sets the max number of connections that the client library will open to a single broker.
* <p>
* By default, the connection pool will use a single connection for all the producers and consumers. Increasing this
* parameter may improve throughput when using many producers over a high latency connection.
* <p>
*
* @param connectionsPerBroker
* max number of connections per broker (needs to be greater than 0)
*/
public void setConnectionsPerBroker(int connectionsPerBroker) {
checkArgument(connectionsPerBroker > 0, "Connections per broker need to be greater than 0");
confData.setConnectionsPerBroker(connectionsPerBroker);
} | 3.26 |
pulsar_ClientConfiguration_setIoThreads_rdh | /**
* Set the number of threads to be used for handling connections to brokers <i>(default: 1 thread)</i>.
*
* @param numIoThreads
*/
public void setIoThreads(int numIoThreads) {
checkArgument(numIoThreads > 0);
confData.setNumIoThreads(numIoThreads);
} | 3.26 |
pulsar_ClientConfiguration_getStatsIntervalSeconds_rdh | /**
* Stats will be activated with positive statsIntervalSeconds.
*
* @return the interval between each stat info <i>(default: 60 seconds)</i>
*/
public long getStatsIntervalSeconds() {
return confData.getStatsIntervalSeconds();
} | 3.26 |
pulsar_ClientConfiguration_getListenerThreads_rdh | /**
*
* @return the number of threads to use for message listeners
*/
public int getListenerThreads() {
return confData.getNumListenerThreads();
} | 3.26 |
pulsar_ClientConfiguration_isUseTls_rdh | /**
*
* @return whether TLS encryption is used on the connection
*/
public boolean isUseTls() {
return confData.isUseTls();
} | 3.26 |
pulsar_ClientConfiguration_getAuthentication_rdh | /**
*
* @return the authentication provider to be used
*/
public Authentication getAuthentication() {
return confData.getAuthentication();
} | 3.26 |
pulsar_ClientConfiguration_setUseTls_rdh | /**
* Configure whether to use TLS encryption on the connection <i>(default: false)</i>.
*
* @param useTls
*/
public void setUseTls(boolean useTls) {
confData.setUseTls(useTls);
} | 3.26 |
pulsar_ClientConfiguration_getConcurrentLookupRequest_rdh | /**
* Get configured total allowed concurrent lookup-request.
*
* @return the configured total allowed concurrent lookup requests
*/
public int getConcurrentLookupRequest() {
return confData.getConcurrentLookupRequest();
} | 3.26 |
pulsar_ClientConfiguration_setOperationTimeout_rdh | /**
* Set the operation timeout <i>(default: 30 seconds)</i>.
* <p>
* Producer-create, subscribe and unsubscribe operations will be retried until this interval, after which the
* operation will be marked as failed
*
* @param operationTimeout
* operation timeout
* @param unit
* time unit for {@code operationTimeout}
*/
public void setOperationTimeout(int operationTimeout, TimeUnit unit) {
checkArgument(operationTimeout >= 0);
confData.setOperationTimeoutMs(unit.toMillis(operationTimeout));
} | 3.26 |
pulsar_ClientConfiguration_setConnectionTimeout_rdh | /**
* Set the duration of time to wait for a connection to a broker to be established. If the duration
* passes without a response from the broker, the connection attempt is dropped.
*
* @param duration
* the duration to wait
* @param unit
* the time unit in which the duration is defined
*/
public void setConnectionTimeout(int duration, TimeUnit unit) {
confData.setConnectionTimeoutMs(((int) (unit.toMillis(duration))));
} | 3.26 |
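Putting the getters and setters above together, a minimal configuration sketch could look like the following. Note this legacy `ClientConfiguration`/`PulsarClient.create` API is deprecated in recent Pulsar releases in favor of `PulsarClient.builder()`, and the service URL is a placeholder.

```java
import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.ClientConfiguration;
import org.apache.pulsar.client.api.PulsarClient;

public class LegacyClientConfigExample {
    public static void main(String[] args) throws Exception {
        ClientConfiguration conf = new ClientConfiguration();
        conf.setIoThreads(4);                           // threads for handling broker connections
        conf.setConnectionsPerBroker(2);                // pool two connections per broker
        conf.setOperationTimeout(30, TimeUnit.SECONDS); // retry window for create/subscribe
        conf.setConnectionTimeout(10, TimeUnit.SECONDS);
        conf.setUseTcpNoDelay(true);                    // prioritize latency over batching

        PulsarClient client = PulsarClient.create("pulsar://localhost:6650", conf);
        client.close();
    }
}
```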
pulsar_ClientCnx_addPendingLookupRequests_rdh | // caller of this method needs to be protected under pendingLookupRequestSemaphore
private void addPendingLookupRequests(long requestId, TimedCompletableFuture<LookupDataResult> future) {
pendingRequests.put(requestId, future);
requestTimeoutQueue.add(new RequestTime(requestId, RequestType.Lookup));
} | 3.26 |
pulsar_ClientCnx_exceptionCaught_rdh | // Command Handlers
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (state != State.Failed) {
// No need to report stack trace for known exceptions that happen in disconnections
log.warn("[{}] Got exception {}", remoteAddress, ClientCnx.isKnownException(cause) ? cause : ExceptionUtils.getStackTrace(cause));
state = State.Failed;
} else if (log.isDebugEnabled()) {
// At the default info level, suppress all subsequent exceptions that are thrown when the connection has
// already failed
log.debug("[{}] Got exception: {}", remoteAddress, cause.getMessage(), cause);
}
ctx.close();
} | 3.26 |
pulsar_ClientCnx_idleCheck_rdh | /**
* Check whether the client connection is now free. This method will not change the state to idle.
*
* @return true if the connection is eligible to be marked idle.
*/
public boolean idleCheck() {
if ((pendingRequests != null) && (!pendingRequests.isEmpty())) {
return false;
}
if ((waitingLookupRequests != null) && (!waitingLookupRequests.isEmpty())) {
return false;
}
if (!consumers.isEmpty()) {
return false;
}
if (!producers.isEmpty()) {
return false;
}
if (!transactionMetaStoreHandlers.isEmpty()) {
return false;
}
return true;
} | 3.26 |
pulsar_ClientBuilderImpl_description_rdh | /**
* Set the description.
*
* <p> By default, when the client connects to the broker, a version string like "Pulsar-Java-v<x.y.z>" will be
* carried and saved by the broker. The client version string could be queried from the topic stats.
*
* <p> This method provides a way to add more description to a specific PulsarClient instance. If it's configured,
* the description will be appended to the original client version string, with '-' as the separator.
*
* <p>For example, if the client version is 3.0.0, and the description is "forked", the final client version string
* will be "Pulsar-Java-v3.0.0-forked".
*
* @param description
* the description of the current PulsarClient instance
* @throws IllegalArgumentException
* if the length of description exceeds 64
*/
public ClientBuilder description(String description) {
if ((description != null) && (description.length() > 64)) {
throw new IllegalArgumentException("description should be at most 64 characters");
}
conf.setDescription(description);
return this;
} | 3.26 |
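A short usage sketch of the builder method above; the `description` method is only available in clients that carry this change (roughly 3.0+), and the service URL is a placeholder.

```java
import org.apache.pulsar.client.api.PulsarClient;

public class DescriptionExample {
    public static void main(String[] args) throws Exception {
        // With client version 3.0.0, the broker would record "Pulsar-Java-v3.0.0-forked".
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .description("forked")
                .build();
        client.close();
    }
}
```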
pulsar_FunctionUtils_getFunctionClass_rdh | /**
* Extract the Pulsar Function class from a function or archive.
*/
public static String getFunctionClass(ClassLoader classLoader) throws IOException {
NarClassLoader ncl = ((NarClassLoader) (classLoader));
String configStr = ncl.getServiceDefinition(f0);
FunctionDefinition conf = ObjectMapperFactory.getYamlMapper().reader().readValue(configStr, FunctionDefinition.class);
if (StringUtils.isEmpty(conf.getFunctionClass())) {
throw new IOException(String.format("The '%s' functionctor does not provide a function implementation", conf.getName()));
}
try {
// Try to load the function class and check that it implements the Function interface
Class<?> functionClass = ncl.loadClass(conf.getFunctionClass());
if (!Function.class.isAssignableFrom(functionClass)) {
throw new IOException("Class " + conf.getFunctionClass() + " does not implement interface " + Function.class.getName());
}
} catch (Throwable t) {
Exceptions.rethrowIOException(t);
}
return conf.getFunctionClass();
} | 3.26 |
pulsar_ResourceUnitRanking_getEstimatedMessageRate_rdh | /**
* Get the estimated message rate.
*/
public double getEstimatedMessageRate() {
return this.f0;
} | 3.26 |
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageMemory_rdh | /**
* Percentage of memory allocated to bundle's quota.
*/
public double getAllocatedLoadPercentageMemory() {
return this.allocatedLoadPercentageMemory;
} | 3.26 |
pulsar_ResourceUnitRanking_calculateBrokerCapacity_rdh | /**
* Calculate how many bundles could be handled with the specified resources.
*/
private static long calculateBrokerCapacity(ResourceQuota defaultQuota, double usableCPU, double usableMem, double usableBandwidthOut, double usableBandwidthIn) {
// estimate capacity with usable CPU
double cpuCapacity = (usableCPU / cpuUsageByMsgRate) / (defaultQuota.getMsgRateIn() + defaultQuota.getMsgRateOut());
// estimate capacity with usable memory
double memCapacity = usableMem / defaultQuota.getMemory();
// estimate capacity with usable outbound bandwidth
double bandwidthOutCapacity = usableBandwidthOut / defaultQuota.getBandwidthOut();
// estimate capacity with usable inbound bandwidth
double bandwidthInCapacity = usableBandwidthIn / defaultQuota.getBandwidthIn();
// the ServiceUnit capacity is determined by the minimum capacity of resources
double capacity = Math.min(cpuCapacity, Math.min(memCapacity, Math.min(bandwidthOutCapacity, bandwidthInCapacity)));
return ((long) (Math.max(capacity, 0)));
} | 3.26 |
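A worked example of the min-of-resources capacity formula above, with hypothetical figures (none of these numbers come from Pulsar defaults):

```java
public class CapacityExample {
    public static void main(String[] args) {
        // usableCPU = 50 (%), cpuUsageByMsgRate = 0.05 (% CPU per msg/s, assumed),
        // per-bundle quota: 100 msg/s in + 100 msg/s out, 64 MB memory, 10 MB/s each way.
        double cpuCapacity = (50 / 0.05) / (100 + 100); // = 5 bundles
        double memCapacity = 2048.0 / 64;               // 2048 MB usable => 32 bundles
        double bandwidthOutCapacity = 100.0 / 10;       // = 10 bundles
        double bandwidthInCapacity = 100.0 / 10;        // = 10 bundles

        // The unit's capacity is bounded by its scarcest resource (CPU here).
        long capacity = (long) Math.max(
                Math.min(cpuCapacity,
                        Math.min(memCapacity, Math.min(bandwidthOutCapacity, bandwidthInCapacity))), 0);
        System.out.println("broker can host ~" + capacity + " bundles"); // prints 5
    }
}
```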
pulsar_ResourceUnitRanking_removeLoadedServiceUnit_rdh | /**
* Remove a service unit from the loaded bundle list.
*/
public void removeLoadedServiceUnit(String suName, ResourceQuota quota) {
if (this.loadedBundles.remove(suName)) {
this.allocatedQuota.substract(quota);
estimateLoadPercentage();
}
} | 3.26 |
pulsar_ResourceUnitRanking_getLoadedBundles_rdh | /**
* Get the loaded bundles.
*/
public Set<String> getLoadedBundles() {
return loadedBundles;
} | 3.26 |
pulsar_ResourceUnitRanking_getEstimatedLoadPercentage_rdh | /**
* Get the estimated load percentage.
*/
public double getEstimatedLoadPercentage() {
return this.estimatedLoadPercentage;
} | 3.26 |
pulsar_ResourceUnitRanking_isServiceUnitPreAllocated_rdh | /**
* Check if a ServiceUnit is pre-allocated to this ResourceUnit.
*/
public boolean isServiceUnitPreAllocated(String suName) {
return this.preAllocatedBundles.contains(suName);
} | 3.26 |
pulsar_ResourceUnitRanking_calculateBrokerMaxCapacity_rdh | /**
* Estimate the maximum number of namespace bundles a ResourceUnit is able to handle with all resources.
*/
public static long calculateBrokerMaxCapacity(SystemResourceUsage systemResourceUsage, ResourceQuota defaultQuota) {
double bandwidthOutLimit = systemResourceUsage.bandwidthOut.limit * KBITS_TO_BYTES;
double bandwidthInLimit = systemResourceUsage.bandwidthIn.limit * KBITS_TO_BYTES;
long capacity = calculateBrokerCapacity(defaultQuota, systemResourceUsage.cpu.limit, systemResourceUsage.memory.limit, bandwidthOutLimit, bandwidthInLimit);
return capacity;
} | 3.26 |
pulsar_ResourceUnitRanking_addPreAllocatedServiceUnit_rdh | /**
* Pre-allocate a ServiceUnit to this ResourceUnit.
*/
public void addPreAllocatedServiceUnit(String suName, ResourceQuota quota) {
this.preAllocatedBundles.add(suName);
this.preAllocatedQuota.add(quota);
estimateLoadPercentage();
} | 3.26 |
pulsar_ResourceUnitRanking_estimateMaxCapacity_rdh | /**
* Estimate the maximum number of namespace bundles this ResourceUnit is able to handle with all resources.
*/
public long estimateMaxCapacity(ResourceQuota defaultQuota) {
return calculateBrokerMaxCapacity(this.systemResourceUsage, defaultQuota);
} | 3.26 |
pulsar_ResourceUnitRanking_compareMessageRateTo_rdh | /**
* Compare two loads based on message rate only.
*/
public int compareMessageRateTo(ResourceUnitRanking other) {
return Double.compare(this.f0, other.f0);
} | 3.26 |
pulsar_ResourceUnitRanking_getPreAllocatedBundles_rdh | /**
* Get the pre-allocated bundles.
*/
public Set<String> getPreAllocatedBundles() {
return this.preAllocatedBundles;
} | 3.26 |
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageBandwidthIn_rdh | /**
* Percentage of inbound bandwidth allocated to bundle's quota.
*/
public double getAllocatedLoadPercentageBandwidthIn() {
return this.allocatedLoadPercentageBandwidthIn;
} | 3.26 |
pulsar_ResourceUnitRanking_isServiceUnitLoaded_rdh | /**
* Check if a ServiceUnit is already loaded by this ResourceUnit.
*/
public boolean isServiceUnitLoaded(String suName) {
return this.loadedBundles.contains(suName);
} | 3.26 |
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageBandwidthOut_rdh | /**
* Percentage of outbound bandwidth allocated to bundle's quota.
*/
public double getAllocatedLoadPercentageBandwidthOut() {
return this.allocatedLoadPercentageBandwidthOut;
} | 3.26 |
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageCPU_rdh | /**
* Percentage of CPU allocated to bundle's quota.
*/
public double getAllocatedLoadPercentageCPU() {
return this.allocatedLoadPercentageCPU;
} | 3.26 |
pulsar_AbstractBaseDispatcher_filterEntriesForConsumer_rdh | /**
* Filter messages that are being sent to a consumer.
* <p>
* Messages can be filtered out for multiple reasons:
* <ul>
* <li>Checksum or metadata corrupted
* <li>Message is an internal marker
* <li>Message is not meant to be delivered immediately
* </ul>
*
* @param entries
* a list of entries as read from storage
* @param batchSizes
* an array where the batch size for each entry (the number of messages within an entry) is stored. This
* array needs to be of at least the same size as the entries list
* @param sendMessageInfo
* an object where the total size in messages and bytes will be returned back to the caller
*/
public int filterEntriesForConsumer(List<? extends Entry> entries, EntryBatchSizes batchSizes, SendMessageInfo sendMessageInfo, EntryBatchIndexesAcks indexesAcks, ManagedCursor cursor, boolean isReplayRead, Consumer consumer) {
return filterEntriesForConsumer(null, 0, entries, batchSizes, sendMessageInfo, indexesAcks, cursor, isReplayRead, consumer);
} | 3.26 |
pulsar_ServiceURI_selectOne_rdh | /**
* Create a new URI from the service URI which only specifies one of the hosts.
*
* @return a pulsar service URI with a single host specified
*/
public String selectOne() {
StringBuilder sb = new StringBuilder();
if (serviceName != null) {
sb.append(serviceName);
for (int i = 0; i < serviceInfos.length; i++) {
sb.append('+').append(serviceInfos[i]);
}
sb.append("://");
}
if (serviceUser != null) {
sb.append(serviceUser).append('@');
}
int hostIndex = ThreadLocalRandom.current().nextInt(serviceHosts.length);
sb.append(serviceHosts[hostIndex]);
return sb.append(servicePath).toString();
} | 3.26 |
pulsar_ServiceURI_create_rdh | /**
* Create a service uri instance from a {@link URI} instance.
*
* @param uri
* {@link URI} instance
* @return a service uri instance
* @throws NullPointerException
* if {@code uriStr} is null
* @throws IllegalArgumentException
* if the given string violates RFC 2396
*/
public static ServiceURI create(URI uri) {
requireNonNull(uri, "service uri instance is null");
String serviceName;
final String[] serviceInfos;
String scheme = uri.getScheme();
if (null != scheme) {
scheme = scheme.toLowerCase();
final String serviceSep = "+";
String[] schemeParts = StringUtils.split(scheme, serviceSep);
serviceName = schemeParts[0];
serviceInfos = new String[schemeParts.length - 1];
System.arraycopy(schemeParts, 1, serviceInfos, 0, serviceInfos.length);
} else {
serviceName = null;
serviceInfos = new String[0];
}
String userAndHostInformation = uri.getAuthority();
checkArgument(!Strings.isNullOrEmpty(userAndHostInformation), "authority component is missing in service uri : " + uri);
String serviceUser;
List<String> serviceHosts;
int atIndex = userAndHostInformation.indexOf('@');
Splitter splitter = Splitter.on(CharMatcher.anyOf(",;"));
if (atIndex > 0) {
serviceUser = userAndHostInformation.substring(0, atIndex);
serviceHosts = splitter.splitToList(userAndHostInformation.substring(atIndex + 1));
} else {
serviceUser = null;
serviceHosts = splitter.splitToList(userAndHostInformation);
}
serviceHosts = serviceHosts.stream().map(host -> validateHostName(serviceName, serviceInfos, host)).collect(Collectors.toList());
String servicePath = uri.getPath();
checkArgument(null != servicePath, "service path component is missing in service uri : " + uri);
return new ServiceURI(serviceName, serviceInfos, serviceUser, serviceHosts.toArray(new String[serviceHosts.size()]), servicePath, uri);
} | 3.26 |
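A usage sketch tying `create` and `selectOne` together; the import path of `ServiceURI` is assumed here (it lives in Pulsar's common module), and the hosts and credentials are placeholders.

```java
import java.net.URI;
import org.apache.pulsar.common.net.ServiceURI; // package path assumed

public class ServiceUriExample {
    public static void main(String[] args) {
        ServiceURI serviceUri = ServiceURI.create(
                URI.create("pulsar+ssl://admin@host1:6651,host2:6651/path"));
        // Parsed as: serviceName = "pulsar", serviceInfos = ["ssl"],
        // serviceUser = "admin", serviceHosts = ["host1:6651", "host2:6651"].
        // selectOne() rebuilds the URI around one randomly chosen host, e.g.
        // "pulsar+ssl://admin@host2:6651/path".
        System.out.println(serviceUri.selectOne());
    }
}
```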
pulsar_NonPersistentSubscription_deleteForcefully_rdh | /**
* Forcefully close all consumers and delete the subscription.
*
* @return CompletableFuture indicating the completion of the delete operation
*/
@Override
public CompletableFuture<Void> deleteForcefully() {
return delete(true);
} | 3.26 |
pulsar_NonPersistentSubscription_disconnect_rdh | /**
* Disconnect all consumers attached to the dispatcher and close this subscription.
*
* @return CompletableFuture indicating the completion of disconnect operation
*/
@Override
public synchronized CompletableFuture<Void> disconnect() {
CompletableFuture<Void> disconnectFuture = new CompletableFuture<>();
// block any further consumers on this subscription
IS_FENCED_UPDATER.set(this, TRUE);
(dispatcher != null ? dispatcher.close() : CompletableFuture.completedFuture(null)).thenCompose(v -> close()).thenRun(() -> {
log.info("[{}][{}] Successfully disconnected and closed subscription", topicName, subName);
disconnectFuture.complete(null);
}).exceptionally(exception -> {
IS_FENCED_UPDATER.set(this, FALSE);
if (dispatcher != null) {
dispatcher.reset();
}
log.error("[{}][{}] Error disconnecting consumers from subscription", topicName, subName, exception);
disconnectFuture.completeExceptionally(exception);
return null;
});
return disconnectFuture;
} | 3.26 |
pulsar_NonPersistentSubscription_delete_rdh | /**
* Delete the subscription by closing and deleting its managed cursor. Handle unsubscribe call from admin layer.
*
* @param closeIfConsumersConnected
* Flag indicating whether to explicitly close connected consumers before trying to delete the subscription. If
* any consumer is connected and this flag is disabled, the operation fails.
* @return CompletableFuture indicating the completion of delete operation
*/
private CompletableFuture<Void> delete(boolean closeIfConsumersConnected) {
CompletableFuture<Void> deleteFuture = new CompletableFuture<>();
log.info("[{}][{}] Unsubscribing", topicName, subName);
CompletableFuture<Void> closeSubscriptionFuture = new CompletableFuture<>();
if (closeIfConsumersConnected) {
this.disconnect().thenRun(() -> {
closeSubscriptionFuture.complete(null);
}).exceptionally(ex -> {
log.error("[{}][{}] Error disconnecting and closing subscription", topicName, subName, ex);
closeSubscriptionFuture.completeExceptionally(ex);
return null;
});
} else {
this.close().thenRun(() -> {
closeSubscriptionFuture.complete(null);
}).exceptionally(exception -> {
log.error("[{}][{}] Error closing subscription", topicName, subName, exception);
closeSubscriptionFuture.completeExceptionally(exception);
return null;
});
}
// cursor close handles pending delete (ack) operations
closeSubscriptionFuture.thenCompose(v -> topic.unsubscribe(subName)).thenAccept(v -> {
synchronized(this) {
(dispatcher != null ? dispatcher.close() : CompletableFuture.completedFuture(null)).thenRun(() -> {
log.info("[{}][{}] Successfully deleted subscription", topicName, subName);
deleteFuture.complete(null);
}).exceptionally(ex -> {
IS_FENCED_UPDATER.set(this, FALSE);
if (dispatcher != null) {
dispatcher.reset();
}
log.error("[{}][{}] Error deleting subscription", topicName, subName, ex);
deleteFuture.completeExceptionally(ex);
return null;
});
}
}).exceptionally(exception -> {
IS_FENCED_UPDATER.set(this, FALSE);
log.error("[{}][{}] Error deleting subscription", topicName, subName, exception);
deleteFuture.completeExceptionally(exception);
return null;
});
return deleteFuture;
} | 3.26 |
pulsar_NonPersistentSubscription_doUnsubscribe_rdh | /**
* Handle the unsubscribe command from the client API. Checks with the dispatcher whether this consumer can
* proceed with the unsubscribe.
*
* @param consumer
* consumer object that is initiating the unsubscribe operation
* @return CompletableFuture indicating the completion of the unsubscribe operation
*/
@Override
public CompletableFuture<Void> doUnsubscribe(Consumer consumer) {
CompletableFuture<Void> future = new CompletableFuture<>();
try {
if (dispatcher.canUnsubscribe(consumer)) {
consumer.close();
return delete();
}
future.completeExceptionally(new ServerMetadataException("Unconnected or shared consumer attempting to unsubscribe"));
} catch (BrokerServiceException e) {
log.warn("Error removing consumer {}", consumer);
future.completeExceptionally(e);
}
return future;
} | 3.26 |
pulsar_IScheduler_rebalance_rdh | /**
* Rebalances function instances scheduled to workers.
*
* @param currentAssignments
* current assignments
* @param workers
* current list of active workers
* @return A list of new assignments
*/
default List<Function.Assignment> rebalance(List<Function.Assignment> currentAssignments, Set<String> workers) {
return Collections.emptyList();
} | 3.26 |
pulsar_TopicMessageImpl_getTopicName_rdh | /**
* Get the topic name, with the partition part, of this message.
*
* @return the name of the topic on which this message was published
*/
@Override
public String getTopicName() {
return msg.getTopicName();
} | 3.26 |
pulsar_TopicMessageImpl_getTopicPartitionName_rdh | /**
* Get the topic name which contains the partition part for this message.
*
* @return the topic name which contains the partition part
*/
@Deprecated
public String getTopicPartitionName() {
return topicPartitionName;
} | 3.26 |
pulsar_AbstractHdfsConnector_resetHDFSResources_rdh | /* Reset Hadoop Configuration and FileSystem based on the supplied configuration resources. */
protected HdfsResources resetHDFSResources(HdfsSinkConfig hdfsSinkConfig) throws IOException {
Configuration config = new ExtendedConfiguration();
config.setClassLoader(Thread.currentThread().getContextClassLoader());
getConfig(config, connectorConfig.getHdfsConfigResources());
// first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
checkHdfsUriForTimeout(config);
/* Disable caching of Configuration and FileSystem objects, else we cannot reconfigure
the processor without a complete restart
*/
String disableCacheName = String.format("fs.%s.impl.disable.cache", FileSystem.getDefaultUri(config).getScheme());
config.set(disableCacheName, "true");
// If kerberos is enabled, create the file system as the kerberos principal
// -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time
FileSystem fs;
UserGroupInformation ugi;
synchronized(RESOURCES_LOCK) {
if (SecurityUtil.isSecurityEnabled(config)) {
ugi = SecurityUtil.loginKerberos(config, connectorConfig.getKerberosUserPrincipal(), connectorConfig.getKeytab());
fs = getFileSystemAsUser(config, ugi);
} else {
config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
config.set("hadoop.security.authentication", "simple");
ugi = SecurityUtil.loginSimple(config);
fs = getFileSystemAsUser(config, ugi);
}
}
return new HdfsResources(config, fs, ugi);
} | 3.26 |
pulsar_AbstractHdfsConnector_getFileSystem_rdh | /**
* This exists in order to allow unit tests to override it so that they don't take several
* minutes waiting for UDP packets to be received.
*
* @param config
* the configuration to use
* @return the FileSystem that is created for the given Configuration
* @throws IOException
* if unable to create the FileSystem
*/
protected FileSystem getFileSystem(final Configuration config) throws IOException {
return FileSystem.get(config);
} | 3.26 |
pulsar_AbstractHdfsConnector_checkHdfsUriForTimeout_rdh | /* Reduce the timeout of a socket connection from the default in FileSystem.get() */
protected void checkHdfsUriForTimeout(Configuration config) throws IOException {
URI hdfsUri = FileSystem.getDefaultUri(config);
String address = hdfsUri.getAuthority();
int port = hdfsUri.getPort();
if (((address == null) || address.isEmpty()) || (port < 0)) {
return;
}
InetSocketAddress namenode = NetUtils.createSocketAddr(address, port);
SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(config);
try (Socket socket = socketFactory.createSocket()) {
NetUtils.connect(socket, namenode, 1000); // 1 second timeout
}
} | 3.26 |
pulsar_DebeziumSource_topicNamespace_rdh | // namespace for output topics, default value is "tenant/namespace"
public static String topicNamespace(SourceContext sourceContext) {
String tenant = sourceContext.getTenant();
String namespace = sourceContext.getNamespace();
return (StringUtils.isEmpty(tenant) ? TopicName.PUBLIC_TENANT : tenant) + "/" + (StringUtils.isEmpty(namespace) ? TopicName.DEFAULT_NAMESPACE : namespace);
} | 3.26 |
pulsar_JettyRequestLogFactory_createRequestLogger_rdh | /**
* Build a new Jetty request logger using the format defined in this class.
*
* @return a request logger
*/
public static CustomRequestLog createRequestLogger() {
return new CustomRequestLog(new Slf4jRequestLogWriter(), LOG_FORMAT);
} | 3.26 |
pulsar_RequestWrapper_getBody_rdh | // Use this method to read the request body N times
public byte[] getBody() {
return this.body;
} | 3.26 |
pulsar_BlobStoreManagedLedgerOffloader_offload_rdh | /**
* Upload the DataBlocks associated with the given ReadHandle using MultiPartUpload,
* Creating indexBlocks for each corresponding DataBlock that is uploaded.
*/
@Override
public CompletableFuture<Void> offload(ReadHandle readHandle, UUID uuid, Map<String, String> extraMetadata) {
final String managedLedgerName = extraMetadata.get(f0);
final String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
CompletableFuture<Void> promise = new CompletableFuture<>();
scheduler.chooseThread(readHandle.getId()).execute(() -> {
final BlobStore writeBlobStore = getBlobStore(config.getBlobStoreLocation());
log.info("offload {} uuid {} extraMetadata {} to {} {}", readHandle.getId(), uuid, extraMetadata, config.getBlobStoreLocation(), writeBlobStore);
if (((readHandle.getLength() == 0) || (!readHandle.isClosed())) || (readHandle.getLastAddConfirmed() < 0)) {
promise.completeExceptionally(new IllegalArgumentException("An empty or open ledger should never be offloaded"));
return;
}
OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create().withLedgerMetadata(readHandle.getLedgerMetadata()).withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize());
String dataBlockKey = DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid);
String indexBlockKey = DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid);
log.info("ledger {} dataBlockKey {} indexBlockKey {}", readHandle.getId(), dataBlockKey, indexBlockKey);
MultipartUpload mpu = null;
List<MultipartPart> parts = Lists.newArrayList();
// init multi part upload for data block.
try {
BlobBuilder dataBlobBuilder = writeBlobStore.blobBuilder(dataBlockKey);
Map<String, String> objectMetadata = new HashMap<>(userMetadata);
objectMetadata.put("role", "data");
if (extraMetadata != null) {
objectMetadata.putAll(extraMetadata);
}
DataBlockUtils.addVersionInfo(dataBlobBuilder, objectMetadata);
Blob blob = dataBlobBuilder.build();
log.info("initiateMultipartUpload bucket {}, metadata {} ", config.getBucket(), blob.getMetadata());
mpu = writeBlobStore.initiateMultipartUpload(config.getBucket(), blob.getMetadata(), new PutOptions());
} catch (Throwable t) {
promise.completeExceptionally(t);
return;
}
long dataObjectLength = 0;
// start multi part upload for data block.
try {
long startEntry = 0;
int partId = 1;
long start = System.nanoTime();
long entryBytesWritten = 0;
while (startEntry <= readHandle.getLastAddConfirmed()) {
int blockSize = BlockAwareSegmentInputStreamImpl.calculateBlockSize(config.getMaxBlockSizeInBytes(), readHandle, startEntry, entryBytesWritten);
try (BlockAwareSegmentInputStream blockStream = new BlockAwareSegmentInputStreamImpl(readHandle, startEntry, blockSize, this.offloaderStats, managedLedgerName)) {
Payload partPayload = Payloads.newInputStreamPayload(blockStream);
partPayload.getContentMetadata().setContentLength(((long) (blockSize)));
partPayload.getContentMetadata().setContentType("application/octet-stream");
parts.add(writeBlobStore.uploadMultipartPart(mpu, partId, partPayload));
log.debug("UploadMultipartPart. container: {}, blobName: {}, partId: {}, mpu: {}", config.getBucket(), dataBlockKey, partId, mpu.id());
indexBuilder.addBlock(startEntry, partId, blockSize);
if (blockStream.getEndEntryId() != (-1)) {
startEntry = blockStream.getEndEntryId() + 1;
} else {
// could not read entry from ledger.
break;
}
entryBytesWritten += blockStream.getBlockEntryBytesCount();
partId++;
this.offloaderStats.recordOffloadBytes(topicName, blockStream.getBlockEntryBytesCount());
}
dataObjectLength += blockSize;
}
String etag = writeBlobStore.completeMultipartUpload(mpu, parts);
log.info("Ledger {}, upload finished, etag {}", readHandle.getId(), etag);
mpu = null;
} catch (Throwable t) {
try {
if (mpu != null) {
writeBlobStore.abortMultipartUpload(mpu);
}
} catch (Throwable throwable) {
log.error("Failed abortMultipartUpload in bucket - {} with key - {}, uploadId - {}.", config.getBucket(), dataBlockKey, mpu.id(), throwable);
}
this.offloaderStats.recordWriteToStorageError(topicName);
this.offloaderStats.recordOffloadError(topicName);
promise.completeExceptionally(t);
return;
}
// upload index block
try (OffloadIndexBlock index = indexBuilder.withDataObjectLength(dataObjectLength).build();
IndexInputStream indexStream = index.toStream()) {
// write the index block
BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey);
Map<String, String> objectMetadata = new HashMap<>(userMetadata);
objectMetadata.put("role", "index");
if (extraMetadata != null) {
objectMetadata.putAll(extraMetadata);
}
DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata);
Payload indexPayload = Payloads.newInputStreamPayload(indexStream);
indexPayload.getContentMetadata().setContentLength(((long) (indexStream.getStreamSize())));
indexPayload.getContentMetadata().setContentType("application/octet-stream");Blob blob = blobBuilder.payload(indexPayload).contentLength(((long) (indexStream.getStreamSize()))).build();
writeBlobStore.putBlob(config.getBucket(), blob);
promise.complete(null);
} catch (Throwable t) {
try {
writeBlobStore.removeBlob(config.getBucket(), dataBlockKey);
} catch (Throwable throwable) {
log.error("Failed deleteObject in bucket - {} with key - {}.", config.getBucket(), dataBlockKey,
throwable);
}
this.offloaderStats.recordWriteToStorageError(topicName);
this.offloaderStats.recordOffloadError(topicName);
promise.completeExceptionally(t);
return;
}
});
return promise;
} | 3.26 |
pulsar_BlobStoreManagedLedgerOffloader_getBlobStoreLocation_rdh | /**
* Attempts to create a BlobStoreLocation from the values in the offloadDriverMetadata,
* however, if no values are available, it defaults to the currently configured
* provider, region, bucket, etc.
*
* @param offloadDriverMetadata
* @return the blob store location to use
*/
private BlobStoreLocation getBlobStoreLocation(Map<String, String> offloadDriverMetadata) {
return !offloadDriverMetadata.isEmpty() ? new BlobStoreLocation(offloadDriverMetadata) : new BlobStoreLocation(getOffloadDriverMetadata());
} | 3.26 |
pulsar_BrokerUsage_populateFrom_rdh | /**
* Factory method that returns an instance of this class populated from metrics. We expect the keys that we are
* looking for to be present, but there is no explicitly type-checked object which guarantees that we have a
* specific type of metrics.
* @param metrics
* metrics object containing the metrics collected per minute from mbeans
* @return new instance of the class populated from metrics
*/
public static BrokerUsage populateFrom(Map<String, Object> metrics) {
BrokerUsage brokerUsage = null;
if (metrics.containsKey("brk_conn_cnt")) {
brokerUsage = new BrokerUsage();
brokerUsage.connectionCount = ((Long) (metrics.get("brk_conn_cnt")));
}if (metrics.containsKey("brk_repl_conn_cnt")) {
if (brokerUsage == null) {
brokerUsage = new BrokerUsage();
}
brokerUsage.replicationConnectionCount = ((Long) (metrics.get("brk_repl_conn_cnt")));
}
return brokerUsage;
} | 3.26 |
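A small sketch of how a caller would feed `populateFrom`; the metric keys are the two the method checks, and the counts are made up.

```java
import java.util.HashMap;
import java.util.Map;

public class BrokerUsageExample {
    public static void main(String[] args) {
        Map<String, Object> metrics = new HashMap<>();
        metrics.put("brk_conn_cnt", 42L);     // hypothetical client connection count
        metrics.put("brk_repl_conn_cnt", 3L); // hypothetical replication connection count

        // Returns a BrokerUsage with connectionCount = 42 and
        // replicationConnectionCount = 3; returns null if neither key is present.
        BrokerUsage usage = BrokerUsage.populateFrom(metrics);
    }
}
```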
pulsar_OffloadIndexBlockImpl_toStream_rdh | /**
* Get the content of the index block as InputStream.
* Read out in format:
* | index_magic_header | index_block_len | data_object_len | data_header_len |
* | index_entry_count | segment_metadata_len | segment metadata | index entries... |
*/
@Override
public IndexInputStream toStream() throws IOException {
int indexEntryCount = this.indexEntries.size();
byte[] ledgerMetadataByte = buildLedgerMetadataFormat(this.segmentMetadata);
int segmentMetadataLength = ledgerMetadataByte.length;
int indexBlockLength = 4 /* magic header */
+ 4 /* index block length */
+ 8 /* data object length */
+ 8 /* data header length */
+ 4 /* index entry count */
+ 4 /* segment metadata length */
+ segmentMetadataLength
+ indexEntryCount * (8 + 4 + 8); /* messageEntryId + blockPartId + blockOffset */
ByteBuf out = PulsarByteBufAllocator.DEFAULT.buffer(indexBlockLength, indexBlockLength);
out.writeInt(INDEX_MAGIC_WORD).writeInt(indexBlockLength).writeLong(dataObjectLength).writeLong(dataHeaderLength).writeInt(indexEntryCount).writeInt(segmentMetadataLength);
// write metadata
out.writeBytes(ledgerMetadataByte);
// write entries
this.indexEntries.entrySet().forEach(entry -> out.writeLong(entry.getValue().getEntryId()).writeInt(entry.getValue().getPartId()).writeLong(entry.getValue().getOffset()));
return new OffloadIndexBlock.IndexInputStream(new ByteBufInputStream(out, true), indexBlockLength);
} | 3.26 |
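Since the javadoc spells out the byte layout, a reader for the fixed-size header is a useful mirror of `toStream`. This is a sketch only: the field order comes from the code above, and the reads are big-endian because Netty's `ByteBuf` writes big-endian by default, matching `DataInputStream`.

```java
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

public class IndexBlockHeaderReader {
    public static void read(InputStream in) throws IOException {
        DataInputStream dis = new DataInputStream(in);
        int magic = dis.readInt();                 // index_magic_header
        int indexBlockLength = dis.readInt();      // index_block_len
        long dataObjectLength = dis.readLong();    // data_object_len
        long dataHeaderLength = dis.readLong();    // data_header_len
        int indexEntryCount = dis.readInt();       // index_entry_count
        int segmentMetadataLength = dis.readInt(); // segment_metadata_len
        dis.skipBytes(segmentMetadataLength);      // segment metadata blob
        for (int i = 0; i < indexEntryCount; i++) {
            long entryId = dis.readLong();         // messageEntryId, 8 bytes
            int partId = dis.readInt();            // blockPartId, 4 bytes
            long offset = dis.readLong();          // blockOffset, 8 bytes
        }
    }
}
```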
pulsar_ClientCnxIdleState_tryMarkReleasing_rdh | /**
* Changes the idle-state of the connection to #{@link State#RELEASING}, This method only changes this
* connection from the #{@link State#IDLE} state to the #{@link State#RELEASING} state.
*
* @return Whether change idle-stat to #{@link State#RELEASING} success.
*/
public boolean tryMarkReleasing() {
return compareAndSetIdleStat(State.IDLE, State.RELEASING);
} | 3.26 |
pulsar_ClientCnxIdleState_m0_rdh | /**
*
* @return Whether this connection is idle.
*/
public boolean m0() {
return getIdleStat() == State.IDLE;
} | 3.26 |
pulsar_ClientCnxIdleState_tryMarkIdleAndInitIdleTime_rdh | /**
* Try to transform the state of the connection to #{@link State#IDLE}. The state should only be
* transformed to #{@link State#IDLE} from #{@link State#USING}. If the state
* is successfully transformed, "idleMarkTime" will be assigned the current time.
*/
public void tryMarkIdleAndInitIdleTime() {
if (compareAndSetIdleStat(State.USING, State.IDLE)) {
idleMarkTime = System.currentTimeMillis();
}
}
/**
* Changes the idle-state of the connection to #{@link State#USING} as much as possible. This method
* is used when the connection is borrowed, and resets {@link #idleMarkTime} if the change to
* #{@link State#USING} succeeds.
*
* @return Whether the change of idle-stat to #{@link State#USING} succeeded | 3.26 |
pulsar_ClientCnxIdleState_isReleasing_rdh | /**
*
* @return Whether this connection is in idle and will be released soon.
*/
public boolean isReleasing() {
return getIdleStat() == State.RELEASING;
} | 3.26 |
pulsar_ClientCnxIdleState_doIdleDetect_rdh | /**
* Check whether the connection is idle, and if so, set the idle-state to #{@link State#IDLE}.
* If the state is already idle and the {@param maxIdleSeconds} is reached, set the state to
* #{@link State#RELEASING}.
*/
public void doIdleDetect(long maxIdleSeconds) {
if (isReleasing()) {
return;
}
if (m0()) {
if (((maxIdleSeconds * 1000) + idleMarkTime) < System.currentTimeMillis()) {
tryMarkReleasing();
}
return;
}
if (clientCnx.idleCheck()) {
tryMarkIdleAndInitIdleTime();
}
} | 3.26 |
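A hypothetical driver for the detection logic above: a connection pool would invoke `doIdleDetect` periodically and close connections that reach the releasing state. Only methods shown in this class are used; the scheduling cadence and the immediate close-on-releasing are assumptions of this sketch.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class IdleDetectorDriver {
    public static void schedule(ClientCnxIdleState idleState, long maxIdleSeconds) {
        ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
        timer.scheduleAtFixedRate(() -> {
            // Marks IDLE connections as RELEASING once maxIdleSeconds has elapsed.
            idleState.doIdleDetect(maxIdleSeconds);
            if (idleState.isReleasing()) {
                // Close the connection if it is still releasable; the CAS inside
                // makes this safe against a concurrent borrow.
                idleState.tryMarkReleasedAndCloseConnection();
            }
        }, maxIdleSeconds, maxIdleSeconds, TimeUnit.SECONDS);
    }
}
```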
pulsar_ClientCnxIdleState_tryMarkReleasedAndCloseConnection_rdh | /**
* Changes the idle-state of the connection to #{@link State#RELEASED}, This method only changes this
* connection from the #{@link State#RELEASING} state to the #{@link State#RELEASED}
* state, and close {@param clientCnx} if change state to #{@link State#RELEASED} success.
*
* @return Whether change idle-stat to #{@link State#RELEASED} and close connection success.
*/
public boolean tryMarkReleasedAndCloseConnection() {
if (!compareAndSetIdleStat(State.RELEASING, State.RELEASED)) {
return false;
}
clientCnx.close();
return true;
} | 3.26 |
pulsar_ClientCnxIdleState_isReleased_rdh | /**
*
* @return Whether this connection has already been released.
*/
public boolean isReleased() {
return getIdleStat() == State.RELEASED;
} | 3.26 |
pulsar_ClientCnxIdleState_getIdleStat_rdh | /**
* Get idle-stat.
*
* @return connection idle-stat
*/
public State getIdleStat() {
return STATE_UPDATER.get(this);
} | 3.26 |
pulsar_ClientCnxIdleState_isUsing_rdh | /**
*
* @return Whether this connection is in use.
*/
public boolean isUsing() {
return getIdleStat() == State.USING;
} | 3.26 |
pulsar_ClientCnxIdleState_compareAndSetIdleStat_rdh | /**
* Compare and switch idle-stat.
*
* @return Whether the update was successful. Because other threads may be competing, this can return false.
*/
boolean compareAndSetIdleStat(State originalStat, State newStat) {
return STATE_UPDATER.compareAndSet(this, originalStat, newStat);
} | 3.26 |
pulsar_PulsarRecord_individualAck_rdh | /**
* Some sinks sometimes want to control the ack type.
*/
public void individualAck() {
this.customAckFunction.accept(false);
} | 3.26 |
pulsar_SchemaData_fromSchemaInfo_rdh | /**
* Convert a schema info to a schema data.
*
* @param schemaInfo
* schema info
* @return the converted schema data
*/
public static SchemaData fromSchemaInfo(SchemaInfo schemaInfo) {
return SchemaData.builder().type(schemaInfo.getType()).data(schemaInfo.getSchema()).props(schemaInfo.getProperties()).build();
} | 3.26 |
pulsar_SchemaData_toSchemaInfo_rdh | /**
* Convert a schema data to a schema info.
*
* @return the converted schema info.
*/
public SchemaInfo toSchemaInfo() {
return SchemaInfo.builder().name("").type(type).schema(data).properties(props).build();
} | 3.26 |
pulsar_MessageImpl_create_rdh | // Constructor for out-going message
public static <T> MessageImpl<T> create(MessageMetadata msgMetadata, ByteBuffer payload, Schema<T> schema, String topic) {
@SuppressWarnings("unchecked")
MessageImpl<T> msg = ((MessageImpl<T>) (RECYCLER.get()));
msg.msgMetadata.clear();
msg.msgMetadata.copyFrom(msgMetadata);
msg.messageId = null;
msg.topic = topic;
msg.cnx = null;
msg.payload = Unpooled.wrappedBuffer(payload);
msg.properties = null;
msg.schema = schema;
msg.schemaHash = SchemaHash.of(schema);
msg.uncompressedSize = payload.remaining();
return msg;
} | 3.26 |
pulsar_MessageImpl_getPayload_rdh | /**
* Used only for unit tests to validate the payload's state and ref-cnt.
*
* @return the internal payload buffer
*/
@VisibleForTesting
ByteBuf getPayload() {
return payload;
} | 3.26 |
pulsar_MessageImpl_m2_rdh | // For messages produced by older version producers without schema, the schema version is an empty byte array
// rather than null.
@Override
public byte[] m2() {
if (msgMetadata.hasSchemaVersion()) {
byte[] schemaVersion = msgMetadata.getSchemaVersion();
return schemaVersion.length == 0 ? null : schemaVersion;
} else {
return null;
}
} | 3.26 |
pulsar_BindAddressValidator_migrateBindAddresses_rdh | /**
* Generates bind addresses based on legacy configuration properties.
*/
private static List<BindAddress> migrateBindAddresses(ServiceConfiguration config) {
List<BindAddress> addresses = new ArrayList<>(2);
if (config.getBrokerServicePort().isPresent()) {
addresses.add(new BindAddress(null, URI.create(ServiceConfigurationUtils.brokerUrl(config.getBindAddress(), config.getBrokerServicePort().get()))));
}
if (config.getBrokerServicePortTls().isPresent()) {
addresses.add(new BindAddress(null, URI.create(ServiceConfigurationUtils.brokerUrlTls(config.getBindAddress(), config.getBrokerServicePortTls().get()))));
}
if (config.getWebServicePort().isPresent()) {
addresses.add(new BindAddress(null, URI.create(ServiceConfigurationUtils.webServiceUrl(config.getBindAddress(), config.getWebServicePort().get()))));
}
if (config.getWebServicePortTls().isPresent()) {
addresses.add(new BindAddress(null, URI.create(ServiceConfigurationUtils.webServiceUrlTls(config.getBindAddress(), config.getWebServicePortTls().get()))));
}
return addresses;
} | 3.26 |
pulsar_BindAddressValidator_validateBindAddresses_rdh | /**
* Validate the configuration of `bindAddresses`.
*
* @param config
 * the Pulsar broker configuration.
* @param schemes
* a filter on the schemes of the bind addresses, or null to not apply a filter.
* @return a list of bind addresses.
*/
public static List<BindAddress> validateBindAddresses(ServiceConfiguration config, Collection<String> schemes) {
// migrate the existing configuration properties
List<BindAddress> addresses = migrateBindAddresses(config);
// parse the list of additional bind addresses
Arrays.stream(StringUtils.split(StringUtils.defaultString(config.getBindAddresses()), ","))
        .map(s -> {
            Matcher m = BIND_ADDRESSES_PATTERN.matcher(s);
            if (!m.matches()) {
                throw new IllegalArgumentException("bindAddresses: malformed: " + s);
            }
            return m;
        })
        .map(m -> new BindAddress(m.group("name"), URI.create(m.group("url"))))
        .forEach(addresses::add);
// apply the filter
if (schemes != null) {
addresses.removeIf(a -> !schemes.contains(a.getAddress().getScheme()));
}
return addresses;
} | 3.26 |
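The additional addresses are configured as a comma-separated list of name:url pairs. A hedged usage sketch; the setter name is assumed to exist alongside the getBindAddresses() call used above:

```java
// Assumed format: "<name>:<url>[,<name>:<url>...]"; setBindAddresses(...) is an assumption.
ServiceConfiguration config = new ServiceConfiguration();
config.setBindAddresses("external:pulsar://0.0.0.0:6652,external:pulsar+ssl://0.0.0.0:6653");

// Keep only the plain-text "pulsar" listeners; TLS and HTTP addresses are filtered out.
List<BindAddress> binds = BindAddressValidator.validateBindAddresses(config, List.of("pulsar"));
```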
pulsar_PulsarMockLedgerHandle_readAsync_rdh | // ReadHandle interface
@Override
public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntry) {
    return readHandle.readAsync(firstEntry, lastEntry);
} | 3.26 |
pulsar_CompletableFutureCancellationHandler_setCancelAction_rdh | /**
 * Set the action to run when the future gets cancelled or times out.
* The cancellation or timeout might be originating from any "upstream" future.
* The implementation ensures that the cancel action gets called once.
* Handles possible race conditions that might happen when the future gets cancelled
* before the cancel action is set to this handler. In this case, the
* cancel action gets called when the action is set.
*
* @param cancelAction
 * the action to run when the future gets cancelled or times out
 */
public void setCancelAction(Runnable cancelAction) {
    if (this.f0 != null || cancelHandled.get()) {
throw new IllegalStateException("cancelAction can only be set once.");
}
this.f0 = Objects.requireNonNull(cancelAction);
// handle race condition in the case that the future was already cancelled when the handler is set
runCancelActionOnceIfCancelled();
} | 3.26 |
pulsar_CompletableFutureCancellationHandler_attachToFuture_rdh | /**
* Attaches the cancellation handler to handle cancels
* and timeouts. A cancellation handler instance can be used only once.
*
* @param future
* the future to attach the handler to
*/
public synchronized void attachToFuture(CompletableFuture<?> future) {
    if (attached) {
        throw new IllegalStateException("A future has already been attached to this instance.");
    }
    attached = true;
future.whenComplete(whenCompleteFunction());
} | 3.26 |
pulsar_CompletableFutureCancellationHandler_createFuture_rdh | /**
* Creates a new {@link CompletableFuture} and attaches the cancellation handler
* to handle cancels and timeouts.
*
* @param <T>
* the result type of the future
* @return a new future instance
*/
public <T> CompletableFuture<T> createFuture() {
CompletableFuture<T> future = new CompletableFuture<>();
attachToFuture(future);
return future;
} | 3.26 |
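Putting the three methods together, a typical caller creates the future, registers its cleanup, and relies on the handler to fire that cleanup at most once. A usage sketch built only from the API shown above:

```java
// Usage sketch; the printed message stands in for real resource cleanup.
CompletableFutureCancellationHandler handler = new CompletableFutureCancellationHandler();
CompletableFuture<String> future = handler.createFuture();
handler.setCancelAction(() -> System.out.println("cancelled or timed out: releasing resources"));

future.cancel(false); // the attached whenComplete observes the cancellation and runs the action once
```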
pulsar_BasicKubernetesManifestCustomizer_partialDeepClone_rdh | /**
 * A partial deep clone: the maps and lists are copied defensively, while the k8s resource objects themselves are shared (shallow clones).
*/
public RuntimeOpts partialDeepClone() {
    return new RuntimeOpts(
            jobNamespace,
            jobName,
            extraLabels != null ? new HashMap<>(extraLabels) : null,
            extraAnnotations != null ? new HashMap<>(extraAnnotations) : null,
            nodeSelectorLabels != null ? new HashMap<>(nodeSelectorLabels) : null,
            resourceRequirements,
            tolerations != null ? new ArrayList<>(tolerations) : null);
} | 3.26 |
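The same pattern in miniature: copy the mutable collections defensively, share the objects that are treated as read-only. A generic, self-contained sketch (not the Pulsar class itself):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative record showing the partial-deep-clone idea.
record Opts(Map<String, String> labels, List<String> tolerations, Object sharedK8sResource) {
    Opts partialDeepClone() {
        return new Opts(
                labels == null ? null : new HashMap<>(labels),
                tolerations == null ? null : new ArrayList<>(tolerations),
                sharedK8sResource); // shallow on purpose: callers must not mutate it
    }
}
```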
pulsar_RawBatchConverter_rebatchMessage_rdh | /**
 * Take a batched message and a filter, and return a message containing only the sub-messages
 * that match the filter. Returns an empty optional if no messages match.
 *
 * NOTE: this method does not alter the reference count of the RawMessage argument.
*/
public static Optional<RawMessage> rebatchMessage(RawMessage msg, BiPredicate<String, MessageId> filter, boolean retainNullKey) throws IOException {
checkArgument(msg.getMessageIdData().getBatchIndex() == (-1));
ByteBuf payload = msg.getHeadersAndPayload();
int readerIndex = payload.readerIndex();
ByteBuf brokerMeta = null;
if (payload.getShort(readerIndex) == magicBrokerEntryMetadata) {
payload.skipBytes(Short.BYTES);
int brokerEntryMetadataSize = payload.readInt();
payload.readerIndex(readerIndex);
brokerMeta = payload.readSlice((brokerEntryMetadataSize + Short.BYTES) + Integer.BYTES);
}
MessageMetadata metadata = Commands.parseMessageMetadata(payload);
ByteBuf batchBuffer = PulsarByteBufAllocator.DEFAULT.buffer(payload.capacity());
CompressionType compressionType = metadata.getCompression();
CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(compressionType);
int uncompressedSize = metadata.getUncompressedSize();
ByteBuf uncompressedPayload = codec.decode(payload, uncompressedSize);
try {
int batchSize = metadata.getNumMessagesInBatch();
int messagesRetained = 0;
SingleMessageMetadata emptyMetadata = new SingleMessageMetadata().setCompactedOut(true);
SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata();
for (int i = 0; i < batchSize; i++) {
ByteBuf singleMessagePayload = Commands.deSerializeSingleMessageInBatch(uncompressedPayload, singleMessageMetadata, 0, batchSize);
MessageId id = new BatchMessageIdImpl(msg.getMessageIdData().getLedgerId(), msg.getMessageIdData().getEntryId(), msg.getMessageIdData().getPartition(), i);
if (!singleMessageMetadata.hasPartitionKey()) {
if (retainNullKey) {
messagesRetained++;
Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata, singleMessagePayload, batchBuffer);
} else {
Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata, Unpooled.EMPTY_BUFFER, batchBuffer);
}
} else if (filter.test(singleMessageMetadata.getPartitionKey(), id) && (singleMessagePayload.readableBytes() > 0)) {
messagesRetained++;
Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata, singleMessagePayload, batchBuffer);
} else {
    Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata, Unpooled.EMPTY_BUFFER, batchBuffer);
}
singleMessagePayload.release();
}
if (messagesRetained > 0) {
int newUncompressedSize = batchBuffer.readableBytes();
ByteBuf compressedPayload = codec.encode(batchBuffer);
metadata.setUncompressedSize(newUncompressedSize);
ByteBuf v33 = Commands.serializeMetadataAndPayload(ChecksumType.Crc32c, metadata, compressedPayload);
if (brokerMeta != null) {
CompositeByteBuf compositeByteBuf = PulsarByteBufAllocator.DEFAULT.compositeDirectBuffer();
compositeByteBuf.addComponents(true, brokerMeta.retain(), v33);
v33 = compositeByteBuf;
}
Optional<RawMessage> result = Optional.of(new RawMessageImpl(msg.getMessageIdData(), v33));
v33.release();
compressedPayload.release();
return result;
} else {
return Optional.empty();
}
} finally {
uncompressedPayload.release();
batchBuffer.release();
}
} | 3.26 |
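A typical filter for this API keeps, per key, only the message IDs selected by an earlier pass, which is how compaction-style rewriting works. A hedged sketch; `latestForKey` and `rawMessage` are assumed inputs, not part of the API above:

```java
// Hypothetical filter: keep a sub-message only if it is the latest id recorded for its key.
Map<String, MessageId> latestForKey = new HashMap<>(); // assumed to be built by an earlier phase
BiPredicate<String, MessageId> keepLatest = (key, id) -> id.equals(latestForKey.get(key));

Optional<RawMessage> rebatched =
        RawBatchConverter.rebatchMessage(rawMessage, keepLatest, /* retainNullKey */ false);
// Filtered-out slots are replaced by compacted-out placeholders, so batch indexes stay stable.
```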
pulsar_PositionImpl_toString_rdh | /**
* String representation of virtual cursor - LedgerId:EntryId.
*/
@Override
public String toString() {
    return ledgerId + ":" + entryId;
} | 3.26 |
pulsar_PositionImpl_getPositionAfterEntries_rdh | /**
 * Position after moving forward by entryNum entries;
 * if entryNum < 1, the current position is returned.
*/
public PositionImpl getPositionAfterEntries(int entryNum) {
if (entryNum < 1) {
return this;
}
if (entryId < 0) {
return PositionImpl.get(ledgerId, entryNum - 1);
} else {
return PositionImpl.get(ledgerId, entryId + entryNum);
}
} | 3.26 |
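A short illustration of the two branches: entryId == -1 encodes a position before the first entry of the ledger, so advancing n entries from it lands on entry n - 1:

```java
// Behavior sketch using the factory from the snippet above.
PositionImpl beforeFirst = PositionImpl.get(3, -1);
PositionImpl a = beforeFirst.getPositionAfterEntries(5); // 3:4, entries 0..4 consumed

PositionImpl mid = PositionImpl.get(3, 10);
PositionImpl b = mid.getPositionAfterEntries(5);         // 3:15
PositionImpl c = mid.getPositionAfterEntries(0);         // returns the same position
```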