name | code_snippet | score
---|---|---
hbase_ReplicationPeerManager_isStringEquals_rdh | /**
 * For replication peer cluster key or endpoint class, null and an empty string are treated as
 * equal, so we cannot use {@link StringUtils#equals(CharSequence, CharSequence)} directly here.
*/
private boolean isStringEquals(String s1, String s2) {
if (StringUtils.isBlank(s1)) {
return StringUtils.isBlank(s2);
}
return s1.equals(s2);
} | 3.26 |
hbase_ReplicationPeerManager_checkNamespacesAndTableCfsConfigConflict_rdh | /**
 * Setting a namespace in the peer config means that all tables in this namespace will be
 * replicated to the peer cluster.
 * <ol>
 * <li>If the peer config already contains a namespace, it is not allowed to set any table of
 * this namespace in the peer config.</li>
 * <li>If the peer config already contains a table, it is not allowed to set this table's
 * namespace in the peer config.</li>
 * </ol>
 * <p>
 * Setting an exclude namespace in the peer config means that no table in this namespace can be
 * replicated to the peer cluster.
 * <ol>
 * <li>If the peer config already contains an exclude namespace, it is not allowed to set any
 * exclude table of this namespace in the peer config.</li>
 * <li>If the peer config already contains an exclude table, it is not allowed to set this
 * table's namespace as an exclude namespace.</li>
* </ol>
*/
private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces, Map<TableName, ? extends Collection<String>> tableCfs) throws DoNotRetryIOException {
if ((namespaces == null) || namespaces.isEmpty()) {
return;
}
if ((tableCfs == null) || tableCfs.isEmpty()) {
return;
}
for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
TableName table = entry.getKey();
if (namespaces.contains(table.getNamespaceAsString())) {
        throw new DoNotRetryIOException("Table-cfs " + table + " conflicts with namespace " + table.getNamespaceAsString() + " in peer config");
}
}
} | 3.26 |
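As an illustration of the rule above, a minimal sketch of a combination this check rejects; the namespace, table, and column-family names are hypothetical:

```java
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;

class ConflictSketch {
    void demo() {
        // The peer config already replicates the whole "ns1" namespace...
        Set<String> namespaces = Set.of("ns1");
        // ...so explicitly listing a table that lives in "ns1" conflicts with it;
        // checkNamespacesAndTableCfsConfigConflict(namespaces, tableCfs)
        // would throw DoNotRetryIOException for this combination.
        Map<TableName, List<String>> tableCfs =
            Map.of(TableName.valueOf("ns1:table1"), List.of("cf1"));
    }
}
```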
hbase_ReplicationPeerManager_migrateQueuesFromZk_rdh | /**
* Submit the migration tasks to the given {@code executor}.
*/
CompletableFuture<?> migrateQueuesFromZk(ZKWatcher zookeeper, ExecutorService executor) {
    // the replication queue table creation is asynchronous and will be triggered by addPeer, so
    // here we need to manually initialize it since we will not call addPeer.
try {
initializeQueueStorage();
} catch (IOException e) {
return FutureUtils.failedFuture(e);
}
ZKReplicationQueueStorageForMigration oldStorage = new ZKReplicationQueueStorageForMigration(zookeeper, conf);
return CompletableFuture.allOf(runAsync(() -> migrateQueues(oldStorage), executor), runAsync(() -> migrateLastPushedSeqIds(oldStorage), executor), runAsync(() -> migrateHFileRefs(oldStorage), executor));
} | 3.26 |
hbase_ReplicationPeerManager_preUpdatePeerConfig_rdh | /**
* Return the old peer description. Can never be null.
*/
ReplicationPeerDescription preUpdatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) throws DoNotRetryIOException {
checkPeerConfig(peerConfig);
ReplicationPeerDescription desc = checkPeerExists(peerId);
ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig();
if (!isStringEquals(peerConfig.getClusterKey(), oldPeerConfig.getClusterKey())) {
      throw new DoNotRetryIOException((((("Changing the cluster key on an existing peer is not allowed. Existing key '" + oldPeerConfig.getClusterKey()) + "' for peer ") + peerId) + " does not match new key '") + peerConfig.getClusterKey()) + "'");
    }
if (!isStringEquals(peerConfig.getReplicationEndpointImpl(), oldPeerConfig.getReplicationEndpointImpl())) {
throw new DoNotRetryIOException((((((("Changing the replication endpoint implementation class " + "on an existing peer is not allowed. Existing class '") + oldPeerConfig.getReplicationEndpointImpl()) + "' for peer ") + peerId) + " does not match new class '") + peerConfig.getReplicationEndpointImpl()) + "'");
}
if (!isStringEquals(peerConfig.getRemoteWALDir(), oldPeerConfig.getRemoteWALDir())) {
throw new DoNotRetryIOException((((((("Changing the remote wal dir on an existing peer is not allowed. Existing remote wal " + "dir '") + oldPeerConfig.getRemoteWALDir()) + "' for peer ") + peerId) + " does not match new remote wal dir '") + peerConfig.getRemoteWALDir()) + "'");
}
if (oldPeerConfig.isSyncReplication()) {
if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldPeerConfig, peerConfig)) {
throw new DoNotRetryIOException((("Changing the replicated namespace/table config on a synchronous replication " + "peer(peerId: ") + peerId) + ") is not allowed.");
}
}
return desc;
} | 3.26 |
hbase_ReplicationPeerManager_preTransitPeerSyncReplicationState_rdh | /**
 * Returns the old description of the peer.
*/
ReplicationPeerDescription preTransitPeerSyncReplicationState(String peerId, SyncReplicationState state) throws DoNotRetryIOException {
ReplicationPeerDescription desc = checkPeerExists(peerId);
SyncReplicationState fromState = desc.getSyncReplicationState();
EnumSet<SyncReplicationState> allowedToStates = allowedTransition.get(fromState);
if ((allowedToStates == null) || (!allowedToStates.contains(state))) {
throw new DoNotRetryIOException((((("Can not transit current cluster state from " + fromState) + " to ") + state) + " for peer id=") + peerId);
    }
    return desc;
} | 3.26 |
pulsar_EventLoopUtil_getClientSocketChannelClass_rdh | /**
* Return a SocketChannel class suitable for the given EventLoopGroup implementation.
*
 * @param eventLoopGroup the event loop group in use
 * @return the {@link SocketChannel} class matching the event loop group implementation
 */
public static Class<? extends SocketChannel> getClientSocketChannelClass(EventLoopGroup eventLoopGroup) {
if (eventLoopGroup instanceof IOUringEventLoopGroup) {
return IOUringSocketChannel.class;
} else if (eventLoopGroup instanceof EpollEventLoopGroup) {
return EpollSocketChannel.class;
} else {
return NioSocketChannel.class;
}
} | 3.26 |
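As a usage sketch (not from the source), the returned class is typically passed to a Netty `Bootstrap` so the channel type matches the event loop implementation:

```java
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.EventLoopGroup;

class BootstrapSketch {
    static Bootstrap newBootstrap(EventLoopGroup group) {
        // The channel class must match the event loop implementation
        // (NIO, epoll or io_uring), otherwise Netty rejects the registration.
        return new Bootstrap()
            .group(group)
            .channel(EventLoopUtil.getClientSocketChannelClass(group));
    }
}
```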
pulsar_EventLoopUtil_shutdownGracefully_rdh | /**
 * Shuts down the EventLoopGroup gracefully. Returns a {@link CompletableFuture}.
 *
 * @param eventLoopGroup
 *            the event loop group to shut down
* @return CompletableFuture that completes when the shutdown has completed
*/
public static CompletableFuture<Void> shutdownGracefully(EventLoopGroup eventLoopGroup) {
return NettyFutureUtil.toCompletableFutureVoid(eventLoopGroup.shutdownGracefully());
} | 3.26 |
pulsar_EventLoopUtil_newEventLoopGroup_rdh | /**
*
* @return an EventLoopGroup suitable for the current platform
*/
public static EventLoopGroup newEventLoopGroup(int nThreads, boolean enableBusyWait, ThreadFactory threadFactory) {
if (Epoll.isAvailable()) {
String enableIoUring = System.getProperty(ENABLE_IO_URING);
            // By default, io_uring will not be enabled, even if available. The system property
            // enable.io_uring=1 (or "true") is used to opt in.
if (StringUtils.equalsAnyIgnoreCase(enableIoUring, "1", "true")) {
// Throw exception if IOUring cannot be used
IOUring.ensureAvailability();
return new IOUringEventLoopGroup(nThreads, threadFactory);
} else {
if (!enableBusyWait) {
// Regular Epoll based event loop
return new EpollEventLoopGroup(nThreads, threadFactory);
}
// With low latency setting, put the Netty event loop on busy-wait loop to reduce cost of
// context switches
EpollEventLoopGroup eventLoopGroup = new EpollEventLoopGroup(nThreads, threadFactory, () -> (selectSupplier, hasTasks) -> SelectStrategy.BUSY_WAIT);
// Enable CPU affinity on IO threads
for (int i = 0; i < nThreads; i++) {
eventLoopGroup.next().submit(() -> {
try {
CpuAffinity.acquireCore();
} catch (Throwable t) {
log.warn("Failed to acquire CPU core for thread {} {}", Thread.currentThread().getName(), t.getMessage(), t);
}
});
}
return eventLoopGroup;
}
} else {
// Fallback to NIO
return new NioEventLoopGroup(nThreads, threadFactory);
}
} | 3.26 |
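A minimal usage sketch; the thread count and pool name are arbitrary, and Netty's `DefaultThreadFactory` is assumed for thread naming:

```java
import io.netty.channel.EventLoopGroup;
import io.netty.util.concurrent.DefaultThreadFactory;

class EventLoopSketch {
    static EventLoopGroup create() {
        // Picks io_uring/epoll/NIO automatically; busy-wait disabled here.
        return EventLoopUtil.newEventLoopGroup(4, false,
            new DefaultThreadFactory("pulsar-io"));
    }
}
```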
pulsar_OffloaderUtils_getOffloaderFactory_rdh | /**
 * Extract the Pulsar offloader factory from an offloader archive.
 *
 * @param narPath
 * nar package path
 * @return a pair of the NAR class loader and the offloader factory
 * @throws IOException
 * when failing to retrieve the pulsar offloader class
*/
static Pair<NarClassLoader, LedgerOffloaderFactory> getOffloaderFactory(String narPath, String narExtractionDirectory) throws IOException {
// need to load offloader NAR to the classloader that also loaded LedgerOffloaderFactory in case
// LedgerOffloaderFactory is loaded by a classloader that is not the default classloader
// as is the case for the pulsar presto plugin
NarClassLoader ncl = NarClassLoaderBuilder.builder().narFile(new File(narPath)).parentClassLoader(LedgerOffloaderFactory.class.getClassLoader()).extractionDirectory(narExtractionDirectory).build();
String configStr = ncl.getServiceDefinition(PULSAR_OFFLOADER_SERVICE_NAME);
OffloaderDefinition conf = ObjectMapperFactory.getYamlMapper().getObjectMapper().readValue(configStr, OffloaderDefinition.class);
if (StringUtils.isEmpty(conf.getOffloaderFactoryClass())) {
throw new IOException(String.format("The '%s' offloader does not provide an offloader factory implementation", conf.getName()));
}
try {
// Try to load offloader factory class and check it implements Offloader interface
Class factoryClass = ncl.loadClass(conf.getOffloaderFactoryClass());
CompletableFuture<LedgerOffloaderFactory> loadFuture = new CompletableFuture<>();
Thread loadingThread = new Thread(() -> {
Thread.currentThread().setContextClassLoader(ncl);
try {
Object offloader = factoryClass.getDeclaredConstructor().newInstance();
if (!(offloader instanceof LedgerOffloaderFactory)) {
throw new IOException(((("Class "
+ conf.getOffloaderFactoryClass()) + " does not implement ") + "interface ") + LedgerOffloaderFactory.class.getName());
}
loadFuture.complete(((LedgerOffloaderFactory) (offloader)));
} catch (Throwable t) {
loadFuture.completeExceptionally(t);
}
}, "load-factory-" + factoryClass);
try {
loadingThread.start();
return Pair.of(ncl, loadFuture.get());
            } finally {
                loadingThread.join();
}
} catch (Throwable t) {
rethrowIOException(t);
}
return null;
} | 3.26 |
pulsar_Transactions_getPendingAckStatsAsync_rdh | /**
* Get transaction pending ack stats.
*
* @param topic
* the topic of this transaction pending ack stats
* @param subName
* the subscription name of this transaction pending ack stats
* @return the stats of transaction pending ack.
*/
default CompletableFuture<TransactionPendingAckStats> getPendingAckStatsAsync(String topic, String subName) {
return getPendingAckStatsAsync(topic, subName, false);
} | 3.26 |
pulsar_Transactions_getTransactionBufferStats_rdh | /**
* Get transaction buffer stats.
*
* @param topic
 * the topic to get transaction buffer stats for
* @return the stats of transaction buffer in topic.
*/
default TransactionBufferStats getTransactionBufferStats(String topic) throws PulsarAdminException {
return getTransactionBufferStats(topic, false, false);
} | 3.26 |
pulsar_Transactions_m0_rdh | /**
* Get transaction pending ack stats.
*
* @param topic
* the topic of this transaction pending ack stats
* @param subName
* the subscription name of this transaction pending ack stats
* @return the stats of transaction pending ack.
*/
default TransactionPendingAckStats m0(String topic, String subName) throws PulsarAdminException {
return getPendingAckStats(topic, subName, false);
} | 3.26 |
pulsar_ResourceQuota_substract_rdh | /**
 * Subtract quota.
 *
 * @param quota
 * <code>ResourceQuota</code> to subtract
*/
public void substract(ResourceQuota quota) {
this.msgRateIn -= quota.msgRateIn;
this.msgRateOut -= quota.msgRateOut;
this.bandwidthIn -= quota.bandwidthIn;
this.bandwidthOut -= quota.bandwidthOut;
this.memory -= quota.memory;
} | 3.26 |
pulsar_ResourceQuota_add_rdh | /**
* Add quota.
*
* @param quota
* <code>ResourceQuota</code> to add
*/
public void add(ResourceQuota quota) {
    this.msgRateIn += quota.msgRateIn;
    this.msgRateOut += quota.msgRateOut;
this.bandwidthIn += quota.bandwidthIn;
this.bandwidthOut += quota.bandwidthOut;
this.memory += quota.memory;
} | 3.26 |
pulsar_LedgerMetadataUtils_buildBaseManagedLedgerMetadata_rdh | /**
* Build base metadata for every ManagedLedger.
*
* @param name
* the name of the ledger
* @return an immutable map which describes a ManagedLedger
*/
static Map<String, byte[]> buildBaseManagedLedgerMetadata(String name) {
return Map.of(METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR, METADATA_PROPERTY_COMPONENT, METADATA_PROPERTY_COMPONENT_MANAGED_LEDGER, METADATA_PROPERTY_MANAGED_LEDGER_NAME, name.getBytes(StandardCharsets.UTF_8));
} | 3.26 |
pulsar_LedgerMetadataUtils_buildMetadataForSchema_rdh | /**
* Build additional metadata for a Schema.
*
* @param schemaId
* id of the schema
* @return an immutable map which describes the schema
*/
public static Map<String, byte[]> buildMetadataForSchema(String schemaId) {
    return Map.of(METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR, METADATA_PROPERTY_COMPONENT, f0, METADATA_PROPERTY_SCHEMAID, schemaId.getBytes(StandardCharsets.UTF_8));
} | 3.26 |
pulsar_LedgerMetadataUtils_buildMetadataForDelayedIndexBucket_rdh | /**
* Build additional metadata for a delayed message index bucket.
*
* @param bucketKey
* key of the delayed message bucket
* @param topicName
* name of the topic
* @param cursorName
* name of the cursor
 * @return an immutable map which describes the delayed message index bucket
*/
public static Map<String, byte[]> buildMetadataForDelayedIndexBucket(String bucketKey, String topicName, String cursorName) {
return Map.of(METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR, METADATA_PROPERTY_COMPONENT, METADATA_PROPERTY_COMPONENT_DELAYED_INDEX_BUCKET, METADATA_PROPERTY_DELAYED_INDEX_BUCKET_KEY, bucketKey.getBytes(StandardCharsets.UTF_8), METADATA_PROPERTY_DELAYED_INDEX_TOPIC, topicName.getBytes(StandardCharsets.UTF_8), METADATA_PROPERTY_DELAYED_INDEX_CURSOR, cursorName.getBytes(StandardCharsets.UTF_8));
} | 3.26 |
pulsar_LedgerMetadataUtils_buildAdditionalMetadataForCursor_rdh | /**
* Build additional metadata for a Cursor.
*
* @param name
* the name of the cursor
* @return an immutable map which describes the cursor
* @see #buildBaseManagedLedgerMetadata(java.lang.String)
*/
static Map<String, byte[]> buildAdditionalMetadataForCursor(String name) {
return Map.of(METADATA_PROPERTY_CURSOR_NAME, name.getBytes(StandardCharsets.UTF_8));
} | 3.26 |
pulsar_LedgerMetadataUtils_buildMetadataForCompactedLedger_rdh | /**
* Build additional metadata for a CompactedLedger.
*
* @param compactedTopic
* reference to the compacted topic.
* @param compactedToMessageId
 * last messageId.
* @return an immutable map which describes the compacted ledger
*/
public static Map<String, byte[]> buildMetadataForCompactedLedger(String compactedTopic, byte[] compactedToMessageId) {
return Map.of(METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR, METADATA_PROPERTY_COMPONENT, METADATA_PROPERTY_COMPONENT_COMPACTED_LEDGER, METADATA_PROPERTY_COMPACTEDTOPIC, compactedTopic.getBytes(StandardCharsets.UTF_8), METADATA_PROPERTY_COMPACTEDTO, compactedToMessageId);
} | 3.26 |
pulsar_OverloadShedder_findBundlesForUnloading_rdh | /**
* Attempt to shed some bundles off every broker which is overloaded.
*
* @param loadData
 * The load data used to make the unloading decision.
* @param conf
* The service configuration.
* @return A map from bundles to unload to the brokers on which they are loaded.
*/
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
selectedBundlesCache.clear();
final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
// Check every broker and select
loadData.getBrokerData().forEach((broker, brokerData) -> {
final LocalBrokerData localData = brokerData.getLocalData();
final double currentUsage = localData.getMaxResourceUsage();
if (currentUsage < overloadThreshold) {
if (log.isDebugEnabled()) {
log.debug("[{}] Broker is not overloaded, ignoring at this point ({})", broker, localData.printResourceUsage());
}
return;
}
// We want to offload enough traffic such that this broker will go below the overload threshold
// Also, add a small margin so that this broker won't be very close to the threshold edge.
double percentOfTrafficToOffload = (currentUsage - overloadThreshold) + f0;
double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
log.info("Attempting to shed load on {}, which has resource usage {}% above threshold {}%" + " -- Offloading at least {} MByte/s of traffic ({})", broker, 100 * currentUsage, 100 * overloadThreshold, (minimumThroughputToOffload / 1024) / 1024, localData.printResourceUsage());
MutableDouble trafficMarkedToOffload = new MutableDouble(0);
MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false);
if (localData.getBundles().size() > 1) {
// Sort bundles by throughput, then pick the biggest N which combined
// make up for at least the minimum throughput to offload
loadData.getBundleDataForLoadShedding().entrySet().stream().filter(e -> localData.getBundles().contains(e.getKey())).map(e -> {
// Map to throughput value
// Consider short-term byte rate to address system resource burden
String bundle = e.getKey();
BundleData v10 = e.getValue();
TimeAverageMessageData shortTermData = v10.getShortTermData();
                double throughput = shortTermData.getMsgThroughputIn() + shortTermData.getMsgThroughputOut();
return Pair.of(bundle, throughput);
}).filter(e -> {
// Only consider bundles that were not already unloaded recently
return !recentlyUnloadedBundles.containsKey(e.getLeft());
}).sorted((e1, e2) -> {
// Sort by throughput in reverse order
return Double.compare(e2.getRight(), e1.getRight());
}).forEach(e -> {
if ((trafficMarkedToOffload.doubleValue() < minimumThroughputToOffload) || atLeastOneBundleSelected.isFalse()) {
selectedBundlesCache.put(broker, e.getLeft());
trafficMarkedToOffload.add(e.getRight());
atLeastOneBundleSelected.setTrue();
}
});
} else if (localData.getBundles().size() == 1) {
log.warn("HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. " + "No Load Shedding will be done on this broker", localData.getBundles().iterator().next(), broker);
} else {
log.warn("Broker {} is overloaded despite having no bundles", broker);
}
});
return selectedBundlesCache;
} | 3.26 |
pulsar_LinuxBrokerHostUsageImpl_getTotalCpuUsageForEntireHost_rdh | /**
* Reads first line of /proc/stat to get total cpu usage.
*
* <pre>
* cpu user nice system idle iowait irq softirq steal guest guest_nice
* cpu 317808 128 58637 2503692 7634 0 13472 0 0 0
* </pre>
*
* Line is split in "words", filtering the first. The sum of all numbers give the amount of cpu cycles used this
* far. Real CPU usage should equal the sum substracting the idle cycles(that is idle+iowait), this would include
* cpu, user, nice, system, irq, softirq, steal, guest and guest_nice.
*/
private double getTotalCpuUsageForEntireHost() {
LinuxInfoUtils.ResourceUsage cpuUsageForEntireHost = getCpuUsageForEntireHost();
if (cpuUsageForEntireHost.isEmpty()) {
return -1;
}
double currentUsage = ((cpuUsageForEntireHost.getUsage() - lastCpuUsage) / (cpuUsageForEntireHost.getTotal() - lastCpuTotalTime)) * getTotalCpuLimit(isCGroupsEnabled);
lastCpuUsage = cpuUsageForEntireHost.getUsage();
lastCpuTotalTime = cpuUsageForEntireHost.getTotal();
return currentUsage;
} | 3.26 |
pulsar_SchemaReader_setSchemaInfoProvider_rdh | /**
 * Set the schema info provider; this method supports the multi-version reader.
 *
 * @param schemaInfoProvider
 *            the schema info provider
*/
default void setSchemaInfoProvider(SchemaInfoProvider schemaInfoProvider) {
} | 3.26 |
pulsar_SchemaReader_m0_rdh | /**
 * Returns the underlying Schema if possible.
*
* @return the schema, or an empty Optional if it is not possible to access it
*/
default Optional<Object> m0() {
return Optional.empty();
} | 3.26 |
pulsar_SchemaReader_read_rdh | /**
 * Deserialize the given bytes into a POJO.
*
* @param inputStream
* the stream of message
* @param schemaVersion
* the schema version of message
 * @return the deserialized object
*/
default T read(InputStream inputStream, byte[] schemaVersion) {
return read(inputStream);
} | 3.26 |
pulsar_PulsarClient_create_rdh | /**
* Create a new PulsarClient object.
*
* @param serviceUrl
* the url of the Pulsar endpoint to be used
* @param conf
* the client configuration
* @return a new pulsar client object
* @throws PulsarClientException.InvalidServiceURL
* if the serviceUrl is invalid
* @deprecated use {@link #builder()} to construct a client instance
*/
@Deprecated
static PulsarClient create(String serviceUrl, ClientConfiguration conf) throws PulsarClientException {
    return new PulsarClientV1Impl(serviceUrl, conf);
} | 3.26 |
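The non-deprecated path goes through the builder; a minimal sketch with a placeholder service URL:

```java
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;

class ClientBuilderSketch {
    static PulsarClient newClient() throws PulsarClientException {
        // Replaces the deprecated create(serviceUrl, conf) factory method.
        return PulsarClient.builder()
            .serviceUrl("pulsar://localhost:6650") // placeholder endpoint
            .build();
    }
}
```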
pulsar_ResourceUsage_compareTo_rdh | /**
 * Note: this may be wrong since we are comparing available capacity and not the usage.
 *
 * @param o the other resource usage to compare against
 * @return the comparison result based on available capacity (limit - usage)
 */
public int compareTo(ResourceUsage o) {
double required = o.limit - o.usage;
double available = limit - usage;
return Double.compare(available, required);
} | 3.26 |
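A small worked example of that caveat; the `(usage, limit)` constructor shape is assumed for illustration. Two entries with equal headroom compare as equal even when their absolute usage differs widely:

```java
class HeadroomSketch {
    static void demo() {
        ResourceUsage a = new ResourceUsage(10, 100);  // available = 90
        ResourceUsage b = new ResourceUsage(110, 200); // available = 90
        // compareTo returns 0: equal headroom, despite b using 11x more.
        int cmp = a.compareTo(b);
    }
}
```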
pulsar_AuthenticationProviderSasl_authRoleFromHttpRequest_rdh | /**
 * Returns null if authentication has not completed.
 * Returns the auth role if authentication has completed and the httpRequest's role token contains the authRole.
 */
public String authRoleFromHttpRequest(HttpServletRequest httpRequest) throws AuthenticationException {
    String tokenStr = httpRequest.getHeader(SASL_AUTH_ROLE_TOKEN);
if (tokenStr == null) {
return null;
}
String unSigned = signer.verifyAndExtract(tokenStr);
SaslRoleToken token;
try {
token = SaslRoleToken.parse(unSigned);
if (log.isDebugEnabled()) {
log.debug("server side get role token: {}, session in token:{}, session in request:{}", token, token.getSession(), httpRequest.getRemoteAddr());
}
} catch (Exception e) {
log.error("token parse failed, with exception: ", e);
return SASL_AUTH_ROLE_TOKEN_EXPIRED;
    }
    if (!token.isExpired()) {
        return token.getUserRole();
    } else {
        // the token must be expired here, since isExpired() is boolean
        return SASL_AUTH_ROLE_TOKEN_EXPIRED;
    }
} | 3.26 |
pulsar_AuthenticationProviderSasl_authenticateHttpRequest_rdh | /**
* Passed in request, set response, according to request.
* and return whether we should do following chain.doFilter or not.
*/
@Override
public boolean authenticateHttpRequest(HttpServletRequest request, HttpServletResponse response) throws Exception {
AuthenticationState v14 = getAuthState(request);
    String saslAuthRoleToken = authRoleFromHttpRequest(request);
    // role token exist
    if (saslAuthRoleToken != null) {
// role token expired, send role token expired to client.
if (saslAuthRoleToken.equalsIgnoreCase(SASL_AUTH_ROLE_TOKEN_EXPIRED)) {
setResponseHeaderState(response, SASL_AUTH_ROLE_TOKEN_EXPIRED);
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Role token expired");
if (log.isDebugEnabled()) {
log.debug("[{}] Server side role token expired: {}", request.getRequestURI(), saslAuthRoleToken);}
return false;
}
// role token OK to use,
// if request is ask for role token verify, send auth complete to client
// if request is a real request with valid role token, pass this request down.
if (request.getHeader(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_COMPLETE)) {
request.setAttribute(AuthenticatedRoleAttributeName, saslAuthRoleToken);
request.setAttribute(AuthenticatedDataAttributeName, new AuthenticationDataHttps(request));
if (log.isDebugEnabled()) {
log.debug("[{}] Server side role token OK to go on: {}", request.getRequestURI(), saslAuthRoleToken);
}
return true;
} else {
checkState(request.getHeader(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_SERVER_CHECK_TOKEN));
setResponseHeaderState(response, SASL_STATE_COMPLETE);
response.setHeader(SASL_STATE_SERVER, request.getHeader(SASL_STATE_SERVER));
response.setStatus(HttpServletResponse.SC_OK);
if (log.isDebugEnabled()) {
log.debug("[{}] Server side role token verified success: {}", request.getRequestURI(), saslAuthRoleToken);
}
return false;
}
} else {
// no role token, do sasl auth
// need new authState
if ((v14 == null) || request.getHeader(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_CLIENT_INIT)) {
v14 = newAuthState(null, null, null);
authStates.put(v14.getStateId(), v14);
}
checkState(request.getHeader(SASL_AUTH_TOKEN) != null, "Header token should exist if no role token.");
// do the sasl auth
AuthData clientData = AuthData.of(Base64.getDecoder().decode(request.getHeader(SASL_AUTH_TOKEN)));
        AuthData brokerData = v14.authenticate(clientData);
// authentication has completed, it has get the auth role.
if (v14.isComplete()) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] SASL server authentication complete, send OK to client.", request.getRequestURI());
}
String v18 = v14.getAuthRole();
String authToken = createAuthRoleToken(v18, String.valueOf(v14.getStateId()));
response.setHeader(SASL_AUTH_ROLE_TOKEN, authToken);
// auth request complete, return OK, wait for a new real request to come.
response.setHeader(SASL_STATE_SERVER, String.valueOf(v14.getStateId()));
setResponseHeaderState(response, SASL_STATE_COMPLETE);
response.setStatus(HttpServletResponse.SC_OK);
// auth completed, no need to keep authState
authStates.invalidate(v14.getStateId());
return false;
} else {
// auth not complete
if (log.isDebugEnabled()) {
log.debug("[{}] SASL server authentication not complete, send {} back to client.", request.getRequestURI(), HttpServletResponse.SC_UNAUTHORIZED);
}
setResponseHeaderState(response, SASL_STATE_NEGOTIATE);
response.setHeader(SASL_STATE_SERVER, String.valueOf(v14.getStateId()));
response.setHeader(SASL_AUTH_TOKEN, Base64.getEncoder().encodeToString(brokerData.getBytes()));
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "SASL Authentication not complete.");
return false;
}
}
} | 3.26 |
pulsar_AuthenticationProviderSasl_getAuthState_rdh | // return authState if it is in cache.
private AuthenticationState getAuthState(HttpServletRequest request) {
String id = request.getHeader(SASL_STATE_SERVER);
if (id == null) {
return null;
}
try {
return authStates.getIfPresent(Long.parseLong(id));
} catch (NumberFormatException e) {
log.error("[{}] Wrong Id String in Token {}. e:", request.getRequestURI(), id, e);
return null;
}
} | 3.26 |
pulsar_PulsarLedgerUnderreplicationManager_listLedgersToRereplicate_rdh | /**
* Get a list of all the underreplicated ledgers which have been
* marked for rereplication, filtered by the predicate on the replicas list.
*
* <p>Replicas list of an underreplicated ledger is the list of the bookies which are part of
* the ensemble of this ledger and are currently unavailable/down.
*
* @param predicate
* filter to use while listing under replicated ledgers. 'null' if filtering is not required.
* @return an iterator which returns underreplicated ledgers.
*/
@Override
public Iterator<UnderreplicatedLedger> listLedgersToRereplicate(final Predicate<List<String>> predicate) {
final Queue<String> queue = new LinkedList<>();
queue.add(urLedgerPath);
return new Iterator<UnderreplicatedLedger>() {
final Queue<UnderreplicatedLedger> curBatch = new LinkedList<>();
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasNext() {
if (curBatch.size() > 0) {
return true;
}
while ((queue.size() > 0) && (curBatch.size() == 0)) {
String parent = queue.remove();
try {
for (String c : store.getChildren(parent).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) {
String child = (parent + "/") + c;
if (c.startsWith("urL")) {
long ledgerId = getLedgerId(child);
UnderreplicatedLedger underreplicatedLedger = getLedgerUnreplicationInfo(ledgerId);
if (underreplicatedLedger != null) {
List<String> replicaList = underreplicatedLedger.getReplicaList();
if ((predicate == null) || predicate.test(replicaList)) {
curBatch.add(underreplicatedLedger);
}
}
} else {
                            queue.add(child);
                        }
}
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
return false;
                } catch (Exception e) {
throw new RuntimeException("Error reading list", e);
}
}
return curBatch.size() > 0;
}
@Override
            public UnderreplicatedLedger next() {
assert curBatch.size() > 0;
                return curBatch.remove();
            }
};
} | 3.26 |
pulsar_PulsarLedgerUnderreplicationManager_isLedgerBeingReplicated_rdh | /**
* Check whether the ledger is being replicated by any bookie.
*/
@Override
public boolean isLedgerBeingReplicated(long ledgerId) throws ReplicationException {
try {
return store.exists(getUrLedgerLockPath(f0, ledgerId)).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
} catch (Exception e) {
throw new ReplicationException.UnavailableException("Failed to check if ledger is beinge replicated", e);
}
} | 3.26 |
pulsar_Runnables_catchingAndLoggingThrowables_rdh | /**
* Wraps a Runnable so that throwables are caught and logged when a Runnable is run.
*
 * The main use case for this method is to be used in
* {@link java.util.concurrent.ScheduledExecutorService#scheduleAtFixedRate(Runnable, long, long, TimeUnit)}
* calls to ensure that the scheduled task doesn't get cancelled as a result of an uncaught exception.
*
* @param runnable
* The runnable to wrap
* @return a wrapped Runnable
*/
public static Runnable catchingAndLoggingThrowables(Runnable runnable) {
return new CatchingAndLoggingRunnable(runnable);
} | 3.26 |
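Matching the use case named in the Javadoc, a sketch of wrapping a periodic task so one failure does not cancel the schedule:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class SchedulerSketch {
    static void schedule(Runnable task) {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        // Without the wrapper, one uncaught exception cancels all future runs.
        executor.scheduleAtFixedRate(
            Runnables.catchingAndLoggingThrowables(task), 0, 1, TimeUnit.SECONDS);
    }
}
```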
pulsar_KerberosName_toString_rdh | /**
* Put the name back together from the parts.
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(serviceName);
if (hostName != null) {
result.append('/');
result.append(hostName);
}
if (realm != null) {
result.append('@');
result.append(realm);
}
return result.toString();
} | 3.26 |
pulsar_KerberosName_getRealm_rdh | /**
* Get the realm of the name.
*
* @return the realm of the name, may be null
*/
public String getRealm() {
return realm;
} | 3.26 |
pulsar_KerberosName_getServiceName_rdh | /**
* Get the first component of the name.
*
* @return the first section of the Kerberos principal name
*/
public String getServiceName() {
return serviceName;
} | 3.26 |
pulsar_KerberosName_m0_rdh | /**
* Get the configured default realm.
*
* @return the default realm from the krb5.conf
*/
public String m0() {
    return defaultRealm;
} | 3.26 |
pulsar_KerberosName_getShortName_rdh | /**
* Get the translation of the principal name into an operating system
* user name.
*
* @return the short name
* @throws IOException
*/
public String getShortName() throws IOException {
String[] v19;
if (hostName == null) {
// if it is already simple, just return it
if (realm == null) {
return serviceName;
}
v19 = new String[]{ realm, serviceName };
} else {
v19 = new String[]{ realm, serviceName, hostName };
}
for (Rule r : rules) {
String result = r.apply(v19);
if (result != null) {
return result;
}
}
throw new NoMatchingRule("No rules applied to " + toString());
} | 3.26 |
pulsar_KerberosName_replaceParameters_rdh | /**
* Replace the numbered parameters of the form $n where n is from 1 to
* the length of params. Normal text is copied directly and $n is replaced
* by the corresponding parameter.
*
* @param format
 * the format string in which to replace the parameters
* @param params
* the list of parameters
* @return the generated string with the parameter references replaced.
* @throws BadFormatString
*/
static String replaceParameters(String format, String[] params) throws BadFormatString {
Matcher match = parameterPattern.matcher(format);
int start = 0;
StringBuilder result = new StringBuilder();
while ((start < format.length()) && match.find(start)) {
result.append(match.group(1));
String paramNum = match.group(3);
if (paramNum != null) {
try {
int num = Integer.parseInt(paramNum);
if ((num < 0) || (num >= params.length)) {
throw new BadFormatString((((("index " + num) + " from ") + format) + " is outside of the valid range 0 to ") + (params.length - 1));
                    }
                    result.append(params[num]);
} catch (NumberFormatException nfe) {
throw new BadFormatString("bad format in username mapping in " + paramNum, nfe);
}
}
start = match.end();
}
return result.toString();
} | 3.26 |
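A worked example of the expansion; the values are hypothetical and same-package access to the package-private method is assumed:

```java
class RuleFormatSketch {
    static void demo() throws Exception {
        // params[0] is the realm; later entries are the principal components,
        // i.e. "user/host@EXAMPLE.COM" -> {"EXAMPLE.COM", "user", "host"}.
        String[] params = {"EXAMPLE.COM", "user", "host"};
        // "$1@$0" expands to "user@EXAMPLE.COM"; "$2" alone would yield "host".
        String result = KerberosName.replaceParameters("$1@$0", params);
    }
}
```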
pulsar_KerberosName_replaceSubstitution_rdh | /**
* Replace the matches of the from pattern in the base string with the value
* of the to string.
*
* @param base
* the string to transform
* @param from
* the pattern to look for in the base string
* @param to
* the string to replace matches of the pattern with
* @param repeat
* whether the substitution should be repeated
 * @return the resulting string after substitution
 */
static String replaceSubstitution(String base, Pattern from, String to, boolean repeat) {
Matcher match = from.matcher(base);
if (repeat) {
return match.replaceAll(to);
} else {
return match.replaceFirst(to);
}
} | 3.26 |
pulsar_KerberosName_apply_rdh | /**
* Try to apply this rule to the given name represented as a parameter
* array.
*
* @param params
 * first element is the realm, second and later elements are
 * the components of the name "a/b@FOO" -> {"FOO", "a", "b"}
* @return the short name if this rule applies or null
* @throws IOException
* throws if something is wrong with the rules
*/
String apply(String[] params) throws IOException {
String result = null;
if (isDefault) {
if (defaultRealm.equals(params[0])) {
result = params[1];
}
} else if ((params.length - 1) == numOfComponents) {
String base = replaceParameters(format, params);
if ((match == null) || match.matcher(base).matches()) {
if (fromPattern == null) {
result = base;
} else {
                result = replaceSubstitution(base, fromPattern, toPattern, repeat);
            }
}
}
if ((result != null) && nonSimplePattern.matcher(result).find()) {
throw new NoMatchingRule((("Non-simple name " + result) + " after auth_to_local rule ") + this);
}
return result;
} | 3.26 |
pulsar_KerberosName_getHostName_rdh | /**
* Get the second component of the name.
*
* @return the second section of the Kerberos principal name, and may be null
 */
public String getHostName() {
return hostName;
} | 3.26 |
pulsar_KerberosName_setConfiguration_rdh | /**
* Set the static configuration to get the rules.
*
* @throws IOException
*/
public static void setConfiguration() throws IOException {
String ruleString = System.getProperty("zookeeper.security.auth_to_local", "DEFAULT");
rules = m1(ruleString);
} | 3.26 |
pulsar_ThreadLeakDetectorListener_extractRunnableTarget_rdh | // use reflection to extract the Runnable target from a thread so that we can detect threads created by
// Testcontainers based on the Runnable's class name.
private static Runnable extractRunnableTarget(Thread thread) {
if (THREAD_TARGET_FIELD == null) {
return null;
}
Runnable target = null;
try {
target = ((Runnable) (THREAD_TARGET_FIELD.get(thread)));
} catch (IllegalAccessException e) {
LOG.warn("Cannot access target field in Thread.class", e);
}
return target;
} | 3.26 |
pulsar_ContextImpl_tryGetConsumer_rdh | // returns null if consumer not found
private Consumer<?> tryGetConsumer(String topic, int partition) {
if (partition == 0) {
// maybe a non-partitioned topic
Consumer<?> consumer = topicConsumers.get(TopicName.get(topic));
if (consumer != null) {
return consumer;
}
}
// maybe partitioned topic
return topicConsumers.get(TopicName.get(topic).getPartition(partition));
} | 3.26 |
pulsar_AbstractMetrics_populateDimensionMap_rdh | /**
 * Helper to manage populating the ledgers-by-dimension map.
*
* @param ledgersByDimensionMap
* @param metrics
* @param ledger
*/
protected void populateDimensionMap(Map<Metrics, List<ManagedLedgerImpl>> ledgersByDimensionMap, Metrics metrics, ManagedLedgerImpl ledger) {
ledgersByDimensionMap.computeIfAbsent(metrics, __ -> new ArrayList<>()).add(ledger);
} | 3.26 |
pulsar_AbstractMetrics_getManagedLedgerCacheStats_rdh | /**
* Returns the managed ledger cache statistics from ML factory.
*
 * @return the managed ledger cache statistics
 */
protected ManagedLedgerFactoryMXBean getManagedLedgerCacheStats() {
return ((ManagedLedgerFactoryImpl) (f0.getManagedLedgerFactory())).getCacheStats();
} | 3.26 |
pulsar_AbstractMetrics_getManagedLedgers_rdh | /**
* Returns managed ledgers map from ML factory.
*
 * @return the managed ledgers map
 */
protected Map<String, ManagedLedgerImpl> getManagedLedgers() {
    return ((ManagedLedgerFactoryImpl) (f0.getManagedLedgerFactory())).getManagedLedgers();
} | 3.26 |
pulsar_AbstractMetrics_createMetrics_rdh | /**
 * Creates a metrics object with an empty immutable dimension map.
 * <p>
 * Use this for metrics that don't need any dimension, i.e. global metrics.
 *
 * @return the created metrics object
 */
protected Metrics createMetrics() {
return createMetrics(new HashMap<String, String>());
} | 3.26 |
pulsar_AbstractMetrics_createMetricsByDimension_rdh | /**
* Creates a dimension key for replication metrics.
*
* @param namespace
* @param fromClusterName
* @param toClusterName
 * @return the metrics object keyed by the replication dimensions
 */
protected Metrics createMetricsByDimension(String namespace, String fromClusterName, String toClusterName) {
Map<String, String> dimensionMap = new HashMap<>();
dimensionMap.put("namespace", namespace);
dimensionMap.put("from_cluster", fromClusterName);
dimensionMap.put("to_cluster", toClusterName);
return createMetrics(dimensionMap);
} | 3.26 |
pulsar_PulsarRegistrationClient_updatedBookies_rdh | /**
* This method will receive metadata store notifications and then update the
* local cache in background sequentially.
*/
private void updatedBookies(Notification n) {
// make the notification callback run sequential in background.
final String path = n.getPath();
if ((!path.startsWith(bookieReadonlyRegistrationPath)) && (!path.startsWith(bookieRegistrationPath))) {
// ignore unknown path
return;
}
if (path.equals(bookieReadonlyRegistrationPath) || path.equals(bookieRegistrationPath)) {
// ignore root path
return;
    }
    final BookieId bookieId = stripBookieIdFromPath(n.getPath());
sequencer.sequential(() -> {
switch (n.getType()) {
case Created :
log.info("Bookie {} created. path: {}", bookieId, n.getPath());
if (path.startsWith(bookieReadonlyRegistrationPath)) {
return getReadOnlyBookies().thenAccept(bookies -> readOnlyBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies))));
}
                return getWritableBookies().thenAccept(bookies -> writableBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies))));
case Modified :
if (bookieId == null) {
return completedFuture(null);
}
log.info("Bookie {} modified. path: {}", bookieId, n.getPath());
if (path.startsWith(bookieReadonlyRegistrationPath)) {
return readBookieInfoAsReadonlyBookie(bookieId).thenApply(__ -> null);
}
return readBookieInfoAsWritableBookie(bookieId).thenApply(__ -> null);
case Deleted :
if (bookieId == null) {
return completedFuture(null);
}log.info("Bookie {} deleted. path: {}", bookieId, n.getPath());
if (path.startsWith(bookieReadonlyRegistrationPath)) {
readOnlyBookieInfo.remove(bookieId);
return getReadOnlyBookies().thenAccept(bookies -> {
readOnlyBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies)));
});
}
if (path.startsWith(bookieRegistrationPath)) {
writableBookieInfo.remove(bookieId);
                    return getWritableBookies().thenAccept(bookies -> {
writableBookiesWatchers.forEach(w -> executor.execute(() -> w.onBookiesChanged(bookies)));
});
}
return completedFuture(null);
default :
return completedFuture(null);
}
});
} | 3.26 |
pulsar_PulsarRegistrationClient_getBookiesThenFreshCache_rdh | /**
*
* @throws IllegalArgumentException
* if parameter path is null or empty.
*/
private CompletableFuture<Versioned<Set<BookieId>>> getBookiesThenFreshCache(String path) {
if ((path == null) || path.isEmpty()) {
return failedFuture(new IllegalArgumentException("parameter [path] can not be null or empty."));
}
    return store.getChildren(path).thenComposeAsync(children -> {
final Set<BookieId> bookieIds = PulsarRegistrationClient.convertToBookieAddresses(children);
final List<CompletableFuture<?>> bookieInfoUpdated = new ArrayList<>(bookieIds.size());
for (BookieId id : bookieIds) {
// update the cache for new bookies
if (path.equals(bookieReadonlyRegistrationPath) && (readOnlyBookieInfo.get(id) == null)) {
bookieInfoUpdated.add(readBookieInfoAsReadonlyBookie(id));
continue;
}
if (path.equals(bookieRegistrationPath) && (writableBookieInfo.get(id) == null)) {
bookieInfoUpdated.add(readBookieInfoAsWritableBookie(id));
continue;
}
if (path.equals(bookieAllRegistrationPath)) {
if ((writableBookieInfo.get(id) != null) || (readOnlyBookieInfo.get(id) != null)) {
// jump to next bookie id
continue;
}
// check writable first
final CompletableFuture<?> revalidateAllBookiesFuture = readBookieInfoAsWritableBookie(id).thenCompose(writableBookieInfo -> // check read-only then
writableBookieInfo.<CompletableFuture<Optional<CacheGetResult<BookieServiceInfo>>>>map(bookieServiceInfo -> completedFuture(null)).orElseGet(() -> readBookieInfoAsReadonlyBookie(id)));
bookieInfoUpdated.add(revalidateAllBookiesFuture);
}
}
if (bookieInfoUpdated.isEmpty()) {
return completedFuture(bookieIds);
} else {
return waitForAll(bookieInfoUpdated).thenApply(___ -> bookieIds);
}
    }).thenApply(s -> new Versioned<>(s, Version.NEW));
} | 3.26 |
pulsar_Message_getReaderSchema_rdh | /**
* Get the schema associated to the message.
* Please note that this schema is usually equal to the Schema you passed
* during the construction of the Consumer or the Reader.
* But if you are consuming the topic using the GenericObject interface
* this method will return the schema associated with the message.
*
* @return The schema used to decode the payload of message.
* @see Schema#AUTO_CONSUME()
*/
default Optional<Schema<?>> getReaderSchema() {
return Optional.empty();
} | 3.26 |
pulsar_TieredStorageConfiguration_getConfigProperty_rdh | /**
* Used to find a specific configuration property other than
* one of the predefined ones. This allows for any number of
 * provider-specific or new properties to be added in the future.
*
 * @param propertyName the name of the configuration property
 * @return the property value, or null if it is not set
 */
public String getConfigProperty(String propertyName) {
return configProperties.get(propertyName);
} | 3.26 |
pulsar_ProxyExtensionsUtils_searchForExtensions_rdh | /**
* Search and load the available extensions.
*
* @param extensionsDirectory
* the directory where all the extensions are stored
* @return a collection of extensions
* @throws IOException
 * when failing to load the available extensions from the provided directory.
*/
public static ExtensionsDefinitions searchForExtensions(String extensionsDirectory, String narExtractionDirectory) throws IOException {
Path path = Paths.get(extensionsDirectory).toAbsolutePath();
log.info("Searching for extensions in {}", path);
ExtensionsDefinitions extensions = new ExtensionsDefinitions();
if (!path.toFile().exists()) {
log.warn("extension directory not found");
return extensions;
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path, "*.nar")) {
for (Path archive : stream) {
try {
ProxyExtensionDefinition phDef = ProxyExtensionsUtils.getProxyExtensionDefinition(archive.toString(), narExtractionDirectory);
log.info("Found extension from {} : {}", archive, phDef);
checkArgument(StringUtils.isNotBlank(phDef.getName()));
                    checkArgument(StringUtils.isNotBlank(phDef.getExtensionClass()));
                    ProxyExtensionMetadata metadata = new ProxyExtensionMetadata();
metadata.setDefinition(phDef);
metadata.setArchivePath(archive);
extensions.extensions().put(phDef.getName(), metadata);
} catch (Throwable t) {
log.warn((("Failed to load connector from {}." + " It is OK however if you want to use this extension,")
+ " please make sure you put the correct extension NAR") + " package in the extensions directory.", archive, t);
}
}
}
    return extensions;
} | 3.26 |
pulsar_ProxyExtensionsUtils_getProxyExtensionDefinition_rdh | /**
* Retrieve the extension definition from the provided handler nar package.
*
* @param narPath
* the path to the extension NAR package
* @return the extension definition
* @throws IOException
 * when failing to load the extension or get the definition
*/
public static ProxyExtensionDefinition getProxyExtensionDefinition(String narPath, String narExtractionDirectory) throws IOException {
try (NarClassLoader ncl = NarClassLoaderBuilder.builder().narFile(new File(narPath)).extractionDirectory(narExtractionDirectory).build()) {
return getProxyExtensionDefinition(ncl);
}
} | 3.26 |
pulsar_ProxyExtensionsUtils_load_rdh | /**
* Load the extension according to the handler definition.
*
* @param metadata
* the extension definition.
 * @return the extension loaded together with its class loader
 */
static ProxyExtensionWithClassLoader load(ProxyExtensionMetadata metadata, String narExtractionDirectory) throws IOException {
final File narFile = metadata.getArchivePath().toAbsolutePath().toFile();
NarClassLoader ncl = NarClassLoaderBuilder.builder().narFile(narFile).parentClassLoader(ProxyExtension.class.getClassLoader()).extractionDirectory(narExtractionDirectory).build();
ProxyExtensionDefinition phDef = getProxyExtensionDefinition(ncl);
    if (StringUtils.isBlank(phDef.getExtensionClass())) {
        throw new IOException((("extension `" + phDef.getName()) + "` does NOT provide a protocol") + " handler implementation");
    }
try {
        Class extensionClass = ncl.loadClass(phDef.getExtensionClass());
Object extension = extensionClass.newInstance();
if (!(extension instanceof ProxyExtension)) {
throw new IOException(("Class " + phDef.getExtensionClass()) + " does not implement extension interface");
}
        ProxyExtension ph = ((ProxyExtension) (extension));
return new ProxyExtensionWithClassLoader(ph, ncl);
} catch (Throwable t) {
rethrowIOException(t);
return null;
    }
} | 3.26 |
pulsar_TopicSchema_getSchemaTypeOrDefault_rdh | /**
* If the topic is already created, we should be able to fetch the schema type (avro, json, ...).
*/
private SchemaType getSchemaTypeOrDefault(String topic, Class<?> clazz) {
if (GenericObject.class.isAssignableFrom(clazz)) {
return SchemaType.AUTO_CONSUME;
} else if ((byte[].class.equals(clazz) || ByteBuf.class.equals(clazz)) || ByteBuffer.class.equals(clazz)) {
// if function uses bytes, we should ignore
return SchemaType.NONE;
} else {
Optional<SchemaInfo> schema = ((PulsarClientImpl) (client)).getSchema(topic).join();
if (schema.isPresent()) {
if (schema.get().getType() == SchemaType.NONE) {
return getDefaultSchemaType(clazz);
} else {
return schema.get().getType();
}
} else {
return getDefaultSchemaType(clazz);
}
}
} | 3.26 |
pulsar_WorkerStatsApiV2Resource_clientAppId_rdh | /**
*
* @deprecated use {@link AuthenticationParameters} instead
 */
@Deprecated
public String clientAppId() {
return httpRequest != null ? ((String) (httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName))) : null;
} | 3.26 |
pulsar_FileUtils_deleteFile_rdh | /**
* Deletes the given file. If the given file exists but could not be deleted
* this will be printed as a warning to the given logger
*
* @param file
* to delete
* @param logger
* to notify
* @param attempts
* indicates how many times an attempt to delete should be
* made
* @return true if given file no longer exists
*/
public static boolean deleteFile(final File file, final Logger logger, final int attempts) {
if (file == null) {
return false;
}
boolean isGone = false;
try {
if (file.exists()) {
            final int effectiveAttempts = Math.max(1, attempts);
            for (int i = 0; (i < effectiveAttempts) && (!isGone); i++) {
isGone = file.delete() || (!file.exists());
if ((!isGone) && ((effectiveAttempts - i) > 1)) {
FileUtils.sleepQuietly(f0);
}
}
if ((!isGone) && (logger != null)) {
logger.warn("File appears to exist but unable to delete file: " + file.getAbsolutePath());
}
}
} catch (final Throwable t) {
if (logger != null) {
logger.warn((("Unable to delete file: '" + file.getAbsolutePath()) + "' due to ") + t);
}
}
return isGone;
} | 3.26 |
pulsar_FileUtils_deleteFilesInDirectory_rdh | /**
* Deletes all files (not directories) in the given directory (recursive)
* that match the given filename filter. If any file cannot be deleted then
* this is printed at warn to the given logger.
*
* @param directory
* to delete contents of
* @param filter
* if null then no filter is used
* @param logger
* to notify
* @param recurse
* will look for contents of sub directories.
* @param deleteEmptyDirectories
* default is false; if true will delete
* directories found that are empty
* @throws IOException
* if abstract pathname does not denote a directory, or
* if an I/O error occurs
*/
public static void deleteFilesInDirectory(final File directory, final FilenameFilter filter, final Logger logger, final boolean recurse, final boolean deleteEmptyDirectories) throws IOException {
// ensure the specified directory is actually a directory and that it exists
if ((null != directory) && directory.isDirectory()) {
        final File[] ingestFiles = directory.listFiles();
if (ingestFiles == null) {
// null if abstract pathname does not denote a directory, or if an I/O error occurs
throw new IOException("Unable to list directory content in: " + directory.getAbsolutePath());
}
        for (File ingestFile : ingestFiles) {
            boolean process = (filter == null) || filter.accept(directory, ingestFile.getName());
if (ingestFile.isFile() && process) {
FileUtils.deleteFile(ingestFile, logger, 3);
}
if (ingestFile.isDirectory() && recurse) {
FileUtils.deleteFilesInDirectory(ingestFile, filter, logger, recurse, deleteEmptyDirectories);
String[] ingestFileList = ingestFile.list();
if ((deleteEmptyDirectories && (ingestFileList != null)) && (ingestFileList.length == 0)) {
FileUtils.deleteFile(ingestFile, logger, 3);
}
}
}
}
} | 3.26 |
pulsar_AutoConsumeSchema_fetchSchemaIfNeeded_rdh | /**
 * It may happen that the schema is not loaded but we need it, for instance in order to call getSchemaInfo().
 * We cannot call this method in getSchemaInfo, because getSchemaInfo is called in many
 * places and we would introduce lots of deadlocks.
*/
public void fetchSchemaIfNeeded(SchemaVersion schemaVersion) throws SchemaSerializationException {
if (schemaVersion == null) {
schemaVersion = BytesSchemaVersion.of(new byte[0]);
}
if (!schemaMap.containsKey(schemaVersion)) {
if (schemaInfoProvider == null) {
throw new SchemaSerializationException(("Can't get accurate schema information for topic " + f0) + "using AutoConsumeSchema because SchemaInfoProvider is not set yet");
} else {
SchemaInfo schemaInfo = null;
try {
schemaInfo = schemaInfoProvider.getSchemaByVersion(schemaVersion.bytes()).get();
if (schemaInfo == null) {
// schemaless topic
schemaInfo = BytesSchema.of().getSchemaInfo();
}
} catch (InterruptedException | ExecutionException e) {
                    if (e instanceof InterruptedException) {
                        Thread.currentThread().interrupt();
}
log.error("Can't get last schema for topic {} using AutoConsumeSchema", f0);
throw new SchemaSerializationException(e.getCause());
}
// schemaInfo null means that there is no schema attached to the topic.
Schema<?> schema = generateSchema(schemaInfo);
schema.setSchemaInfoProvider(schemaInfoProvider);
setSchema(schemaVersion, schema);
log.info("Configure {} schema {} for topic {} : {}", componentName, schemaVersion, f0, schemaInfo.getSchemaDefinition());
}
}
} | 3.26 |
pulsar_PositionAckSetUtil_compareToWithAckSet_rdh | // Compares two positions to determine which one is bigger.
// When the ledgerId and entryId of this position equal the other's and both positions have an ack set,
// it compares whose ack set's next set bit index is bigger.
public static int compareToWithAckSet(PositionImpl currentPosition, PositionImpl otherPosition) {
if ((currentPosition == null) || (otherPosition == null)) {
throw new IllegalArgumentException((((("Two positions can't be null! " + "current position : [") + currentPosition) + "] other position : [") + otherPosition) + "]");
}
int result = currentPosition.compareTo(otherPosition);
if (result == 0) {
BitSetRecyclable otherAckSet;
BitSetRecyclable currentAckSet;
if (otherPosition.getAckSet() == null) {
otherAckSet = BitSetRecyclable.create();
} else {
otherAckSet = BitSetRecyclable.valueOf(otherPosition.getAckSet());
}
if (currentPosition.getAckSet() == null) {
currentAckSet = BitSetRecyclable.create();
} else {
                currentAckSet = BitSetRecyclable.valueOf(currentPosition.getAckSet());
}
if (currentAckSet.isEmpty() || otherAckSet.isEmpty()) {
// when ack set is empty, the nextSetBit will return -1, so we should return the inverse value.
result = -(currentAckSet.nextSetBit(0) - otherAckSet.nextSetBit(0));
} else {
result = currentAckSet.nextSetBit(0) - otherAckSet.nextSetBit(0);
}
currentAckSet.recycle();
otherAckSet.recycle();
}
return result;
} | 3.26 |
pulsar_PositionAckSetUtil_andAckSet_rdh | // Performs an `and` operation on two ack sets.
public static long[] andAckSet(long[] firstAckSet, long[] secondAckSet) {
BitSetRecyclable thisAckSet = BitSetRecyclable.valueOf(firstAckSet);
BitSetRecyclable otherAckSet = BitSetRecyclable.valueOf(secondAckSet);
thisAckSet.and(otherAckSet);
long[] ackSet = thisAckSet.toLongArray();
thisAckSet.recycle();
otherAckSet.recycle();
return ackSet;
} | 3.26 |
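A tiny worked example of the intersection semantics (values hypothetical): a bit is set in the result only if it is set in both inputs:

```java
class AndAckSetSketch {
    static void demo() {
        long[] first = {0b1011L};
        long[] second = {0b0110L};
        // 0b1011 & 0b0110 == 0b0010: only bit 1 survives in both sets.
        long[] intersection = PositionAckSetUtil.andAckSet(first, second);
    }
}
```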
pulsar_PositionAckSetUtil_m0_rdh | // Checks whether two ack sets overlap.
public static boolean m0(long[] currentAckSet, long[] otherAckSet) {
    if ((currentAckSet == null) || (otherAckSet == null)) {
return false;
}
BitSetRecyclable currentBitSet = BitSetRecyclable.valueOf(currentAckSet);
BitSetRecyclable v1 = BitSetRecyclable.valueOf(otherAckSet);
currentBitSet.flip(0, currentBitSet.size());
v1.flip(0, v1.size());
currentBitSet.and(v1);
boolean isAckSetRepeated = !currentBitSet.isEmpty();
currentBitSet.recycle();
v1.recycle();
return isAckSetRepeated;
} | 3.26 |
pulsar_InMemoryDelayedDeliveryTracker_hasMessageAvailable_rdh | /**
 * Return true if there's at least one message that is scheduled to be delivered already.
*/
@Override
public boolean hasMessageAvailable() {
boolean hasMessageAvailable = (!priorityQueue.isEmpty()) && (priorityQueue.peekN1() <= getCutoffTime());
if (!hasMessageAvailable) {
updateTimer();
}
    return hasMessageAvailable;
} | 3.26 |
pulsar_InMemoryDelayedDeliveryTracker_m0_rdh | /**
 * Get the set of positions of messages that have already reached their delivery time.
*/
@Override
public NavigableSet<PositionImpl> m0(int maxMessages) {
int n = maxMessages;
NavigableSet<PositionImpl> positions = new TreeSet<>();
        long cutoffTime = getCutoffTime();
while ((n > 0) && (!priorityQueue.isEmpty())) {
long v4 = priorityQueue.peekN1();
if (v4 > cutoffTime) {
break;
}
long ledgerId = priorityQueue.peekN2();
long entryId = priorityQueue.peekN3();
positions.add(new PositionImpl(ledgerId, entryId));
priorityQueue.pop();
--n;
}
if (log.isDebugEnabled()) {
log.debug("[{}] Get scheduled messages - found {}", dispatcher.getName(), positions.size());
}
if (priorityQueue.isEmpty()) {
// Reset to initial state
f0 = 0;
messagesHaveFixedDelay = true;
}
updateTimer();
        return positions;
} | 3.26 |
pulsar_InMemoryDelayedDeliveryTracker_checkAndUpdateHighest_rdh | /**
* Check that new delivery time comes after the current highest, or at
* least within a single tick time interval of 1 second.
*/
    private void checkAndUpdateHighest(long deliverAt) {
if (deliverAt < (f0 - tickTimeMillis)) {
messagesHaveFixedDelay = false;
        }
        f0 = Math.max(f0, deliverAt);
} | 3.26 |
pulsar_RestException_getExceptionData_rdh | /**
* Exception used to provide better error messages to clients of the REST API.
*/@SuppressWarnings("serial")public class RestException extends WebApplicationException {
static String getExceptionData(Throwable t) {
StringWriter writer = new StringWriter();
writer.append("\n --- An unexpected error occurred in the server ---\n\n");
writer.append("Message: ").append(t.getMessage()).append("\n\n");
writer.append("Stacktrace:\n\n");
t.printStackTrace(new PrintWriter(writer));
return writer.toString();
} | 3.26 |
pulsar_BytesSchemaVersion_toString_rdh | /**
* Write a printable representation of a byte array. Non-printable
* characters are hex escaped in the format \\x%02X, eg:
* \x00 \x05 etc.
*
* <p>This function is brought from org.apache.hadoop.hbase.util.Bytes
*
* @param b
* array to write out
* @param off
* offset to start at
* @param len
* length to write
* @return string output
*/
    private static String toString(final byte[] b, int off, int len) {
        StringBuilder result = new StringBuilder();
if (b == null) {
return result.toString();
}
// just in case we are passed a 'len' that is > buffer length...
if (off >= b.length) {
return result.toString();
}
if ((off + len) > b.length) {
len = b.length - off;
}
for (int i = off; i < (off + len); ++i) {
            int ch = b[i] & 0xff;
if (((ch >= ' ') && (ch <= '~')) && (ch != '\\')) {
                result.append(((char) (ch)));
            } else {
result.append("\\x");
result.append(HEX_CHARS_UPPER[ch / 0x10]);
                result.append(HEX_CHARS_UPPER[ch % 0x10]);
            }
}
return result.toString();
} | 3.26 |
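For illustration (hypothetical input), printable ASCII passes through while control bytes and the backslash itself are hex-escaped:

```java
class HexEscapeSketch {
    static void demo() {
        byte[] b = {0x00, 'a', 'b', 0x05, '\\'};
        // toString(b, 0, b.length) -> "\x00ab\x05\x5C"
        // 'a' and 'b' are printable; 0x00, 0x05 and '\' are escaped.
    }
}
```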
pulsar_BytesSchemaVersion_get_rdh | /**
* Get the data from the Bytes.
*
* @return The underlying byte array
*/
public byte[] get() {
return this.bytes;
} | 3.26 |
pulsar_BytesSchemaVersion_hashCode_rdh | /**
* The hashcode is cached except for the case where it is computed as 0, in which
* case we compute the hashcode on every call.
*
* @return the hashcode
*/
    @Override
    public int hashCode() {
if (hashCode == 0) {
hashCode = Arrays.hashCode(bytes);
}
return hashCode;
} | 3.26 |
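The same lazy-caching idiom, isolated as a self-contained class: the hash is computed once and memoized, with the corner case that a genuine hash of 0 is recomputed on every call (harmless, and it avoids a separate "computed" flag).

import java.util.Arrays;

final class CachedHash {
    private final byte[] bytes;
    private int hashCode; // 0 doubles as "not computed yet"

    CachedHash(byte[] bytes) {
        this.bytes = bytes;
    }

    @Override
    public int hashCode() {
        if (hashCode == 0) {
            hashCode = Arrays.hashCode(bytes); // recomputed only if the real hash is 0
        }
        return hashCode;
    }
}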
pulsar_AuthenticationUtil_create_rdh | /**
* Create an instance of the Authentication-Plugin.
*
* @param authPluginClassName
* name of the Authentication-Plugin you want to use
* @param authParams
* map which represents parameters for the Authentication-Plugin
* @return instance of the Authentication-Plugin
* @throws UnsupportedAuthenticationException
* if the plugin class cannot be loaded or instantiated
*/
@SuppressWarnings("deprecation")
public static final Authentication create(String authPluginClassName, Map<String, String> authParams) throws UnsupportedAuthenticationException {
try {
if (isNotBlank(authPluginClassName)) {
Class<?> authClass = Class.forName(authPluginClassName);
Authentication auth = (Authentication) authClass.getDeclaredConstructor().newInstance();
auth.configure(authParams);
return auth;
} else {
return new AuthenticationDisabled();
}
} catch (Throwable t) {
throw new UnsupportedAuthenticationException(t);
}
} | 3.26 |
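A hedged usage sketch: the plugin class name and its parameter keys below are assumptions based on the stock TLS plugin shipped with Pulsar clients.

import java.util.HashMap;
import java.util.Map;
import org.apache.pulsar.client.api.Authentication;
import org.apache.pulsar.client.api.PulsarClientException.UnsupportedAuthenticationException;

static Authentication tlsAuth() throws UnsupportedAuthenticationException {
    Map<String, String> params = new HashMap<>();
    params.put("tlsCertFile", "/path/to/client-cert.pem"); // parameter keys assumed from the
    params.put("tlsKeyFile", "/path/to/client-key.pem");   // stock AuthenticationTls plugin
    return AuthenticationUtil.create(
            "org.apache.pulsar.client.impl.auth.AuthenticationTls", params);
}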
pulsar_Record_getPartitionId_rdh | /**
* Retrieves the partition information if any of the record.
*
* @return The partition id where the record originated, if any
*/
default Optional<String> getPartitionId() {
return Optional.empty();
} | 3.26 |
pulsar_Record_getPartitionIndex_rdh | /**
* Retrieves the partition index if any of the record.
*
* @return The partition index
*/
default Optional<Integer> getPartitionIndex() {
return Optional.empty();
} | 3.26 |
pulsar_Record_getEventTime_rdh | /**
* Retrieves the event time of the record from the source.
*
* @return millis since epoch
*/
default Optional<Long> getEventTime() {
return Optional.empty();
} | 3.26 |
pulsar_Record_getRecordSequence_rdh | /**
* Retrieves the sequence of the record from a source partition.
*
* @return Sequence Id associated with the record
*/
default Optional<Long> getRecordSequence() {
return Optional.empty();
} | 3.26 |
pulsar_Record_getTopicName_rdh | /**
* If the record originated from a topic, report the topic name.
*/
default Optional<String> getTopicName() {
return Optional.empty();
} | 3.26 |
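The defaults above mean a custom source record only has to supply getValue(); a minimal sketch overriding two of the optional accessors (the topic name is a made-up example):

import java.util.Optional;
import org.apache.pulsar.functions.api.Record;

class StringRecord implements Record<String> {
    private final String value;

    StringRecord(String value) {
        this.value = value;
    }

    @Override
    public String getValue() {
        return value;
    }

    @Override
    public Optional<Long> getEventTime() {
        return Optional.of(System.currentTimeMillis()); // stamp at creation time
    }

    @Override
    public Optional<String> getTopicName() {
        return Optional.of("persistent://public/default/demo"); // hypothetical topic
    }
}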
pulsar_KeyValueSchemaInfo_encodeKeyValueSchemaInfo_rdh | /**
* Encode key and value schema info into a single KeyValue schema info.
*
* @param schemaName
* the final schema name
* @param keySchemaInfo
* the key schema info
* @param valueSchemaInfo
* the value schema info
* @param keyValueEncodingType
* the encoding type to encode and decode key value pair
* @return the final schema info
*/
public static SchemaInfo encodeKeyValueSchemaInfo(String schemaName, SchemaInfo keySchemaInfo,
SchemaInfo valueSchemaInfo, KeyValueEncodingType keyValueEncodingType) {
requireNonNull(keyValueEncodingType, "Null encoding type is provided");
if ((keySchemaInfo == null) || (valueSchemaInfo == null)) {
// schema is not ready
return null;
}
// process key/value schema data
byte[] schemaData = KeyValue.encode(keySchemaInfo, SCHEMA_INFO_WRITER, valueSchemaInfo, SCHEMA_INFO_WRITER);
// process key/value schema properties
Map<String, String> properties = new HashMap<>();
encodeSubSchemaInfoToParentSchemaProperties(keySchemaInfo, KEY_SCHEMA_NAME, KEY_SCHEMA_TYPE, KEY_SCHEMA_PROPS, properties);
encodeSubSchemaInfoToParentSchemaProperties(valueSchemaInfo, VALUE_SCHEMA_NAME, VALUE_SCHEMA_TYPE, VALUE_SCHEMA_PROPS, properties);
properties.put(KV_ENCODING_TYPE, String.valueOf(keyValueEncodingType));
// generate the final schema info
return SchemaInfoImpl.builder().name(schemaName).type(SchemaType.KEY_VALUE).schema(schemaData).properties(properties).build();
} | 3.26 |
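A hedged sketch of building a KeyValue schema info from two component schemas; MyPojo is a hypothetical value type, and the Schema factory methods are the standard Pulsar client-API ones.

import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.common.schema.KeyValueEncodingType;
import org.apache.pulsar.common.schema.SchemaInfo;

SchemaInfo keyInfo = Schema.STRING.getSchemaInfo();
SchemaInfo valueInfo = Schema.JSON(MyPojo.class).getSchemaInfo(); // MyPojo is hypothetical
SchemaInfo kvInfo = KeyValueSchemaInfo.encodeKeyValueSchemaInfo(
        "my-kv-schema", keyInfo, valueInfo, KeyValueEncodingType.SEPARATED);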
pulsar_KeyValueSchemaInfo_decodeKeyValueSchemaInfo_rdh | /**
* Decode the key/value schema info to get key schema info and value schema info.
*
* @param schemaInfo
* key/value schema info.
* @return the pair of key schema info and value schema info
*/
public static KeyValue<SchemaInfo, SchemaInfo> decodeKeyValueSchemaInfo(SchemaInfo schemaInfo) {
checkArgument(SchemaType.KEY_VALUE == schemaInfo.getType(), "Not a KeyValue schema");
return KeyValue.decode(schemaInfo.getSchema(), (keyBytes, valueBytes) -> {
SchemaInfo keySchemaInfo = decodeSubSchemaInfo(schemaInfo, KEY_SCHEMA_NAME, KEY_SCHEMA_TYPE, KEY_SCHEMA_PROPS, keyBytes);
SchemaInfo valueSchemaInfo = decodeSubSchemaInfo(schemaInfo, VALUE_SCHEMA_NAME, VALUE_SCHEMA_TYPE, VALUE_SCHEMA_PROPS, valueBytes);
return new KeyValue<>(keySchemaInfo, valueSchemaInfo);
});
} | 3.26 |
pulsar_KeyValueSchemaInfo_decodeKeyValueEncodingType_rdh | /**
* Decode the kv encoding type from the schema info.
*
* @param schemaInfo
* the schema info
* @return the kv encoding type
*/
public static KeyValueEncodingType decodeKeyValueEncodingType(SchemaInfo schemaInfo) {
checkArgument(SchemaType.KEY_VALUE == schemaInfo.getType(), "Not a KeyValue schema");
String encodingTypeStr = schemaInfo.getProperties().get(KV_ENCODING_TYPE);
if (StringUtils.isEmpty(encodingTypeStr)) {
return KeyValueEncodingType.INLINE;
} else {
return KeyValueEncodingType.valueOf(encodingTypeStr);
}
} | 3.26 |
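Continuing the sketch above, the two decoders recover the component schemas and the encoding type from the combined schema info:

import org.apache.pulsar.common.schema.KeyValue;
import org.apache.pulsar.common.schema.KeyValueEncodingType;
import org.apache.pulsar.common.schema.SchemaInfo;

KeyValue<SchemaInfo, SchemaInfo> parts = KeyValueSchemaInfo.decodeKeyValueSchemaInfo(kvInfo);
SchemaInfo keyInfo = parts.getKey();     // the key schema encoded earlier
SchemaInfo valueInfo = parts.getValue(); // the value schema encoded earlier
KeyValueEncodingType encoding = KeyValueSchemaInfo.decodeKeyValueEncodingType(kvInfo); // SEPARATED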
pulsar_ShadowReplicator_getShadowReplicatorName_rdh | /**
* Cursor name for this shadow replicator.
*
* @param replicatorPrefix
* the configured replicator cursor name prefix
* @param shadowTopic
* the shadow topic name, encoded into the cursor name
* @return the cursor name for the given shadow topic
*/
public static String getShadowReplicatorName(String replicatorPrefix, String shadowTopic) {
return (replicatorPrefix + "-") + Codec.encode(shadowTopic);
} | 3.26 |
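For illustration, with an assumed replicator prefix of "pulsar.repl", the shadow topic name is encoded into the cursor name; assuming Codec.encode performs URL-style escaping, the colon and slashes become percent escapes:

String cursor = ShadowReplicator.getShadowReplicatorName(
        "pulsar.repl", "persistent://tenant/ns/shadow-topic");
// roughly "pulsar.repl-persistent%3A%2F%2Ftenant%2Fns%2Fshadow-topic"
// (the exact escaping depends on Codec.encode)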
pulsar_WindowFunction_process_rdh | /**
* Process the records of the current window.
*
* @param input
* the records in the current window
* @param context
* the window context
* @return the output of processing the window
* @throws Exception
* if processing the window fails
*/
T process(Collection<Record<X>> input, WindowContext context) throws Exception; | 3.26 |
pulsar_SafeCollectionUtils_longArrayToList_rdh | /**
* Safe collection utils.
*/
public class SafeCollectionUtils {
public static List<Long> longArrayToList(long[] array) {
return (array == null) || (array.length == 0) ? Collections.emptyList() : Arrays.stream(array).boxed().collect(Collectors.toList());
} | 3.26 |
pulsar_SchemaDefinitionImpl_getAlwaysAllowNull_rdh | /**
* Get whether the schema always allows null fields.
*
* @return true if the schema always allows null
*/
public boolean getAlwaysAllowNull() {
return alwaysAllowNull;
} | 3.26 |
pulsar_SchemaDefinitionImpl_getPojo_rdh | /**
* Get pojo schema definition.
*
* @return pojo class
*/
@Override
public Class<T> getPojo() {
return pojo;
} | 3.26 |
pulsar_SchemaDefinitionImpl_getProperties_rdh | /**
* Get the schema properties.
*
* @return an unmodifiable view of the schema properties
*/
public Map<String, String> getProperties() {
return Collections.unmodifiableMap(properties);
} | 3.26 |
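The getters above reflect what was set through the builder; a hedged sketch using the public SchemaDefinition builder (MyPojo is hypothetical):

import java.util.Map;
import org.apache.pulsar.client.api.schema.SchemaDefinition;

SchemaDefinition<MyPojo> def = SchemaDefinition.<MyPojo>builder()
        .withPojo(MyPojo.class)                  // read back via getPojo()
        .withAlwaysAllowNull(false)              // read back via getAlwaysAllowNull()
        .withProperties(Map.of("owner", "demo")) // read back via getProperties()
        .build();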
pulsar_PublicSuffixMatcher_matches_rdh | /**
* Tests whether the given domain matches any of entry from the public suffix list.
*
* @param domain
* @param expectedType
* expected domain type or {@code null} if any.
* @return {@code true} if the given domain matches any of the public suffixes.
* @since 4.5
*/
public boolean matches(final String domain, final DomainType expectedType) {
if (domain == null) {
return false;
}
final String domainRoot = getDomainRoot(domain.startsWith(".") ? domain.substring(1) : domain, expectedType);
return domainRoot == null;
} | 3.26 |
pulsar_PublicSuffixMatcher_getDomainRoot_rdh | /**
* Returns registrable part of the domain for the given domain name or {@code null}
* if given domain represents a public suffix.
*
* @param domain
* @param expectedType
* expected domain type or {@code null} if any.
* @return domain root
* @since 4.5
*/
public String getDomainRoot(final String domain, final DomainType expectedType) {
if (domain == null) {
return null;
}
if (domain.startsWith(".")) {
return null;
}
String domainName = null;
String segment = domain.toLowerCase(Locale.ROOT);
while (segment != null) {
// An exception rule takes priority over any other matching rule.
if (hasException(IDN.toUnicode(segment), expectedType)) {
return segment;
}
if (hasRule(IDN.toUnicode(segment), expectedType)) {
break;
}
final int nextdot = segment.indexOf('.');
final String nextSegment = (nextdot != (-1)) ? segment.substring(nextdot + 1) : null;
if (nextSegment != null) {
if (hasRule("*." + IDN.toUnicode(nextSegment), expectedType)) {
break;
}
}
if (nextdot != (-1)) {
domainName = segment;
}
segment = nextSegment;
}
return domainName;
} | 3.26 |
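Expected behavior, assuming a matcher loaded with the standard public suffix list (the loader and DomainType names follow Apache HttpClient, from which this class appears to be derived): a bare suffix such as "co.uk" has no registrable root, so getDomainRoot returns null and matches returns true, while a registrable domain behaves the opposite way.

PublicSuffixMatcher matcher = PublicSuffixMatcherLoader.getDefault(); // loader name assumed
boolean isSuffix = matcher.matches("co.uk", DomainType.ICANN);        // true: a public suffix
String root = matcher.getDomainRoot("www.example.co.uk", DomainType.ICANN); // "example.co.uk"
boolean isRegistrable = matcher.matches("example.co.uk", DomainType.ICANN); // false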
pulsar_RangeCache_put_rdh | /**
* Insert.
*
* @param key
* @param value
* ref counted value with at least 1 ref to pass on the cache
* @return whether the entry was inserted in the cache
*/
public boolean put(Key key, Value value) {
// retain value so that it's not released before we put it in the cache and calculate the weight
value.retain();
try {
if (entries.putIfAbsent(key, value) == null) {
size.addAndGet(weighter.getSize(value));
return true;
} else {
return false;
}
} finally {
value.release();
}
} | 3.26 |
pulsar_RangeCache_evictLeastAccessedEntries_rdh | /**
*
* @param minSize
* the minimum total size to evict
* @return a pair containing the number of entries evicted and their total size
*/
public Pair<Integer, Long> evictLeastAccessedEntries(long minSize) {
checkArgument(minSize > 0);
long removedSize = 0;
int removedEntries = 0;
while (removedSize < minSize) {
Map.Entry<Key, Value> entry = entries.pollFirstEntry();
if (entry == null) {
break;
}
Value value = entry.getValue();
++removedEntries;
removedSize += weighter.getSize(value);
value.release();
}
size.addAndGet(-removedSize);
return Pair.of(removedEntries, removedSize);
} | 3.26 |
pulsar_RangeCache_evictLEntriesBeforeTimestamp_rdh | /**
*
* @param maxTimestamp
* the max timestamp of the entries to be evicted
* @return a pair containing the number of entries evicted and their total size
*/
public Pair<Integer, Long> evictLEntriesBeforeTimestamp(long maxTimestamp) {
long removedSize = 0;
int removedCount = 0;
while (true) {
Map.Entry<Key, Value> entry = entries.firstEntry();
if ((entry == null) || (timestampExtractor.getTimestamp(entry.getValue()) > maxTimestamp)) {
break;
}
Value value = entry.getValue();
boolean removeHits = entries.remove(entry.getKey(), value);
if (!removeHits) {
break;
}
removedSize += weighter.getSize(value);
removedCount++;
value.release();
}
size.addAndGet(-removedSize);
return Pair.of(removedCount, removedSize);
} | 3.26 |
pulsar_RangeCache_getRange_rdh | /**
*
* @param first
* the first key in the range
* @param last
* the last key in the range (inclusive)
* @return a collection of the values found in the cache
*/
public Collection<Value> getRange(Key first, Key last) {
List<Value> values = new ArrayList<>();
// Return the values of the entries found in cache
for (Value value : entries.subMap(first, true, last, true).values()) {
try {
value.retain();
values.add(value);
} catch (Throwable t) {
// Value was already destroyed between get() and retain()
}
}
return values;
} | 3.26 |
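A hedged sketch of the put/getRange contract, assuming Value is Netty's ReferenceCounted and the EntryImpl accessor names below (both assumptions): the cache keeps its own reference on a successful put, and getRange retains each returned value on behalf of the caller, who must release it.

RangeCache<PositionImpl, EntryImpl> cache =
        new RangeCache<>(EntryImpl::getLength, EntryImpl::getTimestamp); // accessors assumed

cache.put(position, entry); // entry must arrive holding at least one reference
Collection<EntryImpl> hits = cache.getRange(firstPosition, lastPosition);
try {
    hits.forEach(this::consume); // consume is a hypothetical callback
} finally {
    hits.forEach(EntryImpl::release); // caller owns the references handed out by getRange
}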
pulsar_RangeCache_removeRange_rdh | /**
*
* @param first
* the first key in the range
* @param last
* the last key in the range
* @param lastInclusive
* whether the last key is included in the range
* @return a pair containing the number of removed entries and their total size
*/
public Pair<Integer, Long> removeRange(Key first, Key last, boolean lastInclusive) {
Map<Key, Value> subMap = entries.subMap(first, true, last, lastInclusive);
int removedCount = 0;
long removedSize = 0;
for (Key key : subMap.keySet()) {
Value value = entries.remove(key);
if (value == null) {
continue;
}
removedSize += weighter.getSize(value);
value.release();
++removedCount;
}
size.addAndGet(-removedSize);
return Pair.of(removedCount, removedSize);
} | 3.26 |
pulsar_OffloadIndexBlockBuilder_create_rdh | /**
* create an OffloadIndexBlockBuilder.
*/
static OffloadIndexBlockBuilder create() {
return new OffloadIndexBlockV2BuilderImpl();
} | 3.26 |
pulsar_ShutdownUtil_triggerImmediateForcefulShutdown_rdh | /**
* Triggers an immediate forceful shutdown of the current process using 1 as the status code.
*
* @see Runtime#halt(int)
*/
public static void triggerImmediateForcefulShutdown() {
triggerImmediateForcefulShutdown(1);
} | 3.26 |
pulsar_NettyFutureUtil_toCompletableFuture_rdh | /**
* Converts a Netty {@link Future} to {@link CompletableFuture}.
*
* @param future
* Netty future
* @param <V>
* value type
* @return converted future instance
*/
public static <V> CompletableFuture<V> toCompletableFuture(Future<V> future) {
Objects.requireNonNull(future, "future cannot be null");
CompletableFuture<V> adapter = new CompletableFuture<>();
if (future.isDone()) {
if (future.isSuccess()) {
adapter.complete(future.getNow());
} else {
adapter.completeExceptionally(future.cause());
}
} else {
future.addListener((Future<V> f) -> {
if (f.isSuccess()) {
adapter.complete(f.getNow());
} else {
adapter.completeExceptionally(f.cause());
}
});
}
return adapter;
} | 3.26 |
pulsar_NettyFutureUtil_toCompletableFutureVoid_rdh | /**
* Converts a Netty {@link Future} to {@link CompletableFuture} with Void type.
*
* @param future
* Netty future
* @return converted future instance
*/
public static CompletableFuture<Void> toCompletableFutureVoid(Future<?> future) {
Objects.requireNonNull(future, "future cannot be null");
CompletableFuture<Void> adapter = new CompletableFuture<>();
if (future.isDone()) {
if (future.isSuccess()) {
adapter.complete(null);
} else {
adapter.completeExceptionally(future.cause());
}
} else {
future.addListener(f -> {
if (f.isSuccess()) {
adapter.complete(null);
} else {
adapter.completeExceptionally(f.cause());
}
});
}
return adapter;
} | 3.26 |
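A hedged usage sketch: adapting a Netty write future into a CompletableFuture pipeline; channel, buildResponse(), and log are assumed to exist in the surrounding code.

import java.util.concurrent.CompletableFuture;

CompletableFuture<Void> written =
        NettyFutureUtil.toCompletableFutureVoid(channel.writeAndFlush(buildResponse()));
written.thenRun(() -> log.info("response flushed"))
        .exceptionally(ex -> {
            log.warn("write failed", ex);
            return null;
        });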
pulsar_ProtocolHandlerUtils_getProtocolHandlerDefinition_rdh | /**
* Retrieve the protocol handler definition from the provided handler nar package.
*
* @param narPath
* the path to the protocol handler NAR package
* @return the protocol handler definition
* @throws IOException
* when fail to load the protocol handler or get the definition
*/
public static ProtocolHandlerDefinition getProtocolHandlerDefinition(String narPath, String narExtractionDirectory) throws IOException {
try (NarClassLoader ncl = NarClassLoaderBuilder.builder().narFile(new File(narPath)).extractionDirectory(narExtractionDirectory).build()) {
return getProtocolHandlerDefinition(ncl);
}
} | 3.26 |
pulsar_ProtocolHandlerUtils_searchForHandlers_rdh | /**
* Search and load the available protocol handlers.
*
* @param handlersDirectory
* the directory where all the protocol handlers are stored
* @return a collection of protocol handlers
* @throws IOException
* when fail to load the available protocol handlers from the provided directory.
*/
public static ProtocolHandlerDefinitions searchForHandlers(String handlersDirectory, String narExtractionDirectory) throws IOException {
Path path = Paths.get(handlersDirectory).toAbsolutePath();
log.info("Searching for protocol handlers in {}", path);
ProtocolHandlerDefinitions handlers = new ProtocolHandlerDefinitions();
if (!path.toFile().exists()) {
log.warn("Protocol handler directory not found");
return handlers;
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path, "*.nar")) {
for (Path archive : stream) {
try {
ProtocolHandlerDefinition phDef = ProtocolHandlerUtils.getProtocolHandlerDefinition(archive.toString(), narExtractionDirectory);
log.info("Found protocol handler from {} : {}", archive, phDef);
checkArgument(StringUtils.isNotBlank(phDef.getName()));
checkArgument(StringUtils.isNotBlank(phDef.getHandlerClass()));
ProtocolHandlerMetadata metadata = new ProtocolHandlerMetadata();
metadata.setDefinition(phDef);
metadata.setArchivePath(archive);
handlers.handlers().put(phDef.getName(), metadata);
} catch (Throwable t) {
log.warn((("Failed to load connector from {}." + " It is OK however if you want to use this protocol handler,") + " please make sure you put the correct protocol handler NAR") + " package in the handlers directory.", archive, t);
}
}
}
return handlers;
} | 3.26 |
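A hedged startup sketch using the accessors seen above (getArchivePath() mirrors the setArchivePath call in the loop; the directory paths are placeholders):

ProtocolHandlerDefinitions defs = ProtocolHandlerUtils.searchForHandlers(
        "/pulsar/protocols", "/tmp/pulsar-nar"); // placeholder paths
defs.handlers().forEach((name, metadata) ->
        log.info("discovered protocol handler {} at {}", name, metadata.getArchivePath()));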
pulsar_ProtocolHandlerUtils_load_rdh | /**
* Load the protocol handler according to the handler definition.
*
* @param metadata
* the protocol handler metadata.
* @return the protocol handler instance wrapped with its NAR class loader
*/
static ProtocolHandlerWithClassLoader load(ProtocolHandlerMetadata metadata, String narExtractionDirectory) throws IOException {
final File narFile = metadata.getArchivePath().toAbsolutePath().toFile();
NarClassLoader ncl = NarClassLoaderBuilder.builder().narFile(narFile).parentClassLoader(ProtocolHandler.class.getClassLoader()).extractionDirectory(narExtractionDirectory).build();
ProtocolHandlerDefinition phDef = getProtocolHandlerDefinition(ncl);
if (StringUtils.isBlank(phDef.getHandlerClass())) {
throw new IOException((("Protocol handler `" + phDef.getName()) + "` does NOT provide a protocol") + " handler implementation");
}
try {
Class<?> handlerClass = ncl.loadClass(phDef.getHandlerClass());
Object handler = handlerClass.getDeclaredConstructor().newInstance();
if (!(handler instanceof ProtocolHandler)) {
throw new IOException(("Class " + phDef.getHandlerClass()) + " does not implement protocol handler interface");
}
ProtocolHandler ph = ((ProtocolHandler) (handler));
return new ProtocolHandlerWithClassLoader(ph, ncl);
} catch (Throwable t) {
rethrowIOException(t);
return null;
}
} | 3.26 |