Columns:
name: string, length 12 to 178 characters
code_snippet: string, length 8 to 36.5k characters
score: float64, range 3.26 to 3.68
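For orientation, here is a minimal Java sketch of the shape of each row in the listing below; the record name is illustrative and not part of the dataset.

// One row of the listing: a snippet identifier, the flattened Java source, and its score.
public record CodeSnippetRow(String name, String codeSnippet, double score) {}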
pulsar_ResourceGroupService_getPublishRateLimiters_rdh
// Visibility for testing. protected BytesAndMessagesCount getPublishRateLimiters(String rgName) throws PulsarAdminException { ResourceGroup rg = this.getResourceGroupInternal(rgName); if (rg == null) { throw new PulsarAdminException("Resource group does not exist: " + rgName); } return rg.getRgPublishRateLimiterValues();}
3.26
pulsar_ResourceGroupService_registerTenant_rdh
/** * Registers a tenant as a user of a resource group. * * @param resourceGroupName * @param tenantName * @throws PulsarAdminException if the RG does not exist, or if the tenant already references a resource group. */ public void registerTenant(String resourceGroupName, String tenantName) throws PulsarAdminException { ResourceGroup rg = checkResourceGroupExists(resourceGroupName); // Check that the tenant-name doesn't already have a RG association. // [If it does, that should be unregistered before putting a different association.] ResourceGroup oldRG = this.tenantToRGsMap.get(tenantName); if (oldRG != null) { String errMesg = (("Tenant " + tenantName) + " already references a resource group: ") + oldRG.getID(); throw new PulsarAdminException(errMesg); } ResourceGroupOpStatus status = rg.registerUsage(tenantName, ResourceGroupRefTypes.Tenants, true, this.resourceUsageTransportManagerMgr); if (status == ResourceGroupOpStatus.Exists) { String errMesg = (("Tenant " + tenantName) + " already references the resource group ") + resourceGroupName; errMesg += "; this is unexpected"; throw new PulsarAdminException(errMesg); } // Associate this tenant name with the RG. this.tenantToRGsMap.put(tenantName, rg); rgTenantRegisters.labels(resourceGroupName).inc(); }
3.26
pulsar_ResourceGroupService_getRGUsage_rdh
// Visibility for testing. protected BytesAndMessagesCount getRGUsage(String rgName, ResourceGroupMonitoringClass monClass, ResourceGroupUsageStatsType statsType) throws PulsarAdminException { final ResourceGroup rg = this.getResourceGroupInternal(rgName);if (rg != null) { switch (statsType) { default : String errStr = "Unsupported statsType: " + statsType; throw new PulsarAdminException(errStr); case Cumulative : return rg.getLocalUsageStatsCumulative(monClass); case LocalSinceLastReported : return rg.getLocalUsageStats(monClass); case ReportFromTransportMgr : return rg.getLocalUsageStatsFromBrokerReports(monClass); } } BytesAndMessagesCount retCount = new BytesAndMessagesCount(); retCount.bytes = -1; retCount.messages = -1; return retCount; }
3.26
pulsar_ResourceGroupService_getRgTenantUnRegistersCount_rdh
// Visibility for testing. protected static double getRgTenantUnRegistersCount(String rgName) {return rgTenantUnRegisters.labels(rgName).get(); }
3.26
pulsar_ResourceGroupService_resourceGroupUpdate_rdh
/** * Update RG. * * @throws PulsarAdminException if a RG with that name does not exist. */ public void resourceGroupUpdate(String rgName, ResourceGroup rgConfig) throws PulsarAdminException { if (rgConfig == null) { throw new IllegalArgumentException("ResourceGroupUpdate: Invalid null ResourceGroup config"); } ResourceGroup rg = this.getResourceGroupInternal(rgName); if (rg == null) { throw new PulsarAdminException("Resource group does not exist: " + rgName); } rg.updateResourceGroup(rgConfig); rgUpdates.labels(rgName).inc(); }
3.26
pulsar_ResourceGroupService_getRgLocalUsageByteCount_rdh
// Visibility for testing. protected static double getRgLocalUsageByteCount(String rgName, String monClassName) { return rgLocalUsageBytes.labels(rgName, monClassName).get(); }
3.26
pulsar_ResourceGroupService_getRgLocalUsageMessageCount_rdh
// Visibility for testing. protected static double getRgLocalUsageMessageCount(String rgName, String monClassName) { return rgLocalUsageMessages.labels(rgName, monClassName).get(); }
3.26
pulsar_ResourceGroupService_getRgQuotaCalculationTime_rdh
// Visibility for testing. protected static Value getRgQuotaCalculationTime() { return rgQuotaCalculationLatency.get(); }
3.26
pulsar_ResourceGroupService_calculateQuotaForAllResourceGroups_rdh
// Periodically calculate the updated quota for all RGs in the background, // from the reports received from other brokers. // [Visibility for unit testing.] protected void calculateQuotaForAllResourceGroups() { // Calculate the quota for the next window for this RG, based on the observed usage. final Summary.Timer quotaCalcTimer = rgQuotaCalculationLatency.startTimer(); BytesAndMessagesCount updatedQuota = new BytesAndMessagesCount(); this.resourceGroupsMap.forEach((rgName, resourceGroup) -> { BytesAndMessagesCount globalUsageStats; BytesAndMessagesCount localUsageStats; BytesAndMessagesCount confCounts; for (ResourceGroupMonitoringClass monClass : ResourceGroupMonitoringClass.values()) { try { globalUsageStats = resourceGroup.getGlobalUsageStats(monClass); localUsageStats = resourceGroup.getLocalUsageStatsFromBrokerReports(monClass); confCounts = resourceGroup.getConfLimits(monClass); long[] globUsageBytesArray = new long[]{ globalUsageStats.bytes }; updatedQuota.bytes = this.quotaCalculator.computeLocalQuota(confCounts.bytes, localUsageStats.bytes, globUsageBytesArray); long[] globUsageMessagesArray = new long[]{ globalUsageStats.messages }; updatedQuota.messages = this.quotaCalculator.computeLocalQuota(confCounts.messages, localUsageStats.messages, globUsageMessagesArray); BytesAndMessagesCount oldBMCount = resourceGroup.updateLocalQuota(monClass, updatedQuota); // Guard against unconfigured quota settings, for which computeLocalQuota will return negative. if (updatedQuota.messages >= 0) { rgCalculatedQuotaMessages.labels(rgName, monClass.name()).inc(updatedQuota.messages); } if (updatedQuota.bytes >= 0) { f1.labels(rgName, monClass.name()).inc(updatedQuota.bytes); } if (oldBMCount != null) { long messagesIncrement = updatedQuota.messages - oldBMCount.messages; long v63 = updatedQuota.bytes - oldBMCount.bytes; if (log.isDebugEnabled()) { log.debug(("calculateQuota for RG={} [class {}]: " + "updatedlocalBytes={}, updatedlocalMesgs={}; ") + "old bytes={}, old mesgs={}; incremented bytes by {}, messages by {}", rgName, monClass, updatedQuota.bytes, updatedQuota.messages, oldBMCount.bytes, oldBMCount.messages, v63, messagesIncrement); } } else if (log.isDebugEnabled()) { log.debug("calculateQuota for RG={} [class {}]: got back null from updateLocalQuota", rgName, monClass); } } catch (Throwable t) { log.error("Got exception={} while calculating new quota for monitoring-class={} of RG={}", t.getMessage(), monClass, rgName); } } }); double diffTimeSeconds = quotaCalcTimer.observeDuration(); if (log.isDebugEnabled()) {log.debug("calculateQuotaForAllResourceGroups took {} milliseconds", diffTimeSeconds * 1000);} // Check any re-scheduling requirements for next time. // Use the same period as getResourceUsagePublishIntervalInSecs; // cancel and re-schedule this task if the period of execution has changed. 
ServiceConfiguration config = pulsar.getConfiguration(); long newPeriodInSeconds = config.getResourceUsageTransportPublishIntervalInSecs(); if (newPeriodInSeconds != this.resourceUsagePublishPeriodInSeconds) { if (this.calculateQuotaPeriodicTask == null) { log.error("calculateQuotaForAllResourceGroups: Unable to find running task to cancel when " + "publish period changed from {} to {} {}", this.resourceUsagePublishPeriodInSeconds, newPeriodInSeconds, timeUnitScale); } else { boolean cancelStatus = this.calculateQuotaPeriodicTask.cancel(true); log.info("calculateQuotaForAllResourceGroups: Got status={} in cancel of periodic " + " when publish period changed from {} to {} {}", cancelStatus, this.resourceUsagePublishPeriodInSeconds, newPeriodInSeconds, timeUnitScale); }this.calculateQuotaPeriodicTask = pulsar.getExecutor().scheduleAtFixedRate(catchingAndLoggingThrowables(this::calculateQuotaForAllResourceGroups), newPeriodInSeconds, newPeriodInSeconds, timeUnitScale); this.resourceUsagePublishPeriodInSeconds = newPeriodInSeconds; maxIntervalForSuppressingReportsMSecs = this.resourceUsagePublishPeriodInSeconds * MaxUsageReportSuppressRounds; } }
3.26
pulsar_ResourceGroupService_getResourceGroupInternal_rdh
/** * Get the RG with the given name. For internal operations only. */ private ResourceGroup getResourceGroupInternal(String resourceGroupName) { if (resourceGroupName == null) { throw new IllegalArgumentException("Invalid null resource group name: " + resourceGroupName); } return resourceGroupsMap.get(resourceGroupName); }
3.26
pulsar_ResourceGroupService_getRgNamespaceUnRegistersCount_rdh
// Visibility for testing. protected static double getRgNamespaceUnRegistersCount(String rgName) { return rgNamespaceUnRegisters.labels(rgName).get(); }
3.26
pulsar_ResourceGroupService_aggregateResourceGroupLocalUsages_rdh
// Periodically aggregate the usage from all topics known to the BrokerService. // Visibility for unit testing. protected void aggregateResourceGroupLocalUsages() { final Summary.Timer aggrUsageTimer = rgUsageAggregationLatency.startTimer(); BrokerService bs = this.pulsar.getBrokerService(); Map<String, TopicStatsImpl> topicStatsMap = bs.getTopicStats(); for (Map.Entry<String, TopicStatsImpl> entry : topicStatsMap.entrySet()) { final String topicName = entry.getKey(); final TopicStats topicStats = entry.getValue(); final TopicName topic = TopicName.get(topicName); final String tenantString = topic.getTenant(); final String nsString = topic.getNamespacePortion(); final NamespaceName fqNamespace = topic.getNamespaceObject(); // Can't use containsKey here, as that checks for exact equality // (we need a check for string-comparison). val tenantRG = this.tenantToRGsMap.get(tenantString); val namespaceRG = this.namespaceToRGsMap.get(fqNamespace); if ((tenantRG == null) && (namespaceRG == null)) { // This topic's NS/tenant are not registered to any RG. continue; } this.updateStatsWithDiff(topicName, tenantString, nsString, topicStats.getBytesInCounter(), topicStats.getMsgInCounter(), ResourceGroupMonitoringClass.Publish); this.updateStatsWithDiff(topicName, tenantString, nsString, topicStats.getBytesOutCounter(), topicStats.getMsgOutCounter(), ResourceGroupMonitoringClass.Dispatch); } double diffTimeSeconds = aggrUsageTimer.observeDuration(); if (log.isDebugEnabled()) { log.debug("aggregateResourceGroupLocalUsages took {} milliseconds", diffTimeSeconds * 1000); } // Check any re-scheduling requirements for next time. // Use the same period as getResourceUsagePublishIntervalInSecs; // cancel and re-schedule this task if the period of execution has changed. ServiceConfiguration config = pulsar.getConfiguration(); long newPeriodInSeconds = config.getResourceUsageTransportPublishIntervalInSecs(); if (newPeriodInSeconds != this.f0) { if (this.aggregateLocalUsagePeriodicTask == null) { log.error("aggregateResourceGroupLocalUsages: Unable to find running task to cancel when " + "publish period changed from {} to {} {}", this.f0, newPeriodInSeconds, timeUnitScale); } else { boolean cancelStatus = this.aggregateLocalUsagePeriodicTask.cancel(true); log.info("aggregateResourceGroupLocalUsages: Got status={} in cancel of periodic " + "when publish period changed from {} to {} {}", cancelStatus, this.f0, newPeriodInSeconds, timeUnitScale); } this.aggregateLocalUsagePeriodicTask = pulsar.getExecutor().scheduleAtFixedRate(catchingAndLoggingThrowables(this::aggregateResourceGroupLocalUsages), newPeriodInSeconds, newPeriodInSeconds, timeUnitScale); this.f0 = newPeriodInSeconds; } }
3.26
pulsar_ResourceGroupService_unRegisterTenant_rdh
/** * UnRegisters a tenant from a resource group. * * @param resourceGroupName * @param tenantName * @throws PulsarAdminException if the RG does not exist, or if the tenant does not yet reference the RG. */ public void unRegisterTenant(String resourceGroupName, String tenantName) throws PulsarAdminException { ResourceGroup rg = checkResourceGroupExists(resourceGroupName); ResourceGroupOpStatus status = rg.registerUsage(tenantName, ResourceGroupRefTypes.Tenants, false, this.resourceUsageTransportManagerMgr); if (status == ResourceGroupOpStatus.DoesNotExist) { String errMesg = (("Tenant " + tenantName) + " does not yet reference resource group ") + resourceGroupName; throw new PulsarAdminException(errMesg); } // Dissociate this tenant name from the RG. this.tenantToRGsMap.remove(tenantName, rg); rgTenantUnRegisters.labels(resourceGroupName).inc(); }
3.26
pulsar_ResourceGroupService_resourceGroupGet_rdh
/** * Get a copy of the RG with the given name. */ public ResourceGroup resourceGroupGet(String resourceGroupName) { ResourceGroup retrievedRG = this.getResourceGroupInternal(resourceGroupName); if (retrievedRG == null) { return null; } // Return a copy. return new ResourceGroup(retrievedRG); }
3.26
pulsar_ResourceGroupService_getRgTenantRegistersCount_rdh
// Visibility for testing. protected static double getRgTenantRegistersCount(String rgName) { return rgTenantRegisters.labels(rgName).get(); }
3.26
pulsar_ResourceGroupService_resourceGroupCreate_rdh
/** * Create RG, with non-default functions for resource-usage transport-manager. * * @throws PulsarAdminException if a RG with that name already exists (even if the resource usage handlers are different). */ public void resourceGroupCreate(String rgName, ResourceGroup rgConfig, ResourceUsagePublisher rgPublisher, ResourceUsageConsumer rgConsumer) throws PulsarAdminException { this.checkRGCreateParams(rgName, rgConfig); ResourceGroup rg = new ResourceGroup(this, rgName, rgConfig, rgPublisher, rgConsumer); resourceGroupsMap.put(rgName, rg); }
3.26
pulsar_ResourceGroupService_getRgUpdatesCount_rdh
// Visibility for testing. protected static double getRgUpdatesCount(String rgName) { return rgUpdates.labels(rgName).get(); }
3.26
pulsar_TimeAverageBrokerData_reset_rdh
/** * Reuse this TimeAverageBrokerData using new data. * * @param bundles * The bundles belonging to the broker. * @param data * Map from bundle names to the data for that bundle. * @param defaultStats * The stats to use when a bundle belonging to this broker is not found in the bundle data map. */ public void reset(final Set<String> bundles, final Map<String, BundleData> data, final NamespaceBundleStats defaultStats) { shortTermMsgThroughputIn = 0; f0 = 0; shortTermMsgRateIn = 0;shortTermMsgRateOut = 0; longTermMsgThroughputIn = 0; longTermMsgThroughputOut = 0; longTermMsgRateIn = 0; longTermMsgRateOut = 0; for (String bundle : bundles) { final BundleData bundleData = data.get(bundle); if (bundleData == null) { shortTermMsgThroughputIn += defaultStats.msgThroughputIn; f0 += defaultStats.msgThroughputOut; shortTermMsgRateIn += defaultStats.msgRateIn; shortTermMsgRateOut += defaultStats.msgRateOut; longTermMsgThroughputIn += defaultStats.msgThroughputIn; longTermMsgThroughputOut += defaultStats.msgThroughputOut; longTermMsgRateIn += defaultStats.msgRateIn;longTermMsgRateOut += defaultStats.msgRateOut; } else { final TimeAverageMessageData shortTermData = bundleData.getShortTermData(); final TimeAverageMessageData longTermData = bundleData.getLongTermData(); shortTermMsgThroughputIn += shortTermData.getMsgThroughputIn(); f0 += shortTermData.getMsgThroughputOut(); shortTermMsgRateIn += shortTermData.getMsgRateIn(); shortTermMsgRateOut += shortTermData.getMsgRateOut(); longTermMsgThroughputIn += longTermData.getMsgThroughputIn(); longTermMsgThroughputOut += longTermData.getMsgThroughputOut(); longTermMsgRateIn += longTermData.getMsgRateIn(); longTermMsgRateOut += longTermData.getMsgRateOut(); } } }
3.26
pulsar_TransactionImpl_registerProducedTopic_rdh
// register the topics that will be modified by this transaction public CompletableFuture<Void> registerProducedTopic(String topic) { CompletableFuture<Void> completableFuture = new CompletableFuture<>(); if (checkIfOpen(completableFuture)) { synchronized(this) { // we need to issue the request to TC to register the produced topic return registerPartitionMap.compute(topic, (key, future) -> { if (future != null) { return future.thenCompose(ignored -> CompletableFuture.completedFuture(null)); } else { return tcClient.addPublishPartitionToTxnAsync(txnId, Lists.newArrayList(topic)).thenCompose(ignored -> CompletableFuture.completedFuture(null)); } }); } } return completableFuture; }
3.26
pulsar_TransactionImpl_registerAckedTopic_rdh
// register the topics that will be modified by this transaction public CompletableFuture<Void> registerAckedTopic(String topic, String subscription) { CompletableFuture<Void> completableFuture = new CompletableFuture<>(); if (checkIfOpen(completableFuture)) { synchronized(this) { // we need to issue the request to TC to register the acked topic return registerSubscriptionMap.compute(Pair.of(topic, subscription), (key, future) -> { if (future != null) { return future.thenCompose(ignored -> CompletableFuture.completedFuture(null)); } else { return tcClient.addSubscriptionToTxnAsync(txnId, topic, subscription).thenCompose(ignored -> CompletableFuture.completedFuture(null)); } }); } } return completableFuture; }
3.26
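The two register methods above (registerProducedTopic and registerAckedTopic) are invoked internally by the client when a transaction first touches a topic or a subscription. Below is a hedged sketch of the public client-side flow that ends up triggering them; the service URL, topic names, and subscription name are illustrative, and it assumes a broker with transaction support enabled (transactionCoordinatorEnabled=true). Producers used inside transactions must disable the send timeout.

import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.*;
import org.apache.pulsar.client.api.transaction.Transaction;

public class TxnRegistrationSketch {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650") // illustrative address
                .enableTransaction(true)
                .build();
        Producer<String> producer = client.newProducer(Schema.STRING)
                .topic("out-topic")
                .sendTimeout(0, TimeUnit.SECONDS) // required for transactional producers
                .create();
        Consumer<String> consumer = client.newConsumer(Schema.STRING)
                .topic("in-topic")
                .subscriptionName("sub")
                .subscribe();

        Transaction txn = client.newTransaction()
                .withTransactionTimeout(5, TimeUnit.MINUTES)
                .build()
                .get();

        Message<String> msg = consumer.receive();
        // Producing under the transaction is what leads to registerProducedTopic("out-topic").
        producer.newMessage(txn).value(msg.getValue()).send();
        // Acknowledging under the transaction is what leads to registerAckedTopic("in-topic", "sub").
        consumer.acknowledgeAsync(msg.getMessageId(), txn).get();

        txn.commit().get();
        client.close();
    }
}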
pulsar_PulsarConnectorConfig_getManagedLedgerOffloadMaxThreads_rdh
// --- Ledger Offloading --- public int getManagedLedgerOffloadMaxThreads() { return this.managedLedgerOffloadMaxThreads; }
3.26
pulsar_PulsarConnectorConfig_getZookeeperUri_rdh
/** * * @deprecated use {@link #getMetadataUrl()} */ @Deprecated @NotNull public String getZookeeperUri() { return getMetadataUrl(); }
3.26
pulsar_PulsarConnectorConfig_setZookeeperUri_rdh
/** * * @deprecated use {@link #setMetadataUrl(String)} */ @Deprecated @Config("pulsar.zookeeper-uri") public PulsarConnectorConfig setZookeeperUri(String zookeeperUri) { if (hasMetadataUrl) { return this; } this.metadataUrl = zookeeperUri; return this; }
3.26
pulsar_PulsarConnectorConfig_getBookkeeperThrottleValue_rdh
// --- Bookkeeper Config --- public int getBookkeeperThrottleValue() { return bookkeeperThrottleValue; }
3.26
pulsar_PulsarConnectorConfig_m3_rdh
// --- Nar extraction config public String m3() { return narExtractionDirectory;}
3.26
pulsar_PulsarConnectorConfig_getAuthPlugin_rdh
// --- Authentication --- public String getAuthPlugin() { return this.authPluginClassName; }
3.26
pulsar_CmdProduce_updateConfig_rdh
/** * Set Pulsar client configuration. */ public void updateConfig(ClientBuilder newBuilder, Authentication authentication, String serviceURL) { this.clientBuilder = newBuilder; this.authentication = authentication; this.serviceURL = serviceURL; }
3.26
pulsar_CmdProduce_run_rdh
/** * Run the producer. * * @return 0 for success, < 0 otherwise * @throws Exception */ public int run() throws PulsarClientException { if (mainOptions.size() != 1) { throw new ParameterException("Please provide one and only one topic name."); }if (this.numTimesProduce <= 0) {throw new ParameterException("Number of times need to be positive number."); } if (messages.size() > 0) { messages = messages.stream().map(str -> str.split(separator)).flatMap(Stream::of).toList(); } if ((messages.size() == 0) && (messageFileNames.size() == 0)) { throw new ParameterException("Please supply message content with either --messages or --files"); } if (keyValueEncodingType == null) { keyValueEncodingType = KEY_VALUE_ENCODING_TYPE_NOT_SET; } else { switch (keyValueEncodingType) { case KEY_VALUE_ENCODING_TYPE_SEPARATED : case KEY_VALUE_ENCODING_TYPE_INLINE :break; default : throw new ParameterException(("--key-value-encoding-type " + keyValueEncodingType) + " is not valid, only 'separated' or 'inline'"); } } int totalMessages = (messages.size() + messageFileNames.size()) * numTimesProduce; if (totalMessages > MAX_MESSAGES) { String msg = ((("Attempting to send " + totalMessages) + " messages. Please do not send more than ") + MAX_MESSAGES) + " messages"; throw new ParameterException(msg); } String topic = this.mainOptions.get(0); if (this.serviceURL.startsWith("ws")) { return publishToWebSocket(topic); } else { return publish(topic); } }
3.26
pulsar_FunctionRecord_from_rdh
/** * Creates a builder for a Record from a Function Context. * The builder is initialized with the output topic from the Context and with the topicName, key, eventTime, * properties, partitionId, partitionIndex and recordSequence from the Context input Record. * It doesn't initialize a Message at the moment. * * @param context * a Function Context * @param <T> * type of Record to build * @return a Record builder initialised with values from the Function Context */ public static <T> FunctionRecord.FunctionRecordBuilder<T> from(Context context, Schema<T> schema) { if (schema == null) { throw new IllegalArgumentException("Schema should not be null."); } Record<?> currentRecord = context.getCurrentRecord(); FunctionRecordBuilder<T> builder = new FunctionRecordBuilder<T>().schema(schema).destinationTopic(context.getOutputTopic()).properties(currentRecord.getProperties()); currentRecord.getTopicName().ifPresent(builder::topicName); currentRecord.getKey().ifPresent(builder::key); currentRecord.getEventTime().ifPresent(builder::eventTime); currentRecord.getPartitionId().ifPresent(builder::partitionId); currentRecord.getPartitionIndex().ifPresent(builder::partitionIndex); currentRecord.getRecordSequence().ifPresent(builder::recordSequence); return builder; }
3.26
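A hedged sketch of how a Pulsar Function might use the builder above to emit an output record that inherits metadata from the current input record. The value(...) setter and the import paths are assumptions inferred from the builder shown here, not verified against a specific Pulsar release; the class name is illustrative.

import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.functions.api.Context;
import org.apache.pulsar.functions.api.Function;
import org.apache.pulsar.functions.api.Record;
import org.apache.pulsar.functions.api.utils.FunctionRecord;

/** Upper-cases the input while keeping topicName/key/eventTime and other metadata from the source record. */
public class UpperCaseFunction implements Function<String, Record<String>> {
    @Override
    public Record<String> process(String input, Context context) {
        return FunctionRecord.<String>from(context, Schema.STRING)
                .value(input.toUpperCase()) // value(...) assumed to be exposed by the builder
                .build();
    }
}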
pulsar_AuthenticationDataTls_hasDataForTls_rdh
/* TLS */ @Override public boolean hasDataForTls() { return true; }
3.26
pulsar_JavaInstanceRunnable_setup_rdh
/** * NOTE: this method should be called in the instance thread, in order to make class loading work. */ private synchronized void setup() throws Exception { this.instanceCache = InstanceCache.getInstanceCache(); if (this.collectorRegistry == null) { this.collectorRegistry = FunctionCollectorRegistry.getDefaultImplementation(); } this.stats = ComponentStatsManager.getStatsManager(this.collectorRegistry, this.metricsLabels, this.instanceCache.getScheduledExecutorService(), this.componentType); // initialize the thread context ThreadContext.put("function", FunctionCommon.getFullyQualifiedName(instanceConfig.getFunctionDetails())); ThreadContext.put("functionname", instanceConfig.getFunctionDetails().getName()); ThreadContext.put("instance", instanceConfig.getInstanceName()); log.info("Starting Java Instance {} : \n Details = {}", instanceConfig.getFunctionDetails().getName(), instanceConfig.getFunctionDetails()); Object object; if (instanceConfig.getFunctionDetails().getClassName().equals(WindowFunctionExecutor.class.getName())) { object = Reflections.createInstance(instanceConfig.getFunctionDetails().getClassName(), instanceClassLoader); } else { object = Reflections.createInstance(instanceConfig.getFunctionDetails().getClassName(), functionClassLoader); } // The user class must implement either the Pulsar Function interface or java.util.function.Function. if ((!(object instanceof Function)) && (!(object instanceof java.util.function.Function))) { throw new RuntimeException("User class must either be Function or java.util.Function"); } // start the state table setupStateStore(); ContextImpl contextImpl = setupContext(); // start the output producer setupOutput(contextImpl); // start the input consumer setupInput(contextImpl); // start any log topic handler setupLogHandler(); if ((!(object instanceof IdentityFunction)) && (!(sink instanceof PulsarSink))) { sinkSchemaInfoProvider = new SinkSchemaInfoProvider(); } javaInstance = new JavaInstance(contextImpl, object, instanceConfig); try { Thread.currentThread().setContextClassLoader(functionClassLoader); javaInstance.initialize(); } finally { Thread.currentThread().setContextClassLoader(instanceClassLoader); } // to signal member variables are initialized isInitialized = true; }
3.26
pulsar_JavaInstanceRunnable_close_rdh
/** * NOTE: this method is be synchronized because it is potentially called by two different places * one inside the run/finally clause and one inside the ThreadRuntime::stop. */ @Override public synchronized void close() { isInitialized = false; if (stats != null) { stats.close(); stats = null; } if (source != null) { if (componentType == ComponentType.SOURCE) { Thread.currentThread().setContextClassLoader(componentClassLoader); } try { source.close(); } catch (Throwable e) { log.error("Failed to close source {}", instanceConfig.getFunctionDetails().getSource().getClassName(), e); } finally { Thread.currentThread().setContextClassLoader(instanceClassLoader); } source = null; } if (sink != null) { if (componentType == ComponentType.SINK) { Thread.currentThread().setContextClassLoader(componentClassLoader); } try { sink.close(); } catch (Throwable e) { log.error("Failed to close sink {}", instanceConfig.getFunctionDetails().getSource().getClassName(), e); } finally { Thread.currentThread().setContextClassLoader(instanceClassLoader); } sink = null; } if (null != javaInstance) { try { Thread.currentThread().setContextClassLoader(functionClassLoader); javaInstance.close(); } finally { Thread.currentThread().setContextClassLoader(instanceClassLoader); javaInstance = null; } } if (null != stateManager) { stateManager.close(); } if (null != stateStoreProvider) { stateStoreProvider.close(); } instanceCache = null; if (logAppender != null) { removeLogTopicAppender(LoggerContext.getContext()); removeLogTopicAppender(LoggerContext.getContext(false)); logAppender.stop(); logAppender = null; } }
3.26
pulsar_JavaInstanceRunnable_interpolateSecretsIntoConfigs_rdh
/** * Recursively interpolate configured secrets into the config map by calling * {@link SecretsProvider#interpolateSecretForValue(String)}. * * @param secretsProvider * - the secrets provider that will convert secret's values into config values. * @param configs * - the connector configuration map, which will be mutated. */ private static void interpolateSecretsIntoConfigs(SecretsProvider secretsProvider, Map<String, Object> configs) { for (Map.Entry<String, Object> entry : configs.entrySet()) { Object value = entry.getValue(); if (value instanceof String) { String updatedValue = secretsProvider.interpolateSecretForValue(((String) (value))); if (updatedValue != null) { entry.setValue(updatedValue); } } else if (value instanceof Map) { interpolateSecretsIntoConfigs(secretsProvider, ((Map<String, Object>) (value)));} } }
3.26
pulsar_JavaInstanceRunnable_run_rdh
/** * The core logic that initialize the instance thread and executes the function. */ @Override public void run() { try { setup(); Thread v5 = Thread.currentThread(); Consumer<Throwable> asyncErrorHandler = throwable -> v5.interrupt(); AsyncResultConsumer asyncResultConsumer = this::handleResult; while (true) { currentRecord = readInput(); // increment number of records received from source stats.incrTotalReceived(); if (instanceConfig.getFunctionDetails().getProcessingGuarantees() == ProcessingGuarantees.ATMOST_ONCE) { if (instanceConfig.getFunctionDetails().getAutoAck()) { currentRecord.ack(); }} JavaExecutionResult result; // set last invocation time stats.setLastInvocation(System.currentTimeMillis()); // start time for process latency stat stats.processTimeStart(); // process the message Thread.currentThread().setContextClassLoader(functionClassLoader); result = javaInstance.handleMessage(currentRecord, currentRecord.getValue(), asyncResultConsumer, asyncErrorHandler); Thread.currentThread().setContextClassLoader(instanceClassLoader); // register end time stats.processTimeEnd(); if (result != null) { // process the synchronous results handleResult(currentRecord, result); } if (deathException != null) { // Ideally the current java instance thread will be interrupted when the deathException is set. // But if the CompletableFuture returned by the Pulsar Function is completed exceptionally(the // function has invoked the fatal method) before being put into the JavaInstance // .pendingAsyncRequests, the interrupted exception may be thrown when putting this future to // JavaInstance.pendingAsyncRequests. The interrupted exception would be caught by the JavaInstance // and be skipped. // Therefore, we need to handle this case by checking the deathException here and rethrow it. throw deathException; } } } catch (Throwable t) { if (deathException != null) { log.error("[{}] Fatal exception occurred in the instance", FunctionCommon.getFullyQualifiedInstanceId(instanceConfig.getFunctionDetails().getTenant(), instanceConfig.getFunctionDetails().getNamespace(), instanceConfig.getFunctionDetails().getName(), instanceConfig.getInstanceId()), deathException); } else { log.error("[{}] Uncaught exception in Java Instance", FunctionCommon.getFullyQualifiedInstanceId(instanceConfig.getFunctionDetails().getTenant(), instanceConfig.getFunctionDetails().getNamespace(), instanceConfig.getFunctionDetails().getName(), instanceConfig.getInstanceId()), t); deathException = t; } if (stats != null) { stats.incrSysExceptions(deathException); } } finally { log.info("Closing instance"); close(); } }
3.26
pulsar_LookupProxyHandler_getBrokerServiceUrl_rdh
/** * Get default broker service url or discovery an available broker. */ private String getBrokerServiceUrl(long clientRequestId) { if (StringUtils.isNotBlank(brokerServiceURL)) {return brokerServiceURL; } ServiceLookupData availableBroker; try { availableBroker = discoveryProvider.nextBroker(); } catch (Exception e) { log.warn("[{}] Failed to get next active broker {}", clientAddress, e.getMessage(), e); writeAndFlush(Commands.newError(clientRequestId, ServerError.ServiceNotReady, e.getMessage())); return null; } return this.connectWithTLS ? availableBroker.getPulsarServiceUrlTls() : availableBroker.getPulsarServiceUrl(); }
3.26
pulsar_LookupProxyHandler_handlePartitionMetadataResponse_rdh
/** * Always get partition metadata from broker service. */ private void handlePartitionMetadataResponse(CommandPartitionedTopicMetadata partitionMetadata, long clientRequestId) { TopicName topicName = TopicName.get(partitionMetadata.getTopic()); String serviceUrl = getBrokerServiceUrl(clientRequestId); if (serviceUrl == null) { log.warn("No available broker for {} to lookup partition metadata", topicName); return; } InetSocketAddress addr = getAddr(serviceUrl, clientRequestId); if (addr == null) { return; } if (log.isDebugEnabled()) { log.debug("Getting connections to '{}' for Looking up topic '{}' with clientReq Id '{}'", addr, topicName.getPartitionedTopicName(), clientRequestId); } proxyConnection.getConnectionPool().getConnection(addr).thenAccept(clientCnx -> { // Connected to backend broker long requestId = proxyConnection.newRequestId(); ByteBuf command; command = Commands.newPartitionMetadataRequest(topicName.toString(), requestId); clientCnx.newLookup(command, requestId).whenComplete((r, t) -> { if (t != null) { log.warn("[{}] failed to get Partitioned metadata : {}", topicName.toString(), t.getMessage(), t); writeAndFlush(Commands.newLookupErrorResponse(getServerError(t), t.getMessage(), clientRequestId)); } else { writeAndFlush(Commands.newPartitionMetadataResponse(r.partitions, clientRequestId)); } proxyConnection.getConnectionPool().releaseConnection(clientCnx); });}).exceptionally(ex -> { // Failed to connect to backend broker writeAndFlush(Commands.newPartitionMetadataResponse(getServerError(ex), ex.getMessage(), clientRequestId)); return null; }); }
3.26
pulsar_LinuxInfoUtils_m0_rdh
/** * Get paths of all usable physical nic. * * @return All usable physical nic paths. */ public static List<String> m0() { try (Stream<Path> stream = Files.list(Paths.get(NIC_PATH))) { return stream.filter(LinuxInfoUtils::isPhysicalNic).filter(LinuxInfoUtils::isUsable).map(path -> path.getFileName().toString()).collect(Collectors.toList()); } catch (IOException e) { log.error("[LinuxInfo] Failed to find NICs", e); return Collections.emptyList(); } }
3.26
pulsar_LinuxInfoUtils_isPhysicalNic_rdh
/** * Determine whether the VM has physical nic. * * @param nicPath * Nic path * @return whether The VM has physical nic. */ private static boolean isPhysicalNic(Path nicPath) {try { if (nicPath.toString().contains("/virtual/")) { return false; } // Check the type to make sure it's ethernet (type "1") String type = readTrimStringFromFile(nicPath.resolve("type")); // wireless NICs don't report speed, ignore them. return Integer.parseInt(type) == ARPHRD_ETHER; } catch (Exception e) { log.warn("[LinuxInfo] Failed to read {} NIC type, the detail is: {}", nicPath, e.getMessage()); // Read type got error. return false; } }
3.26
pulsar_LinuxInfoUtils_getTotalNicLimit_rdh
/** * Get all physical nic limit. * * @param nics * All nic path * @param bitRateUnit * Bit rate unit * @return Total nic limit */ public static double getTotalNicLimit(List<String> nics, BitRateUnit bitRateUnit) { return bitRateUnit.convert(nics.stream().mapToDouble(nicPath -> { try { return readDoubleFromFile(m1(NIC_SPEED_TEMPLATE, nicPath)); } catch (IOException e) { log.error("[LinuxInfo] Failed to get total nic limit.", e); return 0.0; } }).sum(), BitRateUnit.Megabit); }
3.26
pulsar_LinuxInfoUtils_isLinux_rdh
/** * Determine whether the OS is the linux kernel. * * @return Whether the OS is the linux kernel */ public static boolean isLinux() { return SystemUtils.IS_OS_LINUX; }
3.26
pulsar_LinuxInfoUtils_getTotalCpuLimit_rdh
/** * Get total cpu limit. * * @param isCGroupsEnabled * Whether CGroup is enabled * @return Total cpu limit */ public static double getTotalCpuLimit(boolean isCGroupsEnabled) { if (isCGroupsEnabled) { try { long quota; long period; if (((metrics != null) && (getCpuQuotaMethod != null)) && (getCpuPeriodMethod != null)) { quota = ((long) (getCpuQuotaMethod.invoke(metrics))); period = ((long) (getCpuPeriodMethod.invoke(metrics))); } else { quota = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_QUOTA_PATH)); period = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_PERIOD_PATH)); } if (quota > 0) { return (100.0 * quota) / period; } } catch (Exception e) { log.warn("[LinuxInfo] Failed to read CPU quotas from cgroup", e); // Fallback to availableProcessors } } // Fallback to JVM reported CPU quota return 100 * Runtime.getRuntime().availableProcessors(); }
3.26
pulsar_LinuxInfoUtils_isCGroupEnabled_rdh
/** * Determine whether the OS enable CG Group. */ public static boolean isCGroupEnabled() { try { if (metrics == null) { return Files.exists(Paths.get(f0)); } String provider = ((String) (getMetricsProviderMethod.invoke(metrics))); log.info("[LinuxInfo] The system metrics provider is: {}", provider); return provider.contains("cgroup"); } catch (Exception e) { log.warn("[LinuxInfo] Failed to check cgroup CPU: {}", e.getMessage()); return false; }}
3.26
pulsar_LinuxInfoUtils_checkHasNicSpeeds_rdh
/** * Check this VM has nic speed. * * @return Whether the VM has nic speed */ public static boolean checkHasNicSpeeds() { List<String> physicalNICs = m0(); if (CollectionUtils.isEmpty(physicalNICs)) { return false; } double totalNicLimit = getTotalNicLimit(physicalNICs, BitRateUnit.Kilobit); return totalNicLimit > 0;}
3.26
pulsar_LinuxInfoUtils_getTotalNicUsage_rdh
/** * Get all physical nic usage. * * @param nics * All nic path * @param type * Nic's usage type: transport, receive * @param bitRateUnit * Bit rate unit * @return Total nic usage */ public static double getTotalNicUsage(List<String> nics, NICUsageType type, BitRateUnit bitRateUnit) { return bitRateUnit.convert(nics.stream().mapToDouble(nic -> { try { return readDoubleFromFile(m1(type.template, nic)); } catch (IOException e) { log.error("[LinuxInfo] Failed to read {} bytes for NIC {} ", type, nic, e); return 0.0; } }).sum(), BitRateUnit.Byte); }
3.26
pulsar_LinuxInfoUtils_getCpuUsageForCGroup_rdh
/** * Get CGroup cpu usage. * * @return Cpu usage */ public static long getCpuUsageForCGroup() { try { if ((metrics != null) && (getCpuUsageMethod != null)) { return ((long) (getCpuUsageMethod.invoke(metrics))); } return readLongFromFile(Paths.get(f0)); } catch (Exception e) { log.error("[LinuxInfo] Failed to read CPU usage from cgroup", e); return -1; } }
3.26
pulsar_LinuxInfoUtils_getCpuUsageForEntireHost_rdh
/** * Reads first line of /proc/stat to get total cpu usage. * * <pre> * cpu user nice system idle iowait irq softirq steal guest guest_nice * cpu 317808 128 58637 2503692 7634 0 13472 0 0 0 * </pre> * <p> * The line is split into "words", filtering out the first. The sum of all numbers gives the amount of cpu cycles used this * far. Real CPU usage should equal the sum subtracting the idle cycles (that is, idle+iowait); this would include * cpu, user, nice, system, irq, softirq, steal, guest and guest_nice. */ public static ResourceUsage getCpuUsageForEntireHost() { try (Stream<String> stream = Files.lines(Paths.get(PROC_STAT_PATH))) { Optional<String> first = stream.findFirst(); if (!first.isPresent()) { log.error("[LinuxInfo] Failed to read CPU usage from /proc/stat, because of empty values."); return ResourceUsage.empty(); } String[] v5 = first.get().split("\\s+"); long v6 = Arrays.stream(v5).filter(s -> !s.contains("cpu")).mapToLong(Long::parseLong).sum(); long idle = Long.parseLong(v5[4]) + Long.parseLong(v5[5]); return ResourceUsage.builder().usage(v6 - idle).idle(idle).total(v6).build(); } catch (IOException e) { log.error("[LinuxInfo] Failed to read CPU usage from /proc/stat", e); return ResourceUsage.empty(); } }
3.26
pulsar_LinuxInfoUtils_isUsable_rdh
/** * Determine whether nic is usable. * * @param nicPath * Nic path * @return whether nic is usable. */ private static boolean isUsable(Path nicPath) { try {String operstate = readTrimStringFromFile(nicPath.resolve("operstate")); Operstate operState = Operstate.valueOf(operstate.toUpperCase(Locale.ROOT)); switch (operState) { case UP : case UNKNOWN : case DORMANT : return true; default : return false; } } catch (Exception e) { log.warn("[LinuxInfo] Failed to read {} NIC operstate, the detail is: {}", nicPath, e.getMessage()); // Read operstate got error. return false; } }
3.26
pulsar_PulsarClientImplementationBinding_getBytes_rdh
/** * Retrieves ByteBuffer data into byte[]. * * @param byteBuffer * @return */ static byte[] getBytes(ByteBuffer byteBuffer) { if (byteBuffer == null) { return null; } if ((byteBuffer.hasArray() && (byteBuffer.arrayOffset() == 0)) && (byteBuffer.array().length == byteBuffer.remaining())) { return byteBuffer.array(); } // Direct buffer is not backed by array and it needs to be read from direct memory byte[] array = new byte[byteBuffer.remaining()]; byteBuffer.get(array); return array; }
3.26
pulsar_CmdConsume_run_rdh
/** * Run the consume command. * * @return 0 for success, < 0 otherwise */ public int run() throws PulsarClientException, IOException { if (mainOptions.size() != 1) { throw new ParameterException("Please provide one and only one topic name."); } if ((this.subscriptionName == null) || this.subscriptionName.isEmpty()) { throw new ParameterException("Subscription name is not provided."); } if (this.numMessagesToConsume < 0) { throw new ParameterException("Number of messages should be zero or positive."); } String topic = this.mainOptions.get(0);if (this.serviceURL.startsWith("ws")) { return consumeFromWebSocket(topic); } else { return consume(topic); } }
3.26
pulsar_ProxyConnection_doAuthentication_rdh
// According to auth result, send newConnected or newAuthChallenge command. private void doAuthentication(AuthData clientData) throws Exception { authState.authenticateAsync(clientData).whenCompleteAsync((authChallenge, throwable) -> { if (throwable == null) { authChallengeSuccessCallback(authChallenge);} else { authenticationFailedCallback(throwable); } }, ctx.executor());}
3.26
pulsar_ProxyConnection_spliceNIC2NIC_rdh
/** * Use splice to zero-copy of NIC to NIC. * * @param inboundChannel * input channel * @param outboundChannel * output channel */ protected static ChannelPromise spliceNIC2NIC(EpollSocketChannel inboundChannel, EpollSocketChannel outboundChannel, int spliceLength) { ChannelPromise promise = inboundChannel.newPromise(); inboundChannel.spliceTo(outboundChannel, spliceLength, promise); promise.addListener(((ChannelFutureListener) (future -> { if ((!future.isSuccess()) && (!(future.cause() instanceof ClosedChannelException))) { future.channel().pipeline().fireExceptionCaught(future.cause()); } }))); return promise; }
3.26
pulsar_ProxyConnection_getValidClientAuthData_rdh
/** * Thread-safe method to retrieve unexpired client auth data. Due to inherent race conditions, * the auth data may expire before it is used. */ CompletableFuture<AuthData> getValidClientAuthData() { final CompletableFuture<AuthData> clientAuthDataFuture = new CompletableFuture<>(); ctx().executor().execute(Runnables.catchingAndLoggingThrowables(() -> { // authState is not thread safe, so this must run on the ProxyConnection's event loop. if (!authState.isExpired()) { clientAuthDataFuture.complete(clientAuthData); } else if (state == State.ProxyLookupRequests) { maybeSendAuthChallenge(); if (pendingBrokerAuthChallenges == null) { pendingBrokerAuthChallenges = new HashSet<>(); } pendingBrokerAuthChallenges.add(clientAuthDataFuture); } else { clientAuthDataFuture.completeExceptionally(new PulsarClientException.AlreadyClosedException("ProxyConnection is not in a valid state to get client auth data for " + remoteAddress)); } })); return clientAuthDataFuture; }
3.26
pulsar_ProxyConnection_authChallengeSuccessCallback_rdh
// Always run in this class's event loop. protected void authChallengeSuccessCallback(AuthData authChallenge) { try { // authentication has completed, will send newConnected command. if (authChallenge == null) { clientAuthRole = authState.getAuthRole(); if (LOG.isDebugEnabled()) { LOG.debug("[{}] Client successfully authenticated with {} role {}", remoteAddress, authMethod, clientAuthRole); } // First connection if (state == State.Connecting) { // authentication has completed, will send newConnected command. completeConnect(); } return;} // auth not complete, continue auth with client side. final ByteBuf msg = Commands.newAuthChallenge(authMethod, authChallenge, protocolVersionToAdvertise); writeAndFlush(msg); if (LOG.isDebugEnabled()) { LOG.debug("[{}] Authentication in progress client by method {}.", remoteAddress, authMethod); } } catch (Exception e) { authenticationFailedCallback(e); } }
3.26
pulsar_ProtocolHandlers_protocol_rdh
/** * Return the handler for the provided <tt>protocol</tt>. * * @param protocol * the protocol to use * @return the protocol handler to handle the provided protocol */ public ProtocolHandler protocol(String protocol) { ProtocolHandlerWithClassLoader h = handlers.get(protocol); if (null == h) { return null; } else { return h.getHandler(); } }
3.26
pulsar_ProtocolHandlers_load_rdh
/** * Load the protocol handlers for the given <tt>protocol</tt> list. * * @param conf * the pulsar broker service configuration * @return the collection of protocol handlers */ public static ProtocolHandlers load(ServiceConfiguration conf) throws IOException { ProtocolHandlerDefinitions definitions = ProtocolHandlerUtils.searchForHandlers(conf.getProtocolHandlerDirectory(), conf.getNarExtractionDirectory()); ImmutableMap.Builder<String, ProtocolHandlerWithClassLoader> handlersBuilder = ImmutableMap.builder(); conf.getMessagingProtocols().forEach(protocol -> { ProtocolHandlerMetadata v2 = definitions.handlers().get(protocol); if (null == v2) { throw new RuntimeException((("No protocol handler is found for protocol `" + protocol) + "`. Available protocols are : ") + definitions.handlers()); } ProtocolHandlerWithClassLoader handler; try { handler = ProtocolHandlerUtils.load(v2, conf.getNarExtractionDirectory()); } catch (IOException e) { log.error(("Failed to load the protocol handler for protocol `" + protocol) + "`", e); throw new RuntimeException(("Failed to load the protocol handler for protocol `" + protocol) + "`");} if (!handler.accept(protocol)) { handler.close(); log.error(("Malformed protocol handler found for protocol `" + protocol) + "`"); throw new RuntimeException(("Malformed protocol handler found for protocol `" + protocol) + "`"); } handlersBuilder.put(protocol, handler); log.info("Successfully loaded protocol handler for protocol `{}`", protocol); });return new ProtocolHandlers(handlersBuilder.build()); }
3.26
pulsar_GrowablePriorityLongPairQueue_removeIf_rdh
/** * Removes all of the elements of this collection that satisfy the given predicate. * * @param filter * a predicate which returns {@code true} for elements to be removed * @return number of removed values */ public synchronized int removeIf(LongPairPredicate filter) { int removedValues = 0; int index = 0; long[] deletedItems = new long[size * 2]; int deleteItemsIndex = 0; // collect eligible items for deletion for (int i = 0; i < this.size; i++) { if (filter.test(data[index], data[index + 1])) { deletedItems[deleteItemsIndex++] = data[index]; deletedItems[deleteItemsIndex++] = data[index + 1]; removedValues++; } index = index + 2; } // delete collected items deleteItemsIndex = 0; for (int deleteItem = 0; deleteItem < removedValues; deleteItem++) { // delete item from the heap index = 0; for (int i = 0; i < this.size; i++) { if ((data[index] == deletedItems[deleteItemsIndex]) && (data[index + 1] == deletedItems[deleteItemsIndex + 1])) {removeAtWithoutLock(index);} index = index + 2; } deleteItemsIndex = deleteItemsIndex + 2; } return removedValues; }
3.26
pulsar_GrowablePriorityLongPairQueue_remove_rdh
/** * Removes min element from the heap. * * @return */ public LongPair remove() { return removeAt(0);}
3.26
pulsar_GrowablePriorityLongPairQueue_items_rdh
/** * * @return a new list of keys with max provided numberOfItems (makes a copy) */ public Set<LongPair> items(int numberOfItems) { Set<LongPair> items = new HashSet<>(this.size); forEach((item1, item2) -> { if (items.size() < numberOfItems) { items.add(new LongPair(item1, item2)); } }); return items; }
3.26
pulsar_GrowablePriorityLongPairQueue_removeAtWithoutLock_rdh
/** * it is not a thread-safe method and it should be called before acquiring a lock by a caller. * * @param index * @return */ private LongPair removeAtWithoutLock(int index) { if (this.size > 0) { LongPair item = new LongPair(data[index], data[index + 1]); data[index] = EmptyItem; data[index + 1] = EmptyItem; SIZE_UPDATER.decrementAndGet(this); int lastIndex = this.size << 1;swap(index, lastIndex); minHeapify(index, lastIndex - 2); return item; } else { return null; } }
3.26
pulsar_HttpLookupService_getBroker_rdh
/** * Calls http-lookup api to find broker-service address which can serve a given topic. * * @param topicName * topic-name * @return broker-socket-address that serves given topic */ @Override @SuppressWarnings("deprecation") public CompletableFuture<Pair<InetSocketAddress, InetSocketAddress>> getBroker(TopicName topicName) { String basePath = (topicName.isV2()) ? BasePathV2 : BasePathV1; String path = basePath + topicName.getLookupName(); path = (StringUtils.isBlank(listenerName)) ? path : (path + "?listenerName=") + Codec.encode(listenerName); return httpClient.get(path, LookupData.class).thenCompose(lookupData -> { // Convert LookupData into as SocketAddress, handling exceptions URI uri = null; try { if (useTls) { uri = new URI(lookupData.getBrokerUrlTls()); } else { String serviceUrl = lookupData.getBrokerUrl(); if (serviceUrl == null) { serviceUrl = lookupData.getNativeUrl(); } uri = new URI(serviceUrl); } InetSocketAddress brokerAddress = InetSocketAddress.createUnresolved(uri.getHost(), uri.getPort()); return CompletableFuture.completedFuture(Pair.of(brokerAddress, brokerAddress)); } catch (Exception e) { // Failed to parse url log.warn("[{}] Lookup Failed due to invalid url {}, {}", topicName, uri, e.getMessage()); return FutureUtil.failedFuture(e); } }); }
3.26
pulsar_PulsarAdminImpl_bookies_rdh
/** * * @return the bookies management object */ public Bookies bookies() { return bookies; }
3.26
pulsar_PulsarAdminImpl_resourcegroups_rdh
/** * * @return the resourcegroups management object */ public ResourceGroups resourcegroups() { return resourcegroups; }
3.26
pulsar_PulsarAdminImpl_lookups_rdh
/** * * @return does a looks up for the broker serving the topic */ public Lookup lookups() { return lookups; }
3.26
pulsar_PulsarAdminImpl_schemas_rdh
/** * * @return the schemas */ public Schemas schemas() { return schemas; }
3.26
pulsar_PulsarAdminImpl_brokers_rdh
/** * * @return the brokers management object */ public Brokers brokers() { return brokers; }
3.26
pulsar_PulsarAdminImpl_resourceQuotas_rdh
/** * * @return the resource quota management object */ public ResourceQuotas resourceQuotas() { return resourceQuotas; }
3.26
pulsar_PulsarAdminImpl_worker_rdh
/** * * @return the Worker stats */ public Worker worker() { return worker; }
3.26
pulsar_PulsarAdminImpl_namespaces_rdh
/** * * @return the namespaces management object */ public Namespaces namespaces() { return f2; }
3.26
pulsar_PulsarAdminImpl_packages_rdh
/** * * @return the packages management object */ public Packages packages() { return packages; }
3.26
pulsar_PulsarAdminImpl_clusters_rdh
/** * * @return the clusters management object */ public Clusters clusters() { return f0;}
3.26
pulsar_PulsarAdminImpl_getClientConfigData_rdh
/** * * @return the client Configuration Data that is being used */ public ClientConfigurationData getClientConfigData() { return clientConfigData; }
3.26
pulsar_PulsarAdminImpl_close_rdh
/** * Close the Pulsar admin client to release all the resources. */ @Override public void close() { try { auth.close(); } catch (IOException e) { LOG.error("Failed to close the authentication service", e); } client.close(); asyncHttpConnector.close(); }
3.26
pulsar_PulsarAdminImpl_sink_rdh
/** * * @return the sinks management object * @deprecated in favor of {@link #sinks} */ @Deprecated public Sink sink() { return ((Sink) (f3)); }
3.26
pulsar_PulsarAdminImpl_sinks_rdh
/** * * @return the sinks management object */ public Sinks sinks() { return f3; }
3.26
pulsar_PulsarAdminImpl_functions_rdh
/** * * @return the functions management object */ public Functions functions() { return functions; }
3.26
pulsar_PulsarAdminImpl_getServiceUrl_rdh
/** * * @return the service HTTP URL that is being used */ public String getServiceUrl() { return serviceUrl; }
3.26
pulsar_PulsarAdminImpl_brokerStats_rdh
/** * * @return the broker statics */ public BrokerStats brokerStats() { return f1; }
3.26
pulsar_PulsarAdminImpl_proxyStats_rdh
/** * * @return the proxy statics */ public ProxyStats proxyStats() { return proxyStats; }
3.26
pulsar_PulsarAdminImpl_nonPersistentTopics_rdh
/** * * @return the persistentTopics management object * @deprecated Since 2.0. See {@link #topics()} */ @Deprecated public NonPersistentTopics nonPersistentTopics() { return nonPersistentTopics; }
3.26
pulsar_PulsarAdminImpl_source_rdh
/** * * @return the sources management object * @deprecated in favor of {@link #sources()} */ @Deprecated public Source source() { return ((Source) (sources)); }
3.26
pulsar_PulsarAdminImpl_tenants_rdh
/** * * @return the tenants management object */ public Tenants tenants() { return tenants; }
3.26
pulsar_AuthenticationSasl_isRoleTokenExpired_rdh
// role token exists but expired return true private boolean isRoleTokenExpired(Map<String, String> responseHeaders) { if ((((saslRoleToken != null) && (responseHeaders != null)) && // header type match ((responseHeaders.get(SASL_HEADER_TYPE) != null) && responseHeaders.get(SASL_HEADER_TYPE).equalsIgnoreCase(SASL_TYPE_VALUE))) && // header state expired ((responseHeaders.get(SASL_HEADER_STATE) != null) && responseHeaders.get(SASL_HEADER_STATE).equalsIgnoreCase(SASL_AUTH_ROLE_TOKEN_EXPIRED))) { return true; } else { return false; } }
3.26
pulsar_AuthenticationSasl_newRequestHeader_rdh
// set header according to previous response @Override public Set<Entry<String, String>> newRequestHeader(String hostName, AuthenticationDataProvider authData, Map<String, String> previousRespHeaders) throws Exception { Map<String, String> headers = new HashMap<>();if (authData.hasDataForHttp()) { authData.getHttpHeaders().forEach(header -> headers.put(header.getKey(), header.getValue())); } // role token expired in last check. remove role token, new sasl client, restart auth. if (isRoleTokenExpired(previousRespHeaders)) { previousRespHeaders = null; saslRoleToken = null; authData = getAuthData(hostName); } // role token is not expired and OK to use. // 1. first time request, send server to check if expired. // 2. server checked, and return SASL_STATE_COMPLETE, ask server to complete auth // 3. server checked, and not return SASL_STATE_COMPLETE if (saslRoleToken != null) { headers.put(SASL_AUTH_ROLE_TOKEN, saslRoleToken); if (previousRespHeaders == null) {// first time auth, ask server to check the role token expired or not. if (log.isDebugEnabled()) { log.debug("request builder add token: Check token"); } headers.put(SASL_HEADER_STATE, SASL_STATE_SERVER_CHECK_TOKEN); } else if (previousRespHeaders.get(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_COMPLETE)) { headers.put(SASL_HEADER_STATE, SASL_STATE_COMPLETE); if (log.isDebugEnabled()) { log.debug("request builder add token. role verified by server"); } } else { if (log.isDebugEnabled()) { log.debug("request builder add token. NOT complete. state: {}", previousRespHeaders.get(SASL_HEADER_STATE)); } headers.put(SASL_HEADER_STATE, SASL_STATE_NEGOTIATE); } return headers.entrySet(); } // role token is null, need do auth. if (previousRespHeaders == null) { if (log.isDebugEnabled()) { log.debug("Init authn in client side"); } // first time init headers.put(SASL_HEADER_STATE, SASL_STATE_CLIENT_INIT); AuthData initData = authData.authenticate(AuthData.INIT_AUTH_DATA); headers.put(SASL_AUTH_TOKEN, Base64.getEncoder().encodeToString(initData.getBytes())); } else { AuthData brokerData = AuthData.of(Base64.getDecoder().decode(previousRespHeaders.get(SASL_AUTH_TOKEN))); AuthData clientData = authData.authenticate(brokerData); headers.put(SASL_STATE_SERVER, previousRespHeaders.get(SASL_STATE_SERVER)); headers.put(SASL_HEADER_TYPE, SASL_TYPE_VALUE); headers.put(SASL_HEADER_STATE, SASL_STATE_NEGOTIATE); headers.put(SASL_AUTH_TOKEN, Base64.getEncoder().encodeToString(clientData.getBytes())); } return headers.entrySet(); }
3.26
pulsar_AuthenticationSasl_setAuthParams_rdh
// Use the passed-in parameters to configure and get the jaasCredentialsContainer. private void setAuthParams(Map<String, String> authParams) throws PulsarClientException { this.configuration = authParams; // read the kerberos section name from the config files this.loginContextName = authParams.getOrDefault(JAAS_CLIENT_SECTION_NAME, JAAS_DEFAULT_CLIENT_SECTION_NAME); this.serverType = authParams.getOrDefault(SASL_SERVER_TYPE, SASL_BROKER_PROTOCOL); // init the static jaasCredentialsContainer that is shared amongst clients. if (!initializedJAAS) { synchronized(this) { if (jaasCredentialsContainer == null) { log.info("JAAS loginContext is: {}.", loginContextName); try { jaasCredentialsContainer = new JAASCredentialsContainer(loginContextName, new ClientCallbackHandler(), configuration); initializedJAAS = true; } catch (LoginException e) { log.error("JAAS login in client failed", e); throw new PulsarClientException(e); } } } } }
3.26
pulsar_PatternMultiTopicsConsumerImpl_run_rdh
// TimerTask to recheck topics change, and trigger subscribe/unsubscribe based on the change. @Override public void run(Timeout timeout) throws Exception { if (timeout.isCancelled()) { return;} client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode, topicsPattern.pattern(), topicsHash).thenCompose(getTopicsResult -> { if (log.isDebugEnabled()) { log.debug("Get topics under namespace {}, topics.size: {}, topicsHash: {}, filtered: {}", namespaceName, getTopicsResult.getTopics().size(), getTopicsResult.getTopicsHash(), getTopicsResult.isFiltered()); getTopicsResult.getTopics().forEach(topicName -> log.debug("Get topics under namespace {}, topic: {}", namespaceName, topicName)); } final List<String> oldTopics = new ArrayList<>(getPartitionedTopics()); for (String partition : getPartitions()) { TopicName topicName = TopicName.get(partition); if ((!topicName.isPartitioned()) || (!oldTopics.contains(topicName.getPartitionedTopicName()))) { oldTopics.add(partition); } } return updateSubscriptions(topicsPattern, this::setTopicsHash, getTopicsResult, topicsChangeListener, oldTopics); }).exceptionally(ex -> { log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); return null; }).thenAccept(__ -> { // schedule the next re-check task this.recheckPatternTimeout = client.timer().newTimeout(this, Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); }); }
3.26
pulsar_SchemaBuilder_record_rdh
/** * Build the schema for a record. * * @param name * name of the record. * @return builder to build the schema for a record. */ static RecordSchemaBuilder record(String name) { return DefaultImplementation.getDefaultImplementation().newRecordSchemaBuilder(name); }
3.26
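A short sketch of how the record builder returned by SchemaBuilder.record is typically used to assemble a schema and wrap it as a GenericSchema; the record and field names are illustrative.

import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.schema.GenericRecord;
import org.apache.pulsar.client.api.schema.GenericSchema;
import org.apache.pulsar.client.api.schema.RecordSchemaBuilder;
import org.apache.pulsar.client.api.schema.SchemaBuilder;
import org.apache.pulsar.common.schema.SchemaInfo;
import org.apache.pulsar.common.schema.SchemaType;

public class RecordSchemaSketch {
    public static void main(String[] args) {
        // Declare a record schema with two fields, then build it as an AVRO-backed SchemaInfo.
        RecordSchemaBuilder builder = SchemaBuilder.record("com.example.User");
        builder.field("name").type(SchemaType.STRING);
        builder.field("age").type(SchemaType.INT32);
        SchemaInfo schemaInfo = builder.build(SchemaType.AVRO);
        // Wrap the SchemaInfo as a GenericSchema usable by producers/consumers of GenericRecord.
        GenericSchema<GenericRecord> schema = Schema.generic(schemaInfo);
        System.out.println(schema.getSchemaInfo().getName());
    }
}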
pulsar_StateStoreProvider_init_rdh
/** * Initialize the state store provider. * * @param config * the config to init the state store provider. * @param functionDetails * the function details. * @throws Exception * when failed to init the state store provider. */ default void init(Map<String, Object> config, FunctionDetails functionDetails) throws Exception { }
3.26
pulsar_WatermarkTimeTriggerPolicy_handleWaterMarkEvent_rdh
/** * Invokes the trigger for all pending windows up to the * watermark timestamp. The end ts of the window is set * in the eviction policy context so that the events falling * within that window can be processed. */ private void handleWaterMarkEvent(Event<T> event) { long watermarkTs = event.getTimestamp(); long windowEndTs = nextWindowEndTs; if (log.isDebugEnabled()) { log.debug("Window end ts {} Watermark ts {}", windowEndTs, watermarkTs); } while (windowEndTs <= watermarkTs) { long currentCount = windowManager.getEventCount(windowEndTs); evictionPolicy.setContext(new DefaultEvictionContext(windowEndTs, currentCount)); if (handler.onTrigger()) { windowEndTs += slidingIntervalMs; } else { /* No events were found in the previous window interval. Scan through the events in the queue to find the next window intervals based on event ts. */ long ts = getNextAlignedWindowTs(windowEndTs, watermarkTs); if (log.isDebugEnabled()) { log.debug("Next aligned window end ts {}", ts); } if (ts == Long.MAX_VALUE) { if (log.isDebugEnabled()) { log.debug("No events to process between {} and watermark ts {}", windowEndTs, watermarkTs); } break; } windowEndTs = ts; } } nextWindowEndTs = windowEndTs; }
3.26
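A standalone numeric illustration of the window-advance loop in handleWaterMarkEvent above (plain Java, not Pulsar classes): with an assumed sliding interval of 10 ms, a pending window end of 100 and a watermark at 135, windows ending at 100, 110, 120 and 130 fire and the next pending window end becomes 140. Only the happy path is shown; the getNextAlignedWindowTs skip for empty windows is omitted.

public class WatermarkAdvanceSketch {
    public static void main(String[] args) {
        long slidingIntervalMs = 10;  // assumed sliding interval
        long windowEndTs = 100;       // corresponds to nextWindowEndTs before the watermark
        long watermarkTs = 135;       // timestamp carried by the watermark event
        while (windowEndTs <= watermarkTs) {
            System.out.println("fire window ending at " + windowEndTs);
            windowEndTs += slidingIntervalMs; // assume each window had events (onTrigger() == true)
        }
        System.out.println("nextWindowEndTs = " + windowEndTs); // prints 140
    }
}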
pulsar_MetaStore_getManagedLedgerInfo_rdh
/** * Get the metadata used by the ManagedLedger. * * @param ledgerName * the name of the ManagedLedger * @param createIfMissing * whether the managed ledger metadata should be created if it doesn't exist already * @throws MetaStoreException */ default void getManagedLedgerInfo(String ledgerName, boolean createIfMissing, MetaStoreCallback<ManagedLedgerInfo> callback) { getManagedLedgerInfo(ledgerName, createIfMissing, null, callback); }
3.26
pulsar_NamespaceIsolationPolicies_setPolicy_rdh
/** * Set the policy data for a single policy. * * @param policyName * @param policyData */ public void setPolicy(String policyName, NamespaceIsolationData policyData) { policyData.validate(); policies.put(policyName, ((NamespaceIsolationDataImpl) (policyData))); }
3.26
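A hedged sketch of building the policy data passed to setPolicy above, assuming the builder-style NamespaceIsolationData API. The namespace regexes, broker patterns and failover parameters are illustrative; validate() expects at least the namespaces, a primary broker list and an auto-failover policy to be set.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.pulsar.common.policies.data.AutoFailoverPolicyData;
import org.apache.pulsar.common.policies.data.AutoFailoverPolicyType;
import org.apache.pulsar.common.policies.data.NamespaceIsolationData;

public class IsolationPolicySketch {
    public static void main(String[] args) {
        Map<String, String> failoverParams = new HashMap<>();
        failoverParams.put("min_limit", "1");          // illustrative failover parameters
        failoverParams.put("usage_threshold", "100");

        NamespaceIsolationData policyData = NamespaceIsolationData.builder()
                .namespaces(Collections.singletonList("my-tenant/my-ns.*"))    // namespaces covered
                .primary(Collections.singletonList("broker1.example.com.*"))   // primary broker pattern
                .secondary(Collections.singletonList("broker2.example.com.*")) // secondary broker pattern
                .autoFailoverPolicy(AutoFailoverPolicyData.builder()
                        .policyType(AutoFailoverPolicyType.min_available)
                        .parameters(failoverParams)
                        .build())
                .build();

        // policies.setPolicy("my-policy", policyData);  // as in the method above
        System.out.println(policyData);
    }
}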
pulsar_NamespaceIsolationPolicies_getPolicyByNamespace_rdh
/** * Get the namespace isolation policy for the specified namespace. * * <p>There should only be one namespace isolation policy defined for the specific namespace. If multiple policies * match, the first one will be returned. * * @param namespace * @return */ public NamespaceIsolationPolicy getPolicyByNamespace(NamespaceName namespace) { for (NamespaceIsolationData nsPolicyData : policies.values()) { if (this.namespaceMatches(namespace, nsPolicyData)) { return new NamespaceIsolationPolicyImpl(nsPolicyData); } } return null; }
3.26
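The namespaceMatches helper is not shown here; conceptually each policy lists namespace regexes and the first policy whose regex matches the fully qualified namespace name is returned. A standalone illustration of that kind of match, under that assumed semantics (plain Java, not Pulsar code):

public class NamespaceMatchSketch {
    public static void main(String[] args) {
        String namespace = "my-tenant/my-namespace";        // fully qualified namespace name
        String policyRegex = "my-tenant/my-.*";              // regex listed in a policy's namespaces
        System.out.println(namespace.matches(policyRegex));  // true -> this policy would be returned
    }
}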
pulsar_NamespaceIsolationPolicies_getBrokerAssignment_rdh
/** * Get the broker assignment based on the namespace name. * * @param nsPolicy * The namespace isolation policy for the namespace, or null if the namespace is not controlled by any policy * @param brokerAddress * The broker address in the format host:port * @return The broker assignment: {primary, secondary, shared} */ private BrokerAssignment getBrokerAssignment(NamespaceIsolationPolicy nsPolicy, String brokerAddress) { if (nsPolicy != null) { if (nsPolicy.isPrimaryBroker(brokerAddress)) { return BrokerAssignment.primary; } else if (nsPolicy.isSecondaryBroker(brokerAddress)) { return BrokerAssignment.secondary; } throw new IllegalArgumentException(("The broker " + brokerAddress) + " is not among the assigned broker pools for the controlled namespace."); } // Only uncontrolled namespace will be assigned to the shared pool if (!this.isSharedBroker(brokerAddress)) { throw new IllegalArgumentException(("The broker " + brokerAddress) + " is not among the shared broker pools for the uncontrolled namespace."); } return BrokerAssignment.shared; }
3.26
pulsar_NamespaceIsolationPolicies_getPolicyByName_rdh
/** * Access method to get the namespace isolation policy by the policy name. * * @param policyName * @return */ public NamespaceIsolationPolicy getPolicyByName(String policyName) { if (policies.get(policyName) == null) { return null; } return new NamespaceIsolationPolicyImpl(policies.get(policyName)); }
3.26
pulsar_NamespaceIsolationPolicies_isSharedBroker_rdh
/** * Check to see whether a broker is in the shared broker pool or not. * * @param host * @return */ public boolean isSharedBroker(String host) { for (NamespaceIsolationData policyData : this.policies.values()) { NamespaceIsolationPolicyImpl policy = new NamespaceIsolationPolicyImpl(policyData); if (policy.isPrimaryBroker(host)) { // not free for sharing; this broker is a primary broker under some policy return false; } } return true; }
3.26
pulsar_NamespaceIsolationPolicies_deletePolicy_rdh
/** * Delete a policy. * * @param policyName */ public void deletePolicy(String policyName) { policies.remove(policyName); }
3.26
pulsar_AuthorizationProvider_getPermissionsAsync_rdh
/** * Get authorization-action permissions on a namespace. * * @param namespaceName * @return CompletableFuture<Map<String, Set<AuthAction>>> */ default CompletableFuture<Map<String, Set<AuthAction>>> getPermissionsAsync(NamespaceName namespaceName) { return FutureUtil.failedFuture(new IllegalStateException(String.format("getPermissionsAsync on namespaceName %s is not supported by the Authorization provider", namespaceName))); }
3.26
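The default above returns an already-failed future rather than throwing synchronously. A standalone illustration of that pattern and of how a caller can handle it in its async chain (plain Java; failedFuture here is a local helper standing in for Pulsar's FutureUtil):

import java.util.concurrent.CompletableFuture;

public class FailedFutureExample {
    static <T> CompletableFuture<T> failedFuture(Throwable t) {
        CompletableFuture<T> f = new CompletableFuture<>();
        f.completeExceptionally(t); // the future is born failed; no exception is thrown here
        return f;
    }

    public static void main(String[] args) {
        CompletableFuture<Boolean> allowed =
                failedFuture(new IllegalStateException("operation not supported"));
        allowed.exceptionally(ex -> {
            System.out.println("authorization check failed: " + ex.getMessage());
            return false; // treat "unsupported" as "not allowed"
        }).thenAccept(result -> System.out.println("allowed = " + result));
    }
}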
pulsar_AuthorizationProvider_allowNamespacePolicyOperation_rdh
/** * * @deprecated - will be removed after 2.12. Use async variant. */ @Deprecated default Boolean allowNamespacePolicyOperation(NamespaceName namespaceName, PolicyName policy, PolicyOperation operation, String role, AuthenticationDataSource authData) { try { return allowNamespacePolicyOperationAsync(namespaceName, policy, operation, role, authData).get(); } catch (InterruptedException e) { throw new RestException(e); } catch (ExecutionException e) { throw new RestException(e.getCause()); } }
3.26
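The deprecated method above (and the similar ones below) bridges the async API to a blocking call by waiting on the future and unwrapping ExecutionException so the real cause is rethrown. A standalone illustration of that bridge with plain Java types; RuntimeException stands in for Pulsar's RestException, and the interrupt flag is restored as an extra precaution.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class SyncBridgeExample {
    static CompletableFuture<Boolean> allowAsync() {
        return CompletableFuture.completedFuture(true); // stand-in for allowXxxOperationAsync(...)
    }

    static Boolean allowSync() {
        try {
            return allowAsync().get();           // block until the async check completes
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();  // restore the interrupt flag
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause()); // unwrap and rethrow the real cause
        }
    }

    public static void main(String[] args) {
        System.out.println(allowSync());
    }
}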
pulsar_AuthorizationProvider_allowTopicOperationAsync_rdh
/** * Check if a given <tt>role</tt> is allowed to execute a given topic <tt>operation</tt> on the topic. * * @param topic * topic name * @param role * role name * @param operation * topic operation * @param authData * authenticated data * @return CompletableFuture<Boolean> */ default CompletableFuture<Boolean> allowTopicOperationAsync(TopicName topic, String role, TopicOperation operation, AuthenticationDataSource authData) { return FutureUtil.failedFuture(new IllegalStateException((("TopicOperation [" + operation.name()) + "] is not supported by the Authorization ") + "provider you are using.")); }
3.26
pulsar_AuthorizationProvider_allowNamespaceOperation_rdh
/** * * @deprecated - will be removed after 2.12. Use async variant. */ @Deprecated default Boolean allowNamespaceOperation(NamespaceName namespaceName, String role, NamespaceOperation operation, AuthenticationDataSource authData) { try { return allowNamespaceOperationAsync(namespaceName, role, operation, authData).get(); } catch (InterruptedException e) { throw new RestException(e); } catch (ExecutionException e) { throw new RestException(e.getCause()); } }
3.26
pulsar_AuthorizationProvider_allowTenantOperation_rdh
/** * * @deprecated - will be removed after 2.12. Use async variant. */ @Deprecated default Boolean allowTenantOperation(String tenantName, String role, TenantOperation operation, AuthenticationDataSource authData) { try { return allowTenantOperationAsync(tenantName, role, operation, authData).get(); } catch (InterruptedException e) { throw new RestException(e); } catch (ExecutionException e) { throw new RestException(e.getCause()); } }
3.26