name | code_snippet | score
---|---|---|
pulsar_AuthorizationProvider_removePermissionsAsync_rdh | /**
* Remove authorization-action permissions on a topic.
*
* @param topicName
* @return CompletableFuture<Void>
*/
default CompletableFuture<Void> removePermissionsAsync(TopicName topicName) {
return CompletableFuture.completedFuture(null);
} | 3.26 |
pulsar_AuthorizationProvider_allowNamespaceOperationAsync_rdh | /**
* Check if a given <tt>role</tt> is allowed to execute a given <tt>operation</tt> on the namespace.
*
* @param namespaceName
* namespace name
* @param role
* role name
* @param operation
* namespace operation
* @param authData
* authenticated data
* @return a completable future representing the check result
*/
default CompletableFuture<Boolean> allowNamespaceOperationAsync(NamespaceName namespaceName, String role,
        NamespaceOperation operation, AuthenticationDataSource authData) {
return FutureUtil.failedFuture(new IllegalStateException("NamespaceOperation [" + operation.name()
        + "] is not supported by the Authorization provider you are using."));
} | 3.26 |
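The default above fails the returned future rather than completing it with `false`, so callers have to treat "operation not supported by the provider" and "role denied" differently. A minimal caller-side sketch (the class and method names here are hypothetical, not part of Pulsar):

```java
import java.util.concurrent.CompletableFuture;

final class NamespaceOperationCheck {
    // Converts the provider's Boolean future into a Void future that fails
    // both when the role is denied and when the provider cannot answer.
    static CompletableFuture<Void> requireAllowed(CompletableFuture<Boolean> isAllowedFuture, String role) {
        return isAllowedFuture.handle((allowed, ex) -> {
            if (ex != null) {
                // The default implementation lands here: the operation is not supported by the provider.
                throw new SecurityException("authorization check failed", ex);
            }
            if (!allowed) {
                throw new SecurityException("role " + role + " is not authorized");
            }
            return null;
        });
    }
}
```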
pulsar_AuthorizationProvider_initialize_rdh | /**
* Perform initialization for the authorization provider.
*
* @param conf
* broker config object
* @param pulsarResources
* Resources component for access to metadata
* @throws IOException
* if the initialization fails
*/
default void initialize(ServiceConfiguration conf, PulsarResources pulsarResources) throws IOException {
} | 3.26 |
pulsar_AuthorizationProvider_allowTenantOperationAsync_rdh | /**
* Check if a given <tt>role</tt> is allowed to execute a given <tt>operation</tt> on the tenant.
*
* @param tenantName
* tenant name
* @param role
* role name
* @param operation
* tenant operation
* @param authData
* authenticated data of the role
* @return a completable future representing the check result
*/
default CompletableFuture<Boolean> allowTenantOperationAsync(String tenantName, String role,
        TenantOperation operation, AuthenticationDataSource authData) {
return FutureUtil.failedFuture(new IllegalStateException(String.format(
        "allowTenantOperation(%s) on tenant %s is not supported by the Authorization provider you are using.",
        operation.toString(), tenantName)));
} | 3.26 |
pulsar_AuthorizationProvider_revokePermissionAsync_rdh | /**
* Revoke authorization-action permission on a topic to the given client.
*
* @param topicName
* @param role
* @return CompletableFuture<Void>
*/
default CompletableFuture<Void> revokePermissionAsync(TopicName topicName, String role) {
return FutureUtil.failedFuture(new IllegalStateException(String.format(
        "revokePermissionAsync on topicName %s is not supported by the Authorization provider you are using.",
        topicName)));
} | 3.26 |
pulsar_AuthorizationProvider_allowTopicPolicyOperation_rdh | /**
*
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
default Boolean allowTopicPolicyOperation(TopicName topicName, String role, PolicyName policy, PolicyOperation operation, AuthenticationDataSource authData) {
try {
return allowTopicPolicyOperationAsync(topicName, role, policy, operation, authData).get();
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.26 |
pulsar_AuthorizationProvider_allowTopicOperation_rdh | /**
*
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
default Boolean allowTopicOperation(TopicName topicName, String role, TopicOperation operation, AuthenticationDataSource authData) {
try {
return allowTopicOperationAsync(topicName, role, operation, authData).get();
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.26 |
pulsar_AuthorizationProvider_allowTopicPolicyOperationAsync_rdh | /**
* Check if a given <tt>role</tt> is allowed to execute a given topic <tt>operation</tt> on topic's <tt>policy</tt>.
*
* @param topic
* topic name
* @param role
* role name
* @param policy
* policy name
* @param operation
* topic operation
* @param authData
* authenticated data
* @return CompletableFuture<Boolean>
*/
default CompletableFuture<Boolean> allowTopicPolicyOperationAsync(TopicName topic, String role, PolicyName policy, PolicyOperation operation, AuthenticationDataSource authData) {
return FutureUtil.failedFuture(new IllegalStateException("TopicPolicyOperation [" + policy.name() + "/" + operation.name()
        + "] is not supported by the Authorization provider you are using."));
} | 3.26 |
pulsar_SecurityUtil_loginKerberos_rdh | /**
* Initializes UserGroupInformation with the given Configuration and performs the login for the
* given principal and keytab. All logins should happen through this class to ensure other threads
* are not concurrently modifying UserGroupInformation.
* <p/>
*
* @param config
* the configuration instance
* @param principal
* the principal to authenticate as
* @param keyTab
* the keytab to authenticate with
* @return the UGI for the given principal
* @throws IOException
* if login failed
*/
public static synchronized UserGroupInformation loginKerberos(final Configuration config, final String principal, final String keyTab) throws IOException {
Validate.notNull(config);
Validate.notNull(principal);
Validate.notNull(keyTab);
UserGroupInformation.setConfiguration(config);
UserGroupInformation.loginUserFromKeytab(principal.trim(), keyTab.trim());
return UserGroupInformation.getCurrentUser();
} | 3.26 |
pulsar_SecurityUtil_isSecurityEnabled_rdh | /**
* Initializes UserGroupInformation with the given Configuration and returns
* UserGroupInformation.isSecurityEnabled().
* All checks for isSecurityEnabled() should happen through this method.
*
* @param config
* the given configuration
* @return true if kerberos is enabled on the given configuration, false otherwise
*/
public static boolean isSecurityEnabled(final Configuration config) {
Validate.notNull(config);
return KERBEROS.equalsIgnoreCase(config.get(HADOOP_SECURITY_AUTHENTICATION));
} | 3.26 |
pulsar_SecurityUtil_loginSimple_rdh | /**
* Initializes UserGroupInformation with the given Configuration and
* returns UserGroupInformation.getLoginUser(). All logins should happen
* through this class to ensure other threads are not concurrently
* modifying UserGroupInformation.
*
* @param config
* the configuration instance
* @return the UGI for the current login user
* @throws IOException
* if login failed
*/
public static synchronized UserGroupInformation loginSimple(final Configuration config) throws IOException {
Validate.notNull(config);
UserGroupInformation.setConfiguration(config);
return UserGroupInformation.getLoginUser();
} | 3.26 |
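A hedged usage sketch tying the three SecurityUtil helpers together: the Hadoop Configuration decides whether Kerberos is enabled, and the principal and keytab values below are placeholders, not real settings.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public final class HadoopLoginExample {
    // Logs in with Kerberos when the configuration enables it, otherwise falls back to simple login.
    public static UserGroupInformation login(Configuration hadoopConf) throws IOException {
        if (SecurityUtil.isSecurityEnabled(hadoopConf)) {
            return SecurityUtil.loginKerberos(hadoopConf,
                    "offloader@EXAMPLE.COM",                    // placeholder principal
                    "/etc/security/keytabs/offloader.keytab");  // placeholder keytab path
        }
        return SecurityUtil.loginSimple(hadoopConf);
    }
}
```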
pulsar_MessageCryptoBc_createIESParameterSpec_rdh | // Required since Bouncycastle 1.72: when using ECIES, an IESParameterSpec must be passed in
public static IESParameterSpec createIESParameterSpec() {
// the IESParameterSpec to use was discovered by debugging BouncyCastle 1.69 and running the
// test org.apache.pulsar.client.api.SimpleProducerConsumerTest#testCryptoWithChunking
return new IESParameterSpec(null, null, 128);
} | 3.26 |
pulsar_ManagedCursorContainer_add_rdh | /**
* Add a cursor to the container. The cursor will be optionally tracked for the slowest reader when
* a position is passed as the second argument. It is expected that the position is updated with
* {@link #cursorUpdated(ManagedCursor, Position)} method when the position changes.
*
* @param cursor
* cursor to add
* @param position
* position of the cursor to use for ordering, pass null if the cursor's position shouldn't be
* tracked for the slowest reader.
*/
public void add(ManagedCursor cursor, Position position) {
long stamp = f0.writeLock();
try {
Item item = new Item(cursor, ((PositionImpl) (position)), position != null ? heap.size() : -1);
cursors.put(cursor.getName(), item);
if (position != null) {
heap.add(item);
if (heap.size() > 1) {
siftUp(item);
}
}
if (cursor.isDurable()) {
durableCursorCount++;
}
} finally {
f0.unlockWrite(stamp);
}
} | 3.26 |
pulsar_ManagedCursorContainer_m0_rdh | /**
* Check whether there are any cursors.
*
* @return true if there are no cursors and false if there are
*/
public boolean m0() {
long stamp = f0.tryOptimisticRead();
boolean isEmpty = cursors.isEmpty();
if (!f0.validate(stamp)) {
// Fallback to read lock
stamp = f0.readLock();
try {
isEmpty = cursors.isEmpty();
} finally {
f0.unlockRead(stamp);
}
}
return isEmpty;
} | 3.26 |
pulsar_ManagedCursorContainer_hasDurableCursors_rdh | /**
* Check whether there are any durable cursors.
*
* @return true if there are durable cursors and false if there are not
*/
public boolean hasDurableCursors() {
long stamp = f0.tryOptimisticRead();
int count = durableCursorCount;
if (!f0.validate(stamp)) {
// Fallback to read lock
stamp = f0.readLock();
try {
count = durableCursorCount;
} finally {
f0.unlockRead(stamp);
}
}
return count > 0;
} | 3.26 |
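Both m0() and hasDurableCursors() use the same StampedLock optimistic-read idiom on the field f0: try a lock-free read first and fall back to a full read lock only when a concurrent write invalidates the optimistic stamp. A self-contained sketch of that idiom (plain JDK, not Pulsar code):

```java
import java.util.concurrent.locks.StampedLock;

final class OptimisticReadCounter {
    private final StampedLock lock = new StampedLock();
    private int count;

    int read() {
        long stamp = lock.tryOptimisticRead();
        int snapshot = count;               // read without holding any lock
        if (!lock.validate(stamp)) {        // a writer raced with us; retry under the read lock
            stamp = lock.readLock();
            try {
                snapshot = count;
            } finally {
                lock.unlockRead(stamp);
            }
        }
        return snapshot;
    }

    void increment() {
        long stamp = lock.writeLock();
        try {
            count++;
        } finally {
            lock.unlockWrite(stamp);
        }
    }
}
```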
pulsar_ManagedCursorContainer_siftDown_rdh | /**
* Push the item down towards the bottom of the tree (the highest reading position).
*/
private void siftDown(final Item item) {
while (true) {
Item j = null;
Item right = getRight(item);
if ((right != null) && (right.position.compareTo(item.position) < 0)) {
Item left = getLeft(item);
if ((left != null) && (left.position.compareTo(right.position) < 0)) {
j = left;
} else {
j = right;
}
} else {
Item left = getLeft(item);
if ((left != null) && (left.position.compareTo(item.position) < 0)) {
j = left;
}
}
if (j != null) {
swap(item, j);
} else {
break;
}
}
} | 3.26 |
pulsar_ManagedCursorContainer_getSlowestReaderPosition_rdh | /**
* Get the slowest reader position for the cursors that are ordered.
*
* @return the slowest reader position
*/
public PositionImpl getSlowestReaderPosition() {
long stamp = f0.readLock();
try {
return heap.isEmpty() ? null : heap.get(0).position;
} finally {
f0.unlockRead(stamp);
}
} | 3.26 |
pulsar_ManagedCursorContainer_swap_rdh | /**
* Swap two items in the heap.
*/
private void swap(Item item1, Item item2) {
int idx1 = item1.idx;
int idx2 = item2.idx;
heap.set(idx2, item1);
heap.set(idx1, item2);
// Update the indexes too
item1.idx = idx2;
item2.idx = idx1;
} | 3.26 |
pulsar_ManagedCursorContainer_siftUp_rdh | // //////////////////////
/**
* Push the item up towards the root of the tree (the lowest reading position).
*/
private void siftUp(Item item) {
Item parent = getParent(item);
while ((item.idx > 0) && (parent.position.compareTo(item.position) > 0)) {
swap(item, parent);
parent = getParent(item);
}
} | 3.26 |
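siftUp and siftDown rely on getParent/getLeft/getRight helpers that are not part of the snippet. For an array-backed binary heap where each Item carries its own index (idx), they are typically computed as below; this is a sketch of the assumed layout, not the actual Pulsar helpers.

```java
// Parent of index i is (i - 1) / 2; children are 2i + 1 and 2i + 2.
private Item getParent(Item item) {
    return heap.get((item.idx - 1) / 2);
}

private Item getLeft(Item item) {
    int left = 2 * item.idx + 1;
    return left < heap.size() ? heap.get(left) : null;
}

private Item getRight(Item item) {
    int right = 2 * item.idx + 2;
    return right < heap.size() ? heap.get(right) : null;
}
```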
pulsar_PulsarSaslServer_getAuthorizationID_rdh | /**
* Reports the authorization ID in effect for the client of this
* session.
* This method can only be called if isComplete() returns true.
*
* @return The authorization ID of the client.
* @exception IllegalStateException
* if this authentication session has not completed
*/
public String getAuthorizationID() throws IllegalStateException {
return saslServer.getAuthorizationID();
} | 3.26 |
pulsar_ProxyExtensions_extension_rdh | /**
* Return the handler for the provided <tt>extension</tt>.
*
* @param extension
* the extension to use
* @return the extension to handle the provided extension
*/
public ProxyExtension extension(String extension) {
ProxyExtensionWithClassLoader h = extensions.get(extension);
if (null == h) {
return null;
} else {
return h.getExtension();
}
} | 3.26 |
pulsar_ProxyExtensions_load_rdh | /**
* Load the extensions for the given <tt>extensions</tt> list.
*
* @param conf
* the pulsar broker service configuration
* @return the collection of extensions
*/
public static ProxyExtensions load(ProxyConfiguration conf) throws IOException {
ExtensionsDefinitions definitions =
        ProxyExtensionsUtils.searchForExtensions(conf.getProxyExtensionsDirectory(), conf.getNarExtractionDirectory());
ImmutableMap.Builder<String, ProxyExtensionWithClassLoader> v1 = ImmutableMap.builder();
conf.getProxyExtensions().forEach(extensionName -> {
    ProxyExtensionMetadata definition = definitions.extensions().get(extensionName);
    if (null == definition) {
        throw new RuntimeException("No extension is found for extension name `" + extensionName
                + "`. Available extensions are : " + definitions.extensions());
    }
    ProxyExtensionWithClassLoader extension;
    try {
        extension = ProxyExtensionsUtils.load(definition, conf.getNarExtractionDirectory());
    } catch (IOException e) {
        log.error("Failed to load the extension for extension `" + extensionName + "`", e);
        throw new RuntimeException("Failed to load the extension for extension name `" + extensionName + "`");
    }
    if (!extension.accept(extensionName)) {
        extension.close();
        log.error("Malformed extension found for extensionName `" + extensionName + "`");
        throw new RuntimeException("Malformed extension found for extension name `" + extensionName + "`");
    }
    v1.put(extensionName, extension);
    log.info("Successfully loaded extension for extension name `{}`", extensionName);
});
return new ProxyExtensions(v1.build());
} | 3.26 |
pulsar_ByteBufPair_coalesce_rdh | /**
*
* @return a single buffer with the content of both individual buffers
*/
@VisibleForTesting
public static ByteBuf coalesce(ByteBufPair pair) {
ByteBuf b = Unpooled.buffer(pair.readableBytes());
b.writeBytes(pair.b1, pair.b1.readerIndex(), pair.b1.readableBytes());
b.writeBytes(pair.b2, pair.b2.readerIndex(), pair.b2.readableBytes());
return b;
} | 3.26 |
pulsar_ByteBufPair_m0_rdh | /**
* Get a new {@link ByteBufPair} from the pool and assign 2 buffers to it.
*
* <p>The buffers b1 and b2 lifecycles are now managed by the ByteBufPair:
* when the {@link ByteBufPair} is deallocated, b1 and b2 will be released as well.
*
* @param b1
* @param b2
* @return a pooled {@link ByteBufPair} holding b1 and b2
*/
public static ByteBufPair m0(ByteBuf b1, ByteBuf b2) {
ByteBufPair buf = RECYCLER.get();
buf.setRefCnt(1);
buf.b1 = b1;
buf.b2 = b2;
return buf;
} | 3.26 |
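A hypothetical usage of the two methods above, flattening a header/payload pair for a test assertion. It assumes ByteBufPair is reference-counted (as the setRefCnt(1) call suggests), so releasing the pair releases both wrapped buffers.

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

public final class ByteBufPairExample {
    public static void main(String[] args) {
        ByteBuf header = Unpooled.copiedBuffer("header:", StandardCharsets.UTF_8);
        ByteBuf payload = Unpooled.copiedBuffer("payload", StandardCharsets.UTF_8);

        ByteBufPair pair = ByteBufPair.m0(header, payload);   // ownership of both buffers moves to the pair
        ByteBuf flat = ByteBufPair.coalesce(pair);            // single buffer: "header:payload"

        System.out.println(flat.toString(StandardCharsets.UTF_8));

        flat.release();
        pair.release();   // assumed to release header and payload as well
    }
}
```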
pulsar_TlsHostnameVerifier_normaliseAddress_rdh | /* Normalize IPv6 or DNS name. */
static String normaliseAddress(final String hostname) {
if (hostname == null) {
return hostname;
}
try {
final InetAddress inetAddress = InetAddress.getByName(hostname);
return inetAddress.getHostAddress();
} catch (final UnknownHostException unexpected) {
// Should not happen, because we check for IPv6 address above
return hostname;
}
} | 3.26 |
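What the normalisation buys, illustrated with the JDK directly (not the Pulsar helper): two spellings of the same IPv6 address collapse to one canonical form, which makes certificate-name comparison reliable.

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

public final class NormaliseExample {
    public static void main(String[] args) throws UnknownHostException {
        // Both literals resolve to the same canonical host address: 2001:db8:0:0:0:0:0:1
        System.out.println(InetAddress.getByName("2001:db8:0:0:0:0:0:1").getHostAddress());
        System.out.println(InetAddress.getByName("2001:db8::1").getHostAddress());
    }
}
```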
pulsar_CmdRead_run_rdh | /**
* Run the read command.
*
* @return 0 for success, < 0 otherwise
*/
public int run() throws PulsarClientException, IOException {
if (mainOptions.size() != 1) {
throw new ParameterException("Please provide one and only one topic name.");
}
if (this.numMessagesToRead < 0) {
throw new ParameterException("Number of messages should be zero or positive.");
}
String topic = this.mainOptions.get(0);
if (this.serviceURL.startsWith("ws")) {
return readFromWebSocket(topic);
} else {
return read(topic);
}
} | 3.26 |
pulsar_SaslAuthenticationDataProvider_authenticate_rdh | // Create the token that is evaluated by the client and will be sent to the server.
@Override
public AuthData authenticate(AuthData commandData) throws AuthenticationException {
// init
if (Arrays.equals(commandData.getBytes(), AuthData.INIT_AUTH_DATA_BYTES)) {
if (pulsarSaslClient.hasInitialResponse()) {
return pulsarSaslClient.evaluateChallenge(AuthData.of(new byte[0]));
}
return AuthData.of(new byte[0]);
}
return pulsarSaslClient.evaluateChallenge(commandData);
} | 3.26 |
pulsar_Authentication_authenticationStage_rdh | /**
* An authentication stage.
* When authentication completes, the passed-in authFuture will contain the authentication-related HTTP request headers.
*/
default void authenticationStage(String requestUrl, AuthenticationDataProvider authData,
        Map<String, String> previousResHeaders, CompletableFuture<Map<String, String>> authFuture) {
authFuture.complete(null);
} | 3.26 |
pulsar_Authentication_getAuthData_rdh | /**
* Get/Create an authentication data provider which provides the data that this client will be sent to the broker.
* Some authentication methods need to authenticate each client channel, so they need to know which broker the channel will talk to.
*
* @param brokerHostName
* target broker host name
* @return The authentication data provider
*/
default AuthenticationDataProvider getAuthData(String brokerHostName) throws PulsarClientException {
return this.getAuthData();
} | 3.26 |
pulsar_Authentication_newRequestHeader_rdh | /**
* Add an authenticationStage that will complete along with authFuture.
*/
default Set<Entry<String, String>> newRequestHeader(String hostName, AuthenticationDataProvider authData, Map<String, String> previousResHeaders) throws Exception {
return authData.getHttpHeaders();
} | 3.26 |
pulsar_PerformanceBaseArguments_parseCLI_rdh | /**
* Parse the command line args.
*
* @param cmdName
* used for the help message
* @param args
* String[] of CLI args
* @throws ParameterException
* If there is a problem parsing the arguments
*/
public void parseCLI(String cmdName, String[] args) {
JCommander jc = new JCommander(this);
jc.setProgramName(cmdName);
try {
jc.parse(args);
} catch (ParameterException e) {
System.out.println("error: " + e.getMessage());
jc.usage();
PerfClientUtils.exit(1);
}
if (help) {
jc.usage();
PerfClientUtils.exit(0);
}
fillArgumentsFromProperties();
if (isBlank(authPluginClassName) && (!isBlank(deprecatedAuthPluginClassName))) {
authPluginClassName = deprecatedAuthPluginClassName;
}
try {
validate();
} catch (Exception e) {
System.out.println("error: " + e.getMessage());
PerfClientUtils.exit(1);
}
} | 3.26 |
pulsar_PerformanceBaseArguments_validate_rdh | /**
* Validate the CLI arguments. Default implementation provides validation for the common arguments.
* Each subclass should call super.validate() and provide validation code specific to the sub-command.
*
* @throws Exception
* if validation fails
*/
public void validate() throws Exception {
if ((confFile != null) && (!confFile.isBlank())) {
File configFile = new File(confFile);
if (!configFile.exists()) {
throw new Exception("config file '" + confFile + "' does not exist");
}
if (configFile.isDirectory()) {
throw new Exception("config file '" + confFile + "' is a directory");
}
}
} | 3.26 |
pulsar_MetadataStore_getDefaultMetadataCacheConfig_rdh | /**
* Returns the default metadata cache config.
*
* @return default metadata cache config
*/
default MetadataCacheConfig getDefaultMetadataCacheConfig() {
return MetadataCacheConfig.builder().build();
} | 3.26 |
pulsar_MetadataStore_sync_rdh | /**
* Ensure that the next value read from the local client will be up-to-date with the latest version of the value
* as it can be seen by all the other clients.
*
* @param path
* @return a handle to the operation
*/
default CompletableFuture<Void> sync(String path) {
return CompletableFuture.completedFuture(null);
} | 3.26 |
pulsar_MetadataStore_getMetadataCache_rdh | /**
* Create a metadata cache that uses a particular serde object.
*
* @param <T>
* @param serde
* the custom serialization/deserialization object
* @return the metadata cache object
*/
default <T> MetadataCache<T> getMetadataCache(MetadataSerde<T> serde) {
return getMetadataCache(serde, getDefaultMetadataCacheConfig());
} | 3.26 |
pulsar_FunctionApiResource_clientAppId_rdh | /**
*
* @deprecated use {@link #authParams()} instead.
*/
@Deprecated
public String clientAppId() {
return httpRequest != null ? ((String) (httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName))) : null;
} | 3.26 |
pulsar_FunctionApiResource_clientAuthData_rdh | /**
*
* @deprecated use {@link #authParams()} instead.
*/
@Deprecated
public AuthenticationDataSource clientAuthData() {
return ((AuthenticationDataSource) (httpRequest.getAttribute(AuthenticationFilter.AuthenticatedDataAttributeName)));
} | 3.26 |
pulsar_ConsumerInterceptor_onPartitionsChange_rdh | /**
* This method is called when partitions of the topic (partitioned-topic) changes.
*
* @param topicName
* topic name
* @param partitions
* new updated number of partitions
*/
default void onPartitionsChange(String topicName, int partitions) {
} | 3.26 |
pulsar_SchemaDefinition_builder_rdh | /**
* Interface for schema definition.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface SchemaDefinition<T> {
/**
* Get a new builder instance that can used to configure and build a {@link SchemaDefinition} instance.
*
* @return the {@link SchemaDefinitionBuilder}
*/
static <T> SchemaDefinitionBuilder<T> builder() {
return DefaultImplementation.getDefaultImplementation().newSchemaDefinitionBuilder();
} | 3.26 |
pulsar_MongoSourceConfig_setSyncType_rdh | /**
*
* @param syncTypeStr
* Sync type string.
*/
private void setSyncType(String syncTypeStr) {
// if syncType is not set, the default sync type is used
if (StringUtils.isEmpty(syncTypeStr)) {
this.syncType = DEFAULT_SYNC_TYPE;
return;
}
// if syncType is set but not correct, an exception will be thrown
try {
this.syncType = SyncType.valueOf(syncTypeStr.toUpperCase());
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("The value of the syncType field is incorrect.");
}
} | 3.26 |
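The same defensive enum-parsing idiom in isolation, with a hypothetical SyncType enum (the real constant names live in the connector's own enum and may differ):

```java
enum SyncType { FULL_SYNC, INCR_SYNC }   // hypothetical constants

final class SyncTypeParser {
    static SyncType parse(String raw, SyncType defaultValue) {
        if (raw == null || raw.isEmpty()) {
            return defaultValue;                        // fall back to the default when unset
        }
        try {
            return SyncType.valueOf(raw.toUpperCase()); // case-insensitive match against the enum names
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("The value of the syncType field is incorrect: " + raw, e);
        }
    }
}
```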
pulsar_ManagedCursorMetrics_aggregate_rdh | /**
* Aggregation by namespace, ledger, cursor.
*
* @return List<Metrics>
*/
private List<Metrics> aggregate() {
f0.clear();
for (Map.Entry<String, ManagedLedgerImpl> e : getManagedLedgers().entrySet()) {
String ledgerName = e.getKey();
ManagedLedgerImpl ledger = e.getValue();
String namespace = parseNamespaceFromLedgerName(ledgerName);
ManagedCursorContainer cursorContainer = ledger.getCursors();
Iterator<ManagedCursor> cursorIterator = cursorContainer.iterator();
while (cursorIterator.hasNext()) {
ManagedCursorImpl cursor = ((ManagedCursorImpl) (cursorIterator.next()));
ManagedCursorMXBean cStats = cursor.getStats();
dimensionMap.clear();
dimensionMap.put("namespace", namespace);
dimensionMap.put("ledger_name", ledgerName);
dimensionMap.put("cursor_name", cursor.getName());
Metrics metrics = createMetrics(dimensionMap);
metrics.put("brk_ml_cursor_nonContiguousDeletedMessagesRange",
((long) (cursor.getTotalNonContiguousDeletedMessagesRange())));
metrics.put("brk_ml_cursor_persistLedgerSucceed", cStats.getPersistLedgerSucceed());
metrics.put("brk_ml_cursor_persistLedgerErrors", cStats.getPersistLedgerErrors());
metrics.put("brk_ml_cursor_persistZookeeperSucceed", cStats.getPersistZookeeperSucceed());
metrics.put("brk_ml_cursor_persistZookeeperErrors", cStats.getPersistZookeeperErrors());
metrics.put("brk_ml_cursor_writeLedgerSize", cStats.getWriteCursorLedgerSize());
metrics.put("brk_ml_cursor_writeLedgerLogicalSize", cStats.getWriteCursorLedgerLogicalSize());
metrics.put("brk_ml_cursor_readLedgerSize", cStats.getReadCursorLedgerSize());
f0.add(metrics);
}
}
return f0;
} | 3.26 |
pulsar_BucketDelayedDeliveryTrackerFactory_cleanResidualSnapshots_rdh | /**
* Clean up residual snapshot data.
* If tracker has not been created or has been closed, then we can't clean up the snapshot with `tracker.clear`,
* this method can clean up the residual snapshots without creating a tracker.
*/
public CompletableFuture<Void> cleanResidualSnapshots(ManagedCursor cursor) {
Map<String, String> cursorProperties = cursor.getCursorProperties();
if (MapUtils.isEmpty(cursorProperties)) {
return CompletableFuture.completedFuture(null);
}
List<CompletableFuture<Void>> futures = new ArrayList<>();
FutureUtil.Sequencer<Void> sequencer = FutureUtil.Sequencer.create();
cursorProperties.forEach((k, v) -> {
if (((k != null) && (v != null)) && k.startsWith(BucketDelayedDeliveryTracker.DELAYED_BUCKET_KEY_PREFIX)) {
CompletableFuture<Void> future = sequencer.sequential(() ->
        cursor.removeCursorProperty(k)
                .thenCompose(__ -> bucketSnapshotStorage.deleteBucketSnapshot(Long.parseLong(v))));
futures.add(future);
}
});
return FutureUtil.waitForAll(futures);
} | 3.26 |
pulsar_MetadataStoreExtended_getMetadataEventSynchronizer_rdh | /**
* Get {@link MetadataEventSynchronizer} to notify and synchronize metadata events.
*
* @return the {@link MetadataEventSynchronizer}, if one is configured
*/
default Optional<MetadataEventSynchronizer> getMetadataEventSynchronizer() {
return Optional.empty();
} | 3.26 |
pulsar_MetadataStoreExtended_handleMetadataEvent_rdh | /**
* Handles a metadata synchronizer event.
*
* @param event
* @return completed future when the event is handled
*/
default CompletableFuture<Void> handleMetadataEvent(MetadataEvent event) {
return CompletableFuture.completedFuture(null);
} | 3.26 |
pulsar_StreamingDataBlockHeaderImpl_fromStream_rdh | // Construct DataBlockHeader from InputStream, which contains `HEADER_MAX_SIZE` bytes readable.
public static StreamingDataBlockHeaderImpl fromStream(InputStream stream) throws IOException {
CountingInputStream countingStream = new CountingInputStream(stream);
DataInputStream dis = new DataInputStream(countingStream);
int magic = dis.readInt();
if (magic != MAGIC_WORD) {
throw new IOException((("Data block header magic word not match. read: " + magic) + " expected: ") + MAGIC_WORD);
}
long headerLen = dis.readLong();
long blockLen = dis.readLong();
long firstEntryId = dis.readLong();
long ledgerId = dis.readLong();
long toSkip = headerLen - countingStream.getCount();
if (dis.skip(toSkip) != toSkip) {
throw new EOFException("Header was too small");
}
return new StreamingDataBlockHeaderImpl(headerLen, blockLen, ledgerId, firstEntryId);
} | 3.26 |
pulsar_StreamingDataBlockHeaderImpl_toStream_rdh | /**
* Get the content of the data block header as an InputStream.
* Written out in the format:
* [ magic_word -- int ][ header_len -- long ][ block_len -- long ][ first_entry_id -- long ][ ledger_id -- long ][ padding zeros ]
*/
@Override
public InputStream toStream() {
ByteBuf out = PulsarByteBufAllocator.DEFAULT.buffer(HEADER_MAX_SIZE, HEADER_MAX_SIZE);
out.writeInt(MAGIC_WORD).writeLong(headerLength).writeLong(blockLength).writeLong(firstEntryId).writeLong(ledgerId).writeBytes(PADDING);
// true means the input stream will release the ByteBuf on close
return new ByteBufInputStream(out, true);
} | 3.26 |
pulsar_AdditionalServlets_load_rdh | /**
* Load the additional servlet for the given <tt>servlet name</tt> list.
*
* @param conf
* the pulsar service configuration
* @return the collection of additional servlet
*/
public static AdditionalServlets load(PulsarConfiguration conf) throws IOException {
String additionalServletDirectory = conf.getProperties().getProperty(ADDITIONAL_SERVLET_DIRECTORY);
if (additionalServletDirectory == null) {
// Compatible with the current proxy configuration
additionalServletDirectory = conf.getProperties().getProperty(PROXY_ADDITIONAL_SERVLET_DIRECTORY);
}
String additionalServlets = conf.getProperties().getProperty(ADDITIONAL_SERVLETS);
if (additionalServlets == null) {
additionalServlets = conf.getProperties().getProperty(PROXY_ADDITIONAL_SERVLETS);
}
String narExtractionDirectory = conf.getProperties().getProperty(f0);
if (StringUtils.isBlank(narExtractionDirectory)) {
narExtractionDirectory = NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR;
}
if ((additionalServletDirectory == null) || (additionalServlets == null)) {
return null;
}
AdditionalServletDefinitions definitions = AdditionalServletUtils.searchForServlets(additionalServletDirectory, narExtractionDirectory);
ImmutableMap.Builder<String, AdditionalServletWithClassLoader> builder = ImmutableMap.builder();
String[] additionalServletsList = additionalServlets.split(",");
for (String servletName : additionalServletsList) {
AdditionalServletMetadata definition = definitions.servlets().get(servletName);
if (null == definition) {
throw new RuntimeException("No additional servlet is found for name `" + servletName
        + "`. Available additional servlet are : " + definitions.servlets());
}
AdditionalServletWithClassLoader servletWithClassLoader;
try {
servletWithClassLoader = AdditionalServletUtils.load(definition, narExtractionDirectory);
if (servletWithClassLoader != null) {
builder.put(servletName, servletWithClassLoader);
}
log.info("Successfully loaded additional servlet for name `{}`", servletName);
} catch (IOException e) {
log.error(("Failed to load the additional servlet for name `" + servletName) + "`", e);
throw new RuntimeException("Failed to load the additional servlet for name `" + servletName + "`");
}
}
Map<String, AdditionalServletWithClassLoader> servlets = builder.build();
if ((servlets != null) && (!servlets.isEmpty())) {
return new AdditionalServlets(servlets);
}
return null;
} | 3.26 |
pulsar_ConsumerBase_isValidConsumerEpoch_rdh | // If the message's consumer epoch is smaller than the current consumer epoch, the message was sent to the
// client before the user called redeliverUnacknowledgedMessages, so it is invalid:
// we should release this message and receive again.
protected boolean isValidConsumerEpoch(MessageImpl<T> message) {
if ((((getSubType() == SubType.Failover) || (getSubType() == SubType.Exclusive)) && (message.getConsumerEpoch() != DEFAULT_CONSUMER_EPOCH)) && (message.getConsumerEpoch() < CONSUMER_EPOCH.get(this))) {
log.info("Consumer filter old epoch message, topic : [{}], messageId : [{}], messageConsumerEpoch : [{}], " + "consumerEpoch : [{}]", topic, message.getMessageId(), message.getConsumerEpoch(), consumerEpoch);
message.release();
message.recycle();
return false;
}
return true;
} | 3.26 |
pulsar_ConsumerBase_trackUnAckedMsgIfNoListener_rdh | // if listener is not null, we will track unAcked msg in callMessageListener
protected void trackUnAckedMsgIfNoListener(MessageId messageId, int redeliveryCount) {
if (listener == null) {
unAckedMessageTracker.add(messageId, redeliveryCount);
}
} | 3.26 |
pulsar_ConsumerStats_getPartitionStats_rdh | /**
*
* @return stats for each partition if topic is partitioned topic
*/
default Map<String, ConsumerStats> getPartitionStats() {
return Collections.emptyMap();
} | 3.26 |
pulsar_BKCluster_startBKCluster_rdh | /**
* Start cluster. Also, starts the auto recovery process for each bookie, if
* isAutoRecoveryEnabled is true.
*
* @throws Exception
*/
private void startBKCluster(int numBookies) throws Exception {
PulsarRegistrationManager rm = new PulsarRegistrationManager(store, "/ledgers", baseConf);
rm.initNewCluster();
baseConf.setMetadataServiceUri("metadata-store:" + clusterConf.metadataServiceUri);
baseClientConf.setMetadataServiceUri("metadata-store:" + clusterConf.metadataServiceUri);
// Create Bookie Servers (B1, B2, B3)
for (int v2 = 0; v2 < numBookies; v2++) {
startNewBookie(v2);
}
} | 3.26 |
pulsar_BKCluster_stopBKCluster_rdh | /**
* Stop cluster. Also, stops all the auto recovery processes for the bookie
* cluster, if isAutoRecoveryEnabled is true.
*
* @throws Exception
*/
protected void stopBKCluster() throws Exception {
bookieComponents.forEach(LifecycleComponentStack::close);
bookieComponents.clear();
} | 3.26 |
pulsar_BKCluster_startBookie_rdh | /**
* Helper method to startup a bookie server using a configuration object.
* Also, starts the auto recovery process if isAutoRecoveryEnabled is true.
*
* @param conf
* Server Configuration Object
*/
protected LifecycleComponentStack startBookie(ServerConfiguration conf) throws Exception {
LifecycleComponentStack server = org.apache.bookkeeper.server.Main.buildBookieServer(new BookieConfiguration(conf));
BookieId address = BookieImpl.getBookieId(conf);
ComponentStarter.startComponent(server);
// Wait for up to 30 seconds for the bookie to start
for (int i = 0; i < 3000; i++) {
if (server.lifecycleState() == State.STARTED) {
break;
}
Thread.sleep(10);
}
if (server.lifecycleState() != State.STARTED) {
throw new RuntimeException("Bookie failed to start within timeout period");
}
log.info("New bookie '{}' has been created.", address);
return server;
} | 3.26 |
pulsar_BKCluster_startNewBookie_rdh | /**
* Helper method to startup a new bookie server with the indicated port
* number. Also, starts the auto recovery process, if the
* isAutoRecoveryEnabled is set true.
*
* @param index
* Bookie index
* @throws IOException
*/
public int startNewBookie(int index) throws Exception {
ServerConfiguration conf = newServerConfiguration(index);
f0.add(conf);
log.info("Starting new bookie on port: {}", conf.getBookiePort());
LifecycleComponentStack server = startBookie(conf);
bookieComponents.add(server);
return conf.getBookiePort();
} | 3.26 |
pulsar_TopicList_minus_rdh | // get topics, which are contained in list1, and not in list2
public static Set<String> minus(Collection<String> list1, Collection<String> list2) {
HashSet<String> s1 = new HashSet<>(list1);
s1.removeAll(list2);
return s1;
} | 3.26 |
pulsar_TopicList_filterTopics_rdh | // get topics that match 'topicsPattern' from original topics list
// return result should contain only topic names, without partition part
public static List<String> filterTopics(List<String> original, String regex) {
Pattern topicsPattern = Pattern.compile(regex);
return filterTopics(original, topicsPattern);
} | 3.26 |
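A hypothetical usage of the two helpers: diff a previously known topic list against a fresh listing, then filter the fresh listing with a regex. The topic names are placeholders.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Set;

public final class TopicListExample {
    public static void main(String[] args) {
        List<String> previous = Arrays.asList(
                "persistent://public/default/orders",
                "persistent://public/default/payments");
        List<String> current = Arrays.asList(
                "persistent://public/default/orders",
                "persistent://public/default/shipments");

        Set<String> removed = TopicList.minus(previous, current);   // [.../payments]
        Set<String> added = TopicList.minus(current, previous);     // [.../shipments]
        List<String> matched = TopicList.filterTopics(current, ".*ship.*");

        System.out.println(removed + " " + added + " " + matched);
    }
}
```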
pulsar_BrokerVersionFilter_filterAsync_rdh | /**
* From the given set of available broker candidates, filter those old brokers using the version numbers.
*
* @param brokers
* The currently available brokers that have not already been filtered.
* @param context
* The load manager context.
*/
@Override
public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(Map<String, BrokerLookupData> brokers, ServiceUnitId serviceUnit, LoadManagerContext context) {
ServiceConfiguration conf = context.brokerConfiguration();
if ((!conf.isPreferLaterVersions()) || brokers.isEmpty()) {
return CompletableFuture.completedFuture(brokers);
}
Version latestVersion;
try {
latestVersion = getLatestVersionNumber(brokers);
if (log.isDebugEnabled()) {
log.debug("Latest broker version found was [{}]", latestVersion);
}
} catch (Exception ex) {
log.warn("Disabling PreferLaterVersions feature; reason: " + ex.getMessage());
return FutureUtil.failedFuture(new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage()));
}
int numBrokersLatestVersion = 0;
int numBrokersOlderVersion = 0;
Iterator<Map.Entry<String, BrokerLookupData>> brokerIterator = brokers.entrySet().iterator();
while (brokerIterator.hasNext()) {
Map.Entry<String, BrokerLookupData> next = brokerIterator.next();
String brokerId = next.getKey();
String version = next.getValue().brokerVersion();
Version brokerVersionVersion = Version.valueOf(version);
if (brokerVersionVersion.equals(latestVersion)) {
log.debug("Broker [{}] is running the latest version ([{}])", brokerId, version);
numBrokersLatestVersion++;
} else {
log.info("Broker [{}] is running an older version ([{}]); latest version is [{}]", brokerId, version, latestVersion);
numBrokersOlderVersion++;
brokerIterator.remove();
}
}
if (numBrokersOlderVersion == 0) {
log.info("All {} brokers are running the latest version [{}]", numBrokersLatestVersion, latestVersion);
}
return CompletableFuture.completedFuture(brokers);
} | 3.26 |
pulsar_BrokerVersionFilter_getLatestVersionNumber_rdh | /**
* Get the most recent broker version number from the broker lookup data of all the running brokers.
* The version number comes from the build artifact in the pom and is added to the package when it is built by Maven.
*
* @param brokerMap
* The BrokerId -> BrokerLookupData Map.
* @return The most recent broker version
* @throws BrokerFilterBadVersionException
* If the most recent version is undefined (e.g., a bad broker version was encountered or a broker
* does not have a version string in its lookup data).
*/
public Version getLatestVersionNumber(Map<String, BrokerLookupData> brokerMap) throws BrokerFilterBadVersionException {
if (brokerMap.size() == 0) {
throw new BrokerFilterBadVersionException("Unable to determine latest version since broker version map was empty");
}
Version latestVersion = null;
for (Map.Entry<String, BrokerLookupData> entry : brokerMap.entrySet()) {
String brokerId = entry.getKey();
String version = entry.getValue().brokerVersion();
if ((null == version) || (version.length() == 0)) {
log.warn("No version string in lookup data for broker [{}]; disabling PreferLaterVersions feature", brokerId);
// Trigger the load manager to reset all the brokers to the original set
throw new BrokerFilterBadVersionException("No version string in lookup data for broker \"" + brokerId + "\"");
}
Version brokerVersionVersion;
try {
brokerVersionVersion = Version.valueOf(version);
} catch (Exception x) {
log.warn("Invalid version string in lookup data for broker [{}]: [{}];" + " disabling PreferLaterVersions feature", brokerId, version);
// Trigger the load manager to reset all the brokers to the original set
throw new BrokerFilterBadVersionException(((("Invalid version string in lookup data for broker \"" + brokerId) + "\": \"") + version) + "\")");
}
if (latestVersion == null) {
latestVersion = brokerVersionVersion;
} else if (Version.BUILD_AWARE_ORDER.compare(latestVersion, brokerVersionVersion) < 0) {
latestVersion = brokerVersionVersion;
}
}
return latestVersion;
} | 3.26 |
pulsar_AuthenticationProvider_authenticateHttpRequestAsync_rdh | /**
* Validate the authentication for the given credentials with the specified authentication data.
*
* <p>Implementations of this method MUST modify the request by adding the {@link AuthenticatedRoleAttributeName}
* and the {@link AuthenticatedDataAttributeName} attributes.</p>
*
* <p>Warning: the calling thread is an IO thread. Any implementations that rely on blocking behavior
* must ensure that the execution is completed on using a separate thread pool to ensure IO threads
* are never blocked.</p>
*
* @return Set response, according to passed in request, and return whether we should do following chain.doFilter.
* @throws Exception
* when authentication failed
*/
default CompletableFuture<Boolean> authenticateHttpRequestAsync(HttpServletRequest request, HttpServletResponse response) {
try {
return CompletableFuture.completedFuture(this.authenticateHttpRequest(request, response));
} catch (Exception e) {
return FutureUtil.failedFuture(e);
}
} | 3.26 |
pulsar_AuthenticationProvider_newAuthState_rdh | /**
* Create an authentication data State use passed in AuthenticationDataSource.
*/
default AuthenticationState newAuthState(AuthData authData, SocketAddress remoteAddress, SSLSession sslSession) throws AuthenticationException {
return new OneStageAuthenticationState(authData, remoteAddress, sslSession, this);
}
/**
* Create an http authentication data State use passed in AuthenticationDataSource.
*
* @deprecated implementations that previously relied on this should update their implementation of
{@link #authenticateHttpRequest(HttpServletRequest, HttpServletResponse)} or of
{@link #authenticateHttpRequestAsync(HttpServletRequest, HttpServletResponse)} so that the desired attributes
are added in those methods.
<p>Note: this method was only ever used to generate an {@link AuthenticationState} object in order to generate
an {@link AuthenticationDataSource} that was added as the {@link AuthenticatedDataAttributeName} | 3.26 |
pulsar_AuthenticationProvider_authenticate_rdh | /**
* Validate the authentication for the given credentials with the specified authentication data.
* This method is useful in one-stage authn; if you're not doing one-stage authn, or if you're providing
* your own state implementation for one-stage authn, it should throw an exception.
*
* @param authData
* provider specific authentication data
* @return the "role" string for the authenticated connection, if the authentication was successful
* @throws AuthenticationException
* if the credentials are not valid
* @deprecated use and implement {@link AuthenticationProvider#authenticateAsync(AuthenticationDataSource)} instead.
*/
@Deprecated
default String authenticate(AuthenticationDataSource authData) throws AuthenticationException {
throw new AuthenticationException("Not supported");
} | 3.26 |
pulsar_AuthenticationProvider_authenticateHttpRequest_rdh | /**
* Set response, according to passed in request.
* and return whether we should do following chain.doFilter or not.
*
* <p>Implementations of this method MUST modify the request by adding the {@link AuthenticatedRoleAttributeName}
* and the {@link AuthenticatedDataAttributeName} attributes.</p>
*
* @return Set response, according to passed in request, and return whether we should do following chain.doFilter.
* @throws Exception
* when authentication failed
* @deprecated use and implement {@link AuthenticationProvider#authenticateHttpRequestAsync} instead.
*/
@Deprecated
default boolean authenticateHttpRequest(HttpServletRequest request, HttpServletResponse response) throws Exception {
try {
AuthenticationState authenticationState = newHttpAuthState(request);
String role = authenticateAsync(authenticationState.getAuthDataSource()).get();
request.setAttribute(AuthenticatedRoleAttributeName, role);
request.setAttribute(AuthenticatedDataAttributeName, authenticationState.getAuthDataSource());
return true;
} catch (AuthenticationException e) {
throw e;
} catch (Exception e) {
if ((e instanceof ExecutionException) && (e.getCause() instanceof AuthenticationException)) {
throw ((AuthenticationException) (e.getCause()));
} else {
throw new AuthenticationException("Failed to authentication http request");
}
}
} | 3.26 |
pulsar_MetadataStoreFactory_create_rdh | /**
* Create a new {@link MetadataStore} instance based on the given configuration.
*
* @param metadataURL
* the metadataStore URL
* @param metadataStoreConfig
* the configuration object
* @return a new {@link MetadataStore} instance
* @throws MetadataStoreException
* if the metadata store initialization fails
*/
public static MetadataStore create(String metadataURL, MetadataStoreConfig metadataStoreConfig) throws MetadataStoreException {
return MetadataStoreFactoryImpl.create(metadataURL, metadataStoreConfig);
} | 3.26 |
pulsar_PulsarConnectorUtils_createInstance_rdh | /**
* Create an instance of <code>userClassName</code> using provided <code>classLoader</code>.
* This instance should implement the provided interface <code>xface</code>.
*
* @param userClassName
* user class name
* @param xface
* the interface that the reflected instance should implement
* @param classLoader
* class loader to load the class.
* @return the instance
*/
public static <T> T createInstance(String userClassName, Class<T> xface, ClassLoader classLoader) {
Class<?> theCls;
try {
theCls = Class.forName(userClassName, true, classLoader);
} catch (ClassNotFoundException | NoClassDefFoundError cnfe) {
throw new RuntimeException("User class must be in class path", cnfe);
}
if (!xface.isAssignableFrom(theCls)) {
throw new RuntimeException(userClassName + " not " + xface.getName());
}
Class<T> tCls = ((Class<T>) (theCls.asSubclass(xface)));
try {
Constructor<T> meth = tCls.getDeclaredConstructor();
return meth.newInstance();
} catch (InstantiationException ie) {
throw new RuntimeException("User class must be concrete", ie);
} catch (NoSuchMethodException e) {
throw new RuntimeException("User class must have a no-arg constructor", e);
} catch (IllegalAccessException e) {
throw new RuntimeException("User class must have a public constructor", e);
} catch (InvocationTargetException e) {
throw new RuntimeException("User class constructor throws exception", e);
}
} | 3.26 |
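A hypothetical usage sketch: load a user-supplied implementation by class name and bind it to an interface. The interface, class name, and configuration key below are placeholders, not part of the connector.

```java
public final class CodecFactory {
    public interface RecordCodec {
        byte[] encode(String record);
    }

    // codecClassName would typically come from connector configuration, e.g. "com.example.JsonRecordCodec".
    public static RecordCodec loadCodec(String codecClassName) {
        ClassLoader loader = Thread.currentThread().getContextClassLoader();
        return PulsarConnectorUtils.createInstance(codecClassName, RecordCodec.class, loader);
    }
}
```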
pulsar_TopicStatsImpl_add_rdh | // if the stats are added for the 1st time, we will need to make a copy of these stats and add it to the current
// stats. This stat addition is not thread-safe.
public TopicStatsImpl add(TopicStats ts) {
TopicStatsImpl stats = ((TopicStatsImpl) (ts));
this.count++;
this.msgRateIn += stats.msgRateIn;
this.msgThroughputIn += stats.msgThroughputIn;
this.msgRateOut += stats.msgRateOut;
this.msgThroughputOut += stats.msgThroughputOut;
this.bytesInCounter += stats.bytesInCounter;
this.msgInCounter += stats.msgInCounter;
this.bytesOutCounter += stats.bytesOutCounter;
this.msgOutCounter += stats.msgOutCounter;
this.waitingPublishers += stats.waitingPublishers;
double newAverageMsgSize = ((this.averageMsgSize * (this.count - 1)) + stats.averageMsgSize) / this.count;
this.averageMsgSize = newAverageMsgSize;
this.storageSize += stats.storageSize;
this.backlogSize += stats.backlogSize;
this.publishRateLimitedTimes += stats.publishRateLimitedTimes;
this.offloadedStorageSize += stats.offloadedStorageSize;
this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges;
this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize;
this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes;
this.ongoingTxnCount = stats.ongoingTxnCount;
this.abortedTxnCount = stats.abortedTxnCount;
this.committedTxnCount = stats.committedTxnCount;
stats.bucketDelayedIndexStats.forEach((k, v) -> {
TopicMetricBean topicMetricBean = this.bucketDelayedIndexStats.computeIfAbsent(k, __ -> new TopicMetricBean());
topicMetricBean.name = v.name;
topicMetricBean.labelsAndValues = v.labelsAndValues;
topicMetricBean.value += v.value;
});
List<? extends PublisherStats> publisherStats = stats.getPublishers();
for (int index = 0; index < publisherStats.size(); index++) {
PublisherStats s = publisherStats.get(index);
if (s.isSupportsPartialProducer() && (s.getProducerName() != null)) {
this.publishersMap.computeIfAbsent(s.getProducerName(), key -> {
final PublisherStatsImpl newStats = new PublisherStatsImpl();
newStats.setSupportsPartialProducer(true);
newStats.setProducerName(s.getProducerName());
return newStats;
}).add(((PublisherStatsImpl) (s)));
} else {
// Add a publisher stat entry to this.publishers
// if this.publishers.size() is smaller than
// the input stats.publishers.size().
// Here, index == this.publishers.size() means
// this.publishers.size() is smaller than the input stats.publishers.size()
if (index == this.publishers.size()) {
PublisherStatsImpl v7 = new PublisherStatsImpl();
v7.setSupportsPartialProducer(false);
this.publishers.add(v7);
}
this.publishers.get(index).add(((PublisherStatsImpl) (s)));
}
}
for (Map.Entry<String, SubscriptionStatsImpl> v8 : stats.subscriptions.entrySet()) {
SubscriptionStatsImpl subscriptionStats =
        this.subscriptions.computeIfAbsent(v8.getKey(), k -> new SubscriptionStatsImpl());
subscriptionStats.add(v8.getValue());
}
for (Map.Entry<String, ReplicatorStatsImpl> entry : stats.replication.entrySet()) {
ReplicatorStatsImpl replStats = this.replication.computeIfAbsent(entry.getKey(), k -> {
ReplicatorStatsImpl r = new ReplicatorStatsImpl();
r.setConnected(true);
return r;
});
replStats.add(entry.getValue());
}
if ((earliestMsgPublishTimeInBacklogs != 0) && (((TopicStatsImpl) (ts)).earliestMsgPublishTimeInBacklogs != 0)) {
earliestMsgPublishTimeInBacklogs = Math.min(earliestMsgPublishTimeInBacklogs, ((TopicStatsImpl) (ts)).earliestMsgPublishTimeInBacklogs);
} else {
earliestMsgPublishTimeInBacklogs = Math.max(earliestMsgPublishTimeInBacklogs, ((TopicStatsImpl) (ts)).earliestMsgPublishTimeInBacklogs);
}
return this;
} | 3.26 |
pulsar_BrokerInterceptor_beforeSendMessage_rdh | /**
* Intercept messages before sending them to the consumers.
*
* @param subscription
* pulsar subscription
* @param entry
* entry
* @param ackSet
* entry ack bitset. it is either <tt>null</tt> or an array of long-based bitsets.
* @param msgMetadata
* message metadata. The message metadata will be recycled after this call.
* @param consumer
* consumer. Consumer which entry are sent to.
*/
default void beforeSendMessage(Subscription subscription, Entry entry, long[] ackSet, MessageMetadata msgMetadata, Consumer consumer) {
} | 3.26 |
pulsar_BrokerInterceptor_onMessagePublish_rdh | /**
* Intercept message when broker receive a send request.
*
* @param headersAndPayload
* entry's header and payload
* @param publishContext
* Publish Context
*/
default void onMessagePublish(Producer producer, ByteBuf headersAndPayload, Topic.PublishContext publishContext) {
} | 3.26 |
pulsar_BrokerInterceptor_producerCreated_rdh | /**
* Called by the broker when a new producer is created.
*/
default void producerCreated(ServerCnx cnx, Producer producer, Map<String, String> metadata) {
} | 3.26 |
pulsar_BrokerInterceptor_messageProduced_rdh | /**
* Intercept after a message is produced.
*
* @param cnx
* client Connection
* @param producer
* Producer object
* @param publishContext
* Publish Context
*/
default void messageProduced(ServerCnx cnx, Producer producer, long startTimeNs, long ledgerId, long entryId, Topic.PublishContext publishContext) {} | 3.26 |
pulsar_BrokerInterceptor_consumerCreated_rdh | /**
* Intercept after a consumer is created.
*
* @param cnx
* client Connection
* @param consumer
* Consumer object
* @param metadata
* A map of metadata
*/
default void consumerCreated(ServerCnx cnx, Consumer consumer, Map<String, String> metadata) {
} | 3.26 |
pulsar_BrokerInterceptor_messageDispatched_rdh | /**
* Intercept after a message is dispatched to consumer.
*
* @param cnx
* client Connection
* @param consumer
* Consumer object
* @param ledgerId
* Ledger ID
* @param entryId
* Entry ID
* @param headersAndPayload
* Data
*/
default void messageDispatched(ServerCnx cnx, Consumer consumer, long ledgerId, long entryId, ByteBuf headersAndPayload) {
} | 3.26 |
pulsar_BrokerInterceptor_txnOpened_rdh | /**
* Intercept when a transaction begins.
*
* @param tcId
* Transaction Coordinator Id
* @param txnID
* Transaction ID
*/
default void txnOpened(long tcId, String txnID) {
} | 3.26 |
pulsar_BrokerInterceptor_consumerClosed_rdh | /**
* Called by the broker when a consumer is closed.
*
* @param cnx
* client Connection
* @param consumer
* Consumer object
* @param metadata
* A map of metadata
*/
default void consumerClosed(ServerCnx cnx, Consumer consumer, Map<String, String> metadata) {
} | 3.26 |
pulsar_BrokerInterceptor_onFilter_rdh | /**
* The interception of web processing, as same as `Filter.onFilter`.
* So In this method, we must call `chain.doFilter` to continue the chain.
*/
default void onFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
// Just continue the chain by default.
chain.doFilter(request, response);
} | 3.26 |
pulsar_BrokerInterceptor_onConnectionCreated_rdh | /**
* Called by the broker when a new connection is created.
*/
default void onConnectionCreated(ServerCnx cnx) {} | 3.26 |
pulsar_BrokerInterceptor_m0_rdh | /**
* Intercept when a transaction ends.
*
* @param txnID
* Transaction ID
* @param txnAction
* Transaction Action
*/
default void m0(String txnID, long txnAction) {
} | 3.26 |
pulsar_ReaderInterceptor_onPartitionsChange_rdh | /**
* This method is called when partitions of the topic (partitioned-topic) changes.
*
* @param topicName
* topic name
* @param partitions
* new updated number of partitions
*/
default void onPartitionsChange(String topicName, int partitions) {
} | 3.26 |
pulsar_PulsarJsonRowDecoder_decodeRow_rdh | /**
* decode ByteBuf by {@link org.apache.pulsar.client.api.schema.GenericSchema}.
*
* @param byteBuf
* @return the decoded row, keyed by column handle
*/
@Override
public Optional<Map<DecoderColumnHandle, FieldValueProvider>> decodeRow(ByteBuf byteBuf) {
GenericJsonRecord record = ((GenericJsonRecord) (genericJsonSchema.decode(byteBuf)));
JsonNode tree = record.getJsonNode();
Map<DecoderColumnHandle, FieldValueProvider> decodedRow = new HashMap<>();
for (Map.Entry<DecoderColumnHandle, JsonFieldDecoder> entry : fieldDecoders.entrySet()) {
DecoderColumnHandle columnHandle = entry.getKey();
JsonFieldDecoder decoder = entry.getValue();
JsonNode node = locateNode(tree, columnHandle);
decodedRow.put(columnHandle, decoder.decode(node));
}
return Optional.of(decodedRow);
} | 3.26 |
pulsar_PortManager_m0_rdh | /**
* Returns whether the port was released successfully.
*
* @return whether the release is successful.
*/
public static synchronized boolean m0(int lockedPort) {
return PORTS.remove(lockedPort);
} | 3.26 |
pulsar_NonDurableCursorImpl_recover_rdh | // / Overridden methods from ManagedCursorImpl. Void implementation to skip cursor persistence
@Override
void recover(final VoidCallback callback) {
// / No-Op
} | 3.26 |
pulsar_Metrics_create_rdh | /**
* Creates a metrics object with the dimensions map immutable.
*
* @param dimensionMap
* @return the metrics object with an immutable dimensions map
*/
public static Metrics create(Map<String, String> dimensionMap) {
// make the dimensions map unmodifiable and immutable;
Map<String, String> map = new TreeMap<>();
map.putAll(dimensionMap);
return new Metrics(Collections.unmodifiableMap(map));
} | 3.26 |
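A small usage sketch mirroring how aggregate() above builds metrics: populate the dimension map, create the immutable Metrics object, then attach values with put(...). The dimension values and metric name are placeholders.

```java
import java.util.HashMap;
import java.util.Map;

final class MetricsExample {
    static Metrics topicCountMetric(long topicCount) {
        Map<String, String> dimensions = new HashMap<>();
        dimensions.put("namespace", "public/default");   // placeholder dimension values
        dimensions.put("cluster", "standalone");

        Metrics metrics = Metrics.create(dimensions);    // dimensions are copied and made unmodifiable
        metrics.put("brk_topics_count", topicCount);     // same put(...) call used by aggregate() above
        return metrics;
    }
}
```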
pulsar_BrokerMonitor_start_rdh | /**
* Start the broker monitoring procedure.
*/
public void start() {
try {
final BrokerWatcher brokerWatcher = new BrokerWatcher(zkClient);
brokerWatcher.updateBrokers(BROKER_ROOT);
while (true) {
Thread.sleep(GLOBAL_STATS_PRINT_PERIOD_MILLIS);
printGlobalData();
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
} | 3.26 |
pulsar_BrokerMonitor_main_rdh | /**
* Run a monitor from command line arguments.
*
* @param args
* Arguments for the monitor.
*/
public static void main(String[] args) throws Exception {
final Arguments arguments = new Arguments();
final JCommander jc = new JCommander(arguments);
jc.setProgramName("pulsar-perf monitor-brokers");
try {
jc.parse(args);
} catch (ParameterException e) {
System.out.println(e.getMessage());
jc.usage();
PerfClientUtils.exit(1);
}
if (arguments.extensions) {
final BrokerMonitor monitor = new BrokerMonitor(arguments.connectString);
monitor.startBrokerLoadDataStoreMonitor();
} else {
final ZooKeeper zkClient = new ZooKeeper(arguments.connectString, ZOOKEEPER_TIMEOUT_MILLIS, null);
final BrokerMonitor monitor = new BrokerMonitor(zkClient);
monitor.start();
}
} | 3.26 |
pulsar_BrokerMonitor_printLoadReport_rdh | // Print the load report in a tabular form for a broker running SimpleLoadManagerImpl.
private synchronized void printLoadReport(final String broker, final LoadReport loadReport) {
f1.put(broker, loadReport);
// Initialize the constant rows.
final Object[][] rows = new Object[10][];
rows[0] = COUNT_ROW;
rows[2] = RAW_SYSTEM_ROW;
rows[4] = ALLOC_SYSTEM_ROW;
rows[6] = RAW_MESSAGE_ROW;
rows[8] = ALLOC_MESSAGE_ROW;
// First column is a label, so start at the second column at index 1.
// Client count row.
rows[1] = new Object[COUNT_ROW.length];
initRow(rows[1], loadReport.getNumTopics(), loadReport.getNumBundles(), loadReport.getNumProducers(), loadReport.getNumConsumers(), loadReport.getBundleGains().size(), loadReport.getBundleLosses().size());
// Raw system row.
final SystemResourceUsage systemResourceUsage =
loadReport.getSystemResourceUsage();
final ResourceUsage v35 = systemResourceUsage.getCpu();
final ResourceUsage memory = systemResourceUsage.getMemory();
final ResourceUsage directMemory = systemResourceUsage.getDirectMemory();
final ResourceUsage bandwidthIn = systemResourceUsage.getBandwidthIn();
final ResourceUsage bandwidthOut = systemResourceUsage.getBandwidthOut();
final double maxUsage = Math.max(Math.max(Math.max(v35.percentUsage(), memory.percentUsage()), Math.max(directMemory.percentUsage(), bandwidthIn.percentUsage())), bandwidthOut.percentUsage());
rows[3] = new Object[RAW_SYSTEM_ROW.length];
initRow(rows[3], v35.percentUsage(), memory.percentUsage(), directMemory.percentUsage(), bandwidthIn.percentUsage(), bandwidthOut.percentUsage(), maxUsage);
// Allocated system row.
rows[5] = new Object[ALLOC_SYSTEM_ROW.length];
final double allocatedCpuUsage = percentUsage(loadReport.getAllocatedCPU(), v35.limit);
final double allocatedMemoryUsage = percentUsage(loadReport.getAllocatedMemory(), memory.limit);
final double allocatedBandwidthInUsage =
percentUsage(loadReport.getAllocatedBandwidthIn(), bandwidthIn.limit);
final double allocatedBandwidthOutUsage = percentUsage(loadReport.getAllocatedBandwidthOut(), bandwidthOut.limit);
final double maxAllocatedUsage = Math.max(Math.max(Math.max(allocatedCpuUsage, allocatedMemoryUsage), allocatedBandwidthInUsage), allocatedBandwidthOutUsage);
initRow(rows[5], allocatedCpuUsage, allocatedMemoryUsage, null, allocatedBandwidthInUsage, allocatedBandwidthOutUsage, maxAllocatedUsage);
// Raw message row.
rows[7] = new Object[RAW_MESSAGE_ROW.length];
initMessageRow(rows[7], loadReport.getMsgRateIn(), loadReport.getMsgRateOut(), bandwidthIn.usage, bandwidthOut.usage);
// Allocated message row.
rows[9] = new Object[ALLOC_MESSAGE_ROW.length];
initMessageRow(rows[9], loadReport.getAllocatedMsgRateIn(), loadReport.getAllocatedMsgRateOut(),
        loadReport.getAllocatedBandwidthIn(), loadReport.getAllocatedBandwidthOut());
final String table = localTableMaker.make(rows);
f0.info("\nLoad Report for {}:\n{}\n", broker, table);
} | 3.26 |
pulsar_BrokerMonitor_initRow_rdh | // Helper method to initialize rows.
private static void initRow(final Object[] row, final Object... elements) {
System.arraycopy(elements, 0, row, 1, elements.length);
} | 3.26 |
pulsar_BrokerMonitor_printBrokerData_rdh | // Print the broker data in a tabular form for a broker using ModularLoadManagerImpl.
private synchronized void printBrokerData(final String broker, final LocalBrokerData localBrokerData, final TimeAverageBrokerData timeAverageData) {
f1.put(broker, localBrokerData);
// Initialize the constant rows.
final Object[][] rows = new Object[10][];
rows[0] = SYSTEM_ROW;
rows[2] = COUNT_ROW;
rows[4] = LATEST_ROW;
rows[6] = SHORT_ROW;
rows[8] = LONG_ROW;
// First column is a label, so start at the second column at index 1.
// System row.
rows[1] = new Object[SYSTEM_ROW.length];
initRow(rows[1], localBrokerData.getCpu().percentUsage(), localBrokerData.getMemory().percentUsage(), localBrokerData.getDirectMemory().percentUsage(), localBrokerData.getBandwidthIn().percentUsage(), localBrokerData.getBandwidthOut().percentUsage(),
localBrokerData.getMaxResourceUsage() * 100);
// Count row.
rows[3] = new Object[COUNT_ROW.length];
initRow(rows[3], localBrokerData.getNumTopics(), localBrokerData.getNumBundles(), localBrokerData.getNumProducers(), localBrokerData.getNumConsumers(), localBrokerData.getLastBundleGains().size(), localBrokerData.getLastBundleLosses().size());
// Latest message data row.
rows[5] = new Object[LATEST_ROW.length];
initMessageRow(rows[5], localBrokerData.getMsgRateIn(), localBrokerData.getMsgRateOut(), localBrokerData.getMsgThroughputIn(), localBrokerData.getMsgThroughputOut());
// Short-term message data row.
rows[7] = new Object[SHORT_ROW.length];
initMessageRow(rows[7], timeAverageData.getShortTermMsgRateIn(), timeAverageData.getShortTermMsgRateOut(), timeAverageData.getShortTermMsgThroughputIn(), timeAverageData.getShortTermMsgThroughputOut());
// Long-term message data row.
rows[9] = new Object[LONG_ROW.length];
initMessageRow(rows[9], timeAverageData.getLongTermMsgRateIn(), timeAverageData.getLongTermMsgRateOut(), timeAverageData.getLongTermMsgThroughputIn(), timeAverageData.getLongTermMsgThroughputOut());
final String table = localTableMaker.make(rows);
f0.info("\nBroker Data for {}:\n{}\n", broker, table);
} | 3.26 |
pulsar_BrokerMonitor_initMessageRow_rdh | // Helper method to initialize rows which hold message data.
private static void initMessageRow(final Object[] row, final double messageRateIn, final double messageRateOut,
final double messageThroughputIn, final double messageThroughputOut) {
initRow(row, messageRateIn, messageRateOut, messageRateIn + messageRateOut, messageThroughputIn / 1024, messageThroughputOut / 1024, (messageThroughputIn + messageThroughputOut) / 1024);
} | 3.26 |
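// Sketch (not part of the original class): initMessageRow derives the in+out totals and
// converts throughput from bytes/s to KB/s by dividing by 1024. Values below are invented.
Object[] exampleMsgRow = new Object[7];
exampleMsgRow[0] = "Latest";
initMessageRow(exampleMsgRow, 100.0, 50.0, 2048.0, 1024.0);
// exampleMsgRow is now ["Latest", 100.0, 50.0, 150.0, 2.0, 1.0, 3.0]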
pulsar_BrokerMonitor_printData_rdh | // Decide whether this broker is running SimpleLoadManagerImpl or ModularLoadManagerImpl and then print the data
// accordingly.
private synchronized void printData(final String path) {
final String broker = brokerNameFromPath(path);
String jsonString;
try {
jsonString = new String(zkClient.getData(path, this, null));
} catch (Exception ex) {
throw new RuntimeException(ex);
}
// Use presence of the String "allocated" to determine if this is using SimpleLoadManagerImpl.
if (jsonString.contains("allocated")) {
printLoadReport(broker, gson.fromJson(jsonString, LoadReport.class));
} else {
final LocalBrokerData localBrokerData = gson.fromJson(jsonString, LocalBrokerData.class);
final String timeAveragePath = (BROKER_TIME_AVERAGE_BASE_PATH + "/") + broker;
try {
final TimeAverageBrokerData timeAverageData = gson.fromJson(new String(zkClient.getData(timeAveragePath, false, null)), TimeAverageBrokerData.class);
printBrokerData(broker, localBrokerData, timeAverageData);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | 3.26 |
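// Hedged sketch of the detection heuristic above: a SimpleLoadManagerImpl LoadReport
// serializes fields whose names contain "allocated" (e.g. allocatedCPU), while a
// ModularLoadManagerImpl LocalBrokerData does not, so a substring check on the raw JSON
// is enough to pick the parser. Both payloads below are abbreviated, hypothetical examples.
String simpleJson = "{\"numBundles\":3,\"allocatedCPU\":1.5}";
String modularJson = "{\"numBundles\":3,\"msgRateIn\":100.0}";
boolean parseAsLoadReport = simpleJson.contains("allocated");        // true
boolean parseAsLocalBrokerData = !modularJson.contains("allocated"); // true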
pulsar_BrokerMonitor_printGlobalData_rdh | // Prints out the global load data.
private void printGlobalData() {
synchronized(f1) {
// 1 header row, 1 total row, and loadData.size() rows for brokers.
Object[][] rows = new Object[f1.size() + 2][];
rows[0] = GLOBAL_HEADER;
int totalBundles = 0;
double totalThroughput = 0;
double totalMessageRate = 0;
double totalLongTermMessageRate = 0;
double maxMaxUsage = 0;
int i = 1;
for (final Map.Entry<String, Object> entry : f1.entrySet()) {
final String broker = entry.getKey();
final Object data = entry.getValue();
rows[i] = new Object[GLOBAL_HEADER.length];
rows[i][0] = broker;
int numBundles;
double messageRate;
double longTermMessageRate;
double messageThroughput;
double maxUsage;
if (data instanceof LoadReport) {
final LoadReport loadReport = (LoadReport) data;
numBundles = loadReport.getNumBundles();
messageRate = loadReport.getMsgRateIn() + loadReport.getMsgRateOut();
longTermMessageRate = loadReport.getAllocatedMsgRateIn() + loadReport.getAllocatedMsgRateOut();
messageThroughput = (loadReport.getAllocatedBandwidthIn() + loadReport.getAllocatedBandwidthOut()) / 1024;
final SystemResourceUsage systemResourceUsage = loadReport.getSystemResourceUsage();
maxUsage = Math.max(Math.max(Math.max(systemResourceUsage.getCpu().percentUsage(), systemResourceUsage.getMemory().percentUsage()), Math.max(systemResourceUsage.getDirectMemory().percentUsage(), systemResourceUsage.getBandwidthIn().percentUsage())), systemResourceUsage.getBandwidthOut().percentUsage());
} else if (data instanceof LocalBrokerData) {
final LocalBrokerData localData = (LocalBrokerData) data;
numBundles = localData.getNumBundles();
messageRate = localData.getMsgRateIn() + localData.getMsgRateOut();
final String timeAveragePath = (BROKER_TIME_AVERAGE_BASE_PATH + "/") + broker;
try {
final TimeAverageBrokerData timeAverageData = gson.fromJson(new String(zkClient.getData(timeAveragePath, false, null)), TimeAverageBrokerData.class);
longTermMessageRate = timeAverageData.getLongTermMsgRateIn() + timeAverageData.getLongTermMsgRateOut();
} catch (Exception x) {
throw new RuntimeException(x);
}
messageThroughput = (localData.getMsgThroughputIn() + localData.getMsgThroughputOut()) / 1024;
maxUsage = localData.getMaxResourceUsage();
} else {
throw new AssertionError("Unreachable code");
}
rows[i][1] = numBundles;
rows[i][2] = messageRate;
// Keep the column order consistent with the TOTAL row below: long-term rate, then throughput.
rows[i][3] = longTermMessageRate;
rows[i][4] = messageThroughput;
rows[i][5] = maxUsage;
totalBundles += numBundles;
totalMessageRate += messageRate;
totalLongTermMessageRate += longTermMessageRate;
totalThroughput += messageThroughput;
maxMaxUsage = Math.max(maxUsage, maxMaxUsage);
++i;
}
final int finalRow = f1.size() + 1;
rows[finalRow] = new Object[GLOBAL_HEADER.length];
rows[finalRow][0] = "TOTAL";rows[finalRow][1] = totalBundles;
rows[finalRow][2] = totalMessageRate;
rows[finalRow][3] = totalLongTermMessageRate;
rows[finalRow][4] = totalThroughput;
rows[finalRow][5] = maxMaxUsage;
final String table = globalTableMaker.make(rows);
f0.info("Overall Broker Data:\n{}", table);
}
} | 3.26 |
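// Illustrative sketch (all values invented, header labels assumed) of the table assembled
// above: one header row, one row per broker, and a final TOTAL row that aggregates bundle
// counts, rates and throughput and keeps the worst max-usage figure seen.
Object[][] exampleGlobalTable = {
    { "BROKER",        "BUNDLE", "MSG/S", "LONG/S", "KB/S", "MAX %" },
    { "broker-1:8080", 12,       150.0,   140.0,    3.0,    62.0 },
    { "TOTAL",         12,       150.0,   140.0,    3.0,    62.0 },
};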
pulsar_BrokerMonitor_makeMessageRow_rdh | // Take advantage of repeated labels in message rows.
private static Object[] makeMessageRow(final String firstElement) {
final List<Object> result = new ArrayList<>();
result.add(firstElement);
result.addAll(MESSAGE_FIELDS);
return result.toArray();
} | 3.26 |
pulsar_BrokerMonitor_updateBrokers_rdh | // Inform the user of any broker gains and losses and put watches on newly acquired brokers.
private synchronized void updateBrokers(final String path) {
final Set<String> newBrokers = new HashSet<>();
try {
newBrokers.addAll(zkClient.getChildren(path, this));
} catch (Exception ex) {
throw new RuntimeException(ex);
}
for (String oldBroker : brokers) {
if (!newBrokers.contains(oldBroker)) {
f0.info("Lost broker: " + oldBroker);
synchronized(f1) {
// Stop including lost broker in global stats.
f1.remove(oldBroker);
}
}
}
for (String newBroker : newBrokers) {
if (!brokers.contains(newBroker)) {
f0.info("Gained broker: " + newBroker);
final BrokerDataWatcher brokerDataWatcher = new BrokerDataWatcher(zkClient);
brokerDataWatcher.printData((path + "/") + newBroker);
}
}
this.brokers = newBrokers;
} | 3.26 |
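// Sketch (broker names invented, assumes java.util imports) of the set difference that
// updateBrokers performs: brokers present before but not now are reported lost, brokers
// present now but not before are reported gained and get a fresh data watch.
Set<String> before = new HashSet<>(Arrays.asList("broker-1:8080", "broker-2:8080"));
Set<String> after = new HashSet<>(Arrays.asList("broker-2:8080", "broker-3:8080"));
for (String b : before) {
    if (!after.contains(b)) {
        System.out.println("Lost broker: " + b);    // broker-1:8080
    }
}
for (String b : after) {
    if (!before.contains(b)) {
        System.out.println("Gained broker: " + b);  // broker-3:8080
    }
}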
pulsar_BrokerMonitor_process_rdh | /**
* Print the local and historical broker data in a tabular format, and put this back as a watcher.
*
* @param event
* The watched event.
*/
public synchronized void process(final WatchedEvent event) {
try {
if (event.getType() == EventType.NodeDataChanged) {
printData(event.getPath());
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
} | 3.26 |
pulsar_ConsumerConfiguration_getAckTimeoutMillis_rdh | /**
*
* @return the configured timeout in milliseconds for unacked messages.
*/
public long getAckTimeoutMillis() {
return conf.getAckTimeoutMillis();
} | 3.26 |
pulsar_ConsumerConfiguration_getNegativeAckRedeliveryBackoff_rdh | /**
*
* @return the configured {@link RedeliveryBackoff} for the consumer
*/
public RedeliveryBackoff getNegativeAckRedeliveryBackoff() {
return conf.getNegativeAckRedeliveryBackoff();
} | 3.26 |
pulsar_ConsumerConfiguration_setProperties_rdh | /**
* Add all the properties in the provided map.
*
* @param properties
* @return the {@link ConsumerConfiguration} object
*/
public ConsumerConfiguration setProperties(Map<String, String> properties) {
conf.getProperties().putAll(properties);
return this;
} | 3.26 |
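// Hypothetical usage sketch for setProperties (keys and values invented): the entries
// simply travel with the consumer as application-defined metadata.
ConsumerConfiguration propsConf = new ConsumerConfiguration();
Map<String, String> props = new HashMap<>();
props.put("application", "billing");
props.put("region", "eu-west");
propsConf.setProperties(props); // merges every entry into the consumer's property map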
pulsar_ConsumerConfiguration_setCryptoFailureAction_rdh | /**
* Sets the ConsumerCryptoFailureAction to the value specified.
*
* @param action
* consumer action
*/
public void setCryptoFailureAction(ConsumerCryptoFailureAction action) {
conf.setCryptoFailureAction(action);
} | 3.26 |
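// Hypothetical usage sketch for setCryptoFailureAction: DISCARD silently drops messages
// the consumer cannot decrypt, CONSUME delivers them still encrypted, and FAIL (the
// default) leaves them undelivered for redelivery.
ConsumerConfiguration cryptoConf = new ConsumerConfiguration();
cryptoConf.setCryptoFailureAction(ConsumerCryptoFailureAction.DISCARD);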
pulsar_ConsumerConfiguration_setAckTimeout_rdh | /**
* Set the timeout for unacked messages, truncated to the nearest millisecond. The timeout needs to be greater than
* 10 seconds.
*
* @param ackTimeout
* for unacked messages.
* @param timeUnit
* unit in which the timeout is provided.
* @return {@link ConsumerConfiguration}
*/
public ConsumerConfiguration setAckTimeout(long ackTimeout, TimeUnit timeUnit) {
long ackTimeoutMillis = timeUnit.toMillis(ackTimeout);
checkArgument(ackTimeoutMillis >= minAckTimeoutMillis, "Ack timeout should be greater than " + minAckTimeoutMillis + " ms");
conf.setAckTimeoutMillis(ackTimeoutMillis);
return this;
} | 3.26 |
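// Hypothetical usage sketch for setAckTimeout: ask for redelivery of messages that stay
// unacknowledged for 30 seconds. Values below the configured minimum are rejected by the
// checkArgument call above.
ConsumerConfiguration ackConf = new ConsumerConfiguration();
ackConf.setAckTimeout(30, TimeUnit.SECONDS);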
pulsar_ConsumerConfiguration_getSubscriptionType_rdh | /**
*
* @return the configured subscription type
*/
public SubscriptionType getSubscriptionType() {
return conf.getSubscriptionType();
} | 3.26 |
pulsar_ConsumerConfiguration_setMessageListener_rdh | /**
* Sets a {@link MessageListener} for the consumer
* <p>
* When a {@link MessageListener} is set, the application will receive messages through it. Calls to
* {@link Consumer#receive()} will not be allowed.
*
* @param messageListener
* the listener object
*/
public ConsumerConfiguration setMessageListener(MessageListener<byte[]> messageListener) {
Objects.requireNonNull(messageListener);
this.messageListener = messageListener;
conf.setMessageListener(new MessageListener<byte[]>() {
@Override
public void received(Consumer<byte[]> consumer, Message<byte[]> msg) {
messageListener.received(new ConsumerV1Impl(consumer), msg);
}
@Override
public void reachedEndOfTopic(Consumer<byte[]> consumer) {
messageListener.reachedEndOfTopic(new ConsumerV1Impl(consumer));
}
});
return this;
} | 3.26 |
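// Hypothetical usage sketch for setMessageListener: once a listener is set, messages are
// pushed to it and Consumer#receive() may no longer be called. The handling logic below
// is invented.
ConsumerConfiguration listenConf = new ConsumerConfiguration();
listenConf.setMessageListener(new MessageListener<byte[]>() {
    @Override
    public void received(Consumer<byte[]> consumer, Message<byte[]> msg) {
        System.out.println("Received: " + new String(msg.getData()));
        consumer.acknowledgeAsync(msg);
    }

    @Override
    public void reachedEndOfTopic(Consumer<byte[]> consumer) {
        System.out.println("Reached end of topic " + consumer.getTopic());
    }
});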
pulsar_ConsumerConfiguration_setSubscriptionType_rdh | /**
* Select the subscription type to be used when subscribing to the topic.
* <p>
* Default is {@link SubscriptionType#Exclusive}
*
* @param subscriptionType
* the subscription type value
*/
public ConsumerConfiguration setSubscriptionType(SubscriptionType subscriptionType) {
Objects.requireNonNull(subscriptionType);
conf.setSubscriptionType(subscriptionType);
return this;
} | 3.26 |
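// Hypothetical usage sketch for setSubscriptionType: a Shared subscription lets several
// consumers on the same subscription name split the message stream, while Exclusive (the
// default) admits only one consumer.
ConsumerConfiguration subConf = new ConsumerConfiguration();
subConf.setSubscriptionType(SubscriptionType.Shared);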