name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
---|---|---|
pulsar_ModularLoadManagerImpl_initialize_rdh | /**
* Initialize this load manager using the given PulsarService. Should be called only once, after invoking the
* default constructor.
*
* @param pulsar
* The service to initialize with.
*/
@Override
public void initialize(final PulsarService pulsar) {
this.pulsar = pulsar;
this.pulsarResources = pulsar.getPulsarResources();
brokersData = pulsar.getCoordinationService().getLockManager(LocalBrokerData.class);
resourceQuotaCache = pulsar.getLocalMetadataStore().getMetadataCache(ResourceQuota.class);
pulsar.getLocalMetadataStore().registerListener(this::handleDataNotification);
pulsar.getLocalMetadataStore().registerSessionListener(this::handleMetadataSessionEvent);
if (SystemUtils.IS_OS_LINUX) {
brokerHostUsage = new LinuxBrokerHostUsageImpl(pulsar);
} else {
brokerHostUsage = new GenericBrokerHostUsageImpl(pulsar);
}
bundleSplitStrategy = new BundleSplitterTask();
conf = pulsar.getConfiguration();
// Initialize the default stats to assume for unseen bundles (hard-coded for now).
defaultStats.msgThroughputIn = DEFAULT_MESSAGE_THROUGHPUT;
defaultStats.msgThroughputOut = DEFAULT_MESSAGE_THROUGHPUT;
defaultStats.msgRateIn = f0;
defaultStats.msgRateOut = f0;
placementStrategy = ModularLoadManagerStrategy.create(conf);
policies = new SimpleResourceAllocationPolicies(pulsar);
filterPipeline.add(new BrokerLoadManagerClassFilter());
filterPipeline.add(new BrokerVersionFilter());
LoadManagerShared.refreshBrokerToFailureDomainMap(pulsar, brokerToFailureDomainMap);
// register listeners for domain changes
pulsarResources.getClusterResources().getFailureDomainResources().registerListener(__ -> {
executors.execute(() -> LoadManagerShared.refreshBrokerToFailureDomainMap(pulsar, brokerToFailureDomainMap));
});
loadSheddingPipeline.add(createLoadSheddingStrategy());
} | 3.26 |
pulsar_ModularLoadManagerImpl_updateBundleSplitMetrics_rdh | /**
* As leader broker, update bundle split metrics.
*
* @param bundlesSplit
* the number of bundles splits
*/
private void updateBundleSplitMetrics(int bundlesSplit) {
bundleSplitCount += bundlesSplit;
List<Metrics> metrics = new ArrayList<>();
Map<String, String> dimensions = new HashMap<>();
dimensions.put("metric", "bundlesSplit");
Metrics m = Metrics.create(dimensions);
m.put("brk_lb_bundles_split_total", bundleSplitCount);
metrics.add(m);
this.bundleSplitMetrics.set(metrics);
} | 3.26 |
pulsar_ModularLoadManagerImpl_writeBundleDataOnZooKeeper_rdh | /**
* As the leader broker, write bundle data aggregated from all brokers to metadata store.
*/
@Override
public void writeBundleDataOnZooKeeper() {
updateBundleData();
// Write the bundle data to metadata store.
List<CompletableFuture<Void>> futures = new ArrayList<>();
for (Map.Entry<String, BundleData> entry : loadData.getBundleData().entrySet()) {
final String bundle = entry.getKey();
final BundleData data = entry.getValue();
futures.add(pulsarResources.getLoadBalanceResources().getBundleDataResources().updateBundleData(bundle, data));
}
// Write the time average broker data to metadata store.
for (Map.Entry<String, BrokerData> entry : loadData.getBrokerData().entrySet()) {
final String broker = entry.getKey();
final TimeAverageBrokerData data = entry.getValue().getTimeAverageData();
futures.add(pulsarResources.getLoadBalanceResources().getBrokerTimeAverageDataResources().updateTimeAverageBrokerData(broker, data));
}
try {
FutureUtil.waitForAll(futures).join();
} catch (Exception e) {
log.warn("Error when writing metadata data to store", e);
}
} | 3.26 |
pulsar_ModularLoadManagerImpl_reapDeadBrokerPreallocations_rdh | // For each broker that we have a recent load report, see if they are still alive
private void reapDeadBrokerPreallocations(List<String> aliveBrokers) {
for (String broker : loadData.getBrokerData().keySet()) {
if (!aliveBrokers.contains(broker)) {
if (log.isDebugEnabled()) {
log.debug("Broker {} appears to have stopped; now reclaiming any preallocations", broker);
}
final Iterator<Map.Entry<String, String>> iterator = preallocatedBundleToBroker.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, String> entry = iterator.next();
final String preallocatedBundle = entry.getKey();
final String preallocatedBroker = entry.getValue();
if (broker.equals(preallocatedBroker)) {
if (log.isDebugEnabled()) {
log.debug("Removing old preallocation on dead broker {} for bundle {}", preallocatedBroker, preallocatedBundle);}
iterator.remove();
}
}
}
}
} | 3.26 |
pulsar_ModularLoadManagerImpl_start_rdh | /**
* As any broker, start the load manager.
*
* @throws PulsarServerException
* If an unexpected error prevented the load manager from being started.
*/
@Override
public void start() throws PulsarServerException {
try {
// At this point, the ports will be updated with the real port number that the server was assigned
Map<String, String> protocolData = pulsar.getProtocolDataToAdvertise();
lastData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners());
lastData.setProtocols(protocolData);
// configure broker-topic mode
lastData.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics());
lastData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics());
localData = new LocalBrokerData(pulsar.getSafeWebServiceAddress(), pulsar.getWebServiceAddressTls(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners());
localData.setProtocols(protocolData);
localData.setBrokerVersionString(pulsar.getBrokerVersion());
// configure broker-topic mode
localData.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics());
localData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics());
localData.setLoadManagerClassName(conf.getLoadManagerClassName());
String lookupServiceAddress = pulsar.getLookupServiceAddress();
brokerZnodePath = (LoadManager.LOADBALANCE_BROKERS_ROOT + "/") + lookupServiceAddress;
m1();
brokerDataLock = brokersData.acquireLock(brokerZnodePath, localData).join();
pulsarResources.getLoadBalanceResources().getBrokerTimeAverageDataResources().updateTimeAverageBrokerData(lookupServiceAddress, new TimeAverageBrokerData()).join();
updateAll();
} catch (Exception e) {
log.error("Unable to acquire lock for broker: [{}]", brokerZnodePath, e);
throw new PulsarServerException(e);
}
} | 3.26 |
pulsar_ModularLoadManagerImpl_updateLoadBalancingMetrics_rdh | /**
* As any broker, update System Resource Usage Percentage.
*
* @param systemResourceUsage
*/
private void updateLoadBalancingMetrics(final SystemResourceUsage systemResourceUsage) {
List<Metrics> metrics = new ArrayList<>();
Map<String, String> dimensions = new HashMap<>();
dimensions.put("broker", pulsar.getAdvertisedAddress());
dimensions.put("metric", "loadBalancing");
Metrics m = Metrics.create(dimensions);
m.put("brk_lb_cpu_usage", systemResourceUsage.getCpu().percentUsage());
m.put("brk_lb_memory_usage",
systemResourceUsage.getMemory().percentUsage());
m.put("brk_lb_directMemory_usage", systemResourceUsage.getDirectMemory().percentUsage());
m.put("brk_lb_bandwidth_in_usage", systemResourceUsage.getBandwidthIn().percentUsage());
m.put("brk_lb_bandwidth_out_usage", systemResourceUsage.getBandwidthOut().percentUsage());
metrics.add(m);
this.loadBalancingMetrics.set(metrics);
} | 3.26 |
pulsar_ModularLoadManagerImpl_needBrokerDataUpdate_rdh | // Determine if the broker data requires an update by delegating to the update condition.
private boolean needBrokerDataUpdate() {
final long updateMaxIntervalMillis = TimeUnit.MINUTES.toMillis(conf.getLoadBalancerReportUpdateMaxIntervalMinutes());
long timeSinceLastReportWrittenToStore = System.currentTimeMillis() - localData.getLastUpdate();
if (timeSinceLastReportWrittenToStore > updateMaxIntervalMillis) {
log.info("Writing local data to metadata store because time since last" + " update exceeded threshold of {} minutes", conf.getLoadBalancerReportUpdateMaxIntervalMinutes());
// Always update after surpassing the maximum interval.
return true;
}
final double maxChange = Math.max(
        100.0 * Math.abs(lastData.getMaxResourceUsage() - localData.getMaxResourceUsage()),
        Math.max(
                percentChange(lastData.getMsgRateIn() + lastData.getMsgRateOut(),
                        localData.getMsgRateIn() + localData.getMsgRateOut()),
                Math.max(
                        percentChange(lastData.getMsgThroughputIn() + lastData.getMsgThroughputOut(),
                                localData.getMsgThroughputIn() + localData.getMsgThroughputOut()),
                        percentChange(lastData.getNumBundles(), localData.getNumBundles()))));
if (maxChange > conf.getLoadBalancerReportUpdateThresholdPercentage()) {
log.info("Writing local data to metadata store because maximum change {}% exceeded threshold {}%; " + "time since last report written is {} seconds", maxChange, conf.getLoadBalancerReportUpdateThresholdPercentage(), timeSinceLastReportWrittenToStore / 1000.0);
return true;
}
return false;
} | 3.26 |
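The comparison above relies on a percentChange helper that is not included in this snippet. A minimal sketch consistent with how it is used here (relative change expressed as a percentage); this is a hypothetical reconstruction, not the actual helper:

// Hypothetical reconstruction of the percentChange helper referenced above.
private static double percentChange(final double oldValue, final double newValue) {
    if (oldValue == 0) {
        // Treat any change away from zero as a maximal change.
        return newValue == 0 ? 0 : Double.POSITIVE_INFINITY;
    }
    return 100 * Math.abs((oldValue - newValue) / oldValue);
}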
pulsar_ModularLoadManagerImpl_updateAllBrokerData_rdh | // As the leader broker, update the broker data map in loadData by querying metadata store for the broker data put
// there by each broker via updateLocalBrokerData.
private void updateAllBrokerData() {
final Set<String> activeBrokers = getAvailableBrokers();
final Map<String, BrokerData> brokerDataMap = loadData.getBrokerData();
for (String broker : activeBrokers) {
try {
String key = String.format("%s/%s", LoadManager.LOADBALANCE_BROKERS_ROOT, broker);
Optional<LocalBrokerData> localData = brokersData.readLock(key).get();
if (!localData.isPresent()) {
brokerDataMap.remove(broker);
log.info("[{}] Broker load report is not present", broker);
continue;
}
if (brokerDataMap.containsKey(broker)) {
// Replace previous local broker data.
brokerDataMap.get(broker).setLocalData(localData.get());
} else {
// Initialize BrokerData object for previously unseen
// brokers.
brokerDataMap.put(broker, new BrokerData(localData.get()));
}
} catch (Exception e) {
log.warn("Error reading broker data from cache for broker - [{}], [{}]", broker, e.getMessage());
}
}
// Remove obsolete brokers.
for (final String broker : brokerDataMap.keySet()) {
if (!activeBrokers.contains(broker)) {
brokerDataMap.remove(broker);
}
}
} | 3.26 |
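Note that the final loop above removes entries while iterating the map's key set, which is only safe when the underlying map tolerates concurrent modification (presumably a concurrent map here). With a plain HashMap, the idiomatic equivalent would be:

// Equivalent obsolete-broker cleanup that is safe for any Map implementation.
brokerDataMap.keySet().removeIf(broker -> !activeBrokers.contains(broker));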
pulsar_Schema_generic_rdh | /**
* Returns a generic schema of existing schema info.
*
* <p>Only supports AVRO and JSON.
*
* @param schemaInfo
* schema info
* @return a generic schema instance
*/
static GenericSchema<GenericRecord> generic(SchemaInfo schemaInfo) {
return DefaultImplementation.getDefaultImplementation().getGenericSchema(schemaInfo);
} | 3.26 |
pulsar_Schema_supportSchemaVersioning_rdh | /**
* Returns whether this schema supports versioning.
*
* <p>Most schema implementations don't support schema versioning, or it simply doesn't
* make sense to (e.g. primitive schemas). Only schemas that return
* {@link GenericRecord} should support schema versioning.
*
* <p>If a schema implementation returns <tt>false</tt>, it should implement {@link #decode(byte[])};
* if it returns <tt>true</tt>, it should implement {@link #decode(byte[], byte[])}
* instead.
*
* @return true if this schema implementation supports schema versioning; otherwise returns false.
*/
default boolean supportSchemaVersioning() {
return false;
} | 3.26 |
pulsar_Schema_PROTOBUF_rdh | /**
* Create a Protobuf schema type with schema definition.
*
* @param schemaDefinition
* the definition of the schema
* @return a Schema instance
*/
static <T extends GeneratedMessageV3> Schema<T> PROTOBUF(SchemaDefinition<T> schemaDefinition) {
return DefaultImplementation.getDefaultImplementation().newProtobufSchema(schemaDefinition);
} | 3.26 |
pulsar_Schema_getNativeSchema_rdh | /**
* Return the native schema that is wrapped by the Pulsar API.
* For instance, with this method you can access the underlying Avro schema.
*
* @return the internal schema or null if not present
*/
default Optional<Object> getNativeSchema() {
return Optional.empty();
} | 3.26 |
pulsar_Schema_getSchema_rdh | // CHECKSTYLE.ON: MethodName
static Schema<?> getSchema(SchemaInfo schemaInfo) {
return DefaultImplementation.getDefaultImplementation().getSchema(schemaInfo);
} | 3.26 |
pulsar_Schema_JSON_rdh | /**
* Create a JSON schema type with schema definition.
*
* @param schemaDefinition
* the definition of the schema
* @return a Schema instance
*/
static <T> Schema<T> JSON(SchemaDefinition schemaDefinition) {
return DefaultImplementation.getDefaultImplementation().newJSONSchema(schemaDefinition);
} | 3.26 |
pulsar_Schema_AUTO_PRODUCE_BYTES_rdh | /**
* Create a schema instance that accepts a serialized payload
* and validates it against the schema specified.
*
* @return the auto schema instance
* @since 2.5.0
* @see #AUTO_PRODUCE_BYTES()
*/
static Schema<byte[]> AUTO_PRODUCE_BYTES(Schema<?> schema) {
return DefaultImplementation.getDefaultImplementation().newAutoProduceSchema(schema);
} | 3.26 |
pulsar_Schema_configureSchemaInfo_rdh | /**
* Configure the schema to use the provided schema info.
*
* @param topic
* topic name
* @param componentName
* component name
* @param schemaInfo
* schema info
*/
default void configureSchemaInfo(String topic, String componentName, SchemaInfo schemaInfo) {
// no-op
} | 3.26 |
pulsar_Schema_AUTO_CONSUME_rdh | /**
* Create a schema instance that automatically deserialize messages
* based on the current topic schema.
*
* <p>The message values are deserialized into a {@link GenericRecord} object,
* that extends the {@link GenericObject} interface.
*
* @return the auto schema instance
*/
static Schema<GenericRecord> AUTO_CONSUME() {
return DefaultImplementation.getDefaultImplementation().newAutoConsumeSchema();
} | 3.26 |
pulsar_Schema_m1_rdh | /**
* Create an Avro schema type with a schema definition.
*
* @param schemaDefinition
* the definition of the schema
* @return a Schema instance
*/
static <T> Schema<T> m1(SchemaDefinition<T> schemaDefinition) {
return DefaultImplementation.getDefaultImplementation().newAvroSchema(schemaDefinition);
} | 3.26 |
pulsar_Schema_AVRO_rdh | /**
* Create an Avro schema type using the default configuration of the class.
*
* @param pojo
* the POJO class to be used to extract the Avro schema
* @return a Schema instance
*/
static <T> Schema<T> AVRO(Class<T> pojo) {
return DefaultImplementation.getDefaultImplementation().newAvroSchema(SchemaDefinition.builder().withPojo(pojo).build());
} | 3.26 |
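A brief usage sketch (the User POJO, the client variable, and the topic name are illustrative, not from this snippet): the schema returned here is typically passed when building a producer or consumer:

// Hypothetical POJO; any Avro-compatible class works.
public class User {
    public String name;
    public int age;
}

Schema<User> schema = Schema.AVRO(User.class);
Producer<User> producer = client.newProducer(schema)   // client: an existing PulsarClient
        .topic("persistent://public/default/users")    // illustrative topic name
        .create();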
pulsar_Schema_validate_rdh | /**
* Check if the message is a valid object for this schema.
*
* <p>The implementation can choose its most efficient approach to validate the schema.
* If the implementation doesn't provide it, it will attempt to use {@link #decode(byte[])}
* to see if this schema can decode this message or not as a validation mechanism to verify
* the bytes.
*
* @param message
* the message to verify
* @throws SchemaSerializationException
* if it is not a valid message
*/
default void validate(byte[] message) {
decode(message);
} | 3.26 |
pulsar_Schema_NATIVE_AVRO_rdh | /**
* Create a schema instance that accepts a serialized Avro payload
* without validating it against the schema specified.
* It can be useful when migrating data from existing event or message stores.
*
* @return the auto schema instance
* @since 2.9.0
*/
static Schema<byte[]> NATIVE_AVRO(Object schema) {
return DefaultImplementation.getDefaultImplementation().newAutoProduceValidatedAvroSchema(schema);
} | 3.26 |
pulsar_Schema_decode_rdh | /**
* Decode a ByteBuffer into an object. <br/>
*
* @param data
* the ByteBuffer to decode
* @return the deserialized object
*/
default T decode(ByteBuffer data) {
if (data == null) {
return null;
}
return decode(getBytes(data));
} | 3.26 |
pulsar_Schema_m0_rdh | /**
* Decode a ByteBuffer into an object using a given version. <br/>
*
* @param data
* the ByteBuffer to decode
* @param schemaVersion
* the schema version to decode the object. null indicates using latest version.
* @return the deserialized object
*/
default T m0(ByteBuffer data, byte[] schemaVersion) {
if (data == null) {
return null;
}
return decode(getBytes(data), schemaVersion);
} | 3.26 |
pulsar_Schema_PROTOBUF_NATIVE_rdh | /**
* Create a Protobuf-Native schema type with schema definition.
*
* @param schemaDefinition
* the definition of the schema
* @return a Schema instance
*/
static <T extends GeneratedMessageV3> Schema<T> PROTOBUF_NATIVE(SchemaDefinition<T> schemaDefinition) {
return DefaultImplementation.getDefaultImplementation().newProtobufNativeSchema(schemaDefinition);
} | 3.26 |
pulsar_Schema_KeyValue_rdh | /**
* Key Value Schema using passed in key, value and encoding type schemas.
*/
static <K, V> Schema<KeyValue<K, V>> KeyValue(Schema<K> key, Schema<V> value, KeyValueEncodingType keyValueEncodingType) {
return DefaultImplementation.getDefaultImplementation().newKeyValueSchema(key, value, keyValueEncodingType);
} | 3.26 |
pulsar_BinaryProtoLookupService_getPartitionedTopicMetadata_rdh | /**
* Calls broker binaryProto-lookup API to get metadata of a partitioned topic.
*/
public CompletableFuture<PartitionedTopicMetadata> getPartitionedTopicMetadata(TopicName topicName) {
final MutableObject<CompletableFuture> newFutureCreated = new MutableObject<>();
try {
return partitionedMetadataInProgress.computeIfAbsent(topicName, tpName -> {
CompletableFuture<PartitionedTopicMetadata> newFuture = getPartitionedTopicMetadata(serviceNameResolver.resolveHost(), topicName);
newFutureCreated.setValue(newFuture);
return newFuture;
});
} finally {
if (newFutureCreated.getValue() != null) {
newFutureCreated.getValue().whenComplete((v, ex) -> {
partitionedMetadataInProgress.remove(topicName, newFutureCreated.getValue());
});
}
}
} | 3.26 |
pulsar_BinaryProtoLookupService_getBroker_rdh | /**
* Calls broker binaryProto-lookup API to find the broker-service address which can serve a given topic.
*
* @param topicName
* topic-name
* @return broker-socket-address that serves given topic
*/
public CompletableFuture<Pair<InetSocketAddress, InetSocketAddress>> getBroker(TopicName topicName) {
final MutableObject<CompletableFuture> newFutureCreated = new MutableObject<>();
try {
return lookupInProgress.computeIfAbsent(topicName, tpName -> {
CompletableFuture<Pair<InetSocketAddress, InetSocketAddress>> newFuture = findBroker(serviceNameResolver.resolveHost(), false, topicName, 0);
newFutureCreated.setValue(newFuture);
return newFuture;
});
} finally {
if (newFutureCreated.getValue() != null) {
newFutureCreated.getValue().whenComplete((v, ex) -> {
lookupInProgress.remove(topicName, newFutureCreated.getValue());
});
}
}
} | 3.26 |
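Both lookup methods above share the same in-flight deduplication pattern: concurrent callers for one topic reuse a single pending future, which unregisters itself on completion. A generic sketch of that pattern (names hypothetical, not part of this codebase):

// Generic sketch of the dedup idiom used by both methods above.
static <K, V> CompletableFuture<V> dedupe(ConcurrentMap<K, CompletableFuture<V>> inProgress,
                                          K key,
                                          Function<K, CompletableFuture<V>> loader) {
    MutableObject<CompletableFuture<V>> created = new MutableObject<>();
    try {
        return inProgress.computeIfAbsent(key, k -> {
            CompletableFuture<V> future = loader.apply(k);
            created.setValue(future);
            return future;
        });
    } finally {
        if (created.getValue() != null) {
            // Remove only the exact future we registered, once it completes.
            created.getValue().whenComplete((v, ex) -> inProgress.remove(key, created.getValue()));
        }
    }
}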
pulsar_HeapDumpUtil_dumpHeap_rdh | /**
* Dump the heap of the JVM.
*
* @param file
* the system-dependent filename
* @param liveObjects
* if true, dump only live objects, i.e. objects that are reachable from GC roots
*/
public static void dumpHeap(File file, boolean liveObjects) {
try {
HotSpotDiagnosticMXBean hotspotMBean = getHotSpotDiagnosticMXBean();
hotspotMBean.dumpHeap(file.getAbsolutePath(), liveObjects);
} catch (Exception e) {
throw new RuntimeException("Error generating heap dump",
e);
}
} | 3.26 |
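A quick usage sketch (path illustrative): passing true restricts the dump to live objects, which usually makes the file much smaller:

File dumpFile = new File("/tmp/heap.hprof");  // illustrative path; .hprof is the conventional extension
HeapDumpUtil.dumpHeap(dumpFile, true);        // true => only live (reachable) objects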
pulsar_HeapDumpUtil_getHotSpotDiagnosticMXBean_rdh | // Utility method to get the HotSpotDiagnosticMXBean
private static HotSpotDiagnosticMXBean getHotSpotDiagnosticMXBean() {
try {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
return ManagementFactory.newPlatformMXBeanProxy(server, HOTSPOT_BEAN_NAME, HotSpotDiagnosticMXBean.class);
} catch (Exception e) {
throw new RuntimeException(e);
}
} | 3.26 |
pulsar_ClientCredentialsFlow_fromParameters_rdh | /**
* Constructs a {@link ClientCredentialsFlow} from configuration parameters.
*
* @param params
* @return
*/
public static ClientCredentialsFlow fromParameters(Map<String, String> params) {
URL v4 = parseParameterUrl(params, CONFIG_PARAM_ISSUER_URL);
String privateKeyUrl = parseParameterString(params, f0);
// These are optional parameters, so we only perform a get
String scope = params.get(CONFIG_PARAM_SCOPE);
String audience = params.get(CONFIG_PARAM_AUDIENCE);
return ClientCredentialsFlow.builder().issuerUrl(v4).audience(audience).privateKey(privateKeyUrl).scope(scope).build();
} | 3.26 |
pulsar_ClientCredentialsFlow_loadPrivateKey_rdh | /**
* Loads the private key from the given URL.
*
* @param privateKeyURL
* @return
* @throws IOException
*/
private static KeyFile loadPrivateKey(String privateKeyURL) throws IOException {
try {
URLConnection urlConnection = new URL(privateKeyURL).openConnection();
try {
String protocol = urlConnection.getURL().getProtocol();
String contentType = urlConnection.getContentType();
if ("data".equals(protocol) && (!"application/json".equals(contentType))) {throw new IllegalArgumentException("Unsupported media type or encoding format: " + urlConnection.getContentType());
}
KeyFile privateKey;
try (Reader r = new InputStreamReader(((InputStream) (urlConnection.getContent())), StandardCharsets.UTF_8)) {
privateKey = KeyFile.fromJson(r);
}
return privateKey;
} finally {
IOUtils.close(urlConnection);
}
} catch (URISyntaxException | InstantiationException | IllegalAccessException e) {
throw new IOException("Invalid privateKey format", e);
}
} | 3.26 |
pulsar_StructuredEventLog_get_rdh | /**
* Create a new logger object, from which root events can be created.
*/
static StructuredEventLog get() {
return Initializer.get();
} | 3.26 |
pulsar_WRRPlacementStrategy_findBrokerForPlacement_rdh | /**
* Function : getByWeightedRoundRobin returns ResourceUnit selected by WRR algorithm
* based on available resource on RU.
* <code>
* ^
* |
* |
* |
* | | | | |
* | | | | |
* | Broker 2 | Broker 3 | Broker 1 | B4 |
* | | | | |
* +----------------+------------------------+--------------------------------+---------
* 0 20 50 90 100
*
* This is weighted Round robin, we calculate weight based on availability of resources;
* total availability is taken as a full range then each broker is given range based on
* its resource availability, if the number generated within total range happens to be in
* broker's range, that broker is selected
* </code>
*/
public ResourceUnit findBrokerForPlacement(Multimap<Long, ResourceUnit> finalCandidates) {
if (finalCandidates.isEmpty()) {
return null;
}
log.debug("Total Final Candidates selected - [{}]", finalCandidates.size());
int totalAvailability = 0;
for (Map.Entry<Long, ResourceUnit> candidateOwner : finalCandidates.entries()) {
totalAvailability += candidateOwner.getKey().intValue();
}
ResourceUnit selectedRU = null;
if (totalAvailability <= 0) {
// todo: this means all the brokers are overloaded and we can't assign this namespace to any broker
// for now, pick anyone and return that one, because when we don't have ranking we put 0 for each broker
return finalCandidates.get(0L).stream().skip(rand.nextInt(finalCandidates.size())).findFirst().orElse(null);
}
int weightedSelector = rand.nextInt(totalAvailability);
log.debug("Generated Weighted Selector Number - [{}] ", weightedSelector);
int weightRangeSoFar = 0;
for (Map.Entry<Long, ResourceUnit> candidateOwner : finalCandidates.entries()) {
weightRangeSoFar += candidateOwner.getKey();
if (weightedSelector < weightRangeSoFar) {
selectedRU = candidateOwner.getValue();
log.debug(" Weighted Round Robin Selected RU - [{}]", candidateOwner.getValue().getResourceId());
break;
}
}
return selectedRU;
} | 3.26 |
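A minimal standalone walk-through of the selection loop above, using the weights drawn in the diagram (20, 30, 40, 10; totalAvailability = 100):

int[] weights = {20, 30, 40, 10};   // availability per broker, as in the diagram
int weightedSelector = 57;          // e.g. the result of rand.nextInt(100)
int weightRangeSoFar = 0;
int selected = -1;
for (int i = 0; i < weights.length; i++) {
    weightRangeSoFar += weights[i]; // bands: [0,20), [20,50), [50,90), [90,100)
    if (weightedSelector < weightRangeSoFar) {
        selected = i;               // 57 falls in [50,90) => index 2 (Broker 1 in the diagram)
        break;
    }
}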
pulsar_PulsarSchemaToKafkaSchema_parseAvroSchema_rdh | // Parse json to shaded schema
private static Schema parseAvroSchema(String schemaJson) {
final Parser parser = new Parser();
parser.setValidateDefaults(false);
return parser.parse(schemaJson);
} | 3.26 |
pulsar_KeySharedPolicy_setAllowOutOfOrderDelivery_rdh | /**
* If enabled, it will relax the ordering requirement, allowing the broker to send out-of-order messages in case of
* failures. This will make it faster for new consumers to join without being stalled by an existing slow consumer.
*
* <p>In this case, a single consumer will still receive all the keys, but they may be coming in different orders.
*
* @param allowOutOfOrderDelivery
* whether to allow for out of order delivery
* @return KeySharedPolicy instance
*/
public KeySharedPolicy setAllowOutOfOrderDelivery(boolean allowOutOfOrderDelivery) {
this.allowOutOfOrderDelivery = allowOutOfOrderDelivery;
return this;
} | 3.26 |
pulsar_WatermarkTimeEvictionPolicy_evict_rdh | /**
* {@inheritDoc }
* <p/>
* Keeps events with future ts in the queue for processing in the next
* window. If the ts difference is more than the lag, stops scanning
* the queue for the current window.
*/
@Override
public Action evict(Event<T> event) {
if (evictionContext == null) {
// It is possible to get asked about eviction before we have a context, due to
// WindowManager.compactWindow. In this case we should hold on to all the events.
// When the first watermark is received, the context will be set, and the events
// will be reevaluated for eviction.
return Action.STOP;
}
long referenceTime = evictionContext.getReferenceTime();
long diff = referenceTime - event.getTimestamp();
if (diff < (-lag)) {
return Action.STOP;
} else if (diff < 0) {
return Action.KEEP;
} else {
return super.evict(event);
}
} | 3.26 |
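A small worked example of the lag arithmetic above, assuming referenceTime = 100 and lag = 10 (values illustrative):

// diff = referenceTime - event.getTimestamp()
// ts = 115 -> diff = -15 < -lag        -> STOP (too far in the future; end the scan)
// ts = 105 -> diff = -5, in [-lag, 0)  -> KEEP (future event; hold for the next window)
// ts = 90  -> diff = 10 >= 0           -> defer to super.evict(event)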
pulsar_BatchMessageIdImpl_toMessageIdImpl_rdh | // MessageIdImpl is widely used as the key of a hash map, in this case, we should convert the batch message id to
// have the correct hash code.
@Deprecated
public MessageIdImpl toMessageIdImpl() {
return ((MessageIdImpl) (MessageIdAdvUtils.discardBatch(this)));
} | 3.26 |
pulsar_HeapHistogramUtil_callDiagnosticCommand_rdh | /**
* Calls a diagnostic command.
* The available operations are similar to what the jcmd commandline tool has,
* however the naming of the operations are different. The "help" operation can be used
* to find out the available operations. For example, the jcmd command "Thread.print" maps
* to "threadPrint" operation name.
*/
static String callDiagnosticCommand(String operationName, String... args) throws JMException {
return ((String) (ManagementFactory.getPlatformMBeanServer().invoke(new ObjectName("com.sun.management:type=DiagnosticCommand"), operationName, new Object[]{ args }, new String[]{ String[].class.getName() })));
} | 3.26 |
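Following the naming rule described above (so the jcmd command "GC.class_histogram" would map to "gcClassHistogram"), a heap histogram could be requested like this sketch:

// Operation name derived from the jcmd-to-camelCase mapping described in the Javadoc above.
String histogram = HeapHistogramUtil.callDiagnosticCommand("gcClassHistogram");
System.out.println(histogram);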
pulsar_JSONSchema_getBackwardsCompatibleJsonSchemaInfo_rdh | /**
* Implemented for backwards compatibility reasons:
* the original schema generated by JSONSchema was based on the JSON Schema standard;
* since then we have standardized on Avro.
*
* @return
*/
public SchemaInfo getBackwardsCompatibleJsonSchemaInfo() {
SchemaInfo backwardsCompatibleSchemaInfo;
try {
ObjectWriter objectWriter = ObjectMapperFactory.getMapperWithIncludeAlways().writer();
JsonSchemaGenerator schemaGen = new JsonSchemaGenerator(objectWriter);
JsonSchema jsonBackwardsCompatibleSchema = schemaGen.generateSchema(pojo);
backwardsCompatibleSchemaInfo = SchemaInfoImpl.builder().name("").properties(schemaInfo.getProperties()).type(SchemaType.JSON).schema(objectWriter.writeValueAsBytes(jsonBackwardsCompatibleSchema)).build();
} catch (JsonProcessingException ex) {
throw new RuntimeException(ex);
}
return backwardsCompatibleSchemaInfo;
} | 3.26 |
pulsar_JSONSchema_clearCaches_rdh | /**
* Clears the caches tied to the ObjectMapper instances and replaces the singleton ObjectMapper instance.
*
* This can be used in tests to ensure that classloaders and class references don't leak across tests.
*/
public static void clearCaches() {
jsonMapper().getTypeFactory().clearCache();
replaceSingletonInstance();
} | 3.26 |
pulsar_AbstractDispatcherSingleActiveConsumer_disconnectAllConsumers_rdh | /**
* Disconnect all consumers on this dispatcher (server side close). This triggers channelInactive on the inbound
* handler which calls dispatcher.removeConsumer(), where the closeFuture is completed.
*
* @return
*/
public synchronized CompletableFuture<Void> disconnectAllConsumers(boolean isResetCursor) {
closeFuture = new CompletableFuture<>();
if (!consumers.isEmpty()) {
consumers.forEach(consumer -> consumer.disconnect(isResetCursor));
cancelPendingRead();
} else {
// no consumer connected, complete disconnect immediately
closeFuture.complete(null);
}
return closeFuture;
} | 3.26 |
pulsar_AbstractDispatcherSingleActiveConsumer_canUnsubscribe_rdh | /**
* Handle the unsubscribe command from the client API. For a failover subscription, we can
* unsubscribe only if the calling consumer is the connected (active) consumer.
*
* @param consumer
* Calling consumer object
*/
public synchronized boolean canUnsubscribe(Consumer consumer) {
return (consumers.size() == 1) && Objects.equals(consumer, ACTIVE_CONSUMER_UPDATER.get(this));
} | 3.26 |
pulsar_AbstractDispatcherSingleActiveConsumer_pickAndScheduleActiveConsumer_rdh | /**
* Pick active consumer for a topic for {@link SubType#Failover} subscription.
* If it's a non-partitioned topic, it picks the consumer based on the order they subscribed to the topic.
* If it's a partitioned topic, it first sorts consumers by priority level and consumer name, then
* distributes partitions evenly across the consumers with the highest priority level.
*
* @return true if the active consumer changed, otherwise false.
*/
protected boolean pickAndScheduleActiveConsumer() {
checkArgument(!consumers.isEmpty());
AtomicBoolean hasPriorityConsumer = new AtomicBoolean(false);
consumers.sort((c1, c2) ->
{
int priority = c1.getPriorityLevel() - c2.getPriorityLevel();
if (priority != 0) {
hasPriorityConsumer.set(true);
return priority;
}
return c1.consumerName().compareTo(c2.consumerName());
});
int consumersSize = consumers.size();
// find number of consumers which are having the highest priorities. so partitioned-topic assignment happens
// evenly across highest priority consumers
if (hasPriorityConsumer.get()) {
int highestPriorityLevel = consumers.get(0).getPriorityLevel();
for (int v4 = 0; v4 < consumers.size(); v4++) {
if (highestPriorityLevel != consumers.get(v4).getPriorityLevel()) {
consumersSize = v4;
break;
}
}
}
int index = (partitionIndex >= 0) ? partitionIndex % consumersSize : peekConsumerIndexFromHashRing(makeHashRing(consumersSize));
Consumer prevConsumer = ACTIVE_CONSUMER_UPDATER.getAndSet(this, consumers.get(index));
Consumer activeConsumer = ACTIVE_CONSUMER_UPDATER.get(this);
if (prevConsumer == activeConsumer) {
// Active consumer did not change. Do nothing at this point
return false;
} else {
// If the active consumer is changed, send notification.
scheduleReadOnActiveConsumer();
return true;
}
} | 3.26 |
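A small worked example of the priority trimming above (names and priority levels illustrative):

// Sorted consumers: (c1, priority 0), (c2, priority 0), (c3, priority 1)
// highestPriorityLevel = 0, so consumersSize is trimmed to 2.
// For a partitioned topic, partitionIndex % 2 then alternates partitions between c1 and c2;
// c3 only becomes eligible once no priority-0 consumer remains.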
pulsar_URIPreconditions_checkURI_rdh | /**
* Check whether the given string is a legal URI and passes the user's check.
*
* @param uri
* URI String
* @param predicate
* User defined rule
* @throws IllegalArgumentException
* Illegal URI or failed in the user's rules
*/
public static void checkURI(@Nonnull String uri, @Nonnull Predicate<URI> predicate)
        throws IllegalArgumentException {
checkURI(uri, predicate, null);
} | 3.26 |
pulsar_URIPreconditions_checkURIIfPresent_rdh | /**
* Check whether the given string is a legal URI and passes the user's check.
*
* @param uri
* URI String
* @param predicate
* User defined rule
* @throws IllegalArgumentException
* Illegal URI or failed in the user's rules
*/
public static void checkURIIfPresent(@Nullable String uri, @Nonnull Predicate<URI> predicate)
        throws IllegalArgumentException {
checkURIIfPresent(uri, predicate, null);
} | 3.26 |
pulsar_ServiceConfigurationUtils_getInternalListener_rdh | /**
* Gets the internal advertised listener for broker-to-broker communication.
*
* @return a non-null advertised listener
*/
public static AdvertisedListener getInternalListener(ServiceConfiguration config, String protocol) {
Map<String, AdvertisedListener> result = MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config);
AdvertisedListener internal = result.get(config.getInternalListenerName());
if ((internal == null) || (!internal.hasUriForProtocol(protocol))) {
// Search for an advertised listener for same protocol
for (AdvertisedListener l : result.values()) {
if (l.hasUriForProtocol(protocol)) {
internal = l;
break;
}
}
}
if (internal == null) {
// synthesize an advertised listener based on legacy configuration properties
String host = ServiceConfigurationUtils.getDefaultOrConfiguredAddress(config.getAdvertisedAddress());
internal = AdvertisedListener.builder().brokerServiceUrl(createUriOrNull("pulsar", host, config.getBrokerServicePort())).brokerServiceUrlTls(createUriOrNull("pulsar+ssl", host, config.getBrokerServicePortTls())).build();
}
return internal;
} | 3.26 |
pulsar_ServiceConfigurationUtils_getAppliedAdvertisedAddress_rdh | /**
* Get the address of Broker, first try to get it from AdvertisedAddress.
* If it is not set, try to get the address set by advertisedListener.
* If it is still not set, get it through InetAddress.getLocalHost().
*
* @param configuration
* @param ignoreAdvertisedListener
* Sometimes we can’t use the default key of AdvertisedListener,
* setting it to true can ignore AdvertisedListener.
* @return
*/
@Deprecated
public static String getAppliedAdvertisedAddress(ServiceConfiguration configuration, boolean ignoreAdvertisedListener) {
Map<String, AdvertisedListener> result = MultipleListenerValidator.validateAndAnalysisAdvertisedListener(configuration);
String advertisedAddress = configuration.getAdvertisedAddress();
if (advertisedAddress != null) {
return advertisedAddress;
}
AdvertisedListener advertisedListener = result.get(configuration.getInternalListenerName());
if ((advertisedListener != null) && (!ignoreAdvertisedListener)) {
String address = advertisedListener.getBrokerServiceUrl().getHost();
if (address != null) {
return address;
}
}
return getDefaultOrConfiguredAddress(advertisedAddress);
} | 3.26 |
pulsar_PersistentAcknowledgmentsGroupingTracker_isDuplicate_rdh | /**
* Since the acks are delayed, we need to do some best-effort duplicate check to discard messages that are being
* resent after a disconnection and for which the user has already sent an acknowledgement.
*/
@Override
public boolean isDuplicate(MessageId messageId) {
if (!(messageId instanceof MessageIdAdv)) {
throw new IllegalArgumentException((("isDuplicated cannot accept " + messageId.getClass().getName()) + ": ") + messageId);
}
final MessageIdAdv messageIdAdv = ((MessageIdAdv) (messageId));
if (lastCumulativeAck.compareTo(messageIdAdv) >= 0) {
// Already included in a cumulative ack
return true;
} else {// If "batchIndexAckEnabled" is false, the batched messages acknowledgment will be traced by
// pendingIndividualAcks. So no matter what type the message ID is, check with "pendingIndividualAcks"
// first.
MessageIdAdv key = MessageIdAdvUtils.discardBatch(messageIdAdv);
if (pendingIndividualAcks.contains(key)) {
return true;
}
if (messageIdAdv.getBatchIndex() >= 0) {
ConcurrentBitSetRecyclable v2 = pendingIndividualBatchIndexAcks.get(key);
return (v2 != null) && (!v2.get(messageIdAdv.getBatchIndex()));
}
return false;
}
} | 3.26 |
pulsar_PersistentAcknowledgmentsGroupingTracker_flush_rdh | /**
* Flush all the pending acks and send them to the broker.
*/
@Override
public void flush() {
ClientCnx cnx = consumer.getClientCnx();
if (cnx == null) {
if (log.isDebugEnabled()) {
log.debug("[{}] Cannot flush pending acks since we're not connected to broker", consumer);
}
return;
}
Optional<Lock> writeLock = acquireWriteLock();
try {
flushAsync(cnx);
} finally {
writeLock.ifPresent(Lock::unlock);
}
} | 3.26 |
pulsar_PropertiesUtils_filterAndMapProperties_rdh | /**
* Filters the {@link Properties} object so that only properties with the configured prefix are retained,
* and then replaces the srcPrefix with the targetPrefix when putting the key value pairs in the resulting map.
*
* @param props
* - the properties object to filter
* @param srcPrefix
* - the prefix to filter against and then remove for keys in the resulting map
* @param targetPrefix
* - the prefix to add to keys in the result map
* @return a map of properties
*/
public static Map<String, Object> filterAndMapProperties(Properties props, String srcPrefix, String targetPrefix) {
Map<String, Object> result = new HashMap<>();
int prefixLength = srcPrefix.length();
props.forEach((keyObject, value) -> {
if (!(keyObject instanceof String)) {
return;
}
String key = ((String) (keyObject));
if (key.startsWith(srcPrefix) && (value != null)) {
String truncatedKey = key.substring(prefixLength);
result.put(targetPrefix + truncatedKey, value);
}
});
return result;
} | 3.26 |
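A quick usage sketch (keys illustrative):

Properties props = new Properties();
props.setProperty("brokerClient_authParams", "token:abc");  // illustrative keys
props.setProperty("unrelated", "ignored");                  // filtered out: wrong prefix
Map<String, Object> mapped =
        PropertiesUtils.filterAndMapProperties(props, "brokerClient_", "client.");
// mapped contains exactly {"client.authParams" -> "token:abc"}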
pulsar_Producer_completed_rdh | /**
* Executed from managed ledger thread when the message is persisted.
*/
@Override
public void completed(Exception exception, long ledgerId, long entryId) {
if (exception != null) {
final ServerError serverError = getServerError(exception);
producer.cnx.execute(() -> {
if (!(exception instanceof TopicClosedException)) {
// For TopicClosed exception there's no need to send explicit error, since the client was
// already notified
long callBackSequenceId = Math.max(highestSequenceId, sequenceId);
producer.cnx.getCommandSender().sendSendError(producer.producerId, callBackSequenceId, serverError, exception.getMessage());
}
producer.cnx.completedSendOperation(producer.isNonPersistentTopic, msgSize);
producer.publishOperationCompleted();
recycle();
});
} else {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] [{}] triggered send callback. cnx {}, sequenceId {}",
producer.topic, producer.producerName, producer.producerId, producer.cnx.clientAddress(), sequenceId);
}
this.f0 = ledgerId;
this.entryId = entryId;
producer.cnx.execute(this);
}
} | 3.26 |
pulsar_Producer_run_rdh | /**
* Executed from I/O thread when sending receipt back to client.
*/
@Override
public void run() {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] [{}] Persisted message. cnx {}, sequenceId {}", producer.topic, producer.producerName, producer.producerId, producer.cnx, sequenceId);
}
// stats
rateIn.recordMultipleEvents(batchSize, msgSize);
producer.topic.recordAddLatency(System.nanoTime() - startTimeNs, TimeUnit.NANOSECONDS);
producer.cnx.getCommandSender().sendSendReceiptResponse(producer.producerId, sequenceId, highestSequenceId, f0, entryId);
producer.cnx.completedSendOperation(producer.isNonPersistentTopic, msgSize);
if (this.chunked) {
producer.chunkedMessageRate.recordEvent();
}
producer.publishOperationCompleted();
if (producer.brokerInterceptor != null) {
producer.brokerInterceptor.messageProduced(((ServerCnx) (producer.cnx)), producer, startTimeNs, f0, entryId, this);
}
recycle();
} | 3.26 |
pulsar_Producer_disconnect_rdh | /**
* It closes the producer from server-side and sends command to client to disconnect producer from existing
* connection without closing that connection.
*
* @return Completable future indicating completion of producer close
*/
public CompletableFuture<Void> disconnect(Optional<BrokerLookupData> assignedBrokerLookupData) {
if ((!closeFuture.isDone()) && isDisconnecting.compareAndSet(false, true)) {
log.info("Disconnecting producer: {}", this);cnx.execute(() -> {
cnx.closeProducer(this, assignedBrokerLookupData);
closeNow(true);
});
}
return closeFuture;
} | 3.26 |
pulsar_Producer_isSuccessorTo_rdh | /**
* Method to determine if this producer can replace another producer.
*
* @param other
* - producer to compare to this one
* @return true if this producer is a subsequent instantiation of the same logical producer. Otherwise, false.
*/
public boolean isSuccessorTo(Producer other) {
return (((Objects.equals(producerName, other.producerName) && Objects.equals(topic, other.topic)) && (producerId == other.producerId)) && Objects.equals(cnx, other.cnx)) && (other.getEpoch() < epoch);} | 3.26 |
pulsar_Producer_close_rdh | /**
* Close the producer immediately if: a. the connection is dropped b. it's a graceful close and no pending publish
* acks are left else wait for pending publish acks
*
* @return completable future indicate completion of close
*/
public synchronized CompletableFuture<Void> close(boolean removeFromTopic) {
if (log.isDebugEnabled()) {
log.debug("Closing producer {} -- isClosed={}",
this, isClosed);
}
if (!isClosed) {
isClosed = true;
if (log.isDebugEnabled()) {
log.debug("Trying to close producer {} -- cnxIsActive: {} -- pendingPublishAcks: {}", this, cnx.isActive(), pendingPublishAcks);
}
if ((!cnx.isActive()) || (pendingPublishAcks == 0)) {
closeNow(removeFromTopic);
}
}
return closeFuture;
} | 3.26 |
pulsar_Producer_parseRemoteClusterName_rdh | /**
* Producer name for a replicator is in one of these formats:
* "replicatorPrefix.localCluster" (old)
* "replicatorPrefix.localCluster-->remoteCluster" (new)
*/
private String parseRemoteClusterName(String producerName, boolean isRemote, String replicatorPrefix) {
if (isRemote) {
String clusterName = producerName.substring(replicatorPrefix.length());
return clusterName.contains(REPL_PRODUCER_NAME_DELIMITER) ? clusterName.split(REPL_PRODUCER_NAME_DELIMITER)[0] : clusterName;
}
return null;
} | 3.26 |
pulsar_Producer_getLastSequenceId_rdh | /**
* Return the last sequence id of this producer.
*
* @return the last sequence id
*/
public long getLastSequenceId() {
if (isNonPersistentTopic) {
return -1;
} else {
return ((PersistentTopic) (topic)).getLastPublishedSequenceId(producerName);
}
} | 3.26 |
pulsar_PulsarLedgerIdGenerator_formatHalfId_rdh | /**
* Formats half an ID as 10-character 0-padded string.
*
* @param i
* - 32 bits of the ID to format
* @return a 10-character 0-padded string.
*/
private String formatHalfId(int i) {
return String.format("%010d", i);
} | 3.26 |
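For illustration, two halves formatted this way compose into a fixed-width 20-character id string (the composition shown here is illustrative, not taken from this snippet):

String high = String.format("%010d", 1);    // "0000000001"
String low  = String.format("%010d", 42);   // "0000000042"
String id   = high + low;                   // "00000000010000000042"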
zilla_DefaultBufferPool_release_rdh | /**
* Releases a slot so it may be used by other streams
*
* @param slot
* - Id of a previously acquired slot
*/
@Override
public void release(int slot)
{
assert used.get(slot);
used.clear(slot);
availableSlots.value++;
poolBuffer.putLongOrdered(usedIndex + (slot << 3), 0L);
} | 3.26 |
zilla_HpackHuffman_encodedSize_rdh | // Returns the no of bytes needed to encode src
public static int encodedSize(DirectBuffer src, int offset, int length) {
int totalBits = 0;
for (int i = 0; i < length; i++) {
int index = src.getByte(offset + i) & 0xff;
int bits = CODES[index][1];
totalBits += bits;
}
return (totalBits + 7) / 8;
} | 3.26 |
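The final (totalBits + 7) / 8 is integer ceiling division, rounding the bit count up to whole bytes:

// 10 bits -> (10 + 7) / 8 = 2 bytes
// 16 bits -> (16 + 7) / 8 = 2 bytes (exact multiple, no extra byte)
// 17 bits -> (17 + 7) / 8 = 3 bytes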
zilla_HpackHuffman_encode_rdh | // Huffman encodes src buffer into dst buffer
// Assumes enough space is in the dst buffer
public static void encode(DirectBuffer src, MutableDirectBuffer dst) {
// assert dst.capacity() >= encodedSize(src, 0, src.capacity());
int remainingBits = 0;
int dstIndex = 0;
long currentSeq = 0; // Aligned to LSB, e.g. 0000_0000_0XXX_XXXX
for (int i = 0; i < src.capacity(); i++) {
int index = src.getByte(i) & 0xff;
int code = CODES[index][0];
int bits = CODES[index][1];
// exceeds long (no more space for current bits)
if ((remainingBits + bits) > 64) {
dst.putLong(dstIndex, currentSeq << (64 - remainingBits), BIG_ENDIAN);
dstIndex += remainingBits / 8;
remainingBits = remainingBits % 8;
}
currentSeq <<= bits;
currentSeq |= code;
remainingBits += bits;
}
while (remainingBits > 0) {
if (remainingBits >= 8) {
remainingBits -= 8;
dst.putByte(dstIndex++, ((byte) (currentSeq >> remainingBits)));
} else {
currentSeq <<= 8 - remainingBits;// partial byte, so align to MSB
currentSeq |= 0xff >>> remainingBits;// fill remaining bits with EOS bits
remainingBits = 8;
}
}
} | 3.26 |
zilla_HpackHuffman_transition_rdh | // Build one Node x byte transition
private static void transition(Node node, int b) {
Node cur = node;
String str = node.symbols[b];
for (int i = 7; i >= 0; i--) {
int bit = (b >>> i) & 0x1; // Using MSB to traverse
cur = (bit == 0) ? cur.left : cur.right;
// EOS is invalid in sequence
if ((cur == null) || (cur.symbol == 256)) {
return;
}
// Can have two symbols in a byte traversal
if (cur.symbol != (-1)) {
str = (str == null) ? "" + ((char) (cur.symbol)) : str + ((char) (cur.symbol));
cur = ROOT;
}
}
node.transitions[b] = cur;
node.symbols[b] = str;
}
/* // Searches the huffman tree for a code
private static int search(Node node, int code, int length) {
for(int i=length-1; i >= 0; i--) {
int bit = ((code >>> i) & 0x01);
if (bit == 0) {
node = node.left;
} else {
node = node.right;
}
if (node == null) {
throw new RuntimeException("Not there");
}
}
return node.sym;
}
// Decodes by traversing huffman tree by single bits
public static void decode(Node root, DirectBuffer buf, int offset, int length) {
StringBuilder sb = new StringBuilder();
Node cur = root;
for (int k = 0; k < length; k++) {
byte b = buf.getByte(offset + k);
for (int i = 7; i >= 0; i--) {
int bit = ((b >>> i) & 0x01);
System.out.print(bit+" ");
if (bit == 0) {
cur = cur.left;
} else {
cur = cur.right;
}
if (cur == null) {
throw new RuntimeException("Not there, but parsed until " + sb.toString());
}
if (cur.sym != -1) {
sb.append((char)cur.sym);
cur = root;
}
}
System.out.println();
}
System.out.println(sb.toString());
} | 3.26 |
zilla_HpackContext_staticIndex11_rdh | // Index in static table for the given name of length 11
private static int staticIndex11(DirectBuffer name) {
return (name.getByte(10) == 'r') && STATIC_TABLE[53].name.equals(name) ? 53 : -1; // retry-after
} | 3.26 |
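The staticIndexN helpers in the snippets that follow form a two-level dispatch: a caller (not included in these snippets) first switches on the header name's length, then each helper checks the last byte before paying for a full equals comparison. A hypothetical sketch of that dispatching caller:

// Hypothetical dispatcher sketch; the real caller is not shown in these snippets.
private static int staticIndex(DirectBuffer name) {
    switch (name.capacity()) {
        case 3:  return staticIndex3(name);
        case 4:  return staticIndex4(name);
        case 5:  return staticIndex5(name);
        case 6:  return staticIndex6(name);
        case 8:  return staticIndex8(name);
        case 10: return staticIndex10(name);
        case 11: return staticIndex11(name);
        // ... one case per name length present in the HPACK static table ...
        default: return -1;
    }
}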
zilla_HpackContext_evict_rdh | // Evicts older entries from dynamic table
private void evict(int noEntries) {
for (int i = 0; i < noEntries; i++) {
HeaderField header = table.get(i);
tableSize -= header.size;
if (encoding) {
Long id = noEvictions + i;
if (id.equals(name2Index.get(header.name))) {
name2Index.remove(header.name, id);
}
NameValue nameValue = new NameValue(header.name, header.value);
if (id.equals(namevalue2Index.get(nameValue))) {
namevalue2Index.remove(nameValue, id);
}
}
}
table.subList(0, noEntries).clear();
noEvictions += noEntries;
} | 3.26 |
zilla_HpackContext_valid_rdh | // @return true if the index is valid
// false otherwise
public boolean valid(int index) {
return (index != 0) && (index < (STATIC_TABLE.length + table.size()));
} | 3.26 |
zilla_HpackContext_staticIndex15_rdh | // Index in static table for the given name of length 15
private static int staticIndex15(DirectBuffer name) {
switch (name.getByte(14)) {
case 'e' :
// accept-language
if (STATIC_TABLE[17].name.equals(name)) {
return 17;
}
break;
case 'g' :
// accept-encoding
if (STATIC_TABLE[16].name.equals(name)) {
return 16;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex8_rdh | // Index in static table for the given name of length 8
private static int staticIndex8(DirectBuffer name) {
switch (name.getByte(7)) {
case 'e' :
// if-range
if (STATIC_TABLE[42].name.equals(name)) {
return 42;
}
break;
case 'h' : // if-match
if (STATIC_TABLE[39].name.equals(name)) {
return 39;
}
break;
case 'n' :
// location
if (STATIC_TABLE[46].name.equals(name)) {
return 46;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex4_rdh | // Index in static table for the given name of length 4
private static int staticIndex4(DirectBuffer name) {
switch (name.getByte(3)) {
case 'e' :
// date
if (STATIC_TABLE[33].name.equals(name)) {
return 33;
}
break;
case 'g' :
// etag
if (STATIC_TABLE[34].name.equals(name)) {
return 34;
}
break;
case 'k' :
// link
if (STATIC_TABLE[45].name.equals(name)) {
return 45;
}
break;
case 'm' :
// from
if (STATIC_TABLE[37].name.equals(name)) {
return 37;
}
break;
case 't' :
// host
if (STATIC_TABLE[38].name.equals(name)) {
return 38;
}
break;
case 'y' :
// vary
if (STATIC_TABLE[59].name.equals(name)) {
return 59;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex18_rdh | // Index in static table for the given name of length 18
private static int staticIndex18(DirectBuffer name) {
return (name.getByte(17) == 'e') && STATIC_TABLE[48].name.equals(name) ? 48 : -1; // proxy-authenticate
} | 3.26 |
zilla_HpackContext_staticIndex5_rdh | // Index in static table for the given name of length 5
private static int staticIndex5(DirectBuffer name) {
switch (name.getByte(4)) {
case 'e' :
// range
if (STATIC_TABLE[50].name.equals(name)) {
return 50;
}
break;
case 'h' :
// path
if (STATIC_TABLE[4].name.equals(name)) {
return 4;
}
break;
case 'w' :
// allow
if (STATIC_TABLE[22].name.equals(name)) {
return 22;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex10_rdh | // Index in static table for the given name of length 10
private static int staticIndex10(DirectBuffer name) {
switch (name.getByte(9)) {
case 'e' :
// set-cookie
if (STATIC_TABLE[55].name.equals(name)) {
return 55;
}
break;
case 't' :
// user-agent
if (STATIC_TABLE[58].name.equals(name)) {
return 58;
}
break;
case 'y' :
// :authority
if (STATIC_TABLE[1].name.equals(name)) {
return 1;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex14_rdh | // Index in static table for the given name of length 14
private static int staticIndex14(DirectBuffer name) {
switch (name.getByte(13)) {
case 'h' :
// content-length
if (STATIC_TABLE[28].name.equals(name)) {
return 28;
}
break;
case 't' :
// accept-charset
if (STATIC_TABLE[15].name.equals(name)) {
return 15;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex25_rdh | // Index in static table for the given name of length 25
private static int staticIndex25(DirectBuffer name) {
return (name.getByte(24) == 'y') && STATIC_TABLE[56].name.equals(name) ? 56 : -1; // strict-transport-security
} | 3.26 |
zilla_HpackContext_m1_rdh | // Index in static table for the given name of length 19
private static int m1(DirectBuffer name) {
switch (name.getByte(18)) {
case 'e' : // if-unmodified-since
if (STATIC_TABLE[43].name.equals(name)) {
return 43;
}
break;
case 'n' :
// content-disposition
if (STATIC_TABLE[25].name.equals(name)) {
return 25;
}
// proxy-authorization
if (STATIC_TABLE[49].name.equals(name)) {
return 49;
}
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex3_rdh | // Index in static table for the given name of length 3
private static int staticIndex3(DirectBuffer name) {
switch (name.getByte(2)) {
case 'a' :
// via
if (STATIC_TABLE[60].name.equals(name)) {
return 60;
}
break;
case 'e' :
// age
if (STATIC_TABLE[21].name.equals(name)) {
return 21;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex12_rdh | // Index in static table for the given name of length 12
private static int staticIndex12(DirectBuffer name) {
switch (name.getByte(11)) {
case 'e' :
// content-type
if (STATIC_TABLE[31].name.equals(name)) {
return 31;
}
break;
case 's' :
// max-forwards
if (STATIC_TABLE[47].name.equals(name)) {
return 47;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex16_rdh | // Index in static table for the given name of length 16
private static int staticIndex16(DirectBuffer name) {
switch (name.getByte(15)) {
case 'e' :
// content-language
if (STATIC_TABLE[27].name.equals(name)) {
return 27;
}
// www-authenticate
if (STATIC_TABLE[61].name.equals(name)) {
return 61;
}
break;
case 'g' :
// content-encoding
if (STATIC_TABLE[26].name.equals(name)) {
return 26;
}
break;
case 'n' :
// content-location
if (STATIC_TABLE[29].name.equals(name)) {
return 29;
}
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex27_rdh | // Index in static table for the given name of length 27
private static int staticIndex27(DirectBuffer name) {
return (name.getByte(26) == 'n') && STATIC_TABLE[20].name.equals(name) ? 20 : -1; // access-control-allow-origin
} | 3.26 |
zilla_HpackContext_staticIndex13_rdh | // Index in static table for the given name of length 13
private static int staticIndex13(DirectBuffer name) {
switch (name.getByte(12)) {
case 'd' :
// last-modified
if (STATIC_TABLE[44].name.equals(name)) {
return 44;
}
break;
case 'e' :
// content-range
if (STATIC_TABLE[30].name.equals(name)) {
return 30;
}
break;
case 'h' :
// if-none-match
if (STATIC_TABLE[41].name.equals(name)) {
return 41;
}
break;
case 'l' :
// cache-control
if (STATIC_TABLE[24].name.equals(name)) {
return 24;
}
break;
case 'n' :
// authorization
if (STATIC_TABLE[23].name.equals(name)) {
return 23;
}
break;
case 's' :
// accept-ranges
if (STATIC_TABLE[18].name.equals(name)) {
return 18;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex17_rdh | // Index in static table for the given name of length 17
private static int staticIndex17(DirectBuffer name) {
switch (name.getByte(16)) {
case 'e' :
// if-modified-since
if (STATIC_TABLE[40].name.equals(name)) {
return 40;
}
break;
case 'g' :
// transfer-encoding
if (STATIC_TABLE[57].name.equals(name)) {
return 57;
}
break;
}
return -1;
} | 3.26 |
zilla_HpackContext_staticIndex6_rdh | // Index in static table for the given name of length 6
private static int staticIndex6(DirectBuffer name) {
switch (name.getByte(5)) {
case 'e' :
// cookie
if (STATIC_TABLE[32].name.equals(name)) {
return 32;
}
break;
case 'r' :
// server
if (STATIC_TABLE[54].name.equals(name)) {
return 54;
}
break;
case 't' :
// accept
if (STATIC_TABLE[19].name.equals(name)) {
return 19;
}
// expect
if (STATIC_TABLE[35].name.equals(name)) {
return 35;
}
break;
}
return -1;
} | 3.26 |
zilla_DispatchAgent_supplyCounterWriter_rdh | // required for testing
public LongConsumer supplyCounterWriter(long bindingId, long metricId) {
return countersLayout.supplyWriter(bindingId, metricId);
} | 3.26 |
zilla_DispatchAgent_supplyHistogramWriter_rdh | // required for testing
public LongConsumer supplyHistogramWriter(long bindingId, long metricId) {
return histogramsLayout.supplyWriter(bindingId, metricId);
} | 3.26 |
zilla_DispatchAgent_supplyGaugeWriter_rdh | // required for testing
public LongConsumer supplyGaugeWriter(long bindingId, long metricId) {
return gaugesLayout.supplyWriter(bindingId, metricId);
} | 3.26 |
zilla_StructFlyweightGenerator_defaultPriorField_rdh | // TODO: Varuint32 should NEVER be < 0
private void defaultPriorField(CodeBlock.Builder code) {
if ((priorDefaultValue != null) && f2) {
code.addStatement("$L($L)", priorFieldIfDefaulted, defaultName(priorFieldIfDefaulted));
} else if (priorDefaultValue == NULL_DEFAULT) {
// Attempt to default the entire object. This will fail if it has any required fields.
if (m5(priorSizeType)) {
code.addStatement("$L(-1)", methodName(priorSizeName)).addStatement("lastFieldSet = $L", index(priorFieldIfDefaulted));
} else if (isVaruintType(priorSizeType) || isVaruintnType(priorSizeType)) {
code.addStatement("$L(0)", methodName(priorSizeName)).addStatement("lastFieldSet = $L", index(priorFieldIfDefaulted));
} else if (priorDefaultedIsString) {
code.addStatement("$L((String) null)", priorFieldIfDefaulted);
} else {
code.addStatement("$L(b -> { })", priorFieldIfDefaulted);
code.addStatement("int limit = limit()");
code.addStatement("limit($L)", dynamicOffset(priorSizeName));
code.addStatement("$L(-1)", methodName(priorSizeName));
code.addStatement("limit(limit)");
}
} else if (priorDefaultedIsEnum) {
code.addStatement("$L(b -> b.set($L))", priorFieldIfDefaulted, defaultName(priorFieldIfDefaulted));
} else if (priorDefaultedIsString) {
code.addStatement("$L($L)", priorFieldIfDefaulted, priorDefaultValue);
} else {
code.addStatement("$L(b -> { })", priorFieldIfDefaulted);
}
} | 3.26 |
zilla_HttpClientFactory_encodeLiteral_rdh | // TODO dynamic table, Huffman, never indexed
private void encodeLiteral(HpackLiteralHeaderFieldFW.Builder builder, HpackContext hpackContext, DirectBuffer nameBuffer, DirectBuffer valueBuffer) {
builder.type(WITHOUT_INDEXING);
final int nameIndex = hpackContext.index(nameBuffer);
if (nameIndex != (-1)) {
builder.name(nameIndex);
} else {
builder.name(nameBuffer, 0, nameBuffer.capacity());
}
builder.value(valueBuffer, 0, valueBuffer.capacity());
} | 3.26 |
zilla_HttpClientFactory_collectHeaders_rdh | // Collect headers into map to resolve target
// TODO avoid this
private void collectHeaders(DirectBuffer name, DirectBuffer value)
{
if (!error()) {
String nameStr = name.getStringWithoutLengthUtf8(0, name.capacity());
String valueStr = value.getStringWithoutLengthUtf8(0, value.capacity());
// TODO cookie needs to be appended with ';'
headers.merge(nameStr, valueStr, (o, n) -> String.format("%s, %s", o, n));
}
} | 3.26 |
zilla_HttpClientFactory_teHeader_rdh | // 8.1.2.2 TE header MUST NOT contain any value other than "trailers".
private void teHeader(DirectBuffer name, DirectBuffer value) {
if (((!error()) && name.equals(TE)) && (!value.equals(TRAILERS))) {
streamError = Http2ErrorCode.PROTOCOL_ERROR;
}
} | 3.26 |
zilla_WsServerFactory_assembleHeader_rdh | // @return number of bytes consumed to assemble websocket header
private int assembleHeader(DirectBuffer buffer, int offset, int length) {
int remaining = Math.min(length, MAXIMUM_HEADER_SIZE - headerLength);
// may copy more than actual header length (up to max header length), but will adjust at the end
header.putBytes(headerLength, buffer, offset, remaining);
int consumed = remaining;
if ((headerLength + remaining) >= 2) {
int wsHeaderLength = wsHeaderLength(header);
// eventual headLength must not be more than wsHeaderLength
if ((headerLength + remaining) > wsHeaderLength) {
consumed = wsHeaderLength - headerLength;
}
}
headerLength += consumed;
return consumed;
} | 3.26 |
zilla_HttpServerFactory_teHeader_rdh | // 8.1.2.2 TE header MUST NOT contain any value other than "trailers".
private void teHeader(DirectBuffer name, DirectBuffer value) {
if (((!error()) && name.equals(TE)) && (!value.equals(TRAILERS))) {
streamError = Http2ErrorCode.PROTOCOL_ERROR;
}
} | 3.26 |
zilla_HttpServerFactory_serverHeader_rdh | // Checks if response has server header
private void serverHeader(HttpHeaderFW header) {
serverHeader |= header.name().value().equals(context.nameBuffer(54));
} | 3.26 |
zilla_HttpServerFactory_collectHeaders_rdh | // Collect headers into map to resolve target
// TODO avoid this
private void collectHeaders(DirectBuffer name, DirectBuffer value) {
if (!error()) {
String nameStr = name.getStringWithoutLengthUtf8(0, name.capacity());
String valueStr = value.getStringWithoutLengthUtf8(0, value.capacity());
// TODO cookie needs to be appended with ';'
headers.merge(nameStr, valueStr, (o, n) -> String.format("%s, %s", o, n));
}
} | 3.26 |
zilla_HttpServerFactory_encodeLiteral_rdh | // TODO dynamic table, Huffman, never indexed
private void encodeLiteral(HpackLiteralHeaderFieldFW.Builder builder, HpackContext hpackContext, DirectBuffer nameBuffer, DirectBuffer valueBuffer) {
builder.type(WITHOUT_INDEXING);
final int nameIndex = hpackContext.index(nameBuffer);
if (nameIndex != (-1)) {
builder.name(nameIndex);
} else {
builder.name(nameBuffer, 0, nameBuffer.capacity());
}
builder.value(valueBuffer, 0, valueBuffer.capacity());
} | 3.26 |
zilla_ManyToOneRingBuffer_nextCorrelationId_rdh | /**
* {@inheritDoc }
*/
public long nextCorrelationId() {
return buffer.getAndAddLong(correlationIdCounterIndex, 1);
} | 3.26 |
zilla_ManyToOneRingBuffer_size_rdh | /**
* {@inheritDoc }
*/
public int size() {
long headBefore;
long tail;
long headAfter = buffer.getLongVolatile(headPositionIndex);
do {
headBefore = headAfter;
tail = buffer.getLongVolatile(tailPositionIndex);
headAfter = buffer.getLongVolatile(headPositionIndex);
} while (headAfter != headBefore);
return ((int) (tail - headAfter));
} | 3.26 |
zilla_ManyToOneRingBuffer_commit_rdh | /**
* {@inheritDoc }
*/
public void commit(final int index) {
final int recordIndex = computeRecordIndex(index);
final AtomicBuffer buffer = this.buffer;
final int recordLength = verifyClaimedSpaceNotReleased(buffer, recordIndex);
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
} | 3.26 |
zilla_ManyToOneRingBuffer_capacity_rdh | /**
* {@inheritDoc }
*/
public int capacity() {
return f0;
} | 3.26 |
zilla_ManyToOneRingBuffer_abort_rdh | /**
* {@inheritDoc }
*/
public void abort(final int index) {
final int recordIndex = computeRecordIndex(index);
final AtomicBuffer buffer = this.buffer;
final int recordLength = verifyClaimedSpaceNotReleased(buffer, recordIndex);
buffer.putInt(typeOffset(recordIndex), PADDING_MSG_TYPE_ID);
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
} | 3.26 |
zilla_ManyToOneRingBuffer_unblock_rdh | /**
* {@inheritDoc }
*/
public boolean unblock() {
final AtomicBuffer buffer = this.buffer;
final int mask = f0 - 1;
final int consumerIndex = ((int) (buffer.getLongVolatile(headPositionIndex) & mask));
final int producerIndex = ((int) (buffer.getLongVolatile(tailPositionIndex) & mask));
if (producerIndex == consumerIndex) {
return false;
}
boolean unblocked = false;
int length = buffer.getIntVolatile(consumerIndex);
if (length < 0) {
buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
buffer.putIntOrdered(lengthOffset(consumerIndex), -length);
unblocked = true;
} else if (0 == length) {
// go from (consumerIndex to producerIndex) or (consumerIndex to capacity)
final int limit = (producerIndex > consumerIndex) ? producerIndex : f0;
int i = consumerIndex + ALIGNMENT;
do {
// read the top int of every long (looking for length aligned to 8=ALIGNMENT)
length = buffer.getIntVolatile(i);
if (0 != length) {
if (scanBackToConfirmStillZeroed(buffer, i, consumerIndex)) {
buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
buffer.putIntOrdered(lengthOffset(consumerIndex), i - consumerIndex);
unblocked = true;
}
break;
}
i += ALIGNMENT;
} while (i < limit );
}
return unblocked;
} | 3.26 |
zilla_ManyToOneRingBuffer_read_rdh | /**
* {@inheritDoc }
*/
public int read(final MessageHandler handler, final int messageCountLimit) {
int messagesRead = 0;
final AtomicBuffer buffer = this.buffer;
final long head = buffer.getLong(headPositionIndex);
final int capacity = this.f0;
final int headIndex = ((int) (head)) & (capacity - 1);
final int maxBlockLength = Math.min(capacity - headIndex, capacity >> 1);
int bytesRead = 0;
try {
while ((bytesRead < maxBlockLength) && (messagesRead < messageCountLimit)) {
final int recordIndex = headIndex + bytesRead;
final int recordLength = buffer.getIntVolatile(lengthOffset(recordIndex));
if (recordLength <= 0) {
break;
}
bytesRead += align(recordLength, ALIGNMENT);
final int v13 = buffer.getInt(typeOffset(recordIndex));
if (PADDING_MSG_TYPE_ID == v13) {
continue;
}
++messagesRead;
handler.onMessage(v13, buffer, recordIndex + HEADER_LENGTH, recordLength - HEADER_LENGTH);
}
} finally {
if (bytesRead != 0) {
buffer.putLongOrdered(headPositionIndex, head + bytesRead);
}
}
return messagesRead;
} | 3.26 |