name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
flink_LogicalTypeMerging_findAdditionDecimalType | /** Finds the result type of a decimal addition operation. */
public static DecimalType findAdditionDecimalType(
int precision1, int scale1, int precision2, int scale2) {
final int scale = Math.max(scale1, scale2);
int precision = Math.max(precision1 - scale1, precision2 - scale2) + scale + 1;
return adjustPrecisionScale(precision, scale);
} | 3.68 |
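A worked example of the rule above may help: the result scale is the larger of the two input scales, and the result precision reserves enough integer digits for the wider operand plus one carry digit. The following standalone sketch (plain Java, not Flink's classes, and ignoring any capping that `adjustPrecisionScale` may apply) shows the arithmetic for DECIMAL(10, 2) + DECIMAL(5, 4).

```java
// Standalone illustration of the decimal-addition typing rule above;
// the adjustPrecisionScale capping step is intentionally omitted.
public class DecimalAdditionRuleDemo {
    public static void main(String[] args) {
        int p1 = 10, s1 = 2; // DECIMAL(10, 2)
        int p2 = 5, s2 = 4;  // DECIMAL(5, 4)
        int scale = Math.max(s1, s2);                           // 4
        int precision = Math.max(p1 - s1, p2 - s2) + scale + 1; // max(8, 1) + 4 + 1 = 13
        System.out.println("DECIMAL(" + precision + ", " + scale + ")"); // DECIMAL(13, 4)
    }
}
```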
framework_EventRouter_fireEvent | /**
* Sends an event to all registered listeners. The listeners will decide if
* the activation method should be called or not.
* <p>
* If an error handler is set, the processing of other listeners will
* continue after the error handler method call unless the error handler
* itself throws an exception.
*
* @param event
* the Event to be sent to all listeners.
* @param errorHandler
* error handler to use to handle any exceptions thrown by
* listeners or null to let the exception propagate to the
* caller, preventing further listener calls
*/
public void fireEvent(EventObject event, ErrorHandler errorHandler) {
// It is not necessary to send any events if there are no listeners
if (listenerList != null) {
// Make a copy of the listener list to allow listeners to be added
// inside listener methods. Fixes #3605.
// Send the event to all listeners. The listeners themselves
// will filter out unwanted events.
for (Object l : listenerList.toArray()) {
ListenerMethod listenerMethod = (ListenerMethod) l;
if (null != errorHandler) {
try {
listenerMethod.receiveEvent(event);
} catch (Exception e) {
errorHandler.error(new ErrorEvent(e));
}
} else {
listenerMethod.receiveEvent(event);
}
}
}
} | 3.68 |
framework_CustomLayout_addComponent | /**
* Adds the component into this container. The component is added without
 * specifying the location (the empty string is then used as the location). Only one
 * component can be added to the default "" location; adding another component
 * to that location overwrites the previously added one.
*
* @param c
* the component to be added.
*/
@Override
public void addComponent(Component c) {
this.addComponent(c, "");
} | 3.68 |
flink_MathUtils_log2strict | /**
* Computes the logarithm of the given value to the base of 2. This method throws an error, if
* the given argument is not a power of 2.
*
* @param value The value to compute the logarithm for.
* @return The logarithm to the base of 2.
* @throws ArithmeticException Thrown, if the given value is zero.
* @throws IllegalArgumentException Thrown, if the given value is not a power of two.
*/
public static int log2strict(int value) throws ArithmeticException, IllegalArgumentException {
if (value == 0) {
throw new ArithmeticException("Logarithm of zero is undefined.");
}
if ((value & (value - 1)) != 0) {
throw new IllegalArgumentException(
"The given value " + value + " is not a power of two.");
}
return 31 - Integer.numberOfLeadingZeros(value);
} | 3.68 |
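The power-of-two check and the leading-zeros trick are compact enough to illustrate without depending on Flink; the sketch below mirrors the same logic and is only an illustration.

```java
// Self-contained sketch mirroring the checks and the bit trick used above.
public class Log2StrictDemo {
    static int log2strict(int value) {
        if (value == 0) {
            throw new ArithmeticException("Logarithm of zero is undefined.");
        }
        if ((value & (value - 1)) != 0) { // clearing the lowest set bit leaves 0 only for powers of two
            throw new IllegalArgumentException(value + " is not a power of two.");
        }
        return 31 - Integer.numberOfLeadingZeros(value);
    }

    public static void main(String[] args) {
        System.out.println(log2strict(1));    // 0
        System.out.println(log2strict(1024)); // 10
        // log2strict(1000) would throw IllegalArgumentException
    }
}
```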
hadoop_XMLUtils_newSecureTransformerFactory | /**
* This method should be used if you need a {@link TransformerFactory}. Use this method
* instead of {@link TransformerFactory#newInstance()}. The factory that is returned has
* secure configuration enabled.
*
* @return a {@link TransformerFactory} with secure configuration enabled
* @throws TransformerConfigurationException if the {@code JAXP} transformer does not
* support the secure configuration
*/
public static TransformerFactory newSecureTransformerFactory()
throws TransformerConfigurationException {
TransformerFactory trfactory = TransformerFactory.newInstance();
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
setOptionalSecureTransformerAttributes(trfactory);
return trfactory;
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertFromSelectStatementWithExplicitFieldsWhereJoinOnInnerSelect | /**
* Tests an insert from a select which joins inner selects using a where clause. The fields for selection are specified.
*
 * <p>The use case for this is to select a subset of the fields from an inner select, where the inner select has joined across several tables.</p>
*/
@Test
public void testInsertFromSelectStatementWithExplicitFieldsWhereJoinOnInnerSelect() {
SelectStatement inner1 = select(field(INNER_FIELD_A).as(INNER_FIELD_A), field(INNER_FIELD_B).as(INNER_FIELD_B)).from(tableRef("Inner")).alias("InnerAlias");
SelectStatement outer = select(field(INNER_FIELD_A)).
from(inner1);
InsertStatement insert = insert().
into(tableRef("InsertA")).
fields(field(INNER_FIELD_A)).
from(outer);
String expectedSql =
"INSERT INTO " + tableName("InsertA") + " (innerFieldA) " +
"SELECT innerFieldA " +
"FROM (SELECT innerFieldA AS innerFieldA, innerFieldB AS innerFieldB FROM " + tableName("Inner") + ") InnerAlias";
assertEquals("Select with join on where clause", ImmutableList.of(expectedSql), testDialect.convertStatementToSQL(insert, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE)));
} | 3.68 |
morf_AlteredTable_isTemporary | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.metadata.Table#isTemporary()
*/
@Override
public boolean isTemporary() {
return baseTable.isTemporary();
} | 3.68 |
hudi_HoodieRealtimeRecordReader_constructRecordReader | /**
* Construct record reader based on job configuration.
*
* @param split File Split
* @param jobConf Job Configuration
* @param realReader Parquet Record Reader
* @return Realtime Reader
*/
private static RecordReader<NullWritable, ArrayWritable> constructRecordReader(RealtimeSplit split,
JobConf jobConf, RecordReader<NullWritable, ArrayWritable> realReader) {
try {
if (canSkipMerging(jobConf)) {
LOG.info("Enabling un-merged reading of realtime records");
return new RealtimeUnmergedRecordReader(split, jobConf, realReader);
}
LOG.info("Enabling merged reading of realtime records for split " + split);
return new RealtimeCompactedRecordReader(split, jobConf, realReader);
} catch (Exception e) {
LOG.error("Got exception when constructing record reader", e);
try {
if (null != realReader) {
realReader.close();
}
} catch (IOException ioe) {
LOG.error("Unable to close real reader", ioe);
}
throw new HoodieException("Exception when constructing record reader ", e);
}
} | 3.68 |
framework_Flash_setArchive | /**
* This attribute may be used to specify a space-separated list of URIs for
* archives containing resources relevant to the object, which may include
* the resources specified by the classid and data attributes. Preloading
* archives will generally result in reduced load times for objects.
* Archives specified as relative URIs should be interpreted relative to the
* codebase attribute.
*
* @param archive
* Space-separated list of URIs with resources relevant to the
* object
*/
public void setArchive(String archive) {
if (archive != getState().archive
|| (archive != null && !archive.equals(getState().archive))) {
getState().archive = archive;
requestRepaint();
}
} | 3.68 |
hudi_HoodieTable_getRestoreTimeline | /**
* Get restore timeline.
*/
public HoodieTimeline getRestoreTimeline() {
return getActiveTimeline().getRestoreTimeline();
} | 3.68 |
hmily_HmilySafeNumberOperationUtils_safeIntersection | /**
* Execute range intersection method by safe mode.
*
* @param range range
* @param connectedRange connected range
* @return the intersection result of two ranges
*/
public static Range<Comparable<?>> safeIntersection(final Range<Comparable<?>> range, final Range<Comparable<?>> connectedRange) {
try {
return range.intersection(connectedRange);
} catch (final ClassCastException ex) {
Comparable<?> rangeLowerEndpoint = range.hasLowerBound() ? range.lowerEndpoint() : null;
Comparable<?> rangeUpperEndpoint = range.hasUpperBound() ? range.upperEndpoint() : null;
Comparable<?> connectedRangeLowerEndpoint = connectedRange.hasLowerBound() ? connectedRange.lowerEndpoint() : null;
Comparable<?> connectedRangeUpperEndpoint = connectedRange.hasUpperBound() ? connectedRange.upperEndpoint() : null;
Class<?> clazz = getTargetNumericType(Lists.newArrayList(rangeLowerEndpoint, rangeUpperEndpoint, connectedRangeLowerEndpoint, connectedRangeUpperEndpoint));
if (clazz == null) {
throw ex;
}
Range<Comparable<?>> newRange = createTargetNumericTypeRange(range, clazz);
Range<Comparable<?>> newConnectedRange = createTargetNumericTypeRange(connectedRange, clazz);
return newRange.intersection(newConnectedRange);
}
} | 3.68 |
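The `ClassCastException` branch above exists because Guava ranges with mixed numeric endpoint types (for example `Integer` and `Long`) cannot be intersected directly. A minimal sketch of that failure mode, assuming Guava is on the classpath:

```java
import com.google.common.collect.Range;

// Demonstrates the failure mode the safe-mode method above guards against.
@SuppressWarnings({"unchecked", "rawtypes"})
public class RangeIntersectionDemo {
    public static void main(String[] args) {
        Range intRange = Range.closed(1, 10);    // Integer endpoints
        Range longRange = Range.closed(5L, 20L); // Long endpoints
        try {
            intRange.intersection(longRange);    // Integer.compareTo(Long) -> ClassCastException
        } catch (ClassCastException expected) {
            System.out.println("Mixed numeric endpoint types are not directly comparable: " + expected);
        }
    }
}
```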
rocketmq-connect_JsonConverter_configure | /**
* Configure this class.
*
* @param configs configs in key/value pairs
*/
@Override
public void configure(Map<String, ?> configs) {
converterConfig = new JsonConverterConfig(configs);
fromConnectSchemaCache = new LRUCache<>(converterConfig.cacheSize());
toConnectSchemaCache = new LRUCache<>(converterConfig.cacheSize());
} | 3.68 |
hadoop_SchedulingRequest_allocationRequestId | /**
* Set the <code>allocationRequestId</code> of the request.
*
* @see SchedulingRequest#setAllocationRequestId(long)
* @param allocationRequestId <code>allocationRequestId</code> of the
* request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder allocationRequestId(
long allocationRequestId) {
schedulingRequest.setAllocationRequestId(allocationRequestId);
return this;
} | 3.68 |
hbase_HMaster_getMasterStartTime | /** Returns timestamp in millis when HMaster was started. */
public long getMasterStartTime() {
return startcode;
} | 3.68 |
dubbo_LoggerFactory_getFile | /**
* Get the current logging file
*
* @return current logging file
*/
public static File getFile() {
return loggerAdapter.getFile();
} | 3.68 |
flink_CatalogManager_getPartition | /**
* Retrieves a partition with a fully qualified table path and partition spec. If the path is
 * not yet fully qualified, use {@link #qualifyIdentifier(UnresolvedIdentifier)} first.
*
* @param tableIdentifier full path of the table to retrieve
* @param partitionSpec full partition spec
* @return partition in the table.
*/
public Optional<CatalogPartition> getPartition(
ObjectIdentifier tableIdentifier, CatalogPartitionSpec partitionSpec) {
Optional<Catalog> catalogOptional = getCatalog(tableIdentifier.getCatalogName());
if (catalogOptional.isPresent()) {
try {
return Optional.of(
catalogOptional
.get()
.getPartition(tableIdentifier.toObjectPath(), partitionSpec));
} catch (PartitionNotExistException ignored) {
}
}
return Optional.empty();
} | 3.68 |
framework_AbstractSelect_getValue | /**
* Gets the selected item id or in multiselect mode a set of selected ids.
*
* @see AbstractField#getValue()
*/
@Override
public Object getValue() {
final Object retValue = super.getValue();
if (isMultiSelect()) {
// If the return value is not a set
if (retValue == null) {
return new HashSet<Object>();
}
if (retValue instanceof Set) {
return Collections.unmodifiableSet((Set<?>) retValue);
} else if (retValue instanceof Collection) {
return new HashSet<Object>((Collection<?>) retValue);
} else {
final Set<Object> s = new HashSet<Object>();
if (items.containsId(retValue)) {
s.add(retValue);
}
return s;
}
} else {
return retValue;
}
} | 3.68 |
hudi_KafkaConnectUtils_getPartitionColumns | /**
* Extract partition columns directly if an instance of class {@link BaseKeyGenerator},
* else extract partition columns from the properties.
*
* @param keyGenerator key generator Instance of the keygenerator.
* @param typedProperties properties from the config.
* @return partition columns Returns the partition columns separated by comma.
*/
public static String getPartitionColumns(KeyGenerator keyGenerator, TypedProperties typedProperties) {
if (keyGenerator instanceof CustomAvroKeyGenerator) {
return ((BaseKeyGenerator) keyGenerator).getPartitionPathFields().stream().map(
pathField -> Arrays.stream(pathField.split(CustomAvroKeyGenerator.SPLIT_REGEX))
 .findFirst().orElse("Illegal partition path field format: '" + pathField + "' for " + keyGenerator.getClass().getSimpleName()))
.collect(Collectors.joining(","));
}
if (keyGenerator instanceof BaseKeyGenerator) {
return String.join(",", ((BaseKeyGenerator) keyGenerator).getPartitionPathFields());
}
return typedProperties.getString(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key());
} | 3.68 |
hbase_HFileBlock_write | /**
* Writes the Cell to this block
*/
void write(Cell cell) throws IOException {
expectState(State.WRITING);
this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream);
} | 3.68 |
flink_ProcessingTimeSessionWindows_withGap | /**
* Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
 * based on the current processing time.
*
* @param size The session timeout, i.e. the time gap between sessions
* @return The policy.
*/
public static ProcessingTimeSessionWindows withGap(Time size) {
return new ProcessingTimeSessionWindows(size.toMilliseconds());
} | 3.68 |
framework_Page_getUI | /**
* Returns the {@link UI} of this {@link Page}.
*
* @return the {@link UI} of this {@link Page}.
*
* @since 8.2
*/
public UI getUI() {
return uI;
} | 3.68 |
pulsar_PulsarAuthorizationProvider_canConsumeAsync | /**
* Check if the specified role has permission to receive messages from the specified fully qualified topic
* name.
*
* @param topicName
* the fully qualified topic name associated with the topic.
* @param role
* the app id used to receive messages from the topic.
* @param subscription
* the subscription name defined by the client
*/
@Override
public CompletableFuture<Boolean> canConsumeAsync(TopicName topicName, String role,
AuthenticationDataSource authenticationData, String subscription) {
return pulsarResources.getNamespaceResources().getPoliciesAsync(topicName.getNamespaceObject())
.thenCompose(policies -> {
if (!policies.isPresent()) {
if (log.isDebugEnabled()) {
log.debug("Policies node couldn't be found for topic : {}", topicName);
}
} else {
if (isNotBlank(subscription)) {
// validate if role is authorized to access subscription. (skip validation if authorization
// list is empty)
Set<String> roles = policies.get().auth_policies
.getSubscriptionAuthentication().get(subscription);
if (roles != null && !roles.isEmpty() && !roles.contains(role)) {
log.warn("[{}] is not authorized to subscribe on {}-{}", role, topicName, subscription);
return CompletableFuture.completedFuture(false);
}
// validate if subscription-auth mode is configured
if (policies.get().subscription_auth_mode != null) {
switch (policies.get().subscription_auth_mode) {
case Prefix:
if (!subscription.startsWith(role)) {
PulsarServerException ex = new PulsarServerException(String.format(
"Failed to create consumer - The subscription name needs to be"
+ " prefixed by the authentication role, like %s-xxxx for topic: %s",
role, topicName));
return FutureUtil.failedFuture(ex);
}
break;
default:
break;
}
}
}
}
return checkAuthorization(topicName, role, AuthAction.consume);
});
} | 3.68 |
framework_Dependency_findDependencies | /**
* Finds all the URLs defined for the given classes, registers the URLs to
* the communication manager, passes the registered dependencies through any
* defined filters and returns the filtered collection of dependencies to
* load.
*
* @since 8.1
* @param connectorTypes
* the collection of connector classes to scan
* @param manager
* a reference to the communication manager which tracks
* dependencies
* @param context
* the context information for the filtering operation
* @return the list of found and filtered dependencies
*/
public static List<Dependency> findDependencies(
List<Class<? extends ClientConnector>> connectorTypes,
LegacyCommunicationManager manager, FilterContext context) {
List<Dependency> dependencies = findDependencies(connectorTypes,
manager);
VaadinService service = context.getService();
for (DependencyFilter filter : service.getDependencyFilters()) {
dependencies = filter.filter(dependencies, context);
}
return dependencies;
} | 3.68 |
morf_AbstractSqlDialectTest_testRepairAutoNumberStartPositionOverRepairLimit | /**
* Tests for {@link SqlDialect#repairAutoNumberStartPosition(Table, SqlScriptExecutor, Connection)}
*/
@Test
public void testRepairAutoNumberStartPositionOverRepairLimit() {
setMaxIdOnAutonumberTable(MAX_ID_OVER_REPAIR_LIMIT);
 testDialect.repairAutoNumberStartPosition(metadata.getTable(TEST_TABLE), sqlScriptExecutor, connection);
 testDialect.repairAutoNumberStartPosition(metadata.getTable(AUTO_NUMBER_TABLE), sqlScriptExecutor, connection);
 verifyRepairAutoNumberStartPosition(sqlScriptExecutor, connection);
} | 3.68 |
hbase_PrivateCellUtil_setTimestamp | /**
* Sets the given timestamp to the cell.
* @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static void setTimestamp(Cell cell, byte[] ts) throws IOException {
if (cell instanceof ExtendedCell) {
((ExtendedCell) cell).setTimestamp(ts);
} else {
throw new IOException(
new UnsupportedOperationException("Cell is not of type " + ExtendedCell.class.getName()));
}
} | 3.68 |
flink_BinaryStringData_indexOf | /**
* Returns the index within this string of the first occurrence of the specified substring,
* starting at the specified index.
*
* @param str the substring to search for.
* @param fromIndex the index from which to start the search.
* @return the index of the first occurrence of the specified substring, starting at the
* specified index, or {@code -1} if there is no such occurrence.
*/
public int indexOf(BinaryStringData str, int fromIndex) {
ensureMaterialized();
str.ensureMaterialized();
if (str.binarySection.sizeInBytes == 0) {
return 0;
}
if (inFirstSegment()) {
// position in byte
int byteIdx = 0;
 // position in chars
int charIdx = 0;
while (byteIdx < binarySection.sizeInBytes && charIdx < fromIndex) {
byteIdx += numBytesForFirstByte(getByteOneSegment(byteIdx));
charIdx++;
}
do {
if (byteIdx + str.binarySection.sizeInBytes > binarySection.sizeInBytes) {
return -1;
}
if (BinarySegmentUtils.equals(
binarySection.segments,
binarySection.offset + byteIdx,
str.binarySection.segments,
str.binarySection.offset,
str.binarySection.sizeInBytes)) {
return charIdx;
}
byteIdx += numBytesForFirstByte(getByteOneSegment(byteIdx));
charIdx++;
} while (byteIdx < binarySection.sizeInBytes);
return -1;
} else {
return indexOfMultiSegs(str, fromIndex);
}
} | 3.68 |
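The scan above walks UTF-8 bytes while counting characters, using the leading byte of each sequence to decide how far to advance. The standalone sketch below uses a hypothetical `numBytesForFirstByte` helper (not Flink's, which also has to tolerate invalid bytes) to show how a character index maps to a byte offset.

```java
import java.nio.charset.StandardCharsets;

// Standalone illustration of the leading-byte rule driving the byteIdx/charIdx scan above.
public class Utf8CharStepDemo {
    // Hypothetical helper: length of a UTF-8 sequence derived from its first byte.
    static int numBytesForFirstByte(byte b) {
        if ((b & 0x80) == 0x00) return 1; // 0xxxxxxx
        if ((b & 0xE0) == 0xC0) return 2; // 110xxxxx
        if ((b & 0xF0) == 0xE0) return 3; // 1110xxxx
        return 4;                         // 11110xxx (invalid bytes are not handled here)
    }

    public static void main(String[] args) {
        byte[] utf8 = "a\u00e9\u20ac".getBytes(StandardCharsets.UTF_8); // 1 + 2 + 3 bytes
        int byteIdx = 0;
        int charIdx = 0;
        while (byteIdx < utf8.length) {
            int step = numBytesForFirstByte(utf8[byteIdx]);
            System.out.println("char " + charIdx + " starts at byte " + byteIdx + " and spans " + step + " byte(s)");
            byteIdx += step;
            charIdx++;
        }
    }
}
```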
flink_DriverUtils_fromProperties | /**
* Generate map from given properties.
*
* @param properties the given properties
* @return the result map
*/
public static Map<String, String> fromProperties(Properties properties) {
Map<String, String> map = new HashMap<>();
Enumeration<?> e = properties.propertyNames();
while (e.hasMoreElements()) {
String key = (String) e.nextElement();
map.put(key, properties.getProperty(key));
}
return map;
} | 3.68 |
dubbo_EdsEndpointManager_getEndpointListeners | // for test
static ConcurrentHashMap<String, Set<EdsEndpointListener>> getEndpointListeners() {
return ENDPOINT_LISTENERS;
} | 3.68 |
hbase_RegionStates_isRegionOffline | /** Returns True if region is offline (In OFFLINE or CLOSED state). */
public boolean isRegionOffline(final RegionInfo regionInfo) {
return isRegionInState(regionInfo, State.OFFLINE, State.CLOSED);
} | 3.68 |
hbase_User_runAsLoginUser | /** Executes the given action as the login user */
@SuppressWarnings({ "rawtypes", "unchecked" })
public static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action) throws IOException {
try {
Class c = Class.forName("org.apache.hadoop.security.SecurityUtil");
Class[] types = new Class[] { PrivilegedExceptionAction.class };
Object[] args = new Object[] { action };
return (T) Methods.call(c, null, "doAsLoginUser", types, args);
} catch (Throwable e) {
throw new IOException(e);
}
} | 3.68 |
flink_FileSystemCheckpointStorage_getSavepointPath | /** @return The default location where savepoints will be externalized if set. */
@Nullable
public Path getSavepointPath() {
return location.getBaseSavepointPath();
} | 3.68 |
hbase_MasterObserver_postGetTableDescriptors | /**
* Called after a getTableDescriptors request has been processed.
* @param ctx the environment to interact with the framework and master
* @param tableNamesList the list of table names, or null if querying for all
* @param descriptors the list of descriptors about to be returned
* @param regex regular expression used for filtering the table names
*/
default void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<TableDescriptor> descriptors, String regex)
throws IOException {
} | 3.68 |
pulsar_FieldParser_setEmptyValue | /**
* Sets the empty/null value if field is allowed to be set empty.
*
* @param strValue
* @param field
* @param obj
* @throws IllegalArgumentException
* @throws IllegalAccessException
*/
public static <T> void setEmptyValue(String strValue, Field field, T obj)
throws IllegalArgumentException, IllegalAccessException {
requireNonNull(field);
// if field is not primitive type
Type fieldType = field.getGenericType();
if (fieldType instanceof ParameterizedType) {
if (field.getType().equals(List.class)) {
field.set(obj, new ArrayList<>());
} else if (field.getType().equals(Set.class)) {
field.set(obj, new LinkedHashSet<>());
} else if (field.getType().equals(Optional.class)) {
field.set(obj, Optional.empty());
} else {
throw new IllegalArgumentException(
format("unsupported field-type %s for %s", field.getType(), field.getName()));
}
 } else if (Number.class.isAssignableFrom(field.getType()) || field.getType().equals(String.class)) {
field.set(obj, null);
}
} | 3.68 |
hbase_TableName_createTableNameIfNecessary | /**
* Check that the object does not exist already. There are two reasons for creating the objects
* only once: 1) With 100K regions, the table names take ~20MB. 2) Equals becomes much faster as
* it's resolved with a reference and an int comparison.
*/
private static TableName createTableNameIfNecessary(ByteBuffer bns, ByteBuffer qns) {
for (TableName tn : tableCache) {
if (Bytes.equals(tn.getQualifier(), qns) && Bytes.equals(tn.getNamespace(), bns)) {
return tn;
}
}
TableName newTable = new TableName(bns, qns);
if (tableCache.add(newTable)) { // Adds the specified element if it is not already present
return newTable;
}
// Someone else added it. Let's find it.
for (TableName tn : tableCache) {
if (Bytes.equals(tn.getQualifier(), qns) && Bytes.equals(tn.getNamespace(), bns)) {
return tn;
}
}
// this should never happen.
throw new IllegalStateException(newTable + " was supposed to be in the cache");
} | 3.68 |
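The method above is a scan, add-if-absent, re-scan interning pattern: because the set's `add` is atomic, a losing writer can always find the instance the winner inserted. A simplified, self-contained sketch of the same pattern (using plain strings instead of `TableName`):

```java
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

// Simplified sketch of the scan / add-if-absent / re-scan interning pattern used above.
public class InternCacheDemo {
    private static final Set<String> CACHE = new CopyOnWriteArraySet<>();

    static String intern(String candidate) {
        for (String cached : CACHE) {
            if (cached.equals(candidate)) {
                return cached;              // reuse the cached instance
            }
        }
        if (CACHE.add(candidate)) {         // atomic add-if-absent; true only for the first writer
            return candidate;
        }
        for (String cached : CACHE) {       // another thread won the race; return its instance
            if (cached.equals(candidate)) {
                return cached;
            }
        }
        throw new IllegalStateException(candidate + " was supposed to be in the cache");
    }

    public static void main(String[] args) {
        String a = intern(new String("ns:table"));
        String b = intern(new String("ns:table"));
        System.out.println(a == b);         // true: both callers see the same cached instance
    }
}
```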
hadoop_BlockGrouper_makeBlockGroup | /**
 * Calculates and organizes a BlockGroup; to be called by the ECManager.
* @param dataBlocks Data blocks to compute parity blocks against
* @param parityBlocks To be computed parity blocks
* @return ECBlockGroup.
*/
public ECBlockGroup makeBlockGroup(ECBlock[] dataBlocks,
ECBlock[] parityBlocks) {
ECBlockGroup blockGroup = new ECBlockGroup(dataBlocks, parityBlocks);
return blockGroup;
} | 3.68 |
hadoop_DurationTrackerFactory_trackDuration | /**
* Initiate a duration tracking operation by creating/returning
* an object whose {@code close()} call will
* update the statistics.
* The expected use is within a try-with-resources clause.
* @param key statistic key
* @return an object to close after an operation completes.
*/
default DurationTracker trackDuration(String key) {
return trackDuration(key, 1);
} | 3.68 |
hadoop_LocalResolver_getDatanodesSubcluster | /**
* Get the Datanode mapping from the subclusters from the Namenodes. This
* needs to be done as a privileged action to use the user for the Router and
* not the one from the client in the RPC call.
*
* @return DN IP -> Subcluster.
*/
private Map<String, String> getDatanodesSubcluster() {
final RouterRpcServer rpcServer = getRpcServer();
if (rpcServer == null) {
LOG.error("Cannot access the Router RPC server");
return null;
}
Map<String, String> ret = new HashMap<>();
try {
// We need to get the DNs as a privileged user
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
Map<String, DatanodeStorageReport[]> dnMap = loginUser.doAs(
new PrivilegedAction<Map<String, DatanodeStorageReport[]>>() {
@Override
public Map<String, DatanodeStorageReport[]> run() {
try {
return rpcServer.getDatanodeStorageReportMap(
DatanodeReportType.ALL);
} catch (IOException e) {
LOG.error("Cannot get the datanodes from the RPC server", e);
return null;
}
}
});
for (Entry<String, DatanodeStorageReport[]> entry : dnMap.entrySet()) {
String nsId = entry.getKey();
DatanodeStorageReport[] dns = entry.getValue();
for (DatanodeStorageReport dn : dns) {
DatanodeInfo dnInfo = dn.getDatanodeInfo();
String ipAddr = dnInfo.getIpAddr();
ret.put(ipAddr, nsId);
}
}
} catch (IOException e) {
LOG.error("Cannot get Datanodes from the Namenodes: {}", e.getMessage());
}
return ret;
} | 3.68 |
hbase_HttpDoAsClient_bytes | // Helper to translate strings to UTF8 bytes
private byte[] bytes(String s) {
return Bytes.toBytes(s);
} | 3.68 |
pulsar_ProducerConfiguration_setBatchingEnabled | /**
* Control whether automatic batching of messages is enabled for the producer. <i>default: false [No batching]</i>
*
* When batching is enabled, multiple calls to Producer.sendAsync can result in a single batch to be sent to the
* broker, leading to better throughput, especially when publishing small messages. If compression is enabled,
* messages will be compressed at the batch level, leading to a much better compression ratio for similar headers or
* contents.
*
* When enabled default batch delay is set to 1 ms and default batch size is 1000 messages
*
* @see ProducerConfiguration#setBatchingMaxPublishDelay(long, TimeUnit)
* @since 1.0.36 <br>
* Make sure all the consumer applications have been updated to use this client version, before starting to
* batch messages.
*
*/
public ProducerConfiguration setBatchingEnabled(boolean batchMessagesEnabled) {
conf.setBatchingEnabled(batchMessagesEnabled);
return this;
} | 3.68 |
flink_DataSet_joinWithHuge | /**
* Initiates a Join transformation.
*
* <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
* and provides multiple ways to combine joining elements into one DataSet.
*
* <p>This method also gives the hint to the optimizer that the second DataSet to join is much
* larger than the first one.
*
* <p>This method returns a {@link JoinOperatorSets} on which one of the {@code where} methods
* can be called to define the join key of the first joining (i.e., this) DataSet.
*
* @param other The other DataSet with which this DataSet is joined.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see JoinOperatorSets
* @see DataSet
*/
public <R> JoinOperatorSets<T, R> joinWithHuge(DataSet<R> other) {
return new JoinOperatorSets<>(this, other, JoinHint.BROADCAST_HASH_FIRST);
} | 3.68 |
framework_GridMultiSelect_addMultiSelectionListener | /**
* Adds a selection listener that will be called when the selection is
* changed either by the user or programmatically.
*
* @param listener
* the value change listener, not {@code null}
* @return a registration for the listener
*/
public Registration addMultiSelectionListener(
MultiSelectionListener<T> listener) {
return model.addMultiSelectionListener(listener);
} | 3.68 |
open-banking-gateway_PsuEncryptionServiceProvider_forPrivateKey | /**
* Private key (read only) encryption.
* @param keyId Key ID
* @param key Private key
* @return Encryption service for reading only
*/
public EncryptionService forPrivateKey(UUID keyId, PrivateKey key) {
return oper.encryptionService(keyId.toString(), key);
} | 3.68 |
flink_DataStream_union | /**
* Creates a new {@link DataStream} by merging {@link DataStream} outputs of the same type with
* each other. The DataStreams merged using this operator will be transformed simultaneously.
*
* @param streams The DataStreams to union output with.
* @return The {@link DataStream}.
*/
@SafeVarargs
public final DataStream<T> union(DataStream<T>... streams) {
List<Transformation<T>> unionedTransforms = new ArrayList<>();
unionedTransforms.add(this.transformation);
for (DataStream<T> newStream : streams) {
if (!getType().equals(newStream.getType())) {
throw new IllegalArgumentException(
"Cannot union streams of different types: "
+ getType()
+ " and "
+ newStream.getType());
}
unionedTransforms.add(newStream.getTransformation());
}
return new DataStream<>(this.environment, new UnionTransformation<>(unionedTransforms));
} | 3.68 |
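A minimal usage sketch of `union` (assuming a standard Flink streaming job; names here are illustrative): both inputs must have the same type, and the unioned stream feeds the same downstream operators.

```java
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Minimal union usage sketch; requires the Flink streaming runtime to execute.
public class UnionDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> first = env.fromElements("a1", "a2");
        DataStream<String> second = env.fromElements("b1", "b2");
        first.union(second).print(); // elements of both streams flow through the same sink
        env.execute("union-demo");
    }
}
```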
flink_SqlLikeChainChecker_checkBegin | /** Matches the beginning of each string to a pattern. */
private static boolean checkBegin(
BinaryStringData pattern, MemorySegment[] segments, int start, int len) {
int lenSub = pattern.getSizeInBytes();
return len >= lenSub
&& SegmentsUtil.equals(pattern.getSegments(), 0, segments, start, lenSub);
} | 3.68 |
flink_DefaultResourceCleaner_withPrioritizedCleanup | /**
* Prioritized cleanups run before their regular counterparts. This method enables the
* caller to model dependencies between cleanup tasks. The order in which cleanable
* resources are added matters, i.e. if two cleanable resources are added as prioritized
* cleanup tasks, the resource being added first will block the cleanup of the second
* resource. All prioritized cleanup resources will run and finish before any resource that
* is added using {@link #withRegularCleanup(String, Object)} is started.
*
* @param label The label being used when logging errors in the given cleanup.
* @param prioritizedCleanup The cleanup callback that is going to be prioritized.
*/
public Builder<T> withPrioritizedCleanup(String label, T prioritizedCleanup) {
this.prioritizedCleanup.add(new CleanupWithLabel<>(prioritizedCleanup, label));
return this;
} | 3.68 |
framework_Escalator_getSpacersInDom | /**
* Gets the spacers currently rendered in the DOM.
*
* @return an unmodifiable (but live) collection of the spacers
* currently in the DOM
*/
public Collection<SpacerImpl> getSpacersInDom() {
return Collections
.unmodifiableCollection(rowIndexToSpacer.values());
} | 3.68 |
querydsl_GeometryExpression_distance | /**
* Returns the shortest distance between any two Points in the two geometric objects as
* calculated in the spatial reference system of this geometric object. Because the geometries
* are closed, it is possible to find a point on each geometric object involved, such that the
* distance between these 2 points is the returned distance between their geometric objects.
*
* @param geometry other geometry
* @return distance between this and the other geometry
*/
public NumberExpression<Double> distance(Expression<? extends Geometry> geometry) {
return Expressions.numberOperation(Double.class, SpatialOps.DISTANCE, mixin, geometry);
} | 3.68 |
flink_ThreadSafeSimpleCounter_inc | /**
* Increment the current count by the given value.
*
* @param n value to increment the current count by
*/
@Override
public void inc(long n) {
longAdder.add(n);
} | 3.68 |
hudi_HoodieFlinkWriteClient_createIndex | /**
 * Creates the {@link HoodieIndex} instance for the given write config.
*/
@Override
protected HoodieIndex createIndex(HoodieWriteConfig writeConfig) {
return FlinkHoodieIndexFactory.createIndex((HoodieFlinkEngineContext) context, config);
} | 3.68 |
pulsar_ProducerImpl_stripChecksum | /**
* Strips checksum from {@link OpSendMsg} command if present else ignore it.
*
* @param op
*/
private void stripChecksum(OpSendMsg op) {
ByteBufPair msg = op.cmd;
if (msg != null) {
int totalMsgBufSize = msg.readableBytes();
ByteBuf headerFrame = msg.getFirst();
headerFrame.markReaderIndex();
try {
headerFrame.skipBytes(4); // skip [total-size]
int cmdSize = (int) headerFrame.readUnsignedInt();
// verify if checksum present
headerFrame.skipBytes(cmdSize);
if (!hasChecksum(headerFrame)) {
return;
}
int headerSize = 4 + 4 + cmdSize; // [total-size] [cmd-length] [cmd-size]
int checksumSize = 4 + 2; // [magic-number] [checksum-size]
int checksumMark = (headerSize + checksumSize); // [header-size] [checksum-size]
int metaPayloadSize = (totalMsgBufSize - checksumMark); // metadataPayload = totalSize - checksumMark
int newTotalFrameSizeLength = 4 + cmdSize + metaPayloadSize; // new total-size without checksum
headerFrame.resetReaderIndex();
int headerFrameSize = headerFrame.readableBytes();
headerFrame.setInt(0, newTotalFrameSizeLength); // rewrite new [total-size]
ByteBuf metadata = headerFrame.slice(checksumMark, headerFrameSize - checksumMark); // sliced only
// metadata
headerFrame.writerIndex(headerSize); // set headerFrame write-index to overwrite metadata over checksum
metadata.readBytes(headerFrame, metadata.readableBytes());
headerFrame.capacity(headerFrameSize - checksumSize); // reduce capacity by removed checksum bytes
} finally {
headerFrame.resetReaderIndex();
}
} else {
log.warn("[{}] Failed while casting null into ByteBufPair", producerName);
}
} | 3.68 |
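The offset arithmetic above is easier to follow with concrete numbers. The sketch below plugs assumed sizes into the frame layout spelled out in the comments ([total-size][cmd-length][cmd][magic][checksum][metadata+payload]); it only reproduces the arithmetic, not the buffer manipulation.

```java
// Worked example of the checksum-stripping arithmetic, using assumed sizes.
public class StripChecksumMathDemo {
    public static void main(String[] args) {
        int cmdSize = 100;          // C: serialized command size
        int metaPayloadBytes = 500; // M: metadata + payload size
        int totalMsgBufSize = 4 + 4 + cmdSize + 2 + 4 + metaPayloadBytes; // 614 readable bytes
        int headerSize = 4 + 4 + cmdSize;                     // 108: [total-size] [cmd-length] [cmd-size]
        int checksumSize = 4 + 2;                             // 6:   [magic-number] [checksum-size]
        int checksumMark = headerSize + checksumSize;         // 114
        int metaPayloadSize = totalMsgBufSize - checksumMark; // 500, i.e. M
        int newTotalFrameSize = 4 + cmdSize + metaPayloadSize; // 604: old total-size (610) minus 6 checksum bytes
        System.out.println(newTotalFrameSize);
    }
}
```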
hadoop_FederationStateStoreFacade_updateApplicationHomeSubCluster | /**
* Update ApplicationHomeSubCluster to FederationStateStore.
*
* @param subClusterId homeSubClusterId
* @param applicationId applicationId.
* @param homeSubCluster homeSubCluster, homeSubCluster selected according to policy.
* @throws YarnException yarn exception.
*/
public void updateApplicationHomeSubCluster(SubClusterId subClusterId,
ApplicationId applicationId, ApplicationHomeSubCluster homeSubCluster) throws YarnException {
try {
updateApplicationHomeSubCluster(homeSubCluster);
} catch (YarnException e) {
SubClusterId subClusterIdInStateStore = getApplicationHomeSubCluster(applicationId);
if (subClusterId == subClusterIdInStateStore) {
LOG.info("Application {} already submitted on SubCluster {}.", applicationId, subClusterId);
} else {
String msg = String.format(
"Unable to update the ApplicationId %s into the FederationStateStore.", applicationId);
throw new YarnException(msg, e);
}
}
} | 3.68 |
AreaShop_AreaShop_hasPermission | /**
* Check for a permission of a (possibly offline) player.
* @param offlinePlayer OfflinePlayer to check
* @param permission Permission to check
 * @return true if the player has the permission, false if the player does not have the permission or is offline and there is no Vault-compatible permission plugin
*/
public boolean hasPermission(OfflinePlayer offlinePlayer, String permission) {
// Online, return through Bukkit
if(offlinePlayer.getPlayer() != null) {
return offlinePlayer.getPlayer().hasPermission(permission);
}
// Resolve while offline if possible
net.milkbowl.vault.permission.Permission permissionProvider = getPermissionProvider();
if(permissionProvider != null) {
// TODO: Should we provide a world here?
return permissionProvider.playerHas(null, offlinePlayer, permission);
}
// Player offline and no offline permission provider available, safely say that there is no permission
return false;
} | 3.68 |
flink_CatalogTableImpl_removeRedundant | /** Construct catalog table properties from {@link #toProperties()}. */
public static Map<String, String> removeRedundant(
Map<String, String> properties, TableSchema schema, List<String> partitionKeys) {
Map<String, String> ret = new HashMap<>(properties);
DescriptorProperties descriptorProperties = new DescriptorProperties(false);
descriptorProperties.putTableSchema(SCHEMA, schema);
descriptorProperties.putPartitionKeys(partitionKeys);
descriptorProperties.asMap().keySet().forEach(ret::remove);
return ret;
} | 3.68 |
hadoop_DiskValidatorFactory_getInstance | /**
* Returns {@link DiskValidator} instance corresponding to its name.
* The diskValidator parameter can be "basic" for {@link BasicDiskValidator}
* or "read-write" for {@link ReadWriteDiskValidator}.
* @param diskValidator canonical class name, for example, "basic"
* @throws DiskErrorException if the class cannot be located
* @return disk validator.
*/
@SuppressWarnings("unchecked")
public static DiskValidator getInstance(String diskValidator)
throws DiskErrorException {
@SuppressWarnings("rawtypes")
Class clazz;
if (diskValidator.equalsIgnoreCase(BasicDiskValidator.NAME)) {
clazz = BasicDiskValidator.class;
} else if (diskValidator.equalsIgnoreCase(ReadWriteDiskValidator.NAME)) {
clazz = ReadWriteDiskValidator.class;
} else {
try {
clazz = Class.forName(diskValidator);
} catch (ClassNotFoundException cnfe) {
throw new DiskErrorException(diskValidator
+ " DiskValidator class not found.", cnfe);
}
}
return getInstance(clazz);
} | 3.68 |
hbase_WALEdit_getRegionEventDescriptor | /**
* @return Returns a RegionEventDescriptor made by deserializing the content of the passed in
* <code>cell</code>, IFF the <code>cell</code> is a RegionEventDescriptor type WALEdit.
*/
public static RegionEventDescriptor getRegionEventDescriptor(Cell cell) throws IOException {
return CellUtil.matchingColumnFamilyAndQualifierPrefix(cell, METAFAMILY, REGION_EVENT_PREFIX)
? RegionEventDescriptor.parseFrom(CellUtil.cloneValue(cell))
: null;
} | 3.68 |
flink_ServiceType_classify | // Helper method
public static KubernetesConfigOptions.ServiceExposedType classify(Service service) {
KubernetesConfigOptions.ServiceExposedType type =
KubernetesConfigOptions.ServiceExposedType.valueOf(service.getSpec().getType());
if (type == KubernetesConfigOptions.ServiceExposedType.ClusterIP) {
if (HeadlessClusterIPService.HEADLESS_CLUSTER_IP.equals(
service.getSpec().getClusterIP())) {
type = KubernetesConfigOptions.ServiceExposedType.Headless_ClusterIP;
}
}
return type;
} | 3.68 |
dubbo_NacosMetadataReport_innerReceive | /**
* receive
*
* @param dataId data ID
* @param group group
* @param configInfo content
*/
@Override
public void innerReceive(String dataId, String group, String configInfo) {
String oldValue = cacheData.get(dataId);
ConfigChangedEvent event =
new ConfigChangedEvent(dataId, group, configInfo, getChangeType(configInfo, oldValue));
if (configInfo == null) {
cacheData.remove(dataId);
} else {
cacheData.put(dataId, configInfo);
}
listeners.forEach(listener -> listener.process(event));
} | 3.68 |
hbase_ScannerModel_buildFilter | /**
* @param s the JSON representation of the filter
* @return the filter
*/
public static Filter buildFilter(String s) throws Exception {
FilterModel model =
getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE)
.readValue(s, FilterModel.class);
return model.build();
} | 3.68 |
hudi_HoodieOperation_isInsert | /**
* Returns whether the operation is INSERT.
*/
public static boolean isInsert(HoodieOperation operation) {
return operation == INSERT;
} | 3.68 |
dubbo_Parameters_getMethodExtension | /**
* @deprecated will be removed in 3.3.0
*/
@Deprecated
public <T> T getMethodExtension(Class<T> type, String method, String key, String defaultValue) {
String name = getMethodParameter(method, key, defaultValue);
return ExtensionLoader.getExtensionLoader(type).getExtension(name);
} | 3.68 |
hmily_CommonAssembler_assembleHmilyPaginationValueSegment | /**
* Assemble hmily PaginationValue segment.
* @param paginationValue pagination value segment
* @return Hmily pagination value segment
*/
public static HmilyPaginationValueSegment assembleHmilyPaginationValueSegment(final PaginationValueSegment paginationValue) {
HmilyPaginationValueSegment hmilyPaginationValueSegment = null;
int startIndex = paginationValue.getStartIndex();
int stopIndex = paginationValue.getStopIndex();
if (paginationValue instanceof NumberLiteralLimitValueSegment) {
hmilyPaginationValueSegment = new HmilyNumberLiteralLimitValueSegment(startIndex, stopIndex, ((NumberLiteralLimitValueSegment) paginationValue).getValue());
} else if (paginationValue instanceof ParameterMarkerLimitValueSegment) {
hmilyPaginationValueSegment = new HmilyParameterMarkerLimitValueSegment(startIndex, stopIndex, ((ParameterMarkerLimitValueSegment) paginationValue).getParameterIndex());
} else if (paginationValue instanceof NumberLiteralRowNumberValueSegment) {
NumberLiteralRowNumberValueSegment nrnvs = (NumberLiteralRowNumberValueSegment) paginationValue;
hmilyPaginationValueSegment = new HmilyNumberLiteralRowNumberValueSegment(startIndex, stopIndex, nrnvs.getValue(), nrnvs.isBoundOpened());
} else if (paginationValue instanceof ParameterMarkerRowNumberValueSegment) {
ParameterMarkerRowNumberValueSegment pmrnvs = (ParameterMarkerRowNumberValueSegment) paginationValue;
hmilyPaginationValueSegment = new HmilyParameterMarkerRowNumberValueSegment(startIndex, stopIndex, pmrnvs.getParameterIndex(), pmrnvs.isBoundOpened());
}
return hmilyPaginationValueSegment;
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateUrlNullCheck | /**
* generate method URL argument null check
*/
private String generateUrlNullCheck(int index) {
return String.format(CODE_URL_NULL_CHECK, index, URL.class.getName(), index);
} | 3.68 |
hbase_IndexBlockEncoding_writeIdInBytes | /**
* Writes id bytes to the given array starting from offset.
* @param dest output array
* @param offset starting offset of the output array
*/
public void writeIdInBytes(byte[] dest, int offset) throws IOException {
System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
} | 3.68 |
pulsar_GrowablePriorityLongPairQueue_items | /**
 * @return a new set of key pairs containing at most the provided numberOfItems (makes a copy)
*/
public Set<LongPair> items(int numberOfItems) {
Set<LongPair> items = new HashSet<>(this.size);
forEach((item1, item2) -> {
if (items.size() < numberOfItems) {
items.add(new LongPair(item1, item2));
}
});
return items;
} | 3.68 |
flink_HiveDeclarativeAggregateFunction_getTypeInference | /** This method is used to infer result type when generate {@code AggregateCall} of calcite. */
public TypeInference getTypeInference(DataTypeFactory factory) {
return TypeInference.newBuilder()
.outputTypeStrategy(new HiveAggregateFunctionOutputStrategy(this))
.build();
} | 3.68 |
pulsar_SystemTopicClient_delete | /**
* Delete event in the system topic.
* @param key the key of the event
* @param t pulsar event
* @return message id
 * @throws PulsarClientException if an error occurs while writing the delete event
*/
default MessageId delete(String key, T t) throws PulsarClientException {
throw new UnsupportedOperationException("Unsupported operation");
} | 3.68 |
rocketmq-connect_Stat_type | /**
* type
*
* @return
*/
default String type() {
return NoneType.none.name();
} | 3.68 |
framework_CheckBox_getInputElement | /**
* Returns the {@link CheckBoxInputElement} element to manipulate the style
* name of the {@code input} element of the {@link CheckBox}.
*
* @since 8.7
* @return the current {@link CheckBoxInputElement}, not {@code null}.
*/
public CheckBoxInputElement getInputElement() {
if (checkBoxInputElement == null) {
checkBoxInputElement = new CheckBoxInputElement(this);
}
return checkBoxInputElement;
} | 3.68 |
graphhopper_IntFloatBinaryHeap_percolateDownMinHeap | /**
* Percolates element down heap from the array position given by the index.
*/
final void percolateDownMinHeap(final int index) {
final int element = elements[index];
final float key = keys[index];
int hole = index;
while (hole * 2 <= size) {
int child = hole * 2;
// if we have a right child and that child can not be percolated
// up then move onto other child
if (child != size && keys[child + 1] < keys[child]) {
child++;
}
// if we found resting place of bubble then terminate search
if (keys[child] >= key) {
break;
}
elements[hole] = elements[child];
keys[hole] = keys[child];
hole = child;
}
elements[hole] = element;
keys[hole] = key;
} | 3.68 |
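A concrete trace of the 1-indexed sift-down makes the loop easier to follow. The self-contained sketch below runs the same steps on a small key array whose root violates the min-heap property (elements are omitted; only keys drive the ordering).

```java
import java.util.Arrays;

// Standalone trace of the 1-indexed percolate-down loop on a tiny min-heap of keys.
public class PercolateDownDemo {
    public static void main(String[] args) {
        float[] keys = {0f, 9f, 2f, 3f, 4f, 5f}; // index 0 unused; root key 9 is out of place
        int size = 5;
        int hole = 1;
        float key = keys[hole];
        while (hole * 2 <= size) {
            int child = hole * 2;
            if (child != size && keys[child + 1] < keys[child]) {
                child++;                // pick the smaller of the two children
            }
            if (keys[child] >= key) {
                break;                  // the bubble found its resting place
            }
            keys[hole] = keys[child];   // move the smaller child up into the hole
            hole = child;
        }
        keys[hole] = key;
        System.out.println(Arrays.toString(keys)); // [0.0, 2.0, 4.0, 3.0, 9.0, 5.0]
    }
}
```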
framework_VFilterSelect_selectPrevPage | /*
* Show the prev page.
*/
private void selectPrevPage() {
if (currentPage > 0) {
filterOptions(currentPage - 1, lastFilter);
selectPopupItemWhenResponseIsReceived = Select.LAST;
}
} | 3.68 |
Activiti_AstNode_findPublicAccessibleMethod | /**
* Find accessible method. Searches the inheritance tree of the class declaring
* the method until it finds a method that can be invoked.
* @param method method
* @return accessible method or <code>null</code>
*/
private static Method findPublicAccessibleMethod(Method method) {
if (method == null || !Modifier.isPublic(method.getModifiers())) {
return null;
}
if (
method.isAccessible() ||
Modifier.isPublic(method.getDeclaringClass().getModifiers())
) {
return method;
}
for (Class<?> cls : method.getDeclaringClass().getInterfaces()) {
Method mth = null;
try {
mth =
findPublicAccessibleMethod(
cls.getMethod(
method.getName(),
method.getParameterTypes()
)
);
if (mth != null) {
return mth;
}
} catch (NoSuchMethodException ignore) {
// do nothing
}
}
Class<?> cls = method.getDeclaringClass().getSuperclass();
if (cls != null) {
Method mth = null;
try {
mth =
findPublicAccessibleMethod(
cls.getMethod(
method.getName(),
method.getParameterTypes()
)
);
if (mth != null) {
return mth;
}
} catch (NoSuchMethodException ignore) {
// do nothing
}
}
return null;
} | 3.68 |
hbase_WALEventTrackerTableAccessor_getRowKey | /**
* Create rowKey: 1. We want RS name to be the leading part of rowkey so that we can query by RS
* name filter. WAL name contains rs name as a leading part. 2. Timestamp when the event was
* generated. 3. Add state of the wal. Combination of 1 + 2 + 3 is definitely going to create a
* unique rowkey.
* @param payload payload to process
* @return rowKey byte[]
*/
public static byte[] getRowKey(final WALEventTrackerPayload payload) {
String walName = payload.getWalName();
// converting to string since this will help seeing the timestamp in string format using
// hbase shell commands.
String timestampStr = String.valueOf(payload.getTimeStamp());
String walState = payload.getState();
final String rowKeyStr = walName + DELIMITER + timestampStr + DELIMITER + walState;
return Bytes.toBytes(rowKeyStr);
} | 3.68 |
open-banking-gateway_DatasafeMetadataStorage_delete | /**
* Deletes user profile data
* @param id Entity id
*/
@Override
@Transactional
public void delete(String id) {
throw new IllegalStateException("Not allowed");
} | 3.68 |
framework_AbstractRendererConnector_getRenderer | /**
* Returns the renderer associated with this renderer connector.
* <p>
* A subclass of AbstractRendererConnector should override this method as
* shown below. The framework uses
* {@link com.google.gwt.core.client.GWT#create(Class) GWT.create(Class)} to
* create a renderer based on the return type of the overridden method, but
* only if {@link #createRenderer()} is not overridden as well:
*
* <pre>
* public MyRenderer getRenderer() {
* return (MyRenderer) super.getRenderer();
* }
* </pre>
*
* @return the renderer bound to this connector
*/
public Renderer<T> getRenderer() {
if (renderer == null) {
renderer = createRenderer();
}
return renderer;
} | 3.68 |
hbase_ZKUtil_listChildrenBFSNoWatch | /**
* BFS Traversal of all the children under path, with the entries in the list, in the same order
 * as that of the traversal. Lists all the children without setting any watches.
 * @param zkw zk reference
 * @param znode path of node
 * @return list of children znodes under the path
 * @throws KeeperException if unexpected ZooKeeper exception
*/
private static List<String> listChildrenBFSNoWatch(ZKWatcher zkw, final String znode)
throws KeeperException {
Deque<String> queue = new LinkedList<>();
List<String> tree = new ArrayList<>();
queue.add(znode);
while (true) {
String node = queue.pollFirst();
if (node == null) {
break;
}
List<String> children = listChildrenNoWatch(zkw, node);
if (children == null) {
continue;
}
for (final String child : children) {
final String childPath = node + "/" + child;
queue.add(childPath);
tree.add(childPath);
}
}
return tree;
} | 3.68 |
hbase_HFileBlock_getOnDiskDataSizeWithHeader | /** Returns the size of data on disk + header. Excludes checksum. */
int getOnDiskDataSizeWithHeader() {
return this.onDiskDataSizeWithHeader;
} | 3.68 |
framework_AbstractComponentConnector_onDragSourceAttached | /**
* Invoked when a {@link DragSourceExtensionConnector} has been attached to
* this component.
* <p>
* By default, does nothing. If you need to apply some changes to the
* widget, override this method.
* <p>
* This is a framework internal method, and should not be invoked manually.
*
* @since 8.1
* @see #onDragSourceDetached()
*/
public void onDragSourceAttached() {
} | 3.68 |
pulsar_DispatchRateLimiter_createDispatchRate | /**
* createDispatchRate according to broker service config.
*
* @return
*/
private DispatchRate createDispatchRate() {
int dispatchThrottlingRateInMsg;
long dispatchThrottlingRateInByte;
ServiceConfiguration config = brokerService.pulsar().getConfiguration();
switch (type) {
case TOPIC:
dispatchThrottlingRateInMsg = config.getDispatchThrottlingRatePerTopicInMsg();
dispatchThrottlingRateInByte = config.getDispatchThrottlingRatePerTopicInByte();
break;
case SUBSCRIPTION:
dispatchThrottlingRateInMsg = config.getDispatchThrottlingRatePerSubscriptionInMsg();
dispatchThrottlingRateInByte = config.getDispatchThrottlingRatePerSubscriptionInByte();
break;
case REPLICATOR:
dispatchThrottlingRateInMsg = config.getDispatchThrottlingRatePerReplicatorInMsg();
dispatchThrottlingRateInByte = config.getDispatchThrottlingRatePerReplicatorInByte();
break;
case BROKER:
dispatchThrottlingRateInMsg = config.getDispatchThrottlingRateInMsg();
dispatchThrottlingRateInByte = config.getDispatchThrottlingRateInByte();
break;
default:
dispatchThrottlingRateInMsg = -1;
dispatchThrottlingRateInByte = -1;
}
return DispatchRate.builder()
.dispatchThrottlingRateInMsg(dispatchThrottlingRateInMsg)
.dispatchThrottlingRateInByte(dispatchThrottlingRateInByte)
.ratePeriodInSecond(1)
.relativeToPublishRate(type != Type.BROKER && config.isDispatchThrottlingRateRelativeToPublishRate())
.build();
} | 3.68 |
hadoop_BytesWritable_getSize | /**
* Get the current size of the buffer.
* @deprecated Use {@link #getLength()} instead.
* @return current size of the buffer.
*/
@Deprecated
public int getSize() {
return getLength();
} | 3.68 |
graphhopper_VectorTile_hasId | /**
* <code>optional uint64 id = 1 [default = 0];</code>
*/
public boolean hasId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
} | 3.68 |
flink_CheckpointConfig_getCheckpointStorage | /**
* @return The {@link CheckpointStorage} that has been configured for the job. Or {@code null}
* if none has been set.
* @see #setCheckpointStorage(CheckpointStorage)
*/
@Nullable
@PublicEvolving
public CheckpointStorage getCheckpointStorage() {
return this.storage;
} | 3.68 |
hadoop_TimelineDomain_setCreatedTime | /**
* Set the created time of the domain
*
* @param createdTime the created time of the domain
*/
public void setCreatedTime(Long createdTime) {
this.createdTime = createdTime;
} | 3.68 |
hbase_FavoredNodeLoadBalancer_generateFavoredNodesForDaughter | /*
* Generate Favored Nodes for daughters during region split. If the parent does not have FN,
* regenerates them for the daughters. If the parent has FN, inherit two FN from parent for each
* daughter and generate the remaining. The primary FN for both the daughters should be the same
* as parent. Inherit the secondary FN from the parent but keep it different for each daughter.
* Choose the remaining FN randomly. This would give us better distribution over a period of time
* after enough splits.
*/
@Override
public void generateFavoredNodesForDaughter(List<ServerName> servers, RegionInfo parent,
RegionInfo regionA, RegionInfo regionB) throws IOException {
Map<RegionInfo, List<ServerName>> result = new HashMap<>();
FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, rackManager);
helper.initialize();
List<ServerName> parentFavoredNodes = getFavoredNodes(parent);
if (parentFavoredNodes == null) {
LOG.debug("Unable to find favored nodes for parent, " + parent
+ " generating new favored nodes for daughter");
result.put(regionA, helper.generateFavoredNodes(regionA));
result.put(regionB, helper.generateFavoredNodes(regionB));
} else {
// Lets get the primary and secondary from parent for regionA
Set<ServerName> regionAFN =
getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, SECONDARY);
result.put(regionA, Lists.newArrayList(regionAFN));
// Lets get the primary and tertiary from parent for regionB
Set<ServerName> regionBFN =
getInheritedFNForDaughter(helper, parentFavoredNodes, PRIMARY, TERTIARY);
result.put(regionB, Lists.newArrayList(regionBFN));
}
fnm.updateFavoredNodes(result);
} | 3.68 |
framework_Slot_hasRelativeHeight | /**
* Returns whether the slot's height is relative.
*
* @return {@code true} if the slot uses relative height, {@code false} if
* the slot has a static height
*/
public boolean hasRelativeHeight() {
return relativeHeight;
} | 3.68 |
flink_StringUtils_generateRandomAlphanumericString | /**
* Creates a random alphanumeric string of given length.
*
* @param rnd The random number generator to use.
* @param length The number of alphanumeric characters to append.
*/
public static String generateRandomAlphanumericString(Random rnd, int length) {
checkNotNull(rnd);
checkArgument(length >= 0);
StringBuilder buffer = new StringBuilder(length);
for (int i = 0; i < length; i++) {
buffer.append(nextAlphanumericChar(rnd));
}
return buffer.toString();
} | 3.68 |
morf_XmlDataSetProducer_tableNames | /**
* @see org.alfasoftware.morf.metadata.Schema#tableNames()
*/
@Override
public Collection<String> tableNames() {
return xmlStreamProvider.availableStreamNames();
} | 3.68 |
pulsar_TripleLongPriorityQueue_add | /**
* Add a tuple of 3 long items to the priority queue.
*
* @param n1
* @param n2
* @param n3
*/
public void add(long n1, long n2, long n3) {
long arrayIdx = tuplesCount * ITEMS_COUNT;
if ((arrayIdx + 2) >= array.getCapacity()) {
array.increaseCapacity();
}
put(tuplesCount, n1, n2, n3);
siftUp(tuplesCount);
++tuplesCount;
} | 3.68 |
framework_CustomLayout_replaceComponent | /* Documented in superclass */
@Override
public void replaceComponent(Component oldComponent,
Component newComponent) {
// Gets the locations
String oldLocation = null;
String newLocation = null;
for (final String location : slots.keySet()) {
final Component component = slots.get(location);
if (component == oldComponent) {
oldLocation = location;
}
if (component == newComponent) {
newLocation = location;
}
}
if (oldLocation == null) {
addComponent(newComponent);
} else if (newLocation == null) {
removeComponent(oldLocation);
addComponent(newComponent, oldLocation);
} else {
slots.put(newLocation, oldComponent);
slots.put(oldLocation, newComponent);
getState().childLocations.put(newComponent, oldLocation);
getState().childLocations.put(oldComponent, newLocation);
}
} | 3.68 |
hudi_HoodieTable_reconcileAgainstMarkers | /**
* Reconciles WriteStats and marker files to detect and safely delete duplicate data files created because of Spark
* retries.
*
* @param context HoodieEngineContext
* @param instantTs Instant Timestamp
* @param stats Hoodie Write Stat
* @param consistencyCheckEnabled Consistency Check Enabled
* @throws HoodieIOException
*/
protected void reconcileAgainstMarkers(HoodieEngineContext context,
String instantTs,
List<HoodieWriteStat> stats,
boolean consistencyCheckEnabled) throws HoodieIOException {
try {
// Reconcile marker and data files with WriteStats so that partially written data-files due to failed
// (but succeeded on retry) tasks are removed.
String basePath = getMetaClient().getBasePath();
WriteMarkers markers = WriteMarkersFactory.get(config.getMarkersType(), this, instantTs);
if (!markers.doesMarkerDirExist()) {
// can happen if it was an empty write say.
return;
}
// Ignores log file appended for update, since they are already fail-safe.
// but new created log files should be included.
Set<String> invalidDataPaths = getInvalidDataPaths(markers);
Set<String> validDataPaths = stats.stream()
.map(HoodieWriteStat::getPath)
.collect(Collectors.toSet());
Set<String> validCdcDataPaths = stats.stream()
.map(HoodieWriteStat::getCdcStats)
.filter(Objects::nonNull)
.flatMap(cdcStat -> cdcStat.keySet().stream())
.collect(Collectors.toSet());
// Contains list of partially created files. These needs to be cleaned up.
invalidDataPaths.removeAll(validDataPaths);
invalidDataPaths.removeAll(validCdcDataPaths);
if (!invalidDataPaths.isEmpty()) {
LOG.info("Removing duplicate files created due to task retries before committing. Paths=" + invalidDataPaths);
Map<String, List<Pair<String, String>>> invalidPathsByPartition = invalidDataPaths.stream()
.map(dp -> Pair.of(new Path(basePath, dp).getParent().toString(), new Path(basePath, dp).toString()))
.collect(Collectors.groupingBy(Pair::getKey));
// Ensure all files in delete list is actually present. This is mandatory for an eventually consistent FS.
// Otherwise, we may miss deleting such files. If files are not found even after retries, fail the commit
if (consistencyCheckEnabled) {
// This will either ensure all files to be deleted are present.
waitForAllFiles(context, invalidPathsByPartition, FileVisibility.APPEAR);
}
// Now delete partially written files
context.setJobStatus(this.getClass().getSimpleName(), "Delete all partially written files: " + config.getTableName());
deleteInvalidFilesByPartitions(context, invalidPathsByPartition);
// Now ensure the deleted files disappear
if (consistencyCheckEnabled) {
// This will either ensure all files to be deleted are absent.
waitForAllFiles(context, invalidPathsByPartition, FileVisibility.DISAPPEAR);
}
}
} catch (IOException ioe) {
throw new HoodieIOException(ioe.getMessage(), ioe);
}
} | 3.68 |
hadoop_BinaryPartitioner_setLeftOffset | /**
* Set the subarray to be used for partitioning to
* <code>bytes[offset:]</code> in Python syntax.
*
* @param conf configuration object
* @param offset left Python-style offset
*/
public static void setLeftOffset(Configuration conf, int offset) {
conf.setInt(LEFT_OFFSET_PROPERTY_NAME, offset);
} | 3.68 |
hmily_BrpcHmilyOrderApplication_main | /**
* main.
*
* @param args args
*/
public static void main(final String[] args) {
SpringApplication.run(BrpcHmilyOrderApplication.class, args);
} | 3.68 |
flink_HiveStatsUtil_statsChanged | /**
* Determine whether the table statistics changes.
*
* @param newTableStats new catalog table statistics.
* @param parameters original hive table statistics parameters.
* @return whether the table statistics changes
*/
public static boolean statsChanged(
CatalogTableStatistics newTableStats, Map<String, String> parameters) {
return newTableStats.getRowCount()
!= parsePositiveLongStat(parameters, StatsSetupConst.ROW_COUNT)
|| newTableStats.getTotalSize()
!= parsePositiveLongStat(parameters, StatsSetupConst.TOTAL_SIZE)
|| newTableStats.getFileCount()
!= parsePositiveIntStat(parameters, StatsSetupConst.NUM_FILES)
|| newTableStats.getRawDataSize()
!= parsePositiveLongStat(parameters, StatsSetupConst.RAW_DATA_SIZE);
} | 3.68 |
framework_AbstractInMemoryContainer_fireItemRemoved | /**
* Notify item set change listeners that an item has been removed from the
* container.
*
* @since 7.4
*
* @param position
* position of the removed item in the view prior to removal (if
* was visible)
* @param itemId
* id of the removed item, of type {@link Object} to satisfy
* {@link Container#removeItem(Object)} API
*/
protected void fireItemRemoved(int position, Object itemId) {
fireItemsRemoved(position, itemId, 1);
} | 3.68 |
hbase_ExecutorService_delayedSubmit | // Submit the handler after the given delay. Used for retrying.
public void delayedSubmit(EventHandler eh, long delay, TimeUnit unit) {
ListenableFuture<?> future = delayedSubmitTimer.schedule(() -> submit(eh), delay, unit);
future.addListener(() -> {
try {
future.get();
} catch (Exception e) {
LOG.error("Failed to submit the event handler {} to executor", eh, e);
}
}, MoreExecutors.directExecutor());
} | 3.68 |
flink_DataStream_addSink | /**
* Adds the given sink to this DataStream. Only streams with sinks added will be executed once
* the {@link StreamExecutionEnvironment#execute()} method is called.
*
* @param sinkFunction The object containing the sink's invoke function.
* @return The closed DataStream.
*/
public DataStreamSink<T> addSink(SinkFunction<T> sinkFunction) {
// read the output type of the input Transform to coax out errors about MissingTypeInfo
transformation.getOutputType();
// configure the type if needed
if (sinkFunction instanceof InputTypeConfigurable) {
((InputTypeConfigurable) sinkFunction).setInputType(getType(), getExecutionConfig());
}
return DataStreamSink.forSinkFunction(this, clean(sinkFunction));
} | 3.68 |
hadoop_MappingRuleResult_createRejectResult | /**
* Generator method for reject results.
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createRejectResult() {
return RESULT_REJECT;
} | 3.68 |
hmily_HmilyXaTransactionManager_getThreadTransaction | /**
* Gets thread transaction.
*
* @return the thread transaction
*/
public Transaction getThreadTransaction() {
synchronized (tms) {
Stack<Transaction> stack = tms.get();
if (stack == null) {
return null;
}
return stack.peek();
}
} | 3.68 |
flink_FailureHandlingResultSnapshot_getTimestamp | /**
* The time the failure occurred.
*
* @return The time of the failure.
*/
public long getTimestamp() {
return timestamp;
} | 3.68 |
hbase_DeletionListener_getException | /**
* Get the last exception which has occurred when re-setting the watch. Use hasException() to
* check whether or not an exception has occurred.
* @return The last exception observed when re-setting the watch.
*/
public Throwable getException() {
return exception;
} | 3.68 |
flink_SqlFunctionUtils_log | /** Returns the logarithm of "x" with base "base". */
public static double log(double base, double x) {
return Math.log(x) / Math.log(base);
} | 3.68 |
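This is the standard change-of-base identity, log_base(x) = ln(x) / ln(base); a quick numeric check (expect tiny floating-point error in the last bits):

```java
// Quick numeric check of the change-of-base identity used above.
public class LogBaseDemo {
    public static void main(String[] args) {
        System.out.println(Math.log(8.0) / Math.log(2.0));     // ~3.0 (log base 2 of 8)
        System.out.println(Math.log(1000.0) / Math.log(10.0)); // ~3.0 (log base 10 of 1000)
    }
}
```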