name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_ReplicationPeerImpl_getId | /**
* Get the identifier of this peer
* @return string representation of the id (short)
*/
@Override
public String getId() {
return id;
} | 3.68 |
hbase_LogRollRegionServerProcedureManager_buildSubprocedure | /**
* If in a running state, creates the specified subprocedure for handling a backup procedure.
* @return Subprocedure to submit to the ProcedureMember.
*/
public Subprocedure buildSubprocedure(byte[] data) {
// don't run a backup if the parent is stop(ping)
if (rss.isStopping() || rss.isStopped()) {
throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName()
+ ", because stopping/stopped!");
}
LOG.info("Attempting to run a roll log procedure for backup.");
ForeignExceptionDispatcher errorDispatcher = new ForeignExceptionDispatcher();
Configuration conf = rss.getConfiguration();
long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
long wakeMillis =
conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT);
LogRollBackupSubprocedurePool taskManager =
new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf);
return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis,
taskManager, data);
} | 3.68 |
framework_VMenuBar_addItem | /**
* Add a new item to this menu.
*
* @param item the item to add
*/
public void addItem(CustomMenuItem item) {
if (items.contains(item)) {
return;
}
add(item);
item.setParentMenu(this);
item.setSelected(false);
items.add(item);
} | 3.68 |
framework_ResourceLoader_loadStylesheet | /**
* Load a stylesheet and notify a listener when the stylesheet is loaded.
* Calling this method when the stylesheet is currently loading or already
* loaded doesn't cause the stylesheet to be loaded again, but the listener
* will still be notified when appropriate.
*
* @param stylesheetUrl
* the url of the stylesheet to load
* @param resourceLoadListener
* the listener that will get notified when the stylesheet is
* loaded
*/
public void loadStylesheet(final String stylesheetUrl,
final ResourceLoadListener resourceLoadListener) {
final String url = WidgetUtil.getAbsoluteUrl(stylesheetUrl);
final ResourceLoadEvent event = new ResourceLoadEvent(this, url);
if (loadedResources.contains(url)) {
if (resourceLoadListener != null) {
resourceLoadListener.onLoad(event);
}
return;
}
if (addListener(url, resourceLoadListener, loadListeners)) {
getLogger().info("Loading style sheet from " + url);
LinkElement linkElement = Document.get().createLinkElement();
linkElement.setRel("stylesheet");
linkElement.setType("text/css");
linkElement.setHref(url);
if (BrowserInfo.get().isSafariOrIOS()) {
// Safari doesn't fire any events for link elements
// See http://www.phpied.com/when-is-a-stylesheet-really-loaded/
Scheduler.get().scheduleFixedPeriod(new RepeatingCommand() {
private final Duration duration = new Duration();
@Override
public boolean execute() {
int styleSheetLength = getStyleSheetLength(url);
if (styleSheetLength > 0) {
fireLoad(event);
return false; // Stop repeating
} else if (styleSheetLength == 0) {
// "Loaded" empty sheet -> most likely 404 error
fireError(event);
return true;
} else if (duration.elapsedMillis() > 60 * 1000) {
fireError(event);
return false;
} else {
return true; // Continue repeating
}
}
}, 10);
} else {
addOnloadHandler(linkElement, new ResourceLoadListener() {
@Override
public void onLoad(ResourceLoadEvent event) {
// Chrome, IE, Edge all fire load for errors, must check
// stylesheet data
if (BrowserInfo.get().isChrome()
|| BrowserInfo.get().isIE()
|| BrowserInfo.get().isEdge()) {
int styleSheetLength = getStyleSheetLength(url);
// Error if there's an empty stylesheet
if (styleSheetLength == 0) {
fireError(event);
return;
}
}
fireLoad(event);
}
@Override
public void onError(ResourceLoadEvent event) {
fireError(event);
}
}, event);
if (BrowserInfo.get().isOpera()) {
// Opera onerror never fired, assume error if no onload in x
// seconds
new Timer() {
@Override
public void run() {
if (!loadedResources.contains(url)) {
fireError(event);
}
}
}.schedule(5 * 1000);
}
}
head.appendChild(linkElement);
}
} | 3.68 |
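A minimal caller sketch for the method above; the `loader` reference and stylesheet path are placeholders, while the listener interface shape is taken from the snippet itself:

```java
// "loader" is a hypothetical ResourceLoader instance; the URL is a placeholder.
loader.loadStylesheet("VAADIN/themes/mytheme/extra.css",
        new ResourceLoadListener() {
            @Override
            public void onLoad(ResourceLoadEvent event) {
                // stylesheet is in the DOM and usable
            }

            @Override
            public void onError(ResourceLoadEvent event) {
                // 404, empty sheet, or timeout (see the Safari/Opera branches above)
            }
        });
```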
framework_VTransferable_getVariableMap | /**
* This helper method should only be called by {@link VDragAndDropManager}.
*
* @return data in this Transferable that needs to be moved to server.
*/
Map<String, Object> getVariableMap() {
return variables;
} | 3.68 |
flink_FlinkImageBuilder_setImageNamePrefix | /**
* Sets the prefix name of building image.
*
* <p>If the name is not specified, {@link #DEFAULT_IMAGE_NAME_BUILD_PREFIX} will be used.
*/
public FlinkImageBuilder setImageNamePrefix(String imageNamePrefix) {
this.imageNamePrefix = imageNamePrefix;
return this;
} | 3.68 |
dubbo_ServiceInstancesChangedListener_accept | /**
* @param event {@link ServiceInstancesChangedEvent event}
 * @return <code>true</code> if the service name matches, <code>false</code> otherwise
*/
private boolean accept(ServiceInstancesChangedEvent event) {
return serviceNames.contains(event.getServiceName());
} | 3.68 |
hbase_HMaster_startServiceThreads | /*
* Start up all services. If any of these threads gets an unhandled exception then they just die
* with a logged message. This should be fine because in general, we do not expect the master to
* get such unhandled exceptions as OOMEs; it should be lightly loaded. See what HRegionServer
* does if need to install an unexpected exception handler.
*/
private void startServiceThreads() throws IOException {
// Start the executor service pools
final int masterOpenRegionPoolSize = conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS,
HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.MASTER_OPEN_REGION).setCorePoolSize(masterOpenRegionPoolSize));
final int masterCloseRegionPoolSize = conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS,
HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_CLOSE_REGION)
.setCorePoolSize(masterCloseRegionPoolSize));
final int masterServerOpThreads = conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS,
HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS)
.setCorePoolSize(masterServerOpThreads));
final int masterServerMetaOpsThreads =
conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS,
HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS)
.setCorePoolSize(masterServerMetaOpsThreads));
final int masterLogReplayThreads = conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS,
HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.M_LOG_REPLAY_OPS).setCorePoolSize(masterLogReplayThreads));
final int masterSnapshotThreads = conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY,
SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
.setCorePoolSize(masterSnapshotThreads).setAllowCoreThreadTimeout(true));
final int masterMergeDispatchThreads = conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS,
HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS)
.setCorePoolSize(masterMergeDispatchThreads).setAllowCoreThreadTimeout(true));
// We depend on there being only one instance of this executor running
// at a time. To do concurrency, would need fencing of enable/disable of
// tables.
// Any time changing this maxThreads to > 1, pls see the comment at
// AccessController#postCompletedCreateTableAction
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS).setCorePoolSize(1));
startProcedureExecutor();
// Create log cleaner thread pool
logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
Map<String, Object> params = new HashMap<>();
params.put(MASTER, this);
// Start log cleaner thread
int cleanerInterval =
conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL);
this.logCleaner =
new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(),
getMasterWalManager().getOldLogDir(), logCleanerPool, params);
getChoreService().scheduleChore(logCleaner);
Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
// Create custom archive hfile cleaners
String[] paths = conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS);
// todo: handle the overlap issues for the custom paths
if (paths != null && paths.length > 0) {
if (conf.getStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS) == null) {
Set<String> cleanerClasses = new HashSet<>();
String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
if (cleaners != null) {
Collections.addAll(cleanerClasses, cleaners);
}
conf.setStrings(HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS,
cleanerClasses.toArray(new String[cleanerClasses.size()]));
LOG.info("Archive custom cleaner paths: {}, plugins: {}", Arrays.asList(paths),
cleanerClasses);
}
// share the hfile cleaner pool in custom paths
sharedHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf.get(CUSTOM_POOL_SIZE, "6"));
for (int i = 0; i < paths.length; i++) {
Path path = new Path(paths[i].trim());
HFileCleaner cleaner =
new HFileCleaner("ArchiveCustomHFileCleaner-" + path.getName(), cleanerInterval, this,
conf, getMasterFileSystem().getFileSystem(), new Path(archiveDir, path),
HFileCleaner.HFILE_CLEANER_CUSTOM_PATHS_PLUGINS, sharedHFileCleanerPool, params, null);
hfileCleaners.add(cleaner);
hfileCleanerPaths.add(path);
}
}
// Create the whole archive dir cleaner thread pool
exclusiveHFileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
hfileCleaners.add(0,
new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(),
archiveDir, exclusiveHFileCleanerPool, params, hfileCleanerPaths));
hfileCleanerPaths.add(0, archiveDir);
// Schedule all the hfile cleaners
for (HFileCleaner hFileCleaner : hfileCleaners) {
getChoreService().scheduleChore(hFileCleaner);
}
// Regions Reopen based on very high storeFileRefCount is considered enabled
// only if hbase.regions.recovery.store.file.ref.count has value > 0
final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
if (maxStoreFileRefCount > 0) {
this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this);
getChoreService().scheduleChore(this.regionsRecoveryChore);
} else {
LOG.info(
"Reopening regions with very high storeFileRefCount is disabled. "
+ "Provide threshold value > 0 for {} to enable it.",
HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
}
this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);
replicationBarrierCleaner =
new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager);
getChoreService().scheduleChore(replicationBarrierCleaner);
final boolean isSnapshotChoreEnabled = this.snapshotCleanupStateStore.get();
this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
if (isSnapshotChoreEnabled) {
getChoreService().scheduleChore(this.snapshotCleanerChore);
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore..");
}
}
serviceStarted = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Started service threads");
}
} | 3.68 |
pulsar_Reflections_classExists | /**
* Check if class exists.
*
* @param fqcn fully qualified class name to search for
 * @return true if the class can be loaded from the jar, false otherwise
*/
public static boolean classExists(String fqcn) {
try {
Class.forName(fqcn);
return true;
} catch (ClassNotFoundException e) {
return false;
}
} | 3.68 |
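A typical guard built on this helper; the class name below is purely illustrative:

```java
// Only take the optional code path when the class is actually on the classpath.
if (Reflections.classExists("org.apache.avro.Schema")) {
    // wire up the Avro-dependent feature
} else {
    // fall back, or fail with a clear configuration error
}
```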
framework_ColorPickerPopup_setRGBTabVisible | /**
* Sets the RGB tab visibility.
*
* @param visible
* The visibility of the RGB tab
*/
public void setRGBTabVisible(boolean visible) {
if (visible && !isTabVisible(rgbTab)) {
tabs.addTab(rgbTab, "RGB", null);
checkIfTabsNeeded();
} else if (!visible && isTabVisible(rgbTab)) {
tabs.removeComponent(rgbTab);
checkIfTabsNeeded();
}
} | 3.68 |
morf_SqlUtils_delete | /**
* Constructs a Delete Statement.
*
* <p>Usage is discouraged; this method will be deprecated at some point. Use
* {@link DeleteStatement#delete(TableReference)} for preference.</p>
*
* @param table the database table to delete from.
* @return {@link DeleteStatement}
*/
public static DeleteStatement delete(TableReference table) {
return new DeleteStatement(table);
} | 3.68 |
framework_ContainerHierarchicalWrapper_addItemSetChangeListener | /*
* Registers a new Item set change listener for this Container. Don't add a
* JavaDoc comment here, we use the default documentation from implemented
* interface.
*/
@Override
public void addItemSetChangeListener(
Container.ItemSetChangeListener listener) {
if (container instanceof Container.ItemSetChangeNotifier) {
((Container.ItemSetChangeNotifier) container)
.addItemSetChangeListener(new PiggybackListener(listener));
}
} | 3.68 |
hadoop_CommonAuditContext_currentThreadID | /**
* A thread ID which is unique for this process and shared across all
* S3A clients on the same thread, even those using different FS instances.
* @return a thread ID for reporting.
*/
public static String currentThreadID() {
return Long.toString(Thread.currentThread().getId());
} | 3.68 |
hudi_HoodieTableMetadataUtil_getFileGroupIndexFromFileId | /**
* Extract the index from the fileID of a file group in the MDT partition. See {@code getFileIDForFileGroup} for the format of the fileID.
*
* @param fileId fileID of a file group.
* @return The index of file group
*/
public static int getFileGroupIndexFromFileId(String fileId) {
final int endIndex = getFileIdLengthWithoutFileIndex(fileId);
final int fromIndex = fileId.lastIndexOf("-", endIndex - 1);
return Integer.parseInt(fileId.substring(fromIndex + 1, endIndex));
} | 3.68 |
flink_ColumnStats_merge | /**
* Merges two column stats. When the stats are unknown, whatever the other are, we need return
* unknown stats. The unknown definition for column stats is null.
*
* @param other The other column stats to merge.
* @return The merged column stats.
*/
public ColumnStats merge(ColumnStats other, boolean isPartitionKey) {
if (this == UNKNOWN || other == UNKNOWN) {
return UNKNOWN;
}
Long ndv;
if (isPartitionKey) {
ndv = combineIfNonNull(Long::sum, this.ndv, other.ndv);
} else {
ndv = combineIfNonNull(Long::max, this.ndv, other.ndv);
}
Long nullCount = combineIfNonNull(Long::sum, this.nullCount, other.nullCount);
Double avgLen = combineIfNonNull((a1, a2) -> (a1 + a2) / 2, this.avgLen, other.avgLen);
Integer maxLen = combineIfNonNull(Math::max, this.maxLen, other.maxLen);
Number maxValue =
combineIfNonNull(
(n1, n2) -> n1.doubleValue() > n2.doubleValue() ? n1 : n2,
this.maxValue,
other.maxValue);
Number minValue =
combineIfNonNull(
(n1, n2) -> n1.doubleValue() < n2.doubleValue() ? n1 : n2,
this.minValue,
other.minValue);
@SuppressWarnings("unchecked")
Comparable max =
combineIfNonNull(
(c1, c2) -> ((Comparable) c1).compareTo(c2) > 0 ? c1 : c2,
this.max,
other.max);
@SuppressWarnings("unchecked")
Comparable min =
combineIfNonNull(
(c1, c2) -> ((Comparable) c1).compareTo(c2) < 0 ? c1 : c2,
this.min,
other.min);
if (max != null || min != null) {
return new ColumnStats(ndv, nullCount, avgLen, maxLen, max, min);
} else {
return new ColumnStats(ndv, nullCount, avgLen, maxLen, maxValue, minValue);
}
} | 3.68 |
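combineIfNonNull is referenced above but not shown. Given that unknown statistics are modeled as null fields and that merging with an unknown field must stay unknown, a plausible shape of the helper (an assumption, not the actual Flink code) is:

```java
// Assumed helper: if either side is unknown (null), the merged value is unknown (null).
private static <T> T combineIfNonNull(java.util.function.BiFunction<T, T, T> combiner, T a, T b) {
    return (a == null || b == null) ? null : combiner.apply(a, b);
}
```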
hibernate-validator_MethodValidationConfiguration_getConfiguredRuleSet | /**
* Return an unmodifiable Set of MethodConfigurationRule that are to be
* enforced based on the configuration.
*
* @return a set of method configuration rules based on this configuration state
*/
public Set<MethodConfigurationRule> getConfiguredRuleSet() {
return configuredRuleSet;
} | 3.68 |
hibernate-validator_ValidatorFactoryBean_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
rocketmq-connect_RocketMQSourceValueConverter_convertStructValue | /**
* convert struct value
*
 * @param toStruct the target RocketMQ Connect struct to populate
 * @param originalStruct the source Kafka Connect struct to read values from
*/
private void convertStructValue(Struct toStruct, org.apache.kafka.connect.data.Struct originalStruct) {
for (Field field : toStruct.schema().getFields()) {
try {
FieldType type = field.getSchema().getFieldType();
Object value = originalStruct.get(field.getName());
switch (type) {
case INT8:
case INT16:
case INT32:
case INT64:
case FLOAT32:
case FLOAT64:
case BOOLEAN:
case STRING:
case BYTES:
toStruct.put(field.getName(), value);
break;
case STRUCT:
case ARRAY:
case MAP:
toStruct.put(
field.getName(),
convertKafkaValue(
toStruct.schema().getField(field.getName()).getSchema(),
value
)
);
break;
}
} catch (Exception ex) {
logger.error("Convert schema failure! ex {}", ex);
throw new ConnectException(ex);
}
}
} | 3.68 |
hbase_MultiByteBuff_position | /**
* Sets this MBB's position to the given value.
* @return this object
*/
@Override
public MultiByteBuff position(int position) {
checkRefCount();
// Short circuit for positioning within the cur item. Mostly that is the case.
if (
this.itemBeginPos[this.curItemIndex] <= position
&& this.itemBeginPos[this.curItemIndex + 1] > position
) {
this.curItem.position(position - this.itemBeginPos[this.curItemIndex]);
return this;
}
int itemIndex = getItemIndex(position);
// All items from 0 - curItem-1 set position at end.
for (int i = 0; i < itemIndex; i++) {
this.items[i].position(this.items[i].limit());
}
// All items after curItem set position at begin
for (int i = itemIndex + 1; i < this.items.length; i++) {
this.items[i].position(0);
}
this.curItem = this.items[itemIndex];
this.curItem.position(position - this.itemBeginPos[itemIndex]);
this.curItemIndex = itemIndex;
return this;
} | 3.68 |
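For example (sizes illustrative): with three backing items of 10 bytes each, itemBeginPos is [0, 10, 20, 30]; position(15) falls in item 1, so item 0 is positioned at its limit, item 2 at 0, and item 1 becomes the current item positioned at 15 - 10 = 5.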
hbase_HBaseReplicationEndpoint_reportSinkSuccess | /**
 * Report that a {@code SinkPeer} successfully replicated a chunk of data, clearing any previously
 * recorded failed replication attempts against it.
*/
protected synchronized void reportSinkSuccess(SinkPeer sinkPeer) {
badReportCounts.remove(sinkPeer.getServerName());
} | 3.68 |
hadoop_Cluster_getBlackListedTaskTrackers | /**
* Get blacklisted trackers.
*
* @return array of {@link TaskTrackerInfo}
* @throws IOException
* @throws InterruptedException
*/
public TaskTrackerInfo[] getBlackListedTaskTrackers()
throws IOException, InterruptedException {
return client.getBlacklistedTrackers();
} | 3.68 |
hadoop_ByteArrayDecodingState_checkInputBuffers | /**
* Check and ensure the buffers are of the desired length.
* @param buffers the buffers to check
*/
void checkInputBuffers(byte[][] buffers) {
int validInputs = 0;
for (byte[] buffer : buffers) {
if (buffer == null) {
continue;
}
if (buffer.length != decodeLength) {
throw new HadoopIllegalArgumentException(
"Invalid buffer, not of length " + decodeLength);
}
validInputs++;
}
if (validInputs < decoder.getNumDataUnits()) {
throw new HadoopIllegalArgumentException(
"No enough valid inputs are provided, not recoverable");
}
} | 3.68 |
framework_SQLContainer_updateOffsetAndCache | /**
* Determines a new offset for updating the row cache. The offset is
* calculated from the given index, and will be fixed to match the start of
* a page, based on the value of pageLength.
*
* @param index
* Index of the item that was requested, but not found in cache
*/
private void updateOffsetAndCache(int index) {
int oldOffset = currentOffset;
currentOffset = (index / pageLength) * pageLength - cacheOverlap;
if (currentOffset < 0) {
currentOffset = 0;
}
if (oldOffset == currentOffset && !cachedItems.isEmpty()) {
return;
}
getPage();
} | 3.68 |
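For example, with pageLength = 100 and cacheOverlap = 10 (values illustrative, not defaults), a request for index 234 gives (234 / 100) * 100 - 10 = 190, so the cache is refilled starting one overlap before the page containing the requested row; a negative result is clamped to 0, and an unchanged offset with a non-empty cache skips the reload.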
hadoop_ManifestCommitterSupport_createIOStatisticsStore | /**
* Create an IOStatistics Store with the standard statistics
* set up.
* @return a store builder preconfigured with the standard stats.
*/
public static IOStatisticsStoreBuilder createIOStatisticsStore() {
final IOStatisticsStoreBuilder store
= iostatisticsStore();
store.withSampleTracking(COUNTER_STATISTICS);
store.withDurationTracking(DURATION_STATISTICS);
return store;
} | 3.68 |
hadoop_RollbackResponse_newInstance | /**
* Create new instance of a Rollback response.
* @return Rollback Response.
*/
@Private
@Unstable
public static RollbackResponse newInstance() {
return Records.newRecord(RollbackResponse.class);
} | 3.68 |
querydsl_ColumnMetadata_named | /**
* Creates default column meta data with the given column name, but without
* any type or constraint information. Use the fluent builder methods to
* further configure it.
*
* @throws NullPointerException
* if the name is null
*/
public static ColumnMetadata named(String name) {
return new ColumnMetadata(null, name, null, true, UNDEFINED, UNDEFINED);
} | 3.68 |
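A usage sketch; named(...) is shown above, while the fluent methods chained after it (ofType, notNull) are assumptions about the rest of the builder API:

```java
// Bare metadata first, then type/constraint details via the fluent API (method names assumed).
ColumnMetadata id = ColumnMetadata.named("ID")
        .ofType(java.sql.Types.BIGINT)
        .notNull();
```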
graphhopper_VectorTile_getValuesBuilderList | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public java.util.List<vector_tile.VectorTile.Tile.Value.Builder>
getValuesBuilderList() {
return getValuesFieldBuilder().getBuilderList();
} | 3.68 |
hbase_PrivateCellUtil_writeFamily | /**
* Writes the family from the given cell to the output stream
 * @param out The output stream to which the data has to be written
 * @param cell The cell whose contents have to be written
* @param flength the family length
*/
public static void writeFamily(OutputStream out, Cell cell, byte flength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) cell).getFamilyByteBuffer(),
((ByteBufferExtendedCell) cell).getFamilyPosition(), flength);
} else {
out.write(cell.getFamilyArray(), cell.getFamilyOffset(), flength);
}
} | 3.68 |
pulsar_ResourceUsageTopicTransportManager_unregisterResourceUsageConsumer | /*
* Unregister a resource owner (resource-group, tenant, namespace, topic etc).
*
 * @param r the resource usage consumer to unregister
*/
public void unregisterResourceUsageConsumer(ResourceUsageConsumer r) {
consumerMap.remove(r.getID());
} | 3.68 |
hadoop_FindOptions_isFollowArgLink | /**
 * Should command line symbolic links be followed?
*
* @return true indicates links should be followed
*/
public boolean isFollowArgLink() {
return this.followArgLink;
} | 3.68 |
framework_Escalator_getMaxVisibleRowCount | /**
* Gets the maximum number of body rows that can be visible on the screen at
* once.
*
* @return the maximum capacity
*/
public int getMaxVisibleRowCount() {
return body.getMaxVisibleRowCount();
} | 3.68 |
hudi_HoodieMetadataTableValidator_validatePartitions | /**
 * Compare the partition listing results between the metadata table and the file system.
*/
private List<String> validatePartitions(HoodieSparkEngineContext engineContext, String basePath) {
// compare partitions
List<String> allPartitionPathsFromFS = FSUtils.getAllPartitionPaths(engineContext, basePath, false);
HoodieTimeline completedTimeline = metaClient.getCommitsTimeline().filterCompletedInstants();
// ignore partitions created by uncommitted ingestion.
allPartitionPathsFromFS = allPartitionPathsFromFS.stream().parallel().filter(part -> {
HoodiePartitionMetadata hoodiePartitionMetadata =
new HoodiePartitionMetadata(metaClient.getFs(), FSUtils.getPartitionPath(basePath, part));
Option<String> instantOption = hoodiePartitionMetadata.readPartitionCreatedCommitTime();
if (instantOption.isPresent()) {
String instantTime = instantOption.get();
// There are two cases where the created commit time is written to the partition metadata:
// (1) Commit C1 creates the partition and C1 succeeds, the partition metadata has C1 as
// the created commit time.
// (2) Commit C1 creates the partition, the partition metadata is written, and C1 fails
// during writing data files. Next time, C2 adds new data to the same partition after C1
// is rolled back. In this case, the partition metadata still has C1 as the created commit
// time, since Hudi does not rewrite the partition metadata in C2.
if (!completedTimeline.containsOrBeforeTimelineStarts(instantTime)) {
Option<HoodieInstant> lastInstant = completedTimeline.lastInstant();
return lastInstant.isPresent()
&& HoodieTimeline.compareTimestamps(
instantTime, LESSER_THAN_OR_EQUALS, lastInstant.get().getTimestamp());
}
return true;
} else {
return false;
}
}).collect(Collectors.toList());
List<String> allPartitionPathsMeta = FSUtils.getAllPartitionPaths(engineContext, basePath, true);
Collections.sort(allPartitionPathsFromFS);
Collections.sort(allPartitionPathsMeta);
if (allPartitionPathsFromFS.size() != allPartitionPathsMeta.size()
|| !allPartitionPathsFromFS.equals(allPartitionPathsMeta)) {
String message = "Compare Partitions Failed! " + "AllPartitionPathsFromFS : " + allPartitionPathsFromFS + " and allPartitionPathsMeta : " + allPartitionPathsMeta;
LOG.error(message);
throw new HoodieValidationException(message);
}
return allPartitionPathsMeta;
} | 3.68 |
hadoop_TaskTrackerInfo_isBlacklisted | /**
* Whether tracker is blacklisted
* @return true if tracker is blacklisted
* false otherwise
*/
public boolean isBlacklisted() {
return isBlacklisted;
} | 3.68 |
hadoop_PatternValidator_validate | /**
 * Validate the name, restricting it to the set defined by the configured pattern.
* @param name name to validate
* @throws IllegalArgumentException if not a valid name
*/
public void validate(String name) {
if (!matches(name)) {
throw new IllegalArgumentException(
String.format(E_INVALID_NAME, name, pattern));
}
} | 3.68 |
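A usage sketch; the constructor argument (a regex describing the permitted set) is an assumption:

```java
// Reject names that fall outside the configured pattern.
PatternValidator validator = new PatternValidator("[a-z][a-z0-9_-]*");
validator.validate("my-service");   // passes
validator.validate("My Service!");  // throws IllegalArgumentException
```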
hbase_TakeSnapshotHandler_snapshotDisabledRegion | /**
* Take a snapshot of the specified disabled region
*/
protected void snapshotDisabledRegion(final RegionInfo regionInfo) throws IOException {
snapshotManifest.addRegion(CommonFSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
monitor.rethrowException();
status.setStatus("Completed referencing HFiles for offline region " + regionInfo.toString()
+ " of table: " + snapshotTable);
} | 3.68 |
open-banking-gateway_BaseDatasafeDbStorageService_deduceId | /**
* Resolves objects' ID from path.
* @param path Object path to resolve ID from.
* @return ID of the object in the table.
*/
protected String deduceId(AbsoluteLocation<?> path) {
return path.location().getWrapped().getPath().replaceAll("^/", "");
} | 3.68 |
hbase_BackupManager_readRegionServerLastLogRollResult | /**
* Get the RS log information after the last log roll from backup system table.
* @return RS log info
* @throws IOException exception
*/
public HashMap<String, Long> readRegionServerLastLogRollResult() throws IOException {
return systemTable.readRegionServerLastLogRollResult(backupInfo.getBackupRootDir());
} | 3.68 |
hadoop_MutableStat_getSnapshotTimeStamp | /**
* @return Return the SampleStat snapshot timestamp.
*/
public long getSnapshotTimeStamp() {
return snapshotTimeStamp;
} | 3.68 |
morf_DatabaseSchemaManager_ensureTableExists | /**
* Ensure that a specific table is present in the DB.
*
* @return Any SQL required to adjust the DB to include this table.
*/
private Collection<? extends String> ensureTableExists(Table requiredTable, TruncationBehavior truncationBehavior, ProducerCache producerCache) {
boolean dropRequired;
boolean deployRequired;
boolean truncateRequired;
DifferenceWriter differenceWriter = new DifferenceWriter() {
@Override
public void difference(String message) {
log.debug(message);
}
};
if (requiredTable.getName().length() > 27) {
log.warn("Required table name [" + requiredTable.getName() + "] is [" + requiredTable.getName().length() + "] characters long!");
}
// if we have an existing table, check it's identical
Table existingTable = getTable(producerCache, requiredTable.getName());
if (existingTable != null) {
if (new SchemaHomology(differenceWriter, "cache", "required").tablesMatch(existingTable, requiredTable)) {
// they match - it's identical, so we can re-use it
dropRequired = false;
deployRequired = false;
if (tablesNotNeedingTruncate.get().contains(requiredTable.getName().toUpperCase())) {
truncateRequired = TruncationBehavior.ALWAYS.equals(truncationBehavior);
} else {
// if we didn't find it in the cache we don't know what state it is in, so truncate it
truncateRequired = true;
tablesNotNeedingTruncate.get().add(requiredTable.getName().toUpperCase());
}
} else {
// they don't match
dropRequired = true;
deployRequired = true;
truncateRequired = false;
}
} else {
// no existing table
dropRequired = false;
deployRequired = true;
truncateRequired = false;
}
Collection<String> sql = Lists.newLinkedList();
if (dropRequired)
sql.addAll(dropTable(existingTable));
if (deployRequired) {
sql.addAll(deployTable(requiredTable));
}
if (truncateRequired) {
sql.addAll(truncateTable(requiredTable));
}
return sql;
} | 3.68 |
hadoop_FederationBlock_initHtmlPageFederation | /**
* Initialize the Html page.
*
* @param html html object
*/
private void initHtmlPageFederation(Block html, boolean isEnabled) {
List<Map<String, String>> lists = new ArrayList<>();
// Table header
TBODY<TABLE<Hamlet>> tbody =
html.table("#rms").$class("cell-border").$style("width:100%").thead().tr()
.th(".id", "SubCluster")
.th(".state", "State")
.th(".lastStartTime", "LastStartTime")
.th(".lastHeartBeat", "LastHeartBeat")
.th(".resources", "Resources")
.th(".nodes", "Nodes")
.__().__().tbody();
try {
if (isEnabled) {
initSubClusterPage(tbody, lists);
} else {
initLocalClusterPage(tbody, lists);
}
} catch (Exception e) {
LOG.error("Cannot render Router Federation.", e);
}
// Init FederationBlockTableJs
initFederationSubClusterDetailTableJs(html, lists);
// Tips
tbody.__().__().div().p().$style("color:red")
.__("*The application counts are local per subcluster").__().__();
} | 3.68 |
flink_DataSet_filter | /**
* Applies a Filter transformation on a {@link DataSet}.
*
* <p>The transformation calls a {@link
* org.apache.flink.api.common.functions.RichFilterFunction} for each element of the DataSet and
* retains only those element for which the function returns true. Elements for which the
* function returns false are filtered.
*
* @param filter The FilterFunction that is called for each element of the DataSet.
* @return A FilterOperator that represents the filtered DataSet.
* @see org.apache.flink.api.common.functions.RichFilterFunction
* @see FilterOperator
* @see DataSet
*/
public FilterOperator<T> filter(FilterFunction<T> filter) {
if (filter == null) {
throw new NullPointerException("Filter function must not be null.");
}
return new FilterOperator<>(this, clean(filter), Utils.getCallLocationName());
} | 3.68 |
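A usage sketch; env is assumed to be an ExecutionEnvironment, and the lambda stands in for a FilterFunction:

```java
// Keep only even numbers; elements for which the predicate returns false are dropped.
DataSet<Integer> numbers = env.fromElements(1, 2, 3, 4, 5);
DataSet<Integer> evens = numbers.filter(n -> n % 2 == 0);
```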
morf_XmlDataSetProducer_getWidth | /**
* @see org.alfasoftware.morf.metadata.Column#getWidth()
*/
@Override
public int getWidth() {
if (width == null) {
return 0;
}
return width;
} | 3.68 |
hbase_VerifyReplication_main | /**
* Main entry point.
* @param args The command line parameters.
* @throws Exception When running the job fails.
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(HBaseConfiguration.create(), new VerifyReplication(), args);
System.exit(res);
} | 3.68 |
framework_VComboBox_setSelectedCaption | /**
* Sets the caption of selected item, if "scroll to page" is disabled. This
* method is meant for internal use and may change in future versions.
*
* @since 7.7
* @param selectedCaption
* the caption of selected item
*/
public void setSelectedCaption(String selectedCaption) {
explicitSelectedCaption = selectedCaption;
if (selectedCaption != null) {
setText(selectedCaption);
}
} | 3.68 |
hbase_RegionServerSpaceQuotaManager_copyQuotaSnapshots | /**
 * Copies the last {@link SpaceQuotaSnapshot}s that were recorded, giving the current view of what
 * the RegionServer thinks each table's utilization is.
*/
public Map<TableName, SpaceQuotaSnapshot> copyQuotaSnapshots() {
return new HashMap<>(currentQuotaSnapshots.get());
} | 3.68 |
hbase_TableDescriptorBuilder_setReplicationScope | /**
 * Sets the replication scope for all, and only, the column families already in the builder. Column
 * families added later won't be backfilled with this replication scope.
* @param scope replication scope
* @return a TableDescriptorBuilder
*/
public TableDescriptorBuilder setReplicationScope(int scope) {
Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
newFamilies.putAll(desc.families);
newFamilies.forEach((cf, cfDesc) -> {
desc.removeColumnFamily(cf);
desc
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope).build());
});
return this;
} | 3.68 |
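A usage sketch of the caveat above: call setReplicationScope after the families have been added. The builder and constant names other than setReplicationScope are standard HBase 2.x API but should be treated as assumptions here:

```java
// Families first, then the scope; families added after this call keep their own scope.
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("my_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        .setReplicationScope(HConstants.REPLICATION_SCOPE_GLOBAL)
        .build();
```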
framework_DropTargetExtensionConnector_removeDropListeners | /**
* Removes dragenter, dragover, dragleave and drop event listeners from the
* given DOM element.
*
* @param element
* DOM element to remove event listeners from.
*/
private void removeDropListeners(Element element) {
EventTarget target = element.cast();
target.removeEventListener(Event.DRAGENTER, dragEnterListener);
target.removeEventListener(Event.DRAGOVER, dragOverListener);
target.removeEventListener(Event.DRAGLEAVE, dragLeaveListener);
target.removeEventListener(Event.DROP, dropListener);
} | 3.68 |
framework_CellReference_getColumnIndexDOM | /**
* Gets the index of the cell in the DOM. The difference to
* {@link #getColumnIndex()} is caused by hidden columns.
*
* @since 7.5.0
* @return the index of the column in the DOM
*/
public int getColumnIndexDOM() {
return columnIndexDOM;
} | 3.68 |
graphhopper_VectorTile_setUintValue | /**
* <code>optional uint64 uint_value = 5;</code>
*/
public Builder setUintValue(long value) {
bitField0_ |= 0x00000010;
uintValue_ = value;
onChanged();
return this;
} | 3.68 |
framework_TableSqlContainer_createTestTable | /**
* (Re)creates the test table
*
 * @param connectionPool the connection pool to reserve the connection from
*/
private void createTestTable(JDBCConnectionPool connectionPool) {
Connection conn = null;
try {
conn = connectionPool.reserveConnection();
Statement statement = conn.createStatement();
try {
statement.executeUpdate("DROP TABLE mytable");
} catch (SQLException e) {
}
statement.execute("CREATE TABLE mytable "
+ "(id INTEGER GENERATED BY DEFAULT AS IDENTITY, D DATE,"
+ "MYFIELD VARCHAR(45), " + "PRIMARY KEY(ID))");
statement.close();
conn.commit();
} catch (SQLException e) {
e.printStackTrace();
} finally {
connectionPool.releaseConnection(conn);
}
} | 3.68 |
flink_ProducerMergedPartitionFileReader_sliceBuffer | /**
* Slice the read memory segment to multiple small network buffers.
*
 * <p>Note that although the method slices the segment into multiple buffers, the sliced
 * buffers still share the same underlying memory segment.
*
* @param byteBuffer the byte buffer to be sliced, it points to the underlying memorySegment
* @param memorySegment the underlying memory segment to be sliced
* @param partialBuffer the partial buffer, if the partial buffer is not null, it contains the
* partial data buffer from the previous read
 * @param readBuffers the list that accepts the sliced buffers
* @return the first field is the number of total sliced bytes, the second field is the bytes of
* the partial buffer
*/
private Tuple2<Integer, Integer> sliceBuffer(
ByteBuffer byteBuffer,
MemorySegment memorySegment,
@Nullable CompositeBuffer partialBuffer,
BufferRecycler bufferRecycler,
List<Buffer> readBuffers) {
checkState(reusedHeaderBuffer.position() == 0);
checkState(partialBuffer == null || partialBuffer.missingLength() > 0);
NetworkBuffer buffer = new NetworkBuffer(memorySegment, bufferRecycler);
buffer.setSize(byteBuffer.remaining());
try {
int numSlicedBytes = 0;
if (partialBuffer != null) {
// If there is a previous small partial buffer, the current read operation should
// read additional data and combine it with the existing partial to construct a new
// complete buffer
buffer.retainBuffer();
int position = byteBuffer.position() + partialBuffer.missingLength();
int numPartialBytes = partialBuffer.missingLength();
partialBuffer.addPartialBuffer(
buffer.readOnlySlice(byteBuffer.position(), numPartialBytes));
numSlicedBytes += numPartialBytes;
byteBuffer.position(position);
readBuffers.add(partialBuffer);
}
partialBuffer = null;
while (byteBuffer.hasRemaining()) {
// Parse the small buffer's header
BufferHeader header = parseBufferHeader(byteBuffer);
if (header == null) {
// If the remaining data length in the buffer is not enough to construct a new
// complete buffer header, drop it directly.
break;
} else {
numSlicedBytes += HEADER_LENGTH;
}
if (header.getLength() <= byteBuffer.remaining()) {
// The remaining data length in the buffer is enough to generate a new small
// sliced network buffer. The small sliced buffer is not a partial buffer, we
// should read the slice of the buffer directly
buffer.retainBuffer();
ReadOnlySlicedNetworkBuffer slicedBuffer =
buffer.readOnlySlice(byteBuffer.position(), header.getLength());
slicedBuffer.setDataType(header.getDataType());
slicedBuffer.setCompressed(header.isCompressed());
byteBuffer.position(byteBuffer.position() + header.getLength());
numSlicedBytes += header.getLength();
readBuffers.add(slicedBuffer);
} else {
// The remaining data length in the buffer is smaller than the actual length of
// the buffer, so we should generate a new partial buffer, allowing for
// generating a new complete buffer during the next read operation
buffer.retainBuffer();
int numPartialBytes = byteBuffer.remaining();
numSlicedBytes += numPartialBytes;
partialBuffer = new CompositeBuffer(header);
partialBuffer.addPartialBuffer(
buffer.readOnlySlice(byteBuffer.position(), numPartialBytes));
readBuffers.add(partialBuffer);
break;
}
}
return Tuple2.of(numSlicedBytes, getPartialBufferReadBytes(partialBuffer));
} catch (Throwable throwable) {
LOG.error("Failed to slice the read buffer {}.", byteBuffer, throwable);
throw throwable;
} finally {
buffer.recycleBuffer();
}
} | 3.68 |
pulsar_EventLoopUtil_newEventLoopGroup | /**
* @return an EventLoopGroup suitable for the current platform
*/
public static EventLoopGroup newEventLoopGroup(int nThreads, boolean enableBusyWait, ThreadFactory threadFactory) {
if (Epoll.isAvailable()) {
String enableIoUring = System.getProperty(ENABLE_IO_URING);
// By default, io_uring will not be enabled, even if available. The environment variable will be used:
// enable.io_uring=1
if (StringUtils.equalsAnyIgnoreCase(enableIoUring, "1", "true")) {
// Throw exception if IOUring cannot be used
IOUring.ensureAvailability();
return new IOUringEventLoopGroup(nThreads, threadFactory);
} else {
if (!enableBusyWait) {
// Regular Epoll based event loop
return new EpollEventLoopGroup(nThreads, threadFactory);
}
// With low latency setting, put the Netty event loop on busy-wait loop to reduce cost of
// context switches
EpollEventLoopGroup eventLoopGroup = new EpollEventLoopGroup(nThreads, threadFactory,
() -> (selectSupplier, hasTasks) -> SelectStrategy.BUSY_WAIT);
// Enable CPU affinity on IO threads
for (int i = 0; i < nThreads; i++) {
eventLoopGroup.next().submit(() -> {
try {
CpuAffinity.acquireCore();
} catch (Throwable t) {
log.warn("Failed to acquire CPU core for thread {} {}", Thread.currentThread().getName(),
t.getMessage(), t);
}
});
}
return eventLoopGroup;
}
} else {
// Fallback to NIO
return new NioEventLoopGroup(nThreads, threadFactory);
}
} | 3.68 |
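A usage sketch; DefaultThreadFactory is Netty's io.netty.util.concurrent.DefaultThreadFactory, and the thread count and name are illustrative:

```java
// 4 IO threads, busy-wait disabled; picks epoll/io_uring or NIO per the logic above.
ThreadFactory tf = new DefaultThreadFactory("my-io");
EventLoopGroup group = EventLoopUtil.newEventLoopGroup(4, false, tf);
```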
hadoop_ColumnRWHelper_getPutTimestamp | /**
* Figures out the cell timestamp used in the Put For storing.
* Will supplement the timestamp if required. Typically done for flow run
* table.If we supplement the timestamp, we left shift the timestamp and
* supplement it with the AppId id so that there are no collisions in the flow
* run table's cells.
*/
private static long getPutTimestamp(
Long timestamp, boolean supplementTs, Attribute[] attributes) {
if (timestamp == null) {
timestamp = System.currentTimeMillis();
}
if (!supplementTs) {
return timestamp;
} else {
String appId = getAppIdFromAttributes(attributes);
long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
timestamp, appId);
return supplementedTS;
}
} | 3.68 |
framework_ContainerEventProvider_setStyleNameProperty | /**
* Set the property which provides the style name for the event.
*/
public void setStyleNameProperty(Object styleNameProperty) {
this.styleNameProperty = styleNameProperty;
} | 3.68 |
zilla_ManyToOneRingBuffer_consumerHeartbeatTime | /**
* {@inheritDoc}
*/
public long consumerHeartbeatTime()
{
return buffer.getLongVolatile(consumerHeartbeatIndex);
} | 3.68 |
druid_BeanTypeAutoProxyCreator_setTargetBeanType | /**
* @param targetClass the targetClass to set
*/
public void setTargetBeanType(Class<?> targetClass) {
this.targetBeanType = targetClass;
} | 3.68 |
flink_RpcEndpoint_internalCallOnStop | /**
* Internal method which is called by the RpcService implementation to stop the RpcEndpoint.
*
* @return Future which is completed once all post stop actions are completed. If an error
* occurs this future is completed exceptionally
*/
public final CompletableFuture<Void> internalCallOnStop() {
validateRunsInMainThread();
CompletableFuture<Void> stopFuture = new CompletableFuture<>();
try {
resourceRegistry.close();
stopFuture.complete(null);
} catch (IOException e) {
stopFuture.completeExceptionally(
new RuntimeException("Close resource registry fail", e));
}
stopFuture = CompletableFuture.allOf(stopFuture, onStop());
isRunning = false;
return stopFuture;
} | 3.68 |
flink_SqlNodeConvertUtils_toCatalogView | /** convert the query part of a VIEW statement into a {@link CatalogView}. */
static CatalogView toCatalogView(
SqlNode query,
List<SqlNode> viewFields,
Map<String, String> viewOptions,
String viewComment,
ConvertContext context) {
// Put the sql string unparse (getQuotedSqlString()) in front of
// the node conversion (toQueryOperation()),
// because before Calcite 1.22.0, during sql-to-rel conversion, the SqlWindow
// bounds state would be mutated as default when they are null (not specified).
// This bug is fixed in CALCITE-3877 of Calcite 1.23.0.
String originalQuery = context.toQuotedSqlString(query);
SqlNode validateQuery = context.getSqlValidator().validate(query);
// The LATERAL operator was eliminated during sql validation, thus the unparsed SQL
// does not contain LATERAL which is problematic,
// the issue was resolved in CALCITE-4077
// (always treat the table function as implicitly LATERAL).
String expandedQuery = context.expandSqlIdentifiers(originalQuery);
PlannerQueryOperation operation = toQueryOperation(validateQuery, context);
ResolvedSchema schema = operation.getResolvedSchema();
// the view column list in CREATE VIEW is optional, if it's not empty, we should update
// the column name with the names in view column list.
if (!viewFields.isEmpty()) {
// alias column names:
List<String> inputFieldNames = schema.getColumnNames();
List<String> aliasFieldNames =
viewFields.stream().map(SqlNode::toString).collect(Collectors.toList());
if (inputFieldNames.size() != aliasFieldNames.size()) {
throw new ValidationException(
String.format(
"VIEW definition and input fields not match:\n\tDef fields: %s.\n\tInput fields: %s.",
aliasFieldNames, inputFieldNames));
}
schema = ResolvedSchema.physical(aliasFieldNames, schema.getColumnDataTypes());
}
return CatalogView.of(
Schema.newBuilder().fromResolvedSchema(schema).build(),
viewComment,
originalQuery,
expandedQuery,
viewOptions);
} | 3.68 |
hmily_EventData_getConfig | /**
* Gets config.
*
* @param <M> the type parameter
* @return the config
*/
public <M extends Config> M getConfig() {
return (M) config;
} | 3.68 |
flink_HybridSource_builder | /** Builder for {@link HybridSource}. */
public static <T, EnumT extends SplitEnumerator> HybridSourceBuilder<T, EnumT> builder(
Source<T, ?, ?> firstSource) {
HybridSourceBuilder<T, EnumT> builder = new HybridSourceBuilder<>();
return builder.addSource(firstSource);
} | 3.68 |
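A usage sketch; fileSource and kafkaSource are assumed, pre-built Source instances, and the trailing build() is assumed from the usual builder convention:

```java
// Bounded source first, then switch to the unbounded one once it is exhausted.
HybridSource<String> source =
        HybridSource.builder(fileSource)
                .addSource(kafkaSource)
                .build();
```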
flink_KvStateRegistry_unregisterKvState | /**
* Unregisters the KvState instance identified by the given KvStateID.
*
* @param jobId JobId the KvState instance belongs to
* @param kvStateId KvStateID to identify the KvState instance
* @param keyGroupRange Key group range the KvState instance belongs to
*/
public void unregisterKvState(
JobID jobId,
JobVertexID jobVertexId,
KeyGroupRange keyGroupRange,
String registrationName,
KvStateID kvStateId) {
KvStateEntry<?, ?, ?> entry = registeredKvStates.remove(kvStateId);
if (entry != null) {
entry.clear();
final KvStateRegistryListener listener = getKvStateRegistryListener(jobId);
if (listener != null) {
listener.notifyKvStateUnregistered(
jobId, jobVertexId, keyGroupRange, registrationName);
}
}
} | 3.68 |
graphhopper_EdgeBasedTarjanSCC_getSingleEdgeComponents | /**
* The set of edge-keys that form their own (single-edge key) component. If {@link EdgeBasedTarjanSCC#excludeSingleEdgeComponents}
* is enabled this set will be empty.
*/
public BitSet getSingleEdgeComponents() {
return singleEdgeComponents;
} | 3.68 |
flink_TaskEventDispatcher_clearAll | /** Removes all registered event handlers. */
public void clearAll() {
synchronized (registeredHandlers) {
registeredHandlers.clear();
}
} | 3.68 |
morf_FieldReference_getImpliedName | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#getImpliedName()
*/
@Override
public String getImpliedName() {
return StringUtils.isBlank(super.getImpliedName()) ? getName() : super.getImpliedName();
} | 3.68 |
flink_FlinkContainersSettings_haStoragePath | /**
* Sets the {@code haStoragePath} and returns a reference to this Builder enabling method
* chaining.
*
* @param haStoragePath The path for storing HA data.
* @return A reference to this Builder.
*/
public Builder haStoragePath(String haStoragePath) {
this.haStoragePath = haStoragePath;
return setConfigOption(HighAvailabilityOptions.HA_STORAGE_PATH, toUri(haStoragePath));
} | 3.68 |
graphhopper_LandmarkStorage_createLandmarks | /**
* This method calculates the landmarks and initial weightings to & from them.
*/
public void createLandmarks() {
if (isInitialized())
throw new IllegalStateException("Initialize the landmark storage only once!");
// fill 'from' and 'to' weights with maximum value
long maxBytes = (long) graph.getNodes() * LM_ROW_LENGTH;
this.landmarkWeightDA.create(2000);
this.landmarkWeightDA.ensureCapacity(maxBytes);
for (long pointer = 0; pointer < maxBytes; pointer += 2) {
landmarkWeightDA.setShort(pointer, (short) SHORT_INFINITY);
}
int[] empty = new int[landmarks];
Arrays.fill(empty, UNSET_SUBNETWORK);
landmarkIDs.add(empty);
byte[] subnetworks = new byte[graph.getNodes()];
Arrays.fill(subnetworks, (byte) UNSET_SUBNETWORK);
String snKey = Subnetwork.key(lmConfig.getName());
// TODO We could use EdgeBasedTarjanSCC instead of node-based TarjanSCC here to get the small networks directly,
// instead of using the subnetworkEnc from PrepareRoutingSubnetworks.
if (!encodedValueLookup.hasEncodedValue(snKey))
throw new IllegalArgumentException("EncodedValue '" + snKey + "' does not exist. For Landmarks this is " +
"currently required (also used in PrepareRoutingSubnetworks). See #2256");
// Exclude edges that we previously marked in PrepareRoutingSubnetworks to avoid problems like "connection not found".
final BooleanEncodedValue edgeInSubnetworkEnc = encodedValueLookup.getBooleanEncodedValue(snKey);
final IntHashSet blockedEdges;
// We use the areaIndex to split certain areas from each other but do not permanently change the base graph
// so that other algorithms still can route through these regions. This is done to increase the density of
// landmarks for an area like Europe+Asia, which improves the query speed.
if (areaIndex != null) {
StopWatch sw = new StopWatch().start();
blockedEdges = findBorderEdgeIds(areaIndex);
if (logDetails)
LOGGER.info("Made " + blockedEdges.size() + " edges inaccessible. Calculated country cut in " + sw.stop().getSeconds() + "s, " + Helper.getMemInfo());
} else {
blockedEdges = new IntHashSet();
}
EdgeFilter accessFilter = edge -> !edge.get(edgeInSubnetworkEnc) && !blockedEdges.contains(edge.getEdge());
EdgeFilter tarjanFilter = edge -> accessFilter.accept(edge) && Double.isFinite(weighting.calcEdgeWeight(edge, false));
StopWatch sw = new StopWatch().start();
ConnectedComponents graphComponents = TarjanSCC.findComponents(graph, tarjanFilter, true);
if (logDetails)
LOGGER.info("Calculated " + graphComponents.getComponents().size() + " subnetworks via tarjan in " + sw.stop().getSeconds() + "s, " + Helper.getMemInfo());
String additionalInfo = "";
// guess the factor
if (factor <= 0) {
// A 'factor' is necessary to store the weight in just a short value but without losing too much precision.
// This factor is rather delicate to pick, we estimate it from an exploration with some "test landmarks",
// see estimateMaxWeight. If we pick the distance too big for small areas this could lead to (slightly)
// suboptimal routes as there will be too big rounding errors. But picking it too small is bad for performance
// e.g. for Germany at least 1500km is very important otherwise speed is at least twice as slow e.g. for 1000km
double maxWeight = estimateMaxWeight(graphComponents.getComponents(), accessFilter);
setMaximumWeight(maxWeight);
additionalInfo = ", maxWeight:" + maxWeight + " from quick estimation";
}
if (logDetails)
LOGGER.info("init landmarks for subnetworks with node count greater than " + minimumNodes + " with factor:" + factor + additionalInfo);
int nodes = 0;
for (IntArrayList subnetworkIds : graphComponents.getComponents()) {
nodes += subnetworkIds.size();
if (subnetworkIds.size() < minimumNodes)
continue;
if (factor <= 0)
throw new IllegalStateException("factor wasn't initialized " + factor + ", subnetworks:"
+ graphComponents.getComponents().size() + ", minimumNodes:" + minimumNodes + ", current size:" + subnetworkIds.size());
int index = subnetworkIds.size() - 1;
// ensure start node is reachable from both sides and no subnetwork is associated
for (; index >= 0; index--) {
int nextStartNode = subnetworkIds.get(index);
if (subnetworks[nextStartNode] == UNSET_SUBNETWORK) {
if (logDetails) {
GHPoint p = createPoint(graph, nextStartNode);
LOGGER.info("start node: " + nextStartNode + " (" + p + ") subnetwork " + index + ", subnetwork size: " + subnetworkIds.size()
+ ", " + Helper.getMemInfo() + ((areaIndex == null) ? "" : " area:" + areaIndex.query(p.lat, p.lon)));
}
if (createLandmarksForSubnetwork(nextStartNode, subnetworks, accessFilter))
break;
}
}
if (index < 0)
LOGGER.warn("next start node not found in big enough network of size " + subnetworkIds.size() + ", first element is " + subnetworkIds.get(0) + ", " + createPoint(graph, subnetworkIds.get(0)));
}
int subnetworkCount = landmarkIDs.size();
// store all landmark node IDs and one int for the factor itself.
this.landmarkWeightDA.ensureCapacity(maxBytes /* landmark weights */ + (long) subnetworkCount * landmarks /* landmark mapping per subnetwork */);
// calculate offset to point into landmark mapping
long bytePos = maxBytes;
for (int[] landmarks : landmarkIDs) {
for (int lmNodeId : landmarks) {
landmarkWeightDA.setInt(bytePos, lmNodeId);
bytePos += 4L;
}
}
landmarkWeightDA.setHeader(0 * 4, graph.getNodes());
landmarkWeightDA.setHeader(1 * 4, landmarks);
landmarkWeightDA.setHeader(2 * 4, subnetworkCount);
if (factor * DOUBLE_MLTPL > Integer.MAX_VALUE)
throw new UnsupportedOperationException("landmark weight factor cannot be bigger than Integer.MAX_VALUE " + factor * DOUBLE_MLTPL);
landmarkWeightDA.setHeader(3 * 4, (int) Math.round(factor * DOUBLE_MLTPL));
// serialize fast byte[] into DataAccess
subnetworkStorage.create(graph.getNodes());
for (int nodeId = 0; nodeId < subnetworks.length; nodeId++) {
subnetworkStorage.setSubnetwork(nodeId, subnetworks[nodeId]);
}
if (logDetails)
LOGGER.info("Finished landmark creation. Subnetwork node count sum " + nodes + " vs. nodes " + graph.getNodes());
initialized = true;
} | 3.68 |
hbase_AccessControlUtil_buildGetUserPermissionsResponse | /**
* Converts the permissions list into a protocol buffer GetUserPermissionsResponse
*/
public static GetUserPermissionsResponse
buildGetUserPermissionsResponse(final List<UserPermission> permissions) {
GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder();
for (UserPermission perm : permissions) {
builder.addUserPermission(toUserPermission(perm));
}
return builder.build();
} | 3.68 |
druid_CharsetConvert_isEmpty | /**
 * Checks whether the string is empty.
 *
 * @param s the string to check
 * @return true if the string is null or empty
*/
public boolean isEmpty(String s) {
return s == null || "".equals(s);
} | 3.68 |
hbase_AbstractFSWALProvider_getTimestamp | /**
 * Split a WAL filename to get a start time. WALs usually have the time we started writing to them
 * as part of their name, usually the suffix.
* it is a WAL for the meta table. For example, WALs might look like this
* <code>10.20.20.171%3A60020.1277499063250</code> where <code>1277499063250</code> is the
* timestamp. Could also be a meta WAL which adds a '.meta' suffix or a synchronous replication
* WAL which adds a '.syncrep' suffix. Check for these. File also may have no timestamp on it. For
* example the recovered.edits files are WALs but are named in ascending order. Here is an
* example: 0000000000000016310. Allow for this.
* @param name Name of the WAL file.
* @return Timestamp or {@link #NO_TIMESTAMP}.
*/
public static long getTimestamp(String name) {
Matcher matcher = WAL_FILE_NAME_PATTERN.matcher(name);
return matcher.matches() ? Long.parseLong(matcher.group(2)) : NO_TIMESTAMP;
} | 3.68 |
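Worked examples taken directly from the Javadoc above:

```java
// Regular WAL name: the timestamp is the numeric suffix.
long ts = AbstractFSWALProvider.getTimestamp("10.20.20.171%3A60020.1277499063250");
// ts == 1277499063250L

// Recovered-edits style name with no timestamp component.
long none = AbstractFSWALProvider.getTimestamp("0000000000000016310");
// none == NO_TIMESTAMP
```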
hbase_MapReduceBackupMergeJob_convertToDest | /**
* Converts path before copying
* @param p path
* @param backupDirPath backup root
* @return converted path
*/
protected Path convertToDest(Path p, Path backupDirPath) {
String backupId = backupDirPath.getName();
Deque<String> stack = new ArrayDeque<String>();
String name = null;
while (true) {
name = p.getName();
if (!name.equals(backupId)) {
stack.push(name);
p = p.getParent();
} else {
break;
}
}
Path newPath = new Path(backupDirPath.toString());
while (!stack.isEmpty()) {
newPath = new Path(newPath, stack.pop());
}
return newPath;
} | 3.68 |
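For illustration (paths hypothetical): with p = /tmp/stage/backup_173/ns1/t1/cf/f1 and backupDirPath = /backup/backup_173, the loop walks up p until it reaches the backup_173 component, then replays the remainder under the backup root, returning /backup/backup_173/ns1/t1/cf/f1.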
hadoop_AbfsClientThrottlingIntercept_setAnalyzer | /**
* Sets the analyzer for the intercept.
* @param name Name of the analyzer.
* @param abfsConfiguration The configuration.
* @return AbfsClientThrottlingAnalyzer instance.
*/
private AbfsClientThrottlingAnalyzer setAnalyzer(String name, AbfsConfiguration abfsConfiguration) {
return new AbfsClientThrottlingAnalyzer(name, abfsConfiguration);
} | 3.68 |
hbase_CompactionLifeCycleTracker_afterExecution | /**
* Called after compaction is executed by CompactSplitThread.
* <p>
* Requesting compaction on a region can lead to multiple compactions on different stores, so we
* will pass the {@link Store} in to tell you the store we operate on.
*/
default void afterExecution(Store store) {
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsCreateFolder | // Used to create a folder
static void fsCreateFolder(final OBSFileSystem owner,
final String objectName)
throws ObsException {
for (int retryTime = 1;
retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
innerFsCreateFolder(owner, objectName);
return;
} catch (ObsException e) {
LOG.warn("Failed to create folder [{}], retry time [{}], "
+ "exception [{}]", objectName, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
innerFsCreateFolder(owner, objectName);
} | 3.68 |
pulsar_URIPreconditions_checkURIIfPresent | /**
* Check whether the given string is a legal URI and passes the user's check.
*
* @param uri URI String
* @param predicate User defined rule
* @param errorMessage Error message
* @throws IllegalArgumentException Illegal URI or failed in the user's rules
*/
public static void checkURIIfPresent(@Nullable String uri,
@Nonnull Predicate<URI> predicate,
@Nullable String errorMessage) throws IllegalArgumentException {
if (uri == null || uri.length() == 0) {
return;
}
checkURI(uri, predicate, errorMessage);
} | 3.68 |
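A usage sketch; webhookUrl is a hypothetical, possibly-null configuration value:

```java
// Enforce https only when a URL is configured; null/empty values pass through untouched.
URIPreconditions.checkURIIfPresent(
        webhookUrl,
        uri -> "https".equalsIgnoreCase(uri.getScheme()),
        "webhook URL must use https");
```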
hadoop_MapHost_penalize | /**
* Mark the host as penalized
*/
public synchronized void penalize() {
state = State.PENALIZED;
} | 3.68 |
hadoop_QueueAclsInfo_getQueueName | /**
* Get queue name.
*
* @return name
*/
public String getQueueName() {
return queueName;
} | 3.68 |
flink_FlinkImageBuilder_setLogProperties | /**
* Sets log4j properties.
*
* <p>Containers will use "log4j-console.properties" under flink-dist as the base configuration
     * of loggers. Properties specified by this method will be appended to the config file, or will
     * overwrite a property if it already exists in the base config file.
*/
public FlinkImageBuilder setLogProperties(Properties logProperties) {
this.logProperties.putAll(logProperties);
return this;
} | 3.68 |
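A hedged usage fragment for setLogProperties; imageBuilder stands for an already-constructed FlinkImageBuilder, and the logger keys and values are made up.

// Append or override log4j settings for the containerized Flink image; values are illustrative only.
java.util.Properties logProps = new java.util.Properties();
logProps.setProperty("logger.network.name", "org.apache.flink.runtime.io.network");
logProps.setProperty("logger.network.level", "DEBUG");
imageBuilder.setLogProperties(logProps); // merged into the base log4j-console.properties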
dubbo_ScopeClusterInvoker_isInjvmExported | /**
* Checks whether the current ScopeClusterInvoker is exported to the local JVM and returns a boolean value.
*
* @return true if the ScopeClusterInvoker is exported to the local JVM, false otherwise
* @throws RpcException if there was an error during the invocation
*/
private boolean isInjvmExported() {
Boolean localInvoke = RpcContext.getServiceContext().getLocalInvoke();
boolean isExportedValue = isExported.get();
boolean localOnce = (localInvoke != null && localInvoke);
// Determine whether this call is local
if (isExportedValue && localOnce) {
return true;
}
// Determine whether this call is remote
if (localInvoke != null && !localInvoke) {
return false;
}
        // For a local call, check whether the local requirement can actually be satisfied
        if (!isExportedValue && (isForceLocal() || localOnce)) {
            // The service should have been exported to the local JVM but is not, so throw an exception
throw new RpcException(
"Local service for " + getUrl().getServiceInterface() + " has not been exposed yet!");
}
return isExportedValue && injvmFlag;
} | 3.68 |
graphhopper_BBox_calculateIntersection | /**
* Calculates the intersecting BBox between this and the specified BBox
*
* @return the intersecting BBox or null if not intersecting
*/
public BBox calculateIntersection(BBox bBox) {
if (!this.intersects(bBox))
return null;
double minLon = Math.max(this.minLon, bBox.minLon);
double maxLon = Math.min(this.maxLon, bBox.maxLon);
double minLat = Math.max(this.minLat, bBox.minLat);
double maxLat = Math.min(this.maxLat, bBox.maxLat);
return new BBox(minLon, maxLon, minLat, maxLat);
} | 3.68 |
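A brief usage sketch for calculateIntersection; the coordinates are made up, the constructor argument order follows the snippet (minLon, maxLon, minLat, maxLat), and the import path is an assumption.

// Usage sketch; the BBox import is assumed to be GraphHopper's shapes package.
import com.graphhopper.util.shapes.BBox;

public class BBoxIntersectionSketch {
    public static void main(String[] args) {
        // Hypothetical coordinates roughly around Berlin.
        BBox a = new BBox(13.0, 13.8, 52.3, 52.7);
        BBox b = new BBox(13.4, 14.2, 52.5, 53.0);
        System.out.println(a.calculateIntersection(b)); // lon [13.4, 13.8], lat [52.5, 52.7]

        BBox farAway = new BBox(2.0, 2.5, 48.8, 49.0);
        System.out.println(a.calculateIntersection(farAway)); // null (no overlap)
    }
}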
querydsl_AbstractHibernateQuery_setComment | /**
* Add a comment to the generated SQL.
* @param comment comment
* @return the current object
*/
@SuppressWarnings("unchecked")
public Q setComment(String comment) {
this.comment = comment;
return (Q) this;
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_stream | /**
* The stream this state is associated with.
*/
@Override
public Http2Stream stream() {
return stream;
} | 3.68 |
morf_Cast_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser.dispatch(getExpression());
} | 3.68 |
flink_RocksDBIncrementalRestoreOperation_restoreBaseDBFromLocalState | /** Restores RocksDB instance from local state. */
private void restoreBaseDBFromLocalState(IncrementalLocalKeyedStateHandle localKeyedStateHandle)
throws Exception {
KeyedBackendSerializationProxy<K> serializationProxy =
readMetaData(localKeyedStateHandle.getMetaDataStateHandle());
List<StateMetaInfoSnapshot> stateMetaInfoSnapshots =
serializationProxy.getStateMetaInfoSnapshots();
Path restoreSourcePath = localKeyedStateHandle.getDirectoryStateHandle().getDirectory();
logger.debug(
"Restoring keyed backend uid in operator {} from incremental snapshot to {}.",
operatorIdentifier,
backendUID);
this.rocksHandle.openDB(
createColumnFamilyDescriptors(stateMetaInfoSnapshots, true),
stateMetaInfoSnapshots,
restoreSourcePath);
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredFile | /**
* @param label The {@link org.apache.streampipes.sdk.helpers.Label}
* that describes why this parameter is needed in a user-friendly manner.
   * @param requiredFiletypes A list of required file types (each a string giving the file extension) that the element supports.
* @return this
*/
public K requiredFile(Label label, String... requiredFiletypes) {
FileStaticProperty fp = new FileStaticProperty(label.getInternalId(), label.getLabel(), label
.getDescription());
List<String> collectedFiletypes = Arrays.asList(requiredFiletypes);
fp.setRequiredFiletypes(collectedFiletypes);
this.staticProperties.add(fp);
return me();
} | 3.68 |
rocketmq-connect_DorisSinkConnector_taskConfigs | /**
* Returns a set of configurations for Tasks based on the current configuration,
     * producing at most {@code maxTasks} configurations.
*
* @param maxTasks maximum number of configurations to generate
* @return configurations for Tasks
*/
@Override
public List<KeyValue> taskConfigs(int maxTasks) {
log.info("Starting task config !!! ");
List<KeyValue> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
configs.add(this.connectConfig);
}
return configs;
} | 3.68 |
hadoop_AbstractMultipartUploader_close | /**
* Perform any cleanup.
* The upload is not required to support any operations after this.
* @throws IOException problems on close.
*/
@Override
public void close() throws IOException {
} | 3.68 |
hbase_BufferedMutatorParams_setWriteBufferPeriodicFlushTimerTickMs | /**
   * Set the TimerTick interval that controls how often the buffer timeout is checked.
* @deprecated Since 3.0.0, will be removed in 4.0.0. We use a common timer in the whole client
* implementation so you can not set it any more.
*/
@Deprecated
public BufferedMutatorParams setWriteBufferPeriodicFlushTimerTickMs(long timerTickMs) {
this.writeBufferPeriodicFlushTimerTickMs = timerTickMs;
return this;
} | 3.68 |
hadoop_ConnectionPool_getMinSize | /**
* Get the minimum number of connections in this pool.
*
* @return Minimum number of connections.
*/
protected int getMinSize() {
return this.minSize;
} | 3.68 |
framework_Slot_setLayout | /**
     * Set the layout that contains this slot. This method must be called exactly
* once at slot construction time when using the default constructor.
*
* The method should normally only be called by
* {@link VAbstractOrderedLayout#createSlot(Widget)}.
*
* @since 7.6
* @param layout
* the layout containing the slot
*/
public void setLayout(VAbstractOrderedLayout layout) {
this.layout = layout;
} | 3.68 |
streampipes_BoilerpipeHTMLContentHandler_endDocument | // @Override
public void endDocument() throws SAXException {
flushBlock();
} | 3.68 |
pulsar_ClientCnxIdleState_tryMarkReleasedAndCloseConnection | /**
     * Changes the idle-state of the connection to {@link State#RELEASED}. This method only moves the
     * connection from the {@link State#RELEASING} state to the {@link State#RELEASED} state, and
     * closes the underlying {@code clientCnx} if the transition succeeds.
     * @return whether the idle-state was changed to {@link State#RELEASED} and the connection was closed.
*/
public boolean tryMarkReleasedAndCloseConnection() {
if (!compareAndSetIdleStat(State.RELEASING, State.RELEASED)) {
return false;
}
clientCnx.close();
return true;
} | 3.68 |
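A self-contained sketch of the compare-and-set state-transition pattern the method relies on; the state machine and the close() side effect are hypothetical stand-ins, not Pulsar's implementation.

// Generic CAS state-transition sketch; only the winner of the RELEASING -> RELEASED race closes.
import java.util.concurrent.atomic.AtomicReference;

public class IdleStateSketch {
    enum State { IDLE, RELEASING, RELEASED }

    private final AtomicReference<State> state = new AtomicReference<>(State.IDLE);

    boolean tryMarkReleasing() {
        return state.compareAndSet(State.IDLE, State.RELEASING);
    }

    /** Only the thread that wins the RELEASING -> RELEASED transition performs the close. */
    boolean tryMarkReleasedAndClose(Runnable close) {
        if (!state.compareAndSet(State.RELEASING, State.RELEASED)) {
            return false; // another thread already moved the state on, or releasing never started
        }
        close.run();
        return true;
    }

    public static void main(String[] args) {
        IdleStateSketch s = new IdleStateSketch();
        s.tryMarkReleasing();
        System.out.println(s.tryMarkReleasedAndClose(() -> System.out.println("closing connection")));
    }
}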
framework_GridLayoutElement_getCell | /**
* Gets the cell element at the given position.
*
* @param row
* the row coordinate
* @param column
* the column coordinate
* @return the cell element at the given position
* @throws NoSuchElementException
* if no cell was found at the given position
* @since 8.0.6
*/
public WebElement getCell(int row, int column) {
WebElement res = (WebElement) getCommandExecutor().executeScript(
"return arguments[0].getCell(" + row + "," + column + ")",
this);
if (res == null) {
throw new NoSuchElementException(
"No cell found at " + row + "," + column);
}
return res;
} | 3.68 |
shardingsphere-elasticjob_SQLPropertiesFactory_getProperties | /**
* Get SQL properties.
*
* @param type tracing storage database type
* @return SQL properties
*/
public static Properties getProperties(final TracingStorageDatabaseType type) {
return loadProps(String.format("%s.properties", type.getType()));
} | 3.68 |
flink_ResourceInformationReflector_getAllResourceInfos | /** Get the name and value of all resources from the {@link Resource}. */
@VisibleForTesting
Map<String, Long> getAllResourceInfos(Object resource) {
if (!isYarnResourceTypesAvailable) {
return Collections.emptyMap();
}
final Map<String, Long> externalResources = new HashMap<>();
final Object[] externalResourcesInfo;
try {
externalResourcesInfo = (Object[]) resourceGetResourcesMethod.invoke(resource);
for (int i = 0; i < externalResourcesInfo.length; i++) {
final String name =
(String) resourceInformationGetNameMethod.invoke(externalResourcesInfo[i]);
final long value =
(long) resourceInformationGetValueMethod.invoke(externalResourcesInfo[i]);
externalResources.put(name, value);
}
} catch (Exception e) {
LOG.warn("Could not obtain the external resources supported by the given Resource.", e);
return Collections.emptyMap();
}
return externalResources;
} | 3.68 |
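A generic sketch of the reflective pattern used above: look up an optional method once, invoke it defensively, and fall back to an empty result. The class and method names here are hypothetical, not YARN's.

// Hypothetical target: an object exposing getResources() only in newer versions of a library.
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public final class OptionalApiReflector {
    private final Method getResourcesMethod; // null when the API is unavailable

    OptionalApiReflector(Class<?> resourceClass) {
        Method m = null;
        try {
            m = resourceClass.getMethod("getResources");
        } catch (NoSuchMethodException e) {
            // Older library version: leave the method null and degrade gracefully.
        }
        this.getResourcesMethod = m;
    }

    Map<String, Long> readResources(Object resource) {
        if (getResourcesMethod == null) {
            return Collections.emptyMap();
        }
        try {
            Object[] infos = (Object[]) getResourcesMethod.invoke(resource);
            Map<String, Long> out = new HashMap<>();
            for (Object info : infos) {
                out.put(
                    (String) info.getClass().getMethod("getName").invoke(info),
                    (long) info.getClass().getMethod("getValue").invoke(info));
            }
            return out;
        } catch (Exception e) {
            return Collections.emptyMap(); // never let a reflective failure break the caller
        }
    }
}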
hadoop_NMTokenCache_setNMToken | /**
* Sets the NMToken for node address only in the singleton obtained from
* {@link #getSingleton()}. If you are using your own NMTokenCache that is
* different from the singleton, use {@link #setToken(String, Token) }
*
* @param nodeAddr
* node address (host:port)
* @param token
* NMToken
*/
@Public
public static void setNMToken(String nodeAddr, Token token) {
NM_TOKEN_CACHE.setToken(nodeAddr, token);
} | 3.68 |
rocketmq-connect_ColumnDefinition_typeName | /**
* Retrieves the designated column's database-specific type name.
*
* @return type name used by the database. If the column type is a user-defined type, then a
* fully-qualified type name is returned.
*/
public String typeName() {
return typeName;
} | 3.68 |
framework_CSSInjectWithColorpicker_createEditor | /**
   * Creates a text editor for visually editing text.
   *
   * @param text
   *            the initial text to show in the editor
   * @return the editor component
*/
private Component createEditor(String text) {
Panel editor = new Panel("Text Editor");
editor.setWidth("580px");
VerticalLayout panelContent = new VerticalLayout();
panelContent.setSpacing(true);
panelContent.setMargin(new MarginInfo(true, false, false, false));
editor.setContent(panelContent);
// Create the toolbar
HorizontalLayout toolbar = new HorizontalLayout();
toolbar.setSpacing(true);
toolbar.setMargin(new MarginInfo(false, false, false, true));
// Create the font family selector
toolbar.addComponent(createFontSelect());
// Create the font size selector
toolbar.addComponent(createFontSizeSelect());
// Create the text color selector
toolbar.addComponent(createTextColorSelect());
// Create the background color selector
toolbar.addComponent(createBackgroundColorSelect());
panelContent.addComponent(toolbar);
panelContent.setComponentAlignment(toolbar, Alignment.MIDDLE_LEFT);
// Spacer between toolbar and text
panelContent.addComponent(new Label("<hr/>", ContentMode.HTML));
// The text to edit
TextArea textLabel = new TextArea(null, text);
textLabel.setWidth("100%");
textLabel.setHeight("200px");
// IMPORTANT: We are here setting the style name of the label, we are
// going to use this in our injected styles to target the label
textLabel.setStyleName("text-label");
panelContent.addComponent(textLabel);
return editor;
} | 3.68 |
framework_ColorPickerPreviewElement_getColorFieldContainsErrors | /**
     * Get whether the TextField in the ColorPickerPreview has validation errors.
*
* @return true if field has errors, false otherwise
*
* @since 8.4
*/
public boolean getColorFieldContainsErrors() {
List<WebElement> caption = findElements(
By.className("v-caption-v-colorpicker-preview-textfield"));
return !caption.isEmpty() && !caption.get(0)
.findElements(By.className("v-errorindicator")).isEmpty();
} | 3.68 |
flink_AsynchronousFileIOChannel_handleProcessedBuffer | /**
* Handles a processed <tt>Buffer</tt>. This method is invoked by the asynchronous IO worker
* threads upon completion of the IO request with the provided buffer and/or an exception that
* occurred while processing the request for that buffer.
*
* @param buffer The buffer to be processed.
* @param ex The exception that occurred in the I/O threads when processing the buffer's
* request.
*/
protected final void handleProcessedBuffer(T buffer, IOException ex) {
if (buffer == null) {
return;
}
// even if the callbacks throw an error, we need to maintain our bookkeeping
try {
if (ex != null && this.exception == null) {
this.exception = ex;
this.resultHandler.requestFailed(buffer, ex);
} else {
this.resultHandler.requestSuccessful(buffer);
}
} finally {
NotificationListener listener = null;
// Decrement the number of outstanding requests. If we are currently closing, notify the
            // waiters. If there is a listener, notify it as well.
synchronized (this.closeLock) {
if (this.requestsNotReturned.decrementAndGet() == 0) {
if (this.closed) {
this.closeLock.notifyAll();
}
synchronized (listenerLock) {
listener = allRequestsProcessedListener;
allRequestsProcessedListener = null;
}
}
}
if (listener != null) {
listener.onNotification();
}
}
} | 3.68 |
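A compact, simplified sketch of the bookkeeping pattern above: decrement an outstanding-request counter, wake close() waiters when it reaches zero, and hand off a one-shot listener outside the lock. Names and structure are illustrative only, not Flink's channel implementation.

// Simplified "notify waiters / fire one-shot listener on last request" pattern.
import java.util.concurrent.atomic.AtomicInteger;

public class RequestBookkeepingSketch {
    private final AtomicInteger outstanding = new AtomicInteger();
    private final Object closeLock = new Object();
    private volatile boolean closed;
    private Runnable allDoneListener; // guarded by closeLock in this simplified sketch

    void requestIssued() {
        outstanding.incrementAndGet();
    }

    void requestCompleted() {
        Runnable listener = null;
        synchronized (closeLock) {
            if (outstanding.decrementAndGet() == 0) {
                if (closed) {
                    closeLock.notifyAll(); // wake anyone blocked in close()
                }
                listener = allDoneListener; // hand off and clear the one-shot listener
                allDoneListener = null;
            }
        }
        if (listener != null) {
            listener.run(); // run outside the lock to avoid holding it during callbacks
        }
    }

    void close() throws InterruptedException {
        synchronized (closeLock) {
            closed = true;
            while (outstanding.get() > 0) {
                closeLock.wait();
            }
        }
    }
}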
flink_TestStreamEnvironment_randomizeConfiguration | /**
     * This is the place to randomize the configuration that relates to the DataStream API, such as
     * ExecutionConf, CheckpointConf and StreamExecutionEnvironment. The list of these configurations
     * can be found in {@link StreamExecutionEnvironment#configure(ReadableConfig, ClassLoader)}. All
     * other configuration should be randomized in {@link
     * org.apache.flink.runtime.testutils.MiniClusterResource#randomizeConfiguration(Configuration)}.
*/
private static void randomizeConfiguration(MiniCluster miniCluster, Configuration conf) {
// randomize ITTests for enabling unaligned checkpoint
if (RANDOMIZE_CHECKPOINTING_CONFIG) {
randomize(conf, ExecutionCheckpointingOptions.ENABLE_UNALIGNED, true, false);
randomize(
conf,
ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT,
Duration.ofSeconds(0),
Duration.ofMillis(100),
Duration.ofSeconds(2));
randomize(conf, CheckpointingOptions.CLEANER_PARALLEL_MODE, true, false);
}
// randomize ITTests for enabling state change log
if (isConfigurationSupportedByChangelog(miniCluster.getConfiguration())) {
if (STATE_CHANGE_LOG_CONFIG.equalsIgnoreCase(STATE_CHANGE_LOG_CONFIG_ON)) {
if (!conf.contains(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG)) {
conf.set(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG, true);
miniCluster.overrideRestoreModeForChangelogStateBackend();
}
} else if (STATE_CHANGE_LOG_CONFIG.equalsIgnoreCase(STATE_CHANGE_LOG_CONFIG_RAND)) {
boolean enabled =
randomize(conf, StateChangelogOptions.ENABLE_STATE_CHANGE_LOG, true, false);
if (enabled) {
randomize(
conf,
StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL,
Duration.ofMillis(100),
Duration.ofMillis(500),
Duration.ofSeconds(1),
Duration.ofSeconds(5),
Duration.ofSeconds(-1));
miniCluster.overrideRestoreModeForChangelogStateBackend();
}
}
}
} | 3.68 |