name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_AbstractBytesMultiMap_appendValue | // ----------------------- Append -----------------------
private int appendValue(BinaryRowData value) throws IOException {
final long offsetOfPointer = writePointer(valOutView, -1);
valueSerializer.serializeToPages(value, valOutView);
if (offsetOfPointer > Integer.MAX_VALUE) {
LOG.warn(
"We can't handle key area with more than Integer.MAX_VALUE bytes,"
+ " because the pointer is a integer.");
throw new EOFException();
}
return (int) offsetOfPointer;
} | 3.68 |
flink_SourceTestSuiteBase_testSavepoint | /**
* Test connector source restart from a savepoint.
*
* <p>This test will create 4 splits in the external system first, write test data to all
splits, and consume back via a Flink job. Then stop the job with a savepoint and restart the job
from that savepoint. After the restarted job is running, add some extra data to the source and
* compare the result.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*/
@TestTemplate
@DisplayName("Test source restarting from a savepoint")
public void testSavepoint(
TestEnvironment testEnv,
DataStreamSourceExternalContext<T> externalContext,
CheckpointingMode semantic)
throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 4, 4, 4);
} | 3.68 |
hudi_HoodieRepairTool_backupFiles | /**
* Backs up dangling files from table base path to backup path.
*
* @param relativeFilePaths A {@link List} of relative file paths for backup.
* @return {@code true} if all successful; {@code false} otherwise.
*/
boolean backupFiles(List<String> relativeFilePaths) {
return copyFiles(context, relativeFilePaths, cfg.basePath, cfg.backupPath);
} | 3.68 |
hibernate-validator_ScriptEngineScriptEvaluator_evaluate | /**
* Executes the given script, using the given variable bindings. The script is executed either synchronized or
* unsynchronized, depending on the engine's threading capabilities.
*
* @param script the script to be executed
* @param bindings the bindings to be used
*
* @return the script's result
*
* @throws ScriptEvaluationException in case an error occurred during the script evaluation
*/
@Override
public Object evaluate(String script, Map<String, Object> bindings) throws ScriptEvaluationException {
if ( engineAllowsParallelAccessFromMultipleThreads() ) {
return doEvaluate( script, bindings );
}
else {
synchronized ( engine ) {
return doEvaluate( script, bindings );
}
}
} | 3.68 |
framework_VDebugWindow_meta | /**
* Called when the result from analyzeLayouts is received.
*
* @param ac the application connection from which the analyzeLayouts result was received
* @param meta the value map containing the analyzeLayouts result
*/
public void meta(ApplicationConnection ac, ValueMap meta) {
if (isClosed()) {
return;
}
for (Section s : sections) {
s.meta(ac, meta);
}
} | 3.68 |
hudi_HiveSyncTool_getTablePartitions | /**
* Fetches partitions from the metastore service, pushing down filters where possible to avoid
* fetching too many unnecessary partitions.
*
* @param writtenPartitions partitions that have been added, updated, or dropped since the last sync.
*/
private List<Partition> getTablePartitions(String tableName, List<String> writtenPartitions) {
if (!config.getBooleanOrDefault(HIVE_SYNC_FILTER_PUSHDOWN_ENABLED)) {
return syncClient.getAllPartitions(tableName);
}
List<String> partitionKeys = config.getSplitStrings(META_SYNC_PARTITION_FIELDS).stream()
.map(String::toLowerCase)
.collect(Collectors.toList());
List<FieldSchema> partitionFields = syncClient.getMetastoreFieldSchemas(tableName)
.stream()
.filter(f -> partitionKeys.contains(f.getName()))
.collect(Collectors.toList());
return syncClient.getPartitionsByFilter(tableName,
PartitionFilterGenerator.generatePushDownFilter(writtenPartitions, partitionFields, config));
} | 3.68 |
hbase_HttpServer_getPort | /**
* Get the port that the server is on
* @return the port
* @deprecated Since 0.99.0
*/
@Deprecated
public int getPort() {
return ((ServerConnector) webServer.getConnectors()[0]).getLocalPort();
} | 3.68 |
framework_VaadinService_runPendingAccessTasks | /**
* Purges the queue of pending access invocations enqueued with
* {@link VaadinSession#access(Runnable)}.
* <p>
* This method is automatically run by the framework at appropriate
* situations and is not intended to be used by application developers.
*
* @param session
* the vaadin session to purge the queue for
* @since 7.1
*/
public void runPendingAccessTasks(VaadinSession session) {
assert session.hasLock();
if (session.getPendingAccessQueue().isEmpty()) {
return;
}
FutureAccess pendingAccess;
// Dump all current instances, not only the ones dumped by setCurrent
Map<Class<?>, CurrentInstance> oldInstances = CurrentInstance
.getInstances();
CurrentInstance.setCurrent(session);
try {
while ((pendingAccess = session.getPendingAccessQueue()
.poll()) != null) {
if (!pendingAccess.isCancelled()) {
pendingAccess.run();
try {
pendingAccess.get();
} catch (Exception exception) {
if (exception instanceof ExecutionException) {
Throwable cause = exception.getCause();
if (cause instanceof Exception) {
exception = (Exception) cause;
}
}
pendingAccess.handleError(exception);
}
}
}
} finally {
CurrentInstance.clearAll();
CurrentInstance.restoreInstances(oldInstances);
}
} | 3.68 |
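For context, here is a minimal sketch of how tasks end up in the pending access queue that `runPendingAccessTasks` drains: a background thread hands a UI change to `VaadinSession.access(Runnable)`, and the framework later runs it with the session lock held. The package names assume the Vaadin 7/8 API layout, and the label/result values are placeholders.

```java
import com.vaadin.server.VaadinSession;
import com.vaadin.ui.Label;

// Minimal sketch: a background thread enqueues a UI update for the session.
// The framework later executes the queued task inside runPendingAccessTasks().
public class BackgroundUpdateExample {

    public static void scheduleUpdate(VaadinSession session, Label statusLabel) {
        new Thread(() -> {
            String result = "done"; // placeholder for an expensive computation
            // Enqueue the UI change; it will run with the session lock held.
            session.access(() -> statusLabel.setValue(result));
        }).start();
    }
}
```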
framework_VCalendarPanel_setFocusOutListener | /**
* A focus out listener is triggered when the panel loses focus. This can
* happen either after a user clicks outside the panel or tabs out.
*
* @param listener
* The listener to trigger
*/
public void setFocusOutListener(FocusOutListener listener) {
focusOutListener = listener;
} | 3.68 |
flink_ExecutionVertex_cancel | /**
* Cancels this ExecutionVertex.
*
* @return A future that completes once the execution has reached its final state.
*/
public CompletableFuture<?> cancel() {
// to avoid any case of mixup in the presence of concurrent calls,
// we copy a reference to the stack to make sure both calls go to the same Execution
final Execution exec = currentExecution;
exec.cancel();
return exec.getReleaseFuture();
} | 3.68 |
framework_WebBrowser_getLocale | /** Get the default locale of the browser. */
public Locale getLocale() {
return locale;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_isBootstrapNeeded | /**
* Whether an initialization (bootstrap) operation is needed for this metadata table.
* <p>
* Rollback of the first commit would look like un-synced instants in the metadata table.
* Action metadata is needed to verify the instant time and avoid erroneous initializing.
* <p>
* TODO: Revisit this logic and validate that filtering for all
* commits timeline is the right thing to do
*
* @return True if initialization is needed, False otherwise
*/
private boolean isBootstrapNeeded(Option<HoodieInstant> latestMetadataInstant) {
if (!latestMetadataInstant.isPresent()) {
LOG.warn("Metadata Table will need to be re-initialized as no instants were found");
return true;
}
final String latestMetadataInstantTimestamp = latestMetadataInstant.get().getTimestamp();
if (latestMetadataInstantTimestamp.startsWith(SOLO_COMMIT_TIMESTAMP)) { // the initialization timestamp is SOLO_COMMIT_TIMESTAMP + offset
return false;
}
return false;
} | 3.68 |
flink_FileSystem_initOutPathLocalFS | /**
* Initializes output directories on local file systems according to the given write mode.
*
* <ul>
* <li>WriteMode.NO_OVERWRITE & parallel output:
* <ul>
* <li>A directory is created if the output path does not exist.
* <li>An existing directory is reused, files contained in the directory are NOT
* deleted.
* <li>An existing file raises an exception.
* </ul>
* <li>WriteMode.NO_OVERWRITE & non-parallel output:
* <ul>
* <li>An existing file or directory raises an exception.
* </ul>
* <li>WriteMode.OVERWRITE & parallel output:
* <ul>
* <li>A directory is created if the output path does not exist.
* <li>An existing directory is reused, files contained in the directory are NOT
* deleted.
* <li>An existing file is deleted and replaced by a new directory.
* </ul>
* <li>WriteMode.OVERWRITE & non-parallel output:
* <ul>
* <li>An existing file or directory (and all its content) is deleted
* </ul>
* </ul>
*
* <p>Files contained in an existing directory are not deleted, because multiple instances of a
* DataSinkTask might call this function at the same time and hence might perform concurrent
* delete operations on the file system (possibly deleting output files of concurrently running
* tasks). Since concurrent DataSinkTasks are not aware of each other, coordination of delete
* and create operations would be difficult.
*
* @param outPath Output path that should be prepared.
* @param writeMode Write mode to consider.
* @param createDirectory True, to initialize a directory at the given path, false to prepare
* space for a file.
* @return True, if the path was successfully prepared, false otherwise.
* @throws IOException Thrown, if any of the file system access operations failed.
*/
public boolean initOutPathLocalFS(Path outPath, WriteMode writeMode, boolean createDirectory)
throws IOException {
if (isDistributedFS()) {
return false;
}
// NOTE: We actually need to lock here (process wide). Otherwise, multiple threads that
// concurrently work in this method (multiple output formats writing locally) might end
// up deleting each other's directories and leave non-retrievable files, without necessarily
// causing an exception. That results in very subtle issues, like output files looking as if
// they are not getting created.
// we acquire the lock interruptibly here, to make sure that concurrent threads waiting
// here can cancel faster
try {
OUTPUT_DIRECTORY_INIT_LOCK.lockInterruptibly();
} catch (InterruptedException e) {
// restore the interruption state
Thread.currentThread().interrupt();
// leave the method - we don't have the lock anyways
throw new IOException(
"The thread was interrupted while trying to initialize the output directory");
}
try {
FileStatus status;
try {
status = getFileStatus(outPath);
} catch (FileNotFoundException e) {
// okay, the file is not there
status = null;
}
// check if path exists
if (status != null) {
// path exists, check write mode
switch (writeMode) {
case NO_OVERWRITE:
if (status.isDir() && createDirectory) {
return true;
} else {
// file may not be overwritten
throw new IOException(
"File or directory "
+ outPath
+ " already exists. Existing files and directories "
+ "are not overwritten in "
+ WriteMode.NO_OVERWRITE.name()
+ " mode. Use "
+ WriteMode.OVERWRITE.name()
+ " mode to overwrite existing files and directories.");
}
case OVERWRITE:
if (status.isDir()) {
if (createDirectory) {
// directory exists and does not need to be created
return true;
} else {
// we will write in a single file, delete directory
try {
delete(outPath, true);
} catch (IOException e) {
throw new IOException(
"Could not remove existing directory '"
+ outPath
+ "' to allow overwrite by result file",
e);
}
}
} else {
// delete file
try {
delete(outPath, false);
} catch (IOException e) {
throw new IOException(
"Could not remove existing file '"
+ outPath
+ "' to allow overwrite by result file/directory",
e);
}
}
break;
default:
throw new IllegalArgumentException("Invalid write mode: " + writeMode);
}
}
if (createDirectory) {
// Output directory needs to be created
if (!exists(outPath)) {
mkdirs(outPath);
}
// double check that the output directory exists
try {
return getFileStatus(outPath).isDir();
} catch (FileNotFoundException e) {
return false;
}
} else {
// check that the output path does not exist and an output file
// can be created by the output format.
return !exists(outPath);
}
} finally {
OUTPUT_DIRECTORY_INIT_LOCK.unlock();
}
} | 3.68 |
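A hedged usage sketch for the method above: preparing a local output directory for parallel output in overwrite mode. The path value is hypothetical, and the call only has an effect on a non-distributed `FileSystem` (it returns false otherwise, as shown in the snippet).

```java
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.FileSystem.WriteMode;
import org.apache.flink.core.fs.Path;

public class InitOutPathExample {

    public static void main(String[] args) throws Exception {
        Path outPath = new Path("file:///tmp/flink-output"); // hypothetical local path
        FileSystem fs = outPath.getFileSystem();
        // Prepare a directory for parallel output, replacing an existing file if present.
        boolean ready = fs.initOutPathLocalFS(outPath, WriteMode.OVERWRITE, true);
        System.out.println("Output path prepared: " + ready);
    }
}
```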
morf_SchemaChangeSequence_changePrimaryKeyColumns | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#changePrimaryKeyColumns(java.lang.String, java.util.List, java.util.List)
*/
@Override
public void changePrimaryKeyColumns(String tableName, List<String> oldPrimaryKeyColumns, List<String> newPrimaryKeyColumns) {
ChangePrimaryKeyColumns changePrimaryKeyColumns = new ChangePrimaryKeyColumns(tableName, oldPrimaryKeyColumns, newPrimaryKeyColumns);
visitor.visit(changePrimaryKeyColumns);
schemaAndDataChangeVisitor.visit(changePrimaryKeyColumns);
} | 3.68 |
framework_VScrollTable_getRequiredHeight | /**
* @return the height of scrollable body, subpixels ceiled.
*/
public int getRequiredHeight() {
return preSpacer.getOffsetHeight() + postSpacer.getOffsetHeight()
+ WidgetUtil.getRequiredHeight(table);
} | 3.68 |
hadoop_OperationAuditorOptions_withIoStatisticsStore | /**
* Set builder value.
* @param value new value
* @return the builder
*/
public OperationAuditorOptions withIoStatisticsStore(
final IOStatisticsStore value) {
ioStatisticsStore = value;
return this;
} | 3.68 |
AreaShop_GeneralRegion_matchesLimitGroup | /**
* Check if this region matches the filters of a limit group.
* @param group The group to check
* @return true if the region applies to the limit group, otherwise false
*/
public boolean matchesLimitGroup(String group) {
List<String> worlds = plugin.getConfig().getStringList("limitGroups." + group + ".worlds");
List<String> groups = plugin.getConfig().getStringList("limitGroups." + group + ".groups");
if((worlds == null || worlds.isEmpty() || worlds.contains(getWorldName()))) {
if(groups == null || groups.isEmpty()) {
return true;
} else {
boolean inGroups = false;
for(RegionGroup checkGroup : plugin.getFileManager().getGroups()) {
inGroups = inGroups || (groups.contains(checkGroup.getName()) && checkGroup.isMember(this));
}
return inGroups;
}
}
return false;
} | 3.68 |
hbase_MasterObserver_preSetTableQuota | /**
* Called before the quota for the table is stored.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
* @param quotas the current quota for the table
*/
default void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {
} | 3.68 |
hadoop_Server_initServices | /**
* Initializes the list of services.
*
* @param services services to initialize; it must be a de-duplicated list of
* services.
*
* @throws ServerException thrown if the services could not be initialized.
*/
protected void initServices(List<Service> services) throws ServerException {
for (Service service : services) {
log.debug("Initializing service [{}]", service.getInterface());
checkServiceDependencies(service);
service.init(this);
this.services.put(service.getInterface(), service);
}
for (Service service : services) {
service.postInit();
}
} | 3.68 |
hadoop_VolumeAMSProcessor_checkAndGetVolume | /**
* If the given volume ID already exists in the volume manager,
* it returns the existing volume. Otherwise, it creates a new
* volume and adds it to the volume manager.
* @param metaData the volume metadata describing the volume to look up or create
* @return volume
*/
private Volume checkAndGetVolume(VolumeMetaData metaData)
throws InvalidVolumeException {
Volume toAdd = new VolumeImpl(metaData);
CsiAdaptorProtocol adaptor = volumeManager
.getAdaptorByDriverName(metaData.getDriverName());
if (adaptor == null) {
throw new InvalidVolumeException("It seems for the driver name"
+ " specified in the volume " + metaData.getDriverName()
+ " ,there is no matched driver-adaptor can be found. "
+ "Is the driver probably registered? Please check if"
+ " adaptors service addresses defined in "
+ YarnConfiguration.NM_CSI_ADAPTOR_ADDRESSES
+ " are correct and services are started.");
}
toAdd.setClient(adaptor);
return this.volumeManager.addOrGetVolume(toAdd);
} | 3.68 |
flink_TimestampsAndWatermarksOperator_processWatermark | /**
* Override the base implementation to completely ignore watermarks propagated from upstream,
* except for the "end of time" watermark.
*/
@Override
public void processWatermark(org.apache.flink.streaming.api.watermark.Watermark mark)
throws Exception {
// if we receive a Long.MAX_VALUE watermark we forward it since it is used
// to signal the end of input and to not block watermark progress downstream
if (mark.getTimestamp() == Long.MAX_VALUE) {
wmOutput.emitWatermark(Watermark.MAX_WATERMARK);
}
} | 3.68 |
hbase_DirectMemoryUtils_getDirectMemorySize | /** Returns the direct memory limit of the current process */
public static long getDirectMemorySize() {
return MAX_DIRECT_MEMORY;
} | 3.68 |
hudi_ZeroToOneUpgradeHandler_recreateMarkers | /**
* Recreate markers in new format.
* Step1: Delete existing markers
* Step2: Collect all rollback file info.
* Step3: recreate markers for all interested files.
*
* @param commitInstantTime instant of interest for which markers need to be recreated.
* @param table instance of {@link HoodieTable} to use
* @param context instance of {@link HoodieEngineContext} to use
* @throws HoodieRollbackException on any exception during upgrade.
*/
protected void recreateMarkers(final String commitInstantTime,
HoodieTable table,
HoodieEngineContext context,
int parallelism) throws HoodieRollbackException {
try {
// fetch hoodie instant
Option<HoodieInstant> commitInstantOpt = Option.fromJavaOptional(table.getActiveTimeline().getCommitsTimeline().getInstantsAsStream()
.filter(instant -> HoodieActiveTimeline.EQUALS.test(instant.getTimestamp(), commitInstantTime))
.findFirst());
if (commitInstantOpt.isPresent()) {
// delete existing markers
WriteMarkers writeMarkers = WriteMarkersFactory.get(MarkerType.DIRECT, table, commitInstantTime);
writeMarkers.quietDeleteMarkerDir(context, parallelism);
// generate rollback stats
List<HoodieRollbackStat> rollbackStats = getListBasedRollBackStats(table, context, commitInstantOpt);
// recreate markers adhering to marker based rollback
for (HoodieRollbackStat rollbackStat : rollbackStats) {
for (String path : rollbackStat.getSuccessDeleteFiles()) {
String dataFileName = path.substring(path.lastIndexOf("/") + 1);
// not feasible to differentiate MERGE from CREATE. hence creating with MERGE IOType for all base files.
writeMarkers.create(rollbackStat.getPartitionPath(), dataFileName, IOType.MERGE);
}
for (FileStatus fileStatus : rollbackStat.getCommandBlocksCount().keySet()) {
writeMarkers.create(rollbackStat.getPartitionPath(), getFileNameForMarkerFromLogFile(fileStatus.getPath().toString(), table), IOType.APPEND);
}
}
}
} catch (Exception e) {
throw new HoodieRollbackException("Exception thrown while upgrading Hoodie Table from version 0 to 1", e);
}
} | 3.68 |
flink_PatternStreamBuilder_build | /**
* Creates a data stream containing the results of applying {@link PatternProcessFunction} to fully
* matching event sequences.
*
* @param processFunction function to be applied to matching event sequences
* @param outTypeInfo output TypeInformation of {@link PatternProcessFunction#processMatch(Map,
* PatternProcessFunction.Context, Collector)}
* @param <OUT> type of output events
* @return Data stream containing fully matched event sequence with applied {@link
* PatternProcessFunction}
*/
<OUT, K> SingleOutputStreamOperator<OUT> build(
final TypeInformation<OUT> outTypeInfo,
final PatternProcessFunction<IN, OUT> processFunction) {
checkNotNull(outTypeInfo);
checkNotNull(processFunction);
final TypeSerializer<IN> inputSerializer =
inputStream.getType().createSerializer(inputStream.getExecutionConfig());
final boolean isProcessingTime = timeBehaviour == TimeBehaviour.ProcessingTime;
final boolean timeoutHandling = processFunction instanceof TimedOutPartialMatchHandler;
final NFACompiler.NFAFactory<IN> nfaFactory =
NFACompiler.compileFactory(pattern, timeoutHandling);
final CepOperator<IN, K, OUT> operator =
new CepOperator<>(
inputSerializer,
isProcessingTime,
nfaFactory,
comparator,
pattern.getAfterMatchSkipStrategy(),
processFunction,
lateDataOutputTag);
final SingleOutputStreamOperator<OUT> patternStream;
if (inputStream instanceof KeyedStream) {
KeyedStream<IN, K> keyedStream = (KeyedStream<IN, K>) inputStream;
patternStream = keyedStream.transform("CepOperator", outTypeInfo, operator);
} else {
KeySelector<IN, Byte> keySelector = new NullByteKeySelector<>();
patternStream =
inputStream
.keyBy(keySelector)
.transform("GlobalCepOperator", outTypeInfo, operator)
.forceNonParallel();
}
return patternStream;
} | 3.68 |
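For orientation, a hedged sketch of the public API that ultimately calls this builder: define a `Pattern`, obtain a `PatternStream` via `CEP.pattern(...)`, and apply a `PatternProcessFunction`. The `Event`/`Alert` types and the match logic are placeholders, not part of the Flink API.

```java
import java.util.List;
import java.util.Map;

import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.functions.PatternProcessFunction;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.util.Collector;

public class CepUsageSketch {

    // Placeholder event/result types for illustration.
    public static class Event { public int id; }
    public static class Alert { public String msg; }

    public static DataStream<Alert> detect(DataStream<Event> input) {
        // A trivial two-step pattern: "start" followed by "end".
        Pattern<Event, ?> pattern = Pattern.<Event>begin("start").next("end");

        PatternStream<Event> patternStream = CEP.pattern(input, pattern);

        // The PatternProcessFunction receives the fully matched sequence keyed by pattern state names.
        return patternStream.process(new PatternProcessFunction<Event, Alert>() {
            @Override
            public void processMatch(Map<String, List<Event>> match, Context ctx, Collector<Alert> out) {
                Alert alert = new Alert();
                alert.msg = "matched " + match.size() + " pattern states";
                out.collect(alert);
            }
        });
    }
}
```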
framework_VaadinService_removeServiceDestroyListener | /**
* Removes a service destroy listener that was previously added with
* {@link #addServiceDestroyListener(ServiceDestroyListener)}.
*
* @since 7.2
* @param listener
* the service destroy listener to remove
* @deprecated use the {@link Registration} object returned by
* {@link #addServiceDestroyListener(ServiceDestroyListener)} to
* remove the listener
*/
@Deprecated
public void removeServiceDestroyListener(ServiceDestroyListener listener) {
serviceDestroyListeners.remove(listener);
} | 3.68 |
hudi_BaseAvroPayload_isDeleteRecord | /**
* @param genericRecord instance of {@link GenericRecord} of interest.
* @return {@code true} if the record represents a delete record, {@code false} otherwise.
*/
protected boolean isDeleteRecord(GenericRecord genericRecord) {
final String isDeleteKey = HoodieRecord.HOODIE_IS_DELETED_FIELD;
// Modify to be compatible with new version Avro.
// The new version Avro throws for GenericRecord.get if the field name
// does not exist in the schema.
if (genericRecord.getSchema().getField(isDeleteKey) == null) {
return false;
}
Object deleteMarker = genericRecord.get(isDeleteKey);
return (deleteMarker instanceof Boolean && (boolean) deleteMarker);
} | 3.68 |
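A small, self-contained sketch of the kind of Avro record this check operates on. The `_hoodie_is_deleted` field name is an assumption standing in for `HoodieRecord.HOODIE_IS_DELETED_FIELD`, and the schema is illustrative only.

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

public class DeleteMarkerSketch {

    public static void main(String[] args) {
        // Assumed delete-marker field name; the payload reads it via HoodieRecord.HOODIE_IS_DELETED_FIELD.
        String isDeleteKey = "_hoodie_is_deleted";

        Schema schema = SchemaBuilder.record("ExampleRecord").fields()
                .requiredString("key")
                .name(isDeleteKey).type().booleanType().booleanDefault(false)
                .endRecord();

        GenericRecord record = new GenericData.Record(schema);
        record.put("key", "k1");
        record.put(isDeleteKey, true);

        // Mirrors the guarded lookup in isDeleteRecord: check the schema before calling get().
        boolean isDelete = schema.getField(isDeleteKey) != null
                && Boolean.TRUE.equals(record.get(isDeleteKey));
        System.out.println("is delete record: " + isDelete);
    }
}
```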
querydsl_MongodbExpressions_nearSphere | /**
* Finds the closest points relative to the given location on a sphere and orders the results with decreasing proximity
*
* @param expr location
* @param latVal latitude
* @param longVal longitude
* @return predicate
*/
public static BooleanExpression nearSphere(Expression<Double[]> expr, double latVal, double longVal) {
return Expressions.booleanOperation(MongodbOps.NEAR_SPHERE, expr, ConstantImpl.create(new Double[]{latVal, longVal}));
} | 3.68 |
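A hedged usage sketch for the predicate above. In practice the `Double[]` expression comes from a generated Querydsl Q-type; here a hand-built path stands in for it, and the `MongodbExpressions` import path is assumed.

```java
import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.SimplePath;
import com.querydsl.mongodb.MongodbExpressions; // assumed package for the helper shown above

public class NearSphereSketch {

    public static BooleanExpression nearParis() {
        // A hand-built path standing in for a generated Q-type's Double[] location property.
        SimplePath<Double[]> location = Expressions.path(Double[].class, "location");
        // Closest points to (lat 48.8566, long 2.3522), ordered by decreasing proximity.
        return MongodbExpressions.nearSphere(location, 48.8566, 2.3522);
    }
}
```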
hmily_HmilyLockCacheManager_get | /**
* Acquire hmily lock.
*
* @param lockId the lock id, used as the Guava cache key.
* @return an {@linkplain Optional} of {@linkplain HmilyLock}
*/
public Optional<HmilyLock> get(final String lockId) {
try {
return loadingCache.get(lockId);
} catch (ExecutionException ex) {
return Optional.empty();
}
} | 3.68 |
flink_ExtractionUtils_validateStructuredClass | /**
* Validates the characteristics of a class for a {@link StructuredType} such as accessibility.
*/
public static void validateStructuredClass(Class<?> clazz) {
final int m = clazz.getModifiers();
if (Modifier.isAbstract(m)) {
throw extractionError("Class '%s' must not be abstract.", clazz.getName());
}
if (!Modifier.isPublic(m)) {
throw extractionError("Class '%s' is not public.", clazz.getName());
}
if (clazz.getEnclosingClass() != null
&& (clazz.getDeclaringClass() == null || !Modifier.isStatic(m))) {
throw extractionError(
"Class '%s' is a not a static, globally accessible class.", clazz.getName());
}
} | 3.68 |
hadoop_StageConfig_getJobId | /**
* Job ID.
*/
public String getJobId() {
return jobId;
} | 3.68 |
flink_FineGrainedSlotManager_allocateTaskManagersAccordingTo | /**
* Allocate pending task managers, returns the ids of pending task managers that can not be
* allocated.
*/
private Set<PendingTaskManagerId> allocateTaskManagersAccordingTo(
List<PendingTaskManager> pendingTaskManagers) {
Preconditions.checkState(resourceAllocator.isSupported());
final Set<PendingTaskManagerId> failedAllocations = new HashSet<>();
for (PendingTaskManager pendingTaskManager : pendingTaskManagers) {
if (!allocateResource(pendingTaskManager)) {
failedAllocations.add(pendingTaskManager.getPendingTaskManagerId());
}
}
return failedAllocations;
} | 3.68 |
flink_BinaryIndexedSortable_checkNextIndexOffset | /** Check if we need to request the next index memory segment. */
protected boolean checkNextIndexOffset() {
if (this.currentSortIndexOffset > this.lastIndexEntryOffset) {
MemorySegment returnSegment = nextMemorySegment();
if (returnSegment != null) {
this.currentSortIndexSegment = returnSegment;
this.sortIndex.add(this.currentSortIndexSegment);
this.currentSortIndexOffset = 0;
} else {
return false;
}
}
return true;
} | 3.68 |
hudi_RealtimeCompactedRecordReader_getMergedLogRecordScanner | /**
* Goes through the log files and populates a map with latest version of each key logged, since the base split was
* written.
*/
private HoodieMergedLogRecordScanner getMergedLogRecordScanner() throws IOException {
// NOTE: HoodieCompactedLogRecordScanner will not return records for an in-flight commit
// but can return records for completed commits > the commit we are trying to read (if using
// readCommit() API)
return HoodieMergedLogRecordScanner.newBuilder()
.withFileSystem(FSUtils.getFs(split.getPath().toString(), jobConf))
.withBasePath(split.getBasePath())
.withLogFilePaths(split.getDeltaLogPaths())
.withReaderSchema(getLogScannerReaderSchema())
.withLatestInstantTime(split.getMaxCommitTime())
.withMaxMemorySizeInBytes(HoodieRealtimeRecordReaderUtils.getMaxCompactionMemoryInBytes(jobConf))
.withReadBlocksLazily(
ConfigUtils.getBooleanWithAltKeys(jobConf,
HoodieReaderConfig.COMPACTION_LAZY_BLOCK_READ_ENABLE))
.withReverseReader(false)
.withBufferSize(jobConf.getInt(HoodieMemoryConfig.MAX_DFS_STREAM_BUFFER_SIZE.key(),
HoodieMemoryConfig.DEFAULT_MR_MAX_DFS_STREAM_BUFFER_SIZE))
.withSpillableMapBasePath(jobConf.get(HoodieMemoryConfig.SPILLABLE_MAP_BASE_PATH.key(),
FileIOUtils.getDefaultSpillableMapBasePath()))
.withDiskMapType(jobConf.getEnum(HoodieCommonConfig.SPILLABLE_DISK_MAP_TYPE.key(), HoodieCommonConfig.SPILLABLE_DISK_MAP_TYPE.defaultValue()))
.withBitCaskDiskMapCompressionEnabled(jobConf.getBoolean(HoodieCommonConfig.DISK_MAP_BITCASK_COMPRESSION_ENABLED.key(),
HoodieCommonConfig.DISK_MAP_BITCASK_COMPRESSION_ENABLED.defaultValue()))
.withOptimizedLogBlocksScan(jobConf.getBoolean(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.key(),
Boolean.parseBoolean(HoodieReaderConfig.ENABLE_OPTIMIZED_LOG_BLOCKS_SCAN.defaultValue())))
.withInternalSchema(schemaEvolutionContext.internalSchemaOption.orElse(InternalSchema.getEmptyInternalSchema()))
.build();
} | 3.68 |
framework_TreeGrid_expandRecursively | /**
* Expands the given items and their children recursively until the given
* depth.
* <p>
* {@code depth} describes the maximum distance between a given item and its
* descendant, meaning that {@code expandRecursively(items, 0)} expands only
* the given items while {@code expandRecursively(items, 2)} expands the
* given items as well as their children and grandchildren.
* <p>
* This method will <i>not</i> fire events for expanded nodes.
*
* @param items
* the items to expand recursively
* @param depth
* the maximum depth of recursion
* @since 8.4
*/
public void expandRecursively(Stream<T> items, int depth) {
if (depth < 0) {
return;
}
HierarchicalDataCommunicator<T> communicator = getDataCommunicator();
items.forEach(item -> {
if (communicator.hasChildren(item)) {
communicator.expand(item, false);
expandRecursively(
getDataProvider().fetchChildren(
new HierarchicalQuery<>(null, item)),
depth - 1);
}
});
getDataProvider().refreshAll();
} | 3.68 |
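A brief usage sketch for the method above, assuming a `TreeGrid<Department>` with a hypothetical item type and a list of root items obtained elsewhere; depth 2 expands the roots, their children and grandchildren.

```java
import java.util.List;

import com.vaadin.ui.TreeGrid;

public class TreeGridExpandSketch {

    // Hypothetical item type used by the grid.
    public static class Department { /* ... */ }

    public static void expandTopLevels(TreeGrid<Department> treeGrid, List<Department> rootItems) {
        // Expands the given roots plus two levels of descendants, without firing expand events.
        treeGrid.expandRecursively(rootItems.stream(), 2);
    }
}
```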
pulsar_AuthorizationProvider_allowTopicPolicyOperationAsync | /**
* Check if a given <tt>role</tt> is allowed to execute a given topic <tt>operation</tt> on topic's <tt>policy</tt>.
*
* @param topic topic name
* @param role role name
* @param policy policy name
* @param operation topic operation
* @param authData authenticated data
* @return CompletableFuture<Boolean>
*/
default CompletableFuture<Boolean> allowTopicPolicyOperationAsync(TopicName topic,
String role,
PolicyName policy,
PolicyOperation operation,
AuthenticationDataSource authData) {
return FutureUtil.failedFuture(
new IllegalStateException("TopicPolicyOperation [" + policy.name() + "/" + operation.name() + "] "
+ "is not supported by the Authorization provider you are using."));
} | 3.68 |
morf_AbstractSqlDialectTest_testRenamingTableWithLongName | /**
* Tests that the syntax is correct for renaming a table which has a long name.
*/
@SuppressWarnings("unchecked")
@Test
public void testRenamingTableWithLongName() {
String tableNameOver30 = "123456789012345678901234567890XXX";
String indexName30 = "123456789012345678901234567_PK";
Table longNamedTable = table(tableNameOver30)
.columns(
idColumn(),
versionColumn(),
column("someField", DataType.STRING, 3).nullable()
).indexes(
index(indexName30).unique().columns("someField")
);
Table renamedTable = table("Blah")
.columns(
idColumn(),
versionColumn(),
column("someField", DataType.STRING, 3).nullable()
).indexes(
index("Blah_PK").unique().columns("someField")
);
compareStatements(getRenamingTableWithLongNameStatements(), getTestDialect().renameTableStatements(longNamedTable, renamedTable));
} | 3.68 |
hbase_HBaseReplicationEndpoint_reportBadSink | /**
* Report a {@code SinkPeer} as being bad (i.e. an attempt to replicate to it failed). If a single
* SinkPeer is reported as bad more than replication.bad.sink.threshold times, it will be removed
* from the pool of potential replication targets.
* @param sinkPeer The SinkPeer that had a failed replication attempt on it
*/
protected synchronized void reportBadSink(SinkPeer sinkPeer) {
ServerName serverName = sinkPeer.getServerName();
int badReportCount = badReportCounts.compute(serverName, (k, v) -> v == null ? 1 : v + 1);
if (badReportCount > badSinkThreshold) {
this.sinkServers.remove(serverName);
if (sinkServers.isEmpty()) {
chooseSinks();
}
}
} | 3.68 |
flink_DefaultRollingPolicy_getMaxPartSize | /**
* Returns the maximum part file size before rolling.
*
* @return Max size in bytes
*/
public long getMaxPartSize() {
return partSize;
} | 3.68 |
framework_VComboBox_updateReadOnly | /** For internal use only. May be removed or replaced in the future. */
public void updateReadOnly() {
if (readonly) {
suggestionPopup.hide();
}
debug("VComboBox: updateReadOnly()");
tb.setReadOnly(readonly || !textInputEnabled);
} | 3.68 |
flink_ExecutionVertexInputInfo_getPartitionIndexRange | /** Get the partition range this subtask should consume. */
public IndexRange getPartitionIndexRange() {
return partitionIndexRange;
} | 3.68 |
flink_RemoteStreamEnvironment_getClientConfiguration | /** @deprecated This method is going to be removed in the next releases. */
@Deprecated
public Configuration getClientConfiguration() {
return configuration;
} | 3.68 |
hadoop_Tracer_curThreadTracer | // Keeping this function at the moment for HTrace compatibility,
// in fact all threads share a single global tracer for OpenTracing.
public static Tracer curThreadTracer() {
return globalTracer;
} | 3.68 |
pulsar_PropertiesUtils_filterAndMapProperties | /**
* Filters the {@link Properties} object so that only properties with the configured prefix are retained,
* and then replaces the srcPrefix with the targetPrefix when putting the key value pairs in the resulting map.
* @param props - the properties object to filter
* @param srcPrefix - the prefix to filter against and then remove for keys in the resulting map
* @param targetPrefix - the prefix to add to keys in the result map
* @return a map of properties
*/
public static Map<String, Object> filterAndMapProperties(Properties props, String srcPrefix, String targetPrefix) {
Map<String, Object> result = new HashMap<>();
int prefixLength = srcPrefix.length();
props.forEach((keyObject, value) -> {
if (!(keyObject instanceof String)) {
return;
}
String key = (String) keyObject;
if (key.startsWith(srcPrefix) && value != null) {
String truncatedKey = key.substring(prefixLength);
result.put(targetPrefix + truncatedKey, value);
}
});
return result;
} | 3.68 |
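A quick illustration of the prefix filtering and remapping described above. The property names are made up, and the import path for `PropertiesUtils` is an assumption.

```java
import java.util.Map;
import java.util.Properties;

import org.apache.pulsar.common.util.PropertiesUtils; // assumed location of the helper shown above

public class PrefixRemapSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("brokerClient_tlsEnabled", "true");        // kept, prefix stripped
        props.setProperty("brokerClient_authPluginClassName", "X");  // kept, prefix stripped
        props.setProperty("webServicePort", "8080");                 // dropped: wrong prefix

        // "brokerClient_tlsEnabled" -> "tlsEnabled", etc. (empty target prefix).
        Map<String, Object> clientConf =
                PropertiesUtils.filterAndMapProperties(props, "brokerClient_", "");
        clientConf.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}
```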
graphhopper_VectorTile_setSintValue | /**
* <code>optional sint64 sint_value = 6;</code>
*/
public Builder setSintValue(long value) {
bitField0_ |= 0x00000020;
sintValue_ = value;
onChanged();
return this;
} | 3.68 |
flink_StreamGraphUtils_configureBufferTimeout | /**
* Configure a stream node's buffer timeout according to the given transformation.
*
* @param streamGraph The StreamGraph the node belongs to
* @param nodeId The node's id
* @param transformation A given transformation
* @param defaultBufferTimeout The default buffer timeout value
*/
public static <T> void configureBufferTimeout(
StreamGraph streamGraph,
int nodeId,
Transformation<T> transformation,
long defaultBufferTimeout) {
if (transformation.getBufferTimeout() >= 0) {
streamGraph.setBufferTimeout(nodeId, transformation.getBufferTimeout());
} else {
streamGraph.setBufferTimeout(nodeId, defaultBufferTimeout);
}
} | 3.68 |
hadoop_MultiObjectDeleteException_translateException | /**
* A {@code MultiObjectDeleteException} is raised if one or more
* paths listed in a bulk DELETE operation failed.
* The top-level exception is therefore just "something wasn't deleted",
* but doesn't include the what or the why.
* This translation will extract an AccessDeniedException if that's one of
* the causes, otherwise grabs the status code and uses it in the
* returned exception.
* @param message text for the exception
* @return an IOE with more detail.
*/
public IOException translateException(final String message) {
LOG.info("Bulk delete operation failed to delete all objects;"
+ " failure count = {}",
errors().size());
final StringBuilder result = new StringBuilder(
errors().size() * 256);
result.append(message).append(": ");
String exitCode = "";
for (S3Error error : errors()) {
String code = error.code();
String item = String.format("%s: %s%s: %s%n", code, error.key(),
(error.versionId() != null
? (" (" + error.versionId() + ")")
: ""),
error.message());
LOG.info(item);
result.append(item);
if (exitCode == null || exitCode.isEmpty() || ACCESS_DENIED.equals(code)) {
exitCode = code;
}
}
if (ACCESS_DENIED.equals(exitCode)) {
return (IOException) new AccessDeniedException(result.toString())
.initCause(this);
} else {
return new AWSS3IOException(result.toString(), this);
}
} | 3.68 |
framework_DragAndDropService_handleDragRequest | /**
* Handles a drag/move request from the VDragAndDropManager.
*
* @param dropTarget the current drop target of the drag operation
* @param variables the drag event variables sent by the client
*/
private void handleDragRequest(DropTarget dropTarget,
Map<String, Object> variables) {
lastVisitId = (Integer) variables.get("visitId");
acceptCriterion = dropTarget.getDropHandler().getAcceptCriterion();
/*
* Construct the Transferable and the DragDropDetails for the drag
* operation based on the info passed from the client widgets (drag
* source for Transferable, current target for DragDropDetails).
*/
Transferable transferable = constructTransferable(variables);
TargetDetails dragDropDetails = constructDragDropDetails(dropTarget,
variables);
dragEvent = new DragAndDropEvent(transferable, dragDropDetails);
lastVisitAccepted = acceptCriterion.accept(dragEvent);
} | 3.68 |
pulsar_AbstractTopic_updateResourceGroupLimiter | /**
* @deprecated Avoid using the deprecated method
* #{@link org.apache.pulsar.broker.resources.NamespaceResources#getPoliciesIfCached(NamespaceName)} and we can use
* #{@link AbstractTopic#updateResourceGroupLimiter(Policies)} to instead of it.
*/
@Deprecated
public void updateResourceGroupLimiter(Optional<Policies> optPolicies) {
Policies policies;
try {
policies = optPolicies.orElseGet(() ->
brokerService.pulsar()
.getPulsarResources()
.getNamespaceResources()
.getPoliciesIfCached(TopicName.get(topic).getNamespaceObject())
.orElseGet(Policies::new));
} catch (Exception e) {
log.warn("[{}] Error getting policies {} and publish throttling will be disabled", topic, e.getMessage());
policies = new Policies();
}
updateResourceGroupLimiter(policies);
} | 3.68 |
hudi_HoodieBaseFile_maybeHandleExternallyGeneratedFileName | /**
* If the file was created externally, the original file path will have a '_[commitTime]_hudiext' suffix when stored in the metadata table. That suffix needs to be removed from the FileStatus so
* that the actual file can be found and read.
* @param fileStatus an input file status that may require updating
* @param fileId the fileId for the file
* @return the original file status if it was not externally created, or a new FileStatus with the original file name if it was externally created
*/
private static FileStatus maybeHandleExternallyGeneratedFileName(FileStatus fileStatus, String fileId) {
if (fileStatus == null) {
return null;
}
if (ExternalFilePathUtil.isExternallyCreatedFile(fileStatus.getPath().getName())) {
// fileId is the same as the original file name for externally created files
Path parent = fileStatus.getPath().getParent();
return new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(), fileStatus.getReplication(),
fileStatus.getBlockSize(), fileStatus.getModificationTime(), fileStatus.getAccessTime(),
fileStatus.getPermission(), fileStatus.getOwner(), fileStatus.getGroup(),
new CachingPath(parent, createRelativePathUnsafe(fileId)));
} else {
return fileStatus;
}
} | 3.68 |
hbase_HRegion_checkAndPreparePut | /**
* During replay, there could exist column families which are removed between region server
* failure and replay
*/
@Override
protected void checkAndPreparePut(Put p) throws IOException {
Map<byte[], List<Cell>> familyCellMap = p.getFamilyCellMap();
List<byte[]> nonExistentList = null;
for (byte[] family : familyCellMap.keySet()) {
if (!region.htableDescriptor.hasColumnFamily(family)) {
if (nonExistentList == null) {
nonExistentList = new ArrayList<>();
}
nonExistentList.add(family);
}
}
if (nonExistentList != null) {
for (byte[] family : nonExistentList) {
// Perhaps schema was changed between crash and replay
LOG.info("No family for {} omit from reply in region {}.", Bytes.toString(family), this);
familyCellMap.remove(family);
}
}
} | 3.68 |
flink_AbstractServerBase_start | /**
* Starts the server by binding to the configured bind address (blocking).
*
* @throws Exception If something goes wrong during the bind operation.
*/
public void start() throws Throwable {
Preconditions.checkState(
serverAddress == null && serverShutdownFuture.get() == null,
serverName + " is already running @ " + serverAddress + ". ");
Iterator<Integer> portIterator = bindPortRange.iterator();
while (portIterator.hasNext() && !attemptToBind(portIterator.next())) {}
if (serverAddress != null) {
log.info("Started {} @ {}.", serverName, serverAddress);
} else {
log.info(
"Unable to start {}. All ports in provided range ({}) are occupied.",
serverName,
bindPortRange);
throw new FlinkRuntimeException(
"Unable to start "
+ serverName
+ ". All ports in provided range are occupied.");
}
} | 3.68 |
querydsl_AbstractMySQLQuery_straightJoin | /**
* STRAIGHT_JOIN forces the optimizer to join the tables in the order in which they are listed
* in the FROM clause. You can use this to speed up a query if the optimizer joins the tables
* in nonoptimal order. STRAIGHT_JOIN also can be used in the table_references list.
*
* @return the current object
*/
public C straightJoin() {
return addFlag(Position.AFTER_SELECT, STRAIGHT_JOIN);
} | 3.68 |
hudi_LSMTimelineWriter_clean | /**
* Cleans up obsolete snapshot versions, deleting manifest and data files that are no longer
* referenced by the retained versions.
*
* @param context HoodieEngineContext used to parallelize deletion of obsolete files if necessary.
* @param compactedVersions number of already-compacted versions to retain in addition to the default.
*/
public void clean(HoodieEngineContext context, int compactedVersions) throws IOException {
// if there are more than 3 versions of snapshots, clean the oldest files.
List<Integer> allSnapshotVersions = LSMTimeline.allSnapshotVersions(metaClient);
int numVersionsToKeep = 3 + compactedVersions; // should make the threshold configurable.
if (allSnapshotVersions.size() > numVersionsToKeep) {
allSnapshotVersions.sort((v1, v2) -> v2 - v1);
List<Integer> versionsToKeep = allSnapshotVersions.subList(0, numVersionsToKeep);
Set<String> filesToKeep = versionsToKeep.stream()
.flatMap(version -> LSMTimeline.latestSnapshotManifest(metaClient, version).getFileNames().stream())
.collect(Collectors.toSet());
// delete the manifest file first
List<String> manifestFilesToClean = new ArrayList<>();
Arrays.stream(LSMTimeline.listAllManifestFiles(metaClient)).forEach(fileStatus -> {
if (!versionsToKeep.contains(LSMTimeline.getManifestVersion(fileStatus.getPath().getName()))) {
manifestFilesToClean.add(fileStatus.getPath().toString());
}
});
FSUtils.deleteFilesParallelize(metaClient, manifestFilesToClean, context, config.getArchiveDeleteParallelism(), false);
// delete the data files
List<String> dataFilesToClean = Arrays.stream(LSMTimeline.listAllMetaFiles(metaClient))
.filter(fileStatus -> !filesToKeep.contains(fileStatus.getPath().getName()))
.map(fileStatus -> fileStatus.getPath().toString())
.collect(Collectors.toList());
FSUtils.deleteFilesParallelize(metaClient, dataFilesToClean, context, config.getArchiveDeleteParallelism(), false);
}
} | 3.68 |
pulsar_TopicPoliciesImpl_validateTopic | /*
* returns topic name with encoded Local Name
*/
private TopicName validateTopic(String topic) {
// Parsing will throw exception if name is not valid
return TopicName.get(topic);
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWhereInSubquery | /**
* Tests a select with an IN operator against a sub-query.
*/
@Test
public void testSelectWhereInSubquery() {
SelectStatement inStatement = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(TEST_TABLE))
.where(isNotNull(new FieldReference(INT_FIELD)));
SelectStatement stmt = new SelectStatement()
.from(new TableReference(ALTERNATE_TABLE))
.where(in(new FieldReference(STRING_FIELD), inStatement));
String expectedSql = "SELECT * FROM " + tableName(ALTERNATE_TABLE) + " WHERE (stringField IN (SELECT stringField FROM " + tableName(TEST_TABLE) + " WHERE (intField IS NOT NULL)))";
assertEquals("Select with exists check", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_SourceTestSuiteBase_testScaleDown | /**
* Test connector source restart from a savepoint with a lower parallelism.
*
* <p>This test will create 4 splits in the external system first, write test data to all splits
and consume back via a Flink job with parallelism 4. Then stop the job with a savepoint and
restart the job from that savepoint with a lower parallelism of 2. After the restarted job is
running, add some extra data to the source and compare the result.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*/
@TestTemplate
@DisplayName("Test source restarting with a lower parallelism")
public void testScaleDown(
TestEnvironment testEnv,
DataStreamSourceExternalContext<T> externalContext,
CheckpointingMode semantic)
throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 4, 4, 2);
} | 3.68 |
pulsar_ManagedLedgerConfig_isLazyCursorRecovery | /**
* @return the lazyCursorRecovery
*/
public boolean isLazyCursorRecovery() {
return lazyCursorRecovery;
} | 3.68 |
flink_PrioritizedOperatorSubtaskState_getPrioritizedManagedOperatorState | /**
* Returns an immutable list with all alternative snapshots to restore the managed operator
* state, in the order in which we should attempt to restore.
*/
@Nonnull
public List<StateObjectCollection<OperatorStateHandle>> getPrioritizedManagedOperatorState() {
return prioritizedManagedOperatorState;
} | 3.68 |
hadoop_RegistryPathUtils_validateZKPath | /**
* Validate ZK path with the path itself included in
* the exception text
* @param path path to validate
* @return the path parameter
* @throws InvalidPathnameException if the pathname is invalid.
*/
public static String validateZKPath(String path) throws
InvalidPathnameException {
try {
PathUtils.validatePath(path);
} catch (IllegalArgumentException e) {
throw new InvalidPathnameException(path,
"Invalid Path \"" + path + "\" : " + e, e);
}
return path;
} | 3.68 |
hbase_StoreScanner_needToReturn | /**
* If the top cell won't be flushed into disk, the new top cell may be changed after
* #reopenAfterFlush. Because the older top cell only exist in the memstore scanner but the
* memstore scanner is replaced by hfile scanner after #reopenAfterFlush. If the row of top cell
* is changed, we should return the current cells. Otherwise, we may return the cells across
* different rows.
* @param outResult the cells which are visible for user scan
* @return null if the top cell doesn't change. Otherwise, the NextState to return
*/
private NextState needToReturn(List<Cell> outResult) {
if (!outResult.isEmpty() && topChanged) {
return heap.peek() == null ? NextState.NO_MORE_VALUES : NextState.MORE_VALUES;
}
return null;
} | 3.68 |
framework_Tree_setNewItemsAllowed | /**
* Adding new items is not supported.
*
* @throws UnsupportedOperationException
* if set to true.
* @see Select#setNewItemsAllowed(boolean)
*/
@Override
public void setNewItemsAllowed(boolean allowNewOptions)
throws UnsupportedOperationException {
if (allowNewOptions) {
throw new UnsupportedOperationException();
}
} | 3.68 |
framework_SelectorPredicate_extractPredicateString | /**
* Returns the predicate string, i.e. the string between the brackets in a
* path fragment. Examples: <code>
* VTextField[0] => 0
* VTextField[caption='foo'] => caption='foo'
* </code>
*
* @param pathFragment
* The path fragment from which to extract the predicate string.
* @return The predicate string for the path fragment or empty string if not
* found.
*/
private static String extractPredicateString(String pathFragment) {
int ixOpenBracket = LocatorUtil.indexOfIgnoringQuoted(pathFragment,
'[');
if (ixOpenBracket >= 0) {
int ixCloseBracket = LocatorUtil.indexOfIgnoringQuoted(pathFragment,
']', ixOpenBracket);
return pathFragment.substring(ixOpenBracket + 1, ixCloseBracket);
}
return "";
} | 3.68 |
hbase_ByteBufferUtils_copyOfRange | /**
* Similar to {@link Arrays#copyOfRange(byte[], int, int)}
* @param original the buffer from which the copy has to happen
* @param from the starting index
* @param to the ending index
* @return a byte[] created out of the copy
*/
public static byte[] copyOfRange(ByteBuffer original, int from, int to) {
int newLength = to - from;
if (newLength < 0) {
throw new IllegalArgumentException(from + " > " + to);
}
byte[] copy = new byte[newLength];
ByteBufferUtils.copyFromBufferToArray(copy, original, from, 0, newLength);
return copy;
} | 3.68 |
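A tiny self-contained example of slicing a `ByteBuffer` with the helper above; the semantics mirror `Arrays.copyOfRange` on arrays.

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;

public class CopyOfRangeSketch {

    public static void main(String[] args) {
        ByteBuffer original = ByteBuffer.wrap(Bytes.toBytes("hello-hbase"));
        // Copy bytes [6, 11) of the buffer -> "hbase".
        byte[] slice = ByteBufferUtils.copyOfRange(original, 6, 11);
        System.out.println(Bytes.toString(slice));                        // prints "hbase"
        System.out.println(Arrays.equals(slice, Bytes.toBytes("hbase"))); // true
    }
}
```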
flink_RefCountedTmpFileCreator_apply | /**
* Gets the next temp file and stream to temp file. This creates the temp file atomically,
* making sure no previous file is overwritten.
*
* <p>This method is safe against concurrent use.
*
* @return A pair of temp file and output stream to that temp file.
* @throws IOException Thrown, if the stream to the temp file could not be opened.
*/
@Override
public RefCountedFileWithStream apply(File file) throws IOException {
final File directory = tempDirectories[nextIndex()];
while (true) {
try {
if (file == null) {
final File newFile = new File(directory, ".tmp_" + UUID.randomUUID());
final OutputStream out =
Files.newOutputStream(newFile.toPath(), StandardOpenOption.CREATE_NEW);
return RefCountedFileWithStream.newFile(newFile, out);
} else {
final OutputStream out =
Files.newOutputStream(file.toPath(), StandardOpenOption.APPEND);
return RefCountedFileWithStream.restoredFile(file, out, file.length());
}
} catch (FileAlreadyExistsException ignored) {
// fall through the loop and retry
}
}
} | 3.68 |
framework_Tree_collapseItem | /**
* Collapses an item.
*
* @param itemId
* the item id.
* @return True if the collapse operation succeeded
*/
public boolean collapseItem(Object itemId) {
// Succeeds if the node is already collapsed
if (!isExpanded(itemId)) {
return true;
}
// Collapse
expanded.remove(itemId);
markAsDirty();
fireCollapseEvent(itemId);
return true;
} | 3.68 |
hbase_MutableRegionInfo_isOffline | /**
* @return True if this region is offline.
* @deprecated since 3.0.0 and will be removed in 4.0.0
* @see <a href="https://issues.apache.org/jira/browse/HBASE-25210">HBASE-25210</a>
*/
@Override
@Deprecated
public boolean isOffline() {
return this.offLine;
} | 3.68 |
hbase_Constraints_disableConstraint | /**
* Disable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the
* {@link Constraint}, but it just doesn't load the {@link Constraint} on the table.
* @param builder {@link TableDescriptorBuilder} to modify
* @param clazz {@link Constraint} to disable.
* @throws IOException if the constraint cannot be found
*/
public static void disableConstraint(TableDescriptorBuilder builder,
Class<? extends Constraint> clazz) throws IOException {
changeConstraintEnabled(builder, clazz, false);
} | 3.68 |
pulsar_BacklogQuotaManager_handleExceededBacklogQuota | /**
* Handles an exceeded backlog by applying the policies set in ZooKeeper for the given topic.
*
* @param persistentTopic Topic on which backlog has been exceeded
*/
public void handleExceededBacklogQuota(PersistentTopic persistentTopic, BacklogQuotaType backlogQuotaType,
boolean preciseTimeBasedBacklogQuotaCheck) {
BacklogQuota quota = persistentTopic.getBacklogQuota(backlogQuotaType);
log.info("Backlog quota type {} exceeded for topic [{}]. Applying [{}] policy", backlogQuotaType,
persistentTopic.getName(), quota.getPolicy());
switch (quota.getPolicy()) {
case consumer_backlog_eviction:
switch (backlogQuotaType) {
case destination_storage:
dropBacklogForSizeLimit(persistentTopic, quota);
break;
case message_age:
dropBacklogForTimeLimit(persistentTopic, quota, preciseTimeBasedBacklogQuotaCheck);
break;
default:
break;
}
break;
case producer_exception:
case producer_request_hold:
if (!advanceSlowestSystemCursor(persistentTopic)) {
// The slowest is not a system cursor. Disconnecting producers to put backpressure.
disconnectProducers(persistentTopic);
}
break;
default:
break;
}
} | 3.68 |
hadoop_AbstractS3ACommitter_getTaskAttemptFilesystem | /**
* Get the task attempt path filesystem. This may not be the same as the
* final destination FS, and so may not be an S3A FS.
* @param context task attempt
* @return the filesystem
* @throws IOException failure to instantiate
*/
protected FileSystem getTaskAttemptFilesystem(TaskAttemptContext context)
throws IOException {
return getTaskAttemptPath(context).getFileSystem(getConf());
} | 3.68 |
framework_VColorPickerArea_setHeight | /**
* Sets the color area's height. This height does not include caption or
* decorations such as border, margin, and padding.
*/
@Override
public void setHeight(String height) {
area.setHeight(height);
} | 3.68 |
framework_ApplicationConnection_setResource | /**
* Sets a resource that has been pre-loaded via UIDL, such as custom
* layouts.
*
* @since 7.6
* @param name
identifier of the resource to set
* @param resource
* the resource
*/
public void setResource(String name, String resource) {
resourcesMap.put(name, resource);
} | 3.68 |
flink_BinaryHashTable_nextMatching | /** Next record from rebuilt spilled partition or build side outer partition. */
public boolean nextMatching() throws IOException {
if (type.needSetProbed()) {
return processProbeIter() || processBuildIter() || prepareNextPartition();
} else {
return processProbeIter() || prepareNextPartition();
}
} | 3.68 |
streampipes_AdapterResourceManager_cloneAndEncrypt | /**
* Takes an adapterDescription and returns an encrypted copy
*/
private AdapterDescription cloneAndEncrypt(AdapterDescription adapterDescription) {
AdapterDescription encryptedAdapterDescription = new Cloner().adapterDescription(adapterDescription);
SecretProvider.getEncryptionService().apply(encryptedAdapterDescription);
return encryptedAdapterDescription;
} | 3.68 |
hbase_WALEdit_createRegionEventDescriptorQualifier | /**
* @return Cell qualifier for the passed in RegionEventDescriptor Type; e.g. we'll return
* something like a byte array with HBASE::REGION_EVENT::REGION_OPEN in it.
*/
@InterfaceAudience.Private
public static byte[] createRegionEventDescriptorQualifier(RegionEventDescriptor.EventType t) {
return Bytes.toBytes(REGION_EVENT_PREFIX_STR + t.toString());
} | 3.68 |
hadoop_SchedulerHealth_getReleaseCount | /**
* Get the count of release from the latest scheduler health report.
*
* @return release count
*/
public Long getReleaseCount() {
return getOperationCount(Operation.RELEASE);
} | 3.68 |
morf_DataSourceAdapter_isWrapperFor | /**
* @see java.sql.Wrapper#isWrapperFor(java.lang.Class)
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new UnsupportedOperationException("Wrappers not supported");
} | 3.68 |
morf_DatabaseDataSetProducer_open | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#open()
*/
@Override
public void open() {
try {
this.connection = dataSource.getConnection();
this.wasAutoCommit = connection.getAutoCommit();
// disable auto-commit on this connection for HSQLDB performance
connection.setAutoCommit(false);
} catch (SQLException e) {
throw new RuntimeSqlException("Error opening connection", e);
}
} | 3.68 |
hbase_CatalogFamilyFormat_parseRegionInfoFromRegionName | /**
* Returns an HRI parsed from this regionName. Not all the fields of the HRI is stored in the
* name, so the returned object should only be used for the fields in the regionName.
* <p/>
* Since the returned object does not contain all the fields, we do not expose this method in
* public API, such as {@link RegionInfo} or {@link RegionInfoBuilder}.
*/
public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws IOException {
byte[][] fields = RegionInfo.parseRegionName(regionName);
long regionId = Long.parseLong(Bytes.toString(fields[2]));
int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1])
.setRegionId(regionId).setReplicaId(replicaId).build();
} | 3.68 |
flink_Configuration_getRawValueFromOption | /**
* This method will do the following steps to get the value of a config option:
*
* <p>1. get the value from {@link Configuration}. <br>
* 2. if key is not found, try to get the value with fallback keys from {@link Configuration}
* <br>
* 3. if no fallback keys are found, return {@link Optional#empty()}. <br>
*
* @return the value of the configuration or {@link Optional#empty()}.
*/
private Optional<Object> getRawValueFromOption(ConfigOption<?> configOption) {
return applyWithOption(configOption, this::getRawValue);
} | 3.68 |
flink_BufferSizeEMA_calculateBufferSize | /**
* Calculating the buffer size over total possible buffers size and number of buffers in use.
*
* @param totalBufferSizeInBytes Total buffers size.
* @param totalBuffers Total number of buffers in use.
* @return Throughput calculated according to implemented algorithm.
*/
public int calculateBufferSize(long totalBufferSizeInBytes, int totalBuffers) {
checkArgument(totalBufferSizeInBytes >= 0, "Size of buffer should be non negative");
checkArgument(totalBuffers > 0, "Number of buffers should be positive");
// Since the result value is always limited by the max buffer size while the instant value is
// potentially unlimited, this could otherwise lead to an instant jump from the min to the max
// value when the instant value is significantly larger than the possible max value.
// The solution is to limit the instant buffer size to twice the current buffer size in order
// to have the same growth and shrink speeds. For example, if the instant value is 0 and the
// current value is 16000, we can decrease it by at most 1600 (assuming alpha = 0.1).
// The idea is to allow increasing and decreasing the size by the same amount. So even if the
// instant value were large (for example 100000), the current value could only be increased by
// 1600 (the same as the maximum decrease), because the limit is 2 * currentValue = 32000.
// Example of change speed:
// growing = 32768, 29647, 26823, 24268, 21956, 19864
// shrinking = 19864, 21755, 23826, 26095, 28580, 31301, 32768
long desirableBufferSize =
Math.min(totalBufferSizeInBytes / totalBuffers, 2L * lastBufferSize);
lastBufferSize += alpha * (desirableBufferSize - lastBufferSize);
return lastBufferSize = Math.max(minBufferSize, Math.min(lastBufferSize, maxBufferSize));
} | 3.68 |
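A small standalone sketch of the same exponential-moving-average update, useful for seeing the clamping behaviour with concrete numbers. The constants below are illustrative, not Flink's defaults.

```java
public class BufferSizeEmaSketch {

    private final double alpha = 0.1;       // smoothing factor (illustrative)
    private final int minBufferSize = 256;
    private final int maxBufferSize = 32768;
    private int lastBufferSize = 16000;

    public int calculate(long totalBufferSizeInBytes, int totalBuffers) {
        // Limit the instant value to twice the current size so growth and shrink speeds match.
        long desirable = Math.min(totalBufferSizeInBytes / totalBuffers, 2L * lastBufferSize);
        lastBufferSize += (int) (alpha * (desirable - lastBufferSize));
        return lastBufferSize = Math.max(minBufferSize, Math.min(lastBufferSize, maxBufferSize));
    }

    public static void main(String[] args) {
        BufferSizeEmaSketch ema = new BufferSizeEmaSketch();
        // Instant value 0 (no bytes to distribute): size shrinks by alpha * lastBufferSize = 1600.
        System.out.println(ema.calculate(0, 8));         // 14400
        // Very large instant value: growth is capped by the 2 * lastBufferSize limit.
        System.out.println(ema.calculate(1_000_000, 2)); // 15840, bounded growth
    }
}
```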
morf_FieldReference_desc | /**
* sets descending order on this field
* @return this
*/
public Builder desc() {
this.direction = Direction.DESCENDING;
return this;
} | 3.68 |
framework_AbstractConnector_getStateType | /**
* Find the type of the state for the given connector.
*
* @param connector
* the connector whose state type to find
* @return the state type
*/
public static Type getStateType(ServerConnector connector) {
try {
return TypeData.getType(connector.getClass()).getMethod("getState")
.getReturnType();
} catch (NoDataException e) {
throw new IllegalStateException(
"There is no information about the state for "
+ connector.getClass().getSimpleName()
+ ". Did you remember to compile the right widgetset?",
e);
}
} | 3.68 |
pulsar_SchemaReader_getNativeSchema | /**
* Returns the underlying Schema if possible.
* @return the schema, or an empty Optional if it is not possible to access it
*/
default Optional<Object> getNativeSchema() {
return Optional.empty();
} | 3.68 |
pulsar_PulsarAuthorizationProvider_canProduceAsync | /**
* Check if the specified role has permission to send messages to the specified fully qualified topic name.
*
* @param topicName
* the fully qualified topic name associated with the topic.
* @param role
* the app id used to send messages to the topic.
*/
@Override
public CompletableFuture<Boolean> canProduceAsync(TopicName topicName, String role,
AuthenticationDataSource authenticationData) {
return checkAuthorization(topicName, role, AuthAction.produce);
} | 3.68 |
dubbo_DubboBootstrap_stop | /**
 * Stops the Dubbo application.
 *
 * @return this {@code DubboBootstrap} instance
* @throws IllegalStateException
*/
public DubboBootstrap stop() throws IllegalStateException {
destroy();
return this;
} | 3.68 |
morf_XmlDataSetProducer_isTemporary | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.metadata.Table#isTemporary()
*/
@Override
public boolean isTemporary() {
return false;
} | 3.68 |
pulsar_ResourceGroup_getRgRemoteUsageMessageCount | // Visibility for unit testing
    protected static double getRgRemoteUsageMessageCount(String rgName, String monClassName, String brokerName) {
return rgRemoteUsageReportsMessages.labels(rgName, monClassName, brokerName).get();
} | 3.68 |
hudi_RestoreUtils_getRestorePlan | /**
 * Gets the latest version of the restore plan corresponding to a restore instant.
 *
 * @param metaClient Hoodie Table Meta Client
 * @param restoreInstant Instant referring to restore action
 * @return Restore plan corresponding to the restore instant
* @throws IOException
*/
public static HoodieRestorePlan getRestorePlan(HoodieTableMetaClient metaClient, HoodieInstant restoreInstant)
throws IOException {
final HoodieInstant requested = HoodieTimeline.getRollbackRequestedInstant(restoreInstant);
return TimelineMetadataUtils.deserializeAvroMetadata(
metaClient.getActiveTimeline().readRestoreInfoAsBytes(requested).get(), HoodieRestorePlan.class);
} | 3.68 |
morf_HumanReadableStatementHelper_generateAddIndexString | /**
* Generates a human-readable "Add Index" string.
*
* @param tableName the name of the table to add the index to
* @param index the definition of the index to add
* @return a string containing the human-readable version of the action
*/
public static String generateAddIndexString(final String tableName, final Index index) {
return String.format("Add %s index called %s to %s", generateUniqueIndexString(index), index.getName(), tableName);
} | 3.68 |
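For illustration, assuming morf's SchemaUtils index builder and that generateUniqueIndexString renders "unique"/"non-unique", a call might look like this (index and table names are hypothetical).

    Index idx = SchemaUtils.index("Agreement_1").columns("agreementNumber").unique();
    String s = HumanReadableStatementHelper.generateAddIndexString("Agreement", idx);
    // e.g. "Add unique index called Agreement_1 to Agreement"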
graphhopper_LMApproximator_setEpsilon | /**
 * Increases the approximation with a higher epsilon.
*/
public LMApproximator setEpsilon(double epsilon) {
this.epsilon = epsilon;
return this;
} | 3.68 |
flink_BinaryStringData_endsWith | /**
* Tests if this BinaryStringData ends with the specified suffix.
*
* @param suffix the suffix.
* @return {@code true} if the bytes represented by the argument is a suffix of the bytes
* represented by this object; {@code false} otherwise. Note that the result will be {@code
* true} if the argument is the empty string or is equal to this {@code BinaryStringData}
* object as determined by the {@link #equals(Object)} method.
*/
public boolean endsWith(final BinaryStringData suffix) {
ensureMaterialized();
suffix.ensureMaterialized();
return matchAt(suffix, binarySection.sizeInBytes - suffix.binarySection.sizeInBytes);
} | 3.68 |
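A hedged usage sketch of the documented edge cases, assuming the usual BinaryStringData.fromString factory.

    BinaryStringData s = BinaryStringData.fromString("flink-sql");
    s.endsWith(BinaryStringData.fromString("-sql"));   // true
    s.endsWith(BinaryStringData.fromString(""));       // true: the empty suffix always matches
    s.endsWith(s);                                     // true: a string ends with itself
    s.endsWith(BinaryStringData.fromString("flink"));  // false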
flink_SchemaValidator_deriveRowtimeAttributes | /** Finds the rowtime attributes if defined. */
public static List<RowtimeAttributeDescriptor> deriveRowtimeAttributes(
DescriptorProperties properties) {
Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME);
List<RowtimeAttributeDescriptor> attributes = new ArrayList<>();
// check for rowtime in every field
for (int i = 0; i < names.size(); i++) {
Optional<Tuple2<TimestampExtractor, WatermarkStrategy>> rowtimeComponents =
RowtimeValidator.getRowtimeComponents(properties, SCHEMA + "." + i + ".");
int index = i;
// create descriptor
rowtimeComponents.ifPresent(
tuple2 ->
attributes.add(
new RowtimeAttributeDescriptor(
properties.getString(
SCHEMA + "." + index + "." + SCHEMA_NAME),
tuple2.f0,
tuple2.f1)));
}
return attributes;
} | 3.68 |
zxing_HighLevelEncoder_updateStateListForChar | // We update a set of states for a new character by updating each state
// for the new character, merging the results, and then removing the
// non-optimal states.
private Collection<State> updateStateListForChar(Iterable<State> states, int index) {
Collection<State> result = new LinkedList<>();
for (State state : states) {
updateStateForChar(state, index, result);
}
return simplifyStates(result);
} | 3.68 |
hadoop_ListResultEntrySchema_withContentLength | /**
* Set the contentLength value.
*
* @param contentLength the contentLength value to set
 * @return the ListResultEntrySchema object itself.
*/
public ListResultEntrySchema withContentLength(final Long contentLength) {
this.contentLength = contentLength;
return this;
} | 3.68 |
querydsl_StringExpression_notEqualsIgnoreCase | /**
* Create a {@code !this.equalsIgnoreCase(str)} expression
*
 * <p>Compares this {@code StringExpression} to the given string, ignoring case
 * considerations.</p>
*
* @param str string
* @return !this.equalsIgnoreCase(str)
* @see java.lang.String#equalsIgnoreCase(String)
*/
public BooleanExpression notEqualsIgnoreCase(String str) {
return equalsIgnoreCase(str).not();
} | 3.68 |
hbase_InclusiveStopFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof InclusiveStopFilter)) {
return false;
}
InclusiveStopFilter other = (InclusiveStopFilter) o;
return Bytes.equals(this.getStopRowKey(), other.getStopRowKey());
} | 3.68 |
flink_ProjectOperator_projectTuple20 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>
ProjectOperator<
T,
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>
projectTuple20() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>
tType =
new TupleTypeInfo<
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>(fTypes);
return new ProjectOperator<
T,
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
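A short, hedged usage sketch of the projection API these generated projectTupleN variants back; env is an assumed ExecutionEnvironment from the DataSet batch API.

    DataSet<Tuple3<Integer, String, Double>> in = env.fromElements(Tuple3.of(1, "a", 2.0));
    DataSet<Tuple2<String, Integer>> out = in.project(1, 0); // selects and reorders fields by index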
hbase_TableSnapshotInputFormatImpl_write | // TODO: We should have ProtobufSerialization in Hadoop, and directly use PB objects instead of
// doing this wrapping with Writables.
@Override
public void write(DataOutput out) throws IOException {
TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder()
.setTable(ProtobufUtil.toTableSchema(htd)).setRegion(ProtobufUtil.toRegionInfo(regionInfo));
for (String location : locations) {
builder.addLocations(location);
}
TableSnapshotRegionSplit split = builder.build();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
split.writeTo(baos);
baos.close();
byte[] buf = baos.toByteArray();
out.writeInt(buf.length);
out.write(buf);
Bytes.writeByteArray(out, Bytes.toBytes(scan));
Bytes.writeByteArray(out, Bytes.toBytes(restoreDir));
} | 3.68 |
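For symmetry, a hedged sketch of the mirror-image readFields that this framing implies; the field names and the inverse conversion helpers are assumptions, not copied from HBase.

    @Override
    public void readFields(DataInput in) throws IOException {
      int len = in.readInt();
      byte[] buf = new byte[len];
      in.readFully(buf);
      TableSnapshotRegionSplit split = TableSnapshotRegionSplit.parser().parseFrom(buf);
      // Assumed inverses of the toTableSchema/toRegionInfo conversions used in write().
      this.htd = ProtobufUtil.toTableDescriptor(split.getTable());
      this.regionInfo = ProtobufUtil.toRegionInfo(split.getRegion());
      this.locations = split.getLocationsList().toArray(new String[0]);
      this.scan = Bytes.toString(Bytes.readByteArray(in));
      this.restoreDir = Bytes.toString(Bytes.readByteArray(in));
    }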
flink_DataExchangeMode_select | /**
* Computes the mode of data exchange to be used for a given execution mode and ship strategy.
* The type of the data exchange depends also on whether this connection has been identified to
* require pipeline breaking for deadlock avoidance.
*
* <ul>
* <li>If the connection is set to be pipeline breaking, this returns the pipeline breaking
* variant of the execution mode {@link
* org.apache.flink.runtime.io.network.DataExchangeMode#getPipelineBreakingExchange(org.apache.flink.api.common.ExecutionMode)}.
* <li>If the data exchange is a simple FORWARD (one-to-one communication), this returns
* {@link
* org.apache.flink.runtime.io.network.DataExchangeMode#getForForwardExchange(org.apache.flink.api.common.ExecutionMode)}.
 *   <li>Otherwise, this returns {@link
* org.apache.flink.runtime.io.network.DataExchangeMode#getForShuffleOrBroadcast(org.apache.flink.api.common.ExecutionMode)}.
* </ul>
*
* @param shipStrategy The ship strategy (FORWARD, PARTITION, BROADCAST, ...) of the runtime
* data exchange.
* @return The data exchange mode for the connection, given the concrete ship strategy.
*/
public static DataExchangeMode select(
ExecutionMode executionMode, ShipStrategyType shipStrategy, boolean breakPipeline) {
if (shipStrategy == null || shipStrategy == ShipStrategyType.NONE) {
throw new IllegalArgumentException("shipStrategy may not be null or NONE");
}
if (executionMode == null) {
            throw new IllegalArgumentException("executionMode may not be null");
}
if (breakPipeline) {
return getPipelineBreakingExchange(executionMode);
} else if (shipStrategy == ShipStrategyType.FORWARD) {
return getForForwardExchange(executionMode);
} else {
return getForShuffleOrBroadcast(executionMode);
}
} | 3.68 |
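A hedged illustration of the three branches; the enum constants are the standard Flink ones, and the concrete mode returned still depends on the ExecutionMode.

    // A pipeline-breaking connection uses the pipeline-breaking variant for the execution mode.
    DataExchangeMode a = DataExchangeMode.select(ExecutionMode.PIPELINED, ShipStrategyType.PARTITION_HASH, true);
    // A plain FORWARD connection uses the forward-exchange rule.
    DataExchangeMode b = DataExchangeMode.select(ExecutionMode.PIPELINED, ShipStrategyType.FORWARD, false);
    // Anything else (hash partition, broadcast, ...) uses the shuffle/broadcast rule.
    DataExchangeMode c = DataExchangeMode.select(ExecutionMode.PIPELINED, ShipStrategyType.BROADCAST, false);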
hbase_ColumnSchemaModel___getBlockcache | /** Returns true if the BLOCKCACHE attribute is present and true */
public boolean __getBlockcache() {
Object o = attrs.get(BLOCKCACHE);
return o != null
? Boolean.parseBoolean(o.toString())
: ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE;
} | 3.68 |