name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hbase_MultiRowRangeFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof MultiRowRangeFilter)) {
return false;
}
MultiRowRangeFilter other = (MultiRowRangeFilter) o;
if (this.rangeList.size() != other.rangeList.size()) return false;
for (int i = 0; i < rangeList.size(); ++i) {
RowRange thisRange = this.rangeList.get(i);
RowRange otherRange = other.rangeList.get(i);
if (
!(Bytes.equals(thisRange.startRow, otherRange.startRow)
&& Bytes.equals(thisRange.stopRow, otherRange.stopRow)
&& (thisRange.startRowInclusive == otherRange.startRowInclusive)
&& (thisRange.stopRowInclusive == otherRange.stopRowInclusive))
) {
return false;
}
}
return true;
} | 3.68 |
hadoop_BlockGrouper_getRequiredNumParityBlocks | /**
* Get required parity blocks count in a BlockGroup.
* @return count of required parity blocks
*/
public int getRequiredNumParityBlocks() {
return schema.getNumParityUnits();
} | 3.68 |
hadoop_DockerContainerDeletionTask_toString | /**
* Convert the DockerContainerDeletionTask to a String representation.
*
* @return String representation of the DockerContainerDeletionTask.
*/
@Override
public String toString() {
StringBuffer sb = new StringBuffer("DockerContainerDeletionTask : ");
sb.append(" id : ").append(this.getTaskId());
sb.append(" containerId : ").append(this.containerId);
return sb.toString().trim();
} | 3.68 |
framework_Upload_getContentLength | /**
* @return the length of the file that is being uploaded
*/
public long getContentLength() {
return length;
} | 3.68 |
dubbo_NetUtils_isPreferredNetworkInterface | /**
* Is preferred {@link NetworkInterface} or not
*
* @param networkInterface {@link NetworkInterface}
* @return <code>true</code> if the display name of the specified {@link NetworkInterface} matches
* the property value from {@link CommonConstants#DUBBO_PREFERRED_NETWORK_INTERFACE},
* otherwise <code>false</code>
*/
public static boolean isPreferredNetworkInterface(NetworkInterface networkInterface) {
String preferredNetworkInterface = System.getProperty(DUBBO_PREFERRED_NETWORK_INTERFACE);
return Objects.equals(networkInterface.getDisplayName(), preferredNetworkInterface);
} | 3.68 |
framework_HasValue_getOldValue | /**
* Returns the value of the source before this value change event
* occurred.
*
* @return the value previously held by the source of this event
*/
public V getOldValue() {
return oldValue;
} | 3.68 |
hudi_NonThrownExecutor_executeSync | /**
* Run the action in a loop and wait for completion.
*/
public void executeSync(ThrowingRunnable<Throwable> action, String actionName, Object... actionParams) {
try {
executor.submit(wrapAction(action, this.exceptionHook, actionName, actionParams)).get();
} catch (InterruptedException e) {
handleException(e, this.exceptionHook, getActionString(actionName, actionParams));
} catch (ExecutionException e) {
// nonfatal exceptions are handled by wrapAction
ExceptionUtils.rethrowIfFatalErrorOrOOM(e.getCause());
}
} | 3.68 |
hudi_AvroSchemaCompatibility_getWriter | /**
* Gets the writer schema that was validated.
*
* @return writer schema that was validated.
*/
public Schema getWriter() {
return mWriter;
} | 3.68 |
morf_AbstractSqlDialectTest_testTruncateTableStatements | /**
* Tests SQL for clearing tables.
*/
@SuppressWarnings("unchecked")
@Test
public void testTruncateTableStatements() {
Table table = metadata.getTable(TEST_TABLE);
compareStatements(
expectedTruncateTableStatements(),
testDialect.truncateTableStatements(table));
} | 3.68 |
flink_Schema_withComment | /** Apply comment to the previous column. */
public Builder withComment(@Nullable String comment) {
if (columns.size() > 0) {
columns.set(
columns.size() - 1, columns.get(columns.size() - 1).withComment(comment));
} else {
throw new IllegalArgumentException(
"Method 'withComment(...)' must be called after a column definition, "
+ "but there is no preceding column defined.");
}
return this;
} | 3.68 |
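A minimal usage sketch of the builder constraint described above, assuming Flink's Table API `Schema` and `DataTypes` (the column names are illustrative): `withComment` attaches to the column declared immediately before it, and calling it before any column hits the exception branch shown.

```java
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class SchemaCommentSketch {
    public static void main(String[] args) {
        Schema schema = Schema.newBuilder()
                .column("user_id", DataTypes.BIGINT())
                .withComment("surrogate key")   // comments the "user_id" column
                .column("name", DataTypes.STRING())
                .withComment("display name")    // comments the "name" column
                .build();
        System.out.println(schema);
        // Schema.newBuilder().withComment("x") would throw IllegalArgumentException,
        // because there is no preceding column definition.
    }
}
```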
flink_RpcEndpoint_validateResourceClosed | /**
* Validate whether all the resources are closed.
*
* @return true if all the resources are closed, otherwise false
*/
boolean validateResourceClosed() {
return mainThreadExecutor.validateScheduledExecutorClosed() && resourceRegistry.isClosed();
} | 3.68 |
flink_WindowedStream_aggregate | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate function. This means
* that the window function typically has only a single value to process when called.
*
* @param aggregateFunction The aggregation function that is used for incremental aggregation.
* @param windowFunction The window function.
* @param accumulatorType Type information for the internal accumulator type of the aggregation
* function
* @param aggregateResultType Type information for the result type of the aggregate function
* @param resultType Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
* @param <ACC> The type of the AggregateFunction's accumulator
* @param <V> The type of AggregateFunction's result, and the WindowFunction's input
* @param <R> The type of the elements in the resulting stream, equal to the WindowFunction's
* result type
*/
@PublicEvolving
public <ACC, V, R> SingleOutputStreamOperator<R> aggregate(
AggregateFunction<T, ACC, V> aggregateFunction,
ProcessWindowFunction<V, R, K, W> windowFunction,
TypeInformation<ACC> accumulatorType,
TypeInformation<V> aggregateResultType,
TypeInformation<R> resultType) {
checkNotNull(aggregateFunction, "aggregateFunction");
checkNotNull(windowFunction, "windowFunction");
checkNotNull(accumulatorType, "accumulatorType");
checkNotNull(aggregateResultType, "aggregateResultType");
checkNotNull(resultType, "resultType");
if (aggregateFunction instanceof RichFunction) {
throw new UnsupportedOperationException(
"This aggregate function cannot be a RichFunction.");
}
// clean the closures
windowFunction = input.getExecutionEnvironment().clean(windowFunction);
aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction);
final String opName = builder.generateOperatorName();
final String opDescription =
builder.generateOperatorDescription(aggregateFunction, windowFunction);
OneInputStreamOperator<T, R> operator =
builder.aggregate(aggregateFunction, windowFunction, accumulatorType);
return input.transform(opName, resultType, operator).setDescription(opDescription);
} | 3.68 |
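A hedged sketch of the typical call pattern, using the simpler overload that derives the type information automatically; the window assigner, the per-key count logic, and the input type are illustrative assumptions, not taken from the snippet.

```java
import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

public class WindowAggregateSketch {
    // Counts events per key; the ProcessWindowFunction only sees the single pre-aggregated value.
    static SingleOutputStreamOperator<Tuple2<String, Long>> countPerKey(DataStream<String> events) {
        return events
                .keyBy(e -> e)
                .window(TumblingProcessingTimeWindows.of(Time.seconds(10)))
                .aggregate(
                        new AggregateFunction<String, Long, Long>() {
                            @Override public Long createAccumulator() { return 0L; }
                            @Override public Long add(String value, Long acc) { return acc + 1; }
                            @Override public Long getResult(Long acc) { return acc; }
                            @Override public Long merge(Long a, Long b) { return a + b; }
                        },
                        new ProcessWindowFunction<Long, Tuple2<String, Long>, String, TimeWindow>() {
                            @Override
                            public void process(String key, Context ctx, Iterable<Long> counts,
                                                Collector<Tuple2<String, Long>> out) {
                                // incremental aggregation leaves exactly one value per window
                                out.collect(Tuple2.of(key, counts.iterator().next()));
                            }
                        });
    }
}
```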
hudi_HoodieAvroUtils_rewriteRecord | /**
* Given an Avro record with a given schema, rewrites it into the new schema while setting fields only from the new
* schema.
* <p>
* NOTE: This method is rewriting every record's field that is record itself recursively. It's
* caller's responsibility to make sure that no unnecessary re-writing occurs (by preemptively
* checking whether the record does require re-writing to adhere to the new schema)
* <p>
* NOTE: Here, the assumption is that you cannot go from an evolved schema (schema with (N) fields)
* to an older schema (schema with (N-1) fields). All fields present in the older record schema MUST be present in the
* new schema and the default/existing values are carried over.
* <p>
* This particular method does the following:
* <ol>
* <li>Create a new empty GenericRecord with the new schema.</li>
* <li>For GenericRecord, copy over the data from the old schema to the new schema or set default values for all
* fields of this transformed schema</li>
* <li>For SpecificRecord, hoodie_metadata_fields have a special treatment (see below)</li>
* </ol>
* <p>
* For SpecificRecord we ignore Hudi Metadata fields, because for code generated
* avro classes (HoodieMetadataRecord), the avro record is a SpecificBaseRecord type instead of a GenericRecord.
* SpecificBaseRecord throws null pointer exception for record.get(name) if name is not present in the schema of the
* record (which happens when converting a SpecificBaseRecord without hoodie_metadata_fields to a new record with it).
* In this case, we do NOT set the defaults for the hoodie_metadata_fields explicitly, instead, the new record assumes
* the default defined in the avro schema itself.
* TODO: See if we can always pass GenericRecord instead of SpecificBaseRecord in some cases.
*/
public static GenericRecord rewriteRecord(GenericRecord oldRecord, Schema newSchema) {
GenericRecord newRecord = new GenericData.Record(newSchema);
boolean isSpecificRecord = oldRecord instanceof SpecificRecordBase;
for (Schema.Field f : newSchema.getFields()) {
if (!(isSpecificRecord && isMetadataField(f.name()))) {
copyOldValueOrSetDefault(oldRecord, newRecord, f);
}
}
return newRecord;
} | 3.68 |
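A minimal sketch of the rewrite described above, assuming Avro and `org.apache.hudi.avro.HoodieAvroUtils` are on the classpath; the record and field names are made up for illustration. The old field is copied over and the field added by the evolved schema falls back to its Avro default.

```java
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.hudi.avro.HoodieAvroUtils;

public class RewriteRecordSketch {
    public static void main(String[] args) {
        Schema oldSchema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                        + "{\"name\":\"id\",\"type\":\"long\"}]}");
        Schema newSchema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                        + "{\"name\":\"id\",\"type\":\"long\"},"
                        + "{\"name\":\"country\",\"type\":[\"null\",\"string\"],\"default\":null}]}");

        GenericRecord oldRecord = new GenericData.Record(oldSchema);
        oldRecord.put("id", 42L);

        // Copies "id" over and sets the schema default (null) for the new "country" field.
        GenericRecord rewritten = HoodieAvroUtils.rewriteRecord(oldRecord, newSchema);
        System.out.println(rewritten); // {"id": 42, "country": null}
    }
}
```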
framework_VTextField_updateFieldContent | /** For internal use only. May be removed or replaced in the future. */
public void updateFieldContent(final String text) {
setPrompting(inputPrompt != null && focusedTextField != this
&& (text.equals("")));
String fieldValue;
if (prompting) {
fieldValue = isReadOnly() ? "" : inputPrompt;
addStyleDependentName(CLASSNAME_PROMPT);
} else {
fieldValue = text;
removeStyleDependentName(CLASSNAME_PROMPT);
}
setText(fieldValue);
lastTextChangeString = valueBeforeEdit = text;
valueBeforeEditIsSynced = true;
} | 3.68 |
hbase_HFileArchiver_resolveAndArchiveFile | /**
* Attempt to archive the passed in file to the archive directory.
* <p>
* If the same file already exists in the archive, it is moved to a timestamped directory under
* the archive directory and the new file is put in its place.
* @param archiveDir {@link Path} to the directory that stores the archives of the hfiles
* @param currentFile {@link Path} to the original HFile that will be archived
* @param archiveStartTime time the archiving started, to resolve naming conflicts
* @return <tt>true</tt> if the file is successfully archived. <tt>false</tt> if there was a
* problem, but the operation still completed.
* @throws IOException on failure to complete {@link FileSystem} operations.
*/
private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile,
String archiveStartTime) throws IOException {
// build path as it should be in the archive
String filename = currentFile.getName();
Path archiveFile = new Path(archiveDir, filename);
FileSystem fs = currentFile.getFileSystem();
// An existing destination file in the archive is unexpected, but we handle it here.
if (fs.exists(archiveFile)) {
if (!fs.exists(currentFile.getPath())) {
// If the file already exists in the archive, and there is no current file to archive, then
// assume that the file in archive is correct. This is an unexpected situation, suggesting a
// race condition or split brain.
// In HBASE-26718 this was found when compaction incorrectly happened during warmupRegion.
LOG.warn("{} exists in archive. Attempted to archive nonexistent file {}.", archiveFile,
currentFile);
// We return success to match existing behavior in this method, where FileNotFoundException
// in moveAndClose is ignored.
return true;
}
// There is a conflict between the current file and the already existing archived file.
// Move the archived file to a timestamped backup. This is a really, really unlikely
// situation, where we get the same name for the existing file, but is included just for that
// 1 in trillion chance. We are potentially incurring data loss in the archive directory if
// the files are not identical. The timestamped backup will be cleaned by HFileCleaner as it
// has no references.
FileStatus curStatus = fs.getFileStatus(currentFile.getPath());
FileStatus archiveStatus = fs.getFileStatus(archiveFile);
long curLen = curStatus.getLen();
long archiveLen = archiveStatus.getLen();
long curMtime = curStatus.getModificationTime();
long archiveMtime = archiveStatus.getModificationTime();
if (curLen != archiveLen) {
LOG.error(
"{} already exists in archive with different size than current {}."
+ " archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}",
archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime);
throw new IOException(
archiveFile + " already exists in archive with different size" + " than " + currentFile);
}
LOG.error(
"{} already exists in archive, moving to timestamped backup and overwriting"
+ " current {}. archiveLen: {} currentLen: {} archiveMtime: {} currentMtime: {}",
archiveFile, currentFile, archiveLen, curLen, archiveMtime, curMtime);
// move the archive file to the stamped backup
Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
if (!fs.rename(archiveFile, backedupArchiveFile)) {
LOG.error("Could not rename archive file to backup: " + backedupArchiveFile
+ ", deleting existing file in favor of newer.");
// try to delete the existing file, if we can't rename it
if (!fs.delete(archiveFile, false)) {
throw new IOException("Couldn't delete existing archive file (" + archiveFile
+ ") or rename it to the backup file (" + backedupArchiveFile
+ ") to make room for similarly named file.");
}
} else {
LOG.info("Backed up archive file from {} to {}.", archiveFile, backedupArchiveFile);
}
}
LOG.trace("No existing file in archive for {}, free to archive original file.", archiveFile);
// at this point, we should have a free spot for the archive file
boolean success = false;
for (int i = 0; !success && i < DEFAULT_RETRIES_NUMBER; ++i) {
if (i > 0) {
// Ensure that the archive directory exists.
// The previous "move to archive" operation has failed probably because
// the cleaner has removed our archive directory (HBASE-7643).
// (we're in a retry loop, so don't worry too much about the exception)
try {
if (!fs.exists(archiveDir)) {
if (fs.mkdirs(archiveDir)) {
LOG.debug("Created archive directory {}", archiveDir);
}
}
} catch (IOException e) {
LOG.warn("Failed to create directory {}", archiveDir, e);
}
}
try {
success = currentFile.moveAndClose(archiveFile);
} catch (FileNotFoundException fnfe) {
LOG.warn("Failed to archive " + currentFile
+ " because it does not exist! Skipping and continuing on.", fnfe);
success = true;
} catch (IOException e) {
success = false;
// When HFiles are placed on a filesystem other than HDFS a rename operation can be a
// non-atomic file copy operation. It can take a long time to copy a large hfile and if
// interrupted there may be a partially copied file present at the destination. We must
// remove the partially copied file, if any, or otherwise the archive operation will fail
// indefinitely from this point.
LOG.warn("Failed to archive " + currentFile + " on try #" + i, e);
try {
fs.delete(archiveFile, false);
} catch (FileNotFoundException fnfe) {
// This case is fine.
} catch (IOException ee) {
// Complain about other IO exceptions
LOG.warn("Failed to clean up from failure to archive " + currentFile + " on try #" + i,
ee);
}
}
}
if (!success) {
LOG.error("Failed to archive " + currentFile);
return false;
}
LOG.debug("Archived from {} to {}", currentFile, archiveFile);
return true;
} | 3.68 |
morf_SchemaUtils_indexes | /**
* @see org.alfasoftware.morf.metadata.SchemaUtils.TableBuilder#indexes(java.lang.Iterable)
*/
@Override
public TableBuilder indexes(Iterable<? extends Index> indexes) {
return new TableBuilderImpl(getName(), columns(), indexes, isTemporary());
} | 3.68 |
hbase_HFileCorruptionChecker_createQuarantinePath | /**
* Given a path, generates a new path to where we move a corrupted hfile (bad trailer, no
* trailer). Path to a corrupt hfile (assumes that it is HBASE_DIR/ table /region/cf/file)
* @return path to where corrupted files are stored. This should be
* HBASE_DIR/.corrupt/table/region/cf/file.
*/
Path createQuarantinePath(Path hFile) throws IOException {
// extract the normal dirs structure
Path cfDir = hFile.getParent();
Path regionDir = cfDir.getParent();
Path tableDir = regionDir.getParent();
// build up the corrupted dirs structure
Path corruptBaseDir = new Path(CommonFSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
if (conf.get("hbase.hfile.quarantine.dir") != null) {
LOG.warn("hbase.hfile.quarantine.dir is deprecated. Default to " + corruptBaseDir);
}
Path corruptTableDir = new Path(corruptBaseDir, tableDir.getName());
Path corruptRegionDir = new Path(corruptTableDir, regionDir.getName());
Path corruptFamilyDir = new Path(corruptRegionDir, cfDir.getName());
Path corruptHfile = new Path(corruptFamilyDir, hFile.getName());
return corruptHfile;
} | 3.68 |
flink_CatalogManager_qualifyIdentifier | /**
* Returns the full name of the given table path, this name may be padded with current
* catalog/database name based on the {@code identifier's} length.
*
* @param identifier an unresolved identifier
* @return a fully qualified object identifier
*/
public ObjectIdentifier qualifyIdentifier(UnresolvedIdentifier identifier) {
return ObjectIdentifier.of(
identifier
.getCatalogName()
.orElseGet(
() -> {
final String currentCatalog = getCurrentCatalog();
if (StringUtils.isNullOrWhitespaceOnly(currentCatalog)) {
throw new ValidationException(
"A current catalog has not been set. Please use a"
+ " fully qualified identifier (such as"
+ " 'my_catalog.my_database.my_table') or"
+ " set a current catalog using"
+ " 'USE CATALOG my_catalog'.");
}
return currentCatalog;
}),
identifier
.getDatabaseName()
.orElseGet(
() -> {
final String currentDatabase = getCurrentDatabase();
if (StringUtils.isNullOrWhitespaceOnly(currentDatabase)) {
throw new ValidationException(
"A current database has not been set. Please use a"
+ " fully qualified identifier (such as"
+ " 'my_database.my_table' or"
+ " 'my_catalog.my_database.my_table') or"
+ " set a current database using"
+ " 'USE my_database'.");
}
return currentDatabase;
}),
identifier.getObjectName());
} | 3.68 |
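A short illustrative sketch of the padding behaviour; the catalog and database names are hypothetical and `catalogManager` is assumed to be an initialized `CatalogManager`.

```java
// A partial identifier is padded with the current catalog and database.
UnresolvedIdentifier partial = UnresolvedIdentifier.of("my_table");
ObjectIdentifier full = catalogManager.qualifyIdentifier(partial);
// e.g. `default_catalog`.`default_database`.`my_table` when those are the current defaults
System.out.println(full.asSerializableString());

// An already fully qualified identifier is passed through unchanged.
UnresolvedIdentifier qualified = UnresolvedIdentifier.of("cat1", "db1", "my_table");
System.out.println(catalogManager.qualifyIdentifier(qualified).asSerializableString());
```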
hadoop_BlockStorageMovementAttemptedItems_start | /**
* Starts the monitor thread.
*/
public synchronized void start() {
monitorRunning = true;
timerThread = new Daemon(new BlocksStorageMovementAttemptMonitor());
timerThread.setName("BlocksStorageMovementAttemptMonitor");
timerThread.start();
} | 3.68 |
hudi_WriteOperationType_isDataChange | /**
* Whether the operation changes the dataset.
*/
public static boolean isDataChange(WriteOperationType operation) {
return operation == WriteOperationType.INSERT
|| operation == WriteOperationType.UPSERT
|| operation == WriteOperationType.UPSERT_PREPPED
|| operation == WriteOperationType.DELETE
|| operation == WriteOperationType.DELETE_PREPPED
|| operation == WriteOperationType.BULK_INSERT
|| operation == WriteOperationType.DELETE_PARTITION
|| operation == WriteOperationType.INSERT_OVERWRITE
|| operation == WriteOperationType.INSERT_OVERWRITE_TABLE
|| operation == WriteOperationType.BOOTSTRAP;
} | 3.68 |
flink_CheckpointConfig_enableExternalizedCheckpoints | /**
* Sets the mode for externalized checkpoint clean-up. Externalized checkpoints will be enabled
* automatically unless the mode is set to {@link
* ExternalizedCheckpointCleanup#NO_EXTERNALIZED_CHECKPOINTS}.
*
* <p>Externalized checkpoints write their meta data out to persistent storage and are
* <strong>not</strong> automatically cleaned up when the owning job fails or is suspended
* (terminating with job status {@link JobStatus#FAILED} or {@link JobStatus#SUSPENDED}). In
* this case, you have to manually clean up the checkpoint state, both the meta data and actual
* program state.
*
* <p>The {@link ExternalizedCheckpointCleanup} mode defines how an externalized checkpoint
* should be cleaned up on job cancellation. If you choose to retain externalized checkpoints on
* cancellation you have to handle checkpoint clean-up manually when you cancel the job as well
* (terminating with job status {@link JobStatus#CANCELED}).
*
* <p>The target directory for externalized checkpoints is configured via {@link
* CheckpointingOptions#CHECKPOINTS_DIRECTORY}.
*
* @param cleanupMode Externalized checkpoint clean-up behaviour.
* @deprecated use {@link #setExternalizedCheckpointCleanup(ExternalizedCheckpointCleanup)}
* instead.
*/
@PublicEvolving
@Deprecated
public void enableExternalizedCheckpoints(ExternalizedCheckpointCleanup cleanupMode) {
setExternalizedCheckpointCleanup(cleanupMode);
} | 3.68 |
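A minimal sketch of the non-deprecated replacement mentioned in the Javadoc, assuming a standard `StreamExecutionEnvironment` setup.

```java
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RetainedCheckpointsSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(60_000); // checkpoint every minute
        // Preferred form: keep checkpoint metadata when the job is cancelled,
        // so the job can later be restored from the retained checkpoint.
        env.getCheckpointConfig()
                .setExternalizedCheckpointCleanup(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    }
}
```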
hbase_MasterRpcServices_createConfigurationSubset | /**
* @return Subset of configuration to pass initializing regionservers: e.g. the filesystem to use
* and root directory to use.
*/
private RegionServerStartupResponse.Builder createConfigurationSubset() {
RegionServerStartupResponse.Builder resp =
addConfig(RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR);
resp = addConfig(resp, "fs.defaultFS");
return addConfig(resp, "hbase.master.info.port");
} | 3.68 |
framework_GridElement_getSubPart | /**
* Helper function to get Grid subparts wrapped correctly
*
* @param subPartSelector
* SubPart to be used in ComponentLocator
* @return SubPart element wrapped in TestBenchElement class
*/
private TestBenchElement getSubPart(String subPartSelector) {
return (TestBenchElement) findElement(By.vaadin(subPartSelector));
} | 3.68 |
framework_AbstractComponent_fireComponentEvent | /**
* Emits the component event. It is transmitted to all registered listeners
* interested in such events.
*/
protected void fireComponentEvent() {
fireEvent(new Component.Event(this));
} | 3.68 |
flink_ContinuousProcessingTimeTrigger_of | /**
* Creates a trigger that continuously fires based on the given interval.
*
* @param interval The time interval at which to fire.
* @param <W> The type of {@link Window Windows} on which this trigger can operate.
*/
public static <W extends Window> ContinuousProcessingTimeTrigger<W> of(Time interval) {
return new ContinuousProcessingTimeTrigger<>(interval.toMilliseconds());
} | 3.68 |
flink_KubernetesStateHandleStore_clearEntries | /**
* Remove all the filtered keys in the ConfigMap.
*
* @throws Exception when removing the keys failed
*/
@Override
public void clearEntries() throws Exception {
updateConfigMap(
configMap -> {
configMap.getData().keySet().removeIf(configMapKeyFilter);
return Optional.of(configMap);
})
.get();
} | 3.68 |
pulsar_LoadSimulationController_change | // Change producer settings for a given topic and JCommander arguments.
private void change(final ShellArguments arguments, final String topic, final int client) throws Exception {
outputStreams[client].write(LoadSimulationClient.CHANGE_COMMAND);
writeProducerOptions(outputStreams[client], arguments, topic);
outputStreams[client].flush();
} | 3.68 |
hbase_MasterQuotaManager_removeRegionSizesForTable | /**
* Removes each region size entry where the RegionInfo references the provided TableName.
* @param tableName tableName.
*/
public void removeRegionSizesForTable(TableName tableName) {
regionSizes.keySet().removeIf(regionInfo -> regionInfo.getTable().equals(tableName));
} | 3.68 |
framework_FileParameters_setSize | /**
* Sets the file size.
*
* @param size
* Size of the file.
*/
public void setSize(long size) {
this.size = size;
} | 3.68 |
dubbo_ClientStream_onComplete | /**
* Callback when request completed.
*
* @param status response status
* @param attachments attachments received from remote peer
* @param reserved triple protocol reserved data
*/
default void onComplete(
TriRpcStatus status,
Map<String, Object> attachments,
Map<String, String> reserved,
boolean isReturnTriException) {
onComplete(status, attachments);
} | 3.68 |
hudi_BaseRollbackHelper_deleteFiles | /**
* Common method used for cleaning out files during rollback.
*/
protected List<HoodieRollbackStat> deleteFiles(HoodieTableMetaClient metaClient, List<String> filesToBeDeleted, boolean doDelete) throws IOException {
return filesToBeDeleted.stream().map(fileToDelete -> {
String basePath = metaClient.getBasePath();
try {
Path fullDeletePath = new Path(fileToDelete);
String partitionPath = FSUtils.getRelativePartitionPath(new Path(basePath), fullDeletePath.getParent());
boolean isDeleted = true;
if (doDelete) {
try {
isDeleted = metaClient.getFs().delete(fullDeletePath);
} catch (FileNotFoundException e) {
// if first rollback attempt failed and retried again, chances that some files are already deleted.
isDeleted = true;
}
}
return HoodieRollbackStat.newBuilder()
.withPartitionPath(partitionPath)
.withDeletedFileResult(fullDeletePath.toString(), isDeleted)
.build();
} catch (IOException e) {
LOG.error("Fetching file status for ");
throw new HoodieIOException("Fetching file status for " + fileToDelete + " failed ", e);
}
}).collect(Collectors.toList());
} | 3.68 |
hbase_Pair_setFirst | /**
* Replace the first element of the pair.
* @param a operand
*/
public void setFirst(T1 a) {
this.first = a;
} | 3.68 |
pulsar_ManagedLedgerImpl_delete | /**
* Delete this ManagedLedger completely from the system.
*
* @throws Exception
*/
@Override
public void delete() throws InterruptedException, ManagedLedgerException {
final CountDownLatch counter = new CountDownLatch(1);
final AtomicReference<ManagedLedgerException> exception = new AtomicReference<>();
asyncDelete(new DeleteLedgerCallback() {
@Override
public void deleteLedgerComplete(Object ctx) {
counter.countDown();
}
@Override
public void deleteLedgerFailed(ManagedLedgerException e, Object ctx) {
exception.set(e);
counter.countDown();
}
}, null);
if (!counter.await(AsyncOperationTimeoutSeconds, TimeUnit.SECONDS)) {
throw new ManagedLedgerException("Timeout during managed ledger delete operation");
}
if (exception.get() != null) {
log.error("[{}] Error deleting managed ledger", name, exception.get());
throw exception.get();
}
} | 3.68 |
framework_FocusableComplexPanel_setFocus | /**
* Sets/Removes the keyboard focus to the panel.
*
* @param focus
* If set to true then the focus is moved to the panel, if set to
* false the focus is removed
*/
public void setFocus(boolean focus) {
if (focus) {
FocusImpl.getFocusImplForPanel().focus(getElement());
} else {
FocusImpl.getFocusImplForPanel().blur(getElement());
}
} | 3.68 |
framework_VScrollTable_updatePageLength | /**
* Determines the pagelength when the table height is fixed.
*/
public void updatePageLength() {
// Only update if visible and enabled
if (!isVisible() || !enabled) {
return;
}
if (scrollBody == null) {
return;
}
if (isDynamicHeight()) {
return;
}
int rowHeight = (int) Math.round(scrollBody.getRowHeight());
int bodyH = scrollBodyPanel.getOffsetHeight();
int rowsAtOnce = bodyH / rowHeight;
boolean anotherPartlyVisible = ((bodyH % rowHeight) != 0);
if (anotherPartlyVisible) {
rowsAtOnce++;
}
if (pageLength != rowsAtOnce) {
pageLength = rowsAtOnce;
client.updateVariable(paintableId, "pagelength", pageLength, false);
if (!rendering) {
int currentlyVisible = scrollBody.getLastRendered()
- scrollBody.getFirstRendered();
if (currentlyVisible < pageLength
&& currentlyVisible < totalRows) {
// shake scrollpanel to fill empty space
scrollBodyPanel.setScrollPosition(scrollTop + 1);
scrollBodyPanel.setScrollPosition(scrollTop - 1);
}
sizeNeedsInit = true;
}
}
} | 3.68 |
flink_FlinkFilterJoinRule_validateJoinFilters | /**
* Validates that target execution framework can satisfy join filters.
*
* <p>If the join filter cannot be satisfied (for example, if it is {@code l.c1 > r.c2} and the
* join only supports equi-join), removes the filter from {@code joinFilters} and adds it to
* {@code aboveFilters}.
*
* <p>The default implementation does nothing; i.e. the join can handle all conditions.
*
* @param aboveFilters Filter above Join
* @param joinFilters Filters in join condition
* @param join Join
* @param joinType JoinRelType could be different from type in Join due to outer join
* simplification.
*/
protected void validateJoinFilters(
List<RexNode> aboveFilters,
List<RexNode> joinFilters,
Join join,
JoinRelType joinType) {
final Iterator<RexNode> filterIter = joinFilters.iterator();
while (filterIter.hasNext()) {
RexNode exp = filterIter.next();
// Do not pull up filter conditions for semi/anti join.
if (!config.getPredicate().apply(join, joinType, exp) && joinType.projectsRight()) {
aboveFilters.add(exp);
filterIter.remove();
}
}
} | 3.68 |
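A hedged sketch of what an engine-specific override might look like if only equi-join conditions were supported; `isEquiCondition` is a hypothetical helper, not part of the rule.

```java
// Sketch only: non-equi conditions are pushed above the join instead of staying in it.
@Override
protected void validateJoinFilters(
        List<RexNode> aboveFilters,
        List<RexNode> joinFilters,
        Join join,
        JoinRelType joinType) {
    final Iterator<RexNode> it = joinFilters.iterator();
    while (it.hasNext()) {
        RexNode exp = it.next();
        if (!isEquiCondition(exp)) { // hypothetical helper
            aboveFilters.add(exp);
            it.remove();
        }
    }
}
```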
hadoop_DiskBalancerWorkItem_getBlocksCopied | /**
* Returns number of blocks copied for this DiskBalancerWorkItem.
*
* @return long count of blocks.
*/
public long getBlocksCopied() {
return blocksCopied;
} | 3.68 |
querydsl_NumberExpression_floor | /**
* Create a {@code floor(this)} expression
*
* <p>Returns the largest (closest to positive infinity)
* {@code double} value that is less than or equal to the
* argument and is equal to a mathematical integer.</p>
*
* @return floor(this)
* @see java.lang.Math#floor(double)
*/
public NumberExpression<T> floor() {
if (floor == null) {
floor = Expressions.numberOperation(getType(), MathOps.FLOOR, mixin);
}
return floor;
} | 3.68 |
flink_PartialCachingAsyncLookupProvider_of | /**
* Build a {@link PartialCachingAsyncLookupProvider} from the specified {@link
* AsyncLookupFunction} and {@link LookupCache}.
*/
static PartialCachingAsyncLookupProvider of(
AsyncLookupFunction asyncLookupFunction, LookupCache cache) {
return new PartialCachingAsyncLookupProvider() {
@Override
public LookupCache getCache() {
return cache;
}
@Override
public AsyncLookupFunction createAsyncLookupFunction() {
return asyncLookupFunction;
}
};
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSelectEvery | /**
* @return The expected sql.
*/
protected String expectedSelectEvery() {
return "SELECT MIN(booleanField) FROM " + tableName(TEST_TABLE);
} | 3.68 |
morf_OracleDialect_getSqlForIsNull | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForIsNull(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForIsNull(Function function) {
return "nvl(" + getSqlFrom(function.getArguments().get(0)) + ", " + getSqlFrom(function.getArguments().get(1)) + ") ";
} | 3.68 |
flink_StringUtils_toQuotedListString | /**
* Generates a string containing a comma-separated list of values in double-quotes. Uses
* lower-cased values returned from {@link Object#toString()} method for each element in the
* given array. Null values are skipped.
*
* @param values array of elements for the list
* @return The string with quoted list of elements
*/
public static String toQuotedListString(Object[] values) {
return Arrays.stream(values)
.filter(Objects::nonNull)
.map(v -> v.toString().toLowerCase())
.collect(Collectors.joining(", ", "\"", "\""));
} | 3.68 |
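A small worked example of the resulting format: null values are skipped, values are lower-cased, and the whole list is wrapped in a single pair of double quotes.

```java
Object[] values = {"FOO", null, "Bar", 42};
String s = StringUtils.toQuotedListString(values);
System.out.println(s); // prints "foo, bar, 42" (one pair of quotes around the whole list)
```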
framework_MenuBar_setScrollEnabled | /**
* Sets the menu scroll enabled or disabled.
*
* @since 7.2.6
* @param enabled
* the enabled state of the scroll.
*/
public void setScrollEnabled(boolean enabled) {
if (enabled) {
if (vertical) {
outer.getStyle().setOverflowY(Overflow.AUTO);
} else {
outer.getStyle().setOverflowX(Overflow.AUTO);
}
} else {
if (vertical) {
outer.getStyle().clearOverflowY();
} else {
outer.getStyle().clearOverflowX();
}
}
} | 3.68 |
framework_DataCommunicator_getInMemorySorting | /**
* Returns the {@link Comparator} to use with in-memory sorting.
*
* @return comparator used to sort data
* @since 8.0.6
*/
public Comparator<T> getInMemorySorting() {
return inMemorySorting;
} | 3.68 |
hbase_BackupManager_close | /**
* Stop all the work of backup.
*/
@Override
public void close() {
if (systemTable != null) {
try {
systemTable.close();
} catch (Exception e) {
LOG.error(e.toString(), e);
}
}
} | 3.68 |
hmily_SwaggerConfig_api | /**
* Api docket.
*
* @return the docket
*/
@Bean
public Docket api() {
return new Docket(DocumentationType.SWAGGER_2).apiInfo(apiInfo()).select().apis(RequestHandlerSelectors.withClassAnnotation(RestController.class))
// .paths(paths())
.build().pathMapping("/").directModelSubstitute(LocalDate.class, String.class)
.genericModelSubstitutes(ResponseEntity.class).useDefaultResponseMessages(false)
.globalResponseMessage(RequestMethod.GET, newArrayList(new ResponseMessageBuilder().code(500).message("500 message")
.responseModel(new ModelRef("Error")).build()));
} | 3.68 |
framework_TreeGridElement_getExpandElement | /**
* Gets the expand/collapse element for the given row.
*
* @param rowIndex
* 0-based row index
* @param hierarchyColumnIndex
* 0-based index of the hierarchy column
* @return the {@code span} element that is clicked for expanding/collapsing
*         a row
* @throws NoSuchElementException
* if there is no expand element for this row
*/
public WebElement getExpandElement(int rowIndex, int hierarchyColumnIndex) {
return getCell(rowIndex, hierarchyColumnIndex)
.findElement(By.className("v-treegrid-expander"));
} | 3.68 |
hbase_BlockIOUtils_builderFromContext | /**
* Construct a fresh {@link AttributesBuilder} from the provided {@link Context}, populated with
* the relevant attributes provided by {@link HFileContextAttributesBuilderConsumer#CONTEXT_KEY}.
*/
private static AttributesBuilder builderFromContext(Context context) {
final AttributesBuilder attributesBuilder = Attributes.builder();
Optional.ofNullable(context)
.map(val -> val.get(HFileContextAttributesBuilderConsumer.CONTEXT_KEY))
.ifPresent(c -> c.accept(attributesBuilder));
return attributesBuilder;
} | 3.68 |
MagicPlugin_CompatibilityUtilsBase_checkChunk | /**
* Take care when setting generate to false: the chunk will load but not show as loaded
*/
@Override
public boolean checkChunk(World world, int chunkX, int chunkZ, boolean generate) {
if (!world.isChunkLoaded(chunkX, chunkZ)) {
loadChunk(world, chunkX, chunkZ, generate);
return false;
}
return isReady(world.getChunkAt(chunkX, chunkZ));
} | 3.68 |
flink_DeclarativeAggregateFunction_operands | /**
* Args of accumulate and retract, the input value (usually obtained from a new arrived data).
*/
public final UnresolvedReferenceExpression[] operands() {
int operandCount = operandCount();
Preconditions.checkState(
operandCount >= 0, "inputCount must be greater than or equal to 0.");
UnresolvedReferenceExpression[] ret = new UnresolvedReferenceExpression[operandCount];
for (int i = 0; i < operandCount; i++) {
String name = String.valueOf(i);
validateOperandName(name);
ret[i] = unresolvedRef(name);
}
return ret;
} | 3.68 |
framework_IndexedContainer_generateId | /**
* Generates a unique identifier for use as an item id. Guarantees that the
* generated id is not currently used as an id.
*
* @return
*/
private Serializable generateId() {
Serializable id;
do {
id = Integer.valueOf(nextGeneratedItemId++);
} while (items.containsKey(id));
return id;
} | 3.68 |
hadoop_YarnVersionInfo_getDate | /**
* The date that YARN was compiled.
* @return the compilation date in unix date format
*/
public static String getDate() {
return YARN_VERSION_INFO._getDate();
} | 3.68 |
flink_LogicalSlot_releaseSlot | /**
* Releases this slot.
*
* @return Future which is completed once the slot has been released, in case of a failure it is
* completed exceptionally
* @deprecated Added because the actual releaseSlot method was extended with a cause parameter.
*/
default CompletableFuture<?> releaseSlot() {
return releaseSlot(null);
} | 3.68 |
flink_JobGraph_getMaximumParallelism | /**
* Gets the maximum parallelism of all operations in this job graph.
*
* @return The maximum parallelism of this job graph
*/
public int getMaximumParallelism() {
int maxParallelism = -1;
for (JobVertex vertex : taskVertices.values()) {
maxParallelism = Math.max(vertex.getParallelism(), maxParallelism);
}
return maxParallelism;
} | 3.68 |
flink_TimestampData_fromInstant | /**
* Creates an instance of {@link TimestampData} from an instance of {@link Instant}.
*
* @param instant an instance of {@link Instant}
*/
public static TimestampData fromInstant(Instant instant) {
long epochSecond = instant.getEpochSecond();
int nanoSecond = instant.getNano();
long millisecond = epochSecond * 1_000 + nanoSecond / 1_000_000;
int nanoOfMillisecond = nanoSecond % 1_000_000;
return new TimestampData(millisecond, nanoOfMillisecond);
} | 3.68 |
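A quick worked example of the millisecond/nanosecond split performed above.

```java
// epochSecond = 1, nano = 1_234_567:
//   millisecond       = 1 * 1_000 + 1_234_567 / 1_000_000 = 1_001
//   nanoOfMillisecond = 1_234_567 % 1_000_000             = 234_567
TimestampData ts = TimestampData.fromInstant(java.time.Instant.ofEpochSecond(1, 1_234_567));
```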
hbase_HMobStore_resolve | /**
* Reads the cell from the mob file.
* @param reference The cell found in the HBase, its value is a path to a mob
* file.
* @param cacheBlocks Whether the scanner should cache blocks.
* @param readPt the read point.
* @param readEmptyValueOnMobCellMiss Whether return null value when the mob file is missing or
* corrupt.
* @return The cell found in the mob file.
*/
public MobCell resolve(Cell reference, boolean cacheBlocks, long readPt,
boolean readEmptyValueOnMobCellMiss) throws IOException {
MobCell mobCell = null;
if (MobUtils.hasValidMobRefCellValue(reference)) {
String fileName = MobUtils.getMobFileName(reference);
Optional<TableName> tableName = MobUtils.getTableName(reference);
if (tableName.isPresent()) {
List<Path> locations = getLocations(tableName.get());
mobCell = readCell(locations, fileName, reference, cacheBlocks, readPt,
readEmptyValueOnMobCellMiss);
}
}
if (mobCell == null) {
LOG.warn("The Cell result is null, assemble a new Cell with the same row,family,"
+ "qualifier,timestamp,type and tags but with an empty value to return.");
Cell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
.setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength())
.setFamily(reference.getFamilyArray(), reference.getFamilyOffset(),
reference.getFamilyLength())
.setQualifier(reference.getQualifierArray(), reference.getQualifierOffset(),
reference.getQualifierLength())
.setTimestamp(reference.getTimestamp()).setType(reference.getTypeByte())
.setValue(HConstants.EMPTY_BYTE_ARRAY)
.setTags(reference.getTagsArray(), reference.getTagsOffset(), reference.getTagsLength())
.build();
mobCell = new MobCell(cell);
}
return mobCell;
} | 3.68 |
graphhopper_VectorTile_getGeometryCount | /**
* <pre>
* Contains a stream of commands and parameters (vertices).
* A detailed description on geometry encoding is located in
* section 4.3 of the specification.
* </pre>
*
* <code>repeated uint32 geometry = 4 [packed = true];</code>
*/
public int getGeometryCount() {
return geometry_.size();
} | 3.68 |
flink_ExternalTypeInfo_of | /**
* Creates type information for a {@link DataType} that is possibly represented by internal data
* structures but serialized and deserialized into external data structures.
*
* @param isInternalInput allows for a non-bidirectional serializer from internal to external
*/
public static <T> ExternalTypeInfo<T> of(DataType dataType, boolean isInternalInput) {
final TypeSerializer<T> serializer =
createExternalTypeSerializer(dataType, isInternalInput);
return new ExternalTypeInfo<>(dataType, serializer);
} | 3.68 |
framework_BinderValidationStatus_hasErrors | /**
* Gets whether the validation for the binder failed or not.
*
* @return {@code true} if validation failed, {@code false} if validation
* passed
*/
public boolean hasErrors() {
return binderStatuses.stream().filter(ValidationResult::isError)
.findAny().isPresent()
|| bindingStatuses.stream()
.filter(BindingValidationStatus::isError).findAny()
.isPresent();
} | 3.68 |
flink_BatchExecSink_getPhysicalRowType | /** Get the physical row type with given column indices. */
private RowType getPhysicalRowType(ResolvedSchema schema, int[] columnIndices) {
List<Column> columns = schema.getColumns();
List<Column> requireColumns = new ArrayList<>();
for (int columnIndex : columnIndices) {
requireColumns.add(columns.get(columnIndex));
}
return (RowType) ResolvedSchema.of(requireColumns).toPhysicalRowDataType().getLogicalType();
} | 3.68 |
hbase_RegionReplicationSink_add | /**
* Add this edit to replication queue.
* <p/>
* The {@code rpcCall} is for retaining the cells if the edit is built within an rpc call and the
* rpc call has cell scanner, which is off heap.
*/
public void add(WALKeyImpl key, WALEdit edit, ServerCall<?> rpcCall) {
if (!tableDesc.hasRegionMemStoreReplication() && !edit.isMetaEdit()) {
// only replicate meta edit if region memstore replication is not enabled
return;
}
synchronized (entries) {
if (stopping) {
return;
}
if (edit.isMetaEdit()) {
// check whether we flushed all stores, which means we could drop all the previous edits,
// and also, recover from the previous failure of some replicas
for (Cell metaCell : edit.getCells()) {
getStartFlushAllDescriptor(metaCell).ifPresent(flushDesc -> {
long flushSequenceNumber = flushDesc.getFlushSequenceNumber();
lastFlushedSequenceId = flushSequenceNumber;
long clearedCount = entries.size();
long clearedSize = clearAllEntries();
if (LOG.isDebugEnabled()) {
LOG.debug(
"Got a flush all request with sequence id {}, clear {} pending"
+ " entries with size {}, clear failed replicas {}",
flushSequenceNumber, clearedCount,
StringUtils.TraditionalBinaryPrefix.long2String(clearedSize, "", 1),
failedReplicas);
}
failedReplicas.clear();
flushRequester.recordFlush(flushSequenceNumber);
});
}
}
if (failedReplicas.size() == regionReplication - 1) {
// this means we have marked all the replicas as failed, so just give up here
return;
}
SinkEntry entry = new SinkEntry(key, edit, rpcCall);
entries.add(entry);
pendingSize += entry.size;
if (manager.increase(entry.size)) {
if (!sending) {
send();
}
} else {
// we have run out of the max pending size, drop all the edits, and mark all replicas as
// failed
clearAllEntries();
for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
failedReplicas.add(replicaId);
}
flushRequester.requestFlush(entry.key.getSequenceId());
}
}
} | 3.68 |
flink_Pattern_oneOrMore | /**
* Specifies that this pattern can occur {@code one or more} times and time interval corresponds
* to the maximum time gap between previous and current event for each times. This means at
* least one and at most infinite number of events can be matched to this pattern.
*
* <p>If this quantifier is enabled for a pattern {@code A.oneOrMore().followedBy(B)} and a
* sequence of events {@code A1 A2 B} appears, this will generate patterns: {@code A1 B} and
* {@code A1 A2 B}. See also {@link #allowCombinations()}.
*
* @param windowTime time of the matching window between times
* @return The same pattern with a {@link Quantifier#looping(ConsumingStrategy)} quantifier
* applied.
* @throws MalformedPatternException if the quantifier is not applicable to this pattern.
*/
public Pattern<T, F> oneOrMore(@Nullable Time windowTime) {
checkIfNoNotPattern();
checkIfQuantifierApplied();
this.quantifier = Quantifier.looping(quantifier.getConsumingStrategy());
this.times = Times.of(1, windowTime);
return this;
} | 3.68 |
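A hedged usage sketch of the quantifier; the `Event` type and its `getType()` accessor are assumptions for illustration.

```java
Pattern<Event, ?> pattern = Pattern.<Event>begin("updates")
        .where(new SimpleCondition<Event>() {
            @Override
            public boolean filter(Event e) {
                return "A".equals(e.getType());
            }
        })
        // at least one "A"; consecutive "A"s must arrive within 10 seconds of each other
        .oneOrMore(Time.seconds(10))
        .followedBy("end")
        .where(new SimpleCondition<Event>() {
            @Override
            public boolean filter(Event e) {
                return "B".equals(e.getType());
            }
        });
```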
druid_Base64_base64toInt | /**
* Translates the specified character, which is assumed to be in the "Base 64 Alphabet" into its equivalent 6-bit
* positive integer.
*
* @throws IllegalArgumentException or ArrayIndexOutOfBoundsException if c is not in the Base64 Alphabet.
*/
private static int base64toInt(char c, byte[] alphaToInt) {
int result = alphaToInt[c];
if (result < 0) {
throw new IllegalArgumentException("Illegal character " + c);
}
return result;
} | 3.68 |
hadoop_RouterMetricsService_getJvmMetrics | /**
* Get the JVM metrics for the Router.
*
* @return JVM metrics.
*/
public JvmMetrics getJvmMetrics() {
if (this.routerMetrics == null) {
return null;
}
return this.routerMetrics.getJvmMetrics();
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_currentDownloadBytesPerSecond | /**
* Record the current bytes-per-second download rate seen.
* @param bytesPerSecond The bytes per second.
*/
public synchronized void currentDownloadBytesPerSecond(long bytesPerSecond) {
if (bytesPerSecond > currentMaximumDownloadBytesPerSecond) {
currentMaximumDownloadBytesPerSecond = bytesPerSecond;
maximumDownloadBytesPerSecond.set(bytesPerSecond);
}
} | 3.68 |
graphhopper_ArrayUtil_applyOrder | /**
* Creates a copy of the given array such that it is ordered by the given order.
* The order can be shorter or equal, but not longer than the array.
*/
public static int[] applyOrder(int[] arr, int[] order) {
if (order.length > arr.length)
throw new IllegalArgumentException("sort order must not be shorter than array");
int[] result = new int[order.length];
for (int i = 0; i < result.length; ++i)
result[i] = arr[order[i]];
return result;
} | 3.68 |
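A tiny worked example: the order array picks elements of `arr` by index, and may be shorter than `arr`.

```java
int[] arr   = {10, 20, 30, 40};
int[] order = {3, 0, 2};
int[] reordered = ArrayUtil.applyOrder(arr, order); // {40, 10, 30}
```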
pulsar_AbstractMetrics_getManagedLedgerCacheStats | /**
* Returns the managed ledger cache statistics from ML factory.
*
* @return
*/
protected ManagedLedgerFactoryMXBean getManagedLedgerCacheStats() {
return ((ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory()).getCacheStats();
} | 3.68 |
dubbo_MulticastRegistry_destroy | /**
* Remove the expired providers(if clean is true), leave the multicast group and close the multicast socket.
*/
@Override
public void destroy() {
super.destroy();
try {
ExecutorUtil.cancelScheduledFuture(cleanFuture);
} catch (Throwable t) {
logger.warn(REGISTRY_SOCKET_EXCEPTION, "", "", t.getMessage(), t);
}
try {
multicastSocket.leaveGroup(multicastAddress);
multicastSocket.close();
} catch (Throwable t) {
logger.warn(REGISTRY_SOCKET_EXCEPTION, "", "", t.getMessage(), t);
}
ExecutorUtil.gracefulShutdown(cleanExecutor, cleanPeriod);
} | 3.68 |
hadoop_EntityCacheItem_refreshCache | /**
* Refresh this cache item if it needs refresh. This will enforce an appLogs
* rescan and then load new data. The refresh process is synchronized with
* other operations on the same cache item.
*
* @param aclManager ACL manager for the timeline storage
* @param metrics Metrics to trace the status of the entity group store
* @return a {@link org.apache.hadoop.yarn.server.timeline.TimelineStore}
* object filled with all entities in the group.
* @throws IOException
*/
public synchronized TimelineStore refreshCache(TimelineACLsManager aclManager,
EntityGroupFSTimelineStoreMetrics metrics) throws IOException {
if (needRefresh()) {
long startTime = Time.monotonicNow();
// If an application is not finished, we only update summary logs (and put
// new entities into summary storage).
// Otherwise, since the application is done, we can update detail logs.
if (!appLogs.isDone()) {
appLogs.parseSummaryLogs();
} else if (appLogs.getDetailLogs().isEmpty()) {
appLogs.scanForLogs();
}
if (!appLogs.getDetailLogs().isEmpty()) {
if (store == null) {
store = ReflectionUtils.newInstance(config.getClass(
YarnConfiguration
.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CACHE_STORE,
MemoryTimelineStore.class, TimelineStore.class),
config);
store.init(config);
store.start();
} else {
// Store is not null, the refresh is triggered by stale storage.
metrics.incrCacheStaleRefreshes();
}
try (TimelineDataManager tdm =
new TimelineDataManager(store, aclManager)) {
tdm.init(config);
tdm.start();
// Load data from appLogs to tdm
appLogs.loadDetailLog(tdm, groupId);
}
}
updateRefreshTimeToNow();
metrics.addCacheRefreshTime(Time.monotonicNow() - startTime);
} else {
LOG.debug("Cache new enough, skip refreshing");
metrics.incrNoRefreshCacheRead();
}
return store;
} | 3.68 |
hadoop_ConfigurationUtils_copy | /**
* Copy configuration key/value pairs from one configuration to another. If a property already exists in the target,
* it gets replaced.
*
* @param source source configuration.
* @param target target configuration.
*/
public static void copy(Configuration source, Configuration target) {
Check.notNull(source, "source");
Check.notNull(target, "target");
for (Map.Entry<String, String> entry : source) {
target.set(entry.getKey(), entry.getValue());
}
} | 3.68 |
hadoop_SysInfoWindows_getNumVCoresUsed | /** {@inheritDoc} */
@Override
public synchronized float getNumVCoresUsed() {
refreshIfNeeded();
float ret = cpuUsage;
if (ret != -1) {
ret = ret / 100F;
}
return ret;
} | 3.68 |
framework_Window_getAssistivePrefix | /**
* Gets the accessibility prefix for the window caption.
*
* This prefix is read to assistive device users before the window caption,
* but not visible on the page.
*
* @return The accessibility prefix
*/
public String getAssistivePrefix() {
return getState(false).assistivePrefix;
} | 3.68 |
hadoop_Service_getValue | /**
* Get the integer value of a state
* @return the numeric value of the state
*/
public int getValue() {
return value;
} | 3.68 |
framework_Calendar_getWeeklyCaptionFormat | /**
* Gets the date caption format for the weekly view.
*
* @return The pattern used in caption of dates in weekly view.
*/
public String getWeeklyCaptionFormat() {
return weeklyCaptionFormat;
} | 3.68 |
hbase_SnapshotQuotaObserverChore_getSnapshotsToComputeSize | /**
* Fetches each table with a quota (table or namespace quota), and then fetch the name of each
* snapshot which was created from that table.
* @return A mapping of table to snapshots created from that table
*/
Multimap<TableName, String> getSnapshotsToComputeSize() throws IOException {
Set<TableName> tablesToFetchSnapshotsFrom = new HashSet<>();
QuotaFilter filter = new QuotaFilter();
filter.addTypeFilter(QuotaType.SPACE);
try (Admin admin = conn.getAdmin()) {
// Pull all of the tables that have quotas (direct, or from namespace)
for (QuotaSettings qs : QuotaRetriever.open(conf, filter)) {
if (qs.getQuotaType() == QuotaType.SPACE) {
String ns = qs.getNamespace();
TableName tn = qs.getTableName();
if ((null == ns && null == tn) || (null != ns && null != tn)) {
throw new IllegalStateException(
"Expected either one of namespace and tablename to be null but not both");
}
// Collect either the table name itself, or all of the tables in the namespace
if (null != ns) {
tablesToFetchSnapshotsFrom.addAll(Arrays.asList(admin.listTableNamesByNamespace(ns)));
} else {
tablesToFetchSnapshotsFrom.add(tn);
}
}
}
// Fetch all snapshots that were created from these tables
return getSnapshotsFromTables(admin, tablesToFetchSnapshotsFrom);
}
} | 3.68 |
hadoop_FSEditLogAsync_logEdit | // return whether edit log wants to sync.
boolean logEdit() {
return log.doEditTransaction(op);
} | 3.68 |
hmily_HmilyMetaDataManager_register | /**
* Register hmily metadata.
*
* @param hmilyTacResource the hmily resource
* @param databaseType database type
*/
public static void register(final HmilyTacResource hmilyTacResource, final DatabaseType databaseType) {
DataSourceMetaData dataSourceMetaData;
try {
dataSourceMetaData = DataSourceMetaDataLoader.load(hmilyTacResource.getTargetDataSource(), databaseType);
} catch (final SQLException ex) {
throw new IllegalStateException("failed in loading datasource metadata into hmily");
}
DATASOURCE_META_CACHE.put(hmilyTacResource.getResourceId(), dataSourceMetaData);
} | 3.68 |
hudi_HiveHoodieTableFileIndex_listFileSlices | /**
* Lists latest file-slices (base-file along w/ delta-log files) per partition.
*
* @return mapping from string partition paths to its base/log files
*/
public Map<String, List<FileSlice>> listFileSlices() {
return getAllInputFileSlices().entrySet().stream()
.collect(Collectors.toMap(e -> e.getKey().getPath(), Map.Entry::getValue));
} | 3.68 |
framework_VCalendarPanel_focusPreviousDay | /**
* Moves the focus backward the given number of days.
*/
private void focusPreviousDay(int days) {
focusNextDay(-days);
} | 3.68 |
flink_OutputFormatBase_postClose | /**
* Tear down the OutputFormat. This method is called at the end of {@link
* OutputFormatBase#close()}.
*/
protected void postClose() {} | 3.68 |
flink_BlobUtils_moveTempFileToStore | /**
* Moves the temporary <tt>incomingFile</tt> to its permanent location where it is available for
* use (not thread-safe!).
*
* @param incomingFile temporary file created during transfer
* @param jobId ID of the job this blob belongs to or <tt>null</tt> if job-unrelated
* @param blobKey BLOB key identifying the file
* @param storageFile (local) file where the blob is/should be stored
* @param log logger for debug information
* @param blobStore HA store (or <tt>null</tt> if unavailable)
* @throws IOException thrown if an I/O error occurs while moving the file or uploading it to
* the HA store
*/
static void moveTempFileToStore(
File incomingFile,
@Nullable JobID jobId,
BlobKey blobKey,
File storageFile,
Logger log,
@Nullable BlobStore blobStore)
throws IOException {
internalMoveTempFileToStore(
incomingFile,
jobId,
blobKey,
storageFile,
log,
blobStore,
(source, target) -> Files.move(source.toPath(), target.toPath()));
} | 3.68 |
hudi_TableCommand_createTable | /**
* Create a Hoodie Table if it does not exist.
*
* @param path Base Path
* @param name Hoodie Table Name
* @param tableTypeStr Hoodie Table Type
* @param payloadClass Payload Class
*/
@ShellMethod(key = "create", value = "Create a hoodie table if not present")
public String createTable(
@ShellOption(value = {"--path"}, help = "Base Path of the table") final String path,
@ShellOption(value = {"--tableName"}, help = "Hoodie Table Name") final String name,
@ShellOption(value = {"--tableType"}, defaultValue = "COPY_ON_WRITE",
help = "Hoodie Table Type. Must be one of : COPY_ON_WRITE or MERGE_ON_READ") final String tableTypeStr,
@ShellOption(value = {"--archiveLogFolder"}, help = "Folder Name for storing archived timeline",
defaultValue = ShellOption.NULL) String archiveFolder,
@ShellOption(value = {"--layoutVersion"}, help = "Specific Layout Version to use",
defaultValue = ShellOption.NULL) Integer layoutVersion,
@ShellOption(value = {"--payloadClass"}, defaultValue = "org.apache.hudi.common.model.HoodieAvroPayload",
help = "Payload Class") final String payloadClass) throws IOException {
boolean initialized = HoodieCLI.initConf();
HoodieCLI.initFS(initialized);
boolean existing = false;
try {
HoodieTableMetaClient.builder().setConf(HoodieCLI.conf).setBasePath(path).build();
existing = true;
} catch (TableNotFoundException dfe) {
// expected
}
// Do not touch table that already exist
if (existing) {
throw new IllegalStateException("Table already existing in path : " + path);
}
HoodieTableMetaClient.withPropertyBuilder()
.setTableType(tableTypeStr)
.setTableName(name)
.setArchiveLogFolder(archiveFolder)
.setPayloadClassName(payloadClass)
.setTimelineLayoutVersion(layoutVersion)
.initTable(HoodieCLI.conf, path);
// Now connect to ensure loading works
return connect(path, layoutVersion, false, 0, 0, 0);
} | 3.68 |
hbase_MetaTableAccessor_getDaughterRegions | /**
* Returns the daughter regions by reading the corresponding columns of the catalog table Result.
* @param data a Result object from the catalog table scan
* @return pair of RegionInfo or PairOfSameType(null, null) if region is not a split parent
*/
public static PairOfSameType<RegionInfo> getDaughterRegions(Result data) {
RegionInfo splitA = CatalogFamilyFormat.getRegionInfo(data, HConstants.SPLITA_QUALIFIER);
RegionInfo splitB = CatalogFamilyFormat.getRegionInfo(data, HConstants.SPLITB_QUALIFIER);
return new PairOfSameType<>(splitA, splitB);
} | 3.68 |
flink_CatalogManager_dropTemporaryTable | /**
* Drop a temporary table in a given fully qualified path.
*
* @param objectIdentifier The fully qualified path of the table to drop.
* @param ignoreIfNotExists If false exception will be thrown if the table to be dropped does
* not exist.
*/
public void dropTemporaryTable(ObjectIdentifier objectIdentifier, boolean ignoreIfNotExists) {
dropTemporaryTableInternal(
objectIdentifier,
(table) -> table instanceof CatalogTable,
ignoreIfNotExists,
true);
} | 3.68 |
hbase_IdLock_tryLockEntry | /**
* Blocks until the lock corresponding to the given id is acquired.
* @param id an arbitrary number to lock on
* @param time time to wait in ms
* @return an "entry" to pass to {@link #releaseLockEntry(Entry)} to release the lock
* @throws IOException if interrupted
*/
public Entry tryLockEntry(long id, long time) throws IOException {
Preconditions.checkArgument(time >= 0);
Thread currentThread = Thread.currentThread();
Entry entry = new Entry(id, currentThread);
Entry existing;
long waitUtilTS = EnvironmentEdgeManager.currentTime() + time;
long remaining = time;
while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
synchronized (existing) {
if (existing.locked) {
++existing.numWaiters; // Add ourselves to waiters.
try {
while (existing.locked) {
existing.wait(remaining);
if (existing.locked) {
long currentTS = EnvironmentEdgeManager.currentTime();
if (currentTS >= waitUtilTS) {
// time is up
return null;
} else {
// our wait is waken, but the lock is still taken, this can happen
// due to JDK Object's wait/notify mechanism.
// Calculate the new remaining time to wait
remaining = waitUtilTS - currentTS;
}
}
}
} catch (InterruptedException e) {
// HBASE-21292
// Please refer to the comments in getLockEntry()
// the difference here is that we decrease numWaiters in finally block
if (!existing.locked && existing.numWaiters == 1) {
map.remove(existing.id);
}
throw new InterruptedIOException("Interrupted waiting to acquire sparse lock");
} finally {
--existing.numWaiters; // Remove ourselves from waiters.
}
existing.locked = true;
existing.holder = currentThread;
return existing;
}
// If the entry is not locked, it might already be deleted from the
// map, so we cannot return it. We need to get our entry into the map
// or get someone else's locked entry.
}
}
return entry;
} | 3.68 |
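A minimal caller sketch for the timed variant above; `idLock` and `blockId` are assumed to exist in the surrounding code, and the null return on timeout must be handled before entering the try/finally:

IdLock.Entry lockEntry = idLock.tryLockEntry(blockId, 60000);
if (lockEntry == null) {
  // Timed out waiting for another holder; back off or fail the operation.
  return;
}
try {
  // Access the resource guarded by blockId.
} finally {
  idLock.releaseLockEntry(lockEntry);
}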
hadoop_LoggingAuditor_setLastHeader | /**
 * Set the last header.
* @param lastHeader the value for the lastHeader field.
*/
private void setLastHeader(final String lastHeader) {
this.lastHeader = lastHeader;
} | 3.68 |
framework_ListDataSource_asList | /**
* Gets the list that backs this datasource. Any changes made to this list
* will be reflected in the datasource.
* <p>
* Note: The list is not the same list as passed into the data source via
* the constructor.
*
* @return Returns a list implementation that wraps the real list that backs
* the data source and provides events for the data source
* listeners.
*/
public List<T> asList() {
return wrapper;
} | 3.68 |
hbase_ResponseConverter_buildGetServerInfoResponse | /**
* A utility to build a GetServerInfoResponse.
* @return the response
*/
public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName,
final int webuiPort) {
GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder();
ServerInfo.Builder serverInfoBuilder = ServerInfo.newBuilder();
serverInfoBuilder.setServerName(ProtobufUtil.toServerName(serverName));
if (webuiPort >= 0) {
serverInfoBuilder.setWebuiPort(webuiPort);
}
builder.setServerInfo(serverInfoBuilder.build());
return builder.build();
} | 3.68 |
hudi_InternalSchemaUtils_pruneType | /**
 * Project a Hudi type by the projected columns' field ids.
 * This is an auxiliary function used by pruneInternalSchema.
*/
private static Type pruneType(Type type, List<Integer> fieldIds) {
switch (type.typeId()) {
case RECORD:
Types.RecordType record = (Types.RecordType) type;
List<Types.Field> fields = record.fields();
List<Type> newTypes = new ArrayList<>();
for (Types.Field f : fields) {
Type newType = pruneType(f.type(), fieldIds);
if (fieldIds.contains(f.fieldId())) {
newTypes.add(f.type());
} else if (newType != null) {
newTypes.add(newType);
} else {
newTypes.add(null);
}
}
boolean changed = false;
List<Field> newFields = new ArrayList<>();
for (int i = 0; i < fields.size(); i++) {
Types.Field oldField = fields.get(i);
Type newType = newTypes.get(i);
if (oldField.type() == newType) {
newFields.add(oldField);
} else if (newType != null) {
changed = true;
newFields.add(Types.Field.get(oldField.fieldId(), oldField.isOptional(), oldField.name(), newType, oldField.doc()));
}
}
if (newFields.isEmpty()) {
return null;
}
if (newFields.size() == fields.size() && !changed) {
return record;
} else {
return Types.RecordType.get(newFields);
}
case ARRAY:
Types.ArrayType array = (Types.ArrayType) type;
Type newElementType = pruneType(array.elementType(), fieldIds);
if (fieldIds.contains(array.elementId())) {
return array;
} else if (newElementType != null) {
if (array.elementType() == newElementType) {
return array;
}
return Types.ArrayType.get(array.elementId(), array.isElementOptional(), newElementType);
}
return null;
case MAP:
Types.MapType map = (Types.MapType) type;
Type newValueType = pruneType(map.valueType(), fieldIds);
if (fieldIds.contains(map.valueId())) {
return map;
} else if (newValueType != null) {
if (map.valueType() == newValueType) {
return map;
}
return Types.MapType.get(map.keyId(), map.valueId(), map.keyType(), newValueType, map.isValueOptional());
}
return null;
default:
return null;
}
} | 3.68 |
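An illustrative sketch of the pruning behaviour, built only from the `Types` factory calls visible above; the field ids and the leaf-type factories (`Types.LongType.get()`, `Types.StringType.get()`) are assumptions made for the example, and `java.util.Arrays` is assumed to be imported:

// Hypothetical schema: id (fieldId 1), name (2), address (3).
Types.RecordType full = Types.RecordType.get(Arrays.asList(
    Types.Field.get(1, false, "id", Types.LongType.get(), null),
    Types.Field.get(2, true, "name", Types.StringType.get(), null),
    Types.Field.get(3, true, "address", Types.StringType.get(), null)));
// pruneType(full, Arrays.asList(1, 3)) keeps "id" and "address" and drops "name";
// pruneType(full, Collections.emptyList()) returns null because no field survives.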
framework_VAbstractPopupCalendar_onClick | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.ClickHandler#onClick(com.google.gwt.event
* .dom.client.ClickEvent)
*/
@Override
public void onClick(ClickEvent event) {
if (event.getSource() == calendarToggle && isEnabled()) {
if (open) {
closeCalendarPanel();
} else if (!preventOpenPopupCalendar) {
openCalendarPanel();
}
preventOpenPopupCalendar = false;
}
} | 3.68 |
hbase_MiniHBaseCluster_stopMaster | /**
* Shut down the specified master cleanly
* @param serverNumber Used as index into a list.
 * @param shutdownFS True if we are to shut down the filesystem as part of this master's
 *                   shutdown. Usually we do, but you do not want to do this if you are running
 *                   multiple masters in a test and you shut down one before the end of the test.
* @return the master that was stopped
*/
public JVMClusterUtil.MasterThread stopMaster(int serverNumber, final boolean shutdownFS) {
JVMClusterUtil.MasterThread server = hbaseCluster.getMasters().get(serverNumber);
LOG.info("Stopping " + server.toString());
server.getMaster().stop("Stopping master " + serverNumber);
return server;
} | 3.68 |
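A short test-side sketch, assuming a running `MiniHBaseCluster` named `cluster` and that the enclosing test method declares `throws InterruptedException`; `MasterThread` extends `Thread`, so the caller can join it after the stop request:

JVMClusterUtil.MasterThread stopped = cluster.stopMaster(0, false); // keep the shared filesystem up
stopped.join(); // wait for the master thread to exit before making assertions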
hbase_TableBackupClient_beginBackup | /**
* Begin the overall backup.
* @param backupInfo backup info
* @throws IOException exception
*/
protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo)
throws IOException {
BackupSystemTable.snapshot(conn);
backupManager.setBackupInfo(backupInfo);
// set the start timestamp of the overall backup
long startTs = EnvironmentEdgeManager.currentTime();
backupInfo.setStartTs(startTs);
// set overall backup status: ongoing
backupInfo.setState(BackupState.RUNNING);
backupInfo.setPhase(BackupPhase.REQUEST);
LOG.info("Backup " + backupInfo.getBackupId() + " started at " + startTs + ".");
backupManager.updateBackupInfo(backupInfo);
if (LOG.isDebugEnabled()) {
LOG.debug("Backup session " + backupInfo.getBackupId() + " has been started.");
}
} | 3.68 |
framework_VaadinService_removeSessionDestroyListener | /**
* Removes a Vaadin service session destroy listener from this service.
*
* @see #addSessionDestroyListener(SessionDestroyListener)
*
* @param listener
* the vaadin service session destroy listener
* @deprecated use the {@link Registration} object returned by
* {@link #addSessionDestroyListener(SessionDestroyListener)} to
* remove the listener
*/
@Deprecated
public void removeSessionDestroyListener(SessionDestroyListener listener) {
sessionDestroyListeners.remove(listener);
} | 3.68 |
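A sketch of the replacement pattern named in the deprecation note: keep the `Registration` returned by `addSessionDestroyListener` and remove the listener through it (the `service` variable is assumed):

Registration registration = service.addSessionDestroyListener(event -> {
    // Clean up per-session resources here.
});
// ... later, instead of calling removeSessionDestroyListener(listener):
registration.remove();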
flink_AbstractBinaryExternalMerger_mergeChannelList | /**
* Merges the given sorted runs to a smaller number of sorted runs.
*
* @param channelIDs The IDs of the sorted runs that need to be merged.
* @return A list of the IDs of the merged channels.
* @throws IOException Thrown, if the readers or writers encountered an I/O problem.
*/
public List<ChannelWithMeta> mergeChannelList(List<ChannelWithMeta> channelIDs)
throws IOException {
// A channel list with length maxFanIn^i can be merged to maxFanIn files in i-1 rounds,
// where every merge is a full merge with maxFanIn input channels. A partial round
// includes merges with fewer than maxFanIn inputs. It is most efficient to perform
// the partial round first.
final double scale = Math.ceil(Math.log(channelIDs.size()) / Math.log(maxFanIn)) - 1;
final int numStart = channelIDs.size();
final int numEnd = (int) Math.pow(maxFanIn, scale);
final int numMerges = (int) Math.ceil((numStart - numEnd) / (double) (maxFanIn - 1));
final int numNotMerged = numEnd - numMerges;
final int numToMerge = numStart - numNotMerged;
// unmerged channel IDs are copied directly to the result list
final List<ChannelWithMeta> mergedChannelIDs = new ArrayList<>(numEnd);
mergedChannelIDs.addAll(channelIDs.subList(0, numNotMerged));
final int channelsToMergePerStep = (int) Math.ceil(numToMerge / (double) numMerges);
final List<ChannelWithMeta> channelsToMergeThisStep =
new ArrayList<>(channelsToMergePerStep);
int channelNum = numNotMerged;
while (!closed && channelNum < channelIDs.size()) {
channelsToMergeThisStep.clear();
for (int i = 0;
i < channelsToMergePerStep && channelNum < channelIDs.size();
i++, channelNum++) {
channelsToMergeThisStep.add(channelIDs.get(channelNum));
}
mergedChannelIDs.add(mergeChannels(channelsToMergeThisStep));
}
return mergedChannelIDs;
} | 3.68 |
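A worked example of the fan-in arithmetic in the method above, as plain Java (no Flink APIs involved); with `maxFanIn = 4` and 10 input channels the partial round merges twice and passes two channels through untouched:

int maxFanIn = 4, numStart = 10;
double scale = Math.ceil(Math.log(numStart) / Math.log(maxFanIn)) - 1;          // 1.0
int numEnd = (int) Math.pow(maxFanIn, scale);                                   // 4
int numMerges = (int) Math.ceil((numStart - numEnd) / (double) (maxFanIn - 1)); // 2
int numNotMerged = numEnd - numMerges;                                          // 2 -> channels 0..1 pass through
int perStep = (int) Math.ceil((numStart - numNotMerged) / (double) numMerges);  // 4 -> merge 2..5, then 6..9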
flink_FromElementsFunction_getNumElements | /**
* Gets the number of elements produced in total by this function.
*
* @return The number of elements produced in total.
*/
public int getNumElements() {
return numElements;
} | 3.68 |
hmily_HmilyTccTransactionExecutor_preTryParticipant | /**
 * This is the participant-side transaction preTry.
 *
 * @param context transaction context.
 * @param point cut point
 * @return HmilyParticipant the participant created for this invocation
 */
public HmilyParticipant preTryParticipant(final HmilyTransactionContext context, final ProceedingJoinPoint point) {
LogUtil.debug(LOGGER, "participant hmily tcc transaction start..:{}", context::toString);
final HmilyParticipant hmilyParticipant = buildHmilyParticipant(point, context.getParticipantId(), context.getParticipantRefId(), HmilyRoleEnum.PARTICIPANT.getCode(), context.getTransId());
HmilyTransactionHolder.getInstance().cacheHmilyParticipant(hmilyParticipant);
HmilyRepositoryStorage.createHmilyParticipant(hmilyParticipant);
//publishEvent
//Nested transaction support
context.setRole(HmilyRoleEnum.PARTICIPANT.getCode());
HmilyContextHolder.set(context);
return hmilyParticipant;
} | 3.68 |
morf_AbstractSqlDialectTest_checkDatabaseByteArrayToRecordValue | /**
* Format a value through the result set record for testing.
*
* @param value The value to format.
* @return The formatted value.
*/
private String checkDatabaseByteArrayToRecordValue(final byte[] value) throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.getBytes(anyInt())).thenReturn(value == null ? null : value);
return testDialect.resultSetToRecord(resultSet, ImmutableList.of(column("a", DataType.BLOB))).getString("a");
} | 3.68 |
flink_RocksDBStateBackend_getEmbeddedRocksDBStateBackend | /** @return The underlying {@link EmbeddedRocksDBStateBackend} instance. */
@VisibleForTesting
EmbeddedRocksDBStateBackend getEmbeddedRocksDBStateBackend() {
return rocksDBStateBackend;
} | 3.68 |
open-banking-gateway_Xs2aLogResolver_log | //responses
public void log(String message, Response<T> response) {
ResponseLog<T> responseLog = new ResponseLog<>();
responseLog.setStatusCode(response.getStatusCode());
responseLog.setHeaders(response.getHeaders());
responseLog.setBody(response.getBody());
if (log.isDebugEnabled()) {
log.debug(message, responseLog);
} else {
log.info(message, responseLog.getNotSensitiveData());
}
} | 3.68 |
hmily_AbstractHmilySQLParserExecutor_generateHmilyInsertStatement | /**
* Generate Hmily insert statement.
*
* @param insertStatement insert statement
* @param hmilyInsertStatement hmily insert statement
* @return hmily insert statement
*/
public HmilyInsertStatement generateHmilyInsertStatement(final InsertStatement insertStatement, final HmilyInsertStatement hmilyInsertStatement) {
return InsertStatementAssembler.assembleHmilyInsertStatement(insertStatement, hmilyInsertStatement);
} | 3.68 |