name (string, 12 to 178 chars) | code_snippet (string, 8 to 36.5k chars) | score (float64, 3.26 to 3.68)
---|---|---|
hudi_StreamerUtil_haveSuccessfulCommits | /**
* Returns whether there are successful commits on the timeline.
*
* @param metaClient The meta client
* @return true if there is any successful commit
*/
public static boolean haveSuccessfulCommits(HoodieTableMetaClient metaClient) {
return !metaClient.getCommitsTimeline().filterCompletedInstants().empty();
} | 3.68 |
flink_Expander_create | /** Creates an Expander. */
public static Expander create(FlinkPlannerImpl planner) {
return new Expander(planner);
} | 3.68 |
hbase_RegionMetrics_getRequestCount | /**
* Returns the total number of read, write and coprocessor service requests made to the
* region.
*/
default long getRequestCount() {
return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount();
} | 3.68 |
framework_AbstractComponent_focus | /**
* Sets the focus for this component if the component is {@link Focusable}.
*/
protected void focus() {
if (this instanceof Focusable) {
final VaadinSession session = getSession();
if (session != null) {
getUI().setFocusedComponent((Focusable) this);
delayedFocus = false;
} else {
delayedFocus = true;
}
}
} | 3.68 |
hbase_MobUtils_isMobRegionName | /**
* Gets whether the current region name follows the pattern of a mob region name.
* @param tableName The current table name.
* @param regionName The current region name.
* @return True if the current region name follows the pattern of a mob region name.
*/
public static boolean isMobRegionName(TableName tableName, byte[] regionName) {
return Bytes.equals(regionName, getMobRegionInfo(tableName).getRegionName());
} | 3.68 |
framework_ConnectorTracker_readObject | /* Special serialization to JsonObjects which are not serializable */
private void readObject(ObjectInputStream in)
throws IOException, ClassNotFoundException {
in.defaultReadObject();
// Read String versions of JsonObjects and parse into JsonObjects as
// JsonObject is not serializable
diffStates = new HashMap<>();
@SuppressWarnings("unchecked")
Map<ClientConnector, String> stringDiffStates = (HashMap<ClientConnector, String>) in
.readObject();
diffStates = new HashMap<>(stringDiffStates.size() * 2);
for (ClientConnector key : stringDiffStates.keySet()) {
try {
diffStates.put(key, Json.parse(stringDiffStates.get(key)));
} catch (JsonException e) {
throw new IOException(e);
}
}
} | 3.68 |
flink_TaskLocalStateStoreImpl_pruneCheckpoints | /** Prunes checkpoints that are no longer needed; should only be called while holding the {@link #lock}. */
protected void pruneCheckpoints(LongPredicate pruningChecker, boolean breakOnceCheckerFalse) {
final List<Tuple2<Long, TaskStateSnapshot>> toRemove = new ArrayList<>();
synchronized (lock) {
Iterator<Map.Entry<Long, TaskStateSnapshot>> entryIterator =
storedTaskStateByCheckpointID.entrySet().iterator();
while (entryIterator.hasNext()) {
Map.Entry<Long, TaskStateSnapshot> snapshotEntry = entryIterator.next();
long entryCheckpointId = snapshotEntry.getKey();
if (pruningChecker.test(entryCheckpointId)) {
toRemove.add(Tuple2.of(entryCheckpointId, snapshotEntry.getValue()));
entryIterator.remove();
} else if (breakOnceCheckerFalse) {
break;
}
}
}
asyncDiscardLocalStateForCollection(toRemove);
} | 3.68 |
hudi_HoodieInMemoryHashIndex_canIndexLogFiles | /**
* The index mapping is kept fully in memory, so log files can be indexed.
*/
@Override
public boolean canIndexLogFiles() {
return true;
} | 3.68 |
flink_SingleInputOperator_setInputs | /**
* Sets the input to the union of the given operators.
*
* @param inputs The operator(s) that form the input.
* @deprecated This method will be removed in future versions. Use the {@link Union} operator
* instead.
*/
@Deprecated
@SuppressWarnings("unchecked")
public void setInputs(List<Operator<IN>> inputs) {
this.input = Operator.createUnionCascade(null, inputs.toArray(new Operator[inputs.size()]));
} | 3.68 |
hadoop_LocalCacheDirectoryManager_incrementFileCountForPath | /**
* Increment the file count for a relative directory within the cache
*
* @param relPath the relative path
*/
public synchronized void incrementFileCountForPath(String relPath) {
relPath = relPath == null ? "" : relPath.trim();
Directory subDir = knownDirectories.get(relPath);
if (subDir == null) {
int dirnum = Directory.getDirectoryNumber(relPath);
totalSubDirectories = Math.max(dirnum, totalSubDirectories);
subDir = new Directory(dirnum);
nonFullDirectories.add(subDir);
knownDirectories.put(subDir.getRelativePath(), subDir);
}
if (subDir.incrementAndGetCount() >= perDirectoryFileLimit) {
nonFullDirectories.remove(subDir);
}
} | 3.68 |
flink_WindowedStream_max | /**
* Applies an aggregation that gives the maximum value of the pojo data stream at the given
* field expression for every window. A field expression is either the name of a public field or
* a getter method with parentheses of the {@link DataStream DataStreams} underlying type. A dot
* can be used to drill down into objects, as in {@code "field1.getInnerField2()" }.
*
* @param field The field expression based on which the aggregation will be applied.
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> max(String field) {
return aggregate(
new ComparableAggregator<>(
field,
input.getType(),
AggregationFunction.AggregationType.MAX,
false,
input.getExecutionConfig()));
} | 3.68 |
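A hedged usage sketch for the `max(String)` snippet above; the `Sensor` POJO, field names and the processing-time window are illustrative assumptions (pre-2.0 DataStream API class names), not part of the dataset row:

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class MaxByFieldExpressionExample {

    /** A simple POJO; public fields make "temperature" usable as a field expression. */
    public static class Sensor {
        public String id;
        public double temperature;

        public Sensor() {}

        public Sensor(String id, double temperature) {
            this.id = id;
            this.temperature = temperature;
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(new Sensor("a", 21.5), new Sensor("a", 23.0), new Sensor("b", 19.0))
                // key by sensor id, then window and take the max of the "temperature" field;
                // per the javadoc, nested fields could be addressed with a dot, e.g. "reading.value"
                .keyBy(s -> s.id)
                .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .max("temperature")
                .print();
        env.execute("max-by-field-expression");
    }
}
```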
streampipes_DataStreamBuilder_properties | /**
* Assigns a list of new event properties to the stream's schema.
*
* @param properties The event properties that should be added.
* @return this
*/
public DataStreamBuilder properties(List<EventProperty> properties) {
this.eventProperties.addAll(properties);
return me();
} | 3.68 |
hadoop_FSStarvedApps_addStarvedApp | /**
* Add a starved application if it is not already added.
* @param app application to add
*/
void addStarvedApp(FSAppAttempt app) {
if (!app.equals(appBeingProcessed) && !appsToProcess.contains(app)) {
appsToProcess.add(app);
}
} | 3.68 |
morf_SqlScriptExecutorProvider_afterExecute | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#afterExecute(java.lang.String,
* long)
*/
@Override
public void afterExecute(String sql, long numberOfRowsUpdated) {
// Defaults to no-op
} | 3.68 |
framework_ScrollbarBundle_getScrollbarThickness | /**
* Gets the scrollbar's thickness.
* <p>
* This value will differ from the value in the DOM, if the thickness was
* set to 0 with {@link #setScrollbarThickness(double)}, as the scrollbar is
* then treated as "invisible."
*
* @return the scrollbar's thickness in pixels
*/
public final double getScrollbarThickness() {
if (!isInvisibleScrollbar) {
return parseCssDimensionToPixels(internalGetScrollbarThickness());
} else {
return 0;
}
} | 3.68 |
hbase_ZKUtil_toZooKeeperOp | /**
* Convert from ZKUtilOp to ZKOp
*/
private static Op toZooKeeperOp(ZKWatcher zkw, ZKUtilOp op) throws UnsupportedOperationException {
if (op == null) {
return null;
}
if (op instanceof CreateAndFailSilent) {
CreateAndFailSilent cafs = (CreateAndFailSilent) op;
return Op.create(cafs.getPath(), cafs.getData(), zkw.createACL(cafs.getPath()),
CreateMode.PERSISTENT);
} else if (op instanceof DeleteNodeFailSilent) {
DeleteNodeFailSilent dnfs = (DeleteNodeFailSilent) op;
return Op.delete(dnfs.getPath(), -1);
} else if (op instanceof SetData) {
SetData sd = (SetData) op;
return Op.setData(sd.getPath(), sd.getData(), sd.getVersion());
} else {
throw new UnsupportedOperationException(
"Unexpected ZKUtilOp type: " + op.getClass().getName());
}
} | 3.68 |
flink_CatalogManager_getCatalogOrError | /**
* Gets a catalog by name.
*
* @param catalogName name of the catalog to retrieve
* @return the requested catalog
* @throws CatalogNotExistException if the catalog does not exist
*/
public Catalog getCatalogOrError(String catalogName) throws CatalogNotExistException {
return getCatalog(catalogName).orElseThrow(() -> new CatalogNotExistException(catalogName));
} | 3.68 |
hadoop_FederationBlock_initSubClusterPage | /**
* Initialize the Federation page of the sub-cluster.
*
* @param tbody HTML tbody.
* @param lists subCluster page data list.
*/
private void initSubClusterPage(TBODY<TABLE<Hamlet>> tbody, List<Map<String, String>> lists) {
// Sort the SubClusters
List<SubClusterInfo> subClusters = getSubClusterInfoList();
// Iterate through the sub-clusters and display data for each sub-cluster.
// If a sub-cluster cannot display data, skip it.
for (SubClusterInfo subCluster : subClusters) {
try {
initSubClusterPageItem(tbody, subCluster, lists);
} catch (Exception e) {
LOG.error("init subCluster = {} page data error.", subCluster, e);
}
}
} | 3.68 |
morf_SqlScriptExecutor_get | /**
* @return the value
*/
T get() {
return value;
} | 3.68 |
morf_SchemaBean_tables | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.metadata.Schema#tables()
*/
@Override
public Collection<Table> tables() {
return tables.values();
} | 3.68 |
dubbo_Environment_updateAppConfigMap | /**
* Merge target map properties into app configuration
*
* @param map the properties to merge into the application configuration
*/
public void updateAppConfigMap(Map<String, String> map) {
this.appConfiguration.addProperties(map);
} | 3.68 |
morf_SelectStatement_getHaving | /**
* Gets the grouping filter
*
* @return the having fields
*/
public Criterion getHaving() {
return having;
} | 3.68 |
hudi_AvroInternalSchemaConverter_buildAvroSchemaFromInternalSchema | /**
* Converts hudi internal Schema into an Avro Schema.
*
* @param schema a hudi internal Schema.
* @param recordName the record name
* @return an Avro schema matching the hudi internal schema.
*/
public static Schema buildAvroSchemaFromInternalSchema(InternalSchema schema, String recordName) {
Map<Type, Schema> cache = new HashMap<>();
return visitInternalSchemaToBuildAvroSchema(schema.getRecord(), cache, recordName);
} | 3.68 |
framework_ErrorLevel_intValue | /**
* Integer representation of error severity for comparison.
*
* @return integer for error severity
*/
public int intValue() {
return ordinal();
} | 3.68 |
framework_AbstractRenderer_getParentGrid | /**
* Gets the {@link Grid} this renderer is attached to. Used internally for
* indicating the source grid of possible events emitted by this renderer,
* such as {@link RendererClickEvent}s.
*
* @return the grid this renderer is attached to or {@code null} if
* unattached
*/
@SuppressWarnings("unchecked")
protected Grid<T> getParentGrid() {
if (super.getParent() == null) {
return null;
}
return (Grid<T>) super.getParent().getParent();
} | 3.68 |
hmily_SofaHmilyOrderApplication_main | /**
* main.
*
* @param args args.
*/
public static void main(final String[] args) {
SpringApplication springApplication = new SpringApplication(SofaHmilyOrderApplication.class);
springApplication.run(args);
} | 3.68 |
pulsar_SimpleLoadManagerImpl_isLoadReportGenerationIntervalPassed | /**
* Check if last generated load-report time passed the minimum time for load-report update.
*
* @return true if the minimum interval has passed since the last load-report generation and a new
* load-report can be generated; false otherwise
*/
private boolean isLoadReportGenerationIntervalPassed() {
long timeSinceLastGenMillis = System.currentTimeMillis() - lastLoadReport.getTimestamp();
return timeSinceLastGenMillis > pulsar.getConfiguration().getLoadBalancerReportUpdateMinIntervalMillis();
} | 3.68 |
hudi_StreamSync_startCommit | /**
* Try to start a new commit.
* <p>
* An exception is thrown if the commit cannot be started after 2 tries.
*
* @return Instant time of the commit
*/
private String startCommit(String instantTime, boolean retryEnabled) {
final int maxRetries = 2;
int retryNum = 1;
RuntimeException lastException = null;
while (retryNum <= maxRetries) {
try {
String commitActionType = CommitUtils.getCommitActionType(cfg.operation, HoodieTableType.valueOf(cfg.tableType));
writeClient.startCommitWithTime(instantTime, commitActionType);
return instantTime;
} catch (IllegalArgumentException ie) {
lastException = ie;
if (!retryEnabled) {
throw ie;
}
LOG.error("Got error trying to start a new commit. Retrying after sleeping for a sec", ie);
retryNum++;
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// No-Op
}
}
instantTime = writeClient.createNewInstantTime();
}
throw lastException;
} | 3.68 |
flink_MailboxExecutor_submit | /**
* Submits the given command for execution in the future in the mailbox thread and returns a
* Future representing that command. The Future's {@code get} method will return {@code null}
* upon <em>successful</em> completion.
*
* <p>An optional description can (and should) be added to ease debugging and error-reporting.
* The description may contain placeholders that refer to the provided description arguments
* using {@link java.util.Formatter} syntax. The actual description is only formatted on demand.
*
* @param command the command to submit
* @param description the optional description for the command that is used for debugging and
* error-reporting.
* @return a Future representing pending completion of the task
* @throws RejectedExecutionException if this task cannot be accepted for execution, e.g.
* because the mailbox is quiesced or closed.
*/
default @Nonnull <T> Future<T> submit(@Nonnull Callable<T> command, String description) {
FutureTaskWithException<T> future = new FutureTaskWithException<>(command);
execute(future, description, EMPTY_ARGS);
return future;
} | 3.68 |
querydsl_SQLTemplates_serialize | /**
* template method for SELECT serialization
*
* @param metadata
* @param forCountRow
* @param context
*/
public void serialize(QueryMetadata metadata, boolean forCountRow, SQLSerializer context) {
context.serializeForQuery(metadata, forCountRow);
if (!metadata.getFlags().isEmpty()) {
context.serialize(Position.END, metadata.getFlags());
}
} | 3.68 |
graphhopper_GraphHopper_getProfile | /**
* Returns the profile for the given profile name, or null if it does not exist
*/
public Profile getProfile(String profileName) {
return profilesByName.get(profileName);
} | 3.68 |
hadoop_RenameOperation_recursiveDirectoryRename | /**
* Execute a full recursive rename.
* There is special handling of directory markers here: only leaf markers
* are copied. This reduces incompatibility "regions" across versions.
* @throws IOException failure
*/
protected void recursiveDirectoryRename() throws IOException {
final StoreContext storeContext = getStoreContext();
LOG.debug("rename: renaming directory {} to {}", sourcePath, destPath);
// This is a directory-to-directory copy
String dstKey = maybeAddTrailingSlash(destKey);
String srcKey = maybeAddTrailingSlash(sourceKey);
// Verify dest is not a child of the source directory
if (dstKey.startsWith(srcKey)) {
throw new RenameFailedException(srcKey, dstKey,
"cannot rename a directory to a subdirectory of itself ");
}
// start the async dir cleanup
final CompletableFuture<Long> abortUploads;
if (dirOperationsPurgeUploads) {
final String key = srcKey;
LOG.debug("All uploads under {} will be deleted", key);
abortUploads = submit(getStoreContext().getExecutor(), () ->
callbacks.abortMultipartUploadsUnderPrefix(key));
} else {
abortUploads = null;
}
if (destStatus != null
&& destStatus.isEmptyDirectory() == Tristate.TRUE) {
// delete unnecessary fake directory at the destination.
LOG.debug("Deleting fake directory marker at destination {}",
destStatus.getPath());
// Although the dir marker policy doesn't always need to do this,
// it's simplest just to be consistent here.
callbacks.deleteObjectAtPath(destStatus.getPath(), dstKey, false);
}
Path parentPath = storeContext.keyToPath(srcKey);
// Track directory markers so that we know which leaf directories need to be
// recreated
DirMarkerTracker dirMarkerTracker = new DirMarkerTracker(parentPath,
false);
final RemoteIterator<S3ALocatedFileStatus> iterator =
callbacks.listFilesAndDirectoryMarkers(parentPath,
sourceStatus,
true);
while (iterator.hasNext()) {
// get the next entry in the listing.
S3ALocatedFileStatus child = iterator.next();
LOG.debug("To rename {}", child);
// convert it to an S3 key.
String k = storeContext.pathToKey(child.getPath());
// possibly adding a "/" if it represents directory and it does
// not have a trailing slash already.
String key = (child.isDirectory() && !k.endsWith("/"))
? k + "/"
: k;
// the source object to copy as a path.
Path childSourcePath = storeContext.keyToPath(key);
List<DirMarkerTracker.Marker> markersToDelete;
boolean isMarker = key.endsWith("/");
if (isMarker) {
// add the marker to the tracker.
// it will not be deleted _yet_ but it may find a list of parent
// markers which may now be deleted.
markersToDelete = dirMarkerTracker.markerFound(
childSourcePath, key, child);
} else {
// it is a file.
// note that it has been found -this may find a list of parent
// markers which may now be deleted.
markersToDelete = dirMarkerTracker.fileFound(
childSourcePath, key, child);
// the destination key is that of the key under the source tree,
// remapped under the new destination path.
String newDestKey =
dstKey + key.substring(srcKey.length());
Path childDestPath = storeContext.keyToPath(newDestKey);
// mark the source file for deletion on a successful copy.
queueToDelete(childSourcePath, key);
// now begin the single copy
CompletableFuture<Path> copy = initiateCopy(child, key,
newDestKey, childDestPath);
activeCopies.add(copy);
bytesCopied.addAndGet(sourceStatus.getLen());
}
// add any markers to delete to the operation so they get cleaned
// incrementally
queueToDelete(markersToDelete);
// and trigger any end of loop operations
endOfLoopActions();
} // end of iteration through the list
// finally process remaining directory markers
copyEmptyDirectoryMarkers(srcKey, dstKey, dirMarkerTracker);
// await the final set of copies and their deletion
// This will notify the renameTracker that these objects
// have been deleted.
completeActiveCopiesAndDeleteSources("final copy and delete");
// and if uploads were being aborted, wait for that to finish
uploadsAborted = waitForCompletionIgnoringExceptions(abortUploads);
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_setProperty | /**
* Set property.
*
* @param key property key
* @param value property value
* @return job configuration builder
*/
public Builder setProperty(final String key, final String value) {
props.setProperty(key, value);
return this;
} | 3.68 |
streampipes_StreamRequirementsBuilder_any | /**
* Creates a new stream requirement without any further property requirements.
*
* @return {@link CollectedStreamRequirements}
*/
public static CollectedStreamRequirements any() {
return StreamRequirementsBuilder.create().build();
} | 3.68 |
flink_BufferCompressor_compressToOriginalBuffer | /**
* The difference between this method and {@link #compressToIntermediateBuffer(Buffer)} is that
* this method will copy the compressed data back to the input {@link Buffer} starting from
* offset 0.
*
* <p>The caller must guarantee that the input {@link Buffer} is writable.
*/
public Buffer compressToOriginalBuffer(Buffer buffer) {
int compressedLen;
if ((compressedLen = compress(buffer)) == 0) {
return buffer;
}
// copy the compressed data back
int memorySegmentOffset = buffer.getMemorySegmentOffset();
MemorySegment segment = buffer.getMemorySegment();
segment.put(memorySegmentOffset, internalBufferArray, 0, compressedLen);
return new ReadOnlySlicedNetworkBuffer(
buffer.asByteBuf(), 0, compressedLen, memorySegmentOffset, true);
} | 3.68 |
querydsl_ExpressionUtils_createRootVariable | /**
* Create a new root variable based on the given path
*
* @param path base path
* @return variable name
*/
public static String createRootVariable(Path<?> path) {
return path.accept(ToStringVisitor.DEFAULT, TEMPLATES);
} | 3.68 |
flink_AvailabilityProvider_getUnavailableToResetAvailable | /**
* Returns the previously not completed future and resets the constant completed {@link
* #AVAILABLE} as the current state.
*/
public CompletableFuture<?> getUnavailableToResetAvailable() {
CompletableFuture<?> toNotify = availableFuture;
availableFuture = AVAILABLE;
return toNotify;
} | 3.68 |
flink_DynamicSinkUtils_pushMetadataProjection | /**
* Creates a projection that reorders physical and metadata columns according to the consumed
* data type of the sink. It casts metadata columns into the expected data type.
*
* @see SupportsWritingMetadata
*/
private static void pushMetadataProjection(
FlinkRelBuilder relBuilder,
FlinkTypeFactory typeFactory,
ResolvedSchema schema,
DynamicTableSink sink) {
final RexBuilder rexBuilder = relBuilder.getRexBuilder();
final List<Column> columns = schema.getColumns();
final List<Integer> physicalColumns = extractPhysicalColumns(schema);
final Map<String, Integer> keyToMetadataColumn =
extractPersistedMetadataColumns(schema).stream()
.collect(
Collectors.toMap(
pos -> {
final MetadataColumn metadataColumn =
(MetadataColumn) columns.get(pos);
return metadataColumn
.getMetadataKey()
.orElse(metadataColumn.getName());
},
Function.identity()));
final List<Integer> metadataColumns =
createRequiredMetadataColumns(schema, sink).stream()
.map(col -> col.getMetadataKey().orElse(col.getName()))
.map(keyToMetadataColumn::get)
.collect(Collectors.toList());
final List<String> fieldNames =
Stream.concat(
physicalColumns.stream().map(columns::get).map(Column::getName),
metadataColumns.stream()
.map(columns::get)
.map(MetadataColumn.class::cast)
.map(c -> c.getMetadataKey().orElse(c.getName())))
.collect(Collectors.toList());
final Map<String, DataType> metadataMap = extractMetadataMap(sink);
final List<RexNode> fieldNodes =
Stream.concat(
physicalColumns.stream()
.map(
pos -> {
final int posAdjusted =
adjustByVirtualColumns(columns, pos);
return relBuilder.field(posAdjusted);
}),
metadataColumns.stream()
.map(
pos -> {
final MetadataColumn metadataColumn =
(MetadataColumn) columns.get(pos);
final String metadataKey =
metadataColumn
.getMetadataKey()
.orElse(
metadataColumn
.getName());
final LogicalType expectedType =
metadataMap
.get(metadataKey)
.getLogicalType();
final RelDataType expectedRelDataType =
typeFactory
.createFieldTypeFromLogicalType(
expectedType);
final int posAdjusted =
adjustByVirtualColumns(columns, pos);
return rexBuilder.makeAbstractCast(
expectedRelDataType,
relBuilder.field(posAdjusted));
}))
.collect(Collectors.toList());
relBuilder.projectNamed(fieldNodes, fieldNames, true);
} | 3.68 |
hadoop_LoadedManifestData_deleteEntrySequenceFile | /**
* Delete the entry sequence file.
* @return whether or not the delete was successful.
*/
public boolean deleteEntrySequenceFile() {
return getEntrySequenceFile().delete();
} | 3.68 |
streampipes_PipelineManager_getPipelinesContainingElements | /**
* Checks for the pipelines that contain the processing element
*
* @param elementId the id of the processing Element
* @return all pipelines containing the element
*/
public static List<Pipeline> getPipelinesContainingElements(String elementId) {
return PipelineManager.getAllPipelines().stream()
.filter(pipeline ->
mergePipelineElement(pipeline)
.anyMatch(el -> el.getElementId().equals(elementId)))
.collect(Collectors.toList());
} | 3.68 |
flink_FutureUtils_orTimeout | /**
* Times the given future out after the timeout.
*
* @param future to time out
* @param timeout after which the given future is timed out
* @param timeUnit time unit of the timeout
* @param timeoutFailExecutor executor that will complete the future exceptionally after the
* timeout is reached
* @param timeoutMsg timeout message for exception
* @param <T> type of the given future
* @return The timeout enriched future
*/
public static <T> CompletableFuture<T> orTimeout(
CompletableFuture<T> future,
long timeout,
TimeUnit timeUnit,
Executor timeoutFailExecutor,
@Nullable String timeoutMsg) {
if (!future.isDone()) {
final ScheduledFuture<?> timeoutFuture =
Delayer.delay(
() -> timeoutFailExecutor.execute(new Timeout(future, timeoutMsg)),
timeout,
timeUnit);
future.whenComplete(
(T value, Throwable throwable) -> {
if (!timeoutFuture.isDone()) {
timeoutFuture.cancel(false);
}
});
}
return future;
} | 3.68 |
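A short usage sketch for the `orTimeout` helper above; the `FutureUtils` import path and the executor setup are assumptions (the package has moved between Flink versions), and any executor able to run the timeout action would do:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.flink.util.concurrent.FutureUtils; // package assumed; varies across Flink versions

public class OrTimeoutExample {
    public static void main(String[] args) {
        ExecutorService timeoutExecutor = Executors.newSingleThreadExecutor();

        // A future that its producer never completes.
        CompletableFuture<String> slowLookup = new CompletableFuture<>();

        // Fail the future exceptionally if no result arrives within 500 ms.
        FutureUtils.orTimeout(slowLookup, 500, TimeUnit.MILLISECONDS, timeoutExecutor, "lookup timed out");

        slowLookup.whenComplete((value, error) -> {
            if (error != null) {
                System.out.println("Completed exceptionally: " + error.getMessage());
            } else {
                System.out.println("Value: " + value);
            }
            timeoutExecutor.shutdown();
        });
    }
}
```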
hadoop_UnitsConversionUtil_compare | /**
* Compare a value in a given unit with a value in another unit. The return
* value is equivalent to the value returned by compareTo.
*
* @param unitA first unit
* @param valueA first value
* @param unitB second unit
* @param valueB second value
* @return +1, 0 or -1 depending on whether valueA (in unitA) is greater than,
* equal to or less than valueB (in unitB)
*/
public static int compare(String unitA, long valueA, String unitB,
long valueB) {
checkUnitArgument(unitA);
checkUnitArgument(unitB);
if (unitA.equals(unitB)) {
return Long.compare(valueA, valueB);
}
Converter unitAC = getConverter(unitA);
Converter unitBC = getConverter(unitB);
int unitAPos = SORTED_UNITS.indexOf(unitA);
int unitBPos = SORTED_UNITS.indexOf(unitB);
try {
long tmpA = valueA;
long tmpB = valueB;
if (unitAPos < unitBPos) {
tmpB = convert(unitB, unitA, valueB);
} else {
tmpA = convert(unitA, unitB, valueA);
}
return Long.compare(tmpA, tmpB);
} catch (IllegalArgumentException ie) {
BigInteger tmpA = BigInteger.valueOf(valueA);
BigInteger tmpB = BigInteger.valueOf(valueB);
if (unitAPos < unitBPos) {
tmpB = tmpB.multiply(BigInteger.valueOf(unitBC.numerator));
tmpB = tmpB.multiply(BigInteger.valueOf(unitAC.denominator));
tmpB = tmpB.divide(BigInteger.valueOf(unitBC.denominator));
tmpB = tmpB.divide(BigInteger.valueOf(unitAC.numerator));
} else {
tmpA = tmpA.multiply(BigInteger.valueOf(unitAC.numerator));
tmpA = tmpA.multiply(BigInteger.valueOf(unitBC.denominator));
tmpA = tmpA.divide(BigInteger.valueOf(unitAC.denominator));
tmpA = tmpA.divide(BigInteger.valueOf(unitBC.numerator));
}
return tmpA.compareTo(tmpB);
}
} | 3.68 |
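A small sketch of how the comparison above might be used; the `Mi`/`Gi` unit symbols and the `org.apache.hadoop.yarn.util` package are assumptions based on the usual Hadoop resource units:

```java
import org.apache.hadoop.yarn.util.UnitsConversionUtil; // package assumed

public class UnitsCompareExample {
    public static void main(String[] args) {
        // 2048 Mi and 2 Gi describe the same amount, so the result is 0.
        System.out.println(UnitsConversionUtil.compare("Mi", 2048, "Gi", 2));

        // 1 Gi is more than 512 Mi, so the result is positive.
        System.out.println(UnitsConversionUtil.compare("Gi", 1, "Mi", 512));

        // 1 Mi is less than 2 Gi, so the result is negative.
        System.out.println(UnitsConversionUtil.compare("Mi", 1, "Gi", 2));
    }
}
```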
framework_VComboBox_getMainWidth | /**
* Gets the width of the select in pixels, including the text area and the
* icon.
*
* @return The width in pixels
*/
private int getMainWidth() {
return getOffsetWidth();
} | 3.68 |
hbase_FileKeyStoreLoaderBuilderProvider_getBuilderForKeyStoreFileType | /**
* Returns a {@link FileKeyStoreLoader.Builder} that can build a loader which loads keys and certs
* from files of the given {@link KeyStoreFileType}.
* @param type the file type to load keys/certs from.
* @return a new Builder.
*/
static FileKeyStoreLoader.Builder<? extends FileKeyStoreLoader>
getBuilderForKeyStoreFileType(KeyStoreFileType type) {
switch (Objects.requireNonNull(type)) {
case JKS:
return new JKSFileLoader.Builder();
case PEM:
return new PEMFileLoader.Builder();
case PKCS12:
return new PKCS12FileLoader.Builder();
case BCFKS:
return new BCFKSFileLoader.Builder();
default:
throw new AssertionError("Unexpected StoreFileType: " + type.name());
}
} | 3.68 |
pulsar_ProxyExtensions_extension | /**
* Return the handler for the provided <tt>extension</tt>.
*
* @param extension the extension to use
* @return the extension to handle the provided extension
*/
public ProxyExtension extension(String extension) {
ProxyExtensionWithClassLoader h = extensions.get(extension);
if (null == h) {
return null;
} else {
return h.getExtension();
}
} | 3.68 |
graphhopper_Frequency_compareTo | /** must have a comparator since they go in a navigable set that is serialized */
@Override
public int compareTo(Frequency o) {
return this.start_time - o.start_time;
} | 3.68 |
pulsar_SystemTopicBasedTopicPoliciesService_readMorePoliciesAsync | /**
* This is an async method for the background reader to continue syncing new messages.
*
* Note: do not make any blocking calls here, because doing so blocks the
* #{@link SystemTopicBasedTopicPoliciesService#getTopicPoliciesAsync(TopicName)} method while loading a topic.
*/
private void readMorePoliciesAsync(SystemTopicClient.Reader<PulsarEvent> reader) {
reader.readNextAsync()
.thenAccept(msg -> {
refreshTopicPoliciesCache(msg);
notifyListener(msg);
})
.whenComplete((__, ex) -> {
if (ex == null) {
readMorePoliciesAsync(reader);
} else {
Throwable cause = FutureUtil.unwrapCompletionException(ex);
if (cause instanceof PulsarClientException.AlreadyClosedException) {
log.warn("Read more topic policies exception, close the read now!", ex);
cleanCacheAndCloseReader(
reader.getSystemTopic().getTopicName().getNamespaceObject(), false);
} else {
log.warn("Read more topic polices exception, read again.", ex);
readMorePoliciesAsync(reader);
}
}
});
} | 3.68 |
morf_ChangePrimaryKeyColumns_getTableName | /**
* @return the table name
*/
public String getTableName() {
return tableName;
} | 3.68 |
framework_Slot_setSpacing | /**
* Set the spacing for the slot. The spacing determines whether there should
* be empty space around the slot.
*
* @param spacing
* Should spacing be enabled
*/
public void setSpacing(boolean spacing) {
if (spacing && spacer == null) {
spacer = DOM.createDiv();
spacer.addClassName("v-spacing");
/*
* This has to be done here for the initial render. In other cases
* where the spacer already exists onAttach will handle it.
*/
getElement().getParentElement().insertBefore(spacer, getElement());
} else if (!spacing && spacer != null) {
// Remove listener before spacer to avoid memory leak
LayoutManager lm = layout.getLayoutManager();
if (lm != null && spacingResizeListener != null) {
lm.removeElementResizeListener(spacer, spacingResizeListener);
}
spacer.removeFromParent();
spacer = null;
}
} | 3.68 |
framework_AbstractProperty_getProperty | /**
* Gets the Property whose value has changed.
*
* @return source Property of the event.
*/
@Override
public Property getProperty() {
return (Property) getSource();
} | 3.68 |
hadoop_ManifestSuccessData_getMetrics | /**
* @return any metrics.
*/
public Map<String, Long> getMetrics() {
return metrics;
} | 3.68 |
open-banking-gateway_ServiceAccountsOper_createOrActivateOrDeactivateServiceAccounts | // Unfortunately @PostConstruct can't have Transactional annotation
@PostConstruct
public void createOrActivateOrDeactivateServiceAccounts() {
txOper.execute(callback -> {
users.deactivateAllServiceAccounts();
if (null == serviceAccounts.getAccounts()) {
return null;
}
for (ServiceAccountsConfig.ServiceAccount account : serviceAccounts.getAccounts()) {
UserEntity user = users.findById(account.getLogin())
.map(it -> authorizeService.updatePasswordButDontSave(it, account.getPassword()))
.orElseGet(() -> authorizeService.createUserEntityWithPasswordEnabledButDontSave(account.getLogin(), account.getPassword()));
user.setActive(true);
user.setServiceAccount(true);
users.save(user);
}
return null;
});
} | 3.68 |
hbase_WALEdit_isReplay | /**
* @return True when current WALEdit is created by log replay. Replication skips WALEdits from
* replay.
*/
public boolean isReplay() {
return this.replay;
} | 3.68 |
dubbo_NetUtils_matchIpExpression | /**
* Check if address matches with specified pattern, currently only supports ipv4, use {@link this#matchIpExpression(String, String, int)} for ipv6 addresses.
*
* @param pattern CIDR pattern
* @param address 'ip:port'
* @return true if address matches with the pattern
*/
public static boolean matchIpExpression(String pattern, String address) throws UnknownHostException {
if (address == null) {
return false;
}
String host = address;
int port = 0;
// only works for ipv4 address with 'ip:port' format
if (address.contains(":")) { // split only when a port is actually present in 'ip:port'
String[] hostPort = address.split(":");
host = hostPort[0];
port = StringUtils.parseInteger(hostPort[1]);
}
// if the pattern is subnet format, it will not be allowed to config port param in pattern.
if (pattern.contains("/")) {
CIDRUtils utils = new CIDRUtils(pattern);
return utils.isInRange(host);
}
return matchIpRange(pattern, host, port);
} | 3.68 |
querydsl_ComparableExpression_notBetween | /**
* Create a {@code this not between from and to} expression
*
* <p>Is equivalent to {@code this < from || this > to}</p>
*
* @param from inclusive start of range
* @param to inclusive end of range
* @return this not between from and to
*/
public BooleanExpression notBetween(Expression<T> from, Expression<T> to) {
return between(from, to).not();
} | 3.68 |
graphhopper_EdgeChangeBuilder_addRemovedEdges | /**
* Adds the ids of the removed edges at the real tower nodes. This is needed so that virtual nodes cannot be
* 'skipped' by just using the original edges, and to prevent u-turns at the real nodes adjacent to the
* virtual ones.
*/
private void addRemovedEdges(int towerNode) {
if (isVirtualNode(towerNode))
throw new IllegalStateException("Node should not be virtual:" + towerNode + ", " + edgeChangesAtRealNodes);
QueryOverlay.EdgeChanges edgeChanges = edgeChangesAtRealNodes.get(towerNode);
List<EdgeIteratorState> existingEdges = edgeChanges.getAdditionalEdges();
IntArrayList removedEdges = edgeChanges.getRemovedEdges();
for (EdgeIteratorState existingEdge : existingEdges) {
removedEdges.add(getClosestEdge(existingEdge.getAdjNode()));
}
} | 3.68 |
flink_GenericArrayData_toObjectArray | /**
* Converts this {@link GenericArrayData} into an array of Java {@link Object}.
*
* <p>The method will convert a primitive array into an object array. But it will not convert
* internal data structures into external data structures (e.g. {@link StringData} to {@link
* String}).
*/
public Object[] toObjectArray() {
if (isPrimitiveArray) {
Class<?> arrayClass = array.getClass();
if (int[].class.equals(arrayClass)) {
return ArrayUtils.toObject((int[]) array);
} else if (long[].class.equals(arrayClass)) {
return ArrayUtils.toObject((long[]) array);
} else if (float[].class.equals(arrayClass)) {
return ArrayUtils.toObject((float[]) array);
} else if (double[].class.equals(arrayClass)) {
return ArrayUtils.toObject((double[]) array);
} else if (short[].class.equals(arrayClass)) {
return ArrayUtils.toObject((short[]) array);
} else if (byte[].class.equals(arrayClass)) {
return ArrayUtils.toObject((byte[]) array);
} else if (boolean[].class.equals(arrayClass)) {
return ArrayUtils.toObject((boolean[]) array);
}
throw new RuntimeException("Unsupported primitive array: " + arrayClass);
} else {
return (Object[]) array;
}
} | 3.68 |
hadoop_OBSBlockOutputStream_mockPutPartError | /**
* Set mock error.
*
* @param isException mock error
*/
@VisibleForTesting
public void mockPutPartError(final boolean isException) {
this.mockUploadPartError = isException;
} | 3.68 |
framework_Table_setColumnAlignments | /**
* Sets the column alignments.
*
* <p>
* The amount of items in the array must match the amount of properties
* identified by {@link #getVisibleColumns()}. The possible values for the
* alignments include:
* <ul>
* <li>{@link Align#LEFT}: Left alignment</li>
* <li>{@link Align#CENTER}: Centered</li>
* <li>{@link Align#RIGHT}: Right alignment</li>
* </ul>
* The alignments default to {@link Align#LEFT}
* </p>
*
* @param columnAlignments
* the Column alignments array.
*/
public void setColumnAlignments(Align... columnAlignments) {
if (columnAlignments.length != visibleColumns.size()) {
throw new IllegalArgumentException(
"The length of the alignments array must match the number of visible columns");
}
// Resets the alignments
final Map<Object, Align> newCA = new HashMap<Object, Align>();
int i = 0;
for (final Object column : visibleColumns) {
if (i >= columnAlignments.length) {
break;
}
newCA.put(column, columnAlignments[i++]);
}
this.columnAlignments = newCA;
// Assures the visual refresh. No need to reset the page buffer before
// as the content has not changed, only the alignments.
refreshRenderedCells();
} | 3.68 |
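A usage sketch for `setColumnAlignments` above, assuming a Vaadin 7-style `Table` with three invented container properties; one alignment is supplied per visible column, in order:

```java
import com.vaadin.ui.Table;

public class ColumnAlignmentExample {
    public static Table buildTable() {
        Table table = new Table("Results");
        table.addContainerProperty("name", String.class, "");
        table.addContainerProperty("count", Integer.class, 0);
        table.addContainerProperty("ratio", Double.class, 0.0);

        // One alignment per visible column, in the same order as getVisibleColumns().
        table.setColumnAlignments(Table.Align.LEFT, Table.Align.RIGHT, Table.Align.RIGHT);
        return table;
    }
}
```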
AreaShop_SignsFeature_needsPeriodicUpdate | /**
* Check if any of the signs need periodic updating.
* @return true if one or more of the signs need periodic updating, otherwise false
*/
public boolean needsPeriodicUpdate() {
boolean result = false;
for(RegionSign sign : signs.values()) {
result |= sign.needsPeriodicUpdate();
}
return result;
} | 3.68 |
flink_DemultiplexingRecordDeserializer_getNextRecord | /** Summarizes the status and watermarks of all virtual channels. */
@Override
public DeserializationResult getNextRecord(DeserializationDelegate<StreamElement> delegate)
throws IOException {
DeserializationResult result;
do {
result = currentVirtualChannel.getNextRecord(delegate);
if (result.isFullRecord()) {
final StreamElement element = delegate.getInstance();
if (element.isRecord() || element.isLatencyMarker()) {
return result;
} else if (element.isWatermark()) {
// basically, do not emit a watermark if not all virtual channel are past it
final Watermark minWatermark =
channels.values().stream()
.map(virtualChannel -> virtualChannel.lastWatermark)
.min(Comparator.comparing(Watermark::getTimestamp))
.orElseThrow(
() ->
new IllegalStateException(
"Should always have a watermark"));
// at least one virtual channel has no watermark, don't emit any watermark yet
if (minWatermark.equals(Watermark.UNINITIALIZED)) {
continue;
}
delegate.setInstance(minWatermark);
return result;
} else if (element.isWatermarkStatus()) {
// summarize statuses across all virtual channels
// duplicate statuses are filtered in StatusWatermarkValve
if (channels.values().stream().anyMatch(d -> d.watermarkStatus.isActive())) {
delegate.setInstance(WatermarkStatus.ACTIVE);
}
return result;
}
}
// loop is only re-executed for suppressed watermark
} while (!result.isBufferConsumed());
return DeserializationResult.PARTIAL_RECORD;
} | 3.68 |
morf_HumanReadableStatementProducer_renameIndex | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#renameIndex(java.lang.String, java.lang.String, java.lang.String)
*/
@Override
public void renameIndex(String tableName, String fromIndexName, String toIndexName) {
consumer.schemaChange(HumanReadableStatementHelper.generateRenameIndexString(tableName, fromIndexName, toIndexName));
} | 3.68 |
flink_StateBackendLoader_fromApplicationOrConfigOrDefault | /**
* This is the state backend loader that loads a {@link DelegatingStateBackend} wrapping the
* state backend loaded from {@link
* StateBackendLoader#loadFromApplicationOrConfigOrDefaultInternal} when delegation is enabled.
* If delegation is not enabled, the underlying wrapped state backend is returned instead.
*
* @param fromApplication StateBackend defined from application
* @param isChangelogStateBackendEnableFromApplication whether to enable the
* ChangelogStateBackend from application
* @param config The configuration to load the state backend from
* @param classLoader The class loader that should be used to load the state backend
* @param logger Optionally, a logger to log actions to (may be null)
* @return The instantiated state backend.
* @throws DynamicCodeLoadingException Thrown if a state backend (factory) is configured and the
* (factory) class was not found or could not be instantiated
* @throws IllegalConfigurationException May be thrown by the StateBackendFactory when creating
* / configuring the state backend in the factory
* @throws IOException May be thrown by the StateBackendFactory when instantiating the state
* backend
*/
public static StateBackend fromApplicationOrConfigOrDefault(
@Nullable StateBackend fromApplication,
TernaryBoolean isChangelogStateBackendEnableFromApplication,
Configuration config,
ClassLoader classLoader,
@Nullable Logger logger)
throws IllegalConfigurationException, DynamicCodeLoadingException, IOException {
StateBackend rootBackend =
loadFromApplicationOrConfigOrDefaultInternal(
fromApplication, config, classLoader, logger);
// Configuration from application will override the one from env.
boolean enableChangeLog =
TernaryBoolean.TRUE.equals(isChangelogStateBackendEnableFromApplication)
|| (TernaryBoolean.UNDEFINED.equals(
isChangelogStateBackendEnableFromApplication)
&& config.get(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG));
StateBackend backend;
if (enableChangeLog) {
backend = wrapStateBackend(rootBackend, classLoader, CHANGELOG_STATE_BACKEND);
LOG.info(
"State backend loader loads {} to delegate {}",
backend.getClass().getSimpleName(),
rootBackend.getClass().getSimpleName());
} else {
backend = rootBackend;
LOG.info(
"State backend loader loads the state backend as {}",
backend.getClass().getSimpleName());
}
return backend;
} | 3.68 |
hadoop_LocatedFileStatus_getBlockLocations | /**
* Get the file's block locations
*
* In HDFS, the returned BlockLocation will have different formats for
* replicated and erasure coded file.
* Please refer to
* {@link FileSystem#getFileBlockLocations(FileStatus, long, long)}
* for more details.
*
* @return the file's block locations
*/
public BlockLocation[] getBlockLocations() {
return locations;
} | 3.68 |
querydsl_Expressions_asTime | /**
* Create a new TimeExpression
*
* @param value the time
* @return new TimeExpression
*/
public static <T extends Comparable<?>> TimeExpression<T> asTime(T value) {
return asTime(constant(value));
} | 3.68 |
hbase_ProcedureExecutor_removeChore | /**
* Remove a chore procedure from the executor
* @param chore the chore to remove
* @return whether the chore is removed, or it will be removed later
*/
public boolean removeChore(@Nullable ProcedureInMemoryChore<TEnvironment> chore) {
if (chore == null) {
return true;
}
chore.setState(ProcedureState.SUCCESS);
return timeoutExecutor.remove(chore);
} | 3.68 |
dubbo_DubboBootstrap_provider | // {@link ProviderConfig} correlative methods
public Module provider(Consumer<ProviderBuilder> builderConsumer) {
return provider(null, builderConsumer);
} | 3.68 |
hudi_HoodieAvroUtils_getNestedFieldVal | /**
* Obtain value of the provided field, denoted by dot notation. e.g: a.b.c
*/
public static Object getNestedFieldVal(GenericRecord record, String fieldName, boolean returnNullIfNotFound, boolean consistentLogicalTimestampEnabled) {
String[] parts = fieldName.split("\\.");
GenericRecord valueNode = record;
for (int i = 0; i < parts.length; i++) {
String part = parts[i];
Object val;
try {
val = HoodieAvroUtils.getFieldVal(valueNode, part, returnNullIfNotFound);
} catch (AvroRuntimeException e) {
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException(
fieldName + "(Part -" + parts[i] + ") field not found in record. Acceptable fields were :"
+ valueNode.getSchema().getFields().stream().map(Field::name).collect(Collectors.toList()));
}
}
if (i == parts.length - 1) {
// return, if last part of name
if (val == null) {
return null;
} else {
Schema fieldSchema = valueNode.getSchema().getField(part).schema();
return convertValueForSpecificDataTypes(fieldSchema, val, consistentLogicalTimestampEnabled);
}
} else {
if (!(val instanceof GenericRecord)) {
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException("Cannot find a record at part value :" + part);
}
} else {
valueNode = (GenericRecord) val;
}
}
}
// This can only be reached if the length of parts is 0
if (returnNullIfNotFound) {
return null;
} else {
throw new HoodieException(
fieldName + " field not found in record. Acceptable fields were :"
+ valueNode.getSchema().getFields().stream().map(Field::name).collect(Collectors.toList()));
}
} | 3.68 |
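A sketch of the dot-notation lookup above on a hand-built Avro record; the schema and field names are invented for illustration, and the `HoodieAvroUtils` import path is assumed:

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

import org.apache.hudi.avro.HoodieAvroUtils; // package assumed

public class NestedFieldValExample {
    public static void main(String[] args) {
        // Schema: Outer { b: Inner { c: string } }
        Schema inner = SchemaBuilder.record("Inner").fields()
                .requiredString("c")
                .endRecord();
        Schema outer = SchemaBuilder.record("Outer").fields()
                .name("b").type(inner).noDefault()
                .endRecord();

        GenericRecord b = new GenericData.Record(inner);
        b.put("c", "value");
        GenericRecord record = new GenericData.Record(outer);
        record.put("b", b);

        // Resolves "b.c" by walking the nested records; prints "value".
        Object v = HoodieAvroUtils.getNestedFieldVal(record, "b.c", false, false);
        System.out.println(v);

        // With returnNullIfNotFound=true a missing field yields null instead of an exception.
        System.out.println(HoodieAvroUtils.getNestedFieldVal(record, "b.missing", true, false));
    }
}
```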
hbase_CoprocessorWhitelistMasterObserver_verifyCoprocessors | /**
* Perform the validation checks for a coprocessor to determine if the path is white listed or
* not.
* @throws IOException if path is not included in whitelist or a failure occurs in processing
* @param ctx as passed in from the coprocessor
* @param htd as passed in from the coprocessor
*/
private static void verifyCoprocessors(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableDescriptor htd) throws IOException {
Collection<String> paths = ctx.getEnvironment().getConfiguration()
.getStringCollection(CP_COPROCESSOR_WHITELIST_PATHS_KEY);
for (CoprocessorDescriptor cp : htd.getCoprocessorDescriptors()) {
if (cp.getJarPath().isPresent()) {
if (paths.stream().noneMatch(p -> {
Path wlPath = new Path(p);
if (validatePath(new Path(cp.getJarPath().get()), wlPath)) {
LOG.debug(String.format("Coprocessor %s found in directory %s", cp.getClassName(), p));
return true;
}
return false;
})) {
throw new IOException(String.format("Loading %s DENIED in %s", cp.getClassName(),
CP_COPROCESSOR_WHITELIST_PATHS_KEY));
}
}
}
} | 3.68 |
flink_OverWindowPartitionedOrderedPreceding_following | /**
* Set the following offset (based on time or row-count intervals) for over window.
*
* @param following following offset that relative to the current row.
* @return an over window with defined following
*/
public OverWindowPartitionedOrderedPreceding following(Expression following) {
optionalFollowing = Optional.of(following);
return this;
} | 3.68 |
flink_Pattern_next | /**
* Appends a new group pattern to the existing one. The new pattern enforces strict temporal
* contiguity. This means that the whole pattern sequence matches only if an event which matches
* this pattern directly follows the preceding matching event. Thus, there cannot be any events
* in between two matching events.
*
* @param group the pattern to append
* @return A new pattern which is appended to this one
*/
public GroupPattern<T, F> next(Pattern<T, F> group) {
return new GroupPattern<>(this, group, ConsumingStrategy.STRICT, afterMatchSkipStrategy);
} | 3.68 |
framework_Calendar_setFirstDayOfWeek | /**
* Allows setting the first day of the week independently of the Locale. Set to
* null if you want the first day of the week to be defined by the locale.
*
* @since 7.6
* @param dayOfWeek
* any of java.util.Calendar.SUNDAY..java.util.Calendar.SATURDAY
* or null to revert to default first day of week by locale
*/
public void setFirstDayOfWeek(Integer dayOfWeek) {
int minimalSupported = java.util.Calendar.SUNDAY;
int maximalSupported = java.util.Calendar.SATURDAY;
if (dayOfWeek != null && (dayOfWeek < minimalSupported
|| dayOfWeek > maximalSupported)) {
throw new IllegalArgumentException(String.format(
"Day of week must be between %s and %s. Actually received: %s",
minimalSupported, maximalSupported, dayOfWeek));
}
customFirstDayOfWeek = dayOfWeek;
markAsDirty();
} | 3.68 |
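A short sketch of the override above, assuming the Vaadin `Calendar` UI component; the caption is made up and the constant comes from `java.util.Calendar`:

```java
import com.vaadin.ui.Calendar;

public class FirstDayOfWeekExample {
    public static Calendar buildCalendar() {
        Calendar calendar = new Calendar("Team calendar");
        // Force weeks to start on Monday regardless of the UI locale.
        calendar.setFirstDayOfWeek(java.util.Calendar.MONDAY);
        // Passing null would revert to the locale-defined first day of week.
        return calendar;
    }
}
```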
flink_InstantiationUtil_createCopyWritable | /**
* Clones the given writable using the {@link IOReadableWritable serialization}.
*
* @param original Object to clone
* @param <T> Type of the object to clone
* @return Cloned object
* @throws IOException Thrown if the serialization fails.
*/
public static <T extends IOReadableWritable> T createCopyWritable(T original)
throws IOException {
if (original == null) {
return null;
}
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (DataOutputViewStreamWrapper out = new DataOutputViewStreamWrapper(baos)) {
original.write(out);
}
final ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
try (DataInputViewStreamWrapper in = new DataInputViewStreamWrapper(bais)) {
@SuppressWarnings("unchecked")
T copy = (T) instantiate(original.getClass());
copy.read(in);
return copy;
}
} | 3.68 |
framework_UIDL_getIntArrayVariable | /**
* Gets the value of the named variable.
*
* @param name
* the name of the variable
* @return the value of the variable
*/
public int[] getIntArrayVariable(String name) {
return var().getIntArray(name);
} | 3.68 |
hbase_ZKUtil_submitBatchedMultiOrSequential | /**
* Chunks the provided {@code ops} when their approximate size exceeds the configured limit.
* Take caution that this can ONLY be used for operations where atomicity is not important, e.g.
* deletions. It must not be used when atomicity of the operations is critical.
* @param zkw reference to the {@link ZKWatcher} which contains
* configuration and constants
* @param runSequentialOnMultiFailure if true when we get a ZooKeeper exception that could retry
* the operations one-by-one (sequentially)
* @param ops list of ZKUtilOp {@link ZKUtilOp} to partition while
* submitting batched multi or sequential
* @throws KeeperException unexpected ZooKeeper Exception / Zookeeper unreachable
*/
private static void submitBatchedMultiOrSequential(ZKWatcher zkw,
boolean runSequentialOnMultiFailure, List<ZKUtilOp> ops) throws KeeperException {
// at least one element should exist
if (ops.isEmpty()) {
return;
}
final int maxMultiSize = zkw.getRecoverableZooKeeper().getMaxMultiSizeLimit();
// Batch up the items to avoid smashing through jute.maxbuffer with too many Ops.
final List<List<ZKUtilOp>> batchedOps = partitionOps(ops, maxMultiSize);
// Would use forEach() but have to handle KeeperException
for (List<ZKUtilOp> batch : batchedOps) {
multiOrSequential(zkw, batch, runSequentialOnMultiFailure);
}
} | 3.68 |
hudi_HoodieGauge_getValue | /**
* Returns the metric's current value.
*
* @return the metric's current value
*/
@Override
public T getValue() {
return value;
} | 3.68 |
hudi_WriteStatus_markFailure | /**
* Used by native write handles like HoodieRowCreateHandle and HoodieRowDataCreateHandle.
*
* @see WriteStatus#markFailure(HoodieRecord, Throwable, Option)
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.EVOLVING)
public void markFailure(String recordKey, String partitionPath, Throwable t) {
if (failedRecords.isEmpty() || (random.nextDouble() <= failureFraction)) {
// Guaranteed to have at-least one error
HoodieRecordDelegate recordDelegate = HoodieRecordDelegate.create(recordKey, partitionPath);
failedRecords.add(Pair.of(recordDelegate, t));
errors.put(recordDelegate.getHoodieKey(), t);
}
updateStatsForFailure();
} | 3.68 |
flink_EventTimeTriggers_withLateFirings | /**
* Creates a new {@code Trigger} like this one, except that it fires repeatedly whenever the
* given {@code Trigger} fires after the watermark has passed the end of the window.
*/
public Trigger<W> withLateFirings(Trigger<W> lateFirings) {
checkNotNull(lateFirings);
if (lateFirings instanceof ElementTriggers.EveryElement) {
// every-element late firing can be ignored
return this;
} else {
return new AfterEndOfWindowEarlyAndLate<>(earlyTrigger, lateFirings);
}
} | 3.68 |
hbase_MultiRowRangeFilter_get | /**
* Gets the RowRange at the given offset.
*/
@SuppressWarnings({ "unchecked", "TypeParameterUnusedInFormals" })
public <T extends BasicRowRange> T get(int i) {
return (T) ranges.get(i);
} | 3.68 |
graphhopper_ResponsePath_getDescend | /**
* This method returns the total elevation change (going downwards) in meters.
* <p>
*
* @return descent in meters
*/
public double getDescend() {
return descend;
} | 3.68 |
hbase_QuotaObserverChore_getTablesByNamespace | /**
* Returns a view of all tables that reside in a namespace with a namespace quota, grouped by
* the namespace itself.
*/
public Multimap<String, TableName> getTablesByNamespace() {
Multimap<String, TableName> tablesByNS = HashMultimap.create();
for (TableName tn : tablesWithNamespaceQuotas) {
tablesByNS.put(tn.getNamespaceAsString(), tn);
}
return tablesByNS;
} | 3.68 |
pulsar_WaterMarkEventGenerator_computeWaterMarkTs | /**
* Computes the min ts across all input topics.
*/
private long computeWaterMarkTs() {
long ts = 0;
// only if some data has arrived on each input topic
if (topicToTs.size() >= inputTopics.size()) {
ts = Long.MAX_VALUE;
for (Map.Entry<String, Long> entry : topicToTs.entrySet()) {
ts = Math.min(ts, entry.getValue());
}
}
return ts - eventTsLagMs;
} | 3.68 |
hbase_Table_close | /**
* Releases any resources held or pending changes in internal buffers.
* @throws IOException if a remote or network exception occurs.
*/
@Override
default void close() throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
hadoop_RouterObserverReadProxyProvider_autoMsyncIfNecessary | /**
* This will call {@link ClientProtocol#msync()} on the active NameNode
* (via the {@link #innerProxy}) to update the state of this client, only
* if at least {@link #autoMsyncPeriodMs} ms has elapsed since the last time
* an msync was performed.
*
* @see #autoMsyncPeriodMs
*/
private void autoMsyncIfNecessary() throws IOException {
if (autoMsyncPeriodMs == 0) {
// Always msync
getProxyAsClientProtocol(innerProxy.getProxy().proxy).msync();
} else if (autoMsyncPeriodMs > 0) {
if (Time.monotonicNow() - lastMsyncTimeMs > autoMsyncPeriodMs) {
synchronized (this) {
// Use a synchronized block so that only one thread will msync
// if many operations are submitted around the same time.
// Re-check the entry criterion since the status may have changed
// while waiting for the lock.
if (Time.monotonicNow() - lastMsyncTimeMs > autoMsyncPeriodMs) {
getProxyAsClientProtocol(innerProxy.getProxy().proxy).msync();
lastMsyncTimeMs = Time.monotonicNow();
}
}
}
}
} | 3.68 |
framework_AbstractSelect_setItemCaption | /**
* Override the caption of an item. Setting caption explicitly overrides id,
* item and index captions.
*
* @param itemId
* the id of the item to be recaptioned.
* @param caption
* the New caption.
*/
public void setItemCaption(Object itemId, String caption) {
if (itemId != null) {
itemCaptions.put(itemId, caption);
markAsDirty();
}
} | 3.68 |
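A quick sketch of explicit item captions, assuming a Vaadin 7-style `ComboBox` (which extends `AbstractSelect`); the ids and captions are made up:

```java
import com.vaadin.ui.ComboBox;

public class ItemCaptionExample {
    public static ComboBox buildStatusSelect() {
        ComboBox status = new ComboBox("Status");
        status.addItem(1);
        status.addItem(2);
        // Explicit captions override id, item and index based captions.
        status.setItemCaption(1, "Open");
        status.setItemCaption(2, "Closed");
        return status;
    }
}
```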
querydsl_OrderSpecifier_getTarget | /**
* Get the target expression of this OrderSpecifier
*
* @return target expression
*/
public Expression<T> getTarget() {
return target;
} | 3.68 |
hudi_OptionsResolver_isDeltaTimeCompaction | /**
* Returns whether the compaction strategy is based on elapsed delta time.
*/
public static boolean isDeltaTimeCompaction(Configuration conf) {
final String strategy = conf.getString(FlinkOptions.COMPACTION_TRIGGER_STRATEGY).toLowerCase(Locale.ROOT);
return FlinkOptions.TIME_ELAPSED.equals(strategy) || FlinkOptions.NUM_OR_TIME.equals(strategy);
} | 3.68 |
hbase_ByteBufferUtils_intFitsIn | /**
* Checks how many bytes are required to store the given value.
* @param value the value whose size will be tested.
* @return How many bytes are required to store value.
*/
public static int intFitsIn(final int value) {
if (value < 0) {
return 4;
}
if (value < (1 << (2 * 8))) {
if (value < (1 << (1 * 8))) {
return 1;
}
return 2;
}
if (value <= (1 << (3 * 8))) {
return 3;
}
return 4;
} | 3.68 |
flink_ChangelogMode_newBuilder | /** Builder for configuring and creating instances of {@link ChangelogMode}. */
public static Builder newBuilder() {
return new Builder();
} | 3.68 |
framework_GridConnector_getDetailsListener | /**
* Gets the listener used by this connector for tracking when row detail
* visibility changes.
*
* @since 7.5.0
* @return the used details listener
*/
public DetailsListener getDetailsListener() {
return detailsListener;
} | 3.68 |
hbase_HBaseTestingUtility_shutdownMiniMapReduceCluster | /**
* Stops the previously started <code>MiniMRCluster</code>.
*/
public void shutdownMiniMapReduceCluster() {
if (mrCluster != null) {
LOG.info("Stopping mini mapreduce cluster...");
mrCluster.shutdown();
mrCluster = null;
LOG.info("Mini mapreduce cluster stopped");
}
// Restore configuration to point to local jobtracker
conf.set("mapreduce.jobtracker.address", "local");
} | 3.68 |
framework_SQLUtil_escapeSQL | /**
* Escapes different special characters in strings that are passed to SQL.
* Replaces the following:
*
* <list>
* <li>' is replaced with ''</li>
* <li>\x00 is removed</li>
* <li>\ is replaced with \\</li>
* <li>" is replaced with \"</li>
* <li>\x1a is removed</li> </list>
*
* Also note! The escaping done here may or may not be enough to prevent any
* and all SQL injections so it is recommended to check user input before
* giving it to the SQLContainer/TableQuery.
*
* @param constant
* @return \\\'\'
*/
public static String escapeSQL(String constant) {
if (constant == null) {
return null;
}
String fixedConstant = constant;
fixedConstant = fixedConstant.replaceAll("\\\\x00", "");
fixedConstant = fixedConstant.replaceAll("\\\\x1a", "");
fixedConstant = fixedConstant.replaceAll("'", "''");
fixedConstant = fixedConstant.replaceAll("\\\\", "\\\\\\\\");
fixedConstant = fixedConstant.replaceAll("\\\"", "\\\\\"");
return fixedConstant;
} | 3.68 |
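A small sketch of the escaping above; the `SQLUtil` import path is an assumption, and the expected outputs in the comments simply follow the replacement rules listed in the javadoc:

```java
import com.vaadin.data.util.sqlcontainer.SQLUtil; // package assumed; adjust to your framework version

public class EscapeSqlExample {
    public static void main(String[] args) {
        // ' is doubled: O'Brien -> O''Brien
        System.out.println(SQLUtil.escapeSQL("O'Brien"));

        // Backslashes are doubled and double quotes get a backslash prefix.
        System.out.println(SQLUtil.escapeSQL("path\\to\\\"file\""));

        // null input passes through as null.
        System.out.println(SQLUtil.escapeSQL(null));
    }
}
```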
pulsar_ResourceUsage_compareTo | /**
* this may be wrong since we are comparing available and not the usage.
*
* @param o
* @return
*/
public int compareTo(ResourceUsage o) {
double required = o.limit - o.usage;
double available = limit - usage;
return Double.compare(available, required);
} | 3.68 |
MagicPlugin_PreLoadEvent_registerBlockBlockManager | /**
* Register a BlockBreakManager, for controlling whether or not players can break blocks with magic.
*
* @param manager The manager to add.
*/
public void registerBlockBlockManager(BlockBreakManager manager) {
blockBreakManagers.add(manager);
} | 3.68 |
hbase_RegionStates_removeServer | /**
* Called by SCP at end of successful processing.
*/
public void removeServer(final ServerName serverName) {
serverMap.remove(serverName);
} | 3.68 |
streampipes_UserStorage_checkUser | /**
* @param username
* @return True if user exists exactly once, false otherwise
*/
@Override
public boolean checkUser(String username) {
List<Principal> users = findByKey(viewName, username.toLowerCase(), Principal.class);
return users.size() == 1;
} | 3.68 |
AreaShop_SignsFeature_getSignByLocation | /**
* Get a sign by a location.
* @param location The location to get the sign for
* @return The RegionSign that is at the location, or null if none
*/
public static RegionSign getSignByLocation(Location location) {
return allSigns.get(locationToString(location));
} | 3.68 |
framework_HierarchicalContainer_setChildrenAllowed | /**
* <p>
* Sets the given Item's capability to have children. If the Item identified
* with the itemId already has children and childrenAllowed is false,
* this method fails and <code>false</code> is returned; the children must
* be first explicitly removed with
* {@link #setParent(Object itemId, Object newParentId)} or
* {@link Container#removeItem(Object itemId)}.
* </p>
*
* @param itemId
* the ID of the Item in the container whose child capability is
* to be set.
* @param childrenAllowed
* the boolean value specifying if the Item can have children or
* not.
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
*/
@Override
public boolean setChildrenAllowed(Object itemId, boolean childrenAllowed) {
// Checks that the item is in the container
if (!containsId(itemId)) {
return false;
}
// Updates status
if (childrenAllowed) {
noChildrenAllowed.remove(itemId);
} else {
noChildrenAllowed.add(itemId);
}
return true;
} | 3.68 |
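A compact sketch of the call above in context; item ids are auto-generated by `addItem()`, and the folder/file naming is just for illustration:

```java
import com.vaadin.data.util.HierarchicalContainer;

public class ChildrenAllowedExample {
    public static HierarchicalContainer buildTree() {
        HierarchicalContainer container = new HierarchicalContainer();

        Object folder = container.addItem();
        Object file = container.addItem();
        container.setParent(file, folder);

        // Mark the file as a leaf; this succeeds because it has no children.
        container.setChildrenAllowed(file, false);

        // The call returns false for ids that are not in the container.
        boolean ok = container.setChildrenAllowed("missing-id", false);
        System.out.println(ok); // false
        return container;
    }
}
```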
flink_CallExpression_temporary | /**
* Creates a {@link CallExpression} to a temporary function (potentially shadowing a {@link
* Catalog} function or providing a system function).
*/
public static CallExpression temporary(
FunctionIdentifier functionIdentifier,
FunctionDefinition functionDefinition,
List<ResolvedExpression> args,
DataType dataType) {
return new CallExpression(
true,
Preconditions.checkNotNull(
functionIdentifier,
"Function identifier must not be null for temporary functions."),
functionDefinition,
args,
dataType);
} | 3.68 |
hudi_AbstractTableFileSystemView_getCompletionTime | /**
* Returns the completion time for instant.
*/
public Option<String> getCompletionTime(String instantTime) {
return this.completionTimeQueryView.getCompletionTime(instantTime, instantTime);
} | 3.68 |