name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68)
---|---|---|
dubbo_ReferenceAnnotationBeanPostProcessor_getReferenceBeans | /**
* Gets all beans of {@link ReferenceBean}
*
* @deprecated use {@link ReferenceBeanManager#getReferences()} instead
*/
@Deprecated
public Collection<ReferenceBean<?>> getReferenceBeans() {
return Collections.emptyList();
} | 3.68 |
flink_ExtractionUtils_resolveVariable | /** Resolves a {@link TypeVariable} using the given type hierarchy if possible. */
static Type resolveVariable(List<Type> typeHierarchy, TypeVariable<?> variable) {
// iterate through hierarchy from top to bottom until type variable gets a non-variable
// assigned
for (int i = typeHierarchy.size() - 1; i >= 0; i--) {
final Type currentType = typeHierarchy.get(i);
if (currentType instanceof ParameterizedType) {
final Type resolvedType =
resolveVariableInParameterizedType(
variable, (ParameterizedType) currentType);
if (resolvedType instanceof TypeVariable) {
// follow type variables transitively
variable = (TypeVariable<?>) resolvedType;
} else if (resolvedType != null) {
return resolvedType;
}
}
}
// unresolved variable
return variable;
} | 3.68 |
rocketmq-connect_SourceOffsetCompute_sourceQueryPartitions | /**
* Builds the source partition map for a query-based source.
*
* @param prefix topic prefix stored under the {@code TOPIC} key
* @param offsetSuffix suffix used to build the query-name partition key
* @return the source partition map
*/
public static Map<String, String> sourceQueryPartitions(String prefix, String offsetSuffix) {
// Collections.singletonMap is immutable and would throw on the put() below, so use a mutable map.
Map<String, String> partition = new HashMap<>();
partition.put(JdbcSourceConfigConstants.QUERY_NAME_KEY(offsetSuffix),
JdbcSourceConfigConstants.QUERY_NAME_VALUE);
partition.put(TOPIC, prefix);
return partition;
} | 3.68 |
flink_ProgramOptionsUtils_containsPythonDependencyOptions | /**
* @return True if the commandline contains "-pyfs", "-pyarch", "-pyreq", "-pyexec",
* "-pyclientexec" or "-pypath" options, false otherwise.
*/
public static boolean containsPythonDependencyOptions(CommandLine line) {
return line.hasOption(PYFILES_OPTION.getOpt())
|| line.hasOption(PYREQUIREMENTS_OPTION.getOpt())
|| line.hasOption(PYARCHIVE_OPTION.getOpt())
|| line.hasOption(PYEXEC_OPTION.getOpt())
|| line.hasOption(PYCLIENTEXEC_OPTION.getOpt())
|| line.hasOption(PYTHON_PATH.getOpt());
} | 3.68 |
framework_VAbstractCalendarPanel_handleKeyPress | /**
* Handles the keypress from both the onKeyPress event and the onKeyDown
* event.
*
* @param event
* The keydown/keypress event
*/
private void handleKeyPress(DomEvent<?> event) {
// Check tabs
int keycode = event.getNativeEvent().getKeyCode();
if (keycode == KeyCodes.KEY_TAB
&& event.getNativeEvent().getShiftKey()) {
if (onTabOut(event)) {
return;
}
}
// Handle the navigation
if (handleNavigation(keycode,
event.getNativeEvent().getCtrlKey()
|| event.getNativeEvent().getMetaKey(),
event.getNativeEvent().getShiftKey())) {
event.preventDefault();
}
} | 3.68 |
framework_VAbstractCalendarPanel_onKeyPress | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.KeyPressHandler#onKeyPress(com.google
* .gwt.event.dom.client.KeyPressEvent)
*/
@Override
public void onKeyPress(KeyPressEvent event) {
handleKeyPress(event);
} | 3.68 |
hbase_StoreFileReader_readCompleted | /**
* Indicate that the scanner has finished reading with this reader. We need to decrement the ref
* count, and also, if this is not the common pread reader, we should close it.
*/
void readCompleted() {
storeFileInfo.decreaseRefCount();
if (context.getReaderType() == ReaderType.STREAM) {
try {
reader.close(false);
} catch (IOException e) {
LOG.warn("failed to close stream reader", e);
}
}
} | 3.68 |
flink_WrappingProxyUtil_stripProxy | /**
* Expects a proxy, and returns the unproxied delegate.
*
* @param wrappingProxy The initial proxy.
* @param <T> The type of the delegate. Note that all proxies in the chain must be assignable to
* T.
* @return The unproxied delegate.
*/
@SuppressWarnings("unchecked")
public static <T> T stripProxy(@Nullable final WrappingProxy<T> wrappingProxy) {
if (wrappingProxy == null) {
return null;
}
T delegate = wrappingProxy.getWrappedDelegate();
int numProxiesStripped = 0;
while (delegate instanceof WrappingProxy) {
throwIfSafetyNetExceeded(++numProxiesStripped);
delegate = ((WrappingProxy<T>) delegate).getWrappedDelegate();
}
return delegate;
} | 3.68 |
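A minimal usage sketch for stripProxy above, assuming Flink's org.apache.flink.util.WrappingProxy and WrappingProxyUtil are on the classpath; the Resource type and the proxy chain are hypothetical, introduced only for illustration:

```java
import org.apache.flink.util.WrappingProxy;
import org.apache.flink.util.WrappingProxyUtil;

public class StripProxyDemo {
    // Hypothetical resource type used only for this sketch.
    interface Resource { String name(); }

    static class RealResource implements Resource {
        @Override public String name() { return "real"; }
    }

    // Each proxy is itself a Resource, so the whole chain stays assignable to T = Resource.
    static class ResourceProxy implements Resource, WrappingProxy<Resource> {
        private final Resource delegate;
        ResourceProxy(Resource delegate) { this.delegate = delegate; }
        @Override public String name() { return delegate.name(); }
        @Override public Resource getWrappedDelegate() { return delegate; }
    }

    public static void main(String[] args) {
        Resource chained = new ResourceProxy(new ResourceProxy(new RealResource()));
        // stripProxy follows getWrappedDelegate() until it reaches a non-proxy object.
        Resource unwrapped = WrappingProxyUtil.stripProxy((WrappingProxy<Resource>) chained);
        System.out.println(unwrapped.getClass().getSimpleName()); // RealResource
    }
}
```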
flink_RocksDBStateUploader_uploadFilesToCheckpointFs | /**
* Uploads all the files to the checkpoint file system using the specified number of threads.
*
* @param files The files to be uploaded to the checkpoint file system.
* @param checkpointStreamFactory The checkpoint stream factory used to create output streams.
* @param stateScope The scope of the checkpointed state.
* @return The handles of the uploaded files together with their local paths.
* @throws Exception Thrown if not all of the files could be uploaded.
*/
public List<HandleAndLocalPath> uploadFilesToCheckpointFs(
@Nonnull List<Path> files,
CheckpointStreamFactory checkpointStreamFactory,
CheckpointedStateScope stateScope,
CloseableRegistry closeableRegistry,
CloseableRegistry tmpResourcesRegistry)
throws Exception {
List<CompletableFuture<HandleAndLocalPath>> futures =
createUploadFutures(
files,
checkpointStreamFactory,
stateScope,
closeableRegistry,
tmpResourcesRegistry);
List<HandleAndLocalPath> handles = new ArrayList<>(files.size());
try {
FutureUtils.waitForAll(futures).get();
for (CompletableFuture<HandleAndLocalPath> future : futures) {
handles.add(future.get());
}
} catch (ExecutionException e) {
Throwable throwable = ExceptionUtils.stripExecutionException(e);
throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
if (throwable instanceof IOException) {
throw (IOException) throwable;
} else {
throw new FlinkRuntimeException("Failed to upload data for state handles.", e);
}
}
return handles;
} | 3.68 |
framework_VaadinServletResponse_getCurrent | /**
* Gets the currently processed Vaadin servlet response. The current
* response is automatically defined when the request is started. The
* current response cannot be used in e.g. background threads because of
* the way server implementations reuse response instances.
*
* @return the current Vaadin servlet response instance if available,
* otherwise <code>null</code>
* @since 8.1
*/
public static VaadinServletResponse getCurrent() {
VaadinResponse currentResponse = VaadinResponse.getCurrent();
if (currentResponse instanceof VaadinServletResponse) {
return (VaadinServletResponse) currentResponse;
} else {
return null;
}
} | 3.68 |
hbase_LruAdaptiveBlockCache_updateSizeMetrics | /**
* Helper function that updates the local size counter and also updates any per-cf or
* per-blocktype metrics it can discern from given {@link LruCachedBlock}
*/
private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
long heapsize = cb.heapSize();
BlockType bt = cb.getBuffer().getBlockType();
if (evict) {
heapsize *= -1;
}
if (bt != null && bt.isData()) {
dataBlockSize.add(heapsize);
}
return size.addAndGet(heapsize);
} | 3.68 |
hadoop_FutureIOSupport_propagateOptions | /**
* Propagate options to any builder.
* {@link FutureIO#propagateOptions(FSBuilder, Configuration, String, boolean)}
* @param builder builder to modify
* @param conf configuration to read
* @param prefix prefix to scan/strip
* @param mandatory are the options to be mandatory or optional?
*/
@Deprecated
public static void propagateOptions(
final FSBuilder<?, ?> builder,
final Configuration conf,
final String prefix,
final boolean mandatory) {
FutureIO.propagateOptions(builder, conf, prefix, mandatory);
} | 3.68 |
flink_DualInputSemanticProperties_addReadFields | /**
* Adds, to the existing information, field(s) that are read in the source record(s) from the
* first input.
*
* @param input the input of the read fields
* @param readFields the position(s) in the source record(s)
*/
public void addReadFields(int input, FieldSet readFields) {
if (input != 0 && input != 1) {
throw new IndexOutOfBoundsException();
} else if (input == 0) {
this.readFields1 =
(this.readFields1 == null)
? readFields.clone()
: this.readFields1.addFields(readFields);
} else {
this.readFields2 =
(this.readFields2 == null)
? readFields.clone()
: this.readFields2.addFields(readFields);
}
} | 3.68 |
zxing_DataMatrixReader_extractPureBits | /**
* This method detects a code in a "pure" image -- that is, pure monochrome image
* which contains only an unrotated, unskewed, image of a code, with some white border
* around it. This is a specialized method that works exceptionally fast in this special
* case.
*/
private static BitMatrix extractPureBits(BitMatrix image) throws NotFoundException {
int[] leftTopBlack = image.getTopLeftOnBit();
int[] rightBottomBlack = image.getBottomRightOnBit();
if (leftTopBlack == null || rightBottomBlack == null) {
throw NotFoundException.getNotFoundInstance();
}
int moduleSize = moduleSize(leftTopBlack, image);
int top = leftTopBlack[1];
int bottom = rightBottomBlack[1];
int left = leftTopBlack[0];
int right = rightBottomBlack[0];
int matrixWidth = (right - left + 1) / moduleSize;
int matrixHeight = (bottom - top + 1) / moduleSize;
if (matrixWidth <= 0 || matrixHeight <= 0) {
throw NotFoundException.getNotFoundInstance();
}
// Push in the "border" by half the module width so that we start
// sampling in the middle of the module. Just in case the image is a
// little off, this will help recover.
int nudge = moduleSize / 2;
top += nudge;
left += nudge;
// Now just read off the bits
BitMatrix bits = new BitMatrix(matrixWidth, matrixHeight);
for (int y = 0; y < matrixHeight; y++) {
int iOffset = top + y * moduleSize;
for (int x = 0; x < matrixWidth; x++) {
if (image.get(left + x * moduleSize, iOffset)) {
bits.set(x, y);
}
}
}
return bits;
} | 3.68 |
hbase_BaseReplicationEndpoint_getScopeWALEntryFilter | /**
* Returns a WALEntryFilter for checking the scope. Subclasses can return null if they don't want
* this filter
*/
protected WALEntryFilter getScopeWALEntryFilter() {
return new ScopeWALEntryFilter();
} | 3.68 |
hbase_TableDescriptorBuilder_hasRegionMemStoreReplication | /** Returns true if the read-replicas memstore replication is enabled. */
@Override
public boolean hasRegionMemStoreReplication() {
return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf,
DEFAULT_REGION_MEMSTORE_REPLICATION);
} | 3.68 |
pulsar_ProducerConfiguration_setProperty | /**
* Set a name/value property on this producer.
*
* @param key the property name; must not be null
* @param value the property value; must not be null
* @return this configuration, for chaining
*/
public ProducerConfiguration setProperty(String key, String value) {
checkArgument(key != null);
checkArgument(value != null);
conf.getProperties().put(key, value);
return this;
} | 3.68 |
hudi_LSMTimeline_allSnapshotVersions | /**
* Returns all the valid snapshot versions.
*/
public static List<Integer> allSnapshotVersions(HoodieTableMetaClient metaClient) throws IOException {
return Arrays.stream(metaClient.getFs().listStatus(new Path(metaClient.getArchivePath()), getManifestFilePathFilter()))
.map(fileStatus -> fileStatus.getPath().getName())
.map(LSMTimeline::getManifestVersion)
.collect(Collectors.toList());
} | 3.68 |
hibernate-validator_XmlParserHelper_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
hadoop_IrqHandler_raise | /**
* Raise the signal.
*/
public void raise() {
Signal.raise(signal);
} | 3.68 |
flink_Tuple_newInstance | // BEGIN_OF_TUPLE_DEPENDENT_CODE
// GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator.
public static Tuple newInstance(int arity) {
switch (arity) {
case 0:
return Tuple0.INSTANCE;
case 1:
return new Tuple1();
case 2:
return new Tuple2();
case 3:
return new Tuple3();
case 4:
return new Tuple4();
case 5:
return new Tuple5();
case 6:
return new Tuple6();
case 7:
return new Tuple7();
case 8:
return new Tuple8();
case 9:
return new Tuple9();
case 10:
return new Tuple10();
case 11:
return new Tuple11();
case 12:
return new Tuple12();
case 13:
return new Tuple13();
case 14:
return new Tuple14();
case 15:
return new Tuple15();
case 16:
return new Tuple16();
case 17:
return new Tuple17();
case 18:
return new Tuple18();
case 19:
return new Tuple19();
case 20:
return new Tuple20();
case 21:
return new Tuple21();
case 22:
return new Tuple22();
case 23:
return new Tuple23();
case 24:
return new Tuple24();
case 25:
return new Tuple25();
default:
throw new IllegalArgumentException(
"The tuple arity must be in [0, " + MAX_ARITY + "].");
}
} | 3.68 |
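A brief usage sketch for Tuple.newInstance, assuming Flink's org.apache.flink.api.java.tuple classes are available:

```java
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple3;

public class TupleNewInstanceDemo {
    public static void main(String[] args) {
        // Arity 3 yields a Tuple3 instance; fields are set and read by position.
        Tuple t = Tuple.newInstance(3);
        t.setField("a", 0);
        t.setField(42, 1);
        t.setField(3.14, 2);
        System.out.println(t instanceof Tuple3);    // true
        System.out.println(t.getArity());           // 3
        System.out.println((Object) t.getField(1)); // 42
        // Any arity outside [0, Tuple.MAX_ARITY] throws IllegalArgumentException.
    }
}
```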
flink_SegmentsUtil_copyToUnsafe | /**
* Copy segments to target unsafe pointer.
*
* @param segments Source segments.
* @param offset The position where the bytes are started to be read from these memory segments.
* @param target The unsafe memory to copy the bytes to.
* @param pointer The position in the target unsafe memory to copy the chunk to.
* @param numBytes the number of bytes to copy.
*/
public static void copyToUnsafe(
MemorySegment[] segments, int offset, Object target, int pointer, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].copyToUnsafe(offset, target, pointer, numBytes);
} else {
copyMultiSegmentsToUnsafe(segments, offset, target, pointer, numBytes);
}
} | 3.68 |
framework_VDragAndDropWrapper_startNextUpload | /** For internal use only. May be removed or replaced in the future. */
public void startNextUpload() {
Scheduler.get().scheduleDeferred(() -> {
if (!uploading) {
if (!fileIds.isEmpty()) {
uploading = true;
final Integer fileId = fileIds.remove(0);
VHtml5File file = files.remove(0);
final String receiverUrl = client.translateVaadinUri(
fileIdToReceiver.remove(fileId.toString()));
ExtendedXHR extendedXHR = (ExtendedXHR) ExtendedXHR
.create();
extendedXHR.setOnReadyStateChange(readyStateChangeHandler);
extendedXHR.open("POST", receiverUrl);
extendedXHR.postFile(file);
}
}
});
} | 3.68 |
querydsl_JTSGeometryExpressions_collect | /**
* Return a specified ST_Geometry value from a collection of other geometries.
*
* @param expr1 geometry
* @param expr2 other geometry
* @return geometry collection
*/
public static JTSGeometryExpression<?> collect(Expression<? extends Geometry> expr1, Expression<? extends Geometry> expr2) {
return geometryOperation(SpatialOps.COLLECT2, expr1, expr2);
} | 3.68 |
framework_SQLContainer_itemChangeNotification | /**
* Notifies this container that a property in the given item has been
* modified. The change will be buffered or made instantaneously depending
* on auto commit mode.
*
* @param changedItem
* item that has a modified property
*/
void itemChangeNotification(RowItem changedItem) {
if (autoCommit) {
try {
queryDelegate.beginTransaction();
if (queryDelegate.storeRow(changedItem) == 0) {
queryDelegate.rollback();
refresh();
throw new ConcurrentModificationException(
"Item with the ID '" + changedItem.getId()
+ "' has been externally modified.");
}
queryDelegate.commit();
if (notificationsEnabled) {
CacheFlushNotifier.notifyOfCacheFlush(this);
}
getLogger().log(Level.FINER, "Row updated to DB...");
} catch (SQLException e) {
getLogger().log(Level.WARNING,
"itemChangeNotification failed, rolling back...", e);
try {
queryDelegate.rollback();
} catch (SQLException ee) {
/* Nothing can be done here */
getLogger().log(Level.SEVERE, "Rollback failed", ee);
}
throw new RuntimeException(e);
}
} else {
if (!(changedItem.getId() instanceof TemporaryRowId)
&& !modifiedItems.contains(changedItem)) {
modifiedItems.add(changedItem);
}
}
} | 3.68 |
framework_Form_setLayout | /**
* Sets the layout of the form.
*
* <p>
* If set to null then Form uses a FormLayout by default.
* </p>
*
* @param layout
* the layout of the form.
*/
public void setLayout(Layout layout) {
// Use FormLayout by default
if (layout == null) {
layout = new FormLayout();
}
// reset cursor memory
gridlayoutCursorX = -1;
gridlayoutCursorY = -1;
// Move fields from previous layout
if (getLayout() != null) {
for (Object property : propertyIds) {
Field<?> f = getField(property);
detachField(f);
if (layout instanceof CustomLayout) {
((CustomLayout) layout).addComponent(f,
property.toString());
} else {
layout.addComponent(f);
}
}
getLayout().setParent(null);
}
// Replace the previous layout
layout.setParent(this);
getState().layout = layout;
} | 3.68 |
open-banking-gateway_HbciFlowNameSelector_getNameForValidation | /**
* Sub-process name for current context (PSU/FinTech input) validation.
*/
public String getNameForValidation(HbciContext ctx) {
return actionName(ctx);
} | 3.68 |
flink_FlinkJoinToMultiJoinRule_withOperandFor | /** Defines an operand tree for the given classes. */
default Config withOperandFor(Class<? extends Join> joinClass) {
return withOperandSupplier(
b0 ->
b0.operand(joinClass)
.inputs(
b1 -> b1.operand(RelNode.class).anyInputs(),
b2 -> b2.operand(RelNode.class).anyInputs()))
.as(Config.class);
} | 3.68 |
hadoop_S3ClientFactory_withMultipartCopyEnabled | /**
* Set the multipart copy flag.
*
* @param value new value
* @return the builder
*/
public S3ClientCreationParameters withMultipartCopyEnabled(final boolean value) {
this.multipartCopy = value;
return this;
} | 3.68 |
framework_VAccordion_setHeightFromWidget | /**
* Queries the height from the wrapped widget and uses it to set this
* stack item's height.
*/
public void setHeightFromWidget() {
Widget widget = getChildWidget();
if (widget == null) {
return;
}
int paintableHeight = widget.getElement().getOffsetHeight();
setHeight(paintableHeight);
} | 3.68 |
framework_VCalendar_recalculateHeights | /**
* Recalculates the heights of the sub-components in the calendar.
*/
protected void recalculateHeights() {
if (monthGrid != null) {
if (intHeight == -1) {
monthGrid.addStyleDependentName("sizedheight");
} else {
monthGrid.removeStyleDependentName("sizedheight");
}
monthGrid.updateCellSizes(intWidth - weekToolbar.getOffsetWidth(),
intHeight - nameToolbar.getOffsetHeight());
weekToolbar.setHeightPX((intHeight == -1) ? intHeight
: intHeight - nameToolbar.getOffsetHeight());
} else if (weekGrid != null) {
weekGrid.setHeightPX((intHeight == -1) ? intHeight
: intHeight - weeklyLongEvents.getOffsetHeight()
- dayToolbar.getOffsetHeight());
}
} | 3.68 |
flink_TableFunctionResultFuture_setResultFuture | /** Sets the current collector, which is used to emit the final row. */
public void setResultFuture(ResultFuture<?> resultFuture) {
this.resultFuture = resultFuture;
} | 3.68 |
hadoop_CommitContext_getPendingSetSerializer | /**
* Get a serializer for .pendingset files.
* @return a serializer.
*/
public JsonSerialization<PendingSet> getPendingSetSerializer() {
return pendingSetSerializer.getForCurrentThread();
} | 3.68 |
hadoop_MapReduceJobPropertiesParser_extractMinHeapOpts | /**
* Extracts the -Xms heap option from the specified string.
*/
public static void extractMinHeapOpts(String javaOptions,
List<String> heapOpts, List<String> others) {
for (String opt : javaOptions.split(" ")) {
Matcher matcher = MIN_HEAP_PATTERN.matcher(opt);
if (matcher.find()) {
heapOpts.add(opt);
} else {
others.add(opt);
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterDecimalColumn | /**
* Test altering a floating point column.
*/
@Test
public void testAlterDecimalColumn() {
testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, FLOAT_FIELD), column(FLOAT_FIELD, DataType.DECIMAL, 14, 3).nullable(), expectedAlterTableAlterDecimalColumnStatement());
} | 3.68 |
pulsar_AuthorizationService_grantSubscriptionPermissionAsync | /**
* Grant permission to roles that can access the subscription-admin api.
*
* @param namespace the namespace the subscription belongs to
* @param subscriptionName the subscription name
* @param roles the roles to be granted access
* @param authDataJson
*            additional authdata in json for targeted authorization provider
* @return a future that completes when the permission has been granted
*/
public CompletableFuture<Void> grantSubscriptionPermissionAsync(NamespaceName namespace, String subscriptionName,
Set<String> roles, String authDataJson) {
return provider.grantSubscriptionPermissionAsync(namespace, subscriptionName, roles, authDataJson);
} | 3.68 |
framework_VaadinServletService_getServlet | /**
* Retrieves a reference to the servlet associated with this service. Should
* be overridden (or otherwise intercepted) if the no-arg constructor is
* used to prevent NPEs.
*
* @return A reference to the VaadinServlet this service is using
*/
public VaadinServlet getServlet() {
return servlet;
} | 3.68 |
hbase_MetricRegistryInfo_getMetricsContext | /**
* Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string.
* eg. regionserver, master, thriftserver
* @return The string context used to register this source to hadoop's metrics2 system.
*/
public String getMetricsContext() {
return metricsContext;
} | 3.68 |
hbase_AbstractRpcBasedConnectionRegistry_groupCall | /**
* Send requests concurrently to hedgedReadsFanout end points. If any of the requests succeeds,
* we will complete the future and quit. If all the requests in one round fail, we will start
* another round to send requests concurrently to hedgedReadsFanout end points. If all end
* points have been tried and all of them have failed, we will fail the future.
*/
private <T extends Message> void groupCall(CompletableFuture<T> future, Set<ServerName> servers,
List<ClientMetaService.Interface> stubs, int startIndexInclusive, Callable<T> callable,
Predicate<T> isValidResp, String debug, ConcurrentLinkedQueue<Throwable> errors) {
int endIndexExclusive = Math.min(startIndexInclusive + hedgedReadFanOut, stubs.size());
AtomicInteger remaining = new AtomicInteger(endIndexExclusive - startIndexInclusive);
for (int i = startIndexInclusive; i < endIndexExclusive; i++) {
addListener(call(stubs.get(i), callable), (r, e) -> {
// a simple check to skip all the later operations earlier
if (future.isDone()) {
return;
}
if (e == null && !isValidResp.test(r)) {
e = badResponse(debug);
}
if (e != null) {
// make sure when remaining reaches 0 we have all exceptions in the errors queue
errors.add(e);
if (remaining.decrementAndGet() == 0) {
if (endIndexExclusive == stubs.size()) {
// we are done, complete the future with exception
RetriesExhaustedException ex =
new RetriesExhaustedException("masters", stubs.size(), new ArrayList<>(errors));
future.completeExceptionally(new MasterRegistryFetchException(servers, ex));
} else {
groupCall(future, servers, stubs, endIndexExclusive, callable, isValidResp, debug,
errors);
}
}
} else {
// do not need to decrement the counter any more as we have already finished the future.
future.complete(r);
}
});
}
} | 3.68 |
hudi_GenericRecordFullPayloadGenerator_getUpdatePayload | /**
* Update a given {@link GenericRecord} with random value. The fields in {@code blacklistFields} will not be updated.
*
* @param record GenericRecord to update
* @param blacklistFields Fields whose value should not be touched
* @return The updated {@link GenericRecord}
*/
public GenericRecord getUpdatePayload(GenericRecord record, Set<String> blacklistFields) {
return randomize(record, blacklistFields);
} | 3.68 |
hbase_HRegionFileSystem_deleteRegionFromFileSystem | /**
* Remove the region from the table directory, archiving the region's hfiles.
* @param conf the {@link Configuration} to use
* @param fs {@link FileSystem} from which to remove the region
* @param tableDir {@link Path} to where the table is being stored
* @param regionInfo {@link RegionInfo} for region to be deleted
* @throws IOException if the request cannot be completed
*/
public static void deleteRegionFromFileSystem(final Configuration conf, final FileSystem fs,
final Path tableDir, final RegionInfo regionInfo) throws IOException {
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
Path regionDir = regionFs.getRegionDir();
if (!fs.exists(regionDir)) {
LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("DELETING region " + regionDir);
}
// Archive region
Path rootDir = CommonFSUtils.getRootDir(conf);
HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
// Delete empty region dir
if (!fs.delete(regionDir, true)) {
LOG.warn("Failed delete of " + regionDir);
}
} | 3.68 |
hadoop_LocatedFileStatusFetcher_getIOStatistics | /**
* Return any IOStatistics collected during listing.
* @return IO stats accrued.
*/
@Override
public synchronized IOStatistics getIOStatistics() {
return iostats;
} | 3.68 |
framework_HierarchyMapper_getParentIndex | /**
* Finds the index of the parent of the item in given target index.
*
* @param item
* the item to get the parent of
* @return the parent index or a negative value if the parent is not found
*
*/
public Integer getParentIndex(T item) {
// TODO: This can be optimized.
List<T> flatHierarchy = getHierarchy(null).collect(Collectors.toList());
return flatHierarchy.indexOf(getParentOfItem(item));
} | 3.68 |
AreaShop_FileManager_saveGroupsIsRequired | /**
* Flag the group file as requiring a save to disk.
*/
public void saveGroupsIsRequired() {
saveGroupsRequired = true;
} | 3.68 |
pulsar_ManagedLedgerMetrics_aggregate | /**
* Aggregation by namespace (not thread-safe).
*
* @param ledgersByDimension managed ledgers grouped by dimension
* @return the aggregated metrics, one entry per dimension
*/
private List<Metrics> aggregate(Map<Metrics, List<ManagedLedgerImpl>> ledgersByDimension) {
metricsCollection.clear();
for (Entry<Metrics, List<ManagedLedgerImpl>> e : ledgersByDimension.entrySet()) {
Metrics metrics = e.getKey();
List<ManagedLedgerImpl> ledgers = e.getValue();
// prepare aggregation map
tempAggregatedMetricsMap.clear();
// generate the collections by each metrics and then apply the aggregation
for (ManagedLedgerImpl ledger : ledgers) {
ManagedLedgerMXBean lStats = ledger.getStats();
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_AddEntryBytesRate",
lStats.getAddEntryBytesRate());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_AddEntryWithReplicasBytesRate",
lStats.getAddEntryWithReplicasBytesRate());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_AddEntryErrors",
(double) lStats.getAddEntryErrors());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_AddEntryMessagesRate",
lStats.getAddEntryMessagesRate());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_AddEntrySucceed",
(double) lStats.getAddEntrySucceed());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_NumberOfMessagesInBacklog",
(double) lStats.getNumberOfMessagesInBacklog());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_ReadEntriesBytesRate",
lStats.getReadEntriesBytesRate());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_ReadEntriesErrors",
(double) lStats.getReadEntriesErrors());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_ReadEntriesRate",
lStats.getReadEntriesRate());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_ReadEntriesOpsCacheMissesRate",
lStats.getReadEntriesOpsCacheMissesRate());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_ReadEntriesSucceeded",
(double) lStats.getReadEntriesSucceeded());
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_StoredMessagesSize",
(double) lStats.getStoredMessagesSize());
// handle bucket entries initialization here
BRK_ML_ADDENTRYLATENCYBUCKETS.populateBucketEntries(tempAggregatedMetricsMap,
lStats.getAddEntryLatencyBuckets(),
statsPeriodSeconds);
BRK_ML_LEDGERADDENTRYLATENCYBUCKETS.populateBucketEntries(tempAggregatedMetricsMap,
lStats.getLedgerAddEntryLatencyBuckets(),
statsPeriodSeconds);
BRK_ML_LEDGERSWITCHLATENCYBUCKETS.populateBucketEntries(tempAggregatedMetricsMap,
lStats.getLedgerSwitchLatencyBuckets(),
statsPeriodSeconds);
BRK_ML_ENTRYSIZEBUCKETS.populateBucketEntries(tempAggregatedMetricsMap,
lStats.getEntrySizeBuckets(),
statsPeriodSeconds);
populateAggregationMapWithSum(tempAggregatedMetricsMap, "brk_ml_MarkDeleteRate",
lStats.getMarkDeleteRate());
}
// SUM up collections of each metrics
for (Entry<String, Double> ma : tempAggregatedMetricsMap.entrySet()) {
metrics.put(ma.getKey(), ma.getValue());
}
metricsCollection.add(metrics);
}
return metricsCollection;
} | 3.68 |
shardingsphere-elasticjob_JobAPIFactory_createShardingOperateAPI | /**
* Create job sharding operate API.
*
* @param connectString registry center connect string
* @param namespace registry center namespace
* @param digest registry center digest
* @return job sharding operate API
*/
public static ShardingOperateAPI createShardingOperateAPI(final String connectString, final String namespace, final String digest) {
return new ShardingOperateAPIImpl(RegistryCenterFactory.createCoordinatorRegistryCenter(connectString, namespace, digest));
} | 3.68 |
hbase_RecoverableZooKeeper_filterByPrefix | /**
* Filters the given node list by the given prefixes. This method is all-inclusive--if any element
* in the node list starts with any of the given prefixes, then it is included in the result.
* @param nodes the nodes to filter
* @param prefixes the prefixes to include in the result
* @return list of every element that starts with one of the prefixes
*/
private static List<String> filterByPrefix(List<String> nodes, String... prefixes) {
List<String> lockChildren = new ArrayList<>();
for (String child : nodes) {
for (String prefix : prefixes) {
if (child.startsWith(prefix)) {
lockChildren.add(child);
break;
}
}
}
return lockChildren;
} | 3.68 |
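An illustrative, self-contained restatement of the private filterByPrefix helper (plain JDK only), showing the all-inclusive matching behavior described in its Javadoc:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FilterByPrefixDemo {
    // Same behavior as the private helper above, restated here for illustration.
    static List<String> filterByPrefix(List<String> nodes, String... prefixes) {
        List<String> lockChildren = new ArrayList<>();
        for (String child : nodes) {
            for (String prefix : prefixes) {
                if (child.startsWith(prefix)) {
                    lockChildren.add(child);
                    break; // avoid adding the same node twice when several prefixes match
                }
            }
        }
        return lockChildren;
    }

    public static void main(String[] args) {
        List<String> nodes = Arrays.asList("read-0001", "write-0002", "other-0003");
        // Any node starting with either prefix is kept.
        System.out.println(filterByPrefix(nodes, "read-", "write-")); // [read-0001, write-0002]
    }
}
```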
morf_GraphBasedUpgradeScriptGenerator_generatePreUpgradeStatements | /**
* @return pre-upgrade statements to be executed before the Graph Based Upgrade
*/
public List<String> generatePreUpgradeStatements() {
ImmutableList.Builder<String> statements = ImmutableList.builder();
// Initialisation SQL (zzzUpgradeStatus table & optimistic locking to prevent duplicate execution of upgrade script)
statements.addAll(initialisationSql);
// temp table
statements.addAll(connectionResources.sqlDialect().tableDeploymentStatements(idTable));
statements.addAll(UpgradeHelper.preSchemaUpgrade(upgradeSchemas,
viewChanges,
viewChangesDeploymentHelperFactory.create(connectionResources)));
return statements.build();
} | 3.68 |
hadoop_TimelineEntity_getEntityId | /**
* Get the entity Id
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
} | 3.68 |
framework_DragSourceExtension_onDragStart | /**
* Method invoked when a <code>dragstart</code> has been sent from client
* side. Fires the {@link DragStartEvent}.
*/
protected void onDragStart() {
DragStartEvent<T> event = new DragStartEvent<>(getParent(),
getState(false).effectAllowed);
fireEvent(event);
} | 3.68 |
framework_MouseEvents_getRelativeX | /**
* Returns the relative mouse position (x coordinate) when the click
* took place. The position is relative to the clicked component.
*
* @return The mouse cursor x position relative to the clicked layout
* component or -1 if no x coordinate available
*/
public int getRelativeX() {
return details.getRelativeX();
} | 3.68 |
flink_ShortSummaryAggregator_min | /** Like Math.min() except for shorts. */
public static Short min(Short a, Short b) {
return a <= b ? a : b;
} | 3.68 |
hbase_CompactionConfiguration_getMinCompactSize | /** Returns lower bound below which compaction is selected without ratio test */
public long getMinCompactSize() {
return minCompactSize;
} | 3.68 |
flink_ParameterTool_fromSystemProperties | /**
* Returns {@link ParameterTool} from the system properties. Example on how to pass system
* properties: -Dkey1=value1 -Dkey2=value2
*
* @return A {@link ParameterTool}
*/
public static ParameterTool fromSystemProperties() {
return fromMap((Map) System.getProperties());
} | 3.68 |
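A short usage sketch for fromSystemProperties, assuming Flink's ParameterTool; the property names are illustrative:

```java
import org.apache.flink.api.java.utils.ParameterTool;

public class SystemPropertiesDemo {
    public static void main(String[] args) {
        // Normally passed on the command line as: -Dinput.path=/tmp/in -Dparallelism=4
        System.setProperty("input.path", "/tmp/in");
        System.setProperty("parallelism", "4");

        ParameterTool params = ParameterTool.fromSystemProperties();
        String inputPath = params.get("input.path");       // "/tmp/in"
        int parallelism = params.getInt("parallelism", 1); // 4; falls back to 1 if absent
        System.out.println(inputPath + " / " + parallelism);
    }
}
```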
querydsl_ComparableExpression_gtAll | /**
* Create a {@code this > all right} expression
*
* @param right rhs of the comparison
* @return this > all right
*/
public BooleanExpression gtAll(SubQueryExpression<? extends T> right) {
return gt(ExpressionUtils.all(right));
} | 3.68 |
hbase_AsyncAdmin_normalize | /**
* Invoke region normalizer. Can NOT run for various reasons. Check logs.
* @return true if region normalizer ran, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}
*/
default CompletableFuture<Boolean> normalize() {
return normalize(new NormalizeTableFilterParams.Builder().build());
} | 3.68 |
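A small usage sketch for the default normalize() above, assuming an HBase client on the classpath; configuration and output handling are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NormalizeDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
            AsyncAdmin admin = conn.getAdmin();
            // The default normalize() delegates to normalize(NormalizeTableFilterParams) with an empty filter.
            Boolean ran = admin.normalize().get();
            System.out.println("Region normalizer ran: " + ran);
        }
    }
}
```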
hbase_Constraints_disable | /**
* Turn off processing constraints for a given table, even if constraints have been turned on or
* added.
*/
public static TableDescriptorBuilder disable(TableDescriptorBuilder builder) throws IOException {
try {
return builder.removeCoprocessor(ConstraintProcessor.class.getName());
} catch (IllegalArgumentException e) {
LOG.warn("ConstraintProcessor was unset.", e);
return builder;
}
} | 3.68 |
morf_SchemaValidator_validateColumns | /**
* Validates a {@link Table}'s {@link Column}s meet the rules.
*
* @param table The {@link Table} on which to validate columns.
*/
private void validateColumns(Table table) {
validateColumnNames(FluentIterable.from(table.columns()).transform(COLUMN_TO_NAME).toList(), table.getName());
for (Column column : table.columns()) {
if (column.getType().hasWidth() && column.getWidth() == 0) {
validationFailures.add("Column [" + column.getName() + "] on table [" + table.getName() + "] is not allowed - column data type [" + column.getType() + "] requires width to be specified");
}
if (column.isPrimaryKey() && column.isNullable()) {
validationFailures.add("Column [" + column.getName() + "] on table [" + table.getName() + "] is both nullable and in the primary key. This is not permitted.");
}
}
} | 3.68 |
hudi_AvroSchemaCompatibility_mergedWith | /**
* Merges the current {@code SchemaCompatibilityResult} with the supplied result
* into a new instance, combining the list of
* {@code Incompatibility Incompatibilities} and regressing to the
* {@code SchemaCompatibilityType#INCOMPATIBLE INCOMPATIBLE} state if any
* incompatibilities are encountered.
*
* @param toMerge The {@code SchemaCompatibilityResult} to merge with the
* current instance.
* @return A {@code SchemaCompatibilityResult} that combines the state of the
* current and supplied instances.
*/
public SchemaCompatibilityResult mergedWith(SchemaCompatibilityResult toMerge) {
List<Incompatibility> mergedIncompatibilities = new ArrayList<>(mIncompatibilities);
mergedIncompatibilities.addAll(toMerge.getIncompatibilities());
SchemaCompatibilityType compatibilityType = mCompatibilityType == SchemaCompatibilityType.COMPATIBLE
? toMerge.mCompatibilityType
: SchemaCompatibilityType.INCOMPATIBLE;
return new SchemaCompatibilityResult(compatibilityType, mergedIncompatibilities);
} | 3.68 |
hadoop_CloseableReferenceCount_getReferenceCount | /**
* Get the current reference count.
*
* @return The current reference count.
*/
public int getReferenceCount() {
return status.get() & (~STATUS_CLOSED_MASK);
} | 3.68 |
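An illustrative sketch of the bit-masking behind getReferenceCount; since STATUS_CLOSED_MASK is private, the mask value used here (1 << 30) is stated as an assumption for demonstration only:

```java
public class RefCountMaskDemo {
    // Assumption for illustration: the closed flag is a single high bit (1 << 30).
    private static final int STATUS_CLOSED_MASK = 1 << 30;

    public static void main(String[] args) {
        int status = 3;                                 // open, 3 outstanding references
        int closedStatus = status | STATUS_CLOSED_MASK; // closed, same reference count

        // getReferenceCount() masks the closed bit away, so both read back as 3.
        System.out.println(status & (~STATUS_CLOSED_MASK));          // 3
        System.out.println(closedStatus & (~STATUS_CLOSED_MASK));    // 3
        System.out.println((closedStatus & STATUS_CLOSED_MASK) != 0); // true: closed
    }
}
```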
hbase_TableHFileArchiveTracker_updateWatchedTables | /**
* Reads the list of children under the archive znode as table names and then sets those tables as
* the list of tables that we should archive.
* @throws KeeperException if there is an unexpected zk exception
*/
private void updateWatchedTables() throws KeeperException {
// get the children and watch for new children
LOG.debug("Updating watches on tables to archive.");
// get the children and add watches for each of the children
List<String> tables = ZKUtil.listChildrenAndWatchThem(watcher, archiveHFileZNode);
LOG.debug("Starting archive for tables:" + tables);
// if archiving is still enabled
if (tables != null && tables.size() > 0) {
getMonitor().setArchiveTables(tables);
} else {
LOG.debug("No tables to archive.");
// only if we currently have a tracker, then clear the archive
clearTables();
}
} | 3.68 |
rocketmq-connect_ColumnDefinition_scale | /**
* Gets the column's number of digits to right of the decimal point. 0 is returned for data types
* where the scale is not applicable.
*
* @return scale
*/
public int scale() {
return scale;
} | 3.68 |
hudi_IncrSourceHelper_filterAndGenerateCheckpointBasedOnSourceLimit | /**
* Adjust the source dataset to size based batch based on last checkpoint key.
*
* @param sourceData Source dataset
* @param sourceLimit Max number of bytes to be read from source
* @param queryInfo Query Info
* @return end instants along with filtered rows.
*/
public static Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> filterAndGenerateCheckpointBasedOnSourceLimit(Dataset<Row> sourceData,
long sourceLimit, QueryInfo queryInfo,
CloudObjectIncrCheckpoint cloudObjectIncrCheckpoint) {
if (sourceData.isEmpty()) {
return Pair.of(cloudObjectIncrCheckpoint, Option.empty());
}
// Let's persist the dataset to avoid triggering the dag repeatedly
sourceData.persist(StorageLevel.MEMORY_AND_DISK());
// Set ordering in query to enable batching
Dataset<Row> orderedDf = QueryRunner.applyOrdering(sourceData, queryInfo.getOrderByColumns());
Option<String> lastCheckpoint = Option.of(cloudObjectIncrCheckpoint.getCommit());
Option<String> lastCheckpointKey = Option.ofNullable(cloudObjectIncrCheckpoint.getKey());
Option<String> concatenatedKey = lastCheckpoint.flatMap(checkpoint -> lastCheckpointKey.map(key -> checkpoint + key));
// Filter until last checkpoint key
if (concatenatedKey.isPresent()) {
orderedDf = orderedDf.withColumn("commit_key",
functions.concat(functions.col(queryInfo.getOrderColumn()), functions.col(queryInfo.getKeyColumn())));
// Apply incremental filter
orderedDf = orderedDf.filter(functions.col("commit_key").gt(concatenatedKey.get())).drop("commit_key");
// We could be just at the end of the commit, so return empty
if (orderedDf.isEmpty()) {
LOG.info("Empty ordered source, returning endpoint:" + queryInfo.getEndInstant());
sourceData.unpersist();
return Pair.of(new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), lastCheckpointKey.get()), Option.empty());
}
}
// Limit based on sourceLimit
WindowSpec windowSpec = Window.orderBy(col(queryInfo.getOrderColumn()), col(queryInfo.getKeyColumn()));
// Add the 'cumulativeSize' column with running sum of 'limitColumn'
Dataset<Row> aggregatedData = orderedDf.withColumn(CUMULATIVE_COLUMN_NAME,
sum(col(queryInfo.getLimitColumn())).over(windowSpec));
Dataset<Row> collectedRows = aggregatedData.filter(col(CUMULATIVE_COLUMN_NAME).leq(sourceLimit));
Row row = null;
if (collectedRows.isEmpty()) {
// If the first element itself exceeds limits then return first element
LOG.info("First object exceeding source limit: " + sourceLimit + " bytes");
row = aggregatedData.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).first();
collectedRows = aggregatedData.limit(1);
} else {
// Get the last row and form composite key
row = collectedRows.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).orderBy(
col(queryInfo.getOrderColumn()).desc(), col(queryInfo.getKeyColumn()).desc()).first();
}
LOG.info("Processed batch size: " + row.get(row.fieldIndex(CUMULATIVE_COLUMN_NAME)) + " bytes");
sourceData.unpersist();
return Pair.of(new CloudObjectIncrCheckpoint(row.getString(0), row.getString(1)), Option.of(collectedRows));
} | 3.68 |
hadoop_AzureBlobFileSystem_openFileWithOptions | /**
* Takes config and other options through
* {@link org.apache.hadoop.fs.impl.OpenFileParameters}. Ensure that
* FileStatus entered is up-to-date, as it will be used to create the
* InputStream (with info such as contentLength, eTag)
* @param path The location of file to be opened
* @param parameters OpenFileParameters instance; can hold FileStatus,
* Configuration, bufferSize and mandatoryKeys
*/
@Override
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final Path path, final OpenFileParameters parameters) throws IOException {
LOG.debug("AzureBlobFileSystem.openFileWithOptions path: {}", path);
AbstractFSBuilderImpl.rejectUnknownMandatoryKeys(
parameters.getMandatoryKeys(),
FS_OPTION_OPENFILE_STANDARD_OPTIONS,
"for " + path);
return LambdaUtils.eval(
new CompletableFuture<>(), () ->
open(path, Optional.of(parameters)));
} | 3.68 |
hbase_ServerName_valueOf | /**
* Retrieve an instance of {@link ServerName}. Callers should use the {@link #equals(Object)}
* method to compare returned instances, though we may return a shared immutable object as an
* internal optimization.
* @param address the {@link Address} to use for getting the {@link ServerName}
* @param startCode the startcode to use for getting the {@link ServerName}
* @return the constructed {@link ServerName}
* @see #valueOf(String, int, long)
*/
public static ServerName valueOf(final Address address, final long startCode) {
return valueOf(address.getHostname(), address.getPort(), startCode);
} | 3.68 |
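A small usage sketch for ServerName.valueOf, assuming HBase's Address and ServerName classes; the host, port, and start code are illustrative values:

```java
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.net.Address;

public class ServerNameDemo {
    public static void main(String[] args) {
        Address address = Address.fromParts("rs1.example.com", 16020);
        ServerName a = ServerName.valueOf(address, 1700000000000L);
        ServerName b = ServerName.valueOf("rs1.example.com", 16020, 1700000000000L);

        // Compare with equals(), as the javadoc advises; identity is not guaranteed.
        System.out.println(a.equals(b)); // true
        System.out.println(a == b);      // may be false even for equal values
    }
}
```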
flink_MetricFetcherImpl_retrieveAndQueryMetrics | /**
* Retrieves and queries the specified QueryServiceGateway.
*
* @param queryServiceAddress specifying the QueryServiceGateway
*/
private CompletableFuture<Void> retrieveAndQueryMetrics(String queryServiceAddress) {
LOG.debug("Retrieve metric query service gateway for {}", queryServiceAddress);
final CompletableFuture<MetricQueryServiceGateway> queryServiceGatewayFuture =
queryServiceRetriever.retrieveService(queryServiceAddress);
return queryServiceGatewayFuture.thenComposeAsync(this::queryMetrics, executor);
} | 3.68 |
graphhopper_LMApproximator_forLandmarks | /**
* @param weighting the weighting used for the current path calculation, not necessarily the same that we used for the LM preparation.
* All edge weights must be larger or equal compared to those used for the preparation.
*/
public static LMApproximator forLandmarks(Graph g, Weighting weighting, LandmarkStorage lms, int activeLM) {
return new LMApproximator(g, lms.getWeighting(), weighting, lms.getBaseNodes(), lms, activeLM, lms.getFactor(), false);
} | 3.68 |
flink_RemoteInputChannel_shouldBeSpilled | /**
* @return if given {@code sequenceNumber} should be spilled given {@link
* #lastBarrierSequenceNumber}. We might not have yet received {@link CheckpointBarrier} and
* we might need to spill everything. If we have already received it, there is a bit nasty
* corner case of {@link SequenceBuffer#sequenceNumber} overflowing that needs to be handled
* as well.
*/
private boolean shouldBeSpilled(int sequenceNumber) {
if (lastBarrierSequenceNumber == NONE) {
return true;
}
checkState(
receivedBuffers.size() < Integer.MAX_VALUE / 2,
"Too many buffers for sequenceNumber overflow detection code to work correctly");
boolean possibleOverflowAfterOvertaking = Integer.MAX_VALUE / 2 < lastBarrierSequenceNumber;
boolean possibleOverflowBeforeOvertaking =
lastBarrierSequenceNumber < -Integer.MAX_VALUE / 2;
if (possibleOverflowAfterOvertaking) {
return sequenceNumber < lastBarrierSequenceNumber && sequenceNumber > 0;
} else if (possibleOverflowBeforeOvertaking) {
return sequenceNumber < lastBarrierSequenceNumber || sequenceNumber > 0;
} else {
return sequenceNumber < lastBarrierSequenceNumber;
}
} | 3.68 |
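An illustrative, standalone restatement of the spill decision above (not Flink API), showing how the wrap-around check treats sequence numbers when the barrier sits near Integer.MAX_VALUE:

```java
public class SpillCheckDemo {
    private static final int NONE = -1;

    // Standalone copy of the decision logic, for illustration only.
    static boolean shouldBeSpilled(int sequenceNumber, int lastBarrierSequenceNumber) {
        if (lastBarrierSequenceNumber == NONE) {
            return true;
        }
        boolean possibleOverflowAfterOvertaking = Integer.MAX_VALUE / 2 < lastBarrierSequenceNumber;
        boolean possibleOverflowBeforeOvertaking = lastBarrierSequenceNumber < -Integer.MAX_VALUE / 2;
        if (possibleOverflowAfterOvertaking) {
            return sequenceNumber < lastBarrierSequenceNumber && sequenceNumber > 0;
        } else if (possibleOverflowBeforeOvertaking) {
            return sequenceNumber < lastBarrierSequenceNumber || sequenceNumber > 0;
        } else {
            return sequenceNumber < lastBarrierSequenceNumber;
        }
    }

    public static void main(String[] args) {
        // Barrier near Integer.MAX_VALUE: a small positive sequence number has not wrapped yet,
        // so it is still "before" the barrier and should be spilled.
        System.out.println(shouldBeSpilled(100, Integer.MAX_VALUE - 5));                     // true
        // A large negative sequence number means the counter already wrapped past the barrier.
        System.out.println(shouldBeSpilled(Integer.MIN_VALUE + 10, Integer.MAX_VALUE - 5));  // false
    }
}
```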
flink_ByteValue_setValue | /**
* Sets the encapsulated byte to the specified value.
*
* @param value the new value of the encapsulated byte.
*/
public void setValue(byte value) {
this.value = value;
} | 3.68 |
hadoop_ResourceSet_getLocalizationStatuses | /**
* Get all the localization statuses.
* @return the localization statuses.
*/
public List<LocalizationStatus> getLocalizationStatuses() {
List<LocalizationStatus> statuses = new ArrayList<>();
localizedResources.forEach((key, path) -> {
LocalizationStatus status = LocalizationStatus.newInstance(key,
LocalizationState.COMPLETED);
statuses.add(status);
});
pendingResources.forEach((lrReq, keys) ->
keys.forEach(key -> {
LocalizationStatus status = LocalizationStatus.newInstance(key,
LocalizationState.PENDING);
statuses.add(status);
}));
synchronized (resourcesFailedToBeLocalized) {
statuses.addAll(resourcesFailedToBeLocalized);
}
return statuses;
} | 3.68 |
hadoop_VersionInfoMojo_getBuildTime | /**
* Returns a string representing current build time.
*
* @return String representing current build time
*/
private String getBuildTime() {
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'");
dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
return dateFormat.format(new Date());
} | 3.68 |
framework_Window_isModal | /**
* @return true if this window is modal.
*/
public boolean isModal() {
return getState(false).modal;
} | 3.68 |
hadoop_FedBalance_submit | /**
* Start a ProcedureScheduler and submit the job.
*
* @param command the command options.
* @param inputSrc the source input. This specifies the source path.
* @param inputDst the dst input. This specifies the dst path.
*/
private int submit(CommandLine command, String inputSrc, String inputDst)
throws IOException {
Builder builder = new Builder(inputSrc, inputDst);
// parse options.
builder.setForceCloseOpen(command.hasOption(FORCE_CLOSE_OPEN.getOpt()));
if (command.hasOption(MAP.getOpt())) {
builder.setMap(Integer.parseInt(command.getOptionValue(MAP.getOpt())));
}
if (command.hasOption(BANDWIDTH.getOpt())) {
builder.setBandWidth(
Integer.parseInt(command.getOptionValue(BANDWIDTH.getOpt())));
}
if (command.hasOption(DELAY_DURATION.getOpt())) {
builder.setDelayDuration(
Long.parseLong(command.getOptionValue(DELAY_DURATION.getOpt())));
}
if (command.hasOption(DIFF_THRESHOLD.getOpt())) {
builder.setDiffThreshold(Integer.parseInt(
command.getOptionValue(DIFF_THRESHOLD.getOpt())));
}
if (command.hasOption(TRASH.getOpt())) {
String val = command.getOptionValue(TRASH.getOpt());
if (val.equalsIgnoreCase("skip")) {
builder.setTrashOpt(TrashOption.SKIP);
} else if (val.equalsIgnoreCase("trash")) {
builder.setTrashOpt(TrashOption.TRASH);
} else if (val.equalsIgnoreCase("delete")) {
builder.setTrashOpt(TrashOption.DELETE);
} else {
printUsage();
return -1;
}
}
// Submit the job.
BalanceProcedureScheduler scheduler =
new BalanceProcedureScheduler(getConf());
scheduler.init(false);
try {
BalanceJob balanceJob = builder.build();
// Submit and wait until the job is done.
scheduler.submit(balanceJob);
scheduler.waitUntilDone(balanceJob);
} catch (IOException e) {
LOG.error("Submit balance job failed.", e);
return -1;
} finally {
scheduler.shutDown();
}
return 0;
} | 3.68 |
hbase_AvlUtil_remove | /**
* Remove a node from the tree
* @param head the head of the linked list
* @param node the node to remove from the list
* @return the new head of the list
*/
public static <TNode extends AvlLinkedNode> TNode remove(TNode head, TNode node) {
assert isLinked(node) : node + " is not linked";
if (node != node.iterNext) {
node.iterPrev.iterNext = node.iterNext;
node.iterNext.iterPrev = node.iterPrev;
head = (head == node) ? (TNode) node.iterNext : head;
} else {
head = null;
}
node.iterNext = null;
node.iterPrev = null;
return head;
} | 3.68 |
flink_FlinkPreparingTableBase_isTemporal | /** We recognize all tables in Flink as temporal because they are changeable. */
public boolean isTemporal() {
return true;
} | 3.68 |
flink_FineGrainedSlotManager_checkResourceRequirements | /**
* DO NOT call this method directly. Use {@link #checkResourceRequirementsWithDelay()} instead.
*/
private void checkResourceRequirements() {
if (!started) {
return;
}
Map<JobID, Collection<ResourceRequirement>> missingResources =
resourceTracker.getMissingResources();
if (missingResources.isEmpty()) {
if (resourceAllocator.isSupported()
&& !taskManagerTracker.getPendingTaskManagers().isEmpty()) {
taskManagerTracker.replaceAllPendingAllocations(Collections.emptyMap());
checkResourcesNeedReconcile();
declareNeededResourcesWithDelay();
}
return;
}
logMissingAndAvailableResource(missingResources);
missingResources =
missingResources.entrySet().stream()
.collect(
Collectors.toMap(
Map.Entry::getKey, e -> new ArrayList<>(e.getValue())));
final ResourceAllocationResult result =
resourceAllocationStrategy.tryFulfillRequirements(
missingResources, taskManagerTracker, this::isBlockedTaskManager);
// Allocate slots according to the result
allocateSlotsAccordingTo(result.getAllocationsOnRegisteredResources());
final Set<PendingTaskManagerId> failAllocations;
if (resourceAllocator.isSupported()) {
// Allocate task managers according to the result
failAllocations =
allocateTaskManagersAccordingTo(result.getPendingTaskManagersToAllocate());
// Record slot allocation of pending task managers
final Map<PendingTaskManagerId, Map<JobID, ResourceCounter>>
pendingResourceAllocationResult =
new HashMap<>(result.getAllocationsOnPendingResources());
pendingResourceAllocationResult.keySet().removeAll(failAllocations);
taskManagerTracker.replaceAllPendingAllocations(pendingResourceAllocationResult);
} else {
failAllocations =
result.getPendingTaskManagersToAllocate().stream()
.map(PendingTaskManager::getPendingTaskManagerId)
.collect(Collectors.toSet());
}
unfulfillableJobs.clear();
unfulfillableJobs.addAll(result.getUnfulfillableJobs());
for (PendingTaskManagerId pendingTaskManagerId : failAllocations) {
unfulfillableJobs.addAll(
result.getAllocationsOnPendingResources().get(pendingTaskManagerId).keySet());
}
// Notify jobs that can not be fulfilled
if (sendNotEnoughResourceNotifications) {
for (JobID jobId : unfulfillableJobs) {
LOG.warn("Could not fulfill resource requirements of job {}.", jobId);
resourceEventListener.notEnoughResourceAvailable(
jobId, resourceTracker.getAcquiredResources(jobId));
}
}
if (resourceAllocator.isSupported()) {
checkResourcesNeedReconcile();
declareNeededResourcesWithDelay();
}
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_reconcileIntervalMinutes | /**
* Set reconcile interval minutes for job sharding status.
*
* <p>
* Monitor the status of the job server at regular intervals, and resharding if incorrect.
* </p>
*
* @param reconcileIntervalMinutes reconcile interval minutes for job sharding status
* @return ElasticJob configuration builder
*/
public Builder reconcileIntervalMinutes(final int reconcileIntervalMinutes) {
this.reconcileIntervalMinutes = reconcileIntervalMinutes;
return this;
} | 3.68 |
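A brief usage sketch for the builder method above, assuming ElasticJob 3.x's JobConfiguration; the job name, cron expression, and interval are illustrative:

```java
import org.apache.shardingsphere.elasticjob.api.JobConfiguration;

public class ReconcileConfigDemo {
    public static void main(String[] args) {
        // Re-check sharding status every 10 minutes and reshard if it is found to be incorrect.
        JobConfiguration jobConfig = JobConfiguration.newBuilder("myJob", 3)
                .cron("0/30 * * * * ?")
                .reconcileIntervalMinutes(10)
                .build();
        System.out.println(jobConfig.getReconcileIntervalMinutes()); // 10
    }
}
```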
flink_SourceTestSuiteBase_generateTestDataForWriter | /**
* Generate a set of split writers.
*
* @param externalContext External context
* @param splitIndex the split index
* @param writer the writer to send data
* @return List of generated test records
*/
protected List<T> generateTestDataForWriter(
DataStreamSourceExternalContext<T> externalContext,
TestingSourceSettings sourceSettings,
int splitIndex,
ExternalSystemSplitDataWriter<T> writer) {
final List<T> testRecordCollection =
externalContext.generateTestData(
sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
LOG.debug("Writing {} records to external system", testRecordCollection.size());
writer.writeRecords(testRecordCollection);
return testRecordCollection;
} | 3.68 |
morf_AbstractSqlDialectTest_testRemoveSimplePrimaryKeyColumn | /**
* Tests removing the simple primary key column.
*/
@Test
public void testRemoveSimplePrimaryKeyColumn() {
testAlterTableColumn(TEST_TABLE,
AlterationType.DROP,
getColumn(TEST_TABLE, "id"), null,
expectedAlterRemoveColumnFromSimpleKeyStatements());
} | 3.68 |
hudi_FlinkClientUtil_getHadoopConfiguration | /**
* Returns a new Hadoop Configuration object using the path to the hadoop conf configured.
*
* @param hadoopConfDir Hadoop conf directory path.
* @return A Hadoop configuration instance.
*/
private static org.apache.hadoop.conf.Configuration getHadoopConfiguration(String hadoopConfDir) {
if (new File(hadoopConfDir).exists()) {
org.apache.hadoop.conf.Configuration hadoopConfiguration = new org.apache.hadoop.conf.Configuration();
File coreSite = new File(hadoopConfDir, "core-site.xml");
if (coreSite.exists()) {
hadoopConfiguration.addResource(new Path(coreSite.getAbsolutePath()));
}
File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
if (hdfsSite.exists()) {
hadoopConfiguration.addResource(new Path(hdfsSite.getAbsolutePath()));
}
File yarnSite = new File(hadoopConfDir, "yarn-site.xml");
if (yarnSite.exists()) {
hadoopConfiguration.addResource(new Path(yarnSite.getAbsolutePath()));
}
// Add mapred-site.xml. We need to read configurations like compression codec.
File mapredSite = new File(hadoopConfDir, "mapred-site.xml");
if (mapredSite.exists()) {
hadoopConfiguration.addResource(new Path(mapredSite.getAbsolutePath()));
}
return hadoopConfiguration;
}
return null;
} | 3.68 |
flink_ParquetVectorizedInputFormat_nextBatch | /** Advances to the next batch of rows. Returns false if there are no more. */
private boolean nextBatch(ParquetReaderBatch<T> batch) throws IOException {
for (WritableColumnVector v : batch.writableVectors) {
v.reset();
}
batch.columnarBatch.setNumRows(0);
if (rowsReturned >= totalRowCount) {
return false;
}
if (rowsReturned == totalCountLoadedSoFar) {
readNextRowGroup();
}
int num = (int) Math.min(batchSize, totalCountLoadedSoFar - rowsReturned);
for (int i = 0; i < columnReaders.length; ++i) {
if (columnReaders[i] == null) {
batch.writableVectors[i].fillWithNulls();
} else {
//noinspection unchecked
columnReaders[i].readToVector(num, batch.writableVectors[i]);
}
}
rowsReturned += num;
batch.columnarBatch.setNumRows(num);
return true;
} | 3.68 |
framework_VScrollTable_updateSortingProperties | /** For internal use only. May be removed or replaced in the future. */
public void updateSortingProperties(UIDL uidl) {
oldSortColumn = sortColumn;
if (uidl.hasVariable("sortascending")) {
sortAscending = uidl.getBooleanVariable("sortascending");
sortColumn = uidl.getStringVariable("sortcolumn");
}
} | 3.68 |
framework_VaadinService_unlockSession | /**
* Releases the lock for the given session for this service instance.
* Typically you want to call {@link VaadinSession#unlock()} instead of this
* method.
* <p>
* Note: The method and its signature has been changed to get lock instance
* as parameter in Vaadin 8.14.0. If you have overridden this method, you need
* to update your implementation.
* <p>
* Note: Overriding this method is not recommended, for custom lock storage
* strategy override {@link #getSessionLock(WrappedSession)} and
* {@link #setSessionLock(WrappedSession,Lock)} instead.
*
* @param wrappedSession
* The session to unlock, used only with assert
* @param lock
* Lock instance to unlock
*/
protected void unlockSession(WrappedSession wrappedSession, Lock lock) {
assert ((ReentrantLock) lock).isHeldByCurrentThread() : "Trying to unlock the session but it has not been locked by this thread";
lock.unlock();
} | 3.68 |
hbase_RegionScannerImpl_populateFromJoinedHeap | /** Returns true if more cells exist after this batch, false if scanner is done */
private boolean populateFromJoinedHeap(List<Cell> results, ScannerContext scannerContext)
throws IOException {
assert joinedContinuationRow != null;
boolean moreValues =
populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow);
if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) {
// We are done with this row, reset the continuation.
joinedContinuationRow = null;
}
// As the data is obtained from two independent heaps, we need to
// ensure that result list is sorted, because Result relies on that.
results.sort(comparator);
return moreValues;
} | 3.68 |
flink_OperatorInformation_getOutputType | /** Gets the return type of the user code function. */
public TypeInformation<OUT> getOutputType() {
return outputType;
} | 3.68 |
framework_ScrollbarBundle_setStylePrimaryName | /**
* Sets the primary style name.
*
* @param primaryStyleName
* The primary style name to use
*/
public void setStylePrimaryName(String primaryStyleName) {
root.setClassName(primaryStyleName + "-scroller");
} | 3.68 |
hbase_RegionServerAccounting_getGlobalMemStoreOffHeapSize | /** Returns the global memstore off-heap size in the RegionServer */
public long getGlobalMemStoreOffHeapSize() {
return this.globalMemStoreOffHeapSize.sum();
} | 3.68 |
hadoop_UpdateContainerTokenEvent_getUpdatedToken | /**
* Update Container Token.
*
* @return Container Token.
*/
public ContainerTokenIdentifier getUpdatedToken() {
return updatedToken;
} | 3.68 |
streampipes_PrimitivePropertyBuilder_measurementUnit | /**
* Defines the measurement unit (e.g., tons) of the event property.
*
* @param measurementUnit The measurement unit as a URI from a vocabulary (e.g., QUDT).
* @return this builder, for method chaining
*/
public PrimitivePropertyBuilder measurementUnit(URI measurementUnit) {
this.eventProperty.setMeasurementUnit(measurementUnit);
return this;
} | 3.68 |
hadoop_ApplicationServiceRecordProcessor_createAAAAInfo | /**
* Create an application AAAA record descriptor.
*
* @param record the service record.
* @throws Exception if there is an issue during descriptor creation.
*/
protected void createAAAAInfo(ServiceRecord record)
throws Exception {
AAAAApplicationRecordDescriptor
recordInfo = new AAAAApplicationRecordDescriptor(
getPath(), record);
registerRecordDescriptor(Type.AAAA, recordInfo);
} | 3.68 |
hbase_MasterObserver_preCreateTableRegionsInfos | /**
* Called before we create the region infos for this table. Called as part of create table RPC
* call.
* @param ctx the environment to interact with the framework and master
* @param desc the TableDescriptor for the table
 * @return the TableDescriptor used to create the table. The default is the one passed in.
 *         Returning {@code null} cancels the creation.
*/
default TableDescriptor preCreateTableRegionsInfos(
final ObserverContext<MasterCoprocessorEnvironment> ctx, TableDescriptor desc)
throws IOException {
return desc;
} | 3.68 |
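A minimal sketch of a master coprocessor using this hook to veto creation of tables whose name starts with "tmp_"; the blocking rule itself is hypothetical.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class TmpTableBlocker implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public TableDescriptor preCreateTableRegionsInfos(
    ObserverContext<MasterCoprocessorEnvironment> ctx, TableDescriptor desc) throws IOException {
    if (desc.getTableName().getNameAsString().startsWith("tmp_")) {
      return null; // per the contract above, returning null cancels the table creation
    }
    return desc; // otherwise keep the descriptor unchanged
  }
}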
pulsar_ResourceGroupService_incrementUsage | /**
* Increments usage stats for the resource groups associated with the given namespace and tenant.
* Expected to be called when a message is produced or consumed on a topic, or when we calculate
* usage periodically in the background by going through broker-service stats. [Not yet decided
* which model we will follow.] Broker-service stats will be cumulative, while calls from the
* topic produce/consume code will be per-produce/consume.
*
* If the tenant and NS are associated with different RGs, the statistics on both RGs are updated.
* If the tenant and NS are associated with the same RG, the stats on the RG are updated only once
* (to avoid a direct double-counting).
 * ToDo: will this distinction result in the expected semantics, or surprise users?
* For now, the only caller is internal to this class.
*
 * @param tenantName the tenant owning the topic
 * @param nsName the namespace name within the tenant
 * @param monClass the monitoring class whose usage is being incremented
 * @param incStats the usage increment (bytes and messages) to apply
 * @return true if the stats were updated; false if nothing was updated.
*/
protected boolean incrementUsage(String tenantName, String nsName,
ResourceGroupMonitoringClass monClass,
BytesAndMessagesCount incStats) throws PulsarAdminException {
final ResourceGroup nsRG = this.namespaceToRGsMap.get(NamespaceName.get(tenantName, nsName));
final ResourceGroup tenantRG = this.tenantToRGsMap.get(tenantName);
if (tenantRG == null && nsRG == null) {
return false;
}
// Expect stats to increase monotonically.
if (incStats.bytes < 0 || incStats.messages < 0) {
String errMesg = String.format("incrementUsage on tenant=%s, NS=%s: bytes (%s) or mesgs (%s) is negative",
tenantName, nsName, incStats.bytes, incStats.messages);
throw new PulsarAdminException(errMesg);
}
if (nsRG == tenantRG) {
// Update only once in this case.
// Note that we will update both tenant and namespace RGs in other cases.
nsRG.incrementLocalUsageStats(monClass, incStats);
rgLocalUsageMessages.labels(nsRG.resourceGroupName, monClass.name()).inc(incStats.messages);
rgLocalUsageBytes.labels(nsRG.resourceGroupName, monClass.name()).inc(incStats.bytes);
return true;
}
if (tenantRG != null) {
tenantRG.incrementLocalUsageStats(monClass, incStats);
rgLocalUsageMessages.labels(tenantRG.resourceGroupName, monClass.name()).inc(incStats.messages);
rgLocalUsageBytes.labels(tenantRG.resourceGroupName, monClass.name()).inc(incStats.bytes);
}
if (nsRG != null) {
nsRG.incrementLocalUsageStats(monClass, incStats);
rgLocalUsageMessages.labels(nsRG.resourceGroupName, monClass.name()).inc(incStats.messages);
rgLocalUsageBytes.labels(nsRG.resourceGroupName, monClass.name()).inc(incStats.bytes);
}
return true;
} | 3.68 |
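A heavily hedged sketch of an internal caller; the nested BytesAndMessagesCount class, its public fields and the Publish constant name are assumptions inferred from the snippet, and since incrementUsage() is protected the caller would live in the same package.

// Report one published message of 1 KiB against the tenant's and namespace's resource groups.
ResourceGroup.BytesAndMessagesCount delta = new ResourceGroup.BytesAndMessagesCount();
delta.bytes = 1024;
delta.messages = 1;
try {
    boolean counted = resourceGroupService.incrementUsage(
            "my-tenant", "my-namespace",
            ResourceGroupMonitoringClass.Publish, // constant name assumed
            delta);
    // counted == false means neither the tenant nor the namespace is attached to a resource group.
} catch (PulsarAdminException e) {
    // Thrown when the increment is negative; stats are expected to grow monotonically.
}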
hadoop_FederationStateStoreFacade_addOrUpdateApplicationHomeSubCluster | /**
* Add or Update ApplicationHomeSubCluster.
*
 * @param applicationId the id of the application.
 * @param subClusterId the home sub-cluster selected by the routing strategy.
 * @param retryCount the number of submission retries so far.
 * @param appSubmissionContext the application submission context.
 * @throws YarnException if the mapping cannot be added or updated in the state store.
*/
public void addOrUpdateApplicationHomeSubCluster(ApplicationId applicationId,
SubClusterId subClusterId, int retryCount, ApplicationSubmissionContext appSubmissionContext)
throws YarnException {
Boolean exists = existsApplicationHomeSubCluster(applicationId);
ApplicationHomeSubCluster appHomeSubCluster =
ApplicationHomeSubCluster.newInstance(applicationId, Time.now(),
subClusterId, appSubmissionContext);
if (!exists || retryCount == 0) {
// persist the mapping of applicationId and the subClusterId which has
// been selected as its home.
addApplicationHomeSubCluster(applicationId, appHomeSubCluster);
} else {
// update the mapping of applicationId and the home subClusterId to
// the new subClusterId we have selected.
updateApplicationHomeSubCluster(subClusterId, applicationId, appHomeSubCluster);
}
} | 3.68 |
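A hedged sketch of how a Router-side caller might drive this method across submission retries; chooseHomeSubCluster, maxSubmitRetries and the surrounding bookkeeping are hypothetical.

// On the first attempt (retryCount == 0) the mapping is inserted; on later attempts it is
// updated to whatever home sub-cluster was re-selected for the retry.
int retryCount = 0;
while (retryCount <= maxSubmitRetries) {
    SubClusterId home = chooseHomeSubCluster(appSubmissionContext); // hypothetical selection helper
    try {
        facade.addOrUpdateApplicationHomeSubCluster(
                applicationId, home, retryCount, appSubmissionContext);
        break; // mapping persisted (first attempt) or refreshed (subsequent attempts)
    } catch (YarnException e) {
        retryCount++; // re-select a home sub-cluster and try again
    }
}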
dubbo_AbstractDubboConfigBinder_getPropertySources | /**
* Get multiple {@link PropertySource propertySources}
*
* @return multiple {@link PropertySource propertySources}
*/
protected Iterable<PropertySource<?>> getPropertySources() {
return propertySources;
} | 3.68 |
flink_FlinkContainersSettings_getBaseImage | /**
* Gets base image.
*
* @return The base image.
*/
public String getBaseImage() {
return baseImage;
} | 3.68 |
flink_OperatingSystemRestriction_restrictTo | /**
* Restricts the execution to the given set of operating systems.
*
* @param reason reason for the restriction
* @param operatingSystems allowed operating systems
* @throws AssumptionViolatedException if this method is called on a forbidden operating system
*/
public static void restrictTo(final String reason, final OperatingSystem... operatingSystems)
throws AssumptionViolatedException {
final EnumSet<OperatingSystem> allowed = EnumSet.copyOf(Arrays.asList(operatingSystems));
Assume.assumeTrue(reason, allowed.contains(OperatingSystem.getCurrentOperatingSystem()));
} | 3.68 |
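A minimal usage sketch: skip (rather than fail) a JUnit test class on operating systems without POSIX file permissions; the test class is hypothetical and the OperatingSystemRestriction import is omitted.

import org.apache.flink.util.OperatingSystem;
import org.junit.BeforeClass;

public class PosixPermissionsRelatedTest {

    @BeforeClass
    public static void checkOperatingSystem() {
        // On any other OS this throws AssumptionViolatedException, which JUnit reports as skipped tests.
        OperatingSystemRestriction.restrictTo(
                "POSIX file permissions are required by these tests",
                OperatingSystem.LINUX, OperatingSystem.MAC_OS);
    }

    // ... test methods ...
}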
querydsl_OrderSpecifier_nullsFirst | /**
 * Create a new OrderSpecifier instance with nulls-first ordering enabled
 *
 * @return new instance with nulls-first ordering enabled
*/
public OrderSpecifier<T> nullsFirst() {
return new OrderSpecifier<T>(order, target, NullHandling.NullsFirst);
} | 3.68 |
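A usage sketch with QueryDSL JPA; Customer, the generated QCustomer type and queryFactory (a JPAQueryFactory) are hypothetical.

// Order customers by last name ascending, listing rows with a NULL last name first.
List<Customer> customers = queryFactory
        .selectFrom(QCustomer.customer)
        .orderBy(QCustomer.customer.lastName.asc().nullsFirst())
        .fetch();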
framework_Navigator_destroy | /**
 * Disables this navigator by detaching it from the state manager and from
 * the UI it is attached to.
*
* @since 7.6.7
*/
public void destroy() {
stateManager.setNavigator(null);
ui.setNavigator(null);
} | 3.68 |
framework_ProfilerCompilationCanary_canaryWithProfiler | /*
* We don't care about running this method, we just want to make sure that
* the generated implementation is empty.
*/
public static void canaryWithProfiler() {
Profiler.enter("canaryWithProfiler");
Profiler.leave("canaryWithProfiler");
} | 3.68 |
flink_HiveFunctionDefinitionFactory_createFunctionDefinitionFromHiveFunction | /**
* Create a FunctionDefinition from a Hive function's class name. Called directly by {@link
* org.apache.flink.table.module.hive.HiveModule}.
*/
public FunctionDefinition createFunctionDefinitionFromHiveFunction(
String name, String functionClassName, Context context) {
Class<?> functionClz;
try {
functionClz = context.getClassLoader().loadClass(functionClassName);
LOG.info("Successfully loaded Hive udf '{}' with class '{}'", name, functionClassName);
} catch (ClassNotFoundException e) {
throw new TableException(
String.format("Failed to initiate an instance of class %s.", functionClassName),
e);
}
if (UDF.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveSimpleUDF", name);
return new HiveSimpleUDF(new HiveFunctionWrapper<>(functionClz), hiveShim);
} else if (GenericUDF.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveGenericUDF", name);
return new HiveGenericUDF(new HiveFunctionWrapper<>(functionClz), hiveShim);
} else if (GenericUDTF.class.isAssignableFrom(functionClz)) {
LOG.info("Transforming Hive function '{}' into a HiveGenericUDTF", name);
return new HiveGenericUDTF(new HiveFunctionWrapper<>(functionClz), hiveShim);
} else if (GenericUDAFResolver2.class.isAssignableFrom(functionClz)
|| GenericUDAFResolver.class.isAssignableFrom(functionClz)
|| UDAF.class.isAssignableFrom(functionClz)) {
if (GenericUDAFResolver2.class.isAssignableFrom(functionClz)) {
LOG.info(
"Transforming Hive function '{}' into a HiveGenericUDAF without UDAF bridging",
name);
return new HiveGenericUDAF(
new HiveFunctionWrapper<>(functionClz), false, true, hiveShim);
} else if (GenericUDAFResolver.class.isAssignableFrom(functionClz)) {
LOG.info(
"Transforming Hive function '{}' into a HiveGenericUDAF without UDAF bridging",
name);
return new HiveGenericUDAF(
new HiveFunctionWrapper<>(functionClz), false, false, hiveShim);
} else {
LOG.info(
"Transforming Hive function '{}' into a HiveGenericUDAF with UDAF bridging",
name);
return new HiveGenericUDAF(
new HiveFunctionWrapper<>(functionClz), true, false, hiveShim);
}
} else {
throw new IllegalArgumentException(
String.format(
"HiveFunctionDefinitionFactory cannot initiate FunctionDefinition for class %s",
functionClassName));
}
} | 3.68 |
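A hedged sketch of the usual entry point: loading HiveModule (the direct caller named in the javadoc above), after which Hive functions are resolved through this factory; the Hive version string is only an example.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.module.hive.HiveModule;

public class HiveModuleSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        tEnv.loadModule("hive", new HiveModule("2.3.6")); // Hive version string is an example
        // Hive functions not shadowed by Flink's core module now resolve through
        // createFunctionDefinitionFromHiveFunction and get wrapped as HiveSimpleUDF,
        // HiveGenericUDF, HiveGenericUDTF or HiveGenericUDAF based on their base class.
    }
}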