name | code_snippet | score
---|---|---|
AreaShop_RegionAccessSet_asUniqueIdList | /**
* Get this access set as a list of player UUIDs.
* @return List of player UUIDs: first the players added by UUID, then the players added by name; groups are not included in the list
*/
public List<UUID> asUniqueIdList() {
List<UUID> result = new ArrayList<>();
result.addAll(playerUniqueIds);
for(String playerName : playerNames) {
OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(playerName);
if(offlinePlayer != null && offlinePlayer.getUniqueId() != null) {
result.add(offlinePlayer.getUniqueId());
}
}
return result;
} | 3.68 |
hbase_IdentityTableMapper_map | /**
* Pass the key, value to reduce.
* @param key The current key.
* @param value The current value.
* @param context The current context.
* @throws IOException When writing the record fails.
* @throws InterruptedException When the job is aborted.
*/
public void map(ImmutableBytesWritable key, Result value, Context context)
throws IOException, InterruptedException {
context.write(key, value);
} | 3.68 |
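As a usage sketch, the identity mapper is typically wired into a scan job through its static `initJob` helper; the table name and `Job` setup below are illustrative assumptions:

```java
// Hedged sketch: wiring IdentityTableMapper into a scan job.
// "mytable" and the job name are hypothetical.
Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "identity-scan");
Scan scan = new Scan();
IdentityTableMapper.initJob("mytable", scan, IdentityTableMapper.class, job);
```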
morf_UseIndex_getTable | /**
* @return The table whose index should be used.
*/
public TableReference getTable() {
return table;
} | 3.68 |
flink_AbstractServerBase_getQueryExecutor | /** Returns the thread-pool responsible for processing incoming requests. */
protected ExecutorService getQueryExecutor() {
return queryExecutor;
} | 3.68 |
hbase_CommonFSUtils_removeWALRootPath | /**
* Checks for the presence of the WAL log root path (using the provided conf object) in the given
* path. If it exists, this method removes it and returns the String representation of remaining
* relative path.
* @param path must not be null
* @param conf must not be null
* @return String representation of the remaining relative path
* @throws IOException from underlying filesystem
*/
public static String removeWALRootPath(Path path, final Configuration conf) throws IOException {
Path root = getWALRootDir(conf);
String pathStr = path.toString();
// If the path does not start with the WAL root, it is not under the root; return it unchanged.
if (!pathStr.startsWith(root.toString())) {
return pathStr;
}
// Otherwise strip the root prefix, plus the trailing "/" separator.
return pathStr.substring(root.toString().length() + 1);
} | 3.68 |
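For illustration, assuming the WAL root directory resolves to `/hbase/wal` (a hypothetical value), the method behaves as follows:

```java
// Assuming getWALRootDir(conf) returns /hbase/wal (hypothetical):
// removeWALRootPath(new Path("/hbase/wal/table/region"), conf) -> "table/region"
// removeWALRootPath(new Path("/other/dir/file"), conf)         -> "/other/dir/file" (returned unchanged)
```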
framework_DetailsManagerConnector_refreshDetailsVisibilityWithRange | /**
* Refreshes the existence of details components within the given range, and
* gives a delayed notice to Grid if any got added or updated.
*/
private void refreshDetailsVisibilityWithRange(Range rangeToRefresh) {
if (!getState().hasDetailsGenerator) {
markDetailsAddedOrUpdatedForDelayedAlertToGrid(false);
return;
}
boolean newOrUpdatedDetails = false;
// Don't update the latestVisibleRowRange class variable here; the
// calling method should take care of that if relevant.
Range currentVisibleRowRange = getWidget().getEscalator()
.getVisibleRowRange();
Range[] partitions = currentVisibleRowRange
.partitionWith(rangeToRefresh);
// only inspect the range where visible and refreshed rows overlap
Range intersectingRange = partitions[1];
for (int i = intersectingRange.getStart(); i < intersectingRange
.getEnd(); ++i) {
String id = getDetailsComponentConnectorId(i);
detachIfNeeded(i, id);
if (id == null) {
continue;
}
indexToDetailConnectorId.put(i, id);
getWidget().setDetailsVisible(i, true);
newOrUpdatedDetails = true;
}
markDetailsAddedOrUpdatedForDelayedAlertToGrid(newOrUpdatedDetails);
} | 3.68 |
flink_MemorySegment_get | /**
* Bulk get method. Copies {@code numBytes} bytes from this memory segment, starting at position
* {@code offset} to the target {@code ByteBuffer}. The bytes will be put into the target buffer
* starting at the buffer's current position. If this method attempts to write more bytes than
* the target byte buffer has remaining (with respect to {@link ByteBuffer#remaining()}), this
* method will cause a {@link java.nio.BufferOverflowException}.
*
* @param offset The position in this memory segment at which to start reading the bytes.
* @param target The ByteBuffer to copy the bytes to.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If the offset is invalid, or this segment does not contain
* the given number of bytes (starting from offset), or the target byte buffer does not have
* enough space for the bytes.
* @throws ReadOnlyBufferException If the target buffer is read-only.
*/
public void get(int offset, ByteBuffer target, int numBytes) {
// check the byte array offset and length
if ((offset | numBytes | (offset + numBytes)) < 0) {
throw new IndexOutOfBoundsException();
}
if (target.isReadOnly()) {
throw new ReadOnlyBufferException();
}
final int targetOffset = target.position();
final int remaining = target.remaining();
if (remaining < numBytes) {
throw new BufferOverflowException();
}
if (target.isDirect()) {
// copy to the target memory directly
final long targetPointer = getByteBufferAddress(target) + targetOffset;
final long sourcePointer = address + offset;
if (sourcePointer <= addressLimit - numBytes) {
UNSAFE.copyMemory(heapMemory, sourcePointer, null, targetPointer, numBytes);
target.position(targetOffset + numBytes);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
throw new IndexOutOfBoundsException();
}
} else if (target.hasArray()) {
// move directly into the byte array
get(offset, target.array(), targetOffset + target.arrayOffset(), numBytes);
// this must be after the get() call to ensure that the byte buffer is not
// modified in case the call fails
target.position(targetOffset + numBytes);
} else {
// other types of byte buffers
throw new IllegalArgumentException(
"The target buffer is not direct, and has no array.");
}
} | 3.68 |
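A minimal sketch of the bulk get, assuming Flink's `MemorySegmentFactory` for allocating an unpooled segment:

```java
import java.nio.ByteBuffer;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class SegmentGetExample {
    public static void main(String[] args) {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(64);
        segment.put(0, new byte[] {1, 2, 3, 4, 5, 6, 7, 8});
        ByteBuffer target = ByteBuffer.allocate(16);
        // Copies 8 bytes starting at segment offset 0; advances target's position from 0 to 8.
        segment.get(0, target, 8);
        System.out.println(target.position()); // 8
    }
}
```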
framework_TableConnector_updateFromUIDL | /*
* (non-Javadoc)
*
* @see com.vaadin.client.Paintable#updateFromUIDL(com.vaadin.client.UIDL,
* com.vaadin.client.ApplicationConnection)
*/
@Override
public void updateFromUIDL(UIDL uidl, ApplicationConnection client) {
getWidget().rendering = true;
// If a row has an open context menu, it will be closed as the row is
// detached. Retain a reference here so we can restore the menu if
// required.
ContextMenuDetails contextMenuBeforeUpdate = getWidget().contextMenu;
if (uidl.hasAttribute(TableConstants.ATTRIBUTE_PAGEBUFFER_FIRST)) {
getWidget().serverCacheFirst = uidl
.getIntAttribute(TableConstants.ATTRIBUTE_PAGEBUFFER_FIRST);
getWidget().serverCacheLast = uidl
.getIntAttribute(TableConstants.ATTRIBUTE_PAGEBUFFER_LAST);
} else {
getWidget().serverCacheFirst = -1;
getWidget().serverCacheLast = -1;
}
/*
* We need to do this before updateComponent since updateComponent calls
* this.setHeight() which will calculate a new body height depending on
* the space available.
*/
if (uidl.hasAttribute("colfooters")) {
getWidget().showColFooters = uidl.getBooleanAttribute("colfooters");
}
getWidget().tFoot.setVisible(getWidget().showColFooters);
if (!isRealUpdate(uidl)) {
getWidget().rendering = false;
return;
}
getWidget().paintableId = uidl.getStringAttribute("id");
getWidget().immediate = getState().immediate;
int previousTotalRows = getWidget().totalRows;
getWidget().updateTotalRows(uidl);
boolean totalRowsHaveChanged = (getWidget().totalRows != previousTotalRows);
getWidget().updateDragMode(uidl);
// Update child measure hint
int childMeasureHint = uidl.hasAttribute("measurehint")
? uidl.getIntAttribute("measurehint")
: 0;
getWidget().setChildMeasurementHint(
ChildMeasurementHint.values()[childMeasureHint]);
getWidget().updateSelectionProperties(uidl, getState(), isReadOnly());
if (uidl.hasAttribute("alb")) {
getWidget().bodyActionKeys = uidl.getStringArrayAttribute("alb");
} else {
// Need to clear the actions if the action handlers have been
// removed
getWidget().bodyActionKeys = null;
}
getWidget().setCacheRateFromUIDL(uidl);
getWidget().recalcWidths = uidl.hasAttribute("recalcWidths");
if (getWidget().recalcWidths) {
getWidget().tHead.clear();
getWidget().tFoot.clear();
}
getWidget().updatePageLength(uidl);
getWidget().updateFirstVisibleAndScrollIfNeeded(uidl);
getWidget().showRowHeaders = uidl.getBooleanAttribute("rowheaders");
getWidget().showColHeaders = uidl.getBooleanAttribute("colheaders");
getWidget().updateSortingProperties(uidl);
getWidget().updateActionMap(uidl);
getWidget().updateColumnProperties(uidl);
UIDL ac = uidl.getChildByTagName("-ac");
if (ac == null) {
if (getWidget().dropHandler != null) {
// remove dropHandler if not present anymore
getWidget().dropHandler = null;
}
} else {
if (getWidget().dropHandler == null) {
getWidget().dropHandler = getWidget().new VScrollTableDropHandler();
}
getWidget().dropHandler.updateAcceptRules(ac);
}
UIDL partialRowAdditions = uidl.getChildByTagName("prows");
UIDL partialRowUpdates = uidl.getChildByTagName("urows");
if (partialRowUpdates != null || partialRowAdditions != null) {
getWidget().postponeSanityCheckForLastRendered = true;
// we may have pending cache row fetch, cancel it. See #2136
getWidget().rowRequestHandler.cancel();
getWidget().updateRowsInBody(partialRowUpdates);
getWidget().addAndRemoveRows(partialRowAdditions);
// sanity check (in case the value has slipped beyond the total
// amount of rows)
getWidget().scrollBody
.setLastRendered(getWidget().scrollBody.getLastRendered());
getWidget().updateMaxIndent();
} else {
getWidget().postponeSanityCheckForLastRendered = false;
UIDL rowData = uidl.getChildByTagName("rows");
if (rowData != null) {
// we may have pending cache row fetch, cancel it. See #2136
getWidget().rowRequestHandler.cancel();
if (!getWidget().recalcWidths
&& getWidget().initializedAndAttached) {
getWidget().updateBody(rowData,
uidl.getIntAttribute("firstrow"),
uidl.getIntAttribute("rows"));
if (getWidget().headerChangedDuringUpdate) {
getWidget().triggerLazyColumnAdjustment(true);
}
} else {
getWidget().initializeRows(uidl, rowData);
}
}
}
boolean keyboardSelectionOverRowFetchInProgress = getWidget()
.selectSelectedRows(uidl);
// If a row had an open context menu before the update, and after the
// update there's a row with the same key as that row, restore the
// context menu. See #8526.
showSavedContextMenu(contextMenuBeforeUpdate);
if (!getWidget().isSelectable()) {
getWidget().scrollBody.addStyleName(
getWidget().getStylePrimaryName() + "-body-noselection");
} else {
getWidget().scrollBody.removeStyleName(
getWidget().getStylePrimaryName() + "-body-noselection");
}
getWidget().hideScrollPositionAnnotation();
// selection is not in sync with the server; avoid excessive server visits by
// clearing the flag used during normal operation
if (!keyboardSelectionOverRowFetchInProgress) {
getWidget().selectionChanged = false;
}
/*
* This is called when the Home or page up button has been pressed in
* selectable mode and the next selected row was not yet rendered in the
* client
*/
if (getWidget().selectFirstItemInNextRender
|| getWidget().focusFirstItemInNextRender) {
getWidget().selectFirstRenderedRowInViewPort(
getWidget().focusFirstItemInNextRender);
getWidget().selectFirstItemInNextRender = getWidget().focusFirstItemInNextRender = false;
}
/*
* This is called when the page down or end button has been pressed in
* selectable mode and the next selected row was not yet rendered in the
* client
*/
if (getWidget().selectLastItemInNextRender
|| getWidget().focusLastItemInNextRender) {
getWidget().selectLastRenderedRowInViewPort(
getWidget().focusLastItemInNextRender);
getWidget().selectLastItemInNextRender = getWidget().focusLastItemInNextRender = false;
}
getWidget().multiselectPending = false;
if (getWidget().focusedRow != null) {
if (!getWidget().focusedRow.isAttached()
&& !getWidget().rowRequestHandler
.isRequestHandlerRunning()) {
// focused row has been orphaned, can't focus
if (getWidget().selectedRowKeys
.contains(getWidget().focusedRow.getKey())) {
// if row cache was refreshed, focused row should be
// in selection and exists with same index
getWidget().setRowFocus(getWidget().getRenderedRowByKey(
getWidget().focusedRow.getKey()));
} else if (!getWidget().selectedRowKeys.isEmpty()) {
// try to focus any row in selection
getWidget().setRowFocus(getWidget().getRenderedRowByKey(
getWidget().selectedRowKeys.iterator().next()));
} else {
// try to focus any row
getWidget().focusRowFromBody();
}
}
}
/*
* If the server has (re)initialized the rows, our selectionRangeStart
* row will point to an index that the server knows nothing about,
* causing problems if doing multi selection with shift. The field will
* be cleared a little later when the row focus has been restored.
* (#8584)
*/
if (uidl.hasAttribute(TableConstants.ATTRIBUTE_KEY_MAPPER_RESET)
&& uidl.getBooleanAttribute(
TableConstants.ATTRIBUTE_KEY_MAPPER_RESET)
&& getWidget().selectionRangeStart != null) {
assert !getWidget().selectionRangeStart.isAttached();
getWidget().selectionRangeStart = getWidget().focusedRow;
}
getWidget().tabIndex = getState().tabIndex;
getWidget().setProperTabIndex();
Scheduler.get().scheduleFinally(new ScheduledCommand() {
@Override
public void execute() {
getWidget().resizeSortedColumnForSortIndicator();
}
});
// Remember this to detect situations where overflow hack might be
// needed during scrolling
getWidget().lastRenderedHeight = getWidget().scrollBody
.getOffsetHeight();
getWidget().rendering = false;
getWidget().headerChangedDuringUpdate = false;
getWidget().collapsibleMenuContent = getState().collapseMenuContent;
} | 3.68 |
hbase_Bytes_copy | /**
* Copy the specified range of the given byte array and return a new byte array with the same
* content.
* @param bytes the byte array to copy from
* @param offset the offset at which to start copying
* @param length the number of bytes to copy
* @return a copy of the designated range of the given byte array
*/
public static byte[] copy(byte[] bytes, final int offset, final int length) {
if (bytes == null) return null;
byte[] result = new byte[length];
System.arraycopy(bytes, offset, result, 0, length);
return result;
} | 3.68 |
flink_ExecutionEnvironment_setNumberOfExecutionRetries | /**
* Sets the number of times that failed tasks are re-executed. A value of zero effectively
* disables fault tolerance. A value of {@code -1} indicates that the system default value (as
* defined in the configuration) should be used.
*
* @param numberOfExecutionRetries The number of times the system will try to re-execute failed
* tasks.
* @deprecated This method will be replaced by {@link #setRestartStrategy}. The {@link
* RestartStrategies.FixedDelayRestartStrategyConfiguration} contains the number of
* execution retries.
*/
@Deprecated
@PublicEvolving
public void setNumberOfExecutionRetries(int numberOfExecutionRetries) {
config.setNumberOfExecutionRetries(numberOfExecutionRetries);
} | 3.68 |
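Per the deprecation note, the equivalent configuration via `setRestartStrategy` would look roughly like this (the retry count and delay are illustrative):

```java
// Hedged sketch of the documented replacement; values are illustrative.
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
        3,                             // number of restart attempts
        Time.of(10, TimeUnit.SECONDS)  // delay between attempts
));
```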
hbase_OrderedInt16_decodeShort | /**
* Read a {@code short} value from the buffer {@code src}.
* @param src the {@link PositionedByteRange} to read the {@code short} from
* @return the {@code short} read from buffer
*/
public short decodeShort(PositionedByteRange src) {
return OrderedBytes.decodeInt16(src);
} | 3.68 |
hadoop_AzureBlobFileSystemStore_breakLease | /**
* Break any current lease on an ABFS file.
*
* @param path file name
* @param tracingContext TracingContext instance to track correlation IDs
* @throws AzureBlobFileSystemException on any exception while breaking the lease
*/
public void breakLease(final Path path, final TracingContext tracingContext) throws AzureBlobFileSystemException {
LOG.debug("lease path: {}", path);
client.breakLease(getRelativePath(path), tracingContext);
} | 3.68 |
hadoop_WrappedIOStatistics_setWrapped | /**
* Set the wrapped statistics.
* Will fail if the field is already set.
* @param wrapped new value
*/
protected void setWrapped(final IOStatistics wrapped) {
Preconditions.checkState(this.wrapped == null,
"Attempted to overwrite existing wrapped statistics");
this.wrapped = wrapped;
} | 3.68 |
hbase_HBaseTestingUtility_getFromStoreFile | /**
* Do a small get/scan against one store. This is required because the store has no actual
* methods for querying itself, and instead relies on StoreScanner.
*/
public static List<Cell> getFromStoreFile(HStore store, byte[] row, NavigableSet<byte[]> columns)
throws IOException {
Get get = new Get(row);
Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
s.put(store.getColumnFamilyDescriptor().getName(), columns);
return getFromStoreFile(store, get);
} | 3.68 |
hbase_BucketCache_getAllocationFailWarningMessage | /**
* Prepare and return a warning message for Bucket Allocator Exception
* @param fle The exception
* @param re The RAMQueueEntry for which the exception was thrown.
* @return A warning message created from the input RAMQueueEntry object.
*/
private static String getAllocationFailWarningMessage(final BucketAllocatorException fle,
final RAMQueueEntry re) {
final StringBuilder sb = new StringBuilder();
sb.append("Most recent failed allocation after ");
sb.append(ALLOCATION_FAIL_LOG_TIME_PERIOD);
sb.append(" ms;");
if (re != null) {
if (re.getData() instanceof HFileBlock) {
final HFileContext fileContext = ((HFileBlock) re.getData()).getHFileContext();
final String columnFamily = Bytes.toString(fileContext.getColumnFamily());
final String tableName = Bytes.toString(fileContext.getTableName());
if (tableName != null) {
sb.append(" Table: ");
sb.append(tableName);
}
if (columnFamily != null) {
sb.append(" CF: ");
sb.append(columnFamily);
}
sb.append(" HFile: ");
if (fileContext.getHFileName() != null) {
sb.append(fileContext.getHFileName());
} else {
sb.append(re.getKey());
}
} else {
sb.append(" HFile: ");
sb.append(re.getKey());
}
}
sb.append(" Message: ");
sb.append(fle.getMessage());
return sb.toString();
} | 3.68 |
hbase_ServerManager_loadLastFlushedSequenceIds | /**
* Load last flushed sequence id of each region from HDFS, if persisted
*/
public void loadLastFlushedSequenceIds() throws IOException {
if (!persistFlushedSequenceId) {
return;
}
Configuration conf = master.getConfiguration();
Path rootDir = CommonFSUtils.getRootDir(conf);
Path lastFlushedSeqIdPath = new Path(rootDir, LAST_FLUSHED_SEQ_ID_FILE);
FileSystem fs = FileSystem.get(conf);
if (!fs.exists(lastFlushedSeqIdPath)) {
LOG.info("No .lastflushedseqids found at " + lastFlushedSeqIdPath
+ " will record last flushed sequence id"
+ " for regions by regionserver report all over again");
return;
} else {
LOG.info("begin to load .lastflushedseqids at " + lastFlushedSeqIdPath);
}
FSDataInputStream in = fs.open(lastFlushedSeqIdPath);
try {
FlushedSequenceId flushedSequenceId = FlushedSequenceId.parseDelimitedFrom(in);
if (flushedSequenceId == null) {
LOG.info(".lastflushedseqids found at {} is empty", lastFlushedSeqIdPath);
return;
}
for (FlushedRegionSequenceId flushedRegionSequenceId : flushedSequenceId
.getRegionSequenceIdList()) {
byte[] encodedRegionName = flushedRegionSequenceId.getRegionEncodedName().toByteArray();
flushedSequenceIdByRegion.putIfAbsent(encodedRegionName,
flushedRegionSequenceId.getSeqId());
if (
flushedRegionSequenceId.getStoresList() != null
&& flushedRegionSequenceId.getStoresList().size() != 0
) {
ConcurrentNavigableMap<byte[], Long> storeFlushedSequenceId =
computeIfAbsent(storeFlushedSequenceIdsByRegion, encodedRegionName,
() -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
for (FlushedStoreSequenceId flushedStoreSequenceId : flushedRegionSequenceId
.getStoresList()) {
storeFlushedSequenceId.put(flushedStoreSequenceId.getFamily().toByteArray(),
flushedStoreSequenceId.getSeqId());
}
}
}
} finally {
in.close();
}
} | 3.68 |
hadoop_JavaCommandLineBuilder_addMandatoryConfOption | /**
* Add a mandatory config option
* @param conf configuration
* @param key key
* @throws BadConfigException if the key is missing
*/
public void addMandatoryConfOption(Configuration conf,
String key) throws BadConfigException {
if (!addConfOption(conf, key)) {
throw new BadConfigException("Missing configuration option: " + key);
}
} | 3.68 |
hudi_RepairUtils_findInstantFilesToRemove | /**
* Finds the dangling files to remove for a given instant to repair.
*
* @param instantToRepair Instant timestamp to repair.
* @param baseAndLogFilesFromFs A {@link List} of base and log files based on the file system.
* @param activeTimeline {@link HoodieActiveTimeline} instance.
* @param archivedTimeline {@link HoodieArchivedTimeline} instance.
* @return A {@link List} of relative file paths to base path for removing.
*/
public static List<String> findInstantFilesToRemove(
String instantToRepair, List<String> baseAndLogFilesFromFs,
HoodieActiveTimeline activeTimeline, HoodieArchivedTimeline archivedTimeline) {
// Skips the instant if it is requested or inflight in active timeline
if (!activeTimeline.filter(instant -> instant.getTimestamp().equals(instantToRepair)
&& !instant.isCompleted()).empty()) {
return Collections.emptyList();
}
try {
boolean doesInstantExist = false;
Option<Set<String>> filesFromTimeline = Option.empty();
Option<HoodieInstant> instantOption = activeTimeline.filterCompletedInstants().filter(
instant -> instant.getTimestamp().equals(instantToRepair)).firstInstant();
if (instantOption.isPresent()) {
// Completed instant in active timeline
doesInstantExist = true;
filesFromTimeline = RepairUtils.getBaseAndLogFilePathsFromTimeline(
activeTimeline, instantOption.get());
} else {
instantOption = archivedTimeline.filterCompletedInstants().filter(
instant -> instant.getTimestamp().equals(instantToRepair)).firstInstant();
if (instantOption.isPresent()) {
// Completed instant in archived timeline
doesInstantExist = true;
filesFromTimeline = RepairUtils.getBaseAndLogFilePathsFromTimeline(
archivedTimeline, instantOption.get());
}
}
if (doesInstantExist) {
if (!filesFromTimeline.isPresent() || filesFromTimeline.get().isEmpty()) {
// Skips if no instant details
return Collections.emptyList();
}
// Excludes committed base and log files from timeline
Set<String> filesToRemove = new HashSet<>(baseAndLogFilesFromFs);
filesToRemove.removeAll(filesFromTimeline.get());
return new ArrayList<>(filesToRemove);
} else {
// The instant does not exist in the whole timeline (neither completed nor requested/inflight),
// this means the files from this instant are dangling, which should be removed
return baseAndLogFilesFromFs;
}
} catch (IOException e) {
// In case of failure, does not remove any files for the instant
return Collections.emptyList();
}
} | 3.68 |
framework_NotificationElement_getType | /**
* Returns type of the Notification element.
*
* @return type of the Notification element
*/
public String getType() {
// The notification type can only be determined from the CSS rules of
// the notification, so we search for the CSS rules that represent each
// notification type.
// This map maps a CSS style rule to a notification type.
Map<String, String> styleToTypeMap = initStyleToTypeMap();
for (Map.Entry<String, String> entry : styleToTypeMap.entrySet()) {
String notifType = entry.getKey();
// Check notification has css style which describes notification
// type
if (getAttribute("class").contains(notifType)) {
return entry.getValue();
}
}
return "";
} | 3.68 |
framework_DefaultConnectionStateHandler_updateDialog | /**
* Called whenever a reconnect attempt fails to allow updating of dialog
* contents.
*/
protected void updateDialog() {
reconnectDialog.setText(getDialogText(reconnectAttempt));
} | 3.68 |
flink_OneInputOperatorTransformation_setMaxParallelism | /**
* Sets the maximum parallelism of this operator.
*
* <p>The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
* number of key groups used for partitioned state.
*
* @param maxParallelism Maximum parallelism
* @return The operator with set maximum parallelism
*/
@PublicEvolving
public OneInputOperatorTransformation<T> setMaxParallelism(int maxParallelism) {
this.operatorMaxParallelism = OptionalInt.of(maxParallelism);
return this;
} | 3.68 |
hadoop_QueueStateManager_activateQueue | /**
* Activate the queue.
* @param queueName the queue name
* @throws YarnException if the queue does not exist
* or the queue can not be activated.
*/
@SuppressWarnings("unchecked")
public synchronized void activateQueue(String queueName)
throws YarnException {
SchedulerQueue<T> queue = queueManager.getQueue(queueName);
if (queue == null) {
throw new YarnException("The specified queue:" + queueName
+ " does not exist!");
}
queue.activateQueue();
} | 3.68 |
hbase_HttpServer_needsClientAuth | /**
* Specify whether the server should authorize the client in SSL connections.
*/
public Builder needsClientAuth(boolean value) {
this.needsClientAuth = value;
return this;
} | 3.68 |
pulsar_ConsumerConfiguration_getReceiverQueueSize | /**
* @return the configure receiver queue size value
*/
public int getReceiverQueueSize() {
return conf.getReceiverQueueSize();
} | 3.68 |
hadoop_AbstractS3ACommitter_maybeCreateSuccessMarker | /**
* if the job requires a success marker on a successful job,
* create the {@code _SUCCESS} file.
*
* While the classic committers create a 0-byte file, the S3A committers
* PUT up the contents of a {@link SuccessData} file.
* The file is returned, even if no marker is created.
* This is so it can be saved to a report directory.
* @param context job context
* @param filenames list of filenames.
* @param ioStatistics any IO Statistics to include
* @throws IOException IO failure
* @return the success data.
*/
protected SuccessData maybeCreateSuccessMarker(
final JobContext context,
final List<String> filenames,
final IOStatisticsSnapshot ioStatistics)
throws IOException {
SuccessData successData =
createSuccessData(context, filenames, ioStatistics,
getDestFS().getConf());
if (createJobMarker) {
// save it to the job dest dir
commitOperations.createSuccessMarker(getOutputPath(), successData, true);
}
return successData;
} | 3.68 |
flink_DeltaIteration_setSolutionSetUnManaged | /**
* Sets whether to keep the solution set in managed memory (safe against heap exhaustion) or
* unmanaged memory (objects on heap).
*
* @param solutionSetUnManaged True to keep the solution set in unmanaged memory, false to keep
* it in managed memory.
* @see #isSolutionSetUnManaged()
*/
public void setSolutionSetUnManaged(boolean solutionSetUnManaged) {
this.solutionSetUnManaged = solutionSetUnManaged;
} | 3.68 |
hbase_TableDescriptorBuilder_toStringCustomizedValues | /**
* @return Name of this table and then a map of all of the column family descriptors (with only
* the non-default column family attributes)
*/
@Override
public String toStringCustomizedValues() {
StringBuilder s = new StringBuilder();
s.append('\'').append(Bytes.toString(name.getName())).append('\'');
s.append(getValues(false));
families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues()));
return s.toString();
} | 3.68 |
open-banking-gateway_Xs2aOauth2Parameters_toParameters | // TODO - MapStruct?
public Oauth2Service.Parameters toParameters() {
Oauth2Service.Parameters parameters = new Oauth2Service.Parameters();
parameters.setRedirectUri(oauth2RedirectBackLink);
parameters.setState(state);
parameters.setConsentId(consentId);
parameters.setPaymentId(paymentId);
parameters.setScaOAuthLink(scaOauthLink);
parameters.setScope(scope);
return parameters;
} | 3.68 |
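The `// TODO - MapStruct?` comment suggests replacing the manual copy with a generated mapper; a hypothetical MapStruct version (all mapping names are assumptions based on the setters above) might be:

```java
import org.mapstruct.Mapper;
import org.mapstruct.Mapping;

// Hypothetical MapStruct mapper; source/target field names are assumed.
// Same-named fields (state, consentId, paymentId, scope) map automatically.
@Mapper
public interface Xs2aOauth2ParametersMapper {
    @Mapping(source = "oauth2RedirectBackLink", target = "redirectUri")
    @Mapping(source = "scaOauthLink", target = "scaOAuthLink")
    Oauth2Service.Parameters toParameters(Xs2aOauth2Parameters source);
}
```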
MagicPlugin_BlockFace_getModZ | /**
* Get the amount of Z-coordinates to modify to get the represented block
*
* @return Amount of Z-coordinates to modify
*/
public int getModZ() {
return modZ;
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setCompressTags | /**
* Set whether the tags should be compressed along with DataBlockEncoding. When no
* DataBlockEncoding is in use, this has no effect.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) {
return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags));
} | 3.68 |
hadoop_JWTRedirectAuthenticationHandler_setPublicKey | /**
* Primarily for testing, this provides a way to set the publicKey for
* signature verification without needing to get a PEM encoded value.
*
* @param pk publicKey for the token signature verification
*/
public void setPublicKey(RSAPublicKey pk) {
publicKey = pk;
} | 3.68 |
hbase_TableHFileArchiveTracker_create | /**
* Create an archive tracker that uses the given table monitor. Should only be used in special
* cases (e.g. testing).
* @param zkw Watcher for the ZooKeeper cluster that we should track
* @param monitor Monitor for which tables need hfile archiving
* @return ZooKeeper tracker that monitors whether this server should archive hfiles for a given
* table
*/
private static TableHFileArchiveTracker create(ZKWatcher zkw, HFileArchiveTableMonitor monitor) {
return new TableHFileArchiveTracker(zkw, monitor);
} | 3.68 |
hibernate-validator_ConstraintHelper_getAllValidatorDescriptors | /**
* Returns the constraint validator classes for the given constraint
* annotation type, as retrieved from
*
* <ul>
* <li>{@link Constraint#validatedBy()},
* <li>internally registered validators for built-in constraints</li>
* <li>XML configuration and</li>
* <li>programmatically registered validators (see
* {@link org.hibernate.validator.cfg.ConstraintMapping#constraintDefinition(Class)}).</li>
* </ul>
*
* The result is cached internally.
*
* @param annotationType The constraint annotation type.
* @param <A> the type of the annotation
*
* @return The validator classes for the given type.
*/
public <A extends Annotation> List<ConstraintValidatorDescriptor<A>> getAllValidatorDescriptors(Class<A> annotationType) {
Contracts.assertNotNull( annotationType, MESSAGES.classCannotBeNull() );
return validatorDescriptors.computeIfAbsent( annotationType, a -> getDefaultValidatorDescriptors( a ) );
} | 3.68 |
framework_LayoutManager_setNeedsHorizontalLayout | /**
* Marks that a ManagedLayout should be layouted horizontally in the next
* layout phase even if none of the elements managed by the layout have been
* resized horizontally.
* <p>
* For SimpleManagedLayout which is always layouted in both directions, this
* has the same effect as {@link #setNeedsLayout(ManagedLayout)}.
* <p>
* This method should not be invoked during a layout phase since it only
* controls what will happen in the beginning of the next phase. If you want
* to explicitly cause some layout to be considered in an ongoing layout
* phase, you should use {@link #setNeedsMeasure(ComponentConnector)}
* instead.
*
* @param layout
* the managed layout that should be layouted
*/
public final void setNeedsHorizontalLayout(ManagedLayout layout) {
if (isLayoutRunning()) {
getLogger().warning(
"setNeedsHorizontalLayout should not be run while a layout phase is in progress.");
}
needsHorizontalLayout.add(layout.getConnectorId());
} | 3.68 |
framework_ColorPickerPreviewElement_setColorTextFieldValue | /**
* Set value of TextField in ColorPickerPreview. Any existing value in the
* field is replaced.
*
* @param value
* text to insert
*
* @since 8.4
*/
public void setColorTextFieldValue(String value) {
// Select all text
getColorTextField().sendKeys(Keys.chord(Keys.CONTROL, "a"));
getColorTextField().sendKeys(value);
} | 3.68 |
dubbo_AccessLogData_set | /**
* Add a log key along with its value.
*
* @param key Any non-null, non-empty string
* @param value Any object, including null.
*/
private void set(String key, Object value) {
data.put(key, value);
} | 3.68 |
pulsar_ManagedCursorContainer_hasDurableCursors | /**
* Check whether there are any durable cursors.
* @return true if there are durable cursors and false if there are not
*/
public boolean hasDurableCursors() {
long stamp = rwLock.tryOptimisticRead();
int count = durableCursorCount;
if (!rwLock.validate(stamp)) {
// Fallback to read lock
stamp = rwLock.readLock();
try {
count = durableCursorCount;
} finally {
rwLock.unlockRead(stamp);
}
}
return count > 0;
} | 3.68 |
flink_BiConsumerWithException_unchecked | /**
* Convert a {@link BiConsumerWithException} into a {@link BiConsumer}.
*
* @param biConsumerWithException BiConsumer with exception to convert into a {@link
* BiConsumer}.
* @param <A> first input type
* @param <B> second input type
* @return {@link BiConsumer} which rethrows all checked exceptions as unchecked.
*/
static <A, B> BiConsumer<A, B> unchecked(
BiConsumerWithException<A, B, ?> biConsumerWithException) {
return (A a, B b) -> {
try {
biConsumerWithException.accept(a, b);
} catch (Throwable t) {
ExceptionUtils.rethrow(t);
}
};
} | 3.68 |
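A usage sketch: converting a throwing consumer so it can be passed where a plain `BiConsumer` is expected (the lambda is illustrative):

```java
// Illustrative conversion; the checked IOException surfaces unchecked on accept().
BiConsumerWithException<String, Integer, IOException> throwing =
        (name, value) -> { throw new IOException("write failed for " + name); };
BiConsumer<String, Integer> plain = BiConsumerWithException.unchecked(throwing);
plain.accept("metric", 1); // rethrows the IOException as an unchecked exception
```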
framework_DefaultFieldFactory_createCaptionByPropertyId | /**
* If name follows method naming conventions, convert the name to spaced
* upper case text. For example, convert "firstName" to "First Name"
*
* @param propertyId
* @return the formatted caption string
*/
public static String createCaptionByPropertyId(Object propertyId) {
return SharedUtil.propertyIdToHumanFriendly(propertyId);
} | 3.68 |
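For example, following the documented convention:

```java
DefaultFieldFactory.createCaptionByPropertyId("firstName");  // "First Name"
DefaultFieldFactory.createCaptionByPropertyId("postalCode"); // "Postal Code"
```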
hudi_FileStatusDTO_safeReadAndSetMetadata | /**
* Used to safely handle FileStatus calls which might fail on some FileSystem implementations
* (e.g. DeprecatedLocalFileSystem).
*/
private static void safeReadAndSetMetadata(FileStatusDTO dto, FileStatus fileStatus) {
try {
dto.owner = fileStatus.getOwner();
dto.group = fileStatus.getGroup();
dto.permission = FSPermissionDTO.fromFsPermission(fileStatus.getPermission());
} catch (IllegalArgumentException ie) {
// Deprecated File System (testing) does not work well with this call
// skipping
}
} | 3.68 |
graphhopper_VectorTile_addAllGeometry | /**
* <pre>
* Contains a stream of commands and parameters (vertices).
* A detailed description on geometry encoding is located in
* section 4.3 of the specification.
* </pre>
*
* <code>repeated uint32 geometry = 4 [packed = true];</code>
*/
public Builder addAllGeometry(
java.lang.Iterable<? extends java.lang.Integer> values) {
ensureGeometryIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, geometry_);
onChanged();
return this;
} | 3.68 |
morf_OracleDialect_alterTableChangeColumnStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableChangeColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column, org.alfasoftware.morf.metadata.Column)
*/
@Override
public Collection<String> alterTableChangeColumnStatements(Table table, Column oldColumn, Column newColumn) {
List<String> result = new ArrayList<>();
Table oldTable = oldTableForChangeColumn(table, oldColumn, newColumn);
String truncatedTableName = truncatedTableName(oldTable.getName());
boolean recreatePrimaryKey = oldColumn.isPrimaryKey() || newColumn.isPrimaryKey();
if (recreatePrimaryKey && !primaryKeysForTable(oldTable).isEmpty()) {
result.add(dropPrimaryKeyConstraint(truncatedTableName));
}
for (Index index : oldTable.indexes()) {
for (String column : index.columnNames()) {
if (column.equalsIgnoreCase(oldColumn.getName())) {
result.addAll(indexDropStatements(oldTable, index));
}
}
}
if (!newColumn.getName().equalsIgnoreCase(oldColumn.getName())) {
result.add("ALTER TABLE " + schemaNamePrefix() + truncatedTableName + " RENAME COLUMN " + oldColumn.getName() + " TO " + newColumn.getName());
}
boolean includeNullability = newColumn.isNullable() != oldColumn.isNullable();
boolean includeColumnType = newColumn.getType() != oldColumn.getType() || newColumn.getWidth() != oldColumn.getWidth() || newColumn.getScale() != oldColumn.getScale();
String sqlRepresentationOfColumnType = sqlRepresentationOfColumnType(newColumn, includeNullability, true, includeColumnType);
if (!StringUtils.isBlank(sqlRepresentationOfColumnType)) {
StringBuilder statement = new StringBuilder()
.append("ALTER TABLE ")
.append(schemaNamePrefix())
.append(truncatedTableName)
.append(" MODIFY (")
.append(newColumn.getName())
.append(' ')
.append(sqlRepresentationOfColumnType)
.append(")");
result.add(statement.toString());
}
if (!StringUtils.isBlank(oldColumn.getDefaultValue()) && StringUtils.isBlank(newColumn.getDefaultValue())) {
StringBuilder statement = new StringBuilder()
.append("ALTER TABLE ")
.append(schemaNamePrefix())
.append(truncatedTableName)
.append(" MODIFY (")
.append(newColumn.getName())
.append(" DEFAULT NULL")
.append(")");
result.add(statement.toString());
}
if (recreatePrimaryKey && !primaryKeysForTable(table).isEmpty()) {
result.add(generatePrimaryKeyStatement(namesOfColumns(SchemaUtils.primaryKeysForTable(table)), truncatedTableName));
}
for (Index index : table.indexes()) {
for (String column : index.columnNames()) {
if (column.equalsIgnoreCase(newColumn.getName())) {
result.addAll(addIndexStatements(table, index));
}
}
}
result.add(columnComment(newColumn, truncatedTableName));
return result;
} | 3.68 |
hadoop_PerGpuTemperature_getSlowThresholdGpuTemp | /**
* Get the Celsius GPU temperature threshold above which the GPU runs slower.
* @return temperature
*/
@XmlJavaTypeAdapter(PerGpuDeviceInformation.StrToFloatBeforeSpaceAdapter.class)
@XmlElement(name = "gpu_temp_slow_threshold")
public Float getSlowThresholdGpuTemp() {
return slowThresholdGpuTemp;
} | 3.68 |
framework_UIDL_iterator | /**
* Gets an iterator that can be used to iterate through the children of this
* UIDL.
* <p>
* The Object returned by <code>next()</code> will be appropriately typed -
* if it's UIDL, {@link #getTag()} can be used to check which section is in
* question.
* </p>
* <p>
* The basic use case is to iterate over the children of an UIDL update, and
* update the appropriate part of the widget for each child encountered, e.g.
* if <code>getTag()</code> returns "color", one would update the widget's
* color to reflect the value of the "color" section.
* </p>
*
* @return an iterator for iterating over UIDL children
* @since 8.2
*/
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
int index = -1;
@Override
public void remove() {
throw new UnsupportedOperationException();
}
@Override
public Object next() {
if (hasNext()) {
int typeOfChild = typeOfChild(++index);
switch (typeOfChild) {
case CHILD_TYPE_UIDL:
UIDL childUIDL = getChildUIDL(index);
return childUIDL;
case CHILD_TYPE_STRING:
return getChildString(index);
case CHILD_TYPE_XML:
return getChildXML(index);
default:
throw new IllegalStateException("Illegal child in tag "
+ getTag() + " at index " + index);
}
}
return null;
}
@Override
public boolean hasNext() {
int count = getChildCount();
return count > index + 1;
}
};
} | 3.68 |
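A sketch of the documented use case, dispatching on the runtime type of each child (the "color" tag is the example from the Javadoc above):

```java
for (Object child : uidl) {
    if (child instanceof UIDL) {
        UIDL section = (UIDL) child;
        if ("color".equals(section.getTag())) {
            // update the widget's color from this section
        }
    } else if (child instanceof String) {
        // plain text child
    }
}
```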
hbase_ZKNodeTracker_checkIfBaseNodeAvailable | /**
* Checks if the base znode, as set by the property 'zookeeper.znode.parent', exists.
* @return true if the base znode exists, false if it does not.
*/
public boolean checkIfBaseNodeAvailable() {
try {
if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().baseZNode) == -1) {
return false;
}
} catch (KeeperException e) {
abortable.abort("Exception while checking if basenode (" + watcher.getZNodePaths().baseZNode
+ ") exists in ZooKeeper.", e);
}
return true;
} | 3.68 |
hbase_MunkresAssignment_solve | /**
* Get the optimal assignments. The returned array will have the same number of elements as the
* number of rows in the input cost matrix. Each element will indicate
* which column should be assigned to that row or -1 if no column should be assigned, i.e. if
* result[i] = j then row i should be assigned to column j. Subsequent invocations of this method
* will simply return the same object without additional computation.
* @return an array with the optimal assignments
*/
public int[] solve() {
// If this assignment problem has already been solved, return the known
// solution
if (assignments != null) {
return assignments;
}
preliminaries();
// Find the optimal assignments.
while (!testIsDone()) {
while (!stepOne()) {
stepThree();
}
stepTwo();
}
// Extract the assignments from the mask matrix.
if (transposed) {
assignments = new int[cols];
outer: for (int c = 0; c < cols; c++) {
for (int r = 0; r < rows; r++) {
if (mask[r][c] == STAR) {
assignments[c] = r;
continue outer;
}
}
// There is no assignment for this row of the input/output.
assignments[c] = -1;
}
} else {
assignments = new int[rows];
outer: for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == STAR) {
assignments[r] = c;
continue outer;
}
}
}
}
// Once the solution has been computed, there is no need to keep any of the
// other internal structures. Clear all unnecessary internal references so
// the garbage collector may reclaim that memory.
cost = null;
mask = null;
rowsCovered = null;
colsCovered = null;
path = null;
leastInRow = null;
leastInRowIndex = null;
rowAdjust = null;
colAdjust = null;
return assignments;
} | 3.68 |
hbase_HFileBlockIndex_getNumEntries | /** Returns the number of entries in this block index chunk */
public int getNumEntries() {
return blockKeys.size();
} | 3.68 |
framework_PushAtmosphereHandler_onConnect | /**
* Called when the client sends the first request (to establish a push
* connection)
*
* @param resource
*/
private void onConnect(AtmosphereResource resource) {
resource.addEventListener(new AtmosphereResourceListener());
pushHandler.onConnect(resource);
} | 3.68 |
zilla_HpackContext_staticIndex16 | // Index in static table for the given name of length 16
private static int staticIndex16(DirectBuffer name)
{
switch (name.getByte(15))
{
case 'e':
if (STATIC_TABLE[27].name.equals(name)) // content-language
{
return 27;
}
if (STATIC_TABLE[61].name.equals(name)) // www-authenticate
{
return 61;
}
break;
case 'g':
if (STATIC_TABLE[26].name.equals(name)) // content-encoding
{
return 26;
}
break;
case 'n':
if (STATIC_TABLE[29].name.equals(name)) // content-location
{
return 29;
}
}
return -1;
} | 3.68 |
hmily_HmilyTransactionHolder_registerParticipantByNested | /**
* Add a participant when inside a nested transaction.
*
* @param participantId key
* @param hmilyParticipant {@linkplain HmilyParticipant}
*/
public void registerParticipantByNested(final Long participantId, final HmilyParticipant hmilyParticipant) {
if (Objects.isNull(hmilyParticipant)) {
return;
}
HmilyParticipantCacheManager.getInstance().cacheHmilyParticipant(participantId, hmilyParticipant);
} | 3.68 |
flink_MetricGroup_gauge | /**
* Registers a new {@link org.apache.flink.metrics.Gauge} with Flink.
*
* @param name name of the gauge
* @param gauge gauge to register
* @param <T> return type of the gauge
* @return the given gauge
*/
default <T, G extends Gauge<T>> G gauge(int name, G gauge) {
return gauge(String.valueOf(name), gauge);
} | 3.68 |
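Usage sketch: the int name is simply stringified, so the call below registers under the name "7" (`group` is an assumed `MetricGroup` instance):

```java
Gauge<Long> uptime = () -> System.currentTimeMillis();
// Registers the gauge under the name "7"; equivalent to group.gauge("7", uptime).
group.gauge(7, uptime);
```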
flink_Description_list | /** Adds a bulleted list to the description. */
public DescriptionBuilder list(InlineElement... elements) {
blocks.add(ListElement.list(elements));
return this;
} | 3.68 |
hbase_BackupManager_writeBackupStartCode | /**
* Write the start code (timestamp) to the backup system table. If null is passed in, a 0 byte is written.
* @param startCode start code
* @throws IOException exception
*/
public void writeBackupStartCode(Long startCode) throws IOException {
systemTable.writeBackupStartCode(startCode, backupInfo.getBackupRootDir());
} | 3.68 |
hadoop_RouterObserverReadProxyProvider_getProxyAsClientProtocol | /**
* Return the input proxy, cast as a {@link ClientProtocol}. This catches any
* {@link ClassCastException} and wraps it in a more helpful message. This
* should ONLY be called if the caller is certain that the proxy is, in fact,
* a {@link ClientProtocol}.
*/
private ClientProtocol getProxyAsClientProtocol(T proxy) {
assert proxy instanceof ClientProtocol : "BUG: Attempted to use proxy of class "
+ proxy.getClass()
+ " as if it was a ClientProtocol.";
return (ClientProtocol) proxy;
} | 3.68 |
hbase_HRegion_setMobFileCache | /**
* Only used for unit test which doesn't start region server.
*/
public void setMobFileCache(MobFileCache mobFileCache) {
this.mobFileCache = mobFileCache;
} | 3.68 |
framework_DragAndDropEvent_getTargetDetails | /**
* @return the TargetDetails containing drop target related details of drag
* and drop operation
*/
public TargetDetails getTargetDetails() {
return dropTargetDetails;
} | 3.68 |
framework_LegacyWindow_removeAllComponents | /**
* This implementation removes the components from the content container (
* {@link #getContent()}) instead of from the actual UI.
*
* This method should only be called when the content is a
* {@link ComponentContainer} (default {@link VerticalLayout} or explicitly
* set).
*/
public void removeAllComponents() {
getContent().removeAllComponents();
} | 3.68 |
hbase_SimpleRegionNormalizer_getRegionSizeMB | /** Returns the size of the region in MB, or -1 if the region is not found */
private long getRegionSizeMB(RegionInfo hri) {
ServerName sn =
masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
if (sn == null) {
LOG.debug("{} region was not found on any Server", hri.getRegionNameAsString());
return -1;
}
ServerMetrics serverMetrics = masterServices.getServerManager().getLoad(sn);
if (serverMetrics == null) {
LOG.debug("server {} was not found in ServerManager", sn.getServerName());
return -1;
}
RegionMetrics regionLoad = serverMetrics.getRegionMetrics().get(hri.getRegionName());
if (regionLoad == null) {
LOG.debug("{} was not found in RegionsLoad", hri.getRegionNameAsString());
return -1;
}
return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);
} | 3.68 |
framework_StringToBooleanConverter_getPresentationType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getPresentationType()
*/
@Override
public Class<String> getPresentationType() {
return String.class;
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_isNewVersionBehavior | /**
* By default, HBase only considers timestamps in versions, so a previous Delete with a higher ts
* will mask a later Put with a lower ts. Set this to true to enable new semantics of versions:
* mvcc will also be considered in versions. See HBASE-15968 for details.
*/
@Override
public boolean isNewVersionBehavior() {
return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, Boolean::parseBoolean,
DEFAULT_NEW_VERSION_BEHAVIOR);
} | 3.68 |
hadoop_HdfsFileStatus_atime | /**
* Set the access time of this entity (default = 0).
* @param atime Last accessed time
* @return This Builder instance
*/
public Builder atime(long atime) {
this.atime = atime;
return this;
} | 3.68 |
flink_JobManagerCheckpointStorage_getCheckpointPath | /** @return The location where checkpoints will be externalized if set. */
@Nullable
public Path getCheckpointPath() {
return location.getBaseCheckpointPath();
} | 3.68 |
hadoop_AbfsOutputStream_hsync | /** Similar to posix fsync, flush out the data in client's user buffer
* all the way to the disk device (but the disk may have it in its cache).
* @throws IOException if error occurs
*/
@Override
public void hsync() throws IOException {
if (supportFlush) {
flushInternal(false);
}
} | 3.68 |
hadoop_AbstractDelegationTokenBinding_getKind | /**
* Get the kind of the tokens managed here.
* @return the token kind.
*/
public Text getKind() {
return kind;
} | 3.68 |
hbase_StoreFileScanner_getScannerOrder | /**
* @see KeyValueScanner#getScannerOrder()
*/
@Override
public long getScannerOrder() {
return scannerOrder;
} | 3.68 |
framework_GenericFontIcon_getHtml | /**
* Utility method for generating HTML that displays an icon from specific
* fontFamiliy with a given codePoint in the font.
*
* @param fontFamily
* Name of the font family
* @param codePoint
* Icon's character code point in the font
* @return an HTML snippet that renders the icon
*/
public static String getHtml(String fontFamily, int codePoint) {
return "<span class=\"v-icon\" style=\"font-family: " + fontFamily
+ ";\">&#x" + Integer.toHexString(codePoint) + ";</span>";
} | 3.68 |
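For illustration, with a hypothetical font family and code point:

```java
// Hypothetical inputs; the output follows the format built above.
String html = GenericFontIcon.getHtml("MyIcons", 0xE75B);
// -> <span class="v-icon" style="font-family: MyIcons;">&#xe75b;</span>
```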
pulsar_AuthorizationService_allowNamespacePolicyOperationAsync | /**
* Check whether the given client role is allowed to perform the specified policy operation on a
* namespace.
*
* @param namespaceName
* @param policy
* @param operation
* @param role
* @param authData
*            additional authdata in json for targeted authorization provider
* @return a future completing with true when the operation is allowed; fails with
*         IllegalArgumentException when the namespace is not found
* @throws IllegalStateException
*             when the permission check cannot be performed
*/
public CompletableFuture<Boolean> allowNamespacePolicyOperationAsync(NamespaceName namespaceName,
PolicyName policy,
PolicyOperation operation,
String role,
AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.allowNamespacePolicyOperationAsync(namespaceName, policy, operation, role, authData);
} | 3.68 |
graphhopper_MinHeapWithUpdate_contains | /**
* @return true if the heap contains an element with the given id
*/
public boolean contains(int id) {
checkIdInRange(id);
return positions[id] != NOT_PRESENT;
} | 3.68 |
flink_FieldReferenceLookup_lookupField | /**
* Tries to resolve {@link FieldReferenceExpression} using given name in underlying inputs.
*
* @param name name of field to look for
* @return resolved field reference, or empty if no field with the given name could be found.
* @throws org.apache.flink.table.api.ValidationException if the name is ambiguous.
*/
public Optional<FieldReferenceExpression> lookupField(String name) {
List<FieldReference> matchingFields =
fieldReferences.stream()
.map(input -> input.get(name))
.filter(Objects::nonNull)
.collect(toList());
if (matchingFields.size() == 1) {
return Optional.of(matchingFields.get(0).toExpr());
} else if (matchingFields.size() == 0) {
return Optional.empty();
} else {
throw failAmbiguousColumn(name);
}
} | 3.68 |
flink_CompactingHashTable_getMinPartition | /** @return number of memory segments in the smallest partition */
private int getMinPartition() {
int minPartition = Integer.MAX_VALUE;
for (InMemoryPartition<T> p1 : this.partitions) {
if (p1.getBlockCount() < minPartition) {
minPartition = p1.getBlockCount();
}
}
return minPartition;
} | 3.68 |
hadoop_IOStatisticsSnapshot_aggregate | /**
* Aggregate the current statistics with the
* source reference passed in.
*
* The operation is synchronized.
* @param source source; may be null
* @return true if a merge took place.
*/
@Override
public synchronized boolean aggregate(
@Nullable IOStatistics source) {
if (source == null) {
return false;
}
aggregateMaps(counters, source.counters(),
IOStatisticsBinding::aggregateCounters,
IOStatisticsBinding::passthroughFn);
aggregateMaps(gauges, source.gauges(),
IOStatisticsBinding::aggregateGauges,
IOStatisticsBinding::passthroughFn);
aggregateMaps(minimums, source.minimums(),
IOStatisticsBinding::aggregateMinimums,
IOStatisticsBinding::passthroughFn);
aggregateMaps(maximums, source.maximums(),
IOStatisticsBinding::aggregateMaximums,
IOStatisticsBinding::passthroughFn);
aggregateMaps(meanStatistics, source.meanStatistics(),
IOStatisticsBinding::aggregateMeanStatistics, MeanStatistic::copy);
return true;
} | 3.68 |
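Usage sketch: folding a source into a snapshot (`sourceStats` is an assumed `IOStatistics` reference; a null source is simply ignored):

```java
IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot();
boolean merged = snapshot.aggregate(sourceStats); // true when sourceStats != null
snapshot.aggregate(null);                         // no-op, returns false
```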
open-banking-gateway_FintechConsentAccessImpl_findSingleByCurrentServiceSession | /**
* Finds the most recent granted consent for the current service session.
*/
@Override
public Optional<ProtocolFacingConsent> findSingleByCurrentServiceSession() {
return ConsentAccessUtil.getProtocolFacingConsent(findByCurrentServiceSessionOrderByModifiedDesc());
} | 3.68 |
framework_GridLayout_removeComponent | /**
* Removes the component specified by its cell coordinates.
*
* @param column
* the component's column, starting from 0.
* @param row
* the component's row, starting from 0.
*/
public void removeComponent(int column, int row) {
// Finds the area
for (final Component component : components) {
final ChildComponentData childData = getState().childData
.get(component);
if (childData.column1 == column && childData.row1 == row) {
removeComponent(component);
return;
}
}
} | 3.68 |
hbase_Query_getACL | /** Returns The serialized ACL for this operation, or null if none */
public byte[] getACL() {
return getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
} | 3.68 |
hadoop_RMWebAppUtil_getCallerUserGroupInformation | /**
* Helper method to retrieve the UserGroupInformation from the
* HttpServletRequest.
*
* @param hsr the servlet request
* @param usePrincipal true if we need to use the principal user, remote
* otherwise.
* @return the user group information of the caller.
**/
public static UserGroupInformation getCallerUserGroupInformation(
HttpServletRequest hsr, boolean usePrincipal) {
String remoteUser = hsr.getRemoteUser();
if (usePrincipal) {
Principal princ = hsr.getUserPrincipal();
remoteUser = princ == null ? null : princ.getName();
}
UserGroupInformation callerUGI = null;
if (remoteUser != null) {
callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
}
return callerUGI;
} | 3.68 |
hbase_CatalogReplicaLoadBalanceSimpleSelector_stop | // This class implements the Stoppable interface because chores need a Stoppable object;
// stop() is currently a no-op on this object.
@Override
public void stop(String why) {
isStopped = true;
} | 3.68 |
hmily_DatabaseMetaDataDialectHandler_getSchema | /**
* Get schema.
*
* @param connection connection
* @return schema
*/
default String getSchema(final Connection connection) {
try {
return connection.getSchema();
} catch (final SQLException ignored) {
return null;
}
} | 3.68 |
hbase_ByteBufferUtils_readLong | /**
* Read a long which was written to fitInBytes bytes, and increment the buffer position.
* @param fitInBytes In how many bytes given long is stored.
* @return The value of parsed long.
*/
public static long readLong(ByteBuffer in, final int fitInBytes) {
long tmpLength = 0;
for (int i = 0; i < fitInBytes; ++i) {
tmpLength |= (in.get() & 0xffL) << (8L * i);
}
return tmpLength;
} | 3.68 |
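A small worked example of the little-endian, variable-width decoding (the input bytes are illustrative):

```java
ByteBuffer in = ByteBuffer.wrap(new byte[] {0x01, 0x02, 0x7F});
long v = ByteBufferUtils.readLong(in, 2);
// byte 0 fills bits 0-7, byte 1 fills bits 8-15: v == 0x0201 == 513
// in.position() is now 2; the third byte is left for the next read
```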
framework_StringToFloatConverter_getModelType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getModelType()
*/
@Override
public Class<Float> getModelType() {
return Float.class;
} | 3.68 |
flink_StreamConfig_getManagedMemoryFractionOperatorUseCaseOfSlot | /**
* Fraction of total managed memory in the slot that this operator should use for the given use
* case.
*/
public double getManagedMemoryFractionOperatorUseCaseOfSlot(
ManagedMemoryUseCase managedMemoryUseCase,
Configuration taskManagerConfig,
ClassLoader cl) {
return ManagedMemoryUtils.convertToFractionOfSlot(
managedMemoryUseCase,
config.getDouble(getManagedMemoryFractionConfigOption(managedMemoryUseCase)),
getAllManagedMemoryUseCases(),
taskManagerConfig,
config.getOptional(STATE_BACKEND_USE_MANAGED_MEMORY),
cl);
} | 3.68 |
hudi_KafkaConnectUtils_walkTreeForXml | /**
* Walks the file tree under the given base path to collect XML files.
*/
private static List<Path> walkTreeForXml(Path basePath) throws IOException {
if (Files.notExists(basePath)) {
return new ArrayList<>();
}
return Files.walk(basePath, FileVisitOption.FOLLOW_LINKS)
.filter(path -> path.toFile().isFile())
.filter(path -> path.toString().endsWith(".xml"))
.collect(Collectors.toList());
} | 3.68 |
morf_AbstractSelectStatement_asTable | /**
* @return a reference to the alias of the select statement.
*/
public TableReference asTable() {
return new TableReference(getAlias());
} | 3.68 |
framework_VListSelect_setTabIndex | /**
* Sets the tab index.
*
* @param tabIndex
* the tab index to set
*/
public void setTabIndex(int tabIndex) {
select.setTabIndex(tabIndex);
} | 3.68 |
hadoop_MagicCommitTracker_initialize | /**
* Initialize the tracker.
* @return true, indicating that the multipart commit must start.
* @throws IOException any IO problem.
*/
@Override
public boolean initialize() throws IOException {
return true;
} | 3.68 |
pulsar_PulsarClientImpl_getSchema | /**
* Read the schema information for a given topic.
*
* If the topic does not exist or it has no schema associated, it will return an empty response
*/
public CompletableFuture<Optional<SchemaInfo>> getSchema(String topic) {
TopicName topicName;
try {
topicName = TopicName.get(topic);
} catch (Throwable t) {
return FutureUtil
.failedFuture(
new PulsarClientException.InvalidTopicNameException("Invalid topic name: '" + topic + "'"));
}
return lookup.getSchema(topicName);
} | 3.68 |
flink_StructuredType_newBuilder | /**
* Creates a builder for a {@link StructuredType} that is not stored in a catalog and is
* identified by an implementation {@link Class}.
*/
public static StructuredType.Builder newBuilder(Class<?> implementationClass) {
return new StructuredType.Builder(implementationClass);
} | 3.68 |
morf_AliasedField_withImmutableBuildersDisabled | /**
* Allows tests to run with immutable building behaviour turned off.
*
* TODO remove when we remove the old mutable behaviour
*
* @param runnable The code to run.
*/
public static void withImmutableBuildersDisabled(Runnable runnable) {
withImmutableBuilders(runnable, false);
} | 3.68 |
hadoop_INodeSymlink_isSymlink | /** @return true unconditionally. */
@Override
public boolean isSymlink() {
return true;
} | 3.68 |
framework_VPopupView_setHideOnMouseOut | /**
* Should this popup automatically hide when the user takes the mouse
* cursor out of the popup area? If this is {@code false}, the user must
* click outside the popup to close it. The default is {@code true}.
*
* @param hideOnMouseOut
* {@code true} if this popup should hide when mouse is moved
* away, {@code false} otherwise
*/
public void setHideOnMouseOut(boolean hideOnMouseOut) {
this.hideOnMouseOut = hideOnMouseOut;
} | 3.68 |
hbase_ReplicationSourceManager_addPeer | /**
* <ol>
* <li>Add peer to replicationPeers</li>
* <li>Add the normal source and related replication queue</li>
* <li>Add HFile Refs</li>
* </ol>
* @param peerId the id of replication peer
*/
public void addPeer(String peerId) throws IOException {
boolean added = false;
try {
added = this.replicationPeers.addPeer(peerId);
} catch (ReplicationException e) {
throw new IOException(e);
}
if (added) {
addSource(peerId, false);
}
} | 3.68 |
AreaShop_GeneralRegion_isDeleted | /**
* Check if the region has been deleted.
* @return true if the region has been deleted, otherwise false
*/
public boolean isDeleted() {
return deleted;
} | 3.68 |
hbase_RowFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.RowFilter.Builder builder = FilterProtos.RowFilter.newBuilder();
builder.setCompareFilter(super.convert());
return builder.build().toByteArray();
} | 3.68 |
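A round-trip sketch, assuming the usual static parseFrom counterpart that HBase filters expose (the row key is illustrative):

RowFilter original = new RowFilter(CompareOperator.EQUAL,
    new BinaryComparator(Bytes.toBytes("row-1")));
byte[] serialized = original.toByteArray();
// parseFrom is the inverse of toByteArray(); it throws DeserializationException on bad input.
RowFilter copy = RowFilter.parseFrom(serialized);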
hbase_MobFileName_getFileName | /**
* Gets the file name.
* @return The file name.
*/
public String getFileName() {
return this.fileName;
} | 3.68 |
hbase_SequenceIdAccounting_flattenToLowestSequenceId | /**
* @return a new Map with the same keys as <code>src</code>, but with each value replaced by the
*         smallest sequence id found in the corresponding inner map.
*/
private <T extends Map<?, Long>> Map<byte[], Long> flattenToLowestSequenceId(Map<byte[], T> src) {
if (src == null || src.isEmpty()) {
return null;
}
Map<byte[], Long> tgt = new HashMap<>();
for (Map.Entry<byte[], T> entry : src.entrySet()) {
long lowestSeqId = getLowestSequenceId(entry.getValue());
if (lowestSeqId != HConstants.NO_SEQNUM) {
tgt.put(entry.getKey(), lowestSeqId);
}
}
return tgt;
} | 3.68 |
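The same flattening idea as a self-contained sketch over ordinary string keys (illustrative, not the HBase implementation):

// For each outer key, keep only the minimum value found in its inner map.
Map<String, Map<String, Long>> src = Map.of(
    "region-a", Map.of("cf1", 5L, "cf2", 3L),
    "region-b", Map.of("cf1", 9L));
Map<String, Long> flattened = new HashMap<>();
src.forEach((key, inner) ->
    inner.values().stream().min(Long::compare)
        .ifPresent(min -> flattened.put(key, min)));
// flattened = {region-a=3, region-b=9}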
graphhopper_CHStorage_getNodes | /**
* The number of nodes of this storage.
*/
public int getNodes() {
return nodeCount;
} | 3.68 |
hadoop_ErasureCoderOptions_allowChangeInputs | /**
* Whether changing the content (not the positions) of the input buffers is
* allowed. Disallowing it may enable better performance.
* @return true if input content is allowed to be changed, false otherwise
*/
public boolean allowChangeInputs() {
return allowChangeInputs;
} | 3.68 |
framework_VScrollTable_getColWidth | /**
* Returns the width available for column content.
*
* @param columnIndex
*            the index of the column to measure
* @return the width in pixels available for column content, or 0 if body
*         measurements are not done or no rows have been rendered yet
*/
public int getColWidth(int columnIndex) {
if (tBodyMeasurementsDone) {
if (renderedRows.isEmpty()) {
// no rows yet rendered
return 0;
}
for (Widget row : renderedRows) {
if (!(row instanceof VScrollTableGeneratedRow)) {
TableRowElement tr = row.getElement().cast();
// Spanned rows might cause an NPE.
if (columnIndex < tr.getChildCount()) {
Element wrapperdiv = tr.getCells()
.getItem(columnIndex).getFirstChildElement()
.cast();
return wrapperdiv.getOffsetWidth();
}
}
}
return 0;
} else {
return 0;
}
} | 3.68 |
hudi_StreamSync_syncOnce | /**
* Runs one round of delta sync and returns the new compaction instant if one was scheduled.
*/
public Pair<Option<String>, JavaRDD<WriteStatus>> syncOnce() throws IOException {
Pair<Option<String>, JavaRDD<WriteStatus>> result = null;
Timer.Context overallTimerContext = metrics.getOverallTimerContext();
// Refresh Timeline
refreshTimeline();
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder()
.setConf(new Configuration(fs.getConf()))
.setBasePath(cfg.targetBasePath)
.setRecordMergerStrategy(props.getProperty(HoodieWriteConfig.RECORD_MERGER_STRATEGY.key(), HoodieWriteConfig.RECORD_MERGER_STRATEGY.defaultValue()))
.setTimeGeneratorConfig(HoodieTimeGeneratorConfig.newBuilder().fromProperties(props).withPath(cfg.targetBasePath).build())
.build();
String instantTime = metaClient.createNewInstantTime();
InputBatch inputBatch = readFromSource(instantTime, metaClient);
if (inputBatch != null) {
// This is the first input batch. If the schemaProvider is not set yet, take it from the
// batch, register the Avro schema and start the compactor.
if (writeClient == null) {
this.schemaProvider = inputBatch.getSchemaProvider();
// Setup HoodieWriteClient and compaction now that we decided on schema
setupWriteClient(inputBatch.getBatch());
} else {
Schema newSourceSchema = inputBatch.getSchemaProvider().getSourceSchema();
Schema newTargetSchema = inputBatch.getSchemaProvider().getTargetSchema();
if ((newSourceSchema != null && !processedSchema.isSchemaPresent(newSourceSchema))
|| (newTargetSchema != null && !processedSchema.isSchemaPresent(newTargetSchema))) {
String sourceStr = newSourceSchema == null ? NULL_PLACEHOLDER : newSourceSchema.toString(true);
String targetStr = newTargetSchema == null ? NULL_PLACEHOLDER : newTargetSchema.toString(true);
LOG.info("Seeing new schema. Source: {0}, Target: {1}", sourceStr, targetStr);
// We need to recreate write client with new schema and register them.
reInitWriteClient(newSourceSchema, newTargetSchema, inputBatch.getBatch());
if (newSourceSchema != null) {
processedSchema.addSchema(newSourceSchema);
}
if (newTargetSchema != null) {
processedSchema.addSchema(newTargetSchema);
}
}
}
// complete the pending compaction before writing to sink
if (cfg.retryLastPendingInlineCompactionJob && getHoodieClientConfig(this.schemaProvider).inlineCompactionEnabled()) {
Option<String> pendingCompactionInstant = getLastPendingCompactionInstant(allCommitsTimelineOpt);
if (pendingCompactionInstant.isPresent()) {
HoodieWriteMetadata<JavaRDD<WriteStatus>> writeMetadata = writeClient.compact(pendingCompactionInstant.get());
writeClient.commitCompaction(pendingCompactionInstant.get(), writeMetadata.getCommitMetadata().get(), Option.empty());
refreshTimeline();
reInitWriteClient(schemaProvider.getSourceSchema(), schemaProvider.getTargetSchema(), null);
}
} else if (cfg.retryLastPendingInlineClusteringJob && getHoodieClientConfig(this.schemaProvider).inlineClusteringEnabled()) {
// complete the pending clustering before writing to sink
Option<String> pendingClusteringInstant = getLastPendingClusteringInstant(allCommitsTimelineOpt);
if (pendingClusteringInstant.isPresent()) {
writeClient.cluster(pendingClusteringInstant.get());
}
}
result = writeToSinkAndDoMetaSync(instantTime, inputBatch, metrics, overallTimerContext);
}
metrics.updateStreamerSyncMetrics(System.currentTimeMillis());
return result;
} | 3.68 |
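A hedged sketch of the driving loop a service might run around syncOnce (the loop condition and logging are assumptions):

while (!shutdownRequested) {
    Pair<Option<String>, JavaRDD<WriteStatus>> result = streamSync.syncOnce();
    if (result != null && result.getLeft().isPresent()) {
        LOG.info("Compaction scheduled at instant {}", result.getLeft().get());
    }
}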
dubbo_PojoUtils_mapToPojo | /**
* Convert a map to an instance of the specified class.
*
* @param map the map to convert
* @param cls the specified class
* @param <T> the type of {@code cls}
* @return class instance declare in param {@code cls}
* @throws ReflectiveOperationException if the instance creation is failed
* @since 2.7.10
*/
public static <T> T mapToPojo(Map<String, Object> map, Class<T> cls) throws ReflectiveOperationException {
T instance = cls.getDeclaredConstructor().newInstance();
Map<String, Field> beanPropertyFields = ReflectUtils.getBeanPropertyFields(cls);
for (Map.Entry<String, Field> entry : beanPropertyFields.entrySet()) {
String name = entry.getKey();
Field field = entry.getValue();
Object mapObject = map.get(name);
if (mapObject == null) {
continue;
}
Type type = field.getGenericType();
Object fieldObject = getFieldObject(mapObject, type);
field.set(instance, fieldObject);
}
return instance;
} | 3.68 |
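A hypothetical usage (the User POJO and map contents are assumptions for illustration):

Map<String, Object> source = new HashMap<>();
source.put("name", "alice");
source.put("age", 30);
// Populates matching bean-property fields; throws ReflectiveOperationException
// if the no-arg constructor is missing or instantiation fails.
User user = PojoUtils.mapToPojo(source, User.class);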
pulsar_PulsarAdminImpl_proxyStats | /**
* @return the proxy statistics
*/
public ProxyStats proxyStats() {
return proxyStats;
} | 3.68 |