name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_RegistryDNSServer_processServiceRecords | /**
* Iterates through the supplied service records, executing the provided registry
* command.
* @param records the service records.
* @param command the registry command.
* @throws IOException
*/
private void processServiceRecords(Map<String, ServiceRecord> records,
ManagementCommand command)
throws IOException {
for (Map.Entry<String, ServiceRecord> entry : records.entrySet()) {
processServiceRecord(entry.getKey(), entry.getValue(), command);
}
} | 3.68 |
framework_DefaultEditorEventHandler_isTouchOpenEvent | /**
* Returns whether the given event is a touch event that should open the
* editor.
*
* @param event
* the received event
* @return whether the event is a touch open event
*/
protected boolean isTouchOpenEvent(EditorDomEvent<T> event) {
final Event e = event.getDomEvent();
final int type = e.getTypeInt();
final double now = Duration.currentTimeMillis();
final int currentX = WidgetUtil.getTouchOrMouseClientX(e);
final int currentY = WidgetUtil.getTouchOrMouseClientY(e);
final boolean validTouchOpenEvent = type == Event.ONTOUCHEND
&& now - lastTouchEventTime < 500
&& lastTouchEventRow == event.getCell().getRowIndex()
&& Math.abs(lastTouchEventX - currentX) < 20
&& Math.abs(lastTouchEventY - currentY) < 20;
if (type == Event.ONTOUCHSTART) {
lastTouchEventX = currentX;
lastTouchEventY = currentY;
}
if (type == Event.ONTOUCHEND) {
lastTouchEventTime = now;
lastTouchEventRow = event.getCell().getRowIndex();
}
return validTouchOpenEvent;
} | 3.68 |
framework_Calendar_getTimeZone | /**
* Returns the time zone currently used by this component.
*
* @return the component's time zone
*/
public TimeZone getTimeZone() {
if (timezone == null) {
return currentCalendar.getTimeZone();
}
return timezone;
} | 3.68 |
querydsl_SQLExpressions_addWeeks | /**
* Add the given number of weeks to the date
*
* @param date date
* @param weeks weeks to add
* @return converted date
*/
public static <D extends Comparable> DateExpression<D> addWeeks(DateExpression<D> date, int weeks) {
return Expressions.dateOperation(date.getType(), Ops.DateTimeOps.ADD_WEEKS, date, ConstantImpl.create(weeks));
} | 3.68 |
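A brief usage sketch may help show how the expression above is typically composed; the standalone date path below is built with `Expressions.datePath` so the sketch does not depend on any generated query types, and the column name is invented.

```java
import java.sql.Date;

import com.querydsl.core.types.dsl.DateExpression;
import com.querydsl.core.types.dsl.DatePath;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.sql.SQLExpressions;

public class AddWeeksSketch {
    public static void main(String[] args) {
        // A free-standing date path standing in for a generated query type's column.
        DatePath<Date> hireDate = Expressions.datePath(Date.class, "hire_date");

        // Wraps the path in an ADD_WEEKS date operation, exactly as the method above does.
        DateExpression<Date> inTwoWeeks = SQLExpressions.addWeeks(hireDate, 2);
        System.out.println(inTwoWeeks); // rendered per the active SQL template
    }
}
```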
framework_PointerEventSupportImpl_isSupported | /**
* @return true if the pointer events are supported, false otherwise
*/
protected boolean isSupported() {
return false;
} | 3.68 |
framework_VButton_cleanupCaptureState | /**
* Resets internal state if this button can no longer service events. This
* can occur when the widget becomes detached or disabled.
*/
private void cleanupCaptureState() {
if (isCapturing || isFocusing) {
DOM.releaseCapture(getElement());
isCapturing = false;
isFocusing = false;
}
} | 3.68 |
framework_DefaultConnectionStateHandler_scheduleReconnect | /**
* Called after a problem occurred.
*
* This method is responsible for re-sending the payload to the server (if
* not null) or re-sending a heartbeat request at some point.
*
* @param payload
* the payload that did not reach the server, null if the problem
* was detected by a heartbeat
*/
protected void scheduleReconnect(final JsonObject payload) {
// Done here and not in the timer to avoid TestBench getting in between
// The request is still open at this point to avoid interference, so we
// do not need to start a new one
if (reconnectAttempt == 1) {
// Try once immediately
doReconnect(payload);
} else {
scheduledReconnect = new Timer() {
@Override
public void run() {
scheduledReconnect = null;
doReconnect(payload);
}
};
scheduledReconnect.schedule(getConfiguration().reconnectInterval);
}
} | 3.68 |
hbase_TableModel_getName | /** Returns the name */
@XmlAttribute
public String getName() {
return name;
} | 3.68 |
framework_VAbsoluteLayout_getWidgetCount | /*
* (non-Javadoc)
*
* @see com.google.gwt.user.client.ui.ComplexPanel#getWidgetCount()
*/
@Override
public int getWidgetCount() {
int counter = 0;
for (int i = 0; i < super.getWidgetCount(); i++) {
if (super.getWidget(i) instanceof AbsoluteWrapper) {
counter++;
}
}
return counter;
} | 3.68 |
hbase_Scan_isScanMetricsEnabled | /** Returns True if collection of scan metrics is enabled. For advanced users. */
public boolean isScanMetricsEnabled() {
byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
return attr == null ? false : Bytes.toBoolean(attr);
} | 3.68 |
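For context, a short hedged sketch of how that attribute usually gets set on the client side before the getter above reads it back; it assumes the standard `Scan#setScanMetricsEnabled` setter is available alongside the getter.

```java
import org.apache.hadoop.hbase.client.Scan;

public class ScanMetricsSketch {
    public static void main(String[] args) {
        Scan scan = new Scan();
        // Assumed setter counterpart: stores Scan.SCAN_ATTRIBUTES_METRICS_ENABLE as a scan attribute.
        scan.setScanMetricsEnabled(true);
        // The getter shown above decodes that same attribute back into a boolean.
        System.out.println(scan.isScanMetricsEnabled()); // true
    }
}
```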
hadoop_AbstractS3ACommitter_initiateTaskOperation | /**
* Start a task commit/abort operation.
* This may have a different thread count.
* If configured to collect statistics,
* the IOStatisticsContext is reset.
* @param context job or task context
* @return a commit context through which the operations can be invoked.
* @throws IOException failure.
*/
protected CommitContext initiateTaskOperation(
final JobContext context)
throws IOException {
CommitContext commitContext = getCommitOperations().createCommitContext(
context,
getOutputPath(),
getTaskCommitThreadCount(context),
IOStatisticsContext.getCurrentIOStatisticsContext());
commitContext.maybeResetIOStatisticsContext();
return commitContext;
} | 3.68 |
framework_VScrollTable_setColWidth | /**
* Sets the content width of a column.
*
* Due to an IE limitation, we must set the width on a wrapper element inside
* table cells (with overflow hidden, which does not work on td
* elements).
*
* To get this to work properly cross-platform, we also set the width
* of the td.
*
* @param colIndex
* The column Index
* @param w
* The content width
*/
public void setColWidth(int colIndex, int w) {
for (Widget row : renderedRows) {
((VScrollTableRow) row).setCellWidth(colIndex, w);
}
} | 3.68 |
rocketmq-connect_WorkerSinkTask_pollConsumer | /**
* Poll the consumer for messages.
*
* @param timeoutMs poll timeout in milliseconds
* @return the polled messages
*/
private List<MessageExt> pollConsumer(long timeoutMs) {
List<MessageExt> msgs = consumer.poll(timeoutMs);
// metrics
recordReadSuccess(msgs.size());
return msgs;
} | 3.68 |
flink_WindowsGrouping_hasTriggerWindow | /**
* Check if there are windows that could be triggered according to the current watermark.
*
* @return true when there are windows to be triggered. It is designed to be idempotent.
*/
public boolean hasTriggerWindow() {
skipEmptyWindow();
Preconditions.checkState(
watermark == Long.MIN_VALUE || nextWindow != null,
"next trigger window cannot be null.");
return nextWindow != null && nextWindow.getEnd() <= watermark;
} | 3.68 |
pulsar_Producer_close | /**
* Close the producer immediately if (a) the connection is dropped, or (b) it is a graceful close and no pending
* publish acks are left; otherwise wait for the pending publish acks.
*
* @return completable future indicate completion of close
*/
public synchronized CompletableFuture<Void> close(boolean removeFromTopic) {
if (log.isDebugEnabled()) {
log.debug("Closing producer {} -- isClosed={}", this, isClosed);
}
if (!isClosed) {
isClosed = true;
if (log.isDebugEnabled()) {
log.debug("Trying to close producer {} -- cnxIsActive: {} -- pendingPublishAcks: {}", this,
cnx.isActive(), pendingPublishAcks);
}
if (!cnx.isActive() || pendingPublishAcks == 0) {
closeNow(removeFromTopic);
}
}
return closeFuture;
} | 3.68 |
framework_AbstractComponentContainer_fireComponentAttachEvent | /**
* Fires the component attached event. This should be called by the
* addComponent methods after the component has been added to this
* container.
*
* @param component
* the component that has been added to this container.
*/
protected void fireComponentAttachEvent(Component component) {
fireEvent(new ComponentAttachEvent(this, component));
} | 3.68 |
graphhopper_BikeCommonAverageSpeedParser_applyMaxSpeed | /**
* @param way needed to retrieve tags
* @param speed speed guessed e.g. from the road type or other tags
* @return The assumed average speed.
*/
double applyMaxSpeed(ReaderWay way, double speed, boolean bwd) {
double maxSpeed = getMaxSpeed(way, bwd);
// We strictly obey speed limits, see #600
return isValidSpeed(maxSpeed) && speed > maxSpeed ? maxSpeed : speed;
} | 3.68 |
flink_LongHashPartition_valueIter | /** Returns an iterator of BinaryRowData for multiple linked values. */
MatchIterator valueIter(long address) {
iterator.set(address);
return iterator;
} | 3.68 |
hbase_VersionedSegmentsList_getEstimatedUniquesFrac | // Estimates fraction of unique keys
double getEstimatedUniquesFrac() {
int segmentCells = 0;
int maxCells = 0;
double est = 0;
for (ImmutableSegment s : storeSegments) {
double segmentUniques = s.getNumUniqueKeys();
if (segmentUniques != CellSet.UNKNOWN_NUM_UNIQUES) {
segmentCells = s.getCellsCount();
if (segmentCells > maxCells) {
maxCells = segmentCells;
est = segmentUniques / segmentCells;
}
}
// else ignore this segment specifically since if the unique number is unknown counting
// cells can be expensive
}
if (maxCells == 0) {
return 1.0;
}
return est;
} | 3.68 |
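A worked example with invented numbers: given three segments with (cells, unique keys) of (100, 80), (200, 150) and (50, unknown), the unknown segment is skipped, the largest counted segment (200 cells) wins, and the estimate is 150 / 200 = 0.75. If every segment had an unknown unique count, maxCells would stay 0 and the method would return 1.0.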
flink_FileSystem_getDefaultBlockSize | /**
* Return the number of bytes that large input files should optimally be split into to
* minimize I/O time.
*
* @return the number of bytes that large input files should optimally be split into to
* minimize I/O time
* @deprecated This value is no longer used and is meaningless.
*/
@Deprecated
public long getDefaultBlockSize() {
return 32 * 1024 * 1024; // 32 MB;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_getFunctionalIndexUpdates | /**
* Loads the file slices touched by the commit due to given instant time and returns the records for the functional index.
*
* @param commitMetadata {@code HoodieCommitMetadata}
* @param indexPartition partition name of the functional index
* @param instantTime timestamp of the current update commit
* @return the functional index records for the affected file slices
*/
private HoodieData<HoodieRecord> getFunctionalIndexUpdates(HoodieCommitMetadata commitMetadata, String indexPartition, String instantTime) throws Exception {
HoodieFunctionalIndexDefinition indexDefinition = getFunctionalIndexDefinition(indexPartition);
List<Pair<String, FileSlice>> partitionFileSlicePairs = new ArrayList<>();
HoodieTableFileSystemView fsView = HoodieTableMetadataUtil.getFileSystemView(metadataMetaClient);
commitMetadata.getPartitionToWriteStats().forEach((dataPartition, value) -> {
List<FileSlice> fileSlices = getPartitionLatestFileSlicesIncludingInflight(metadataMetaClient, Option.ofNullable(fsView), dataPartition);
fileSlices.forEach(fileSlice -> {
// Filter log files for the instant time and add to this partition fileSlice pairs
List<HoodieLogFile> logFilesForInstant = fileSlice.getLogFiles()
.filter(logFile -> logFile.getDeltaCommitTime().equals(instantTime))
.collect(Collectors.toList());
partitionFileSlicePairs.add(Pair.of(dataPartition, new FileSlice(
fileSlice.getFileGroupId(), fileSlice.getBaseInstantTime(), fileSlice.getBaseFile().orElse(null), logFilesForInstant)));
});
});
int parallelism = Math.min(partitionFileSlicePairs.size(), dataWriteConfig.getMetadataConfig().getFunctionalIndexParallelism());
Schema readerSchema = getProjectedSchemaForFunctionalIndex(indexDefinition, dataMetaClient);
return getFunctionalIndexRecords(partitionFileSlicePairs, indexDefinition, dataMetaClient, parallelism, readerSchema, hadoopConf);
} | 3.68 |
framework_VaadinService_accessSession | /**
* Implementation for {@link VaadinSession#access(Runnable)}. This method is
* implemented here instead of in {@link VaadinSession} to enable overriding
* the implementation without using a custom subclass of VaadinSession.
*
* @since 7.1
* @see VaadinSession#access(Runnable)
*
* @param session
* the vaadin session to access
* @param runnable
* the runnable to run with the session locked
*
* @return a future that can be used to check for task completion and to
* cancel the task
*/
public Future<Void> accessSession(VaadinSession session,
Runnable runnable) {
FutureAccess future = new FutureAccess(session, runnable);
session.getPendingAccessQueue().add(future);
ensureAccessQueuePurged(session);
return future;
} | 3.68 |
flink_BinarySegmentUtils_inFirstSegment | /** Returns whether the bytes lie entirely within the first MemorySegment, so a faster code path can be used. */
private static boolean inFirstSegment(MemorySegment[] segments, int offset, int numBytes) {
return numBytes + offset <= segments[0].size();
} | 3.68 |
hibernate-validator_ConstraintDefinitionContribution_getValidatorDescriptors | /**
* Returns a list of constraint validator descriptors for the constraint type of this instance.
*/
public List<ConstraintValidatorDescriptor<A>> getValidatorDescriptors() {
return validatorDescriptors;
} | 3.68 |
framework_Escalator_paintInsertCells | /**
* Inserts new cell elements into a single row element, invoking
* {@link #getEscalatorUpdater()}
* {@link EscalatorUpdater#preAttach(Row, Iterable) preAttach} and
* {@link EscalatorUpdater#postAttach(Row, Iterable) postAttach} before
* and after inserting the cells, respectively.
* <p>
* Precondition: The row must be already attached to the DOM and the
* FlyweightCell instances corresponding to the new columns added to
* {@code flyweightRow}.
*
* @param tr
* the row in which to insert the cells
* @param logicalRowIndex
* the index of the row
* @param offset
* the index of the first cell
* @param numberOfCells
* the number of cells to insert
*/
private void paintInsertCells(final TableRowElement tr,
int logicalRowIndex, final int offset,
final int numberOfCells) {
assert root.isOrHasChild(
tr) : "The row must be attached to the document";
flyweightRow.setup(tr, logicalRowIndex,
columnConfiguration.getCalculatedColumnWidths());
Iterable<FlyweightCell> cells = flyweightRow
.getUnattachedCells(offset, numberOfCells);
for (FlyweightCell cell : cells) {
final double colWidth = columnConfiguration
.getColumnWidthActual(cell.getColumn());
final TableCellElement cellElem = createCellElement(colWidth);
cell.setElement(cellElem);
}
getEscalatorUpdater().preAttach(flyweightRow, cells);
Node referenceCell;
if (offset != 0) {
referenceCell = tr.getChild(offset - 1);
} else {
referenceCell = null;
}
for (FlyweightCell cell : cells) {
referenceCell = insertAfterReferenceAndUpdateIt(tr,
cell.getElement(), referenceCell);
}
getEscalatorUpdater().postAttach(flyweightRow, cells);
getEscalatorUpdater().update(flyweightRow, cells);
assert flyweightRow.teardown();
} | 3.68 |
hadoop_SuccessData_addDiagnostic | /**
* Add a diagnostics entry.
* @param key name
* @param value value
*/
public void addDiagnostic(String key, String value) {
diagnostics.put(key, value);
} | 3.68 |
hbase_TimestampsFilter_parseFrom | /**
* Parse a serialized representation of {@link TimestampsFilter}
* @param pbBytes A pb serialized {@link TimestampsFilter} instance
* @return An instance of {@link TimestampsFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static TimestampsFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.TimestampsFilter proto;
try {
proto = FilterProtos.TimestampsFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new TimestampsFilter(proto.getTimestampsList(),
proto.hasCanHint() && proto.getCanHint());
} | 3.68 |
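A hedged round-trip sketch, assuming the usual `List<Long>` constructor and the inherited `toByteArray()` that the `@see` tag above points at:

```java
import java.util.Arrays;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.TimestampsFilter;

public class TimestampsFilterRoundTrip {
    public static void main(String[] args) throws DeserializationException {
        // Keep only cells whose timestamp is one of these values.
        TimestampsFilter original = new TimestampsFilter(Arrays.asList(1L, 5L, 10L));

        byte[] pbBytes = original.toByteArray();        // protobuf-serialized form
        TimestampsFilter copy = TimestampsFilter.parseFrom(pbBytes);
        System.out.println(copy.getTimestamps());       // [1, 5, 10]
    }
}
```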
hudi_FileSystemViewManager_close | /**
* Closes all views opened.
*/
public void close() {
if (!this.globalViewMap.isEmpty()) {
this.globalViewMap.values().forEach(SyncableFileSystemView::close);
this.globalViewMap.clear();
}
} | 3.68 |
flink_DataSet_fullOuterJoin | /**
* Initiates a Full Outer Join transformation.
*
* <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
* equality and provides multiple ways to combine joining elements into one DataSet.
*
* <p>Elements of <b>both</b> DataSets that do not have a matching element on the opposing side
* are joined with {@code null} and emitted to the resulting DataSet.
*
* @param other The other DataSet with which this DataSet is joined.
* @param strategy The strategy that should be used execute the join. If {@code null} is given,
* then the optimizer will pick the join strategy.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
* @see DataSet
*/
public <R> JoinOperatorSetsBase<T, R> fullOuterJoin(DataSet<R> other, JoinHint strategy) {
switch (strategy) {
case OPTIMIZER_CHOOSES:
case REPARTITION_SORT_MERGE:
case REPARTITION_HASH_FIRST:
case REPARTITION_HASH_SECOND:
return new JoinOperatorSetsBase<>(this, other, strategy, JoinType.FULL_OUTER);
default:
throw new InvalidProgramException(
"Invalid JoinHint for FullOuterJoin: " + strategy);
}
} | 3.68 |
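A minimal usage sketch for the batch DataSet API (assumed Flink 1.x); in a full outer join, either side of the join function may be null for unmatched elements.

```java
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class FullOuterJoinSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> left =
                env.fromElements(Tuple2.of(1, "a"), Tuple2.of(2, "b"));
        DataSet<Tuple2<Integer, String>> right =
                env.fromElements(Tuple2.of(2, "x"), Tuple2.of(3, "y"));

        left.fullOuterJoin(right, JoinHint.OPTIMIZER_CHOOSES)
                .where(0)   // key field of the left side
                .equalTo(0) // key field of the right side
                .with(new JoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, String>() {
                    @Override
                    public String join(Tuple2<Integer, String> l, Tuple2<Integer, String> r) {
                        // Unmatched elements arrive with the other side set to null.
                        return (l == null ? "-" : l.f1) + "/" + (r == null ? "-" : r.f1);
                    }
                })
                .print();   // a/-, b/x, -/y (order not guaranteed)
    }
}
```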
flink_RpcEndpoint_isRunning | /**
* Returns whether the RPC endpoint is started and not stopped or being stopped.
*
* @return whether the RPC endpoint is started and not stopped or being stopped.
*/
protected boolean isRunning() {
validateRunsInMainThread();
return isRunning;
} | 3.68 |
flink_RequestedGlobalProperties_isMetBy | /**
* Checks, if this set of interesting properties, is met by the given produced properties.
*
* @param props The properties for which to check whether they meet these properties.
* @return True, if the properties are met, false otherwise.
*/
public boolean isMetBy(GlobalProperties props) {
if (this.partitioning == PartitioningProperty.ANY_DISTRIBUTION) {
return true;
} else if (this.partitioning == PartitioningProperty.FULL_REPLICATION) {
return props.isFullyReplicated();
} else if (props.isFullyReplicated()) {
return false;
} else if (this.partitioning == PartitioningProperty.RANDOM_PARTITIONED) {
return true;
} else if (this.partitioning == PartitioningProperty.ANY_PARTITIONING) {
return checkCompatiblePartitioningFields(props);
} else if (this.partitioning == PartitioningProperty.HASH_PARTITIONED) {
return props.getPartitioning() == PartitioningProperty.HASH_PARTITIONED
&& checkCompatiblePartitioningFields(props);
} else if (this.partitioning == PartitioningProperty.RANGE_PARTITIONED) {
return props.getPartitioning() == PartitioningProperty.RANGE_PARTITIONED
&& props.matchesOrderedPartitioning(this.ordering);
} else if (this.partitioning == PartitioningProperty.FORCED_REBALANCED) {
return props.getPartitioning() == PartitioningProperty.FORCED_REBALANCED;
} else if (this.partitioning == PartitioningProperty.CUSTOM_PARTITIONING) {
return props.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING
&& checkCompatiblePartitioningFields(props)
&& props.getCustomPartitioner().equals(this.customPartitioner);
} else {
throw new CompilerException("Properties matching logic leaves open cases.");
}
} | 3.68 |
hbase_Pair_setSecond | /**
* Replace the second element of the pair.
* @param b operand
*/
public void setSecond(T2 b) {
this.second = b;
} | 3.68 |
hbase_Bytes_readStringFixedSize | /**
* Reads a fixed-size field and interprets it as a string padded with zeros.
*/
public static String readStringFixedSize(final DataInput in, int size) throws IOException {
byte[] b = new byte[size];
in.readFully(b);
int n = b.length;
while (n > 0 && b[n - 1] == 0)
--n;
return toString(b, 0, n);
} | 3.68 |
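A self-contained sketch of the zero-padded round trip the reader above performs; the write-side padding is done by hand here, since only the reader is shown.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hbase.util.Bytes;

public class FixedSizeStringSketch {
    public static void main(String[] args) throws IOException {
        // Write "abc" into a fixed 8-byte field, padded with trailing zeros.
        byte[] field = new byte[8];
        byte[] value = "abc".getBytes(StandardCharsets.UTF_8);
        System.arraycopy(value, 0, field, 0, value.length);

        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        new DataOutputStream(bos).write(field);

        // readStringFixedSize() consumes exactly 8 bytes and strips the zero padding.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(Bytes.readStringFixedSize(in, 8)); // "abc"
    }
}
```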
hbase_FilterBase_reset | /**
* Filters that are purely stateless and do nothing in their reset() methods can inherit this
* null/empty implementation. {@inheritDoc}
*/
@Override
public void reset() throws IOException {
} | 3.68 |
hbase_RestoreSnapshotHelper_restoreRegion | /**
* Restore region by removing files not in the snapshot and adding the missing ones from the
* snapshot.
*/
private void restoreRegion(final RegionInfo regionInfo,
final SnapshotRegionManifest regionManifest, Path regionDir) throws IOException {
Map<String, List<SnapshotRegionManifest.StoreFile>> snapshotFiles =
getRegionHFileReferences(regionManifest);
String tableName = tableDesc.getTableName().getNameAsString();
final String snapshotName = snapshotDesc.getName();
Path regionPath = new Path(tableDir, regionInfo.getEncodedName());
HRegionFileSystem regionFS = (fs.exists(regionPath))
? HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false)
: HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo);
// Restore families present in the table
for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
byte[] family = Bytes.toBytes(familyDir.getName());
Set<String> familyFiles = getTableRegionFamilyFiles(familyDir);
List<SnapshotRegionManifest.StoreFile> snapshotFamilyFiles =
snapshotFiles.remove(familyDir.getName());
List<StoreFileInfo> filesToTrack = new ArrayList<>();
if (snapshotFamilyFiles != null) {
List<SnapshotRegionManifest.StoreFile> hfilesToAdd = new ArrayList<>();
for (SnapshotRegionManifest.StoreFile storeFile : snapshotFamilyFiles) {
if (familyFiles.contains(storeFile.getName())) {
// HFile already present
familyFiles.remove(storeFile.getName());
// no need to restore already present files, but we need to add those to tracker
filesToTrack
.add(new StoreFileInfo(conf, fs, new Path(familyDir, storeFile.getName()), true));
} else {
// HFile missing
hfilesToAdd.add(storeFile);
}
}
// Remove hfiles not present in the snapshot
for (String hfileName : familyFiles) {
Path hfile = new Path(familyDir, hfileName);
if (!fs.getFileStatus(hfile).isDirectory()) {
LOG.trace("Removing HFile=" + hfileName + " not present in snapshot=" + snapshotName
+ " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile);
}
}
// Restore Missing files
for (SnapshotRegionManifest.StoreFile storeFile : hfilesToAdd) {
LOG.debug("Restoring missing HFileLink " + storeFile.getName() + " of snapshot="
+ snapshotName + " to region=" + regionInfo.getEncodedName() + " table=" + tableName);
String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs);
// mark the reference file to be added to tracker
filesToTrack.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true));
}
} else {
// Family doesn't exists in the snapshot
LOG.trace("Removing family=" + Bytes.toString(family) + " in snapshot=" + snapshotName
+ " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
HFileArchiver.archiveFamilyByFamilyDir(fs, conf, regionInfo, familyDir, family);
fs.delete(familyDir, true);
}
StoreFileTracker tracker =
StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder()
.withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build());
// simply reset list of tracked files with the matching files
// and the extra one present in the snapshot
tracker.set(filesToTrack);
}
// Add families not present in the table
for (Map.Entry<String, List<SnapshotRegionManifest.StoreFile>> familyEntry : snapshotFiles
.entrySet()) {
Path familyDir = new Path(regionDir, familyEntry.getKey());
StoreFileTracker tracker =
StoreFileTrackerFactory.create(conf, true, StoreContext.getBuilder()
.withFamilyStoreDirectoryPath(familyDir).withRegionFileSystem(regionFS).build());
List<StoreFileInfo> files = new ArrayList<>();
if (!fs.mkdirs(familyDir)) {
throw new IOException("Unable to create familyDir=" + familyDir);
}
for (SnapshotRegionManifest.StoreFile storeFile : familyEntry.getValue()) {
LOG.trace("Adding HFileLink (Not present in the table) " + storeFile.getName()
+ " of snapshot " + snapshotName + " to table=" + tableName);
String fileName = restoreStoreFile(familyDir, regionInfo, storeFile, createBackRefs);
files.add(new StoreFileInfo(conf, fs, new Path(familyDir, fileName), true));
}
tracker.set(files);
}
} | 3.68 |
hbase_StripeStoreFileManager_findStripeIndexByEndRow | /**
* Finds the stripe index by end row.
*/
private final int findStripeIndexByEndRow(byte[] endRow) {
assert !isInvalid(endRow);
if (isOpen(endRow)) return state.stripeEndRows.length;
return Arrays.binarySearch(state.stripeEndRows, endRow, Bytes.BYTES_COMPARATOR);
} | 3.68 |
flink_RootExceptionHistoryEntry_fromGlobalFailure | /**
* Creates a {@code RootExceptionHistoryEntry} based on the passed {@link ErrorInfo}. No
* concurrent failures will be added.
*
* @param errorInfo The failure information that shall be used to initialize the {@code
* RootExceptionHistoryEntry}.
* @return The {@code RootExceptionHistoryEntry} instance.
* @throws NullPointerException if {@code errorInfo} is {@code null} or the passed info does not
* contain a {@code Throwable}.
* @throws IllegalArgumentException if the passed {@code timestamp} is not bigger than {@code
* 0}.
*/
public static RootExceptionHistoryEntry fromGlobalFailure(ErrorInfo errorInfo) {
Preconditions.checkNotNull(errorInfo, "errorInfo");
return fromGlobalFailure(
errorInfo.getException(),
errorInfo.getTimestamp(),
FailureEnricherUtils.EMPTY_FAILURE_LABELS,
Collections.emptyList());
} | 3.68 |
morf_ChangeColumn_getTableName | /**
* Gets the name of the table to change.
*
* @return the name of the table to change
*/
public String getTableName() {
return tableName;
} | 3.68 |
hadoop_HttpReferrerAuditHeader_withAttribute | /**
* Add an attribute to the current map.
* Replaces any existing value for the key.
* @param key key to set/update
* @param value new value
* @return the builder
*/
public Builder withAttribute(String key, String value) {
attributes.put(key, value);
return this;
} | 3.68 |
rocketmq-connect_Worker_checkAndNewConnectors | /**
* Check the assigned connectors and return the ones that are not started yet.
*
* @param assigns the connector configurations assigned to this worker
* @return the configurations of the connectors that still need to be created
*/
private Map<String, ConnectKeyValue> checkAndNewConnectors(Map<String, ConnectKeyValue> assigns) {
if (assigns == null || assigns.isEmpty()) {
return new HashMap<>();
}
Map<String, ConnectKeyValue> newConnectors = new HashMap<>();
for (String connectName : assigns.keySet()) {
if (!connectors.containsKey(connectName)) {
newConnectors.put(connectName, assigns.get(connectName));
}
}
return newConnectors;
} | 3.68 |
pulsar_CmdProduce_updateConfig | /**
* Set Pulsar client configuration.
*
*/
public void updateConfig(ClientBuilder newBuilder, Authentication authentication, String serviceURL) {
this.clientBuilder = newBuilder;
this.authentication = authentication;
this.serviceURL = serviceURL;
} | 3.68 |
querydsl_SQLMergeClause_keys | /**
* Set the keys to be used in the MERGE clause
*
* @param paths keys
* @return the current object
*/
public SQLMergeClause keys(Path<?>... paths) {
keys.addAll(Arrays.asList(paths));
return this;
} | 3.68 |
hadoop_RouterClientMetrics_incInvokedMethod | /**
* Increase the metrics based on the method being invoked.
* @param method method being invoked
*/
public void incInvokedMethod(Method method) {
switch (method.getName()) {
case "getBlockLocations":
getBlockLocationsOps.incr();
break;
case "getServerDefaults":
getServerDefaultsOps.incr();
break;
case "create":
createOps.incr();
break;
case "append":
appendOps.incr();
break;
case "recoverLease":
recoverLeaseOps.incr();
break;
case "setReplication":
setReplicationOps.incr();
break;
case "setStoragePolicy":
setStoragePolicyOps.incr();
break;
case "getStoragePolicies":
getStoragePoliciesOps.incr();
break;
case "setPermission":
setPermissionOps.incr();
break;
case "setOwner":
setOwnerOps.incr();
break;
case "addBlock":
addBlockOps.incr();
break;
case "getAdditionalDatanode":
getAdditionalDatanodeOps.incr();
break;
case "abandonBlock":
abandonBlockOps.incr();
break;
case "complete":
completeOps.incr();
break;
case "updateBlockForPipeline":
updateBlockForPipelineOps.incr();
break;
case "updatePipeline":
updatePipelineOps.incr();
break;
case "getPreferredBlockSize":
getPreferredBlockSizeOps.incr();
break;
case "rename":
renameOps.incr();
break;
case "rename2":
rename2Ops.incr();
break;
case "concat":
concatOps.incr();
break;
case "truncate":
truncateOps.incr();
break;
case "delete":
deleteOps.incr();
break;
case "mkdirs":
mkdirsOps.incr();
break;
case "renewLease":
renewLeaseOps.incr();
break;
case "getListing":
getListingOps.incr();
break;
case "getBatchedListing":
getBatchedListingOps.incr();
break;
case "getFileInfo":
getFileInfoOps.incr();
break;
case "isFileClosed":
isFileClosedOps.incr();
break;
case "getFileLinkInfo":
getFileLinkInfoOps.incr();
break;
case "getLocatedFileInfo":
getLocatedFileInfoOps.incr();
break;
case "getStats":
getStatsOps.incr();
break;
case "getDatanodeReport":
getDatanodeReportOps.incr();
break;
case "getDatanodeStorageReport":
getDatanodeStorageReportOps.incr();
break;
case "setSafeMode":
setSafeModeOps.incr();
break;
case "restoreFailedStorage":
restoreFailedStorageOps.incr();
break;
case "saveNamespace":
saveNamespaceOps.incr();
break;
case "rollEdits":
rollEditsOps.incr();
break;
case "refreshNodes":
refreshNodesOps.incr();
break;
case "finalizeUpgrade":
finalizeUpgradeOps.incr();
break;
case "upgradeStatus":
upgradeStatusOps.incr();
break;
case "rollingUpgrade":
rollingUpgradeOps.incr();
break;
case "metaSave":
metaSaveOps.incr();
break;
case "listCorruptFileBlocks":
listCorruptFileBlocksOps.incr();
break;
case "setBalancerBandwidth":
setBalancerBandwidthOps.incr();
break;
case "getContentSummary":
getContentSummaryOps.incr();
break;
case "fsync":
fsyncOps.incr();
break;
case "setTimes":
setTimesOps.incr();
break;
case "createSymlink":
createSymlinkOps.incr();
break;
case "getLinkTarget":
getLinkTargetOps.incr();
break;
case "allowSnapshot":
allowSnapshotOps.incr();
break;
case "disallowSnapshot":
disallowSnapshotOps.incr();
break;
case "renameSnapshot":
renameSnapshotOps.incr();
break;
case "getSnapshottableDirListing":
getSnapshottableDirListingOps.incr();
break;
case "getSnapshotListing":
getSnapshotListingOps.incr();
break;
case "getSnapshotDiffReport":
getSnapshotDiffReportOps.incr();
break;
case "getSnapshotDiffReportListing":
getSnapshotDiffReportListingOps.incr();
break;
case "addCacheDirective":
addCacheDirectiveOps.incr();
break;
case "modifyCacheDirective":
modifyCacheDirectiveOps.incr();
break;
case "removeCacheDirective":
removeCacheDirectiveOps.incr();
break;
case "listCacheDirectives":
listCacheDirectivesOps.incr();
break;
case "addCachePool":
addCachePoolOps.incr();
break;
case "modifyCachePool":
modifyCachePoolOps.incr();
break;
case "removeCachePool":
removeCachePoolOps.incr();
break;
case "listCachePools":
listCachePoolsOps.incr();
break;
case "modifyAclEntries":
modifyAclEntriesOps.incr();
break;
case "removeAclEntries":
removeAclEntriesOps.incr();
break;
case "removeDefaultAcl":
removeDefaultAclOps.incr();
break;
case "removeAcl":
removeAclOps.incr();
break;
case "setAcl":
setAclOps.incr();
break;
case "getAclStatus":
getAclStatusOps.incr();
break;
case "createEncryptionZone":
createEncryptionZoneOps.incr();
break;
case "getEZForPath":
getEZForPathOps.incr();
break;
case "listEncryptionZones":
listEncryptionZonesOps.incr();
break;
case "reencryptEncryptionZone":
reencryptEncryptionZoneOps.incr();
break;
case "listReencryptionStatus":
listReencryptionStatusOps.incr();
break;
case "setXAttr":
setXAttrOps.incr();
break;
case "getXAttrs":
getXAttrsOps.incr();
break;
case "listXAttrs":
listXAttrsOps.incr();
break;
case "removeXAttr":
removeXAttrsOps.incr();
break;
case "checkAccess":
checkAccessOps.incr();
break;
case "getCurrentEditLogTxid":
getCurrentEditLogTxidOps.incr();
break;
case "getEditsFromTxid":
getEditsFromTxidOps.incr();
break;
case "getDataEncryptionKey":
getDataEncryptionKeyOps.incr();
break;
case "createSnapshot":
createSnapshotOps.incr();
break;
case "deleteSnapshot":
deleteSnapshotOps.incr();
break;
case "setQuota":
setQuotaOps.incr();
break;
case "getQuotaUsage":
getQuotaUsageOps.incr();
break;
case "reportBadBlocks":
reportBadBlocksOps.incr();
break;
case "unsetStoragePolicy":
unsetStoragePolicyOps.incr();
break;
case "getStoragePolicy":
getStoragePolicyOps.incr();
break;
case "getErasureCodingPolicies":
getErasureCodingPoliciesOps.incr();
break;
case "getErasureCodingCodecs":
getErasureCodingCodecsOps.incr();
break;
case "addErasureCodingPolicies":
addErasureCodingPoliciesOps.incr();
break;
case "removeErasureCodingPolicy":
removeErasureCodingPolicyOps.incr();
break;
case "disableErasureCodingPolicy":
disableErasureCodingPolicyOps.incr();
break;
case "enableErasureCodingPolicy":
enableErasureCodingPolicyOps.incr();
break;
case "getErasureCodingPolicy":
getErasureCodingPolicyOps.incr();
break;
case "setErasureCodingPolicy":
setErasureCodingPolicyOps.incr();
break;
case "unsetErasureCodingPolicy":
unsetErasureCodingPolicyOps.incr();
break;
case "getECTopologyResultForPolicies":
getECTopologyResultForPoliciesOps.incr();
break;
case "getECBlockGroupStats":
getECBlockGroupStatsOps.incr();
break;
case "getReplicatedBlockStats":
getReplicatedBlockStatsOps.incr();
break;
case "listOpenFiles":
listOpenFilesOps.incr();
break;
case "msync":
msyncOps.incr();
break;
case "satisfyStoragePolicy":
satisfyStoragePolicyOps.incr();
break;
case "getHAServiceState":
getHAServiceStateOps.incr();
break;
case "getSlowDatanodeReport":
getSlowDatanodeReportOps.incr();
break;
default:
otherOps.incr();
}
} | 3.68 |
flink_StreamProjection_projectTuple17 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>
SingleOutputStreamOperator<
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>
projectTuple17() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>
tType =
new TupleTypeInfo<
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
hadoop_RejectPlacementRule_setConfig | /**
* The Reject rule does not use any configuration. Override and ignore all
* configuration.
* @param initArg the config to be set
*/
@Override
public void setConfig(Object initArg) {
// This rule ignores all config, just log and return
LOG.debug("RejectPlacementRule instantiated");
} | 3.68 |
morf_AbstractSqlDialectTest_testPreInsertWithPresetAutonumStatementsNotInsertingUnderAutonumLimit | /**
* Tests the SQL statements that are run before a data insert.
*/
@SuppressWarnings("unchecked")
@Test
public void testPreInsertWithPresetAutonumStatementsNotInsertingUnderAutonumLimit() {
compareStatements(
expectedPreInsertStatementsNotInsertingUnderAutonumLimit(),
testDialect.preInsertWithPresetAutonumStatements(metadata.getTable(TEST_TABLE), false),
testDialect.preInsertWithPresetAutonumStatements(metadata.getTable(AUTO_NUMBER_TABLE), false)
);
} | 3.68 |
framework_Table_isMultiSelectTouchDetectionEnabled | /**
* Returns if touch screen detection is used to toggle multi select mode.
*
* @return If touch screen detection for multi select is enabled
*/
public boolean isMultiSelectTouchDetectionEnabled() {
return multiSelectTouchDetectionEnabled;
} | 3.68 |
pulsar_MetaStore_getManagedLedgerInfo | /**
* Get the metadata used by the ManagedLedger.
*
* @param ledgerName
* the name of the ManagedLedger
* @param createIfMissing
* whether the managed ledger metadata should be created if it doesn't exist already
* @throws MetaStoreException
*/
default void getManagedLedgerInfo(String ledgerName, boolean createIfMissing,
MetaStoreCallback<ManagedLedgerInfo> callback) {
getManagedLedgerInfo(ledgerName, createIfMissing, null, callback);
} | 3.68 |
hbase_MasterObserver_preRemoveServers | /**
* Called before servers are removed from rsgroup
* @param ctx the environment to interact with the framework and master
* @param servers set of decommissioned servers to remove
*/
default void preRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<Address> servers) throws IOException {
} | 3.68 |
morf_ResolvedTables_isPortableSqlStatementUsed | /**
* @return true if {@link PortableSqlStatement} has been used in the analysed SQL/DDL element.
*/
public boolean isPortableSqlStatementUsed() {
return portableSqlStatementUsed;
} | 3.68 |
hbase_MasterObserver_postListNamespaceDescriptors | /**
* Called after a listNamespaceDescriptors request has been processed.
* @param ctx the environment to interact with the framework and master
* @param descriptors the list of descriptors about to be returned
*/
default void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors) throws IOException {
} | 3.68 |
hadoop_CommitUtilsWithMR_getTempJobAttemptPath | /**
* Compute a path for temporary data associated with a job.
* This data is <i>not magic</i>
* @param jobUUID unique Job ID.
* @param out output directory of job
* @param appAttemptId the ID of the application attempt for this job.
* @return the path to store temporary job attempt data.
*/
public static Path getTempJobAttemptPath(String jobUUID,
Path out, final int appAttemptId) {
return new Path(new Path(out, TEMP_DATA),
formatAppAttemptDir(jobUUID, appAttemptId));
} | 3.68 |
hbase_MasterRpcServices_deleteSnapshot | /**
* Execute Delete Snapshot operation.
* @return DeleteSnapshotResponse (a protobuf wrapped void) if the snapshot existed and was
* deleted properly.
* @throws ServiceException wrapping SnapshotDoesNotExistException if specified snapshot did not
* exist.
*/
@Override
public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
DeleteSnapshotRequest request) throws ServiceException {
try {
server.checkInitialized();
server.snapshotManager.checkSnapshotSupport();
LOG.info(server.getClientIdAuditPrefix() + " delete " + request.getSnapshot());
server.snapshotManager.deleteSnapshot(request.getSnapshot());
return DeleteSnapshotResponse.newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
hadoop_BlockBlobInputStream_capacity | /**
* Gets the current capacity of the stream.
*/
public synchronized int capacity() {
return length;
} | 3.68 |
hadoop_InterruptEscalator_getServiceWasShutdown | /**
* Probe for the service being shutdown.
* @return true if the service has been shutdown in the runnable
*/
private boolean getServiceWasShutdown() {
return serviceWasShutdown.get();
} | 3.68 |
hadoop_SlowPeerTracker_getReportsForAllDataNodes | /**
* Retrieve all reports for all nodes. Stale reports are excluded.
*
* @return map from SlowNodeId {@literal ->} (set of nodes reporting peers).
*/
public Map<String, SortedSet<SlowPeerLatencyWithReportingNode>> getReportsForAllDataNodes() {
if (allReports.isEmpty()) {
return ImmutableMap.of();
}
final Map<String, SortedSet<SlowPeerLatencyWithReportingNode>> allNodesValidReports =
new HashMap<>();
final long now = timer.monotonicNow();
for (Map.Entry<String, ConcurrentMap<String, LatencyWithLastReportTime>> entry
: allReports.entrySet()) {
SortedSet<SlowPeerLatencyWithReportingNode> validReports =
filterNodeReports(entry.getValue(), now);
if (!validReports.isEmpty()) {
allNodesValidReports.put(entry.getKey(), validReports);
}
}
return allNodesValidReports;
} | 3.68 |
flink_ProcessingTimeTrigger_create | /** Creates a new trigger that fires once system time passes the end of the window. */
public static ProcessingTimeTrigger create() {
return new ProcessingTimeTrigger();
} | 3.68 |
hudi_SparkRDDWriteClient_commit | /**
* Complete changes performed at the given instantTime marker with specified action.
*/
@Override
public boolean commit(String instantTime, JavaRDD<WriteStatus> writeStatuses, Option<Map<String, String>> extraMetadata,
String commitActionType, Map<String, List<String>> partitionToReplacedFileIds,
Option<BiConsumer<HoodieTableMetaClient, HoodieCommitMetadata>> extraPreCommitFunc) {
context.setJobStatus(this.getClass().getSimpleName(), "Committing stats: " + config.getTableName());
List<HoodieWriteStat> writeStats = writeStatuses.map(WriteStatus::getStat).collect();
return commitStats(instantTime, HoodieJavaRDD.of(writeStatuses), writeStats, extraMetadata, commitActionType, partitionToReplacedFileIds, extraPreCommitFunc);
} | 3.68 |
framework_Calendar_getFirstVisibleHourOfDay | /**
* Returns the first visible hour in the week view, using a 24-hour time
* format.
*
* @return the first visible hour of the day
*/
public int getFirstVisibleHourOfDay() {
return firstHour;
} | 3.68 |
flink_BlockInfo_setRecordCount | /**
* Sets the recordCount to the specified value.
*
* @param recordCount the recordCount to set
*/
public void setRecordCount(long recordCount) {
this.recordCount = recordCount;
} | 3.68 |
framework_VScrollTable_getNavigationLeftKey | /**
* Get the key that scrolls to the left in the table. By default it is the
* left arrow key but by overriding this you can change the key to whatever
* you want.
*
* @return The keycode of the key
*/
protected int getNavigationLeftKey() {
return KeyCodes.KEY_LEFT;
} | 3.68 |
hadoop_EmptyS3AStatisticsContext_trackDuration | /**
* Always return the stub duration tracker.
* @param key statistic key prefix
* @param count #of times to increment the matching counter in this
* operation.
* @return stub tracker.
*/
public DurationTracker trackDuration(String key, long count) {
return stubDurationTracker();
} | 3.68 |
hudi_DateTimeUtils_plural | /**
* @param label the original label
* @return both the singular format and plural format of the original label
*/
private static String[] plural(String label) {
return new String[] {label, label + PLURAL_SUFFIX};
} | 3.68 |
flink_ResourceGuard_closeUninterruptibly | /**
* If the current thread is {@linkplain Thread#interrupt interrupted} while waiting for the
* close method to complete, then it will continue to wait. When the thread does return from
* this method its interrupt status will be set.
*/
@SuppressWarnings("WeakerAccess")
public void closeUninterruptibly() {
boolean interrupted = false;
synchronized (lock) {
closed = true;
while (leaseCount > 0) {
try {
lock.wait();
} catch (InterruptedException e) {
interrupted = true;
}
}
}
if (interrupted) {
Thread.currentThread().interrupt();
}
} | 3.68 |
dubbo_GsonUtils_setSupportGson | /**
* @deprecated for unit tests only
*/
@Deprecated
protected static void setSupportGson(Boolean supportGson) {
GsonUtils.supportGson = supportGson;
} | 3.68 |
querydsl_MongodbExpressions_geoIntersects | /**
* Finds documents whose geospatial data intersects the given point
*
* @param expr location
* @param latVal latitude
* @param longVal longitude
* @return predicate
*/
public static BooleanExpression geoIntersects(Expression<Double[]> expr, double latVal, double longVal) {
return Expressions.booleanOperation(MongodbOps.GEO_INTERSECTS, expr, ConstantImpl.create(new Double[]{latVal, longVal}));
} | 3.68 |
flink_HsSubpartitionFileReaderImpl_updateCachedRegionIfNeeded | /** Points the cursors to the given buffer index, if possible. */
private void updateCachedRegionIfNeeded(int bufferIndex) {
if (isInCachedRegion(bufferIndex)) {
int numAdvance = bufferIndex - currentBufferIndex;
numSkip += numAdvance;
numReadable -= numAdvance;
currentBufferIndex = bufferIndex;
return;
}
Optional<HsFileDataIndex.ReadableRegion> lookupResultOpt =
dataIndex.getReadableRegion(subpartitionId, bufferIndex, consumingOffset);
if (!lookupResultOpt.isPresent()) {
currentBufferIndex = -1;
numReadable = 0;
numSkip = 0;
offset = -1L;
} else {
HsFileDataIndex.ReadableRegion cachedRegion = lookupResultOpt.get();
currentBufferIndex = bufferIndex;
numSkip = cachedRegion.numSkip;
numReadable = cachedRegion.numReadable;
offset = cachedRegion.offset;
}
} | 3.68 |
flink_AbstractUdfOperator_asArray | /**
* Generic utility function that wraps a single class object into an array of that class type.
*
* @param <U> The type of the classes.
* @param clazz The class object to be wrapped.
* @return An array wrapping the class object.
*/
protected static <U> Class<U>[] asArray(Class<U> clazz) {
@SuppressWarnings("unchecked")
Class<U>[] array = new Class[] {clazz};
return array;
} | 3.68 |
morf_AbstractDatabaseType_splitJdbcUrl | /**
* Splits up a JDBC URL into its components and returns the result
* as a stack, allowing it to be parsed from left to right.
*
* @param jdbcUrl The JDBC URL.
* @return The split result as a stack.
*/
protected final Stack<String> splitJdbcUrl(String jdbcUrl) {
Stack<String> splitURL = split(jdbcUrl.trim(), "[/;:@]+");
if (!splitURL.pop().equalsIgnoreCase("jdbc") || !splitURL.pop().equals(":")) {
throw new IllegalArgumentException("[" + jdbcUrl + "] is not a valid JDBC URL");
}
return splitURL;
} | 3.68 |
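An illustrative parse, assuming the split() helper keeps each run of delimiters as its own token (which the two pop() calls above rely on): "jdbc:mysql://localhost:3306/morf" is consumed left to right as jdbc, :, mysql, ://, localhost, :, 3306, /, morf; the first two tokens are validated and discarded, and the remainder is left on the stack for the dialect-specific parser.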
hbase_SplitTableRegionProcedure_postSplitRegion | /**
* Post split region actions
* @param env MasterProcedureEnv
**/
private void postSplitRegion(final MasterProcedureEnv env) throws IOException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.postCompletedSplitRegionAction(daughterOneRI, daughterTwoRI, getUser());
}
} | 3.68 |
framework_VDragAndDropManager_visitServer | /**
* Visits the server during a drag and drop operation. The transferable and
* event type are given to the server-side counterpart of the DropHandler.
*
* If another server visit is started before the current is received, the
* current is just dropped. TODO consider if callback should have
* interrupted() method for cleanup.
*
* @param acceptCallback
* the callback that should handle the matching server response
* when it arrives
*/
public void visitServer(VDragEventServerCallback acceptCallback) {
doRequest(DragEventType.ENTER);
serverCallback = acceptCallback;
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations8 | /**
* Test for proper SQL mathematics operation generation from DSL expressions.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations8() {
String result = testDialect.getSqlFrom(field("a").plus(field("b")).plus(field("c")).plus(field("d")).plus(field("e")));
assertEquals(expectedSqlForMathOperations8(), result);
} | 3.68 |
hudi_QuickstartUtils_generateInserts | /**
* Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys.
*/
public List<HoodieRecord> generateInserts(Integer n) throws IOException {
String randomString = generateRandomString();
return generateInsertsStream(randomString, n).collect(Collectors.toList());
} | 3.68 |
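A hedged usage sketch in the style of the Hudi quickstart; it assumes generateInserts lives on the QuickstartUtils.DataGenerator helper, as the quickstart examples suggest.

```java
import java.io.IOException;
import java.util.List;

import org.apache.hudi.QuickstartUtils.DataGenerator;
import org.apache.hudi.common.model.HoodieRecord;

public class GenerateInsertsSketch {
    public static void main(String[] args) throws IOException {
        DataGenerator dataGen = new DataGenerator();
        // Ten new records, spread uniformly across the generator's default partition paths.
        List<HoodieRecord> inserts = dataGen.generateInserts(10);
        System.out.println(inserts.size()); // 10
    }
}
```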
flink_InputTypeStrategies_compositeSequence | /**
* A strategy that lets you apply other strategies to subsequences of the actual arguments.
*
* <p>The {@link #sequence(ArgumentTypeStrategy...)} should be preferred in most of the cases.
* Use this strategy only if you need to apply a common logic to a subsequence of the arguments.
*/
public static SubsequenceStrategyBuilder compositeSequence() {
return new SubsequenceStrategyBuilder();
} | 3.68 |
morf_InsertStatement_getFromTable | /**
* Gets the table to select from. This is a short-hand for "SELECT * FROM [Table]".
*
* @return the table to select from.
*/
public TableReference getFromTable() {
return fromTable;
} | 3.68 |
flink_BlobUtils_createBlobStoreFromConfig | /**
* Creates a BlobStore based on the parameters set in the configuration.
*
* @param config configuration to use
* @return a (distributed) blob store for high availability
* @throws IOException thrown if the (distributed) file storage cannot be created
*/
public static BlobStoreService createBlobStoreFromConfig(Configuration config)
throws IOException {
if (HighAvailabilityMode.isHighAvailabilityModeActivated(config)) {
return createFileSystemBlobStore(config);
} else {
return new VoidBlobStore();
}
} | 3.68 |
hadoop_NMContainerTokenSecretManager_setMasterKey | /**
* Used by NodeManagers to create a token-secret-manager with the key obtained
* from the RM. This can happen during registration or when the RM rolls the
* master-key and signals the NM.
*
* @param masterKeyRecord
*/
@Private
public synchronized void setMasterKey(MasterKey masterKeyRecord) {
// Update keys only if the key has changed.
if (super.currentMasterKey == null || super.currentMasterKey.getMasterKey()
.getKeyId() != masterKeyRecord.getKeyId()) {
LOG.info("Rolling master-key for container-tokens, got key with id "
+ masterKeyRecord.getKeyId());
if (super.currentMasterKey != null) {
updatePreviousMasterKey(super.currentMasterKey);
}
updateCurrentMasterKey(new MasterKeyData(masterKeyRecord,
createSecretKey(masterKeyRecord.getBytes().array())));
}
} | 3.68 |
framework_CheckBox_setValue | /**
* Sets the value of this CheckBox. If the new value is not equal to
* {@code getValue()}, fires a {@link ValueChangeEvent}. Throws
* {@code NullPointerException} if the value is null.
*
* @param value
* the new value, not {@code null}
* @param userOriginated
* {@code true} if this event originates from the client,
* {@code false} otherwise.
* @throws NullPointerException
* if {@code value} is {@code null}
*/
@Override
protected boolean setValue(Boolean value, boolean userOriginated) {
Objects.requireNonNull(value, "CheckBox value must not be null");
return super.setValue(value, userOriginated);
} | 3.68 |
hadoop_QuotaUsage_getStorageTypeHeader | /**
* Return the header with the storage types.
*
* @param storageTypes storage types.
* @return storage header string
*/
public static String getStorageTypeHeader(List<StorageType> storageTypes) {
StringBuilder header = new StringBuilder();
for (StorageType st : storageTypes) {
/* the field length is 13/17 for quota and remain quota
* as the max length for quota name is ARCHIVE_QUOTA
* and remain quota name REM_ARCHIVE_QUOTA */
String storageName = st.toString();
header.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
storageName + "_QUOTA", "REM_" + storageName + "_QUOTA"));
}
return header.toString();
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectOrderByDescendingScript | /**
* Tests a select with a descending "order by" clause.
*/
@Test
public void testSelectOrderByDescendingScript() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(ALTERNATE_TABLE))
.orderBy(new FieldReference(STRING_FIELD, Direction.DESCENDING));
String expectedSql = "SELECT stringField FROM " + tableName(ALTERNATE_TABLE) + " ORDER BY stringField DESC";
if (!nullOrder().equals(StringUtils.EMPTY)) {
expectedSql = expectedSql + " " + nullOrderForDirection(Direction.DESCENDING);
}
assertEquals("Select with descending order by", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
framework_CustomizedSystemMessages_setCommunicationErrorCaption | /**
* Sets the caption of the notification. Set to null for no caption. If both
* the caption and the message are null, the notification is disabled.
*
* @param communicationErrorCaption
* the caption
*/
public void setCommunicationErrorCaption(String communicationErrorCaption) {
this.communicationErrorCaption = communicationErrorCaption;
} | 3.68 |
framework_VaadinResponse_setNoCacheHeaders | /**
* Sets all conceivable headers that might prevent a response from being
* stored in any caches.
*
* @since 8.3.2
*/
public default void setNoCacheHeaders() {
// no-store to disallow storing even if cache would be revalidated
// must-revalidate to not use stored value even if someone asks for it
setHeader("Cache-Control", "no-cache, no-store, must-revalidate");
// Also set legacy values in case of old proxies in between
setHeader("Pragma", "no-cache");
setHeader("Expires", "0");
} | 3.68 |
framework_TableElement_getHeaderCell | /**
* Returns the header cell with the given column index.
*
* @param column
* 0 based column index
* @return TableHeaderElement containing the wanted header cell
*/
public TableHeaderElement getHeaderCell(int column) {
TestBenchElement headerCell = wrapElement(
findElement(By.vaadin("#header[" + column + "]")),
getCommandExecutor());
return headerCell.wrap(TableHeaderElement.class);
} | 3.68 |
hmily_RepositoryPathUtils_buildMongoTableName | /**
* Build mongo table name string.
*
* @param applicationName the application name
* @return the string
*/
public static String buildMongoTableName(final String applicationName) {
return CommonConstant.DB_SUFFIX + applicationName.replaceAll("-", "_");
} | 3.68 |
hadoop_CSQueueStore_get | /**
* Getter method for a queue; it can find queues by both full and
* short names.
* @param name Full or short name of the queue
* @return the queue
*/
public CSQueue get(String name) {
if (name == null) {
return null;
}
try {
modificationLock.readLock().lock();
return getMap.getOrDefault(name, null);
} finally {
modificationLock.readLock().unlock();
}
} | 3.68 |
druid_FilterChainImpl_statement_executeQuery | // //////////////////////////////////////// statement
@Override
public ResultSetProxy statement_executeQuery(StatementProxy statement, String sql) throws SQLException {
if (this.pos < filterSize) {
return nextFilter().statement_executeQuery(this, statement, sql);
}
ResultSet resultSet = statement.getRawObject().executeQuery(sql);
if (resultSet == null) {
return null;
}
return new ResultSetProxyImpl(statement, resultSet, dataSource.createResultSetId(),
statement.getLastExecuteSql());
} | 3.68 |
hibernate-validator_ConstraintAnnotationVisitor_visitTypeAsInterface | /**
* <p>
* Checks whether the given annotations are correctly specified at the given
* interface type declaration. The following checks are performed:
* </p>
* <ul>
* <li>
* Constraint annotations may only be specified at types supported by the constraints.</li>
* </ul>
*/
@Override
public Void visitTypeAsInterface(TypeElement e, List<AnnotationMirror> p) {
checkConstraints( e, p );
return null;
} | 3.68 |
graphhopper_GraphHopper_importAndClose | /**
* Imports and processes data, storing it to disk when complete.
*/
public void importAndClose() {
if (!load()) {
printInfo();
process(true);
} else {
printInfo();
logger.info("Graph already imported into " + ghLocation);
}
close();
} | 3.68 |
hbase_HBaseTestingUtility_bloomAndCompressionCombinations | /**
* Create all combinations of Bloom filters and compression algorithms for testing.
*/
private static List<Object[]> bloomAndCompressionCombinations() {
List<Object[]> configurations = new ArrayList<>();
for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
for (BloomType bloomType : BloomType.values()) {
configurations.add(new Object[] { comprAlgo, bloomType });
}
}
return Collections.unmodifiableList(configurations);
} | 3.68 |
hmily_MetricsReporter_registerHistogram | /**
* Register histogram.
*
* @param name name
* @param document document for histogram
*/
public static void registerHistogram(final String name, final String document) {
registerHistogram(name, null, document);
} | 3.68 |
hbase_HealthChecker_init | /**
* Initialize.
* @param location the location of the health script
* @param timeout the timeout to be used for the health script
*/
public void init(String location, long timeout) {
this.healthCheckScript = location;
this.scriptTimeout = timeout;
ArrayList<String> execScript = new ArrayList<>();
execScript.add(healthCheckScript);
this.shexec = new ShellCommandExecutor(execScript.toArray(new String[execScript.size()]), null,
null, scriptTimeout);
LOG.info("HealthChecker initialized with script at " + this.healthCheckScript + ", timeout="
+ timeout);
} | 3.68 |
flink_DataViewUtils_adjustDataViews | /**
* Modifies the data type of an accumulator regarding data views.
*
* <p>For performance reasons, each data view is wrapped into a RAW type which gives it {@link
* LazyBinaryFormat} semantics and avoids multiple deserialization steps during access.
* Furthermore, a data view will not be serialized if a state backend is used (the serializer of
* the RAW type will be a {@link NullSerializer} in this case).
*/
public static DataType adjustDataViews(
DataType accumulatorDataType, boolean hasStateBackedDataViews) {
final Function<DataType, TypeSerializer<?>> serializer;
if (hasStateBackedDataViews) {
serializer = dataType -> NullSerializer.INSTANCE;
} else {
serializer = ExternalSerializer::of;
}
return DataTypeUtils.transform(
accumulatorDataType, new DataViewsTransformation(serializer));
} | 3.68 |
flink_MutableHashTable_close | /**
* Closes the hash table. This effectively releases all internal structures and closes all open
* files and removes them. The call to this method is valid both as a cleanup after the complete
* inputs were properly processed, and as a cancellation call, which cleans up all resources
* that are currently held by the hash join.
*/
public void close() {
// make sure that we close only once
if (!this.closed.compareAndSet(false, true)) {
return;
}
// clear the iterators, so the next call to next() will notice
this.bucketIterator = null;
this.probeIterator = null;
// release the table structure
releaseTable();
// clear the memory in the partitions
clearPartitions();
// clear the current probe side channel, if there is one
if (this.currentSpilledProbeSide != null) {
try {
this.currentSpilledProbeSide.closeAndDelete();
} catch (Throwable t) {
LOG.warn(
"Could not close and delete the temp file for the current spilled partition probe side.",
t);
}
}
// clear the partitions that are still to be done (that have files on disk)
for (int i = 0; i < this.partitionsPending.size(); i++) {
final HashPartition<BT, PT> p = this.partitionsPending.get(i);
p.clearAllMemory(this.availableMemory);
}
// return the write-behind buffers
for (int i = 0; i < this.numWriteBehindBuffers + this.writeBehindBuffersAvailable; i++) {
try {
this.availableMemory.add(this.writeBehindBuffers.take());
} catch (InterruptedException iex) {
throw new RuntimeException("Hashtable closing was interrupted");
}
}
this.writeBehindBuffersAvailable = 0;
} | 3.68 |
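A sketch of the expected lifecycle around close(), assuming the caller has already constructed the table with its memory segments, serializers, and I/O manager; the helper below only illustrates the open/iterate/close pattern.

static <BT, PT> void buildProbeAndRelease(
        MutableHashTable<BT, PT> table,
        MutableObjectIterator<BT> buildSide,
        MutableObjectIterator<PT> probeSide) throws IOException {
    table.open(buildSide, probeSide);
    try {
        while (table.nextRecord()) {
            // consume table.getCurrentProbeRecord() / table.getBuildSideIterator() here
        }
    } finally {
        // Always release the table, its spill files and write-behind buffers,
        // whether processing completed or was cancelled.
        table.close();
    }
}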
hudi_OptionsResolver_isBulkInsertOperation | /**
* Returns whether the table operation is 'bulk_insert'.
*/
public static boolean isBulkInsertOperation(Configuration conf) {
WriteOperationType operationType = WriteOperationType.fromValue(conf.getString(FlinkOptions.OPERATION));
return operationType == WriteOperationType.BULK_INSERT;
} | 3.68 |
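A minimal usage sketch with Flink's Configuration; the option value mirrors the WriteOperationType name.

import org.apache.flink.configuration.Configuration;
import org.apache.hudi.configuration.FlinkOptions;

Configuration conf = new Configuration();
conf.setString(FlinkOptions.OPERATION, "bulk_insert");
boolean bulkInsert = OptionsResolver.isBulkInsertOperation(conf); // true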
aws-saas-boost_AwsClientBuilderFactory_decorateBuilderWithDefaults | // VisibleForTesting
<C extends SdkClient, B extends AwsClientBuilder<B, C>> B decorateBuilderWithDefaults(B builder) {
return builder
.credentialsProvider(credentialsProvider)
.region(awsRegion);
} | 3.68 |
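A hypothetical call site from the same package (the method is package-visible and marked VisibleForTesting); the factory instance is assumed to be already constructed with a credentials provider and region.

import software.amazon.awssdk.services.s3.S3Client;

// Applies the shared credentials provider and region before building the client.
S3Client s3 = factory.decorateBuilderWithDefaults(S3Client.builder()).build();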
hbase_ServerRpcConnection_processRequest | /**
 * Processes a single request whose header, request param, and, optionally, an encoded data
 * buffer all arrive in this one buffer.
* <p/>
* Will be overridden in tests.
*/
protected void processRequest(ByteBuff buf) throws IOException, InterruptedException {
long totalRequestSize = buf.limit();
int offset = 0;
// Here we read in the header. We avoid having pb
// do its default 4k allocation for CodedInputStream. We force it to use
// backing array.
CodedInputStream cis = createCis(buf);
int headerSize = cis.readRawVarint32();
offset = cis.getTotalBytesRead();
Message.Builder builder = RequestHeader.newBuilder();
ProtobufUtil.mergeFrom(builder, cis, headerSize);
RequestHeader header = (RequestHeader) builder.build();
offset += headerSize;
Context traceCtx = GlobalOpenTelemetry.getPropagators().getTextMapPropagator()
.extract(Context.current(), header.getTraceInfo(), getter);
// n.b. Management of this Span instance is a little odd. Most exit paths from this try scope
// are early-exits due to error cases. There's only one success path, the asynchronous call to
// RpcScheduler#dispatch. The success path assumes ownership of the span, which is represented
// by null-ing out the reference in this scope. All other paths end the span. Thus, and in
// order to avoid accidentally orphaning the span, the call to Span#end happens in a finally
// block iff the span is non-null.
Span span = TraceUtil.createRemoteSpan("RpcServer.process", traceCtx);
try (Scope ignored = span.makeCurrent()) {
int id = header.getCallId();
// HBASE-28128 - if server is aborting, don't bother trying to process. It will
// fail at the handler layer, but worse might result in CallQueueTooBigException if the
// queue is full but server is not properly processing requests. Better to throw an aborted
// exception here so that the client can properly react.
if (rpcServer.server != null && rpcServer.server.isAborted()) {
RegionServerAbortedException serverIsAborted = new RegionServerAbortedException(
"Server " + rpcServer.server.getServerName() + " aborting");
this.rpcServer.metrics.exception(serverIsAborted);
sendErrorResponseForCall(id, totalRequestSize, span, serverIsAborted.getMessage(),
serverIsAborted);
return;
}
if (RpcServer.LOG.isTraceEnabled()) {
RpcServer.LOG.trace("RequestHeader " + TextFormat.shortDebugString(header)
+ " totalRequestSize: " + totalRequestSize + " bytes");
}
// Enforcing the call queue size, this triggers a retry in the client
// This is a bit late to be doing this check - we have already read in the
// total request.
if (
(totalRequestSize + this.rpcServer.callQueueSizeInBytes.sum())
> this.rpcServer.maxQueueSizeInBytes
) {
this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
sendErrorResponseForCall(id, totalRequestSize, span,
"Call queue is full on " + this.rpcServer.server.getServerName()
+ ", is hbase.ipc.server.max.callqueue.size too small?",
RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
return;
}
MethodDescriptor md = null;
Message param = null;
CellScanner cellScanner = null;
try {
if (header.hasRequestParam() && header.getRequestParam()) {
md = this.service.getDescriptorForType().findMethodByName(header.getMethodName());
if (md == null) {
throw new UnsupportedOperationException(header.getMethodName());
}
builder = this.service.getRequestPrototype(md).newBuilderForType();
cis.resetSizeCounter();
int paramSize = cis.readRawVarint32();
offset += cis.getTotalBytesRead();
if (builder != null) {
ProtobufUtil.mergeFrom(builder, cis, paramSize);
param = builder.build();
}
offset += paramSize;
} else {
// currently header must have request param, so we directly throw
// exception here
String msg = "Invalid request header: " + TextFormat.shortDebugString(header)
+ ", should have param set in it";
RpcServer.LOG.warn(msg);
throw new DoNotRetryIOException(msg);
}
if (header.hasCellBlockMeta()) {
buf.position(offset);
ByteBuff dup = buf.duplicate();
dup.limit(offset + header.getCellBlockMeta().getLength());
cellScanner = this.rpcServer.cellBlockBuilder.createCellScannerReusingBuffers(this.codec,
this.compressionCodec, dup);
}
} catch (Throwable thrown) {
InetSocketAddress address = this.rpcServer.getListenerAddress();
String msg = (address != null ? address : "(channel closed)")
+ " is unable to read call parameter from client " + getHostAddress();
RpcServer.LOG.warn(msg, thrown);
this.rpcServer.metrics.exception(thrown);
final Throwable responseThrowable;
if (thrown instanceof LinkageError) {
// probably the hbase hadoop version does not match the running hadoop version
responseThrowable = new DoNotRetryIOException(thrown);
} else if (thrown instanceof UnsupportedOperationException) {
// If the method is not present on the server, do not retry.
responseThrowable = new DoNotRetryIOException(thrown);
} else {
responseThrowable = thrown;
}
sendErrorResponseForCall(id, totalRequestSize, span,
msg + "; " + responseThrowable.getMessage(), responseThrowable);
return;
}
int timeout = 0;
if (header.hasTimeout() && header.getTimeout() > 0) {
timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout());
}
ServerCall<?> call = createCall(id, this.service, md, header, param, cellScanner,
totalRequestSize, this.addr, timeout, this.callCleanup);
if (this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) {
        // unset span so that it's not closed in the finally block
span = null;
} else {
this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize());
this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
call.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION,
"Call queue is full on " + this.rpcServer.server.getServerName()
+ ", too many items queued ?");
TraceUtil.setError(span, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
call.sendResponseIfReady();
}
} finally {
if (span != null) {
span.end();
}
}
} | 3.68 |
aws-saas-boost_CognitoKeyProvider_jwksUrl | // https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-verifying-a-jwt.html
protected static URL jwksUrl() {
URL url = null;
try {
url = new URL("https://cognito-idp." + AWS_REGION + ".amazonaws.com/" + USER_POOL_ID
+ "/.well-known/jwks.json");
} catch (MalformedURLException e) {
LOGGER.error(Utils.getFullStackTrace(e));
}
return url;
} | 3.68 |
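A minimal sketch that just fetches the JWKS document; real token verification would feed these keys into a JWT library, which is out of scope here.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;

URL jwks = jwksUrl();
if (jwks != null) {
    try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(jwks.openStream(), StandardCharsets.UTF_8))) {
        String json = reader.lines().collect(Collectors.joining());
        LOGGER.debug("Fetched JWKS document of " + json.length() + " characters");
    } catch (IOException e) {
        LOGGER.error(Utils.getFullStackTrace(e));
    }
}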
querydsl_ExpressionUtils_list | /**
* Create a list expression for the given arguments
 *
 * @param clazz type of the resulting list expression
 * @param exprs list elements
* @return list expression
*/
@SuppressWarnings("unchecked")
public static <T> Expression<T> list(Class<T> clazz, List<? extends Expression<?>> exprs) {
Expression<T> rv = (Expression<T>) exprs.get(0);
if (exprs.size() == 1) {
rv = operation(clazz, Ops.SINGLETON, rv, exprs.get(0));
} else {
for (int i = 1; i < exprs.size(); i++) {
rv = operation(clazz, Ops.LIST, rv, exprs.get(i));
}
}
return rv;
} | 3.68 |
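A small usage sketch; constants stand in for whatever expressions the caller combines.

import java.util.Arrays;
import com.querydsl.core.types.Expression;
import com.querydsl.core.types.ExpressionUtils;
import com.querydsl.core.types.dsl.Expressions;

// Folds the three constants into nested LIST operations, e.g. for use in an IN clause.
Expression<Object> tuple = ExpressionUtils.list(Object.class, Arrays.asList(
        Expressions.constant("a"), Expressions.constant("b"), Expressions.constant("c")));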
hmily_InLineServiceImpl_confirm | /**
 * Confirm.
*/
public void confirm() {
System.out.println("执行inline confirm......");
} | 3.68 |
framework_ApplicationConnection_addGetParameters | /**
* Adds the get parameters to the uri and returns the new uri that contains
* the parameters.
*
* @param uri
* The uri to which the parameters should be added.
* @param extraParams
* One or more parameters in the format "a=b" or "c=d&e=f". An
* empty string is allowed but will not modify the url.
* @return The modified URI with the get parameters in extraParams added.
* @deprecated Use {@link SharedUtil#addGetParameters(String,String)}
* instead
*/
@Deprecated
public static String addGetParameters(String uri, String extraParams) {
return SharedUtil.addGetParameters(uri, extraParams);
} | 3.68 |
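A simple illustration of the delegation; the URL and parameters are arbitrary.

String uri = ApplicationConnection.addGetParameters(
        "http://example.com/app", "theme=mytheme&debug=true");
// expected: "http://example.com/app?theme=mytheme&debug=true"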