name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
flink_Costs_getCpuCost | /**
* Gets the cost for the CPU.
*
* @return The CPU Cost.
*/
public double getCpuCost() {
return this.cpuCost;
} | 3.68 |
flink_ArrayResultIterator_set | /**
* Sets the records to be returned by this iterator. Each record's {@link RecordAndPosition}
 * will have the same offset (for {@link RecordAndPosition#getOffset()}). The first returned
* record will have a records-to-skip count of {@code skipCountOfFirst + 1}, following the
* contract that each record needs to point to the position AFTER itself (because a checkpoint
* taken after the record was emitted needs to resume from after that record).
*/
public void set(
final E[] records, final int num, final long offset, final long skipCountOfFirst) {
this.records = records;
this.num = num;
this.pos = 0;
this.recordAndPosition.set(null, offset, skipCountOfFirst);
} | 3.68 |
shardingsphere-elasticjob_JobScheduleController_isPaused | /**
 * Judge whether the job is paused or not.
 *
 * @return whether the job is paused
*/
public synchronized boolean isPaused() {
try {
return !scheduler.isShutdown() && Trigger.TriggerState.PAUSED == scheduler.getTriggerState(new TriggerKey(triggerIdentity));
} catch (final SchedulerException ex) {
throw new JobSystemException(ex);
}
} | 3.68 |
Activiti_BaseEntityEventListener_onInitialized | /**
* Called when an entity initialized event is received.
*/
protected void onInitialized(ActivitiEvent event) {
// Default implementation is a NO-OP
} | 3.68 |
flink_DataSink_withParameters | /**
* Pass a configuration to the OutputFormat.
*
* @param parameters Configuration parameters
*/
public DataSink<T> withParameters(Configuration parameters) {
this.parameters = parameters;
return this;
} | 3.68 |
streampipes_AssetLinkBuilder_withResourceId | /**
* Sets the resource ID for the AssetLink being built.
*
* @param resourceId The resource ID to set.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withResourceId(String resourceId) {
this.assetLink.setResourceId(resourceId);
return this;
} | 3.68 |
flink_PartitionTempFileManager_collectPartSpecToPaths | /** Collect all partitioned paths, aggregate according to partition spec. */
public static Map<LinkedHashMap<String, String>, List<Path>> collectPartSpecToPaths(
FileSystem fs, List<Path> taskPaths, int partColSize) {
Map<LinkedHashMap<String, String>, List<Path>> specToPaths = new HashMap<>();
for (Path taskPath : taskPaths) {
searchPartSpecAndPaths(fs, taskPath, partColSize)
.forEach(
tuple2 ->
specToPaths.compute(
tuple2.f0,
(spec, paths) -> {
paths = paths == null ? new ArrayList<>() : paths;
paths.add(tuple2.f1);
return paths;
}));
}
return specToPaths;
} | 3.68 |
hbase_TableBackupClient_cleanupTargetDir | /**
* Clean up the uncompleted data at target directory if the ongoing backup has already entered the
* copy phase.
*/
protected static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) {
try {
// clean up the uncompleted data at target directory if the ongoing backup has already entered
// the copy phase
LOG.debug("Trying to cleanup up target dir. Current backup phase: " + backupInfo.getPhase());
if (
backupInfo.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
|| backupInfo.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
|| backupInfo.getPhase().equals(BackupPhase.STORE_MANIFEST)
) {
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
// now treat one backup as a transaction, clean up data that has been partially copied at
// table level
for (TableName table : backupInfo.getTables()) {
Path targetDirPath = new Path(HBackupFileSystem
.getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(), table));
if (outputFs.delete(targetDirPath, true)) {
LOG.debug(
"Cleaning up uncompleted backup data at " + targetDirPath.toString() + " done.");
} else {
LOG.debug("No data has been copied to " + targetDirPath.toString() + ".");
}
Path tableDir = targetDirPath.getParent();
FileStatus[] backups = CommonFSUtils.listStatus(outputFs, tableDir);
if (backups == null || backups.length == 0) {
outputFs.delete(tableDir, true);
LOG.debug(tableDir.toString() + " is empty, remove it.");
}
}
}
} catch (IOException e1) {
LOG.error("Cleaning up uncompleted backup data of " + backupInfo.getBackupId() + " at "
+ backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
}
} | 3.68 |
hadoop_NamenodeStatusReport_getNumDecomDeadDatanodes | /**
* Get the number of dead decommissioned nodes.
*
* @return The number of dead decommissioned nodes.
*/
public int getNumDecomDeadDatanodes() {
return this.deadDecomDatanodes;
} | 3.68 |
flink_JobVertex_setSlotSharingGroup | /**
* Associates this vertex with a slot sharing group for scheduling. Different vertices in the
* same slot sharing group can run one subtask each in the same slot.
*
* @param grp The slot sharing group to associate the vertex with.
*/
public void setSlotSharingGroup(SlotSharingGroup grp) {
checkNotNull(grp);
if (this.slotSharingGroup != null) {
this.slotSharingGroup.removeVertexFromGroup(this.getID());
}
grp.addVertexToGroup(this.getID());
this.slotSharingGroup = grp;
} | 3.68 |
framework_Header_join | /**
 * Merges column cells in the row. The original cells are hidden, and a
 * new merged cell is shown instead. The merged cell spans the combined
 * width of all merged cells, inherits the styles of the first merged
 * cell, but has an empty caption.
*
* @param cellsToMerge
 *            the cells which should be merged. The cells should not
 *            already be merged with any other cell set.
* @return the remaining visible cell after the merge
*
* @see #join(Set)
* @see com.vaadin.ui.AbstractComponent#setCaption(String) setCaption
*/
@Override
public HeaderCell join(HeaderCell... cellsToMerge) {
return join(Stream.of(cellsToMerge));
} | 3.68 |
framework_FocusableFlexTable_setFocus | /**
* Sets the keyboard focus to the panel.
*
* @param focus
 *            Should the panel have keyboard focus. If true, the keyboard
 *            focus will be moved to the panel; if false, the focus is removed.
*/
public void setFocus(boolean focus) {
if (focus) {
FocusImpl.getFocusImplForPanel().focus(getElement());
} else {
FocusImpl.getFocusImplForPanel().blur(getElement());
}
} | 3.68 |
hbase_FilterList_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.FilterList.Builder builder = FilterProtos.FilterList.newBuilder();
builder.setOperator(FilterProtos.FilterList.Operator.valueOf(operator.name()));
ArrayList<Filter> filters = filterListBase.getFilters();
for (int i = 0, n = filters.size(); i < n; i++) {
builder.addFilters(ProtobufUtil.toFilter(filters.get(i)));
}
return builder.build().toByteArray();
} | 3.68 |
morf_AbstractSqlDialectTest_testUseIndexOnMerge | /**
* Check that we don't allow the use of the use index hint with a MERGE.
*/
@Test(expected = IllegalArgumentException.class)
public void testUseIndexOnMerge() {
testDialect.convertStatementToSQL(
merge()
.into(tableRef("a"))
.from(
select()
.from(tableRef("b"))
.useIndex(tableRef("b"), "b_1")
)
.tableUniqueKey(field("id"))
);
} | 3.68 |
hbase_MemStoreFlusher_unregisterFlushRequestListener | /**
* Unregister the listener from MemstoreFlushListeners
* @return true when passed listener is unregistered successfully.
*/
@Override
public boolean unregisterFlushRequestListener(final FlushRequestListener listener) {
return this.flushRequestListeners.remove(listener);
} | 3.68 |
hbase_RegionNormalizerWorkQueue_take | /**
* Retrieves and removes the head of this queue, waiting if necessary until an element becomes
* available.
* @return the head of this queue
* @throws InterruptedException if interrupted while waiting
*/
public E take() throws InterruptedException {
E x;
// Take a write lock. If the delegate's queue is empty we need it to await(), which will
// drop the lock, then reacquire it; or if the queue is not empty we will use an iterator
// to mutate the head.
lock.writeLock().lockInterruptibly();
try {
while (delegate.isEmpty()) {
notEmpty.await(); // await drops the lock, then reacquires it
}
final Iterator<E> iter = delegate.iterator();
x = iter.next();
iter.remove();
if (!delegate.isEmpty()) {
notEmpty.signal();
}
} finally {
lock.writeLock().unlock();
}
return x;
} | 3.68 |
hbase_AbstractFSWAL_append | /**
* Append a set of edits to the WAL.
* <p/>
* The WAL is not flushed/sync'd after this transaction completes BUT on return this edit must
* have its region edit/sequence id assigned else it messes up our unification of mvcc and
* sequenceid. On return <code>key</code> will have the region edit/sequence id filled in.
* <p/>
 * NOTE: This append, usually at a time after this call returns, starts an mvcc transaction by
 * calling 'begin', wherein we assign this update a sequenceid. At assignment time, we stamp all
 * the passed-in Cells inside WALEdit with their sequenceId. You must 'complete' this mvcc
 * transaction by calling MultiVersionConcurrencyControl#complete(...) or a variant, otherwise
 * mvcc will get stuck. Do it in the finally of a try/finally block within which this append
 * lives, along with any subsequent
* operations like sync or update of memstore, etc. Get the WriteEntry to pass mvcc out of the
* passed in WALKey <code>walKey</code> parameter. Be warned that the WriteEntry is not
* immediately available on return from this method. It WILL be available subsequent to a sync of
* this append; otherwise, you will just have to wait on the WriteEntry to get filled in.
* @param hri the regioninfo associated with append
* @param key Modified by this call; we add to it this edits region edit/sequence id.
* @param edits Edits to append. MAY CONTAIN NO EDITS for case where we want to get an edit
* sequence id that is after all currently appended edits.
* @param inMemstore Always true except for case where we are writing a region event meta marker
* edit, for example, a compaction completion record into the WAL or noting a
* Region Open event. In these cases the entry is just so we can finish an
* unfinished compaction after a crash when the new Server reads the WAL on
* recovery, etc. These transition event 'Markers' do not go via the memstore.
* When memstore is false, we presume a Marker event edit.
* @return Returns a 'transaction id' and <code>key</code> will have the region edit/sequence id
* in it.
*/
protected long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean inMemstore)
throws IOException {
if (markerEditOnly && !edits.isMetaEdit()) {
throw new IOException("WAL is closing, only marker edit is allowed");
}
long txid =
stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, waitingConsumePayloads);
if (shouldScheduleConsumer()) {
consumeExecutor.execute(consumer);
}
return txid;
} | 3.68 |
hudi_RunLengthDecoder_readUnsignedVarInt | /**
* Reads the next varint encoded int.
*/
private int readUnsignedVarInt() throws IOException {
int value = 0;
int shift = 0;
int b;
do {
b = in.read();
value |= (b & 0x7F) << shift;
shift += 7;
} while ((b & 0x80) != 0);
return value;
} | 3.68 |
flink_RocksDBStateBackend_getRocksDBOptions | /**
* Gets {@link org.rocksdb.Options} for the RocksDB instances.
*
* <p>The options created by the factory here are applied on top of the pre-defined options
* profile selected via {@link #setPredefinedOptions(PredefinedOptions)}. If the pre-defined
* options profile is the default ({@link PredefinedOptions#DEFAULT}), then the factory fully
* controls the RocksDB options.
*/
@Nullable
public RocksDBOptionsFactory getRocksDBOptions() {
return rocksDBStateBackend.getRocksDBOptions();
} | 3.68 |
hbase_Bytes_toBinaryByteArrays | /**
* Create an array of byte[] given an array of String.
* @param t operands
* @return Array of binary byte arrays made from passed array of binary strings
*/
public static byte[][] toBinaryByteArrays(final String[] t) {
byte[][] result = new byte[t.length][];
for (int i = 0; i < t.length; i++) {
result[i] = Bytes.toBytesBinary(t[i]);
}
return result;
} | 3.68 |
hbase_ProcedureStoreTracker_resetTo | /**
 * Resets internal state to be the same as the given {@code tracker}, and changes the deleted flag
 * according to the modified flag if {@code resetDelete} is true. Does a deep copy of the bitmap.
* <p/>
* The {@code resetDelete} will be set to true when building cleanup tracker, please see the
* comments in {@link BitSetNode#BitSetNode(BitSetNode, boolean)} to learn how we change the
* deleted flag if {@code resetDelete} is true.
*/
public void resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
reset();
        // resetDelete will be true if we are building the cleanup tracker, as we will reset deleted flags
// for all the unmodified bits to 1, the partial flag is useless so set it to false for not
// confusing the developers when debugging.
this.partial = resetDelete ? false : tracker.partial;
this.minModifiedProcId = tracker.minModifiedProcId;
this.maxModifiedProcId = tracker.maxModifiedProcId;
this.keepDeletes = tracker.keepDeletes;
for (Map.Entry<Long, BitSetNode> entry : tracker.map.entrySet()) {
map.put(entry.getKey(), new BitSetNode(entry.getValue(), resetDelete));
}
} | 3.68 |
hbase_MasterProcedureScheduler_wakeMetaExclusiveLock | /**
* Wake the procedures waiting for meta.
* @see #waitMetaExclusiveLock(Procedure)
* @param procedure the procedure releasing the lock
* @deprecated only used for {@link RecoverMetaProcedure}. Should be removed along with
* {@link RecoverMetaProcedure}.
*/
@Deprecated
public void wakeMetaExclusiveLock(Procedure<?> procedure) {
schedLock();
try {
final LockAndQueue lock = locking.getMetaLock();
lock.releaseExclusiveLock(procedure);
addToRunQueue(metaRunQueue, getMetaQueue(), () -> procedure + " released exclusive lock");
int waitingCount = wakeWaitingProcedures(lock);
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.68 |
graphhopper_Helper_isFileMapped | /**
* Determines if the specified ByteBuffer is one which maps to a file!
*/
public static boolean isFileMapped(ByteBuffer bb) {
if (bb instanceof MappedByteBuffer) {
try {
((MappedByteBuffer) bb).isLoaded();
return true;
} catch (UnsupportedOperationException ex) {
}
}
return false;
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_getNumberOfTransferThreads | /** Gets the number of threads used to transfer files while snapshotting/restoring. */
public int getNumberOfTransferThreads() {
return numberOfTransferThreads == UNDEFINED_NUMBER_OF_TRANSFER_THREADS
? CHECKPOINT_TRANSFER_THREAD_NUM.defaultValue()
: numberOfTransferThreads;
} | 3.68 |
AreaShop_GeneralRegion_getTeleportFeature | /**
* Get the teleport feature to teleport players to the region and signs.
* @return The TeleportFeature
*/
public TeleportFeature getTeleportFeature() {
return getFeature(TeleportFeature.class);
} | 3.68 |
hadoop_FilePool_getInputFiles | /**
* Gather a collection of files at least as large as minSize.
* @return The total size of files returned.
*/
public long getInputFiles(long minSize, Collection<FileStatus> files)
throws IOException {
updateLock.readLock().lock();
try {
return root.selectFiles(minSize, files);
} finally {
updateLock.readLock().unlock();
}
} | 3.68 |
flink_DispatcherResourceCleanerFactory_ofLocalResource | /**
* A simple wrapper for the resources that don't have any artifacts that can outlive the {@link
* org.apache.flink.runtime.dispatcher.Dispatcher}, but we still want to clean up their local
* state when we terminate globally.
*
* @param localResource Local resource that we want to clean during a global cleanup.
* @return Globally cleanable resource.
*/
private static GloballyCleanableResource ofLocalResource(
LocallyCleanableResource localResource) {
return localResource::localCleanupAsync;
} | 3.68 |
hadoop_YarnVersionInfo_getRevision | /**
* Get the subversion revision number for the root directory
* @return the revision number, eg. "451451"
*/
public static String getRevision() {
return YARN_VERSION_INFO._getRevision();
} | 3.68 |
MagicPlugin_ConfigUtils_fromLocation | /**
* Serialises a location as a relative offset to another location.
*
* <p>One of the following formats may be used, depending on the
* availability of the locations, whether or not they are in the same
* world and whether or not the pitch and the yaw are the same.
*
* <ul>
* <li>{@code ""}, the empty string, when no location was specified, or the
* center location was not valid.
* <li>{@code "x,y,z"}, offsets relative to the center, with the same
* orientation.
* <li>{@code "x,y,z,yaw,pitch"}, offset relative to the center, with
* absolute rotations.
* <li>{@code "x,y,z,world,yaw,pitch"}, an absolute location an orientation
* </ul>
*
* @param location The location to serialise.
* @param relativeTo The center to make the location relative to.
* @return The string representation.
*/
@Nonnull
public static String fromLocation(
@Nullable Location location,
@Nullable Location relativeTo) {
if (location == null || location.getWorld() == null) {
// Invalid location
return "";
} else if (relativeTo != null && relativeTo.getWorld() == null) {
// Invalid center
// FIXME: Shouldn't we just return a non-relative location?
return "";
} else if (relativeTo == null
|| !relativeTo.getWorld().equals(location.getWorld())) {
// No relative, or they are not in the same world
return fromLocation(location);
}
// Make location relative to relativeTo
location = location.clone();
location.subtract(relativeTo);
String serialized = location.getX() + "," + location.getY() + "," + location.getZ();
if (location.getPitch() != relativeTo.getPitch() || location.getYaw() != relativeTo.getYaw()) {
serialized += "," + location.getYaw() + "," + location.getPitch();
}
return serialized;
} | 3.68 |
hbase_CellUtil_copyValueTo | /**
* Copies the value to the given bytebuffer
* @param cell the cell whose value has to be copied
* @param destination the destination bytebuffer to which the value has to be copied
* @param destinationOffset the offset in the destination bytebuffer
* @return the offset of the bytebuffer after the copy has happened
*/
public static int copyValueTo(Cell cell, ByteBuffer destination, int destinationOffset) {
int vlen = cell.getValueLength();
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getValueByteBuffer(),
destination, ((ByteBufferExtendedCell) cell).getValuePosition(), destinationOffset, vlen);
} else {
ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getValueArray(),
cell.getValueOffset(), vlen);
}
return destinationOffset + vlen;
} | 3.68 |
framework_AbstractRemoteDataSource_setTrackInvalidatedRows | /**
* Sets whether or not to track invalidated rows inside
* {@link #insertRowData(int, int)} and use them to fill cache when
 * {@link #setRowData(int, List)} is called.
*
* @param trackInvalidatedRows
* a boolean value specifying if to track invalidated rows or
* not, default value <code>true</code>
*/
public void setTrackInvalidatedRows(boolean trackInvalidatedRows) {
this.trackInvalidatedRows = trackInvalidatedRows;
} | 3.68 |
hbase_AbstractRpcClient_isTcpNoDelay | // for writing tests that want to throw exception when connecting.
protected boolean isTcpNoDelay() {
return tcpNoDelay;
} | 3.68 |
flink_SkipListUtils_removeAllValues | /**
 * Free the space of the linked values; the head value is pointed to by the given pointer.
*
* @param valuePointer the pointer of the value to start removing.
* @param spaceAllocator the space allocator.
*/
static void removeAllValues(long valuePointer, Allocator spaceAllocator) {
long nextValuePointer;
while (valuePointer != NIL_VALUE_POINTER) {
nextValuePointer = helpGetNextValuePointer(valuePointer, spaceAllocator);
spaceAllocator.free(valuePointer);
valuePointer = nextValuePointer;
}
} | 3.68 |
flink_ObjectIdentifier_asSummaryString | /** Returns a string that summarizes this instance for printing to a console or log. */
public String asSummaryString() {
if (catalogName == null) {
return objectName;
}
return String.join(".", catalogName, databaseName, objectName);
} | 3.68 |
pulsar_OwnershipCache_isNamespaceBundleOwned | /**
 * Checks whether a particular bundle is currently owned by this broker.
*
* @param bundle
* @return
*/
public boolean isNamespaceBundleOwned(NamespaceBundle bundle) {
OwnedBundle ownedBundle = getOwnedBundle(bundle);
return ownedBundle != null && ownedBundle.isActive();
} | 3.68 |
querydsl_AbstractCollQuery_innerJoin | /**
* Define an inner join from the Map typed path to the alias
*
* @param <P> type of expression
* @param target target of the join
* @param alias alias for the join target
* @return current object
*/
public <P> Q innerJoin(MapExpression<?,P> target, Path<P> alias) {
getMetadata().addJoin(JoinType.INNERJOIN, createAlias(target, alias));
return queryMixin.getSelf();
} | 3.68 |
hadoop_StoragePolicySatisfyManager_isEnabled | /**
* @return true if sps is configured as an external
* service, false otherwise.
*/
public boolean isEnabled() {
return mode == StoragePolicySatisfierMode.EXTERNAL;
} | 3.68 |
hbase_UserQuotaState_getTableLimiter | /**
* Return the limiter for the specified table associated with this quota. If the table does not
* have its own quota limiter the global one will be returned. In case there is no quota limiter
* associated with this object a noop limiter will be returned.
* @return the quota limiter for the specified table
*/
public synchronized QuotaLimiter getTableLimiter(final TableName table) {
lastQuery = EnvironmentEdgeManager.currentTime();
if (tableLimiters != null) {
QuotaLimiter limiter = tableLimiters.get(table);
if (limiter != null) return limiter;
}
if (namespaceLimiters != null) {
QuotaLimiter limiter = namespaceLimiters.get(table.getNamespaceAsString());
if (limiter != null) return limiter;
}
return getGlobalLimiterWithoutUpdatingLastQuery();
} | 3.68 |
flink_HsMemoryDataSpiller_spillAsync | /**
 * Spills buffers to disk asynchronously.
 *
 * @param bufferToSpill buffers that need to be spilled; the caller must ensure that the list
 *     is sorted by (subpartitionId, bufferIndex).
* @return the completable future contains spilled buffers information.
*/
public CompletableFuture<List<SpilledBuffer>> spillAsync(
List<BufferWithIdentity> bufferToSpill) {
CompletableFuture<List<SpilledBuffer>> spilledFuture = new CompletableFuture<>();
ioExecutor.execute(() -> spill(bufferToSpill, spilledFuture));
return spilledFuture;
} | 3.68 |
morf_SchemaModificationAdapter_initialiseTableSchema | /**
 * Make sure the database is ready to receive the data. Check that the table schema matches what we expect; if it doesn't, drop and re-create the table.
*
* @param table The source table we're expecting.
*/
private void initialiseTableSchema(Table table) {
SqlScriptExecutor sqlExecutor = databaseDataSetConsumer.getSqlExecutor();
// check whether the table already exists
if (schemaResource.tableExists(table.getName())) {
// if the table exists, we need to check it's of the right schema
Table databaseTableMetaData = schemaResource.getTable(table.getName());
if (!new SchemaHomology().tablesMatch(table, databaseTableMetaData)) {
// there was a difference. Drop and re-deploy
log.debug("Replacing table [" + table.getName() + "] with different version");
dropExistingViewsIfNecessary();
dropExistingIndexesIfNecessary(table);
sqlExecutor.execute(sqlDialect.dropStatements(databaseTableMetaData), connection);
sqlExecutor.execute(sqlDialect.tableDeploymentStatements(table), connection);
} else {
// Remove the index names that are now part of the modified schema
table.indexes().forEach(index -> existingIndexNamesAndTables.remove(index.getName().toUpperCase()));
}
} else {
log.debug("Deploying missing table [" + table.getName() + "]");
dropExistingViewsIfNecessary();
dropExistingIndexesIfNecessary(table);
sqlExecutor.execute(sqlDialect.tableDeploymentStatements(table), connection);
}
} | 3.68 |
hbase_Sleeper_getPeriod | /** Returns the sleep period in milliseconds */
public final int getPeriod() {
return period;
} | 3.68 |
MagicPlugin_TextUtils_toHexString | // This is mainly meant for colors, since it only uses 6 characters for RGB
public static String toHexString(int i) {
return String.format("%06X", i);
} | 3.68 |
flink_SlotSharingGroup_setExternalResource | /**
* Add the given external resource. The old value with the same resource name will be
* replaced if present.
*/
public Builder setExternalResource(String name, double value) {
this.externalResources.put(name, value);
return this;
} | 3.68 |
hbase_ExecutorService_submit | /**
* Submit the event to the queue for handling.
*/
void submit(final EventHandler event) {
// If there is a listener for this type, make sure we call the before
// and after process methods.
this.threadPoolExecutor.execute(event);
} | 3.68 |
hadoop_TypedBytesInput_readVector | /**
* Reads the vector following a <code>Type.VECTOR</code> code.
* @return the obtained vector
* @throws IOException
*/
@SuppressWarnings("unchecked")
public ArrayList readVector() throws IOException {
int length = readVectorHeader();
ArrayList result = new ArrayList(length);
for (int i = 0; i < length; i++) {
result.add(read());
}
return result;
} | 3.68 |
hadoop_EncryptionSecretOperations_getSSECustomerKey | /***
* Gets the SSE-C client side key if present.
*
* @param secrets source of the encryption secrets.
* @return an optional key to attach to a request.
*/
public static Optional<String> getSSECustomerKey(final EncryptionSecrets secrets) {
if (secrets.hasEncryptionKey() && secrets.getEncryptionMethod() == S3AEncryptionMethods.SSE_C) {
return Optional.of(secrets.getEncryptionKey());
} else {
return Optional.empty();
}
} | 3.68 |
flink_RocksDBOptionsFactory_createNativeMetricsOptions | /**
* This method should enable certain RocksDB metrics to be forwarded to Flink's metrics
* reporter.
*
 * <p>Enabling these monitoring options may degrade RocksDB performance and should be set with
* care.
*
* @param nativeMetricOptions The options object with the pre-defined options.
* @return The options object on which the additional options are set.
*/
default RocksDBNativeMetricOptions createNativeMetricsOptions(
RocksDBNativeMetricOptions nativeMetricOptions) {
return nativeMetricOptions;
} | 3.68 |
flink_CsvReader_parseQuotedStrings | /**
* Enables quoted String parsing. Field delimiters in quoted Strings are ignored. A String is
* parsed as quoted if it starts and ends with a quoting character and as unquoted otherwise.
 * Leading or trailing whitespace is not allowed.
*
* @param quoteCharacter The character which is used as quoting character.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader parseQuotedStrings(char quoteCharacter) {
this.parseQuotedStrings = true;
this.quoteCharacter = quoteCharacter;
return this;
} | 3.68 |
flink_LocalProperties_areFieldsUnique | /**
* Checks whether the given set of fields is unique, as specified in these local properties.
*
* @param set The set to check.
* @return True, if the given column combination is unique, false if not.
*/
public boolean areFieldsUnique(FieldSet set) {
return this.uniqueFields != null && this.uniqueFields.contains(set);
} | 3.68 |
pulsar_NarUnpacker_calculateMd5sum | /**
* Calculates an md5 sum of the specified file.
*
* @param file
* to calculate the md5sum of
* @return the md5sum bytes
* @throws IOException
* if cannot read file
*/
private static byte[] calculateMd5sum(final File file) throws IOException {
try (final FileInputStream inputStream = new FileInputStream(file)) {
final MessageDigest md5 = MessageDigest.getInstance("md5");
final byte[] buffer = new byte[1024];
int read = inputStream.read(buffer);
while (read > -1) {
md5.update(buffer, 0, read);
read = inputStream.read(buffer);
}
return md5.digest();
} catch (NoSuchAlgorithmException nsae) {
throw new IllegalArgumentException(nsae);
}
} | 3.68 |
hbase_VisibilityClient_clearAuths | /**
* Removes given labels from user's globally authorized list of labels.
*/
public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths,
final String user) throws Throwable {
return setOrClearAuths(connection, auths, user, false);
} | 3.68 |
dubbo_ServiceInstancesChangedListener_doOnEvent | /**
 * @param event the service instances changed event
*/
private synchronized void doOnEvent(ServiceInstancesChangedEvent event) {
if (destroyed.get() || !accept(event) || isRetryAndExpired(event)) {
return;
}
refreshInstance(event);
if (logger.isDebugEnabled()) {
logger.debug(event.getServiceInstances().toString());
}
Map<String, List<ServiceInstance>> revisionToInstances = new HashMap<>();
Map<ServiceInfo, Set<String>> localServiceToRevisions = new HashMap<>();
// grouping all instances of this app(service name) by revision
for (Map.Entry<String, List<ServiceInstance>> entry : allInstances.entrySet()) {
List<ServiceInstance> instances = entry.getValue();
for (ServiceInstance instance : instances) {
String revision = getExportedServicesRevision(instance);
if (revision == null || EMPTY_REVISION.equals(revision)) {
if (logger.isDebugEnabled()) {
logger.debug("Find instance without valid service metadata: " + instance.getAddress());
}
continue;
}
List<ServiceInstance> subInstances =
revisionToInstances.computeIfAbsent(revision, r -> new LinkedList<>());
subInstances.add(instance);
}
}
// get MetadataInfo with revision
for (Map.Entry<String, List<ServiceInstance>> entry : revisionToInstances.entrySet()) {
String revision = entry.getKey();
List<ServiceInstance> subInstances = entry.getValue();
MetadataInfo metadata = subInstances.stream()
.map(ServiceInstance::getServiceMetadata)
.filter(Objects::nonNull)
.filter(m -> revision.equals(m.getRevision()))
.findFirst()
.orElseGet(() -> serviceDiscovery.getRemoteMetadata(revision, subInstances));
parseMetadata(revision, metadata, localServiceToRevisions);
// update metadata into each instance, in case new instance created.
for (ServiceInstance tmpInstance : subInstances) {
MetadataInfo originMetadata = tmpInstance.getServiceMetadata();
if (originMetadata == null || !Objects.equals(originMetadata.getRevision(), metadata.getRevision())) {
tmpInstance.setServiceMetadata(metadata);
}
}
}
int emptyNum = hasEmptyMetadata(revisionToInstances);
if (emptyNum != 0) { // retry every 10 seconds
hasEmptyMetadata = true;
if (retryPermission.tryAcquire()) {
if (retryFuture != null && !retryFuture.isDone()) {
// cancel last retryFuture because only one retryFuture will be canceled at destroy().
retryFuture.cancel(true);
}
try {
retryFuture = scheduler.schedule(
new AddressRefreshRetryTask(retryPermission, event.getServiceName()),
10_000L,
TimeUnit.MILLISECONDS);
} catch (Exception e) {
logger.error(
INTERNAL_ERROR,
"unknown error in registry module",
"",
"Error submitting async retry task.");
}
logger.warn(
INTERNAL_ERROR, "unknown error in registry module", "", "Address refresh try task submitted");
}
// return if all metadata is empty, this notification will not take effect.
if (emptyNum == revisionToInstances.size()) {
// 1-17 - Address refresh failed.
logger.error(
REGISTRY_FAILED_REFRESH_ADDRESS,
"metadata Server failure",
"",
"Address refresh failed because of Metadata Server failure, wait for retry or new address refresh event.");
return;
}
}
hasEmptyMetadata = false;
Map<String, Map<Integer, Map<Set<String>, Object>>> protocolRevisionsToUrls = new HashMap<>();
Map<String, List<ProtocolServiceKeyWithUrls>> newServiceUrls = new HashMap<>();
for (Map.Entry<ServiceInfo, Set<String>> entry : localServiceToRevisions.entrySet()) {
ServiceInfo serviceInfo = entry.getKey();
Set<String> revisions = entry.getValue();
Map<Integer, Map<Set<String>, Object>> portToRevisions =
protocolRevisionsToUrls.computeIfAbsent(serviceInfo.getProtocol(), k -> new HashMap<>());
Map<Set<String>, Object> revisionsToUrls =
portToRevisions.computeIfAbsent(serviceInfo.getPort(), k -> new HashMap<>());
Object urls = revisionsToUrls.computeIfAbsent(
revisions,
k -> getServiceUrlsCache(
revisionToInstances, revisions, serviceInfo.getProtocol(), serviceInfo.getPort()));
List<ProtocolServiceKeyWithUrls> list =
newServiceUrls.computeIfAbsent(serviceInfo.getPath(), k -> new LinkedList<>());
list.add(new ProtocolServiceKeyWithUrls(serviceInfo.getProtocolServiceKey(), (List<URL>) urls));
}
this.serviceUrls = newServiceUrls;
this.notifyAddressChanged();
} | 3.68 |
hadoop_DockerContainerDeletionTask_getContainerId | /**
* Get the id of the container to delete.
*
* @return the id of the container to delete.
*/
public String getContainerId() {
return containerId;
} | 3.68 |
morf_InsertStatement_into | /**
* Inserts into a specific table.
*
* <blockquote><pre>insert().into(tableRef("agreement"));</pre></blockquote>
*
* @param intoTable the table to insert into.
* @return a statement with the changes applied.
*/
public InsertStatement into(TableReference intoTable) {
return copyOnWriteOrMutate(
b -> b.into(intoTable),
() -> this.table = intoTable
);
} | 3.68 |
hadoop_RLESparseResourceAllocation_getLatestNonNullTime | /**
* Get the timestamp of the latest non-null resource allocation.
*
* @return the timestamp of the last resource allocation
*/
public long getLatestNonNullTime() {
readLock.lock();
try {
if (cumulativeCapacity.isEmpty()) {
return -1;
} else {
// the last entry might contain null (to terminate
// the sequence)... return previous one.
Entry<Long, Resource> last = cumulativeCapacity.lastEntry();
if (last.getValue() == null) {
return cumulativeCapacity.floorKey(last.getKey() - 1);
} else {
return last.getKey();
}
}
} finally {
readLock.unlock();
}
} | 3.68 |
morf_ConnectionResourcesBean_getInstanceName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#getInstanceName()
*/
@Override
public String getInstanceName() {
return instanceName;
} | 3.68 |
hadoop_ApplicationColumnPrefix_getColumnPrefix | /**
 * @return the column prefix value
*/
private String getColumnPrefix() {
return columnPrefix;
} | 3.68 |
hadoop_SQLDelegationTokenSecretManager_setDelegationTokenSeqNum | /**
* Updates the value of the last reserved sequence number.
* @param seqNum Value to update the sequence number to.
*/
@Override
public void setDelegationTokenSeqNum(int seqNum) {
try {
updateSequenceNum(seqNum);
} catch (SQLException e) {
throw new RuntimeException(
"Failed to update token sequence number in SQL secret manager", e);
}
} | 3.68 |
querydsl_JTSCurveExpression_length | /**
* The length of this Curve in its associated spatial reference.
*
* @return length
*/
public NumberExpression<Double> length() {
if (length == null) {
length = Expressions.numberOperation(Double.class, SpatialOps.LENGTH, mixin);
}
return length;
} | 3.68 |
hbase_ConnectionCache_getCurrentConnection | /**
* Get the cached connection for the current user. If none or timed out, create a new one.
*/
ConnectionInfo getCurrentConnection() throws IOException {
String userName = getEffectiveUser();
ConnectionInfo connInfo = connections.get(userName);
if (connInfo == null || !connInfo.updateAccessTime()) {
Lock lock = locker.acquireLock(userName);
try {
connInfo = connections.get(userName);
if (connInfo == null) {
UserGroupInformation ugi = realUser;
if (!userName.equals(realUserName)) {
ugi = UserGroupInformation.createProxyUser(userName, realUser);
}
User user = userProvider.create(ugi);
Connection conn = ConnectionFactory.createConnection(conf, user);
connInfo = new ConnectionInfo(conn, userName);
connections.put(userName, connInfo);
}
} finally {
lock.unlock();
}
}
return connInfo;
} | 3.68 |
flink_CheckpointProperties_discardOnJobCancelled | /**
* Returns whether the checkpoint should be discarded when the owning job reaches the {@link
* JobStatus#CANCELED} state.
*
* @return <code>true</code> if the checkpoint should be discarded when the owning job reaches
* the {@link JobStatus#CANCELED} state; <code>false</code> otherwise.
* @see CompletedCheckpointStore
*/
boolean discardOnJobCancelled() {
return discardCancelled;
} | 3.68 |
dubbo_PropertySourcesUtils_getPropertyNames | /**
 * Get the property names as an array from the specified {@link PropertySource} instance.
*
* @param propertySource {@link PropertySource} instance
* @return non-null
*/
public static String[] getPropertyNames(PropertySource propertySource) {
String[] propertyNames = propertySource instanceof EnumerablePropertySource
? ((EnumerablePropertySource) propertySource).getPropertyNames()
: null;
if (propertyNames == null) {
propertyNames = ObjectUtils.EMPTY_STRING_ARRAY;
}
return propertyNames;
} | 3.68 |
framework_VDebugWindow_getMillisSinceReset | /**
 * Gets the milliseconds since the last {@link #resetTimer()} call.
 *
 * @return the milliseconds elapsed since the last reset
*/
static int getMillisSinceReset() {
if (lastReset == null) {
lastReset = new Duration();
}
return lastReset.elapsedMillis();
} | 3.68 |
flink_ResourceSpec_setExtendedResource | /**
* Add the given extended resource. The old value with the same resource name will be
* replaced if present.
*/
public Builder setExtendedResource(ExternalResource extendedResource) {
this.extendedResources.put(extendedResource.getName(), extendedResource);
return this;
} | 3.68 |
hadoop_RollingFileSystemSink_initFs | /**
* Initialize the connection to HDFS and create the base directory. Also
* launch the flush thread.
*/
private boolean initFs() {
boolean success = false;
fileSystem = getFileSystem();
// This step isn't strictly necessary, but it makes debugging issues much
// easier. We try to create the base directory eagerly and fail with
// copious debug info if it fails.
try {
fileSystem.mkdirs(basePath);
success = true;
} catch (Exception ex) {
if (!ignoreError) {
throw new MetricsException("Failed to create " + basePath + "["
+ SOURCE_KEY + "=" + source + ", "
+ ALLOW_APPEND_KEY + "=" + allowAppend + ", "
+ stringifySecurityProperty(KEYTAB_PROPERTY_KEY) + ", "
+ stringifySecurityProperty(USERNAME_PROPERTY_KEY)
+ "] -- " + ex.toString(), ex);
}
}
if (success) {
// If we're permitted to append, check if we actually can
if (allowAppend) {
allowAppend = checkAppend(fileSystem);
}
flushTimer = new Timer("RollingFileSystemSink Flusher", true);
setInitialFlushTime(new Date());
}
return success;
} | 3.68 |
framework_BrowserWindowOpener_getUriFragment | /**
 * Gets the URI fragment configured for opened windows.
*
* @return the URI fragment string, or <code>null</code> if no fragment is
* configured.
*
* @see #setUriFragment(String)
*/
public String getUriFragment() {
return getState(false).uriFragment;
} | 3.68 |
flink_SingleInputSemanticProperties_addForwardedField | /**
* Adds, to the existing information, a field that is forwarded directly from the source
* record(s) to the destination record(s).
*
* @param sourceField the position in the source record(s)
* @param targetField the position in the destination record(s)
*/
public void addForwardedField(int sourceField, int targetField) {
if (isTargetFieldPresent(targetField)) {
throw new InvalidSemanticAnnotationException(
"Target field " + targetField + " was added twice.");
}
FieldSet targetFields = fieldMapping.get(sourceField);
if (targetFields != null) {
fieldMapping.put(sourceField, targetFields.addField(targetField));
} else {
fieldMapping.put(sourceField, new FieldSet(targetField));
}
} | 3.68 |
dubbo_NacosNamingServiceUtils_getGroup | /**
* The group of {@link NamingService} to register
*
* @param connectionURL {@link URL connection url}
* @return non-null, "default" as default
* @since 2.7.5
*/
public static String getGroup(URL connectionURL) {
// Compatible with nacos grouping via group.
String group = connectionURL.getParameter(GROUP_KEY, DEFAULT_GROUP);
return connectionURL.getParameter(NACOS_GROUP_KEY, group);
} | 3.68 |
querydsl_ComparableExpressionBase_coalesce | /**
* Create a {@code coalesce(this, args...)} expression
*
* @param args additional arguments
* @return coalesce
*/
@SuppressWarnings("unchecked")
public ComparableExpressionBase<T> coalesce(T... args) {
Coalesce<T> coalesce = new Coalesce<T>(getType(), mixin);
for (T arg : args) {
coalesce.add(arg);
}
return coalesce.getValue();
} | 3.68 |
flink_DataSet_writeAsFormattedText | /**
* Writes a DataSet as text file(s) to the specified location.
*
* <p>For each element of the DataSet the result of {@link TextFormatter#format(Object)} is
* written.
*
* @param filePath The path pointing to the location the text file is written to.
* @param writeMode Control the behavior for existing files. Options are NO_OVERWRITE and
* OVERWRITE.
* @param formatter formatter that is applied on every element of the DataSet.
* @return The DataSink that writes the DataSet.
* @see TextOutputFormat
* @see DataSet#writeAsText(String) Output files and directories
*/
public DataSink<String> writeAsFormattedText(
String filePath, WriteMode writeMode, TextFormatter<T> formatter) {
return map(new FormattingMapper<>(clean(formatter))).writeAsText(filePath, writeMode);
} | 3.68 |
framework_FieldGroup_commitFields | /**
* Tries to commit all bound fields one by one and gathers any validation
* exceptions in a map, which is returned to the caller
*
* @return a propertyId to validation exception map which is empty if all
* commits succeeded
*/
private Map<Field<?>, InvalidValueException> commitFields() {
Map<Field<?>, InvalidValueException> invalidValueExceptions = new HashMap<Field<?>, InvalidValueException>();
for (Field<?> f : fieldToPropertyId.keySet()) {
try {
f.commit();
} catch (InvalidValueException e) {
invalidValueExceptions.put(f, e);
}
}
return invalidValueExceptions;
} | 3.68 |
flink_Router_addRoute | /**
* Add route.
*
* <p>A path pattern can only point to one target. This method does nothing if the pattern has
* already been added.
*/
public Router<T> addRoute(HttpMethod method, String pathPattern, T target) {
getMethodlessRouter(method).addRoute(pathPattern, target);
return this;
} | 3.68 |
framework_VTabsheet_setClosable | /**
* Adds or removes the button for closing the corresponding tab and the
* style name for a closable tab.
*
* @param closable
* {@code true} if the tab is closable, {@code false}
* otherwise
*/
public void setClosable(boolean closable) {
this.closable = closable;
if (closable && closeButton == null) {
closeButton = DOM.createSpan();
closeButton.setInnerHTML("×");
closeButton
.setClassName(VTabsheet.CLASSNAME + "-caption-close");
Roles.getTabRole().setAriaHiddenState(closeButton, true);
Roles.getTabRole().setAriaDisabledState(closeButton, true);
getElement().appendChild(closeButton);
} else if (!closable && closeButton != null) {
getElement().removeChild(closeButton);
closeButton = null;
}
if (closable) {
addStyleDependentName("closable");
} else {
removeStyleDependentName("closable");
}
} | 3.68 |
flink_Savepoint_load | /**
* Loads an existing savepoint. Useful if you want to query, modify, or extend the state of an
* existing application.
*
* @param env The execution environment used to transform the savepoint.
* @param path The path to an existing savepoint on disk.
* @param stateBackend The state backend of the savepoint.
* @see #load(ExecutionEnvironment, String)
*/
public static ExistingSavepoint load(
ExecutionEnvironment env, String path, StateBackend stateBackend) throws IOException {
Preconditions.checkNotNull(stateBackend, "The state backend must not be null");
CheckpointMetadata metadata = SavepointLoader.loadSavepointMetadata(path);
int maxParallelism =
metadata.getOperatorStates().stream()
.map(OperatorState::getMaxParallelism)
.max(Comparator.naturalOrder())
.orElseThrow(
() ->
new RuntimeException(
"Savepoint must contain at least one operator state."));
SavepointMetadata savepointMetadata =
new SavepointMetadata(
maxParallelism, metadata.getMasterStates(), metadata.getOperatorStates());
return new ExistingSavepoint(env, savepointMetadata, stateBackend);
} | 3.68 |
framework_FieldBinder_bindFieldByIdentifier | /**
* Tries to bind the given {@link Component} instance to a member field of
* the bind target. The field is matched based on the given identifier. If a
* field is already bound (not null), {@link FieldBindingException} is
* thrown.
*
* @param identifier
* the identifier for the field.
* @param instance
* the instance to be bound to a field
* @return true on success
* @throws FieldBindingException
* if error occurs when trying to bind the instance to a field
*/
private boolean bindFieldByIdentifier(String identifier,
Component instance) {
try {
// create and validate field name
String fieldName = asFieldName(identifier);
if (fieldName.isEmpty()) {
return false;
}
// validate that the field can be found
Field field = fieldMap.get(fieldName.toLowerCase(Locale.ROOT));
if (field == null) {
getLogger()
.fine("No field was found by identifier " + identifier);
return false;
}
// validate that the field is not set
Object fieldValue = getFieldValue(bindTarget, field);
if (fieldValue != null) {
getLogger().fine("The field \"" + fieldName
+ "\" was already mapped. Ignoring.");
} else {
// set the field value
field.set(bindTarget, instance);
}
return true;
} catch (IllegalAccessException | IllegalArgumentException e) {
throw new FieldBindingException(
"Field binding failed for " + identifier, e);
}
} | 3.68 |
pulsar_ResourceGroupService_registerNameSpace | /**
* Registers a namespace as a user of a resource group.
*
* @param resourceGroupName
* @param fqNamespaceName (i.e., in "tenant/Namespace" format)
 * @throws PulsarAdminException if the RG does not exist, or if the NS already references the RG.
*/
public void registerNameSpace(String resourceGroupName, NamespaceName fqNamespaceName) throws PulsarAdminException {
ResourceGroup rg = checkResourceGroupExists(resourceGroupName);
// Check that the NS-name doesn't already have a RG association.
// [If it does, that should be unregistered before putting a different association.]
ResourceGroup oldRG = this.namespaceToRGsMap.get(fqNamespaceName);
if (oldRG != null) {
String errMesg = "Namespace " + fqNamespaceName + " already references a resource group: " + oldRG.getID();
throw new PulsarAdminException(errMesg);
}
ResourceGroupOpStatus status = rg.registerUsage(fqNamespaceName.toString(), ResourceGroupRefTypes.Namespaces,
true, this.resourceUsageTransportManagerMgr);
if (status == ResourceGroupOpStatus.Exists) {
String errMesg = String.format("Namespace %s already references the target resource group %s",
fqNamespaceName, resourceGroupName);
throw new PulsarAdminException(errMesg);
}
// Associate this NS-name with the RG.
this.namespaceToRGsMap.put(fqNamespaceName, rg);
rgNamespaceRegisters.labels(resourceGroupName).inc();
} | 3.68 |
framework_VAbstractCalendarPanel_getSelectKey | /**
* Returns the select key which selects the value. By default this is the
* enter key but it can be changed to whatever you like by overriding this
* method.
*
* @return the select key
*/
protected int getSelectKey() {
return KeyCodes.KEY_ENTER;
} | 3.68 |
pulsar_ByteBufPair_get | /**
* Get a new {@link ByteBufPair} from the pool and assign 2 buffers to it.
*
* <p>The buffers b1 and b2 lifecycles are now managed by the ByteBufPair:
* when the {@link ByteBufPair} is deallocated, b1 and b2 will be released as well.
*
 * @param b1 the first buffer
 * @param b2 the second buffer
 * @return a pooled {@link ByteBufPair} containing b1 and b2
*/
public static ByteBufPair get(ByteBuf b1, ByteBuf b2) {
ByteBufPair buf = RECYCLER.get();
buf.setRefCnt(1);
buf.b1 = b1;
buf.b2 = b2;
return buf;
} | 3.68 |
hibernate-validator_NotEmptyValidatorForArraysOfShort_isValid | /**
* Checks the array is not {@code null} and not empty.
*
* @param array the array to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the array is not {@code null} and the array is not empty
*/
@Override
public boolean isValid(short[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return false;
}
return array.length > 0;
} | 3.68 |
flink_OperationManager_getOperationResultSchema | /**
* Get the {@link ResolvedSchema} of the operation.
*
* @param operationHandle identifies the {@link Operation}.
*/
public ResolvedSchema getOperationResultSchema(OperationHandle operationHandle)
throws Exception {
return getOperation(operationHandle).getResultSchema();
} | 3.68 |
querydsl_MongodbExpressions_withinBox | /**
 * Finds points within the bounds of the rectangle.
 *
 * @param expr the point expression
 * @param blLongVal bottom left longitude
 * @param blLatVal bottom left latitude
 * @param urLongVal upper right longitude
 * @param urLatVal upper right latitude
 * @return predicate
*/
public static BooleanExpression withinBox(Expression<Double[]> expr, double blLongVal, double blLatVal, double urLongVal, double urLatVal) {
return Expressions.booleanOperation(
MongodbOps.GEO_WITHIN_BOX,
expr,
ConstantImpl.create(new Double[]{blLongVal, blLatVal}),
ConstantImpl.create(new Double[]{urLongVal, urLatVal})
);
} | 3.68 |
hudi_CollectionUtils_tail | /**
 * Returns the last element of the array of {@code T}.
*/
public static <T> T tail(T[] ts) {
checkArgument(ts.length > 0);
return ts[ts.length - 1];
} | 3.68 |
hadoop_RMWebAppUtil_createAppSubmissionContext | /**
* Create the actual ApplicationSubmissionContext to be submitted to the RM
* from the information provided by the user.
*
* @param newApp the information provided by the user
* @param conf RM configuration
* @return returns the constructed ApplicationSubmissionContext
* @throws IOException in case of Error
*/
public static ApplicationSubmissionContext createAppSubmissionContext(
ApplicationSubmissionContextInfo newApp, Configuration conf)
throws IOException {
// create local resources and app submission context
ApplicationId appid;
String error =
"Could not parse application id " + newApp.getApplicationId();
try {
appid = ApplicationId.fromString(newApp.getApplicationId());
} catch (Exception e) {
throw new BadRequestException(error);
}
ApplicationSubmissionContext appContext = ApplicationSubmissionContext
.newInstance(appid, newApp.getApplicationName(), newApp.getQueue(),
Priority.newInstance(newApp.getPriority()),
createContainerLaunchContext(newApp), newApp.getUnmanagedAM(),
newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(),
createAppSubmissionContextResource(newApp, conf),
newApp.getApplicationType(),
newApp.getKeepContainersAcrossApplicationAttempts(),
newApp.getAppNodeLabelExpression(),
newApp.getAMContainerNodeLabelExpression());
appContext.setApplicationTags(newApp.getApplicationTags());
appContext.setAttemptFailuresValidityInterval(
newApp.getAttemptFailuresValidityInterval());
if (newApp.getLogAggregationContextInfo() != null) {
appContext.setLogAggregationContext(
createLogAggregationContext(newApp.getLogAggregationContextInfo()));
}
String reservationIdStr = newApp.getReservationId();
if (reservationIdStr != null && !reservationIdStr.isEmpty()) {
ReservationId reservationId =
ReservationId.parseReservationId(reservationIdStr);
appContext.setReservationID(reservationId);
}
return appContext;
} | 3.68 |
hadoop_HdfsFileStatus_replication | /**
* Set the replication of this entity (default = 0).
* @param replication Number of replicas
* @return This Builder instance
*/
public Builder replication(int replication) {
this.replication = replication;
return this;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_setActiveThreadSpan | /**
* Set a specific span as the active span.
* This will wrap it.
* @param span span to use.
* @return the wrapped span.
*/
private AuditSpanS3A setActiveThreadSpan(AuditSpanS3A span) {
return switchToActiveSpan(
new WrappingAuditSpan(span, span.isValidSpan()));
} | 3.68 |
hbase_MultiTableInputFormatBase_getSplits | /**
* Calculates the splits that will serve as input for the map tasks. The number of splits matches
* the number of regions in a table.
* @param context The current job context.
* @return The list of input splits.
* @throws IOException When creating the list of splits fails.
* @see InputFormat#getSplits(org.apache.hadoop.mapreduce.JobContext)
*/
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {
if (scans.isEmpty()) {
throw new IOException("No scans were provided.");
}
Map<TableName, List<Scan>> tableMaps = new HashMap<>();
for (Scan scan : scans) {
byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME);
if (tableNameBytes == null) throw new IOException("A scan object did not have a table name");
TableName tableName = TableName.valueOf(tableNameBytes);
List<Scan> scanList = tableMaps.get(tableName);
if (scanList == null) {
scanList = new ArrayList<>();
tableMaps.put(tableName, scanList);
}
scanList.add(scan);
}
List<InputSplit> splits = new ArrayList<>();
Iterator iter = tableMaps.entrySet().iterator();
// Make a single Connection to the Cluster and use it across all tables.
try (Connection conn = ConnectionFactory.createConnection(context.getConfiguration())) {
while (iter.hasNext()) {
Map.Entry<TableName, List<Scan>> entry = (Map.Entry<TableName, List<Scan>>) iter.next();
TableName tableName = entry.getKey();
List<Scan> scanList = entry.getValue();
try (Table table = conn.getTable(tableName);
RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
RegionSizeCalculator sizeCalculator =
new RegionSizeCalculator(regionLocator, conn.getAdmin());
Pair<byte[][], byte[][]> keys = regionLocator.getStartEndKeys();
for (Scan scan : scanList) {
if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) {
throw new IOException(
"Expecting at least one region for table : " + tableName.getNameAsString());
}
int count = 0;
byte[] startRow = scan.getStartRow();
byte[] stopRow = scan.getStopRow();
for (int i = 0; i < keys.getFirst().length; i++) {
if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
continue;
}
if (
(startRow.length == 0 || keys.getSecond()[i].length == 0
|| Bytes.compareTo(startRow, keys.getSecond()[i]) < 0)
&& (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)
) {
byte[] splitStart =
startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0
? keys.getFirst()[i]
: startRow;
byte[] splitStop =
(stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0)
&& keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow;
HRegionLocation hregionLocation =
regionLocator.getRegionLocation(keys.getFirst()[i], false);
String regionHostname = hregionLocation.getHostname();
RegionInfo regionInfo = hregionLocation.getRegion();
String encodedRegionName = regionInfo.getEncodedName();
long regionSize = sizeCalculator.getRegionSize(regionInfo.getRegionName());
TableSplit split = new TableSplit(table.getName(), scan, splitStart, splitStop,
regionHostname, encodedRegionName, regionSize);
splits.add(split);
if (LOG.isDebugEnabled()) {
LOG.debug("getSplits: split -> " + (count++) + " -> " + split);
}
}
}
}
}
}
}
return splits;
} | 3.68 |
flink_InputGateSpecUtils_getExclusiveBuffersPerChannel | /**
 * Since at least one floating buffer is required, the number of required buffers is reduced by
 * 1, and then the average number of buffers per channel is calculated. The minimum value is
 * returned to ensure that the number of required buffers per gate is not more than the given
 * requiredBuffersPerGate.
*/
private static int getExclusiveBuffersPerChannel(
int configuredNetworkBuffersPerChannel,
int numInputChannels,
int requiredBuffersPerGate) {
checkArgument(numInputChannels > 0, "Must be positive.");
checkArgument(requiredBuffersPerGate >= 1, "Require at least 1 buffer per gate.");
return Math.min(
configuredNetworkBuffersPerChannel,
(requiredBuffersPerGate - 1) / numInputChannels);
} | 3.68 |
framework_AbstractInMemoryContainer_internalAddAt | /**
* Adds the bean to all internal data structures at the given position.
* Fails if an item with itemId is already in the container. Returns a the
* item if it was added successfully, null otherwise.
*
* <p>
* Caller should initiate filtering after calling this method.
* </p>
*
* For internal use only - subclasses should use
* {@link #internalAddItemAtEnd(Object, Item, boolean)},
* {@link #internalAddItemAt(int, Object, Item, boolean)} and
* {@link #internalAddItemAfter(Object, Object, Item, boolean)} instead.
*
* @param position
* The position at which the item should be inserted in the
* unfiltered collection of items
* @param itemId
* The item identifier for the item to insert
* @param item
* The item to insert
*
* @return ITEMCLASS if the item was added successfully, null otherwise
*/
private ITEMCLASS internalAddAt(int position, ITEMIDTYPE itemId,
ITEMCLASS item) {
if (position < 0 || position > getAllItemIds().size() || itemId == null
|| item == null) {
return null;
}
// Make sure that the item has not been added previously
if (getAllItemIds().contains(itemId)) {
return null;
}
// "filteredList" will be updated in filterAll() which should be invoked
// by the caller after calling this method.
getAllItemIds().add(position, itemId);
registerNewItem(position, itemId, item);
return item;
} | 3.68 |
hbase_StoreUtils_getMaxMemStoreTSInList | /**
* Return the largest memstoreTS found across all storefiles in the given list. Store files that
* were created by a mapreduce bulk load are ignored, as they do not correspond to any specific
* put operation, and thus do not have a memstoreTS associated with them.
*/
public static OptionalLong getMaxMemStoreTSInList(Collection<HStoreFile> sfs) {
return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS)
.max();
} | 3.68 |
morf_TableSetSchema_viewExists | /**
* @see org.alfasoftware.morf.metadata.Schema#viewExists(java.lang.String)
*/
@Override
public boolean viewExists(String name) {
return false;
} | 3.68 |
morf_SqlDialect_getDeleteLimitWhereClause | /**
* Returns the SQL that specifies the deletion limit in the WHERE clause, if any, for the dialect.
*
* @param limit The delete limit.
* @return The SQL fragment.
*/
protected Optional<String> getDeleteLimitWhereClause(@SuppressWarnings("unused") int limit) {
return Optional.empty();
} | 3.68 |
framework_AbstractComponentContainer_removeAllComponents | /**
* Removes all components from the container. This should probably be
* re-implemented in extending classes for a more powerful implementation.
*/
@Override
public void removeAllComponents() {
final LinkedList<Component> l = new LinkedList<>();
// Adds all components
for (final Iterator<Component> i = getComponentIterator(); i
.hasNext();) {
l.add(i.next());
}
// Removes all component
for (Component aL : l) {
removeComponent(aL);
}
} | 3.68 |
flink_DataSet_rightOuterJoin | /**
* Initiates a Right Outer Join transformation.
*
* <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
* equality and provides multiple ways to combine joining elements into one DataSet.
*
* <p>Elements of the <b>right</b> DataSet (i.e. {@code other}) that do not have a matching
* element on {@code this} side are joined with {@code null} and emitted to the resulting
* DataSet.
*
* @param other The other DataSet with which this DataSet is joined.
* @param strategy The strategy that should be used execute the join. If {@code null} is given,
* then the optimizer will pick the join strategy.
* @return A JoinOperatorSet to continue the definition of the Join transformation.
* @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
* @see DataSet
*/
public <R> JoinOperatorSetsBase<T, R> rightOuterJoin(DataSet<R> other, JoinHint strategy) {
switch (strategy) {
case OPTIMIZER_CHOOSES:
case REPARTITION_SORT_MERGE:
case REPARTITION_HASH_FIRST:
case REPARTITION_HASH_SECOND:
case BROADCAST_HASH_FIRST:
return new JoinOperatorSetsBase<>(this, other, strategy, JoinType.RIGHT_OUTER);
default:
throw new InvalidProgramException(
"Invalid JoinHint for RightOuterJoin: " + strategy);
}
} | 3.68 |
hadoop_Chain_write | /**
* Writes a key/value pair.
*
* @param key
* the key to write.
* @param value
* the value to write.
* @throws IOException
*/
public void write(KEYOUT key, VALUEOUT value) throws IOException,
InterruptedException {
if (outputQueue != null) {
writeToQueue(key, value);
} else {
outputContext.write(key, value);
}
} | 3.68 |
framework_StaticSection_setVisible | /**
* Sets the visibility of this section.
*
* @param visible
* {@code true} if visible; {@code false} if not
*
* @since 8.1.1
*/
public void setVisible(boolean visible) {
if (getState(false).visible != visible) {
getState(true).visible = visible;
}
} | 3.68 |
flink_SegmentsUtil_getLong | /**
 * Gets a long value from the segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static long getLong(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getLong(offset);
} else {
return getLongMultiSegments(segments, offset);
}
} | 3.68 |
hbase_HRegion_endNonceOperation | /**
* Ends nonce operation for a mutation, if needed.
* @param success Whether the operation for this nonce has succeeded.
*/
private void endNonceOperation(boolean success) {
if (
region.rsServices != null && region.rsServices.getNonceManager() != null
&& nonce != HConstants.NO_NONCE
) {
region.rsServices.getNonceManager().endOperation(nonceGroup, nonce, success);
}
} | 3.68 |
pulsar_ShadowReplicator_getShadowReplicatorName | /**
 * Cursor name for this shadow replicator.
* @param replicatorPrefix
* @param shadowTopic
* @return
*/
public static String getShadowReplicatorName(String replicatorPrefix, String shadowTopic) {
return replicatorPrefix + "-" + Codec.encode(shadowTopic);
} | 3.68 |
hadoop_TaskPool_waitFor | /**
* Wait for all the futures to complete; there's a small sleep between
* each iteration; enough to yield the CPU.
* @param futures futures.
* @param sleepInterval Interval in milliseconds to await completion.
*/
private static void waitFor(Collection<Future<?>> futures, int sleepInterval) {
int size = futures.size();
LOG.debug("Waiting for {} tasks to complete", size);
int oldNumFinished = 0;
while (true) {
int numFinished = (int) futures.stream().filter(Future::isDone).count();
if (oldNumFinished != numFinished) {
LOG.debug("Finished count -> {}/{}", numFinished, size);
oldNumFinished = numFinished;
}
if (numFinished == size) {
// all of the futures are done, stop looping
break;
} else {
try {
Thread.sleep(sleepInterval);
} catch (InterruptedException e) {
futures.forEach(future -> future.cancel(true));
Thread.currentThread().interrupt();
break;
}
}
}
} | 3.68 |
flink_SavepointWriter_fromExistingSavepoint | /**
* Loads an existing savepoint. Useful if you want to modify or extend the state of an existing
* application.
*
* @param path The path to an existing savepoint on disk.
* @param stateBackend The state backend of the savepoint.
* @return A {@link SavepointWriter}.
* @see #fromExistingSavepoint(String)
*/
public static SavepointWriter fromExistingSavepoint(
StreamExecutionEnvironment executionEnvironment, String path, StateBackend stateBackend)
throws IOException {
return new SavepointWriter(readSavepointMetadata(path), stateBackend, executionEnvironment);
} | 3.68 |