name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hudi_HoodieFileGroup_getLatestDataFile | /**
* Gets the latest data file.
*/
public Option<HoodieBaseFile> getLatestDataFile() {
return Option.fromJavaOptional(getAllBaseFiles().findFirst());
} | 3.68 |
hbase_ScannerModel_setEndRow | /**
* @param endRow end row
*/
public void setEndRow(byte[] endRow) {
this.endRow = endRow;
} | 3.68 |
hbase_SaslClientAuthenticationProviders_instantiate | /**
* Instantiates all client authentication providers and returns an instance of
* {@link SaslClientAuthenticationProviders}.
*/
static SaslClientAuthenticationProviders instantiate(Configuration conf) {
ServiceLoader<SaslClientAuthenticationProvider> loader =
ServiceLoader.load(SaslClientAuthenticationProvider.class);
HashMap<Byte, SaslClientAuthenticationProvider> providerMap = new HashMap<>();
for (SaslClientAuthenticationProvider provider : loader) {
addProviderIfNotExists(provider, providerMap);
}
addExplicitProviders(conf, providerMap);
Collection<SaslClientAuthenticationProvider> providers =
Collections.unmodifiableCollection(providerMap.values());
if (LOG.isTraceEnabled()) {
String loadedProviders = providers.stream().map((provider) -> provider.getClass().getName())
.collect(Collectors.joining(", "));
LOG.trace("Found SaslClientAuthenticationProviders {}", loadedProviders);
}
AuthenticationProviderSelector selector = instantiateSelector(conf, providers);
return new SaslClientAuthenticationProviders(providers, selector);
} | 3.68 |
flink_JobManagerCheckpointStorage_getMaxStateSize | /**
* Gets the maximum size that an individual state can have, as configured in the constructor (by
* default {@value #DEFAULT_MAX_STATE_SIZE}).
*
* @return The maximum size that an individual state can have
*/
public int getMaxStateSize() {
return maxStateSize;
} | 3.68 |
hadoop_NvidiaGPUPluginForRuntimeV2_combinationRecursive | /**
* Populate combination to cost map recursively.
*
* @param cTc combinationToCost map.
* The key is device set, the value is cost
* @param allDevices all devices used to assign value to subDevicelist
* @param subDeviceList stores a subset of devices temporarily
* @param start start index in the allDevices
* @param end last index in the allDevices
* @param index dynamic index in subDeviceList that needs to be assigned
* @param r the length of the subDeviceList
*/
void combinationRecursive(Map<Set<Device>, Integer> cTc,
Device[] allDevices, Device[] subDeviceList,
int start, int end, int index, int r) {
// sub device list's length is ready to compute the cost
if (index == r) {
Set<Device> oneSet = new TreeSet<>(Arrays.asList(subDeviceList));
int cost = computeCostOfDevices(subDeviceList);
cTc.put(oneSet, cost);
return;
}
for (int i = start; i <= end; i++) {
subDeviceList[index] = allDevices[i];
combinationRecursive(cTc, allDevices, subDeviceList,
i + 1, end, index + 1, r);
}
} | 3.68 |
framework_Color_getAlpha | /**
* Returns the alpha value of the color.
*
*/
public int getAlpha() {
return alpha;
} | 3.68 |
hadoop_ScriptBasedNodeLabelsProvider_parseOutput | /**
* Method which collects lines from the output string which begin with the
* patterns provided.
*
* @param scriptOutput the script output string
* @return the set of node labels parsed from the script output
* @throws IOException
*/
@Override
Set<NodeLabel> parseOutput(String scriptOutput)
throws IOException {
String nodePartitionLabel = null;
String[] splits = scriptOutput.split("\n");
for (String line : splits) {
String trimmedLine = line.trim();
if (trimmedLine.startsWith(NODE_LABEL_PARTITION_PATTERN)) {
nodePartitionLabel =
trimmedLine.substring(NODE_LABEL_PARTITION_PATTERN.length());
}
}
return convertToNodeLabelSet(nodePartitionLabel);
} | 3.68 |
morf_CorrectPrimaryKeyColumns_apply | /**
* @see org.alfasoftware.morf.upgrade.ChangePrimaryKeyColumns#apply(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema apply(Schema schema) {
oldPrimaryKeyColumns = namesOfColumns(primaryKeysForTable(schema.getTable(tableName)));
return super.apply(schema);
} | 3.68 |
hadoop_NMTokenCache_numberOfTokensInCache | /**
* Returns the number of NMTokens present in cache.
*/
@Private
@VisibleForTesting
public int numberOfTokensInCache() {
return nmTokens.size();
} | 3.68 |
hbase_HMaster_balanceThrottling | /**
* It first sleeps until the next balance plan start time. Meanwhile, it throttles by the max number
* of regions in transition to protect availability.
* @param nextBalanceStartTime The next balance plan start time
* @param maxRegionsInTransition max number of regions in transition
* @param cutoffTime when to exit balancer
*/
private void balanceThrottling(long nextBalanceStartTime, int maxRegionsInTransition,
long cutoffTime) {
boolean interrupted = false;
// Sleep to next balance plan start time
// But if there are zero regions in transition, it can skip sleep to speed up.
while (
!interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
&& this.assignmentManager.getRegionStates().hasRegionsInTransition()
) {
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
interrupted = true;
}
}
// Throttling by max number regions in transition
while (
!interrupted && maxRegionsInTransition > 0
&& this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
>= maxRegionsInTransition
&& EnvironmentEdgeManager.currentTime() <= cutoffTime
) {
try {
// sleep if the number of regions in transition exceeds the limit
Thread.sleep(100);
} catch (InterruptedException ie) {
interrupted = true;
}
}
if (interrupted) Thread.currentThread().interrupt();
} | 3.68 |
framework_Table_getColumnHeaderMode | /**
* Getter for property columnHeaderMode.
*
* @return the value of property columnHeaderMode.
*/
public ColumnHeaderMode getColumnHeaderMode() {
return columnHeaderMode;
} | 3.68 |
hbase_MetricSampleQuantiles_query | /**
* Get the estimated value at the specified quantile.
* @param quantile Queried quantile, e.g. 0.50 or 0.99.
* @return Estimated value at that quantile.
*/
private long query(double quantile) throws IOException {
if (samples.isEmpty()) {
throw new IOException("No samples present");
}
int rankMin = 0;
int desired = (int) (quantile * count);
for (int i = 1; i < samples.size(); i++) {
SampleItem prev = samples.get(i - 1);
SampleItem cur = samples.get(i);
rankMin += prev.g;
if (rankMin + cur.g + cur.delta > desired + (allowableError(i) / 2)) {
return prev.value;
}
}
// edge case of wanting max value
return samples.get(samples.size() - 1).value;
} | 3.68 |
zxing_LuminanceSource_getWidth | /**
* @return The width of the bitmap.
*/
public final int getWidth() {
return width;
} | 3.68 |
framework_VDateTimeCalendarPanel_setTimeChangeListener | /**
* The time change listener is triggered when the user changes the time.
*
* @param listener
* the listener to use
*/
public void setTimeChangeListener(TimeChangeListener listener) {
timeChangeListener = listener;
} | 3.68 |
hbase_PrettyPrinter_humanReadableByte | /**
* Convert a long size to a human readable string. Example: 10763632640 -> 10763632640 B (10GB
* 25MB)
* @param size the size in bytes
* @return human readable string
*/
private static String humanReadableByte(final long size) {
StringBuilder sb = new StringBuilder();
long tb, gb, mb, kb, b;
if (size < HConstants.KB_IN_BYTES) {
sb.append(size);
sb.append(" B");
return sb.toString();
}
tb = size / HConstants.TB_IN_BYTES;
gb = (size - HConstants.TB_IN_BYTES * tb) / HConstants.GB_IN_BYTES;
mb =
(size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb) / HConstants.MB_IN_BYTES;
kb = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb
- HConstants.MB_IN_BYTES * mb) / HConstants.KB_IN_BYTES;
b = (size - HConstants.TB_IN_BYTES * tb - HConstants.GB_IN_BYTES * gb
- HConstants.MB_IN_BYTES * mb - HConstants.KB_IN_BYTES * kb);
sb.append(size).append(" B (");
if (tb > 0) {
sb.append(tb);
sb.append("TB");
}
if (gb > 0) {
sb.append(tb > 0 ? " " : "");
sb.append(gb);
sb.append("GB");
}
if (mb > 0) {
sb.append(tb + gb > 0 ? " " : "");
sb.append(mb);
sb.append("MB");
}
if (kb > 0) {
sb.append(tb + gb + mb > 0 ? " " : "");
sb.append(kb);
sb.append("KB");
}
if (b > 0) {
sb.append(tb + gb + mb + kb > 0 ? " " : "");
sb.append(b);
sb.append("B");
}
sb.append(")");
return sb.toString();
} | 3.68 |
dubbo_ReferenceAnnotationBeanPostProcessor_postProcessPropertyValues | /**
* The {@code postProcessPropertyValues} callback, which was removed as of Spring
* Framework 6.0.0 in favor of {@link #postProcessProperties(PropertyValues, Object, String)}.
* <p>In order to be compatible with lower versions of Spring, it is still retained.
* @see #postProcessProperties
*/
public PropertyValues postProcessPropertyValues(
PropertyValues pvs, PropertyDescriptor[] pds, Object bean, String beanName) throws BeansException {
return postProcessProperties(pvs, bean, beanName);
} | 3.68 |
hudi_HeartbeatUtils_deleteHeartbeatFile | /**
* Deletes the heartbeat file for the specified instant.
* @param fs Hadoop FileSystem instance
* @param basePath Hoodie table base path
* @param instantTime Commit instant time
* @param config HoodieWriteConfig instance
* @return Boolean indicating whether heartbeat file was deleted or not
*/
public static boolean deleteHeartbeatFile(FileSystem fs, String basePath, String instantTime, HoodieWriteConfig config) {
if (config.getFailedWritesCleanPolicy().isLazy()) {
return deleteHeartbeatFile(fs, basePath, instantTime);
}
return false;
} | 3.68 |
hadoop_BlockData_getNumBlocks | /**
* Gets the number of blocks in the associated file.
* @return the number of blocks in the associated file.
*/
public int getNumBlocks() {
return numBlocks;
} | 3.68 |
pulsar_AuthenticationDataProvider_getHttpAuthType | /**
*
* @return an authentication scheme, or {@code null} if the request will not be authenticated.
*/
default String getHttpAuthType() {
return null;
} | 3.68 |
hbase_MasterProcedureManager_execProcedure | /**
* Execute a distributed procedure on cluster
* @param desc Procedure description
*/
public void execProcedure(ProcedureDescription desc) throws IOException {
} | 3.68 |
flink_BlobClient_putBuffer | /**
* Uploads data from the given byte buffer to the BLOB server.
*
* @param jobId the ID of the job the BLOB belongs to (or <tt>null</tt> if job-unrelated)
* @param value the buffer to read the data from
* @param offset the read offset within the buffer
* @param len the number of bytes to read from the buffer
* @param blobType whether the BLOB should become permanent or transient
* @return the computed BLOB key of the uploaded BLOB
* @throws IOException thrown if an I/O error occurs while uploading the data to the BLOB server
*/
BlobKey putBuffer(
@Nullable JobID jobId, byte[] value, int offset, int len, BlobKey.BlobType blobType)
throws IOException {
if (this.socket.isClosed()) {
throw new IllegalStateException(
"BLOB Client is not connected. "
+ "Client has been shut down or encountered an error before.");
}
checkNotNull(value);
if (LOG.isDebugEnabled()) {
LOG.debug(
"PUT BLOB buffer ("
+ len
+ " bytes) to "
+ socket.getLocalSocketAddress()
+ ".");
}
try (BlobOutputStream os = new BlobOutputStream(jobId, blobType, socket)) {
os.write(value, offset, len);
// Receive blob key and compare
return os.finish();
} catch (Throwable t) {
BlobUtils.closeSilently(socket, LOG);
throw new IOException("PUT operation failed: " + t.getMessage(), t);
}
} | 3.68 |
hadoop_TypedBytesOutput_writeRaw | /**
* Writes a raw sequence of typed bytes.
*
* @param bytes the bytes to be written
* @param offset an offset in the given array
* @param length number of bytes from the given array to write
* @throws IOException
*/
public void writeRaw(byte[] bytes, int offset, int length)
throws IOException {
out.write(bytes, offset, length);
} | 3.68 |
hbase_ModifyPeerProcedure_nextStateAfterRefresh | /**
* Implementation class can override this method. By default we will jump to
* POST_PEER_MODIFICATION and finish the procedure.
*/
protected PeerModificationState nextStateAfterRefresh() {
return PeerModificationState.POST_PEER_MODIFICATION;
} | 3.68 |
hbase_SequenceIdAccounting_startCacheFlush | /**
* @param encodedRegionName Region to flush.
* @param families Families to flush. May be a subset of all families in the region.
* @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if we are
* flushing a subset of all families but there are no edits in those families not being
* flushed; in other words, this is effectively the same as a flush of all of the region
* even though we were passed a subset of families. Otherwise, it returns the sequence id of the
* oldest/lowest outstanding edit.
*/
Long startCacheFlush(final byte[] encodedRegionName, final Set<byte[]> families) {
Map<byte[], Long> familytoSeq = new HashMap<>();
for (byte[] familyName : families) {
familytoSeq.put(familyName, HConstants.NO_SEQNUM);
}
return startCacheFlush(encodedRegionName, familytoSeq);
} | 3.68 |
framework_VDateField_setShowISOWeekNumbers | /**
* Sets whether ISO 8601 week numbers should be shown in the date selector
* or not. ISO 8601 defines that a week always starts with a Monday so the
* week numbers are only shown if this is the case.
*
* @param showISOWeekNumbers
* {@code true} if week number should be shown, {@code false}
* otherwise
*/
public void setShowISOWeekNumbers(boolean showISOWeekNumbers) {
this.showISOWeekNumbers = showISOWeekNumbers;
} | 3.68 |
morf_SqlDialect_autoNumberId | /**
* Builds SQL to get the autonumber value.
*
* @param statement the insert statement to get the autonumber value for.
* @param idTable the ID Table.
* @return SQL fetching the AutoNumber value.
*/
private String autoNumberId(InsertStatement statement, Table idTable) {
AliasedField idValue = nextIdValue(statement.getTable(), null, idTable, ID_INCREMENTOR_TABLE_COLUMN_NAME,
ID_INCREMENTOR_TABLE_COLUMN_VALUE);
return getSqlFrom(idValue);
} | 3.68 |
hbase_HFileArchiver_moveAndClose | /**
* Move the file to the given destination
* @return <tt>true</tt> on success
*/
public boolean moveAndClose(Path dest) throws IOException {
this.close();
Path p = this.getPath();
return CommonFSUtils.renameAndSetModifyTime(fs, p, dest);
} | 3.68 |
hadoop_TimelineEntity_setEvents | /**
* Set the event list to the given list of events related to the entity
*
* @param events
* a list of events related to the entity
*/
public void setEvents(List<TimelineEvent> events) {
this.events = events;
} | 3.68 |
hadoop_LongKeyConverter_decode | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
* #decode(byte[])
*/
@Override
public Long decode(byte[] bytes) {
try {
return (Long) longConverter.decodeValue(bytes);
} catch (IOException e) {
return null;
}
} | 3.68 |
hudi_HoodieConcatHandle_write | /**
* Write old record as is w/o merging with incoming record.
*/
@Override
public void write(HoodieRecord oldRecord) {
Schema oldSchema = config.populateMetaFields() ? writeSchemaWithMetaFields : writeSchema;
String key = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt);
try {
// NOTE: We're enforcing preservation of the record metadata to keep existing semantic
writeToFile(new HoodieKey(key, partitionPath), oldRecord, oldSchema, config.getPayloadConfig().getProps(), true);
} catch (IOException | RuntimeException e) {
String errMsg = String.format("Failed to write old record into new file for key %s from old file %s to new file %s with writerSchema %s",
key, getOldFilePath(), newFilePath, writeSchemaWithMetaFields.toString(true));
LOG.debug("Old record is " + oldRecord);
throw new HoodieUpsertException(errMsg, e);
}
recordsWritten++;
} | 3.68 |
hudi_Types_get | // Experimental method to support defaultValue
public static Field get(int id, boolean isOptional, String name, Type type, String doc, Object defaultValue) {
return new Field(isOptional, id, name, type, doc, defaultValue);
} | 3.68 |
hbase_StripeStoreFileManager_insertFileIntoStripe | /**
* Inserts a file in the correct place (by seqnum) in a stripe copy.
* @param stripe Stripe copy to insert into.
* @param sf File to insert.
*/
private static void insertFileIntoStripe(ArrayList<HStoreFile> stripe, HStoreFile sf) {
// The only operation for which sorting of the files matters is KeyBefore. Therefore,
// we will store the file in reverse order by seqNum from the outset.
for (int insertBefore = 0;; ++insertBefore) {
if (
insertBefore == stripe.size()
|| (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0)
) {
stripe.add(insertBefore, sf);
break;
}
}
} | 3.68 |
hadoop_BlockStorageMovementNeeded_addPendingWorkCount | /**
* Increment the pending work count for directory.
*/
public synchronized void addPendingWorkCount(int count) {
this.pendingWorkCount = this.pendingWorkCount + count;
} | 3.68 |
hbase_ReplicationSink_addToHashMultiMap | /**
* Simple helper to add a value to a map from key to (a list of) values. TODO: Make a general utility method
* @return the list of values corresponding to key1 and key2
*/
private <K1, K2, V> List<V> addToHashMultiMap(Map<K1, Map<K2, List<V>>> map, K1 key1, K2 key2,
V value) {
Map<K2, List<V>> innerMap = map.computeIfAbsent(key1, k -> new HashMap<>());
List<V> values = innerMap.computeIfAbsent(key2, k -> new ArrayList<>());
values.add(value);
return values;
} | 3.68 |
morf_UpgradeScriptAdditionsProvider_setAllowedPredicate | /**
* Allows for filtering of script additions.
* @param scriptAdditionsPredicate Upgrade script additions.
*/
default void setAllowedPredicate(Predicate<UpgradeScriptAddition> scriptAdditionsPredicate) {
} | 3.68 |
hadoop_HttpFSServerWebApp_init | /**
* Initializes the HttpFSServer server, loads configuration and required
* services.
*
* @throws ServerException thrown if HttpFSServer server could not be
* initialized.
*/
@Override
public void init() throws ServerException {
if (SERVER != null) {
throw new RuntimeException("HttpFSServer server already initialized");
}
SERVER = this;
super.init();
adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
LOG.info("Connects to Namenode [{}]",
get().get(FileSystemAccess.class).getFileSystemConfiguration().
getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
setMetrics(getConfig());
} | 3.68 |
querydsl_AbstractHibernateQuery_setTimeout | /**
* Set a timeout for the underlying JDBC query.
* @param timeout the timeout in seconds
* @return the current object
*/
@SuppressWarnings("unchecked")
public Q setTimeout(int timeout) {
this.timeout = timeout;
return (Q) this;
} | 3.68 |
hadoop_StageConfig_withTaskAttemptId | /**
* Set builder value.
* @param value new value
* @return this
*/
public StageConfig withTaskAttemptId(final String value) {
checkOpen();
taskAttemptId = value;
return this;
} | 3.68 |
framework_KeyMapper_createKey | /**
* Creates a key for a new item.
* <p>
* This method can be overridden to customize the keys used.
*
* @return new key
* @since 8.1.2
*/
protected String createKey() {
return String.valueOf(++lastKey);
} | 3.68 |
hadoop_ServiceLauncher_launchService | /**
* Launch a service catching all exceptions and downgrading them to exit codes
* after logging.
*
* Sets {@link #serviceException} to this value.
* @param conf configuration to use
* @param instance optional instance of the service.
* @param processedArgs command line after the launcher-specific arguments
* have been stripped out.
* @param addShutdownHook should a shutdown hook be added to terminate
* this service on shutdown. Tests should set this to false.
* @param execute execute/wait for the service to stop.
* @return an exit exception, which will have a status code of 0 if it worked
*/
public ExitUtil.ExitException launchService(Configuration conf,
S instance,
List<String> processedArgs,
boolean addShutdownHook,
boolean execute) {
ExitUtil.ExitException exitException;
try {
int exitCode = coreServiceLaunch(conf, instance, processedArgs,
addShutdownHook, execute);
if (service != null) {
// check to see if the service failed
Throwable failure = service.getFailureCause();
if (failure != null) {
// the service exited with a failure.
// check what state it is in
Service.STATE failureState = service.getFailureState();
if (failureState == Service.STATE.STOPPED) {
// the failure occurred during shutdown, not important enough
// to bother the user as it may just scare them
LOG.debug("Failure during shutdown: {} ", failure, failure);
} else {
//throw it for the catch handlers to deal with
throw failure;
}
}
}
String name = getServiceName();
if (exitCode == 0) {
exitException = new ServiceLaunchException(exitCode,
"%s succeeded",
name);
} else {
exitException = new ServiceLaunchException(exitCode,
"%s failed ", name);
}
// either the service succeeded, or an error raised during shutdown,
// which we don't worry that much about
} catch (ExitUtil.ExitException ee) {
// exit exceptions are passed through unchanged
exitException = ee;
} catch (Throwable thrown) {
// other errors need a full log.
LOG.error("Exception raised {}",
service != null
? (service.toString() + " in state " + service.getServiceState())
: "during service instantiation",
thrown);
exitException = convertToExitException(thrown);
}
noteException(exitException);
return exitException;
}
/**
* Launch the service.
*
* All exceptions that occur are propagated upwards.
*
* If the method returns a status code, it means that it got as far starting
* the service, and if it implements {@link LaunchableService}, that the
* method {@link LaunchableService#execute()} has completed.
*
* After this method returns, the service can be retrieved returned by
* {@link #getService()}.
*
* @param conf configuration
* @param instance optional instance of the service.
* @param processedArgs arguments after the configuration parameters
* have been stripped out.
* @param addShutdownHook should a shutdown hook be added to terminate
* this service on shutdown. Tests should set this to false.
* @param execute execute/wait for the service to stop
* @throws ClassNotFoundException classname not on the classpath
* @throws IllegalAccessException not allowed at the class
* @throws InstantiationException not allowed to instantiate it
* @throws InterruptedException thread interrupted
* @throws ExitUtil.ExitException any exception defining the status code.
* @throws Exception any other failure -if it implements
* {@link ExitCodeProvider} | 3.68 |
flink_TaskExecutor_onStop | /** Called to shut down the TaskManager. The method closes all TaskManager services. */
@Override
public CompletableFuture<Void> onStop() {
log.info("Stopping TaskExecutor {}.", getAddress());
Throwable jobManagerDisconnectThrowable = null;
FlinkExpectedException cause =
new FlinkExpectedException("The TaskExecutor is shutting down.");
closeResourceManagerConnection(cause);
for (JobTable.Job job : jobTable.getJobs()) {
try {
closeJob(job, cause);
} catch (Throwable t) {
jobManagerDisconnectThrowable =
ExceptionUtils.firstOrSuppressed(t, jobManagerDisconnectThrowable);
}
}
changelogStoragesManager.shutdown();
channelStateExecutorFactoryManager.shutdown();
jobInformationCache.clear();
taskInformationCache.clear();
shuffleDescriptorsCache.clear();
Preconditions.checkState(jobTable.isEmpty());
final Throwable throwableBeforeTasksCompletion = jobManagerDisconnectThrowable;
return FutureUtils.runAfterwards(taskSlotTable.closeAsync(), this::stopTaskExecutorServices)
.handle(
(ignored, throwable) -> {
handleOnStopException(throwableBeforeTasksCompletion, throwable);
return null;
});
} | 3.68 |
framework_VAbstractOrderedLayout_removeSlot | /**
* Remove a slot from the layout.
*
* This method is called automatically by {@link #removeWidget(Widget)} and
* should not be called directly by the user. When overridden, the super
* method must be called.
*
* @since 7.6
* @param slot
* to remove
*/
protected void removeSlot(Slot slot) {
remove(slot);
} | 3.68 |
flink_SliceAssigners_sliced | /**
* Creates a {@link SliceAssigner} that assigns elements which has been attached slice end
* timestamp.
*
* @param sliceEndIndex the index of slice end field in the input row, mustn't be a negative
* value.
* @param innerAssigner the inner assigner which assigns the attached windows
*/
public static SliceAssigner sliced(int sliceEndIndex, SliceAssigner innerAssigner) {
if (innerAssigner instanceof SliceSharedAssigner) {
return new SlicedSharedSliceAssigner(
sliceEndIndex, (SliceSharedAssigner) innerAssigner);
} else {
return new SlicedUnsharedSliceAssigner(sliceEndIndex, innerAssigner);
}
} | 3.68 |
pulsar_ConsumerImpl_createEncryptionContext | /**
* Create EncryptionContext if message payload is encrypted.
*
* @param msgMetadata
* @return {@link Optional}<{@link EncryptionContext}>
*/
private Optional<EncryptionContext> createEncryptionContext(MessageMetadata msgMetadata) {
EncryptionContext encryptionCtx = null;
if (msgMetadata.getEncryptionKeysCount() > 0) {
encryptionCtx = new EncryptionContext();
Map<String, EncryptionKey> keys = msgMetadata.getEncryptionKeysList().stream()
.collect(
Collectors.toMap(EncryptionKeys::getKey,
e -> new EncryptionKey(e.getValue(),
e.getMetadatasList().stream().collect(
Collectors.toMap(KeyValue::getKey, KeyValue::getValue)))));
byte[] encParam = msgMetadata.getEncryptionParam();
Optional<Integer> batchSize = Optional
.ofNullable(msgMetadata.hasNumMessagesInBatch() ? msgMetadata.getNumMessagesInBatch() : null);
encryptionCtx.setKeys(keys);
encryptionCtx.setParam(encParam);
if (msgMetadata.hasEncryptionAlgo()) {
encryptionCtx.setAlgorithm(msgMetadata.getEncryptionAlgo());
}
encryptionCtx
.setCompressionType(CompressionCodecProvider.convertFromWireProtocol(msgMetadata.getCompression()));
encryptionCtx.setUncompressedMessageSize(msgMetadata.getUncompressedSize());
encryptionCtx.setBatchSize(batchSize);
}
return Optional.ofNullable(encryptionCtx);
} | 3.68 |
framework_TableTooManyColumns_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 14156;
} | 3.68 |
hbase_ReplicationSourceManager_drainSources | /**
* <p>
* This is used when we transit a sync replication peer to {@link SyncReplicationState#STANDBY}.
* </p>
* <p>
* When transiting to {@link SyncReplicationState#STANDBY}, we can remove all the pending wal
* files for a replication peer as we do not need to replicate them any more. And this is
* necessary, otherwise when we transit back to {@link SyncReplicationState#DOWNGRADE_ACTIVE}
* later, the stale data will be replicated again and cause inconsistency.
* </p>
* <p>
* See HBASE-20426 for more details.
* </p>
* @param peerId the id of the sync replication peer
*/
public void drainSources(String peerId) throws IOException, ReplicationException {
String terminateMessage = "Sync replication peer " + peerId
+ " is transiting to STANDBY. Will close the previous replication source and open a new one";
ReplicationPeer peer = replicationPeers.getPeer(peerId);
assert peer.getPeerConfig().isSyncReplication();
ReplicationQueueId queueId = new ReplicationQueueId(server.getServerName(), peerId);
// TODO: use empty initial offsets for now, revisit when adding support for sync replication
ReplicationSourceInterface src =
createSource(new ReplicationQueueData(queueId, ImmutableMap.of()), peer);
// synchronized here to avoid race with postLogRoll where we add new log to source and also
// walsById.
ReplicationSourceInterface toRemove;
ReplicationQueueData queueData;
synchronized (latestPaths) {
// Here we make a copy of all the remaining wal files and then delete them from the
// replication queue storage after releasing the lock. It is not safe to just remove the old
// map from walsById since later we may fail to update the replication queue storage, and when
// we retry next time, we can not know the wal files that needs to be set to the replication
// queue storage
ImmutableMap.Builder<String, ReplicationGroupOffset> builder = ImmutableMap.builder();
synchronized (walsById) {
walsById.get(queueId).forEach((group, wals) -> {
if (!wals.isEmpty()) {
builder.put(group, new ReplicationGroupOffset(wals.last(), -1));
}
});
}
queueData = new ReplicationQueueData(queueId, builder.build());
src = createSource(queueData, peer);
toRemove = sources.put(peerId, src);
if (toRemove != null) {
LOG.info("Terminate replication source for " + toRemove.getPeerId());
toRemove.terminate(terminateMessage);
toRemove.getSourceMetrics().clear();
}
}
for (Map.Entry<String, ReplicationGroupOffset> entry : queueData.getOffsets().entrySet()) {
queueStorage.setOffset(queueId, entry.getKey(), entry.getValue(), Collections.emptyMap());
}
LOG.info("Startup replication source for " + src.getPeerId());
src.startup();
synchronized (walsById) {
Map<String, NavigableSet<String>> wals = walsById.get(queueId);
queueData.getOffsets().forEach((group, offset) -> {
NavigableSet<String> walsByGroup = wals.get(group);
if (walsByGroup != null) {
walsByGroup.headSet(offset.getWal(), true).clear();
}
});
}
// synchronized on oldsources to avoid race with NodeFailoverWorker. Since NodeFailoverWorker is
// a background task, we will delete the file from replication queue storage under the lock to
// simplify the logic.
synchronized (this.oldsources) {
for (Iterator<ReplicationSourceInterface> iter = oldsources.iterator(); iter.hasNext();) {
ReplicationSourceInterface oldSource = iter.next();
if (oldSource.getPeerId().equals(peerId)) {
ReplicationQueueId oldSourceQueueId = oldSource.getQueueId();
oldSource.terminate(terminateMessage);
oldSource.getSourceMetrics().clear();
queueStorage.removeQueue(oldSourceQueueId);
walsByIdRecoveredQueues.remove(oldSourceQueueId);
iter.remove();
}
}
}
} | 3.68 |
rocketmq-connect_JdbcSourceTask_getIncrementContext | // Increment context
private IncrementContext getIncrementContext(
String querySuffix,
String tableOrQuery,
String topicPrefix,
QueryMode queryMode,
List<String> timestampColumnNames,
String incrementingColumnName,
Map<String, Object> offsetMap,
Long timestampDelay,
TimeZone timeZone
) {
IncrementContext context = new IncrementContext(
queryMode,
queryMode == QueryMode.TABLE ? dialect.parseTableNameToTableId(tableOrQuery) : null,
queryMode == QueryMode.QUERY ? tableOrQuery : null,
topicPrefix,
this.config.getOffsetSuffix(),
querySuffix,
config.getBatchMaxRows(),
timestampColumnNames != null ? timestampColumnNames : Collections.emptyList(),
incrementingColumnName,
offsetMap,
timestampDelay,
timeZone
);
return context;
} | 3.68 |
framework_ServerRpcHandler_getRawJson | /**
* Gets the entire request in JSON format, as it was received from the
* client.
* <p>
* <em>Note:</em> This is a shared reference - any modifications made
* will be shared.
*
* @return the raw JSON object that was received from the client
*
*/
public JsonObject getRawJson() {
return json;
} | 3.68 |
hadoop_PendingSet_putExtraData | /**
* Set/Update an extra data entry.
* @param key key
* @param value value
*/
public void putExtraData(String key, String value) {
extraData.put(key, value);
} | 3.68 |
hibernate-validator_TypeConstraintMappingContextImpl_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
framework_FieldGroup_getItemProperty | /**
* Gets the property with the given property id from the item.
*
* @param propertyId
* The id of the property to find
* @return The property with the given id from the item
* @throws BindException
* If the property was not found in the item or no item has been
* set
*/
protected Property getItemProperty(Object propertyId) throws BindException {
Item item = getItemDataSource();
if (item == null) {
throw new BindException("Could not lookup property with id "
+ propertyId + " as no item has been set");
}
Property<?> p = item.getItemProperty(propertyId);
if (p == null) {
throw new BindException("A property with id " + propertyId
+ " was not found in the item");
}
return p;
} | 3.68 |
pulsar_AdminResource_checkTopicExistsAsync | /**
* Check whether the existing topics contain the given topic.
* Since there are both topic partitions and non-partitioned topics in Pulsar, we must ensure that partitions
* and non-partitioned topics are not duplicated. So, when comparing with a partition name, we should compare
* against the partitioned topic name of that partition.
*
* @param topicName given topic name
*/
protected CompletableFuture<Boolean> checkTopicExistsAsync(TopicName topicName) {
return pulsar().getNamespaceService().getListOfTopics(topicName.getNamespaceObject(),
CommandGetTopicsOfNamespace.Mode.ALL)
.thenCompose(topics -> {
boolean exists = false;
for (String topic : topics) {
if (topicName.getPartitionedTopicName().equals(
TopicName.get(topic).getPartitionedTopicName())) {
exists = true;
break;
}
}
return CompletableFuture.completedFuture(exists);
});
} | 3.68 |
graphhopper_Country_getCountryName | /**
* @return the name of this country. Avoids clash with name() method of this enum.
*/
public String getCountryName() {
return countryName;
} | 3.68 |
pulsar_AuthorizationService_allowNamespacePolicyOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
public boolean allowNamespacePolicyOperation(NamespaceName namespaceName,
PolicyName policy,
PolicyOperation operation,
String originalRole,
String role,
AuthenticationDataSource authData) throws Exception {
try {
return allowNamespacePolicyOperationAsync(
namespaceName, policy, operation, originalRole, role, authData).get(
conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
hadoop_TimelineEntities_setEntities | /**
* Set the entity list to the given list of entities
*
* @param entities
* a list of entities
*/
public void setEntities(List<TimelineEntity> entities) {
this.entities = entities;
} | 3.68 |
hbase_StripeCompactionPolicy_selectSimpleCompaction | /**
* Selects the compaction of a single stripe using default policy.
* @param sfs Files.
* @param allFilesOnly Whether a compaction of all-or-none files is needed.
* @return The resulting selection.
*/
private List<HStoreFile> selectSimpleCompaction(List<HStoreFile> sfs, boolean allFilesOnly,
boolean isOffpeak, boolean forceCompact) {
int minFilesLocal =
Math.max(allFilesOnly ? sfs.size() : 0, this.config.getStripeCompactMinFiles());
int maxFilesLocal = Math.max(this.config.getStripeCompactMaxFiles(), minFilesLocal);
List<HStoreFile> selected =
stripePolicy.applyCompactionPolicy(sfs, false, isOffpeak, minFilesLocal, maxFilesLocal);
if (forceCompact && (selected == null || selected.isEmpty()) && !sfs.isEmpty()) {
return stripePolicy.selectCompactFiles(sfs, maxFilesLocal, isOffpeak);
}
return selected;
} | 3.68 |
framework_GridLayoutElement_getRowCount | /**
* Gets the total number of rows in the layout.
*
* @return the number of rows in the layout
* @since 8.0.6
*/
public long getRowCount() {
Long res = (Long) getCommandExecutor()
.executeScript("return arguments[0].getRowCount()", this);
if (res == null) {
throw new IllegalStateException("getRowCount returned null");
}
return res.longValue();
} | 3.68 |
framework_DateField_setShowISOWeekNumbers | /**
* Sets the visibility of ISO 8601 week numbers in the date selector. ISO
* 8601 defines that a week always starts with a Monday so the week numbers
* are only shown if this is the case.
*
* @param showWeekNumbers
* true if week numbers should be shown, false otherwise.
*/
public void setShowISOWeekNumbers(boolean showWeekNumbers) {
showISOWeekNumbers = showWeekNumbers;
markAsDirty();
} | 3.68 |
pulsar_MessageDeduplication_recordMessagePersisted | /**
* Call this method whenever a message is persisted to get the chance to trigger a snapshot.
*/
public void recordMessagePersisted(PublishContext publishContext, PositionImpl position) {
if (!isEnabled() || publishContext.isMarkerMessage()) {
return;
}
String producerName = publishContext.getProducerName();
long sequenceId = publishContext.getSequenceId();
long highestSequenceId = publishContext.getHighestSequenceId();
if (publishContext.getOriginalProducerName() != null) {
// In case of replicated messages, this will be different from the current replicator producer name
producerName = publishContext.getOriginalProducerName();
sequenceId = publishContext.getOriginalSequenceId();
highestSequenceId = publishContext.getOriginalHighestSequenceId();
}
Boolean isLastChunk = (Boolean) publishContext.getProperty(IS_LAST_CHUNK);
if (isLastChunk == null || isLastChunk) {
highestSequencedPersisted.put(producerName, Math.max(highestSequenceId, sequenceId));
}
if (++snapshotCounter >= snapshotInterval) {
snapshotCounter = 0;
takeSnapshot(position);
}
} | 3.68 |
hbase_FileSystemUtilizationChore_getTimeUnit | /**
* Extracts the time unit for the chore period and initial delay from the configuration. The
* configuration value for {@link #FS_UTILIZATION_CHORE_TIMEUNIT_KEY} must correspond to a
* {@link TimeUnit} value.
* @param conf The configuration object.
* @return The configured time unit for the chore period and initial delay or the default value.
*/
static TimeUnit getTimeUnit(Configuration conf) {
return TimeUnit
.valueOf(conf.get(FS_UTILIZATION_CHORE_TIMEUNIT_KEY, FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT));
} | 3.68 |
flink_TableFactoryService_findAll | /**
* Finds all table factories of the given class and property map.
*
* @param factoryClass desired factory class
* @param propertyMap properties that describe the factory configuration
* @param <T> factory class type
* @return all the matching factories
*/
public static <T extends TableFactory> List<T> findAll(
Class<T> factoryClass, Map<String, String> propertyMap) {
return findAllInternal(factoryClass, propertyMap, Optional.empty());
} | 3.68 |
hadoop_TFile_createScanner | /**
* Get a scanner that covers a specific key range.
*
* @param beginKey
* Begin key of the scan (inclusive). If null, scan from the first
* key-value entry of the TFile.
* @param endKey
* End key of the scan (exclusive). If null, scan up to the last
* key-value entry of the TFile.
* @return The actual coverage of the returned scanner will cover all keys
* greater than or equal to the beginKey and less than the endKey.
* @throws IOException raised on errors performing I/O.
*
* @deprecated Use {@link #createScannerByKey(RawComparable, RawComparable)}
* instead.
*/
@Deprecated
public Scanner createScanner(RawComparable beginKey, RawComparable endKey)
throws IOException {
return createScannerByKey(beginKey, endKey);
} | 3.68 |
flink_HiveTableSink_toStagingDir | // get a staging dir
private String toStagingDir(String stagingParentDir, Configuration conf) throws IOException {
if (!stagingParentDir.endsWith(Path.SEPARATOR)) {
stagingParentDir += Path.SEPARATOR;
}
// TODO: may append something more meaningful than a timestamp, like query ID
stagingParentDir += ".staging_" + System.currentTimeMillis();
Path path = new Path(stagingParentDir);
FileSystem fs = path.getFileSystem(conf);
Preconditions.checkState(
fs.exists(path) || fs.mkdirs(path), "Failed to create staging dir " + path);
fs.deleteOnExit(path);
return stagingParentDir;
} | 3.68 |
hbase_MemStoreFlusher_isAboveLowWaterMark | /**
* Return the FlushType if we're above the low watermark
*/
private FlushType isAboveLowWaterMark() {
return server.getRegionServerAccounting().isAboveLowWaterMark();
} | 3.68 |
pulsar_DLInputStream_read | /**
* When reading past the end of a stream, the reader throws an EndOfStream exception, so we can use this to
* check whether we have read to the end.
*
* @param outputStream the stream the data is written to
* @param readFuture a future that waits for the read to complete
* @param num how many entries to read at a time
*/
private void read(OutputStream outputStream, CompletableFuture<Void> readFuture, int num) {
reader.readBulk(num)
.whenComplete((logRecordWithDLSNS, throwable) -> {
if (null != throwable) {
if (throwable instanceof EndOfStreamException) {
readFuture.complete(null);
} else {
readFuture.completeExceptionally(throwable);
}
return;
}
CompletableFuture.runAsync(() -> logRecordWithDLSNS.forEach(logRecord -> {
try {
outputStream.write(logRecord.getPayload());
} catch (IOException e) {
readFuture.completeExceptionally(e);
}
})).thenRun(() -> read(outputStream, readFuture, num));
});
} | 3.68 |
flink_StatusWatermarkValve_markWatermarkUnaligned | /**
* Mark the {@link InputChannelStatus} as watermark-unaligned and remove it from the {@link
* #alignedChannelStatuses}.
*
* @param inputChannelStatus the input channel status to be marked
*/
private void markWatermarkUnaligned(InputChannelStatus inputChannelStatus) {
if (inputChannelStatus.isWatermarkAligned) {
inputChannelStatus.isWatermarkAligned = false;
inputChannelStatus.removeFrom(alignedChannelStatuses);
}
} | 3.68 |
hadoop_FileUnderConstructionFeature_cleanZeroSizeBlock | /**
* When deleting a file in the current fs directory, and the file is contained
* in a snapshot, we should delete the last block if it's under construction
* and its size is 0.
*/
void cleanZeroSizeBlock(final INodeFile f,
final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] blocks = f.getBlocks();
if (blocks != null && blocks.length > 0
&& !blocks[blocks.length - 1].isComplete()) {
BlockInfo lastUC = blocks[blocks.length - 1];
if (lastUC.getNumBytes() == 0) {
// this is a 0-sized block. do not need check its UC state here
collectedBlocks.addDeleteBlock(lastUC);
f.removeLastBlock(lastUC);
}
}
} | 3.68 |
framework_DataCommunicator_createKeyMapper | /**
* Creates a {@link DataKeyMapper} to use with this DataCommunicator.
* <p>
* This method is called from the constructor.
*
* @param identifierGetter
* has to return a unique key for every bean, and the returned
* key has to follow general {@code hashCode()} and
* {@code equals()} contract, see {@link Object#hashCode()} for
* details.
* @return key mapper
*
* @since 8.1
*
*/
protected DataKeyMapper<T> createKeyMapper(
ValueProvider<T, Object> identifierGetter) {
return new KeyMapper<T>(identifierGetter);
} | 3.68 |
rocketmq-connect_WorkerTask_awaitStop | /**
* Wait for this task to finish stopping.
*
* @param timeoutMs time in milliseconds to await stop
* @return true if successful, false if the timeout was reached
*/
public boolean awaitStop(long timeoutMs) {
try {
return shutdownLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
return false;
}
} | 3.68 |
framework_ShortcutActionHandler_updateActionMap | /**
* Updates list of actions this handler listens to.
*
* @param c
* UIDL snippet containing actions
*/
public void updateActionMap(UIDL c) {
actions.clear();
for (final Object child : c) {
final UIDL action = (UIDL) child;
int[] modifiers = null;
if (action.hasAttribute("mk")) {
modifiers = action.getIntArrayAttribute("mk");
}
final ShortcutKeyCombination kc = new ShortcutKeyCombination(
action.getIntAttribute("kc"), modifiers);
final String key = action.getStringAttribute("key");
final String caption = action.getStringAttribute("caption");
actions.add(new ShortcutAction(key, kc, caption));
}
} | 3.68 |
flink_ExecutionConfig_setNumberOfExecutionRetries | /**
* Sets the number of times that failed tasks are re-executed. A value of zero effectively
* disables fault tolerance. A value of {@code -1} indicates that the system default value (as
* defined in the configuration) should be used.
*
* @param numberOfExecutionRetries The number of times the system will try to re-execute failed
* tasks.
* @return The current execution configuration
* @deprecated This method will be replaced by {@link #setRestartStrategy}. The {@link
* RestartStrategies.FixedDelayRestartStrategyConfiguration} contains the number of
* execution retries.
*/
@Deprecated
public ExecutionConfig setNumberOfExecutionRetries(int numberOfExecutionRetries) {
if (numberOfExecutionRetries < -1) {
throw new IllegalArgumentException(
"The number of execution retries must be non-negative, or -1 (use system default)");
}
configuration.set(EXECUTION_RETRIES, numberOfExecutionRetries);
return this;
} | 3.68 |
flink_InputChannel_checkpointStarted | /**
* Called by task thread when checkpointing is started (e.g., any input channel received
* barrier).
*/
public void checkpointStarted(CheckpointBarrier barrier) throws CheckpointException {} | 3.68 |
hadoop_AuthenticationToken_setMaxInactives | /**
* Sets the max inactive time of the token.
*
* @param maxInactives inactive time of the token in milliseconds
* since the epoch.
*/
public void setMaxInactives(long maxInactives) {
if (this != AuthenticationToken.ANONYMOUS) {
super.setMaxInactives(maxInactives);
}
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations11 | /**
* Test for proper SQL mathematics operation generation from DSL expressions
* that use brackets.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations11() {
String result = testDialect.getSqlFrom(bracket(field("a").divideBy(literal(100)).plus(literal(1))).divideBy(field("b")).plus(
literal(100)));
assertEquals(expectedSqlForMathOperations11(), result);
} | 3.68 |
open-banking-gateway_FintechSecureStorage_psuAspspKeyToInbox | /**
* Sends PSU/Fintech user private key to FinTechs' inbox at the consent confirmation.
* @param authSession Authorization session for this PSU/Fintech user
* @param psuKey Private Key to send to FinTechs' inbox
*/
@SneakyThrows
public void psuAspspKeyToInbox(AuthSession authSession, PubAndPrivKey psuKey) {
try (OutputStream os = datasafeServices.inboxService().write(
WriteRequest.forDefaultPublic(ImmutableSet.of(
authSession.getFintechUser().getFintech().getUserId()),
new FintechPsuAspspTuple(authSession).toDatasafePathWithoutParent()))
) {
serde.writeKey(psuKey.getPublicKey(), psuKey.getPrivateKey(), os);
}
} | 3.68 |
hadoop_ErrorTranslation_wrapWithInnerIOE | /**
* Given an outer and an inner exception, create a new IOE
* of the inner type, with the outer exception as the cause.
* The message is derived from both.
* This only works if the inner exception has a constructor which
* takes a string; if not a PathIOException is created.
* <p>
* See {@code NetUtils}.
* @param <T> type of inner exception.
* @param path path of the failure.
* @param outer outermost exception.
* @param inner inner exception.
* @return the new exception.
*/
@SuppressWarnings("unchecked")
private static <T extends IOException> IOException wrapWithInnerIOE(
String path,
Throwable outer,
T inner) {
String msg = outer.toString() + ": " + inner.getMessage();
Class<? extends Throwable> clazz = inner.getClass();
try {
Constructor<? extends Throwable> ctor = clazz.getConstructor(String.class);
Throwable t = ctor.newInstance(msg);
return (T) (t.initCause(outer));
} catch (Throwable e) {
return new PathIOException(path, msg, outer);
}
} | 3.68 |
hadoop_SnappyCompressor_getBytesWritten | /**
* Return number of bytes consumed by callers of compress since last reset.
*/
@Override
public long getBytesWritten() {
return bytesWritten;
} | 3.68 |
framework_StringLengthValidator_setMinLength | /**
* Sets the minimum permissible length.
*
* @param minLength
* the minimum length to accept or null for no limit
*/
public void setMinLength(Integer minLength) {
validator.setMinValue(minLength);
} | 3.68 |
flink_RichOrCondition_getLeft | /** @return One of the {@link IterativeCondition conditions} combined in this condition. */
public IterativeCondition<T> getLeft() {
return getNestedConditions()[0];
} | 3.68 |
rocketmq-connect_WorkerTask_currentTaskState | /**
* Current task state.
*
* @return the current task state
*/
public CurrentTaskState currentTaskState() {
return new CurrentTaskState(id().connector(), taskConfig, state.get());
} | 3.68 |
hadoop_FsStatus_write | //////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(capacity);
out.writeLong(used);
out.writeLong(remaining);
} | 3.68 |
hbase_OrderedBytes_isFixedFloat32 | /**
* Return true when the next encoded value in {@code src} uses fixed-width Float32 encoding, false
* otherwise.
*/
public static boolean isFixedFloat32(PositionedByteRange src) {
return FIXED_FLOAT32
== (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
} | 3.68 |
hadoop_RegistryPathUtils_createFullPath | /**
* Create a full path from the registry root and the supplied subdir
* @param path path of operation
* @return an absolute path
* @throws InvalidPathnameException if the path is invalid
*/
public static String createFullPath(String base, String path) throws
InvalidPathnameException {
Preconditions.checkArgument(path != null, "null path");
Preconditions.checkArgument(base != null, "null path");
return validateZKPath(join(base, path));
} | 3.68 |
hbase_RegionReplicaUtil_removeNonDefaultRegions | /**
* Removes the non-default replicas from the passed regions collection
*/
public static void removeNonDefaultRegions(Collection<RegionInfo> regions) {
Iterator<RegionInfo> iterator = regions.iterator();
while (iterator.hasNext()) {
RegionInfo hri = iterator.next();
if (!RegionReplicaUtil.isDefaultReplica(hri)) {
iterator.remove();
}
}
} | 3.68 |
hbase_RawFloat_encodeFloat | /**
* Write instance {@code val} into buffer {@code buff}.
*/
public int encodeFloat(byte[] buff, int offset, float val) {
return Bytes.putFloat(buff, offset, val);
} | 3.68 |
dubbo_StringUtils_isContains | /**
* @param values the array to search
* @param value the value to look for
* @return true if the array contains the value
*/
public static boolean isContains(String[] values, String value) {
if (isNotEmpty(value) && ArrayUtils.isNotEmpty(values)) {
for (String v : values) {
if (value.equals(v)) {
return true;
}
}
}
return false;
} | 3.68 |
hbase_MasterProcedureScheduler_markTableAsDeleted | /**
* Tries to remove the queue and the table-lock of the specified table. If there are new
* operations pending (e.g. a new create), the remove will not be performed.
* @param table the name of the table that should be marked as deleted
* @param procedure the procedure that is removing the table
* @return true if deletion succeeded, false otherwise meaning that there are other new operations
* pending for that table (e.g. a new create).
*/
boolean markTableAsDeleted(final TableName table, final Procedure<?> procedure) {
schedLock();
try {
final TableQueue queue = getTableQueue(table);
final LockAndQueue tableLock = locking.getTableLock(table);
if (queue == null) return true;
if (queue.isEmpty() && tableLock.tryExclusiveLock(procedure)) {
// remove the table from the run-queue and the map
if (AvlIterableList.isLinked(queue)) {
tableRunQueue.remove(queue);
}
removeTableQueue(table);
} else {
// TODO: If there are no create, we can drop all the other ops
return false;
}
} finally {
schedUnlock();
}
return true;
} | 3.68 |
flink_PlannerFactoryUtil_createPlanner | /** Discovers a planner factory and creates a planner instance. */
public static Planner createPlanner(
Executor executor,
TableConfig tableConfig,
ClassLoader userClassLoader,
ModuleManager moduleManager,
CatalogManager catalogManager,
FunctionCatalog functionCatalog) {
final PlannerFactory plannerFactory =
FactoryUtil.discoverFactory(
Thread.currentThread().getContextClassLoader(),
PlannerFactory.class,
PlannerFactory.DEFAULT_IDENTIFIER);
final Context context =
new DefaultPlannerContext(
executor,
tableConfig,
userClassLoader,
moduleManager,
catalogManager,
functionCatalog);
return plannerFactory.create(context);
} | 3.68 |
flink_SqlGatewayRestEndpointUtils_parseToken | /** Parse token from the result uri. */
public static @Nullable Long parseToken(@Nullable String nextResultUri) {
if (nextResultUri == null || nextResultUri.length() == 0) {
return null;
}
String[] split = nextResultUri.split("/");
// remove query string
String s = split[split.length - 1];
s = s.replaceAll("\\?.*", "");
return Long.valueOf(s);
} | 3.68 |
hibernate-validator_StringHelper_isNullOrEmptyString | /**
* Indicates if the string is null or empty, i.e. only contains whitespace.
*
* @param value the string considered
* @return true if the string is null or only contains whitespaces
*/
public static boolean isNullOrEmptyString(String value) {
return value == null || value.trim().isEmpty();
} | 3.68 |
hadoop_S3ListRequest_isV1 | /**
* Is this a v1 API request or v2?
* @return true if v1, false if v2
*/
public boolean isV1() {
return v1Request != null;
} | 3.68 |
flink_DataSet_rebalance | /**
* Enforces a re-balancing of the DataSet, i.e., the DataSet is evenly distributed over all
* parallel instances of the following task. This can help to improve performance in case of
* heavy data skew and compute intensive operations.
*
* <p><b>Important:</b>This operation shuffles the whole DataSet over the network and can take
* significant amount of time.
*
* @return The re-balanced DataSet.
*/
public PartitionOperator<T> rebalance() {
return new PartitionOperator<>(
this, PartitionMethod.REBALANCE, Utils.getCallLocationName());
} | 3.68 |
flink_HadoopInputs_readHadoopFile | /**
* Creates a Flink {@link InputFormat} that wraps the given Hadoop {@link
* org.apache.hadoop.mapreduce.lib.input.FileInputFormat}.
*
* @return A Flink InputFormat that wraps the Hadoop FileInputFormat.
*/
public static <K, V>
org.apache.flink.api.java.hadoop.mapreduce.HadoopInputFormat<K, V> readHadoopFile(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat<K, V>
mapreduceInputFormat,
Class<K> key,
Class<V> value,
String inputPath)
throws IOException {
return readHadoopFile(mapreduceInputFormat, key, value, inputPath, Job.getInstance());
} | 3.68 |
hbase_CoprocessorRpcUtils_get | /**
* Returns the parameter passed to {@link #run(Object)} or {@code null} if a null value was
* passed. When used asynchronously, this method will block until the {@link #run(Object)}
* method has been called.
* @return the response object or {@code null} if no response was passed
*/
public synchronized R get() throws IOException {
while (!resultSet) {
try {
this.wait();
} catch (InterruptedException ie) {
InterruptedIOException exception = new InterruptedIOException(ie.getMessage());
exception.initCause(ie);
throw exception;
}
}
return result;
} | 3.68 |
hbase_QuotaUtil_enableTableIfNotEnabled | /**
* Method to enable a table, if not already enabled. This method suppresses
* {@link TableNotDisabledException} and {@link TableNotFoundException}, if thrown while enabling
* the table.
* @param conn connection to re-use
* @param tableName name of the table to be enabled
*/
public static void enableTableIfNotEnabled(Connection conn, TableName tableName)
throws IOException {
try {
conn.getAdmin().enableTable(tableName);
} catch (TableNotDisabledException | TableNotFoundException e) {
// ignore
}
} | 3.68 |
hbase_ColumnSchemaModel_setName | /**
* @param name the table name
*/
public void setName(String name) {
this.name = name;
} | 3.68 |
hudi_BaseHoodieWriteClient_runAnyPendingLogCompactions | /**
* Run any pending log compactions.
*/
public void runAnyPendingLogCompactions() {
tableServiceClient.runAnyPendingLogCompactions(createTable(config, hadoopConf));
} | 3.68 |
hadoop_RequestFactoryImpl_withBucket | /**
* Target bucket.
* @param value new value
* @return the builder
*/
public RequestFactoryBuilder withBucket(final String value) {
bucket = value;
return this;
} | 3.68 |
hbase_RegionCoprocessorHost_postStoreFileReaderOpen | /**
* @param fs filesystem to read from
* @param p path to the file
* @param in {@link FSDataInputStreamWrapper}
* @param size Full size of the file
* @param r original reference file. This will be not null only when reading a split file.
* @param reader the base reader instance
* @return The reader to use
*/
public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p,
final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
final Reference r, final StoreFileReader reader) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return reader;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, StoreFileReader>(
regionObserverGetter, reader) {
@Override
public StoreFileReader call(RegionObserver observer) throws IOException {
return observer.postStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r, getResult());
}
});
} | 3.68 |
hmily_HmilyRepositoryEventPublisher_syncPublishEvent | /**
* Sync publish event.
*
* @param hmilyLocks the hmily locks
* @param type type
*/
public void syncPublishEvent(final Collection<HmilyLock> hmilyLocks, final int type) {
HmilyRepositoryEvent event = new HmilyRepositoryEvent();
event.setType(type);
event.setTransId(hmilyLocks.iterator().next().getTransId());
event.setHmilyLocks(hmilyLocks);
HmilyRepositoryEventDispatcher.getInstance().doDispatch(event);
} | 3.68 |