name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_IOStatisticsLogging_toString | /**
* Evaluate and stringify the statistics.
* @return a string value.
*/
@Override
public String toString() {
return statistics != null
? ioStatisticsToString(statistics)
: IOStatisticsBinding.NULL_SOURCE;
} | 3.68 |
hadoop_AllocationTags_getNamespace | /**
* @return the namespace of these tags.
*/
public TargetApplicationsNamespace getNamespace() {
return this.ns;
} | 3.68 |
flink_DelimitedInputFormat_setCharset | /**
* Set the name of the character set used for the row delimiter. This is also used by subclasses
* to interpret field delimiters, comment strings, and for configuring {@link FieldParser}s.
*
* <p>These fields are interpreted when set. Changing the charset thereafter may cause
* unexpected results.
*
* @param charset name of the charset
*/
@PublicEvolving
public void setCharset(String charset) {
this.charsetName = Preconditions.checkNotNull(charset);
this.charset = null;
if (this.delimiterString != null) {
this.delimiter = delimiterString.getBytes(getCharset());
}
} | 3.68 |
flink_ExponentialDelayRestartBackoffTimeStrategy_calculateJitterBackoffMS | /**
* Calculate jitter offset to avoid thundering herd scenario. The offset range increases with
* the number of restarts.
*
* <p>For example, for a backoff time of 8 with jitter 0.25, it generates a random number in the range [-2, 2].
*
* @return random value in interval [-n, n], where n represents jitter * current backoff
*/
private long calculateJitterBackoffMS() {
if (jitterFactor == 0) {
return 0;
} else {
long offset = (long) (currentBackoffMS * jitterFactor);
return ThreadLocalRandom.current().nextLong(-offset, offset + 1);
}
} | 3.68 |
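The jitter calculation above draws a uniform value in [-offset, offset]. A minimal standalone sketch of the same idea (the backoff and jitter values below are made up for illustration, and the helper is hypothetical rather than Flink's class):

```java
import java.util.concurrent.ThreadLocalRandom;

public class JitterSketch {
    // Returns a uniform random value in [-n, n], where n = backoffMs * jitterFactor.
    static long jitter(long backoffMs, double jitterFactor) {
        if (jitterFactor == 0) {
            return 0;
        }
        long offset = (long) (backoffMs * jitterFactor);
        // nextLong(origin, bound) excludes the bound, hence offset + 1.
        return ThreadLocalRandom.current().nextLong(-offset, offset + 1);
    }

    public static void main(String[] args) {
        // For backoff 8 and jitter 0.25 the offset is 2, so every value falls in [-2, 2].
        for (int i = 0; i < 5; i++) {
            System.out.println(jitter(8, 0.25));
        }
    }
}
```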
hadoop_DynamicIOStatistics_addCounterFunction | /**
* add a mapping of a key to a counter function.
* @param key the key
* @param eval the evaluator
*/
void addCounterFunction(String key, Function<String, Long> eval) {
counters.addFunction(key, eval);
} | 3.68 |
framework_AbstractBeanContainer_addItemAt | /**
* Adds a new bean at the given index.
*
* The bean is used both as the item contents and as the item identifier.
*
* @param index
* Index at which the bean should be added.
* @param newItemId
* The item id for the bean to add to the container.
* @param bean
* The bean to add to the container.
*
* @return Returns the new BeanItem or null if the operation fails.
*/
protected BeanItem<BEANTYPE> addItemAt(int index, IDTYPE newItemId,
BEANTYPE bean) {
if (!validateBean(bean)) {
return null;
}
return internalAddItemAt(index, newItemId, createBeanItem(bean), true);
} | 3.68 |
flink_SupportsRowLevelDelete_getRowLevelDeleteMode | /**
* Planner will rewrite the delete statement to a query based on the {@link RowLevelDeleteInfo},
* keeping the delete query unchanged by default (in {@code DELETED_ROWS} mode), or changing the
* query to the complementary set in {@code REMAINING_ROWS} mode.
*
* <p>Take the following SQL as an example:
*
* <pre>{@code
* DELETE FROM t WHERE y = 2;
* }</pre>
*
* <p>If returns {@link SupportsRowLevelDelete.RowLevelDeleteMode#DELETED_ROWS}, the sink
* will get the rows to be deleted which match the filter [y = 2].
*
* <p>If returns {@link SupportsRowLevelDelete.RowLevelDeleteMode#REMAINING_ROWS}, the sink
* will get the rows which don't match the filter [y = 2].
*
* <p>Note: All rows will be of RowKind#DELETE when RowLevelDeleteMode is DELETED_ROWS, and
* RowKind#INSERT when RowLevelDeleteMode is REMAINING_ROWS.
*/
default RowLevelDeleteMode getRowLevelDeleteMode() {
return RowLevelDeleteMode.DELETED_ROWS;
} | 3.68 |
shardingsphere-elasticjob_SchedulerFacade_newJobTriggerListener | /**
* Create job trigger listener.
*
* @return job trigger listener
*/
public JobTriggerListener newJobTriggerListener() {
return new JobTriggerListener(executionService, shardingService);
} | 3.68 |
framework_AbstractMedia_isLoop | /**
* @return true if looping is enabled
* @since 7.7.11
*/
public boolean isLoop() {
return getState(false).loop;
} | 3.68 |
hbase_MiniHBaseCluster_startRegionServerAndWait | /**
* Starts a region server thread and waits until it is processed by the master. Throws an exception
* when it can't start a region server or when the region server is not processed by the master
* within the timeout.
* @return New RegionServerThread
*/
public JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout)
throws IOException {
JVMClusterUtil.RegionServerThread t = startRegionServer();
ServerName rsServerName = t.getRegionServer().getServerName();
long start = EnvironmentEdgeManager.currentTime();
ClusterMetrics clusterStatus = getClusterMetrics();
while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
if (clusterStatus != null && clusterStatus.getLiveServerMetrics().containsKey(rsServerName)) {
return t;
}
Threads.sleep(100);
}
if (t.getRegionServer().isOnline()) {
throw new IOException("RS: " + rsServerName + " online, but not processed by master");
} else {
throw new IOException("RS: " + rsServerName + " is offline");
}
} | 3.68 |
flink_ExecutionEnvironment_readTextFileWithValue | /**
* Creates a {@link DataSet} that represents the Strings produced by reading the given file line
* wise. This method is similar to {@link #readTextFile(String, String)}, but it produces a
* DataSet with mutable {@link StringValue} objects, rather than Java Strings. StringValues can
* be used to tune implementations to be less object and garbage collection heavy.
*
* <p>The {@link java.nio.charset.Charset} with the given name will be used to read the files.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param charsetName The name of the character set used to read the file.
* @param skipInvalidLines A flag to indicate whether to skip lines that cannot be read with the
* given character set.
* @return A DataSet that represents the data read from the given file as text lines.
*/
public DataSource<StringValue> readTextFileWithValue(
String filePath, String charsetName, boolean skipInvalidLines) {
Preconditions.checkNotNull(filePath, "The file path may not be null.");
TextValueInputFormat format = new TextValueInputFormat(new Path(filePath));
format.setCharsetName(charsetName);
format.setSkipInvalidLines(skipInvalidLines);
return new DataSource<>(
this, format, new ValueTypeInfo<>(StringValue.class), Utils.getCallLocationName());
} | 3.68 |
hudi_FileSlice_isEmpty | /**
* Returns true if there is no data file and no log files. Happens as part of pending compaction.
*/
public boolean isEmpty() {
return (baseFile == null) && (logFiles.isEmpty());
} | 3.68 |
framework_Label_compareTo | /**
* Compares the Label to other objects.
*
* <p>
* Labels can be compared to other labels for sorting label contents. This
* is especially handy for sorting table columns.
* </p>
*
* <p>
* In RAW, PREFORMATTED and TEXT modes, the label contents are compared as
* is. In XML, UIDL and HTML modes, only CDATA is compared and tags ignored.
* If the other object is not a Label, its toString() return value is used
* in comparison.
* </p>
*
* @param other
* the Other object to compare to.
* @return a negative integer, zero, or a positive integer as this object is
* less than, equal to, or greater than the specified object.
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Label other) {
String thisValue = getComparableValue();
String otherValue = other.getComparableValue();
return thisValue.compareTo(otherValue);
} | 3.68 |
flink_Task_releaseResources | /**
* Releases resources before the task exits. We should also fail the partitions being released if
* the task has failed, is canceled, or is being canceled at the moment.
*/
private void releaseResources() {
LOG.debug(
"Release task {} network resources (state: {}).",
taskNameWithSubtask,
getExecutionState());
for (ResultPartitionWriter partitionWriter : partitionWriters) {
taskEventDispatcher.unregisterPartition(partitionWriter.getPartitionId());
}
// close network resources
if (isCanceledOrFailed()) {
failAllResultPartitions();
}
closeAllResultPartitions();
closeAllInputGates();
try {
taskStateManager.close();
} catch (Exception e) {
LOG.error("Failed to close task state manager for task {}.", taskNameWithSubtask, e);
}
} | 3.68 |
hbase_SpaceLimitSettings_getProto | /**
* Returns a copy of the internal state of <code>this</code>
*/
SpaceLimitRequest getProto() {
return proto.toBuilder().build();
} | 3.68 |
hadoop_TypedBytesInput_skipType | /**
* Skips a type byte.
* @return true iff the end of the file was not reached
* @throws IOException
*/
public boolean skipType() throws IOException {
try {
in.readByte();
return true;
} catch (EOFException eof) {
return false;
}
} | 3.68 |
framework_AbstractComponent_getDebugId | /**
* @deprecated As of 7.0. Use {@link #getId()}
*/
@Deprecated
public String getDebugId() {
return getId();
} | 3.68 |
morf_AliasedField_divideBy | /**
* @param expression value to use as the denominator.
* @return A new expression using {@link MathsField} and {@link MathsOperator#DIVIDE}.
*/
public final MathsField divideBy(AliasedField expression) {
return new MathsField(this, MathsOperator.DIVIDE, potentiallyBracketExpression(expression));
} | 3.68 |
hibernate-validator_AbstractMethodOverrideCheck_collectOverriddenMethods | /**
* Collect all the overridden elements of the inheritance tree.
*
* @param overridingMethod the method for which we want to find the overridden methods
* @param currentTypeElement the class we are analyzing
* @param methodInheritanceTreeBuilder the method inheritance tree builder
*/
private void collectOverriddenMethods( ExecutableElement overridingMethod, TypeElement currentTypeElement,
MethodInheritanceTree.Builder methodInheritanceTreeBuilder) {
if ( isJavaLangObjectOrNull( currentTypeElement ) ) {
return;
}
collectOverriddenMethodsInInterfaces( overridingMethod, currentTypeElement, methodInheritanceTreeBuilder );
TypeElement superclassTypeElement = (TypeElement) typeUtils.asElement( currentTypeElement.getSuperclass() );
if ( superclassTypeElement == null ) {
return;
}
ExecutableElement overriddenMethod = getOverriddenMethod( overridingMethod, superclassTypeElement );
if ( overriddenMethod != null ) {
methodInheritanceTreeBuilder.addOverriddenMethod( overridingMethod, overriddenMethod );
overridingMethod = overriddenMethod;
}
collectOverriddenMethods( overridingMethod, superclassTypeElement, methodInheritanceTreeBuilder );
} | 3.68 |
hbase_MemStoreCompactor_start | /**
* ----------------------------------------------------------------------
* Requests dispatch of the asynchronous compaction task. The method returns true if the compaction
* was successfully dispatched, or false if there is already an ongoing compaction or no segments to
* compact.
*/
public boolean start() throws IOException {
if (!compactingMemStore.hasImmutableSegments()) { // no compaction on empty pipeline
return false;
}
// get a snapshot of the list of the segments from the pipeline,
// this local copy of the list is marked with specific version
versionedList = compactingMemStore.getImmutableSegments();
LOG.trace("Speculative compaction starting on {}/{}",
compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
compactingMemStore.getStore().getColumnFamilyName());
HStore store = compactingMemStore.getStore();
RegionCoprocessorHost cpHost = store.getCoprocessorHost();
if (cpHost != null) {
cpHost.preMemStoreCompaction(store);
}
try {
doCompaction();
} finally {
if (cpHost != null) {
cpHost.postMemStoreCompaction(store);
}
}
return true;
} | 3.68 |
hibernate-validator_PESELValidator_year | /**
* 1800–1899 - 80
* 1900–1999 - 00
* 2000–2099 - 20
* 2100–2199 - 40
* 2200–2299 - 60
*/
private int year(int year, int centuryCode) {
switch ( centuryCode ) {
case 4: return 1800 + year;
case 0: return 1900 + year;
case 1: return 2000 + year;
case 2: return 2100 + year;
case 3: return 2200 + year;
default:
throw new IllegalStateException( "Invalid century code." );
}
} | 3.68 |
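In a PESEL number the century is encoded as an offset added to the month digits, which the validator turns into a century code before calling the method above. Below is a standalone sketch of the same mapping; the month-offset comments are an assumption based on the public PESEL specification, not taken from the validator itself:

```java
public class PeselCenturySketch {
    // Mirrors the switch above: maps two year digits plus a century code to a full year.
    static int year(int yearDigits, int centuryCode) {
        switch (centuryCode) {
            case 4: return 1800 + yearDigits; // months stored as 81..92
            case 0: return 1900 + yearDigits; // months stored as 01..12
            case 1: return 2000 + yearDigits; // months stored as 21..32
            case 2: return 2100 + yearDigits; // months stored as 41..52
            case 3: return 2200 + yearDigits; // months stored as 61..72
            default: throw new IllegalStateException("Invalid century code.");
        }
    }

    public static void main(String[] args) {
        // Year digits 02 with century code 1 decode to 2002.
        System.out.println(year(2, 1)); // 2002
    }
}
```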
hudi_Key_getBytes | /**
* @return byte[] The value of <i>this</i> key.
*/
public byte[] getBytes() {
return this.bytes;
} | 3.68 |
druid_DruidDataSourceBuilder_build | /**
* For issue #1796: build a DruidDataSource from the Spring Environment using the specified configuration property prefix.
* <p>
* This method is provided for compatibility with Spring Boot 1.X, where properties declared in a .properties file are
* not bound in declaration order, which can lead to misconfiguration (issue #1796). If you are not affected by this
* problem, or you configure via .yml, you do not need this method; use {@link DruidDataSourceBuilder#build} above instead.
* <p>
* Since Spring Boot 2.0 has fixed the issue, this method will be removed in future versions.
*
* @see DruidDataSourceWrapper#setMaxEvictableIdleTimeMillis(long)
*/
@Deprecated
public DruidDataSource build(Environment env, String prefix) {
DruidDataSource druidDataSource = new DruidDataSourceWrapper();
druidDataSource.setMinEvictableIdleTimeMillis(
env.getProperty(prefix + "min-evictable-idle-time-millis",
Long.class,
DruidDataSource.DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS));
druidDataSource.setMaxEvictableIdleTimeMillis(
env.getProperty(prefix + "max-evictable-idle-time-millis",
Long.class,
DruidDataSource.DEFAULT_MAX_EVICTABLE_IDLE_TIME_MILLIS));
return druidDataSource;
} | 3.68 |
querydsl_SQLExpressions_addYears | /**
* Add the given amount of years to the date
*
* @param date date
* @param years years to add
* @return converted date
*/
public static <D extends Comparable> DateExpression<D> addYears(DateExpression<D> date, int years) {
return Expressions.dateOperation(date.getType(), Ops.DateTimeOps.ADD_YEARS, date, ConstantImpl.create(years));
} | 3.68 |
hbase_WALSplitUtil_getSplitEditFilesSorted | /**
* Returns sorted set of edit files made by splitter, excluding files with '.temp' suffix.
* @param walFS WAL FileSystem used to retrieving split edits files.
* @param regionDir WAL region dir to look for recovered edits files under.
* @return Files in passed <code>regionDir</code> as a sorted set.
*/
public static NavigableSet<Path> getSplitEditFilesSorted(final FileSystem walFS,
final Path regionDir) throws IOException {
NavigableSet<Path> filesSorted = new TreeSet<>();
Path editsdir = getRegionDirRecoveredEditsDir(regionDir);
if (!walFS.exists(editsdir)) {
return filesSorted;
}
FileStatus[] files = CommonFSUtils.listStatus(walFS, editsdir, new PathFilter() {
@Override
public boolean accept(Path p) {
boolean result = false;
try {
// Return files and only files that match the editfile names pattern.
// There can be other files in this directory other than edit files.
// In particular, on error, we'll move aside the bad edit file giving
// it a timestamp suffix. See moveAsideBadEditsFile.
Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
result = walFS.isFile(p) && m.matches();
// Skip the file whose name ends with RECOVERED_LOG_TMPFILE_SUFFIX,
// because it means the split-WAL thread is still writing this file.
if (p.getName().endsWith(RECOVERED_LOG_TMPFILE_SUFFIX)) {
result = false;
}
// Skip SeqId Files
if (isSequenceIdFile(p)) {
result = false;
}
} catch (IOException e) {
LOG.warn("Failed isFile check on {}", p, e);
}
return result;
}
});
if (ArrayUtils.isNotEmpty(files)) {
Arrays.asList(files).forEach(status -> filesSorted.add(status.getPath()));
}
return filesSorted;
} | 3.68 |
hbase_OnlineLogRecord_setBlockBytesScanned | /**
* Sets the amount of block bytes scanned to retrieve the response cells.
*/
public OnlineLogRecordBuilder setBlockBytesScanned(long blockBytesScanned) {
this.blockBytesScanned = blockBytesScanned;
return this;
} | 3.68 |
hbase_ObjectPool_size | /**
* Returns an estimated count of objects kept in the pool. This also counts stale references, and
* you might want to call {@link #purge()} beforehand.
*/
public int size() {
return referenceCache.size();
} | 3.68 |
hbase_TableInputFormat_addColumns | /**
* Convenience method to parse a string representation of an array of column specifiers.
* @param scan The Scan to update.
* @param columns The columns to parse.
*/
private static void addColumns(Scan scan, String columns) {
String[] cols = columns.split(" ");
for (String col : cols) {
addColumn(scan, Bytes.toBytes(col));
}
} | 3.68 |
flink_InternalServiceDecorator_getNamespacedInternalServiceName | /** Generate namespaced name of the internal Service. */
public static String getNamespacedInternalServiceName(String clusterId, String namespace) {
return getInternalServiceName(clusterId) + "." + namespace;
} | 3.68 |
AreaShop_GeneralRegion_needsPeriodicUpdate | /**
* Check if a sign needs periodic updating.
* @return true if the signs of this region need periodic updating, otherwise false
*/
public boolean needsPeriodicUpdate() {
return !(isDeleted() || !(this instanceof RentRegion)) && getSignsFeature().needsPeriodicUpdate();
} | 3.68 |
flink_WindowOperator_registerCleanupTimer | /**
* Registers a timer to cleanup the content of the window.
*
* @param window the window whose state to discard
*/
private void registerCleanupTimer(W window) {
long cleanupTime = toEpochMillsForTimer(cleanupTime(window), shiftTimeZone);
if (cleanupTime == Long.MAX_VALUE) {
// don't set a GC timer for "end of time"
return;
}
if (windowAssigner.isEventTime()) {
triggerContext.registerEventTimeTimer(cleanupTime);
} else {
triggerContext.registerProcessingTimeTimer(cleanupTime);
}
} | 3.68 |
hbase_WALActionsListener_preLogArchive | /**
* The WAL is going to be archived.
* @param oldPath the path to the old wal
* @param newPath the path to the new wal
*/
default void preLogArchive(Path oldPath, Path newPath) throws IOException {
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithNestedEqualityCheck | /**
* Tests that strange equality behaviour is maintained.
*/
@Test
public void testSelectWithNestedEqualityCheck() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(TEST_TABLE))
.where(eq(new FieldReference(BOOLEAN_FIELD), eq(new FieldReference(CHAR_FIELD), "Y")));
String value = varCharCast("'Y'");
String expectedSql = "SELECT stringField FROM " + tableName(TEST_TABLE) + " WHERE (booleanField = (charField = " + stringLiteralPrefix() + value + "))";
assertEquals("Select with nested equality check", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
framework_VLayoutSlot_getExpandRatio | /**
* Get the expand ratio for the slot. The expand ratio describes how the
* slot should be resized compared to other slots in the layout.
*
* @return the expand ratio of the slot
*
* @see #setExpandRatio(double)
*
* @deprecated this value isn't used for anything by default
*/
@Deprecated
public double getExpandRatio() {
return expandRatio;
} | 3.68 |
flink_TableChange_set | /**
* A table change to set the table option.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> SET '<key>' = '<value>';
* </pre>
*
* @param key the option name to set.
* @param value the option value to set.
* @return a TableChange represents the modification.
*/
static SetOption set(String key, String value) {
return new SetOption(key, value);
} | 3.68 |
cron-utils_CronDefinitionBuilder_instance | /**
* Creates a new CronDefinition instance with provided field definitions.
*
* @return returns CronDefinition instance, never null
*/
public CronDefinition instance() {
final Set<CronConstraint> validations = new HashSet<>();
validations.addAll(cronConstraints);
final List<FieldDefinition> values = new ArrayList<>(fields.values());
values.sort(FieldDefinition.createFieldDefinitionComparator());
return new CronDefinition(values, validations, cronNicknames, matchDayOfWeekAndDayOfMonth);
} | 3.68 |
hbase_BucketCache_putIntoBackingMap | /**
* Put the new bucket entry into backingMap. Notice that we are allowed to replace the existing
* cache entry with a new block for the same cache key. There is a corner case: one thread caches a
* block in ramCache, copies it to the io-engine and adds a bucket entry to backingMap; another
* thread caching a new block with the same cache key does the same thing. If the previous bucket
* entry is not evicted, a memory leak happens because the previous bucketEntry is gone but the
* bucketAllocator does not free its memory.
* @see BlockCacheUtil#shouldReplaceExistingCacheBlock(BlockCache blockCache,BlockCacheKey
* cacheKey, Cacheable newBlock)
* @param key Block cache key
* @param bucketEntry Bucket entry to put into backingMap.
*/
protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) {
BucketEntry previousEntry = backingMap.put(key, bucketEntry);
if (previousEntry != null && previousEntry != bucketEntry) {
previousEntry.withWriteLock(offsetLock, () -> {
blockEvicted(key, previousEntry, false, false);
return null;
});
}
} | 3.68 |
hbase_ClassSize_align | /**
* Aligns a number to 8.
* @param num number to align to 8
* @return smallest number >= input that is a multiple of 8
*/
public static long align(long num) {
return memoryLayout.align(num);
} | 3.68 |
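A minimal sketch of the usual round-up-to-a-multiple-of-8 arithmetic such an align method performs; the bit trick below is a standard idiom and only an assumption about what `memoryLayout.align` does internally:

```java
public class AlignSketch {
    // Rounds num up to the nearest multiple of 8.
    static long align(long num) {
        return (num + 7L) & ~7L;
    }

    public static void main(String[] args) {
        System.out.println(align(13)); // 16
        System.out.println(align(16)); // 16 (already aligned)
        System.out.println(align(0));  // 0
    }
}
```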
flink_ServerConnection_sendRequest | /**
* Returns a future holding the serialized request result.
*
* @param request the request to be sent.
* @return Future holding the serialized result
*/
@Override
public CompletableFuture<RESP> sendRequest(REQ request) {
synchronized (lock) {
if (running) {
EstablishedConnection.TimestampedCompletableFuture<RESP> requestPromiseTs =
new EstablishedConnection.TimestampedCompletableFuture<>(
System.nanoTime());
try {
final long requestId = requestCount++;
pendingRequests.put(requestId, requestPromiseTs);
stats.reportRequest();
ByteBuf buf =
MessageSerializer.serializeRequest(
channel.alloc(), requestId, request);
channel.writeAndFlush(buf)
.addListener(
(ChannelFutureListener)
future -> {
if (!future.isSuccess()) {
// Fail the promise if the write failed
EstablishedConnection
.TimestampedCompletableFuture<
RESP>
pending =
pendingRequests.remove(
requestId);
if (pending != null
&& pending.completeExceptionally(
future.cause())) {
stats.reportFailedRequest();
}
}
});
} catch (Throwable t) {
requestPromiseTs.completeExceptionally(t);
}
return requestPromiseTs;
} else {
return FutureUtils.completedExceptionally(new ClosedChannelException());
}
}
} | 3.68 |
framework_StringToByteConverter_getModelType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getModelType()
*/
@Override
public Class<Byte> getModelType() {
return Byte.class;
} | 3.68 |
graphhopper_OSMNodeData_getId | /**
* @return the internal id stored for the given OSM node id. use {@link #isTowerNode} etc. to find out what this
* id means
*/
public long getId(long osmNodeId) {
return idsByOsmNodeIds.get(osmNodeId);
} | 3.68 |
hadoop_SecureStorageInterfaceImpl_getLeaseCondition | /**
* Return an access condition for this lease, or else null if
* there's no lease.
*/
private AccessCondition getLeaseCondition(SelfRenewingLease lease) {
AccessCondition leaseCondition = null;
if (lease != null) {
leaseCondition = AccessCondition.generateLeaseCondition(lease.getLeaseID());
}
return leaseCondition;
} | 3.68 |
hudi_RunLengthDecoder_readNextGroup | /**
* Reads the next group.
*/
void readNextGroup() {
try {
int header = readUnsignedVarInt();
this.mode = (header & 1) == 0 ? MODE.RLE : MODE.PACKED;
switch (mode) {
case RLE:
this.currentCount = header >>> 1;
this.currentValue = readIntLittleEndianPaddedOnBitWidth();
return;
case PACKED:
int numGroups = header >>> 1;
this.currentCount = numGroups * 8;
if (this.currentBuffer.length < this.currentCount) {
this.currentBuffer = new int[this.currentCount];
}
currentBufferIdx = 0;
int valueIndex = 0;
while (valueIndex < this.currentCount) {
// values are bit packed 8 at a time, so reading bitWidth will always work
ByteBuffer buffer = in.slice(bitWidth);
this.packer.unpack8Values(buffer, buffer.position(), this.currentBuffer, valueIndex);
valueIndex += 8;
}
return;
default:
throw new ParquetDecodingException("not a valid mode " + this.mode);
}
} catch (IOException e) {
throw new ParquetDecodingException("Failed to read from input stream", e);
}
} | 3.68 |
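The header handling above follows the Parquet hybrid RLE/bit-packing layout: the low bit of the unsigned varint header selects the mode, and the remaining bits carry either the run length or the number of 8-value groups. A small sketch of just that header interpretation, with made-up header values:

```java
public class RleHeaderSketch {
    public static void main(String[] args) {
        int[] headers = {20, 7}; // example header values
        for (int header : headers) {
            boolean rle = (header & 1) == 0; // low bit 0 -> RLE, 1 -> bit-packed
            int payload = header >>> 1;      // remaining bits
            if (rle) {
                System.out.println("RLE run of " + payload + " repeated values");
            } else {
                System.out.println("bit-packed: " + payload + " groups = " + (payload * 8) + " values");
            }
        }
        // header 20 (binary 10100) -> RLE run of 10; header 7 (binary 111) -> 3 groups = 24 values
    }
}
```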
flink_LimitedConnectionsFileSystem_getStreamOpenTimeout | /**
* Gets the number of milliseconds that opening a stream may wait for availability in the
* connection pool.
*/
public long getStreamOpenTimeout() {
return streamOpenTimeoutNanos / 1_000_000;
} | 3.68 |
hadoop_IOStatisticsSnapshot_clear | /**
* Clear all the maps.
*/
public synchronized void clear() {
counters.clear();
gauges.clear();
minimums.clear();
maximums.clear();
meanStatistics.clear();
} | 3.68 |
framework_LegacyCommunicationManager_getClientCache | /**
* @deprecated As of 7.1. See #11410.
*/
@Deprecated
public ClientCache getClientCache(UI uI) {
Integer uiId = Integer.valueOf(uI.getUIId());
ClientCache cache = uiToClientCache.get(uiId);
if (cache == null) {
cache = new ClientCache();
uiToClientCache.put(uiId, cache);
uI.addDetachListener(event -> removeClientCache(uI));
}
return cache;
} | 3.68 |
framework_AbstractFieldConnector_isRequired | /**
* Checks whether the required indicator should be shown for the field.
* Required indicators are hidden if the field or its data source is
* read-only.
* <p>
* NOTE: since 8.0 this only delegates to
* {@link #isRequiredIndicatorVisible()}, and is left for legacy reasons.
*
* @deprecated Use {@link #isRequiredIndicatorVisible()} instead.
*
* @return true if required indicator should be shown
*/
@Deprecated
public boolean isRequired() {
return isRequiredIndicatorVisible();
} | 3.68 |
framework_DateFieldElement_setDate | /**
* Sets the value to the given date and time.
*
* @param value
* the date and time to set.
*/
public void setDate(LocalDate value) {
setISOValue(value.format(getISOFormatter()));
} | 3.68 |
framework_HierarchyMapper_doFetchDirectChildren | /**
* Generic method for finding direct children of a given parent, limited by
* given range.
*
* @param parent
* the parent
* @param range
* the range of direct children to return
* @return the requested children of the given parent
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private Stream<T> doFetchDirectChildren(T parent, Range range) {
return getDataProvider().fetchChildren(new HierarchicalQuery(
range.getStart(), range.length(), getBackEndSorting(),
getInMemorySorting(), getFilter(), parent));
} | 3.68 |
hadoop_SysInfoWindows_getAvailableVirtualMemorySize | /** {@inheritDoc} */
@Override
public long getAvailableVirtualMemorySize() {
refreshIfNeeded();
return vmemAvailable;
} | 3.68 |
hbase_UserProvider_reload | // Provide the reload function that uses the executor thread.
@Override
public ListenableFuture<String[]> reload(final String k, String[] oldValue)
throws Exception {
return executor.submit(new Callable<String[]>() {
@Override
public String[] call() throws Exception {
return getGroupStrings(k);
}
});
} | 3.68 |
hbase_SimpleRegionNormalizer_isMergeEnabled | /**
* Return this instance's configured value for {@value #MERGE_ENABLED_KEY}.
*/
public boolean isMergeEnabled() {
return normalizerConfiguration.isMergeEnabled();
} | 3.68 |
querydsl_Expressions_asDateTime | /**
* Create a new DateTimeExpression
*
* @param value the date time
* @return new DateTimeExpression
*/
public static <T extends Comparable<?>> DateTimeExpression<T> asDateTime(T value) {
return asDateTime(constant(value));
} | 3.68 |
hadoop_ExponentialRetryPolicy_getRetryInterval | /**
* Returns a backoff interval between 80% and 120% of the desired backoff,
* multiplied by 2^(retryCount - 1) for exponential growth.
*
* @param retryCount The current retry attempt count.
* @return backoff Interval time
*/
public long getRetryInterval(final int retryCount) {
final long boundedRandDelta = (int) (this.deltaBackoff * MIN_RANDOM_RATIO)
+ this.randRef.nextInt((int) (this.deltaBackoff * MAX_RANDOM_RATIO)
- (int) (this.deltaBackoff * MIN_RANDOM_RATIO));
final double incrementDelta = (Math.pow(2, retryCount - 1)) * boundedRandDelta;
final long retryInterval = (int) Math.round(Math.min(this.minBackoff + incrementDelta, maxBackoff));
return retryInterval;
} | 3.68 |
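As a worked example of the formula above, with made-up configuration values (the real policy draws `boundedRandDelta` randomly between 80% and 120% of `deltaBackoff`):

```java
public class RetryIntervalSketch {
    public static void main(String[] args) {
        long minBackoff = 3_000;        // ms, assumed value
        long maxBackoff = 30_000;       // ms, assumed value
        long boundedRandDelta = 1_000;  // ms, one possible random draw

        for (int retryCount = 1; retryCount <= 5; retryCount++) {
            double incrementDelta = Math.pow(2, retryCount - 1) * boundedRandDelta;
            long interval = Math.round(Math.min(minBackoff + incrementDelta, maxBackoff));
            // retry 1 -> 4000, 2 -> 5000, 3 -> 7000, 4 -> 11000, 5 -> 19000 ms
            System.out.println("retry " + retryCount + " -> " + interval + " ms");
        }
    }
}
```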
hbase_CacheStats_getDataMissCount | // All of the counts of misses and hits.
public long getDataMissCount() {
return dataMissCount.sum();
} | 3.68 |
hbase_ZKLeaderManager_stepDownAsLeader | /**
* Removes the leader znode, if it is currently claimed by this instance.
*/
public void stepDownAsLeader() {
try {
synchronized (lock) {
if (!leaderExists.get()) {
return;
}
byte[] leaderId = ZKUtil.getData(watcher, leaderZNode);
if (leaderId != null && Bytes.equals(nodeId, leaderId)) {
LOG.info("Stepping down as leader");
ZKUtil.deleteNodeFailSilent(watcher, leaderZNode);
leaderExists.set(false);
} else {
LOG.info("Not current leader, no need to step down");
}
}
} catch (KeeperException ke) {
watcher.abort("Unhandled zookeeper exception removing leader node", ke);
candidate.stop("Unhandled zookeeper exception removing leader node: " + ke.getMessage());
} catch (InterruptedException e) {
watcher.abort("Unhandled zookeeper exception removing leader node", e);
candidate.stop("Unhandled zookeeper exception removing leader node: " + e.getMessage());
}
} | 3.68 |
flink_BinarySegmentUtils_getDouble | /**
* get double from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static double getDouble(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getDouble(offset);
} else {
return getDoubleMultiSegments(segments, offset);
}
} | 3.68 |
hadoop_TwoColumnLayout_nav | /**
* @return the class that will render the navigation bar.
*/
protected Class<? extends SubView> nav() {
return NavBlock.class;
} | 3.68 |
hudi_WriteOperationType_fromValue | /**
* Convert string value to WriteOperationType.
*/
public static WriteOperationType fromValue(String value) {
switch (value.toLowerCase(Locale.ROOT)) {
case "insert":
return INSERT;
case "insert_prepped":
return INSERT_PREPPED;
case "upsert":
return UPSERT;
case "upsert_prepped":
return UPSERT_PREPPED;
case "bulk_insert":
return BULK_INSERT;
case "bulk_insert_prepped":
return BULK_INSERT_PREPPED;
case "delete":
return DELETE;
case "delete_prepped":
return DELETE_PREPPED;
case "insert_overwrite":
return INSERT_OVERWRITE;
case "delete_partition":
return DELETE_PARTITION;
case "insert_overwrite_table":
return INSERT_OVERWRITE_TABLE;
case "cluster":
return CLUSTER;
case "compact":
return COMPACT;
case "index":
return INDEX;
case "alter_schema":
return ALTER_SCHEMA;
case "unknown":
return UNKNOWN;
default:
throw new HoodieException("Invalid value of Type.");
}
} | 3.68 |
morf_HumanReadableStatementHelper_generateUniqueIndexString | /**
* Generates a unique / non-unique string for the specified index definition.
*
* @param definition the definition of the index
* @return a string representation of unique / non-unique
*/
private static String generateUniqueIndexString(final Index definition) {
return definition.isUnique() ? "unique" : "non-unique";
} | 3.68 |
hbase_Addressing_createHostAndPortStr | /**
* Create a host-and-port string
* @param hostname Server hostname
* @param port Server port
* @return Returns a concatenation of <code>hostname</code> and <code>port</code> in following
* form: <code><hostname> ':' <port></code>. For example, if hostname is
* <code>example.org</code> and port is 1234, this method will return
* <code>example.org:1234</code>
*/
public static String createHostAndPortStr(final String hostname, final int port) {
return hostname + HOSTNAME_PORT_SEPARATOR + port;
} | 3.68 |
flink_FutureUtils_doForward | /**
* Completes the given future with either the given value or throwable, depending on which
* parameter is not null.
*
* @param value value with which the future should be completed
* @param throwable throwable with which the future should be completed exceptionally
* @param target future to complete
* @param <T> completed future
*/
public static <T> void doForward(
@Nullable T value, @Nullable Throwable throwable, CompletableFuture<T> target) {
if (throwable != null) {
target.completeExceptionally(throwable);
} else {
target.complete(value);
}
} | 3.68 |
hudi_HDFSParquetImporterUtils_createHoodieClient | /**
* Build Hoodie write client.
*
* @param jsc Java Spark Context
* @param basePath Base Path
* @param schemaStr Schema
* @param parallelism Parallelism
*/
public static SparkRDDWriteClient<HoodieRecordPayload> createHoodieClient(JavaSparkContext jsc, String basePath, String schemaStr,
int parallelism, Option<String> compactionStrategyClass, TypedProperties properties) {
HoodieCompactionConfig compactionConfig = compactionStrategyClass
.map(strategy -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false)
.withCompactionStrategy(ReflectionUtils.loadClass(strategy)).build())
.orElse(HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
HoodieWriteConfig config =
HoodieWriteConfig.newBuilder().withPath(basePath)
.withParallelism(parallelism, parallelism)
.withBulkInsertParallelism(parallelism)
.withDeleteParallelism(parallelism)
.withSchema(schemaStr).combineInput(true, true).withCompactionConfig(compactionConfig)
.withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.BLOOM).build())
.withProps(properties).build();
return new SparkRDDWriteClient<>(new HoodieSparkEngineContext(jsc), config);
} | 3.68 |
framework_AbstractSingleSelect_keyToItem | /**
* Returns the item that the given key is assigned to, or {@code null} if
* there is no such item.
*
* @param key
* the key whose item to return
* @return the associated item if any, {@code null} otherwise.
*/
protected T keyToItem(String key) {
return getDataCommunicator().getKeyMapper().get(key);
} | 3.68 |
flink_BinarySegmentUtils_copyToUnsafe | /**
* Copy segments to target unsafe pointer.
*
* @param segments Source segments.
* @param offset The position where the bytes are started to be read from these memory segments.
* @param target The unsafe memory to copy the bytes to.
* @param pointer The position in the target unsafe memory to copy the chunk to.
* @param numBytes the number bytes to copy.
*/
public static void copyToUnsafe(
MemorySegment[] segments, int offset, Object target, int pointer, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].copyToUnsafe(offset, target, pointer, numBytes);
} else {
copyMultiSegmentsToUnsafe(segments, offset, target, pointer, numBytes);
}
} | 3.68 |
zxing_State_latchAndAppend | // Create a new state representing this state with a latch to a (not
// necessarily different) mode, and then a code.
State latchAndAppend(int mode, int value) {
int bitCount = this.bitCount;
Token token = this.token;
if (mode != this.mode) {
int latch = HighLevelEncoder.LATCH_TABLE[this.mode][mode];
token = token.add(latch & 0xFFFF, latch >> 16);
bitCount += latch >> 16;
}
int latchModeBitCount = mode == HighLevelEncoder.MODE_DIGIT ? 4 : 5;
token = token.add(value, latchModeBitCount);
return new State(token, mode, 0, bitCount + latchModeBitCount);
} | 3.68 |
framework_AbstractOrderedLayoutConnector_onStateChanged | /*
* (non-Javadoc)
*
* @see
* com.vaadin.client.ui.AbstractComponentConnector#onStateChanged(com.vaadin
* .client.communication.StateChangeEvent)
*/
@SuppressWarnings("deprecation")
@Override
public void onStateChanged(StateChangeEvent stateChangeEvent) {
super.onStateChanged(stateChangeEvent);
clickEventHandler.handleEventHandlerRegistration();
getWidget().setMargin(new MarginInfo(getState().marginsBitmask));
getWidget().setSpacing(getState().spacing);
updateInternalState();
} | 3.68 |
framework_Embedded_getParameterNames | /**
* Gets the embedded object parameter names.
*
* @return the Iterator of parameters names.
*/
public Iterator<String> getParameterNames() {
return getState(false).parameters.keySet().iterator();
} | 3.68 |
flink_FloatHashSet_add | /** See {@link Float#equals(Object)}. */
public boolean add(final float k) {
int intKey = Float.floatToIntBits(k);
if (intKey == 0) {
if (this.containsZero) {
return false;
}
this.containsZero = true;
} else {
float[] key = this.key;
int pos;
int curr;
if ((curr = Float.floatToIntBits(key[pos = MurmurHashUtil.fmix(intKey) & this.mask]))
!= 0) {
if (curr == intKey) {
return false;
}
while ((curr = Float.floatToIntBits(key[pos = pos + 1 & this.mask])) != 0) {
if (curr == intKey) {
return false;
}
}
}
key[pos] = k;
}
if (this.size++ >= this.maxFill) {
this.rehash(OptimizableHashSet.arraySize(this.size + 1, this.f));
}
return true;
} | 3.68 |
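The set keys on `Float.floatToIntBits` rather than `==`, which is what the `Float#equals(Object)` reference hints at: `NaN` matches itself and `0.0f`/`-0.0f` are distinct. A small standalone check of that bit-level comparison, independent of the hash set:

```java
public class FloatBitsSketch {
    public static void main(String[] args) {
        // NaN != NaN with ==, but the canonical bit patterns match,
        // so a set keyed on the bits stores a single NaN.
        System.out.println(Float.NaN == Float.NaN);                                              // false
        System.out.println(Float.floatToIntBits(Float.NaN) == Float.floatToIntBits(Float.NaN)); // true

        // 0.0f == -0.0f with ==, but the bit patterns differ,
        // so such a set keeps them as two distinct elements.
        System.out.println(0.0f == -0.0f);                                                       // true
        System.out.println(Float.floatToIntBits(0.0f) == Float.floatToIntBits(-0.0f));           // false
    }
}
```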
framework_ThemeResource_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return resourceID;
} | 3.68 |
hmily_FileRepository_clean | /**
* When the file handle is still occupied, help the GC clean the buffer.
* http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4724038
* @param buffer buffer
*/
public static void clean(final ByteBuffer buffer) {
if (buffer == null || !buffer.isDirect() || buffer.capacity() == 0) {
return;
}
invoke(invoke(viewed(buffer), "cleaner"), "clean");
} | 3.68 |
hbase_RegionSplitter_newSplitAlgoInstance | /**
* @throws IOException if the specified SplitAlgorithm class couldn't be instantiated
*/
public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, String splitClassName)
throws IOException {
Class<?> splitClass;
// For split algorithms builtin to RegionSplitter, the user can specify
// their simple class name instead of a fully qualified class name.
if (splitClassName.equals(HexStringSplit.class.getSimpleName())) {
splitClass = HexStringSplit.class;
} else if (splitClassName.equals(DecimalStringSplit.class.getSimpleName())) {
splitClass = DecimalStringSplit.class;
} else if (splitClassName.equals(UniformSplit.class.getSimpleName())) {
splitClass = UniformSplit.class;
} else {
try {
splitClass = conf.getClassByName(splitClassName);
} catch (ClassNotFoundException e) {
throw new IOException("Couldn't load split class " + splitClassName, e);
}
if (splitClass == null) {
throw new IOException("Failed loading split class " + splitClassName);
}
if (!SplitAlgorithm.class.isAssignableFrom(splitClass)) {
throw new IOException("Specified split class doesn't implement SplitAlgorithm");
}
}
try {
return splitClass.asSubclass(SplitAlgorithm.class).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new IOException("Problem loading split algorithm: ", e);
}
} | 3.68 |
hmily_HmilyRepositoryEvent_clear | /**
* Help the GC by clearing all references.
*/
public void clear() {
hmilyTransaction = null;
hmilyParticipant = null;
hmilyParticipantUndo = null;
hmilyLocks = null;
transId = null;
} | 3.68 |
framework_Page_pushState | /**
* Updates the browser's URI without causing an actual page change. This method
* is useful if you wish to implement "deep linking" in your application.
* Calling the method also adds a new entry to the client's browser history, and
* you can further use a {@link PopStateListener} to track the use of the
* back/forward feature in the browser.
* <p>
* Note: the current implementation supports setting only one new URI per
* user interaction.
*
* @param uri
* the URI to be used for pushState operation. The URI is
* resolved over the current location. If the given URI is
* absolute, it must be of same origin as the current URI or the
* browser will not accept the new value.
* @since 8.0
*/
public void pushState(URI uri) {
pushState(uri.toString());
} | 3.68 |
pulsar_MessageId_fromByteArray | /**
* De-serialize a message id from a byte array.
*
* @param data
* byte array containing the serialized message id
* @return the de-serialized messageId object
* @throws IOException if the de-serialization fails
*/
static MessageId fromByteArray(byte[] data) throws IOException {
return DefaultImplementation.getDefaultImplementation().newMessageIdFromByteArray(data);
} | 3.68 |
framework_VProgressBar_isIndeterminate | /**
* Gets whether or not this progress indicator is indeterminate. In
* indeterminate mode there is an animation indicating that the task is
* running but without providing any information about the current progress.
*
* @return {@code true} if set to indeterminate mode, {@code false}
* otherwise
*/
public boolean isIndeterminate() {
return indeterminate;
} | 3.68 |
zxing_GenericGF_buildMonomial | /**
* @return the monomial representing coefficient * x^degree
*/
GenericGFPoly buildMonomial(int degree, int coefficient) {
if (degree < 0) {
throw new IllegalArgumentException();
}
if (coefficient == 0) {
return zero;
}
int[] coefficients = new int[degree + 1];
coefficients[0] = coefficient;
return new GenericGFPoly(this, coefficients);
} | 3.68 |
flink_RunLengthDecoder_readIntLittleEndian | /** Reads the next 4 byte little endian int. */
private int readIntLittleEndian() throws IOException {
int ch4 = in.read();
int ch3 = in.read();
int ch2 = in.read();
int ch1 = in.read();
return ((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + ch4);
} | 3.68 |
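A standalone sketch of the same little-endian assembly over a plain byte array (the sample bytes are made up):

```java
public class LittleEndianSketch {
    // Assembles a 32-bit int from 4 bytes stored least-significant byte first.
    static int readIntLittleEndian(byte[] b, int off) {
        int ch4 = b[off] & 0xFF;
        int ch3 = b[off + 1] & 0xFF;
        int ch2 = b[off + 2] & 0xFF;
        int ch1 = b[off + 3] & 0xFF;
        return (ch1 << 24) + (ch2 << 16) + (ch3 << 8) + ch4;
    }

    public static void main(String[] args) {
        System.out.println(readIntLittleEndian(new byte[] {1, 0, 0, 0}, 0)); // 1
        // bytes 0x78 0x56 0x34 0x12 decode to 0x12345678
        System.out.println(Integer.toHexString(
                readIntLittleEndian(new byte[] {0x78, 0x56, 0x34, 0x12}, 0))); // 12345678
    }
}
```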
flink_BlobServer_deleteInternal | /**
* Deletes the file associated with the blob key in the local storage of the blob server.
*
* @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param key blob key associated with the file to be deleted
* @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
* <tt>false</tt> otherwise
*/
private boolean deleteInternal(JobID jobId, PermanentBlobKey key) {
final File localFile =
new File(
BlobUtils.getStorageLocationPath(
storageDir.deref().getAbsolutePath(), jobId, key));
readWriteLock.writeLock().lock();
try {
boolean deleteLocally = true;
if (!localFile.delete() && localFile.exists()) {
LOG.warn(
"Failed to locally delete BLOB "
+ key
+ " at "
+ localFile.getAbsolutePath());
deleteLocally = false;
}
// this needs to happen inside the write lock in case of concurrent getFile() calls
boolean deleteHA = blobStore.delete(jobId, key);
return deleteLocally && deleteHA;
} finally {
readWriteLock.writeLock().unlock();
}
} | 3.68 |
AreaShop_WorldEditSelection_getWidth | /**
* Get X-size.
*
* @return width
*/
public int getWidth() {
return maximum.getBlockX() - minimum.getBlockX() + 1;
} | 3.68 |
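The `+ 1` reflects that block coordinates are inclusive on both ends: a selection spanning x = 3 to x = 7 covers five blocks. A one-line check with made-up coordinates:

```java
public class WidthSketch {
    public static void main(String[] args) {
        int minX = 3, maxX = 7;
        System.out.println(maxX - minX + 1); // 5 blocks: x = 3, 4, 5, 6, 7
    }
}
```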
flink_StateTtlConfig_setUpdateType | /**
* Sets the ttl update type.
*
* @param updateType The ttl update type configures when to update last access timestamp
* which prolongs state TTL.
*/
@Nonnull
public Builder setUpdateType(UpdateType updateType) {
this.updateType = updateType;
return this;
} | 3.68 |
flink_EdgeManagerBuildUtil_connectInternal | /** Connect all execution vertices to all partitions. */
private static void connectInternal(
List<ExecutionVertex> taskVertices,
List<IntermediateResultPartition> partitions,
ResultPartitionType resultPartitionType,
EdgeManager edgeManager) {
checkState(!taskVertices.isEmpty());
checkState(!partitions.isEmpty());
ConsumedPartitionGroup consumedPartitionGroup =
createAndRegisterConsumedPartitionGroupToEdgeManager(
taskVertices.size(), partitions, resultPartitionType, edgeManager);
for (ExecutionVertex ev : taskVertices) {
ev.addConsumedPartitionGroup(consumedPartitionGroup);
}
List<ExecutionVertexID> consumerVertices =
taskVertices.stream().map(ExecutionVertex::getID).collect(Collectors.toList());
ConsumerVertexGroup consumerVertexGroup =
ConsumerVertexGroup.fromMultipleVertices(consumerVertices, resultPartitionType);
for (IntermediateResultPartition partition : partitions) {
partition.addConsumers(consumerVertexGroup);
}
consumedPartitionGroup.setConsumerVertexGroup(consumerVertexGroup);
consumerVertexGroup.setConsumedPartitionGroup(consumedPartitionGroup);
} | 3.68 |
framework_VScrollTable_moveFocusDown | /**
* Moves the focus down by 1+offset rows
*
* @return Returns true if succeeded, else false if the selection could not
* be moved downwards
*/
private boolean moveFocusDown(int offset) {
if (isSelectable()) {
if (focusedRow == null && scrollBody.iterator().hasNext()) {
// FIXME should focus first visible from top, not first rendered
// ??
return setRowFocus(
(VScrollTableRow) scrollBody.iterator().next());
} else {
VScrollTableRow next = getNextRow(focusedRow, offset);
if (next != null) {
return setRowFocus(next);
}
}
}
return false;
} | 3.68 |
flink_HiveServer2Endpoint_SetClientInfo | /** To be compatible with Hive3, add a default implementation. */
public TSetClientInfoResp SetClientInfo(TSetClientInfoReq tSetClientInfoReq) throws TException {
return new TSetClientInfoResp(buildErrorStatus("SetClientInfo"));
} | 3.68 |
hadoop_StateStoreUtils_filterMultiple | /**
* Filters a list of records to find all records matching the query.
*
* @param <T> Type of the class of the data record.
* @param query Map of field names and objects to use to filter results.
* @param records List of data records to filter.
* @return List of all records matching the query (or empty list if none
* match), null if the data set could not be filtered.
*/
public static <T extends BaseRecord> List<T> filterMultiple(
final Query<T> query, final Iterable<T> records) {
List<T> matchingList = new ArrayList<>();
for (T record : records) {
if (query.matches(record)) {
matchingList.add(record);
}
}
return matchingList;
} | 3.68 |
hbase_RegionInfo_areAdjacent | /**
* Check whether two regions are adjacent, i.e. one lies just before or just after the other in a table.
* @return true if two regions are adjacent
*/
static boolean areAdjacent(RegionInfo regionA, RegionInfo regionB) {
if (regionA == null || regionB == null) {
throw new IllegalArgumentException("Can't check whether adjacent for null region");
}
if (!regionA.getTable().equals(regionB.getTable())) {
return false;
}
RegionInfo a = regionA;
RegionInfo b = regionB;
if (Bytes.compareTo(a.getStartKey(), b.getStartKey()) > 0) {
a = regionB;
b = regionA;
}
return Bytes.equals(a.getEndKey(), b.getStartKey());
} | 3.68 |
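As a worked example: two regions of the same table are adjacent exactly when, after ordering by start key, the first region's end key equals the second's start key. Below is a standalone sketch of that key comparison on plain byte arrays (the region boundaries are made up, and the empty array stands in for the open table boundary as in HBase):

```java
import java.util.Arrays;

public class AdjacencySketch {
    // Adjacent if, after ordering by start key, a's end key equals b's start key.
    static boolean areAdjacent(byte[] startA, byte[] endA, byte[] startB, byte[] endB) {
        if (Arrays.compare(startA, startB) > 0) {
            // swap so that (startA, endA) is the earlier region
            byte[] s = startA; startA = startB; startB = s;
            byte[] e = endA; endA = endB; endB = e;
        }
        return Arrays.equals(endA, startB);
    }

    public static void main(String[] args) {
        byte[] open = new byte[0];
        // [ , "m") and ["m", ) are adjacent; [ , "m") and ["n", ) are not.
        System.out.println(areAdjacent(open, "m".getBytes(), "m".getBytes(), open)); // true
        System.out.println(areAdjacent(open, "m".getBytes(), "n".getBytes(), open)); // false
    }
}
```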
framework_ApplicationConfiguration_isProductionMode | /**
* Checks if production mode is enabled. When production mode is enabled,
* client-side logging is disabled. There may also be other performance
* optimizations.
*
* @since 7.1.2
* @return <code>true</code> if production mode is enabled; otherwise
* <code>false</code>.
*/
public static boolean isProductionMode() {
return !isDebugAvailable();
} | 3.68 |
querydsl_ConstructorUtils_getConstructor | /**
* Returns the constructor where the formal parameter list matches the
* givenTypes argument.
*
* It is advisable to first call
* {@link #getConstructorParameters(java.lang.Class, java.lang.Class[])}
* to get the parameters.
*
* @param type type
* @param givenTypes parameter types
* @return matching constructor
* @throws NoSuchMethodException
*/
public static <C> Constructor<C> getConstructor(Class<C> type, Class<?>[] givenTypes) throws NoSuchMethodException {
return type.getConstructor(givenTypes);
} | 3.68 |
hbase_WALInputFormat_getFiles | /**
* @param startTime If file looks like it has a timestamp in its name, we'll check if newer or
* equal to this value else we will filter out the file. If name does not seem to
* have a timestamp, we will just return it w/o filtering.
* @param endTime If file looks like it has a timestamp in its name, we'll check if older or
* equal to this value else we will filter out the file. If name does not seem to
* have a timestamp, we will just return it w/o filtering.
*/
private List<FileStatus> getFiles(FileSystem fs, Path dir, long startTime, long endTime)
throws IOException {
List<FileStatus> result = new ArrayList<>();
LOG.debug("Scanning " + dir.toString() + " for WAL files");
RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(dir);
if (!iter.hasNext()) {
return Collections.emptyList();
}
while (iter.hasNext()) {
LocatedFileStatus file = iter.next();
if (file.isDirectory()) {
// Recurse into sub directories
result.addAll(getFiles(fs, file.getPath(), startTime, endTime));
} else {
addFile(result, file, startTime, endTime);
}
}
// TODO: These results should be sorted? Results could be content of recovered.edits directory
// -- null padded increasing numeric -- or a WAL file w/ timestamp suffix or timestamp and
// then meta suffix. See AbstractFSWALProvider#WALStartTimeComparator
return result;
} | 3.68 |
hadoop_StoragePolicySatisfyManager_getMode | /**
* @return sps service mode.
*/
public StoragePolicySatisfierMode getMode() {
return mode;
} | 3.68 |
framework_BeanContainer_addItem | /**
* Adds the bean to the Container.
*
* @see Container#addItem(Object)
*/
@Override
public BeanItem<BEANTYPE> addItem(IDTYPE itemId, BEANTYPE bean) {
if (itemId != null && bean != null) {
return super.addItem(itemId, bean);
} else {
return null;
}
} | 3.68 |
hbase_Get_toMap | /**
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
* @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
// we start with the fingerprint map and build on top of it.
Map<String, Object> map = getFingerprint();
// replace the fingerprint's simple list of families with a
// map from column families to lists of qualifiers and kv details
Map<String, List<String>> columns = new HashMap<>();
map.put("families", columns);
// add scalar information first
map.put("row", Bytes.toStringBinary(this.row));
map.put("maxVersions", this.maxVersions);
map.put("cacheBlocks", this.cacheBlocks);
List<Long> timeRange = new ArrayList<>(2);
timeRange.add(this.tr.getMin());
timeRange.add(this.tr.getMax());
map.put("timeRange", timeRange);
int colCount = 0;
// iterate through affected families and add details
for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
List<String> familyList = new ArrayList<>();
columns.put(Bytes.toStringBinary(entry.getKey()), familyList);
if (entry.getValue() == null) {
colCount++;
--maxCols;
familyList.add("ALL");
} else {
colCount += entry.getValue().size();
if (maxCols <= 0) {
continue;
}
for (byte[] column : entry.getValue()) {
if (--maxCols <= 0) {
continue;
}
familyList.add(Bytes.toStringBinary(column));
}
}
}
map.put("totalColumns", colCount);
if (this.filter != null) {
map.put("filter", this.filter.toString());
}
// add the id if set
if (getId() != null) {
map.put("id", getId());
}
return map;
} | 3.68 |
hibernate-validator_CodePointLength_normalize | /**
* Normalize a specified character sequence.
* @param value target value
* @return normalized value
*/
public CharSequence normalize(CharSequence value) {
if ( this.form == null || value == null || value.length() == 0 ) {
return value;
}
return Normalizer.normalize( value, this.form );
} | 3.68 |
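As a usage sketch of `java.text.Normalizer`, on which the helper above relies: a decomposed "é" (a base letter plus a combining accent) is two code points before NFC normalization and one after (the sample string is made up):

```java
import java.text.Normalizer;

public class NormalizeSketch {
    public static void main(String[] args) {
        String decomposed = "e\u0301"; // 'e' followed by a combining acute accent
        String composed = Normalizer.normalize(decomposed, Normalizer.Form.NFC);

        System.out.println(decomposed.codePointCount(0, decomposed.length())); // 2
        System.out.println(composed.codePointCount(0, composed.length()));     // 1
    }
}
```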
morf_AbstractSqlDialectTest_expectedHints4c | /**
* @return The expected SQL for the {@link InsertStatement#useParallelDml(int)} directive.
*/
protected String expectedHints4c() {
return "INSERT INTO " + tableName("Foo") + " SELECT a, b FROM " + tableName("Foo_1");
} | 3.68 |
flink_FailureEnricherUtils_filterInvalidEnrichers | /**
* Filters out invalid {@link FailureEnricher} objects that have duplicate output keys.
*
* @param failureEnrichers a set of {@link FailureEnricher} objects to filter
* @return a filtered collection without any duplicate output keys
*/
@VisibleForTesting
static Collection<FailureEnricher> filterInvalidEnrichers(
final Set<FailureEnricher> failureEnrichers) {
final Map<String, Set<Class<?>>> enrichersByKey = new HashMap<>();
failureEnrichers.forEach(
enricher ->
enricher.getOutputKeys()
.forEach(
enricherKey ->
enrichersByKey
.computeIfAbsent(
enricherKey,
ignored -> new HashSet<>())
.add(enricher.getClass())));
final Set<Class<?>> invalidEnrichers =
enrichersByKey.entrySet().stream()
.filter(entry -> entry.getValue().size() > 1)
.flatMap(
entry -> {
LOG.warn(
"Following enrichers have have registered duplicate output key [%s] and will be ignored: {}.",
entry.getValue().stream()
.map(Class::getName)
.collect(Collectors.joining(", ")));
return entry.getValue().stream();
})
.collect(Collectors.toSet());
return failureEnrichers.stream()
.filter(enricher -> !invalidEnrichers.contains(enricher.getClass()))
.collect(Collectors.toList());
} | 3.68 |
hbase_Connection_getHbck | /**
* Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to
* be thread-safe. A new instance should be created by each thread. This is a lightweight
* operation. Pooling or caching of the returned Hbck instance is not recommended. <br>
* The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. <br>
* This will be used mostly by the hbck tool. It may only be used to bypass getting the registered
* master from ZK. In situations where ZK is not available, or the active master is not registered with
* ZK and the user can obtain the master address by other means, the master can be explicitly specified.
* @param masterServer explicit {@link ServerName} for master server
* @return an Hbck instance for a specified master server
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
default Hbck getHbck(ServerName masterServer) throws IOException {
return toAsyncConnection().getHbck(masterServer);
} | 3.68 |
pulsar_AuthorizationService_allowTenantOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
public boolean allowTenantOperation(String tenantName,
TenantOperation operation,
String originalRole,
String role,
AuthenticationDataSource authData) throws Exception {
try {
return allowTenantOperationAsync(
tenantName, operation, originalRole, role, authData).get(
conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
framework_PointerEventSupport_isSupported | /**
* @return true if pointer events are supported by the browser, false
* otherwise
*/
public static boolean isSupported() {
return IMPL.isSupported();
} | 3.68 |
flink_ProjectOperator_projectTuple25 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>
        ProjectOperator<T, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>
        projectTuple25() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> tType =
            new TupleTypeInfo<Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(fTypes);
    return new ProjectOperator<T, Tuple25<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
hudi_HoodieTableConfig_getDatabaseName | /**
* Read the database name.
*/
public String getDatabaseName() {
return getString(DATABASE_NAME);
} | 3.68 |