Columns:
name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (values 3.26 to 3.68)
flink_InPlaceMutableHashTable_insert
/**
 * Inserts the given record into the hash table. Note: this method doesn't care about whether a
 * record with the same key is already present.
 *
 * @param record The record to insert.
 * @throws IOException (EOFException specifically, if memory ran out)
 */
@Override
public void insert(T record) throws IOException {
    if (closed) {
        return;
    }

    final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
    final int bucket = hashCode & numBucketsMask;
    final int bucketSegmentIndex = bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
    final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
    final int bucketOffset = (bucket & numBucketsPerSegmentMask) << bucketSizeBits; // offset of the bucket in the segment
    final long firstPointer = bucketSegment.getLong(bucketOffset);

    try {
        final long newFirstPointer = recordArea.appendPointerAndRecord(firstPointer, record);
        bucketSegment.putLong(bucketOffset, newFirstPointer);
    } catch (EOFException ex) {
        compactOrThrow();
        insert(record);
        return;
    }

    numElements++;
    resizeTableIfNecessary();
}
3.68
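The insert method above decomposes a Jenkins-mixed hash into a bucket index, a segment index and a byte offset purely with masks and shifts, which only works because every size involved is a power of two. A minimal standalone sketch of that addressing scheme follows; all constants and the key are illustrative assumptions, not Flink's actual configuration values.

// Illustrative power-of-two bucket addressing, mirroring the decomposition in insert().
public class BucketAddressing {
    static final int NUM_BUCKETS = 1 << 20;                 // hypothetical total buckets (power of two)
    static final int NUM_BUCKETS_MASK = NUM_BUCKETS - 1;
    static final int BUCKETS_PER_SEGMENT_BITS = 12;         // hypothetical: 4096 buckets per segment
    static final int BUCKETS_PER_SEGMENT_MASK = (1 << BUCKETS_PER_SEGMENT_BITS) - 1;
    static final int BUCKET_SIZE_BITS = 3;                  // hypothetical: 8-byte bucket slots

    public static void main(String[] args) {
        int hashCode = "some-key".hashCode();                // stand-in for the Jenkins-mixed hash
        int bucket = hashCode & NUM_BUCKETS_MASK;            // cheap modulo by a power of two
        int segmentIndex = bucket >>> BUCKETS_PER_SEGMENT_BITS;               // which segment holds the bucket
        int offset = (bucket & BUCKETS_PER_SEGMENT_MASK) << BUCKET_SIZE_BITS; // byte offset inside the segment
        System.out.printf("bucket=%d segment=%d offset=%d%n", bucket, segmentIndex, offset);
    }
}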
morf_PortableSqlStatement_add
/** * Adds an unsupported marker for a given database. * * @param databaseTypeIdentifier The db type identifier ({@link DatabaseType#identifier()}). * @param supported The {@link DataUpgradeSupported} value. * @return This {@link PortableSqlStatement}. */ public PortableSqlStatement add(String databaseTypeIdentifier, DataUpgradeSupported supported) { statements.put(databaseTypeIdentifier, supported.toString()); return this; }
3.68
morf_XmlDataSetConsumer_buildColumnAttributes
/**
 * @param column The column for which to build attributes
 * @return The attributes
 */
private AttributesImpl buildColumnAttributes(Column column) {
    AttributesImpl columnAttributes = new AttributesImpl();
    DataType columnDataType = column.getType();
    columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.NAME_ATTRIBUTE, XmlDataSetNode.NAME_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, column.getName());
    columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.TYPE_ATTRIBUTE, XmlDataSetNode.TYPE_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, columnDataType.name());

    // the width attribute may not be required by this data type
    if (columnDataType.hasWidth() && column.getWidth() > 0) {
        columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.WIDTH_ATTRIBUTE, XmlDataSetNode.WIDTH_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, Integer.toString(column.getWidth()));
    }

    // the scale attribute may not be required by this data type
    if (columnDataType.hasScale() && column.getScale() > 0) {
        columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.SCALE_ATTRIBUTE, XmlDataSetNode.SCALE_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, Integer.toString(column.getScale()));
    }

    if (StringUtils.isNotEmpty(column.getDefaultValue())) {
        columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.DEFAULT_ATTRIBUTE, XmlDataSetNode.DEFAULT_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, column.getDefaultValue());
    }

    if (column.isNullable()) {
        columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.NULLABLE_ATTRIBUTE, XmlDataSetNode.NULLABLE_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, Boolean.toString(column.isNullable()));
    }

    if (column.isPrimaryKey()) {
        columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.PRIMARYKEY_ATTRIBUTE, XmlDataSetNode.PRIMARYKEY_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, Boolean.toString(column.isPrimaryKey()));
    }

    if (column.isAutoNumbered()) {
        columnAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.AUTONUMBER_ATTRIBUTE, XmlDataSetNode.AUTONUMBER_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, Integer.toString(column.getAutoNumberStart()));
    }

    return columnAttributes;
}
3.68
morf_AbstractSqlDialectTest_expectedSqlForMathOperations2
/** * @return expected SQL for math operation 2 */ protected String expectedSqlForMathOperations2() { return "a / b + 100"; }
3.68
hbase_IdLock_releaseLockEntry
/** * Must be called in a finally block to decrease the internal counter and remove the monitor * object for the given id if the caller is the last client. * @param entry the return value of {@link #getLockEntry(long)} */ public void releaseLockEntry(Entry entry) { Thread currentThread = Thread.currentThread(); synchronized (entry) { if (entry.holder != currentThread) { LOG.warn("{} is trying to release lock entry {}, but it is not the holder.", currentThread, entry); } entry.locked = false; if (entry.numWaiters > 0) { entry.notify(); } else { map.remove(entry.id); } } }
3.68
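The Javadoc above insists releaseLockEntry runs in a finally block. A hedged usage sketch, assuming HBase's org.apache.hadoop.hbase.util.IdLock with the getLockEntry(long)/releaseLockEntry(Entry) pair shown above:

import java.io.IOException;
import org.apache.hadoop.hbase.util.IdLock;

public class IdLockUsage {
    private final IdLock idLock = new IdLock();

    // Serializes work per id; the finally block guarantees the entry is released
    // (and waiters woken) even when the work throws.
    void doExclusiveWork(long id) throws IOException {
        IdLock.Entry lockEntry = idLock.getLockEntry(id);
        try {
            // ... exclusive work for this id ...
        } finally {
            idLock.releaseLockEntry(lockEntry);
        }
    }
}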
hmily_BindData_withValue
/** * With value bind data. * * @param <T> the type parameter * @param value the value * @return the bind data */ public <T> BindData<T> withValue(final Supplier<T> value) { return new BindData<>(this.type, value); }
3.68
framework_FocusUtil_focusOnLastFocusableElement
/** * Moves the focus to the last focusable child of given parent element. * * @param parent * the parent element * @since 8.1.7 */ public static void focusOnLastFocusableElement(Element parent) { Element[] focusableChildren = getFocusableChildren(parent); if (focusableChildren.length > 0) { focusableChildren[focusableChildren.length - 1].focus(); } }
3.68
flink_TypeInferenceUtil_createUnexpectedException
/** Returns an exception for an unexpected error during type inference. */ public static TableException createUnexpectedException( CallContext callContext, Throwable cause) { return new TableException( String.format( "Unexpected error in type inference logic of function '%s'. This is a bug.", callContext.getName()), cause); }
3.68
hbase_CompactionTool_compactStoreFiles
/**
 * Execute the actual compaction job. If the compact once flag is not specified, execute the
 * compaction until no more compactions are needed. Uses the Configuration settings provided.
 */
private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
    final RegionInfo hri, final String familyName, final boolean compactOnce, final boolean major)
    throws IOException {
    HStore store = getStore(conf, fs, tableDir, htd, hri, familyName);
    LOG.info("Compact table=" + htd.getTableName() + " region=" + hri.getRegionNameAsString()
        + " family=" + familyName);
    if (major) {
        store.triggerMajorCompaction();
    }
    do {
        Optional<CompactionContext> compaction =
            store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null);
        if (!compaction.isPresent()) {
            break;
        }
        List<HStoreFile> storeFiles =
            store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null);
        if (storeFiles != null && !storeFiles.isEmpty()) {
            if (deleteCompacted) {
                for (HStoreFile storeFile : storeFiles) {
                    fs.delete(storeFile.getPath(), false);
                }
            }
        }
    } while (store.needsCompaction() && !compactOnce);
    // We need to close the store properly, to make sure it will archive compacted files
    store.close();
}
3.68
hbase_ConfServlet_getConfFromContext
/** * Return the Configuration of the daemon hosting this servlet. This is populated when the * HttpServer starts. */ private Configuration getConfFromContext() { Configuration conf = (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); assert conf != null; return conf; }
3.68
hadoop_CompressionCodec_createInputStreamWithCodecPool
/** * Create an input stream with a codec taken from the global CodecPool. * * @param codec The codec to use to create the input stream. * @param conf The configuration to use if we need to create a new codec. * @param in The input stream to wrap. * @return The new input stream * @throws IOException */ static CompressionInputStream createInputStreamWithCodecPool( CompressionCodec codec, Configuration conf, InputStream in) throws IOException { Decompressor decompressor = CodecPool.getDecompressor(codec); CompressionInputStream stream = null; try { stream = codec.createInputStream(in, decompressor); } finally { if (stream == null) { CodecPool.returnDecompressor(decompressor); } else { stream.setTrackedDecompressor(decompressor); } } return stream; }
3.68
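The try/finally above implements a borrow-and-attach discipline: the pooled decompressor is returned if stream creation fails, otherwise ownership passes to the stream. A sketch of the same discipline using only public Hadoop APIs (setTrackedDecompressor is package-private, so here the caller must return the decompressor itself after closing the stream):

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.Decompressor;

public class CodecPoolUsage {
    // Borrow a decompressor; return it to the pool if wrapping fails so it never leaks.
    static CompressionInputStream open(CompressionCodec codec, InputStream raw) throws IOException {
        Decompressor decompressor = CodecPool.getDecompressor(codec);
        CompressionInputStream stream = null;
        try {
            stream = codec.createInputStream(raw, decompressor);
            return stream;
        } finally {
            if (stream == null) {
                CodecPool.returnDecompressor(decompressor);
            }
        }
    }
}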
pulsar_StateStoreProvider_init
/** * Initialize the state store provider. * * @param config the config to init the state store provider. * @param functionDetails the function details. * @throws Exception when failed to init the state store provider. */ default void init(Map<String, Object> config, FunctionDetails functionDetails) throws Exception {}
3.68
flink_TaskManagerRunner_getTerminationFuture
// export the termination future for caller to know it is terminated
public CompletableFuture<Result> getTerminationFuture() {
    return terminationFuture;
}
3.68
hmily_AbstractHmilySQLParserExecutor_generateHmilyUpdateStatement
/** * Generate Hmily update statement. * * @param updateStatement update statement * @param hmilyUpdateStatement hmily update statement * @return hmily update statement */ public HmilyUpdateStatement generateHmilyUpdateStatement(final UpdateStatement updateStatement, final HmilyUpdateStatement hmilyUpdateStatement) { return UpdateStatementAssembler.assembleHmilyUpdateStatement(updateStatement, hmilyUpdateStatement); }
3.68
hadoop_AzureBlobFileSystem_getInstrumentationMap
/** * Returns the counter() map in IOStatistics containing all the counters * and their values. * * @return Map of IOStatistics counters. */ @VisibleForTesting Map<String, Long> getInstrumentationMap() { return abfsCounters.toMap(); }
3.68
hadoop_StringValueMax_getCombinerOutput
/** * @return an array of one element. The element is a string * representation of the aggregated value. The return value is * expected to be used by a combiner. */ public ArrayList<String> getCombinerOutput() { ArrayList<String> retv = new ArrayList<String>(1); retv.add(maxVal); return retv; }
3.68
framework_GridDragSource_clearDragDataGenerator
/** * Remove the generator function set for the given type. * * @param type * Type of the generator to be removed. */ public void clearDragDataGenerator(String type) { generatorFunctions.remove(type); }
3.68
hbase_MultiByteBuff_hasRemaining
/** * Returns true if there are elements between the current position and the limit * @return true if there are elements, false otherwise */ @Override public final boolean hasRemaining() { checkRefCount(); return this.curItem.hasRemaining() || (this.curItemIndex < this.limitedItemIndex && this.items[this.curItemIndex + 1].hasRemaining()); }
3.68
rocketmq-connect_FilePositionManagementServiceImpl_call
/** * Computes a result, or throws an exception if unable to do so. * * @return computed result * @throws Exception if unable to compute a result */ @Override public Void call() { try { positionStore.persist(); if (callback != null) { callback.onCompletion(null, null, null); } } catch (Exception error) { if (callback != null) { callback.onCompletion(error, null, null); } } return null; }
3.68
hbase_HBaseServerBase_getNamedQueueRecorder
/** * Get the {@link NamedQueueRecorder} used to add different logs to the ring buffer. */ public NamedQueueRecorder getNamedQueueRecorder() { return this.namedQueueRecorder; }
3.68
pulsar_FunctionMetaDataManager_start
// Starts the tailer if we are in non-leader mode
public synchronized void start() {
    if (exclusiveLeaderProducer == null) {
        try {
            // This means that we are in non-leader mode. start function metadata tailer
            initializeTailer();
        } catch (PulsarClientException e) {
            throw new RuntimeException("Could not start MetaData topic tailer", e);
        }
    }
}
3.68
framework_VErrorMessage_setOwner
/** * Set the owner, i.e the Widget that created this {@link VErrorMessage}. * The owner must be set if the {@link VErrorMessage} is created * 'stand-alone' (not within a {@link VOverlay}), or theming might not work * properly. * * @see VOverlay#setOwner(Widget) * @param owner * the owner (creator Widget) */ public void setOwner(Widget owner) { this.owner = owner; }
3.68
framework_ApplicationConfiguration_getTagsForServerSideClassName
/** * Returns all tags for the given class. Tags are used in * {@link ApplicationConfiguration} to keep track of different classes and * their hierarchy * * @since 7.2 * @param classname * name of the class whose tags we want * @return Integer array of tags pointing to this classname */ public Integer[] getTagsForServerSideClassName(String classname) { List<Integer> tags = new ArrayList<>(); for (Map.Entry<Integer, String> entry : tagToServerSideClassName .entrySet()) { if (classname.equals(entry.getValue())) { tags.add(entry.getKey()); } } Integer[] out = new Integer[tags.size()]; return tags.toArray(out); }
3.68
querydsl_SQLTemplatesRegistry_getBuilder
/** * Get a SQLTemplates.Builder instance that best matches the SQL engine of the * given database metadata * * @param md database metadata * @return templates builder * @throws SQLException */ public SQLTemplates.Builder getBuilder(DatabaseMetaData md) throws SQLException { String name = md.getDatabaseProductName().toLowerCase(); if (name.equals("cubrid")) { return CUBRIDTemplates.builder(); } else if (name.equals("apache derby")) { return DerbyTemplates.builder(); } else if (name.startsWith("firebird")) { return FirebirdTemplates.builder(); } else if (name.equals("h2")) { return H2Templates.builder(); } else if (name.equals("hsql")) { return HSQLDBTemplates.builder(); } else if (name.equals("mysql")) { return MySQLTemplates.builder(); } else if (name.equals("oracle")) { return OracleTemplates.builder(); } else if (name.equals("postgresql")) { return PostgreSQLTemplates.builder(); } else if (name.equals("sqlite")) { return SQLiteTemplates.builder(); } else if (name.startsWith("teradata")) { return TeradataTemplates.builder(); } else if (name.equals("microsoft sql server")) { return getMssqlSqlTemplates(md); } else { return new SQLTemplates.Builder() { @Override protected SQLTemplates build(char escape, boolean quote) { return new SQLTemplates(Keywords.DEFAULT, "\"", escape, quote, false); } }; } }
3.68
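A hedged usage sketch for the registry above, assuming querydsl's com.querydsl.sql package and an H2 driver on the classpath; the JDBC URL is illustrative:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import com.querydsl.sql.SQLTemplates;
import com.querydsl.sql.SQLTemplatesRegistry;

public class TemplatesLookup {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            DatabaseMetaData md = conn.getMetaData();
            SQLTemplates templates = new SQLTemplatesRegistry().getBuilder(md).build();
            // For H2 the product name lower-cases to "h2", so this prints H2Templates.
            System.out.println(templates.getClass().getSimpleName());
        }
    }
}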
hbase_HDFSBlocksDistribution_getTopHosts
/** Return the sorted list of hosts in terms of their weights */ public List<String> getTopHosts() { HostAndWeight[] hostAndWeights = getTopHostsWithWeights(); List<String> topHosts = new ArrayList<>(hostAndWeights.length); for (HostAndWeight haw : hostAndWeights) { topHosts.add(haw.getHost()); } return topHosts; }
3.68
hudi_SchedulerConfGenerator_generateConfig
/** * Helper to generate spark scheduling configs in XML format with input params. * * @param deltaSyncWeight Scheduling weight for delta sync * @param compactionWeight Scheduling weight for compaction * @param deltaSyncMinShare Minshare for delta sync * @param compactionMinShare Minshare for compaction * @param clusteringWeight Scheduling weight for clustering * @param clusteringMinShare Minshare for clustering * @return Spark scheduling configs */ public static String generateConfig(Integer deltaSyncWeight, Integer compactionWeight, Integer deltaSyncMinShare, Integer compactionMinShare, Integer clusteringWeight, Integer clusteringMinShare) { return String.format(SPARK_SCHEDULING_PATTERN, DELTASYNC_POOL_NAME, SPARK_SCHEDULER_FAIR_MODE, deltaSyncWeight.toString(), deltaSyncMinShare.toString(), COMPACT_POOL_NAME, SPARK_SCHEDULER_FAIR_MODE, compactionWeight.toString(), compactionMinShare.toString(), CLUSTERING_POOL_NAME, SPARK_SCHEDULER_FAIR_MODE, clusteringWeight.toString(), clusteringMinShare.toString()); }
3.68
morf_ResultSetComparer_getNonKeyColumns
/** * @return a list of non-Key column indexes */ private List<Integer> getNonKeyColumns(ResultSetMetaData metaData, Set<Integer> keyCols) throws SQLException { List<Integer> valueCols = Lists.newArrayList(); for (int i = 1; i <= metaData.getColumnCount(); i++) { if (!keyCols.contains(i)) { valueCols.add(i); } } return valueCols; }
3.68
flink_JobMasterGateway_triggerCheckpoint
/** * Triggers taking a checkpoint of the executed job. * * @param timeout for the rpc call * @return Future which is completed with the checkpoint path once completed */ default CompletableFuture<String> triggerCheckpoint(@RpcTimeout final Time timeout) { return triggerCheckpoint(CheckpointType.DEFAULT, timeout) .thenApply(CompletedCheckpoint::getExternalPointer); }
3.68
morf_DataSourceAdapter_getLoginTimeout
/** * @see javax.sql.CommonDataSource#getLoginTimeout() */ @Override public int getLoginTimeout() throws SQLException { return 0; }
3.68
flink_EmbeddedLeaderService_removeContender
/** Callback from leader contenders when they stop their service. */
private void removeContender(EmbeddedLeaderElection embeddedLeaderElection) {
    synchronized (lock) {
        // if the leader election was not even started, simply do nothing
        if (!embeddedLeaderElection.running || shutdown) {
            return;
        }

        try {
            if (!allLeaderContenders.remove(embeddedLeaderElection)) {
                throw new IllegalStateException(
                    "leader election does not belong to this service");
            }

            // stop the service
            if (embeddedLeaderElection.isLeader) {
                embeddedLeaderElection.contender.revokeLeadership();
            }
            embeddedLeaderElection.contender = null;
            embeddedLeaderElection.running = false;
            embeddedLeaderElection.isLeader = false;

            // if that was the current leader, unset its status
            if (currentLeaderConfirmed == embeddedLeaderElection) {
                currentLeaderConfirmed = null;
                currentLeaderSessionId = null;
                currentLeaderAddress = null;
            }
            if (currentLeaderProposed == embeddedLeaderElection) {
                currentLeaderProposed = null;
                currentLeaderSessionId = null;
            }

            updateLeader()
                .whenComplete(
                    (aVoid, throwable) -> {
                        if (throwable != null) {
                            fatalError(throwable);
                        }
                    });
        } catch (Throwable t) {
            fatalError(t);
        }
    }
}
3.68
flink_OptimizableHashSet_maxFill
/** * Returns the maximum number of entries that can be filled before rehashing. * * @param n the size of the backing array. * @param f the load factor. * @return the maximum number of entries before rehashing. */ public static int maxFill(int n, float f) { return Math.min((int) Math.ceil((double) ((float) n * f)), n - 1); }
3.68
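For intuition: with a backing array of n = 16 and load factor f = 0.75, the table may hold ceil(16 × 0.75) = 12 entries before rehashing, and the min(…, n − 1) guard keeps at least one slot free even at f = 1.0. A self-contained check of those two cases:

// Worked example for maxFill(); the method body is copied from the snippet above.
public class MaxFillDemo {
    static int maxFill(int n, float f) {
        return Math.min((int) Math.ceil((double) ((float) n * f)), n - 1);
    }

    public static void main(String[] args) {
        System.out.println(maxFill(16, 0.75f)); // 12: rehash once a 13th entry would be added
        System.out.println(maxFill(16, 1.0f));  // 15, not 16: one slot always stays free
    }
}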
hbase_DisableTableProcedure_runCoprocessorAction
/** * Coprocessor Action. * @param env MasterProcedureEnv * @param state the procedure state */ private void runCoprocessorAction(final MasterProcedureEnv env, final DisableTableState state) throws IOException, InterruptedException { final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { switch (state) { case DISABLE_TABLE_PRE_OPERATION: cpHost.preDisableTableAction(tableName, getUser()); break; case DISABLE_TABLE_POST_OPERATION: cpHost.postCompletedDisableTableAction(tableName, getUser()); break; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); } } }
3.68
morf_DataSetProducerBuilderImpl_isTableEmpty
/** * @see org.alfasoftware.morf.dataset.DataSetProducer#isTableEmpty(java.lang.String) */ @Override public boolean isTableEmpty(String tableName) { return records(tableName).isEmpty(); }
3.68
hbase_RequestConverter_buildDeleteTableRequest
/** * Creates a protocol buffer DeleteTableRequest * @return a DeleteTableRequest */ public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, final long nonceGroup, final long nonce) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); return builder.build(); }
3.68
dubbo_Configurator_toConfigurators
/**
 * Convert override urls to map for use when re-refer. Send all rules every time, the urls will be reassembled and
 * calculated.
 *
 * URL contract:
 * <ol>
 * <li>override://0.0.0.0/...( or override://ip:port...?anyhost=true)&para1=value1... means global rules
 * (all of the providers take effect)</li>
 * <li>override://ip:port...?anyhost=false Special rules (only for a certain provider)</li>
 * <li>override:// rule is not supported... ,needs to be calculated by registry itself</li>
 * <li>override://0.0.0.0/ without parameters means clearing the override</li>
 * </ol>
 *
 * @param urls URL list to convert
 * @return converted configurator list
 */
static Optional<List<Configurator>> toConfigurators(List<URL> urls) {
    if (CollectionUtils.isEmpty(urls)) {
        return Optional.empty();
    }

    ConfiguratorFactory configuratorFactory = urls.get(0)
            .getOrDefaultApplicationModel()
            .getExtensionLoader(ConfiguratorFactory.class)
            .getAdaptiveExtension();

    List<Configurator> configurators = new ArrayList<>(urls.size());
    for (URL url : urls) {
        if (EMPTY_PROTOCOL.equals(url.getProtocol())) {
            configurators.clear();
            break;
        }
        Map<String, String> override = new HashMap<>(url.getParameters());
        // The anyhost parameter of override may be added automatically; it can't change the
        // judgement of whether the url changed
        override.remove(ANYHOST_KEY);
        if (CollectionUtils.isEmptyMap(override)) {
            continue;
        }
        configurators.add(configuratorFactory.getConfigurator(url));
    }
    Collections.sort(configurators);
    return Optional.of(configurators);
}
3.68
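The URL contract in the Javadoc above maps to concrete override URLs like the following; the service name, addresses and parameters are hypothetical examples, not taken from a live registry (assumes dubbo's org.apache.dubbo.common.URL):

import java.util.Arrays;
import java.util.List;
import org.apache.dubbo.common.URL;

public class OverrideUrlExamples {
    public static void main(String[] args) {
        // Global rule: 0.0.0.0 means it applies to every provider of the service.
        URL global = URL.valueOf("override://0.0.0.0/com.example.DemoService?timeout=1000");
        // Provider-specific rule: anyhost=false targets one provider instance.
        URL specific = URL.valueOf("override://10.20.153.10:20880/com.example.DemoService?anyhost=false&weight=200");
        // Clearing rule: no parameters means the override is removed.
        URL clear = URL.valueOf("override://0.0.0.0/com.example.DemoService");
        List<URL> urls = Arrays.asList(global, specific, clear);
        urls.forEach(u -> System.out.println(u.getProtocol() + " -> " + u.getParameters()));
    }
}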
flink_GivenJavaClasses_javaClassesThat
/** Equivalent of {@link ArchRuleDefinition#classes()}, but only for Java classes. */ public static GivenClassesConjunction javaClassesThat(DescribedPredicate<JavaClass> predicate) { return classes().that(areJavaClasses()).and(predicate); }
3.68
querydsl_CollQuery_clone
/** * Clone the state of this query to a new CollQuery instance */ @Override public CollQuery<T> clone() { return new CollQuery<T>(queryMixin.getMetadata().clone(), getQueryEngine()); }
3.68
framework_DateToLongConverter_getModelType
/* * (non-Javadoc) * * @see com.vaadin.data.util.converter.Converter#getModelType() */ @Override public Class<Long> getModelType() { return Long.class; }
3.68
flink_AdaptiveBatchScheduler_tryGetConsumedResultsInfo
/** Get information of consumable results. */
private Optional<List<BlockingResultInfo>> tryGetConsumedResultsInfo(
        final ExecutionJobVertex jobVertex) {

    List<BlockingResultInfo> consumableResultInfo = new ArrayList<>();

    DefaultLogicalVertex logicalVertex = logicalTopology.getVertex(jobVertex.getJobVertexId());
    Iterable<DefaultLogicalResult> consumedResults = logicalVertex.getConsumedResults();

    for (DefaultLogicalResult consumedResult : consumedResults) {
        final ExecutionJobVertex producerVertex =
                getExecutionJobVertex(consumedResult.getProducer().getId());
        if (producerVertex.isFinished()) {
            BlockingResultInfo resultInfo =
                    checkNotNull(blockingResultInfos.get(consumedResult.getId()));
            consumableResultInfo.add(resultInfo);
        } else {
            // not all inputs consumable, return Optional.empty()
            return Optional.empty();
        }
    }
    return Optional.of(consumableResultInfo);
}
3.68
flink_CrossOperator_projectTuple25
/** * Projects a pair of crossed elements to a {@link Tuple} with the previously selected * fields. * * @return The projected data set. * @see Tuple * @see DataSet */ public < T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> ProjectCross< I1, I2, Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> projectTuple25() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo< Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>> tType = new TupleTypeInfo< Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>(fTypes); return new ProjectCross< I1, I2, Tuple25< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>>( this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint); }
3.68
hbase_RegionServerSnapshotManager_start
/** * Start accepting snapshot requests. */ @Override public void start() { LOG.debug("Start Snapshot Manager " + rss.getServerName().toString()); this.memberRpcs.start(rss.getServerName().toString(), member); }
3.68
hbase_HRegionFileSystem_createDir
/**
 * Creates a directory. Assumes the user has already checked for this directory existence.
 * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
 *         whether the directory exists or not, and returns true if it exists.
 */
boolean createDir(Path dir) throws IOException {
    int i = 0;
    IOException lastIOE = null;
    do {
        try {
            return mkdirs(fs, conf, dir);
        } catch (IOException ioe) {
            lastIOE = ioe;
            if (fs.exists(dir)) return true; // directory is present
            try {
                sleepBeforeRetry("Create Directory", i + 1);
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            }
        }
    } while (++i <= hdfsClientRetriesNumber);
    throw new IOException("Exception in createDir", lastIOE);
}
3.68
morf_Function_getType
/** * Get the type of the function. * * @return the type */ public FunctionType getType() { return type; }
3.68
framework_ResourceLoader_runWhenHtmlImportsReady
/** * Executes a Runnable when all HTML imports are ready. If the browser does * not support triggering an event when HTML imports are ready, the Runnable * is executed immediately. * * @param runnable * the code to execute * @since 8.1 */ protected void runWhenHtmlImportsReady(Runnable runnable) { if (GWT.isClient() && supportsHtmlWhenReady()) { addHtmlImportsReadyHandler(() -> runnable.run()); } else { runnable.run(); } }
3.68
hbase_ByteBuffAllocator_clean
/** * Free all direct buffers if allocated, mainly used for testing. */ public void clean() { while (!buffers.isEmpty()) { ByteBuffer b = buffers.poll(); if (b.isDirect()) { UnsafeAccess.freeDirectBuffer(b); } } this.usedBufCount.set(0); this.maxPoolSizeInfoLevelLogged = false; this.poolAllocationBytes.reset(); this.heapAllocationBytes.reset(); this.lastPoolAllocationBytes = 0; this.lastHeapAllocationBytes = 0; }
3.68
flink_JobGraph_setJobID
/** Sets the ID of the job. */ public void setJobID(JobID jobID) { this.jobID = jobID; }
3.68
flink_CsvCommons_validateCharacterVal
/** * Validates the option {@code option} value must be a Character. * * @param tableOptions the table options * @param option the config option * @param unescape whether to unescape the option value */ private static void validateCharacterVal( ReadableConfig tableOptions, ConfigOption<String> option, boolean unescape) { if (!tableOptions.getOptional(option).isPresent()) { return; } final String value = unescape ? StringEscapeUtils.unescapeJava(tableOptions.get(option)) : tableOptions.get(option); if (value.length() != 1) { throw new ValidationException( String.format( "Option '%s.%s' must be a string with single character, but was: %s", IDENTIFIER, option.key(), tableOptions.get(option))); } }
3.68
framework_CustomizedSystemMessages_setAuthenticationErrorMessage
/** * Sets the message of the notification. Set to null for no message. If both * caption and message are null, the notification is disabled. * * @param authenticationErrorMessage * the message */ public void setAuthenticationErrorMessage( String authenticationErrorMessage) { this.authenticationErrorMessage = authenticationErrorMessage; }
3.68
hadoop_ByteBufferDecodingState_checkOutputBuffers
/** * Check and ensure the buffers are of the desired length and type, direct * buffers or not. * @param buffers the buffers to check */ void checkOutputBuffers(ByteBuffer[] buffers) { for (ByteBuffer buffer : buffers) { if (buffer == null) { throw new HadoopIllegalArgumentException( "Invalid buffer found, not allowing null"); } if (buffer.remaining() != decodeLength) { throw new HadoopIllegalArgumentException( "Invalid buffer, not of length " + decodeLength); } if (buffer.isDirect() != usingDirectBuffer) { throw new HadoopIllegalArgumentException( "Invalid buffer, isDirect should be " + usingDirectBuffer); } } }
3.68
open-banking-gateway_Xs2aConsentInfo_isOkRedirectConsent
/** * Was the redirection from ASPSP in REDIRECT mode using OK (consent granted) or NOK url (consent denied). */ public boolean isOkRedirectConsent(Xs2aContext ctx) { return ctx.isRedirectConsentOk(); }
3.68
hadoop_LeveldbIterator_remove
/** * Removes from the database the last element returned by the iterator. */ @Override public void remove() throws DBException { try { iter.remove(); } catch (DBException e) { throw e; } catch (RuntimeException e) { throw new DBException(e.getMessage(), e); } }
3.68
hadoop_IOStatisticsLogging_ioStatisticsToString
/** * Convert IOStatistics to a string form. * @param statistics A statistics instance. * @return string value or the empty string if null */ public static String ioStatisticsToString( @Nullable final IOStatistics statistics) { if (statistics != null) { StringBuilder sb = new StringBuilder(); mapToString(sb, "counters", statistics.counters(), " "); mapToString(sb, "gauges", statistics.gauges(), " "); mapToString(sb, "minimums", statistics.minimums(), " "); mapToString(sb, "maximums", statistics.maximums(), " "); mapToString(sb, "means", statistics.meanStatistics(), " "); return sb.toString(); } else { return ""; } }
3.68
hadoop_AbfsDelegationTokenIdentifier_getKind
/** * Get the token kind. * Returns {@link #TOKEN_KIND} always. * If a subclass does not want its renew/cancel process to be managed * by {@link AbfsDelegationTokenManager}, this must be overridden. * @return the kind of the token. */ @Override public Text getKind() { return TOKEN_KIND; }
3.68
starts_ChecksumUtil_isWellKnownUrl
/** * Check for so-called "well-known" classes that we don't track for RTS purposes. * Copied from Ekstazi. * * @param klas The class we want to check * @return true if klas is a well-known class */ public static boolean isWellKnownUrl(String klas) { return klas.contains("!/org/junit") || klas.contains("!/junit") || klas.contains("!/org/hamcrest") || klas.contains("!/org/apache/maven") || klas.contains(JAVAHOME); }
3.68
morf_SqlDialect_requiresOrderByForWindowFunction
/** * Windowing functions usually have the following syntax: * <b>FUNCTION</b> OVER (<b>partitionClause</b> <b>orderByClause</b>). * The partitionClause is generally optional, but the orderByClause is mandatory for certain functions. This method * specifies the functions for which the orderByClause is mandatory. * Dialects that behave differently in this respect should override this method as required. * * @param function the windowing function * @return true if the orderBy clause is mandatory */ protected boolean requiresOrderByForWindowFunction(Function function) { return WINDOW_FUNCTION_REQUIRING_ORDERBY.contains(function.getType()); }
3.68
framework_ContainerOrderedWrapper_removeListener
/** * @deprecated As of 7.0, replaced by * {@link #removePropertySetChangeListener(Container.PropertySetChangeListener)} */ @Override @Deprecated public void removeListener(Container.PropertySetChangeListener listener) { removePropertySetChangeListener(listener); }
3.68
framework_AbstractComponent_addListener
/* * Registers a new listener to listen events generated by this component. * Don't add a JavaDoc comment here, we use the default documentation from * implemented interface. */ @Override public Registration addListener(Component.Listener listener) { return addListener(Component.Event.class, listener, COMPONENT_EVENT_METHOD); }
3.68
morf_DatabaseMetaDataProvider_named
/** * Creates {@link AName} for searching the maps within this metadata provider. * * <p> * Metadata providers need to use case insensitive keys for lookup maps, since * database object name are considered case insensitive. While the same could * be achieved by simply upper-casing all database object names, such approach * can lead to mistakes. * * <p> * On top of that, using {@link AName} instead of upper-cased strings has the * advantage of strongly typed map keys, as opposed to maps of strings. * * @param name Case insensitive name of the object. * @return {@link AName} instance suitable for use as a key in the lookup maps. */ protected static AName named(String name) { return new AName(name); }
3.68
hadoop_AllocateResponse_containersFromPreviousAttempt
/** * Set the <code>containersFromPreviousAttempt</code> of the response. * @see AllocateResponse#setContainersFromPreviousAttempts(List) * @param containersFromPreviousAttempt * <code>containersFromPreviousAttempt</code> of the response * @return {@link AllocateResponseBuilder} */ @Private @Unstable public AllocateResponseBuilder containersFromPreviousAttempt( List<Container> containersFromPreviousAttempt) { allocateResponse.setContainersFromPreviousAttempts( containersFromPreviousAttempt); return this; }
3.68
hadoop_ProtobufHelper_getRemoteException
/** * Extract the remote exception from an unshaded version of the protobuf * libraries. * Kept for backward compatibility. * Return the IOException thrown by the remote server wrapped in * ServiceException as cause. * @param se ServiceException that wraps IO exception thrown by the server * @return Exception wrapped in ServiceException or * a new IOException that wraps the unexpected ServiceException. */ @Deprecated public static IOException getRemoteException( com.google.protobuf.ServiceException se) { Throwable e = se.getCause(); if (e == null) { return new IOException(se); } return e instanceof IOException ? (IOException) e : new IOException(se); }
3.68
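A typical call site for this unwrapping helper, assuming the ProtobufHelper class above is on the classpath; the stub interface and method are hypothetical stand-ins for a generated protobuf service:

import java.io.IOException;
import com.google.protobuf.ServiceException;

public class TranslatorSketch {
    interface Stub { void doSomething() throws ServiceException; } // hypothetical PB stub

    void doSomething(Stub stub) throws IOException {
        try {
            stub.doSomething();
        } catch (ServiceException se) {
            // Surface the server's IOException rather than the transport wrapper.
            throw ProtobufHelper.getRemoteException(se);
        }
    }
}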
framework_VTree_setFocused
/** * Sets whether the node is focused. * * @param focused * True if focused, false if not */ public void setFocused(boolean focused) { if (!this.focused && focused) { nodeCaptionDiv.addClassName(CLASSNAME_FOCUSED); this.focused = focused; if (BrowserInfo.get().isOpera()) { nodeCaptionDiv.focus(); } treeHasFocus = true; } else if (this.focused && !focused) { nodeCaptionDiv.removeClassName(CLASSNAME_FOCUSED); this.focused = focused; treeHasFocus = false; } }
3.68
hudi_HoodieMetaSyncOperations_deleteLastReplicatedTimeStamp
/** * Delete the timestamp of last replication. */ default void deleteLastReplicatedTimeStamp(String tableName) { }
3.68
hadoop_RouterDelegationTokenSecretManager_storeNewToken
/** * Stores a new token in the federation state store on behalf of the Router. * * @param identifier RMDelegationToken. * @param tokenInfo DelegationTokenInformation. */ public void storeNewToken(RMDelegationTokenIdentifier identifier, DelegationTokenInformation tokenInfo) { try { String token = RouterDelegationTokenSupport.encodeDelegationTokenInformation(tokenInfo); long renewDate = tokenInfo.getRenewDate(); federationFacade.storeNewToken(identifier, renewDate, token); } catch (Exception e) { if (!shouldIgnoreException(e)) { LOG.error("Error in storing RMDelegationToken with sequence number: {}.", identifier.getSequenceNumber()); ExitUtil.terminate(1, e); } } }
3.68
hbase_CompactingMemStore_pushActiveToPipeline
/** * NOTE: When {@link CompactingMemStore#flushInMemory(MutableSegment)} calls this method, due to * concurrent writes and because we first add the cell size to currActive.getDataSize and then * actually add the cell to currActive.cellSet, it is possible that currActive.getDataSize could not * accommodate cellToAdd while currActive.cellSet is still empty, if pending writes have not yet added * cells to currActive.cellSet; so for * {@link CompactingMemStore#flushInMemory(MutableSegment)} the checkEmpty parameter is false. But if * {@link CompactingMemStore#snapshot} calls this method, because there are no pending * writes, the checkEmpty parameter can be true. */ protected void pushActiveToPipeline(MutableSegment currActive, boolean checkEmpty) { if (!checkEmpty || !currActive.isEmpty()) { pipeline.pushHead(currActive); resetActive(); } }
3.68
hadoop_NamenodeStatusReport_setNamespaceInfo
/** * Set the namespace information. * * @param info Namespace information. */ public void setNamespaceInfo(NamespaceInfo info) { this.clusterId = info.getClusterID(); this.blockPoolId = info.getBlockPoolID(); this.registrationValid = true; }
3.68
flink_TwoInputOperator_getInput1
/** * Gets the data set that this operation uses as its first input. * * @return The data set that this operation uses as its first input. */ public DataSet<IN1> getInput1() { return this.input1; }
3.68
flink_Preconditions_checkCompletedNormally
/** * Ensures that future has completed normally. * * @throws IllegalStateException Thrown, if future has not completed or it has completed * exceptionally. */ public static void checkCompletedNormally(CompletableFuture<?> future) { checkState(future.isDone()); if (future.isCompletedExceptionally()) { try { future.get(); } catch (InterruptedException | ExecutionException e) { throw new IllegalStateException(e); } } }
3.68
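A minimal demonstration of the contract, assuming the Preconditions class above is in scope:

import java.util.concurrent.CompletableFuture;

public class CheckDemo {
    public static void main(String[] args) {
        CompletableFuture<String> ok = CompletableFuture.completedFuture("done");
        Preconditions.checkCompletedNormally(ok); // passes silently

        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(new RuntimeException("boom"));
        Preconditions.checkCompletedNormally(failed); // throws IllegalStateException wrapping "boom"
    }
}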
framework_RangeValidator_setMaxValueIncluded
/** * Sets whether the maximum value is part of the accepted range. * * @param maxValueIncluded * true if the maximum value should be part of the range, false * otherwise */ public void setMaxValueIncluded(boolean maxValueIncluded) { this.maxValueIncluded = maxValueIncluded; }
3.68
graphhopper_LocationIndexTree_setMaxRegionSearch
/** * Searches also neighbouring tiles until the maximum distance from the query point is reached * (minResolutionInMeter*regionAround). Set to 1 to only search one tile, which is good if you * have strict performance requirements and can tolerate that edges lying in neighbouring tiles * are not found. Default is 4, which means that approximately a square of three tiles upwards, * downwards, leftwards and rightwards from the query tile is searched. */ public LocationIndexTree setMaxRegionSearch(int numTiles) { if (numTiles < 1) throw new IllegalArgumentException("Region of location index must be at least 1 but was " + numTiles); this.maxRegionSearch = numTiles; return this; }
3.68
hudi_StringUtils_nullToEmpty
/** * Returns the given string if it is non-null; the empty string otherwise. * * @param string the string to test and possibly return * @return {@code string} itself if it is non-null; {@code ""} if it is null */ public static String nullToEmpty(@Nullable String string) { return string == null ? "" : string; }
3.68
hadoop_TypedBytesRecordOutput_get
/** * Get a thread-local typed bytes record output for the supplied * {@link DataOutput}. * * @param out data output object * @return typed bytes record output corresponding to the supplied * {@link DataOutput}. */ public static TypedBytesRecordOutput get(DataOutput out) { return get(TypedBytesOutput.get(out)); }
3.68
framework_PureGWTTestApplication_getCommand
/** * Gets a reference to a {@link Command} item that is the direct child * of this level of menu. * * @param title * the command's title * @return a command, if found in this menu level, otherwise null. */ public Command getCommand(String title) { for (Command c : items) { if (c.title.equals(title)) { return c; } } return null; }
3.68
framework_Table_getIdByIndex
/** * Returns the item ID for the item represented by the index given. Assumes * that the current container implements {@link Container.Indexed}. * * See {@link Container.Indexed#getIdByIndex(int)} for more information * about the exceptions that can be thrown. * * @param index * the index for which the item ID should be fetched * @return the item ID for the given index * * @throws ClassCastException * if container does not implement {@link Container.Indexed} * @throws IndexOutOfBoundsException * thrown by {@link Container.Indexed#getIdByIndex(int)} if the * index is invalid */ protected Object getIdByIndex(int index) { return ((Container.Indexed) items).getIdByIndex(index); }
3.68
hbase_ZKUtil_getDataAndWatch
/** * Get the data at the specified znode and set a watch. Returns the data and sets a watch if the * node exists. Returns null and no watch is set if the node does not exist or there is an * exception. * @param zkw zk reference * @param znode path of node * @param stat object to populate the version of the znode * @return data of the specified znode, or null * @throws KeeperException if unexpected zookeeper exception */ public static byte[] getDataAndWatch(ZKWatcher zkw, String znode, Stat stat) throws KeeperException { return getDataInternal(zkw, znode, stat, true, true); }
3.68
morf_DatabaseMetaDataProvider_getTable
/** * @see org.alfasoftware.morf.metadata.Schema#getTable(java.lang.String) */ @Override public Table getTable(String tableName) { return tableCache.getUnchecked(named(tableName)); }
3.68
flink_SourceCoordinatorContext_runInCoordinatorThread
/** {@inheritDoc} If the runnable throws an Exception, the corresponding job is failed. */
@Override
public void runInCoordinatorThread(Runnable runnable) {
    // when using a ScheduledThreadPool, uncaught exception handler catches only
    // exceptions thrown by the threadPool, so manually call it when the exception is
    // thrown by the runnable
    coordinatorExecutor.execute(
        new ThrowableCatchingRunnable(
            throwable ->
                coordinatorThreadFactory.uncaughtException(
                    Thread.currentThread(), throwable),
            runnable));
}
3.68
hmily_AbstractHmilyTransactionAspect_hmilyInterceptor
/** * this is point cut with {@linkplain HmilyTCC }. */ @Pointcut("@annotation(org.dromara.hmily.annotation.HmilyTCC) || @annotation(org.dromara.hmily.annotation.HmilyTAC) || @annotation(org.dromara.hmily.annotation.HmilyXA)") public void hmilyInterceptor() { }
3.68
morf_AbstractSqlDialectTest_verifyRepairAutoNumberStartPosition
/** * Verifies the expected SQL statements to be run when repairing the autonumber start position. * @param sqlScriptExecutor The script executor * @param connection The connection to use */ @SuppressWarnings("unused") protected void verifyRepairAutoNumberStartPosition(SqlScriptExecutor sqlScriptExecutor, Connection connection) { verifyNoMoreInteractions(sqlScriptExecutor); }
3.68
querydsl_MathExpressions_min
/** * Create a {@code min(left, right)} expression * * <p>Return the smaller of the given values</p> * * @return min(left, right) */ public static <A extends Number & Comparable<?>> NumberExpression<A> min(Expression<A> left, Expression<A> right) { return NumberExpression.min(left, right); }
3.68
hbase_BucketAllocator_allocateBlock
/**
 * Allocate a block with specified size. Return the offset
 * @param blockSize size of block
 * @return the offset in the IOEngine
 */
public synchronized long allocateBlock(int blockSize)
    throws CacheFullException, BucketAllocatorException {
    assert blockSize > 0;
    BucketSizeInfo bsi = roundUpToBucketSizeInfo(blockSize);
    if (bsi == null) {
        throw new BucketAllocatorException("Allocation too big size=" + blockSize
            + "; adjust BucketCache sizes " + BlockCacheFactory.BUCKET_CACHE_BUCKETS_KEY
            + " to accommodate if size seems reasonable and you want it cached.");
    }
    long offset = bsi.allocateBlock(blockSize);

    // Ask caller to free up space and try again!
    if (offset < 0) throw new CacheFullException(blockSize, bsi.sizeIndex());
    usedSize += bucketSizes[bsi.sizeIndex()];
    return offset;
}
3.68
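Since allocateBlock signals exhaustion with CacheFullException, callers typically evict and retry. A hedged sketch of that loop, assuming the HBase classes above are in scope; the eviction stub and retry budget are illustrative, not BucketCache's real policy:

public class AllocatorUsage {
    private final BucketAllocator allocator;

    AllocatorUsage(BucketAllocator allocator) { this.allocator = allocator; }

    void freeSpace() { /* evict cached blocks here (illustrative stub) */ }

    long allocate(int blockSize) throws BucketAllocatorException {
        for (int attempt = 0; attempt < 3; attempt++) {
            try {
                return allocator.allocateBlock(blockSize); // method from the snippet above
            } catch (CacheFullException e) {
                freeSpace(); // make room, then retry
            }
        }
        throw new BucketAllocatorException("no space after retries for size=" + blockSize);
    }
}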
flink_ReusingKeyGroupedIterator_next
/** Prior to calling this method, call hasNext() once! */ @Override public E next() { if (this.currentIsUnconsumed || hasNext()) { this.currentIsUnconsumed = false; return ReusingKeyGroupedIterator.this.current; } else { throw new NoSuchElementException(); } }
3.68
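The warning above means the iterator prepares its current element in hasNext(); the safe consumption pattern pairs every next() with a preceding hasNext(), as in this generic example:

import java.util.Iterator;
import java.util.List;

public class IterationPattern {
    public static void main(String[] args) {
        Iterator<String> values = List.of("a", "b", "c").iterator();
        while (values.hasNext()) {    // hasNext() prepares/advances the element
            String v = values.next(); // next() may rely on that preparation
            System.out.println(v);
        }
    }
}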
hmily_HmilyParen_isLeftParen
/** * Judge passed token is left paren or not. * * @param token token * @return is left paren or not */ public static boolean isLeftParen(final char token) { return Arrays.stream(values()).anyMatch(each -> each.leftParen == token); }
3.68
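A quick check of the classifier above, assuming the HmilyParen enum is in scope:

public class ParenDemo {
    public static void main(String[] args) {
        System.out.println(HmilyParen.isLeftParen('(')); // true: '(' is a left paren
        System.out.println(HmilyParen.isLeftParen(')')); // false: not a left paren
    }
}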
framework_ErrorIndicator_setup
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server. * VaadinRequest) */ @Override protected void setup(VaadinRequest request) { VerticalLayout layout = new VerticalLayout(); TextField inVertical = new TextField(); inVertical.setRequired(true); inVertical.setRequiredError("Vertical layout tooltip"); inVertical.setCaption("Vertical layout caption"); layout.addComponent(inVertical); addComponent(layout); HorizontalLayout horizontalLayout = new HorizontalLayout(); TextField inHorizontal = new TextField(); inHorizontal.setRequired(true); inHorizontal.setRequiredError("Horizontal layout tooltip"); inHorizontal.setCaption("Horizontal layout caption"); horizontalLayout.addComponent(inHorizontal); layout.addComponent(horizontalLayout); getTooltipConfiguration().setOpenDelay(0); getTooltipConfiguration().setQuickOpenDelay(0); getTooltipConfiguration().setCloseTimeout(1000); }
3.68
hadoop_NamenodeStatusReport_getNumFiles
/** * Get the number of files. * * @return The number of files. */ public long getNumFiles() { return this.numOfFiles; }
3.68
hudi_Table_sortAndLimit
/**
 * Prepares for rendering. Rows are sorted and limited
 */
private void sortAndLimit() {
    this.renderRows = new ArrayList<>();
    final int limit = this.limitOptional.orElse(rawRows.size());
    // Row number is added here if enabled
    final List<List<Comparable>> rawOrderedRows = orderRows();
    final List<List<Comparable>> orderedRows;
    if (addRowNo) {
        orderedRows = new ArrayList<>();
        int rowNo = 0;
        for (List<Comparable> row : rawOrderedRows) {
            List<Comparable> newRow = new ArrayList<>();
            newRow.add(rowNo++);
            newRow.addAll(row);
            orderedRows.add(newRow);
        }
    } else {
        orderedRows = rawOrderedRows;
    }
    renderHeaders = addRowNo
        ? new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_ROW_NO)
            .addTableHeaderFields(rowHeader)
        : rowHeader;
    renderRows = orderedRows.stream().limit(limit)
        .map(row -> IntStream.range(0, renderHeaders.getNumFields()).mapToObj(idx -> {
            String fieldName = renderHeaders.get(idx);
            if (fieldNameToConverterMap.containsKey(fieldName)) {
                return fieldNameToConverterMap.get(fieldName).apply(row.get(idx));
            }
            Object v = row.get(idx);
            return v == null ? "null" : v.toString();
        }).collect(Collectors.toList())).collect(Collectors.toList());
}
3.68
morf_DrawIOGraphPrinterImpl_print
/**
 * Prints the given graph to the String instance in a format suitable for importing into Draw.io
 * @param graph - {@link MutableGraph} of {@link Node}
 * @param layoutFormat - {@link LayoutFormat} - the specific layout format to use
 */
@Override
public String print(DrawIOGraphPrinter.PrintableGraph<DrawIOGraphPrinter.Node> graph, DrawIOGraphPrinter.LayoutFormat layoutFormat) {
    List<String> outputLines = buildOutputLines(graph);

    // Sort the lines so that draw.io is more likely to group them together
    Collections.sort(outputLines);

    /*
     * Output to console
     */
    StringBuilder sb = new StringBuilder();
    sb.append("\n\n### COPY LINES BELOW THIS (EXCLUDING THIS LINE) ###\n");
    sb.append("\n# label: %label%");
    sb.append("\n# style: whiteSpace=wrap;html=1;rounded=1;fillColor=%fill%;strokeColor=#000000;fontColor=%fontColor%;");
    sb.append("\n# namespace: csvimport-");
    sb.append("\n# connect: {\"from\": \"refs\", \"to\": \"id\", \"style\": \"sharp=1;fontSize=14;\"}");
    sb.append("\n# width: auto");
    sb.append("\n# height: auto");
    sb.append("\n# padding: 20");
    sb.append("\n# ignore: fill,fontColor,id,refs");
    if (layoutFormat != DrawIOGraphPrinter.LayoutFormat.HORIZONTAL_FLOW
            && layoutFormat != DrawIOGraphPrinter.LayoutFormat.VERTICAL_FLOW) {
        sb.append("\n# nodespacing: 200");
        sb.append("\n# levelspacing: 100");
        sb.append("\n# edgespacing: 60");
    }
    sb.append("\n# layout: " + layoutFormat);
    sb.append("\nlabel,fill,fontColor,id,refs");
    for (String outputLine : outputLines) {
        sb.append("\n");
        sb.append(outputLine);
    }
    sb.append("\n\n### COPY LINES ABOVE THIS (EXCLUDING THIS LINE) ###\n");
    return sb.toString();
}
3.68
flink_ClosureCleaner_clean
/** * Tries to clean the closure of the given object, if the object is a non-static inner class. * * @param func The object whose closure should be cleaned. * @param level the clean up level. * @param checkSerializable Flag to indicate whether serializability should be checked after the * closure cleaning attempt. * @throws InvalidProgramException Thrown, if 'checkSerializable' is true, and the object was * not serializable after the closure cleaning. * @throws RuntimeException A RuntimeException may be thrown, if the code of the class could not * be loaded, in order to process during the closure cleaning. */ public static void clean( Object func, ExecutionConfig.ClosureCleanerLevel level, boolean checkSerializable) { clean(func, level, checkSerializable, Collections.newSetFromMap(new IdentityHashMap<>())); }
3.68
flink_SegmentsUtil_setByte
/** * Sets a byte in the given segments. * * @param segments target segments. * @param offset value offset. * @param value the byte value to set. */ public static void setByte(MemorySegment[] segments, int offset, byte value) { if (inFirstSegment(segments, offset, 1)) { segments[0].put(offset, value); } else { setByteMultiSegments(segments, offset, value); } }
3.68
hbase_WALFactory_createWALWriter
/** * If you already have a WALFactory, you should favor the instance method. Uses defaults. * @return a writer that won't overwrite files. Caller must close. */ public static Writer createWALWriter(final FileSystem fs, final Path path, final Configuration configuration) throws IOException { return FSHLogProvider.createWriter(configuration, fs, path, false); }
3.68
hbase_Compactor_performCompaction
/**
 * Performs the compaction.
 * @param fd                FileDetails of cell sink writer
 * @param scanner           Where to read from.
 * @param writer            Where to write to.
 * @param smallestReadPoint Smallest read point.
 * @param cleanSeqId        When true, remove seqId(used to be mvcc) value which is &lt;=
 *                          smallestReadPoint
 * @param request           compaction request.
 * @param progress          Progress reporter.
 * @return Whether compaction ended; false if it was interrupted for some reason.
 */
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer,
    long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController,
    CompactionRequestImpl request, CompactionProgress progress) throws IOException {
    assert writer instanceof ShipperListener;
    long bytesWrittenProgressForLog = 0;
    long bytesWrittenProgressForShippedCall = 0;
    // Since scanner.next() can return 'false' but still be delivering data,
    // we have to use a do/while loop.
    List<Cell> cells = new ArrayList<>();
    long currentTime = EnvironmentEdgeManager.currentTime();
    long lastMillis = 0;
    if (LOG.isDebugEnabled()) {
        lastMillis = currentTime;
    }
    CloseChecker closeChecker = new CloseChecker(conf, currentTime);
    String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
    long now = 0;
    boolean hasMore;
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax)
        .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE,
            compactScannerSizeLimit)
        .build();

    throughputController.start(compactionName);
    Shipper shipper = (scanner instanceof Shipper) ? (Shipper) scanner : null;
    long shippedCallSizeLimit =
        (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize();
    try {
        do {
            hasMore = scanner.next(cells, scannerContext);
            currentTime = EnvironmentEdgeManager.currentTime();
            if (LOG.isDebugEnabled()) {
                now = currentTime;
            }
            if (closeChecker.isTimeLimit(store, currentTime)) {
                progress.cancel();
                return false;
            }
            // output to writer:
            Cell lastCleanCell = null;
            long lastCleanCellSeqId = 0;
            for (Cell c : cells) {
                if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) {
                    lastCleanCell = c;
                    lastCleanCellSeqId = c.getSequenceId();
                    PrivateCellUtil.setSequenceId(c, 0);
                } else {
                    lastCleanCell = null;
                    lastCleanCellSeqId = 0;
                }
                writer.append(c);
                int len = c.getSerializedSize();
                ++progress.currentCompactedKVs;
                progress.totalCompactedSize += len;
                bytesWrittenProgressForShippedCall += len;
                if (LOG.isDebugEnabled()) {
                    bytesWrittenProgressForLog += len;
                }
                throughputController.control(compactionName, len);
                if (closeChecker.isSizeLimit(store, len)) {
                    progress.cancel();
                    return false;
                }
            }
            if (shipper != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
                if (lastCleanCell != null) {
                    // HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly.
                    // ShipperListener will do a clone of the last cells it refer, so need to set back
                    // sequence id before ShipperListener.beforeShipped
                    PrivateCellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
                }
                // Clone the cells that are in the writer so that they are freed of references,
                // if they are holding any.
                ((ShipperListener) writer).beforeShipped();
                // The SHARED block references, being read for compaction, will be kept in prevBlocks
                // list(See HFileScannerImpl#prevBlocks). In case of scan flow, after each set of cells
                // being returned to client, we will call shipped() which can clear this list. Here by
                // we are doing the similar thing. In between the compaction (after every N cells
                // written with collective size of 'shippedCallSizeLimit') we will call shipped which
                // may clear prevBlocks list.
                shipper.shipped();
                bytesWrittenProgressForShippedCall = 0;
            }
            if (lastCleanCell != null) {
                // HBASE-16931, set back sequence id to avoid affecting scan order unexpectedly
                PrivateCellUtil.setSequenceId(lastCleanCell, lastCleanCellSeqId);
            }
            // Log the progress of long running compactions every minute if
            // logging at DEBUG level
            if (LOG.isDebugEnabled()) {
                if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
                    String rate = String.format("%.2f",
                        (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0));
                    LOG.debug("Compaction progress: {} {}, rate={} KB/sec, throughputController is {}",
                        compactionName, progress, rate, throughputController);
                    lastMillis = now;
                    bytesWrittenProgressForLog = 0;
                }
            }
            cells.clear();
        } while (hasMore);
    } catch (InterruptedException e) {
        progress.cancel();
        throw new InterruptedIOException(
            "Interrupted while control throughput of compacting " + compactionName);
    } finally {
        // Clone last cell in the final because writer will append last cell when committing. If
        // don't clone here and once the scanner get closed, then the memory of last cell will be
        // released. (HBASE-22582)
        ((ShipperListener) writer).beforeShipped();
        throughputController.finish(compactionName);
    }
    progress.complete();
    return true;
}
3.68
hibernate-validator_ClassVisitor_visitTypeAsInterface
/** * Doesn't perform any checks at the moment but calls visit methods on its own elements. * * @param element a class element to check * @param aVoid */ @Override public Void visitTypeAsInterface(TypeElement element, Void aVoid) { visitAllMyElements( element ); return null; }
3.68
hadoop_NormalizedResourceEvent_getMemory
/** * the normalized memory * @return the normalized memory */ public long getMemory() { return this.memory; }
3.68
hadoop_RolloverSignerSecretProvider_rollSecret
/** * Rolls the secret. It is called automatically at the rollover interval. */ protected synchronized void rollSecret() { if (!isDestroyed) { LOG.debug("rolling secret"); byte[] newSecret = generateNewSecret(); secrets = new byte[][]{newSecret, secrets[0]}; } }
3.68
flink_Description_add
/** * Adds a block of description. * * @param block block of description to add * @return this builder */ public DescriptionBuilder add(BlockElement block) { blocks.add(block); return this; }
3.68
hbase_TableDescriptorBuilder_equals
/** * Compare the contents of the descriptor with another one passed as a parameter. Checks if the * obj passed is an instance of ModifyableTableDescriptor, if yes then the contents of the * descriptors are compared. * @param obj The object to compare * @return true if the contents of the two descriptors exactly match * @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj instanceof ModifyableTableDescriptor) { return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0; } return false; }
3.68
hbase_RpcExecutor_getRpcCallSize
/** * Return the {@link RpcCall#getSize()} from {@code callRunner} or 0L. */ private static long getRpcCallSize(final CallRunner callRunner) { return Optional.ofNullable(callRunner).map(CallRunner::getRpcCall).map(RpcCall::getSize) .orElse(0L); }
3.68
hbase_WALProcedureStore_initOldLogs
/** * Assumes the file set was obtained by calling {@link #getLogFiles()}, which sorts * the file set by log id. * @return Max-LogID of the specified log file set */ private long initOldLogs(FileStatus[] logFiles) throws IOException { if (logFiles == null || logFiles.length == 0) { return 0L; } long maxLogId = 0; for (int i = 0; i < logFiles.length; ++i) { final Path logPath = logFiles[i].getPath(); leaseRecovery.recoverFileLease(fs, logPath); if (!isRunning()) { throw new IOException("wal aborting"); } maxLogId = Math.max(maxLogId, getLogIdFromName(logPath.getName())); ProcedureWALFile log = initOldLog(logFiles[i], this.walArchiveDir); if (log != null) { this.logs.add(log); } } initTrackerFromOldLogs(); return maxLogId; }
3.68
flink_HybridShuffleConfiguration_getBufferRequestTimeout
/** * Maximum time to wait when requesting read buffers from the buffer pool before throwing an * exception. */ public Duration getBufferRequestTimeout() { return bufferRequestTimeout; }
3.68
hbase_ScanQueryMatcher_isCellTTLExpired
/** Returns true if the cell is expired */
private static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp,
    final long now) {
    // Look for a TTL tag first. Use it instead of the family setting if
    // found. If a cell has multiple TTLs, resolve the conflict by using the
    // first tag encountered.
    Iterator<Tag> i = PrivateCellUtil.tagsIterator(cell);
    while (i.hasNext()) {
        Tag t = i.next();
        if (TagType.TTL_TAG_TYPE == t.getType()) {
            // Unlike in schema cell TTLs are stored in milliseconds, no need
            // to convert
            long ts = cell.getTimestamp();
            assert t.getValueLength() == Bytes.SIZEOF_LONG;
            long ttl = Tag.getValueAsLong(t);
            if (ts + ttl < now) {
                return true;
            }
            // Per cell TTLs cannot extend lifetime beyond family settings, so
            // fall through to check that
            break;
        }
    }
    return false;
}
3.68
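The expiry test above reduces to ts + ttl < now, with the TTL tag value already in milliseconds. A worked example with illustrative values:

public class TtlMath {
    public static void main(String[] args) {
        long ts = 1_000L;  // cell timestamp (ms)
        long ttl = 500L;   // per-cell TTL tag value, already in ms
        System.out.println(ts + ttl < 1_600L); // true: expired once now passes 1500
        System.out.println(ts + ttl < 1_400L); // false: still live at now=1400
    }
}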
hadoop_FilterFileSystem_checkPath
/** Check that a Path belongs to this FileSystem. */ @Override protected void checkPath(Path path) { fs.checkPath(path); }
3.68