Dataset columns: name (string, lengths 12–178) · code_snippet (string, lengths 8–36.5k) · score (float64, range 3.26–3.68)
hudi_HoodieInputFormatUtils_getWritePartitionPaths
/** * Returns all the incremental write partition paths as a set with the given commits metadata. * * @param metadataList The commits metadata * @return the partition path set */ public static Set<String> getWritePartitionPaths(List<HoodieCommitMetadata> metadataList) { return metadataList.stream() .map(HoodieCommitMetadata::getWritePartitionPaths) .flatMap(Collection::stream) .collect(Collectors.toSet()); }
3.68
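A minimal usage sketch for the getWritePartitionPaths helper above, assuming Hudi's HoodieCommitMetadata is on the classpath; the metadata fetch is a hypothetical placeholder:

import java.util.List;
import java.util.Set;
import org.apache.hudi.common.model.HoodieCommitMetadata;

// metadataList is assumed to have been deserialized from completed commits elsewhere.
List<HoodieCommitMetadata> metadataList = fetchCompletedCommitMetadata(); // hypothetical helper
Set<String> partitions = HoodieInputFormatUtils.getWritePartitionPaths(metadataList);
partitions.forEach(p -> System.out.println("Incremental write touched partition: " + p));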
flink_MetricStore_retainTaskManagers
/** * Remove inactive task managers. * * @param activeTaskManagers the task managers to retain. */ synchronized void retainTaskManagers(List<String> activeTaskManagers) { taskManagers.keySet().retainAll(activeTaskManagers); }
3.68
hudi_HoodieHFileDataBlock_printRecord
/** * Print the record in json format */ private void printRecord(String msg, byte[] bs, Schema schema) throws IOException { GenericRecord record = HoodieAvroUtils.bytesToAvro(bs, schema); byte[] json = HoodieAvroUtils.avroToJson(record, true); LOG.error(String.format("%s: %s", msg, new String(json))); }
3.68
hadoop_CommitUtils_extractJobID
/** * Extract the job ID from a configuration. * @param conf configuration * @return a job ID or null. */ public static String extractJobID(Configuration conf) { String jobUUID = conf.getTrimmed(FS_S3A_COMMITTER_UUID, ""); if (!jobUUID.isEmpty()) { return jobUUID; } // there is no job UUID. // look for one from spark jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, ""); if (!jobUUID.isEmpty()) { return jobUUID; } jobUUID = conf.getTrimmed(MR_JOB_ID, ""); if (!jobUUID.isEmpty()) { return jobUUID; } return null; }
3.68
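A hedged sketch of the lookup order in extractJobID above: the S3A committer UUID wins, then the Spark write UUID, then the MR job id. The literal key string below is an assumption standing in for the SPARK_WRITE_UUID constant:

import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration(false);
conf.set("spark.sql.sources.writeJobUUID", "1234-abcd"); // assumed value of SPARK_WRITE_UUID
// No committer UUID is set, so the Spark UUID is the first non-empty hit.
String jobId = CommitUtils.extractJobID(conf); // "1234-abcd"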
hbase_RequestConverter_buildGetRegionInfoRequest
/** * Create a protocol buffer GetRegionInfoRequest. * @param regionName the name of the region to get info * @param includeCompactionState indicates whether the compaction state is requested * @param includeBestSplitRow indicates whether the bestSplitRow is requested * @return protocol buffer GetRegionInfoRequest */ public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, final boolean includeCompactionState, boolean includeBestSplitRow) { GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (includeCompactionState) { builder.setCompactionState(includeCompactionState); } if (includeBestSplitRow) { builder.setBestSplitRow(includeBestSplitRow); } return builder.build(); }
3.68
hadoop_DecayRpcSchedulerDetailedMetrics_addQueueTime
/** * Instrument a Call queue time based on its priority. * * @param priority of the RPC call * @param queueTime of the RPC call in the queue of the priority */ public void addQueueTime(int priority, long queueTime) { rpcQueueRates.add(queueNamesForLevels[priority], queueTime); }
3.68
hbase_SimpleRegionNormalizer_isSplitEnabled
/** * Return this instance's configured value for {@value #SPLIT_ENABLED_KEY}. */ public boolean isSplitEnabled() { return normalizerConfiguration.isSplitEnabled(); }
3.68
hadoop_SolverPreprocessor_validate
/** * Check if Solver's input parameters are valid. * * @param jobHistory the history {@link ResourceSkyline}s of the recurring * pipeline job. * @param timeInterval the time interval which is used to discretize the * history {@link ResourceSkyline}s. * @throws InvalidInputException if: (1) jobHistory is <em>null</em>; * (2) jobHistory is empty; (3) timeInterval is non-positive. */ public final void validate( final Map<RecurrenceId, List<ResourceSkyline>> jobHistory, final int timeInterval) throws InvalidInputException { if ((jobHistory == null) || (jobHistory.size() == 0)) { LOGGER.error( "Job resource skyline history is invalid, please try again with" + " valid resource skyline history."); throw new InvalidInputException("Job ResourceSkyline history", "invalid"); } if (timeInterval <= 0) { LOGGER.error( "Solver timeInterval {} is invalid, please specify a positive value.", timeInterval); throw new InvalidInputException("Solver timeInterval", "non-positive"); } }
3.68
graphhopper_Entity_loadTable
/** * The main entry point into an Entity.Loader. Interprets each row of a CSV file within a zip file as a single * GTFS entity, and loads them into a table. * * @param zipOrDirectory the zip file or directory from which to read a table */ public void loadTable(File zipOrDirectory) throws IOException { InputStream zis; if (zipOrDirectory.isDirectory()) { Path path = zipOrDirectory.toPath().resolve(tableName + ".txt"); if (!path.toFile().exists()) { missing(); return; } zis = new FileInputStream(path.toFile()); LOG.info("Loading GTFS table {} from {}", tableName, path); } else { ZipFile zip = new ZipFile(zipOrDirectory); ZipEntry entry = zip.getEntry(tableName + ".txt"); if (entry == null) { Enumeration<? extends ZipEntry> entries = zip.entries(); // check if table is contained within sub-directory while (entries.hasMoreElements()) { ZipEntry e = entries.nextElement(); if (e.getName().endsWith(tableName + ".txt")) { entry = e; feed.errors.add(new TableInSubdirectoryError(tableName, entry.getName().replace(tableName + ".txt", ""))); } } if (entry == null) { missing(); return; } } zis = zip.getInputStream(entry); LOG.info("Loading GTFS table {} from {}", tableName, entry); } // skip any byte order mark that may be present. Files must be UTF-8, // but the GTFS spec says that "files that include the UTF byte order mark are acceptable" InputStream bis = new BOMInputStream(zis); CsvReader reader = new CsvReader(bis, ',', Charset.forName("UTF8")); this.reader = reader; reader.readHeaders(); while (reader.readRecord()) { // reader.getCurrentRecord() is zero-based and does not include the header line, keep our own row count if (++row % 500000 == 0) { LOG.info("Record number {}", human(row)); } loadOneRow(); // Call subclass method to produce an entity from the current row. } }
3.68
flink_Costs_getHeuristicCpuCost
/** * Gets the heuristic cost for the CPU. * * @return The heuristic CPU Cost. */ public double getHeuristicCpuCost() { return this.heuristicCpuCost; }
3.68
framework_VCaption_setTooltipInfo
/** * Sets the tooltip that should be shown for the caption. * * @param tooltipInfo * The tooltip that should be shown or null if no tooltip should * be shown */ public void setTooltipInfo(TooltipInfo tooltipInfo) { this.tooltipInfo = tooltipInfo; }
3.68
pulsar_NamespaceBundleStats_compareTo
// Compare two bundles on the below aspects: // 1. Inbound bandwidth // 2. Outbound bandwidth // 3. Total msgRate (both in and out) // 4. Total topics and producers/consumers // 5. Total cache size public int compareTo(NamespaceBundleStats other) { int result = this.compareByBandwidthIn(other); if (result == 0) { result = this.compareByBandwidthOut(other); } if (result == 0) { result = this.compareByMsgRate(other); } if (result == 0) { result = this.compareByTopicConnections(other); } if (result == 0) { result = this.compareByCacheSize(other); } return result; }
3.68
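Because compareTo chains the five criteria as tie-breakers, bundle stats sort naturally from least to most loaded. A small sketch, with the statsByBundle map assumed:

List<NamespaceBundleStats> bundles = new ArrayList<>(statsByBundle.values()); // statsByBundle assumed
bundles.sort(NamespaceBundleStats::compareTo); // least loaded first, ties broken in the order above
NamespaceBundleStats coldest = bundles.get(0);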
hadoop_TimelineMetricOperation_exec
/** * Return the average value of the incoming metric and the base metric, * with a given state. Not supported yet. * * @param incoming Metric a * @param base Metric b * @param state Operation state * @return never returns normally; the operation is not supported yet */ @Override public TimelineMetric exec(TimelineMetric incoming, TimelineMetric base, Map<Object, Object> state) { // Not supported yet throw new UnsupportedOperationException( "Unsupported aggregation operation: AVERAGE"); }
3.68
hbase_KeyValueUtil_createFirstOnRow
/** * Create a KeyValue for the specified row, family and qualifier that would be smaller than all * other possible KeyValues that have the same row, family, qualifier. Used for seeking. * @param buffer the buffer to use for the new <code>KeyValue</code> object * @param boffset buffer offset * @param row the value key * @param roffset row offset * @param rlength row length * @param family family name * @param foffset family offset * @param flength family length * @param qualifier column qualifier * @param qoffset qualifier offset * @param qlength qualifier length * @return First possible key on passed Row, Family, Qualifier. * @throws IllegalArgumentException The resulting <code>KeyValue</code> object would be larger * than the provided buffer or than * <code>Integer.MAX_VALUE</code> */ public static KeyValue createFirstOnRow(byte[] buffer, final int boffset, final byte[] row, final int roffset, final int rlength, final byte[] family, final int foffset, final int flength, final byte[] qualifier, final int qoffset, final int qlength) throws IllegalArgumentException { long lLength = KeyValue.getKeyValueDataStructureSize(rlength, flength, qlength, 0); if (lLength > Integer.MAX_VALUE) { throw new IllegalArgumentException("KeyValue length " + lLength + " > " + Integer.MAX_VALUE); } int iLength = (int) lLength; if (buffer.length - boffset < iLength) { throw new IllegalArgumentException( "Buffer size " + (buffer.length - boffset) + " < " + iLength); } int len = KeyValue.writeByteArray(buffer, boffset, row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, HConstants.LATEST_TIMESTAMP, KeyValue.Type.Maximum, null, 0, 0, null); return new KeyValue(buffer, boffset, len); }
3.68
flink_TwoPhaseCommitSinkFunction_invoke
/** This should not be implemented by subclasses. */ @Override public final void invoke(IN value) throws Exception {}
3.68
pulsar_ProxyConnection_doAuthentication
// According to auth result, send newConnected or newAuthChallenge command. private void doAuthentication(AuthData clientData) throws Exception { authState .authenticateAsync(clientData) .whenCompleteAsync((authChallenge, throwable) -> { if (throwable == null) { authChallengeSuccessCallback(authChallenge); } else { authenticationFailedCallback(throwable); } }, ctx.executor()); }
3.68
hmily_NacosPassiveConfig_fileName
/** * Returns the file name, composed of the data id and the file extension. * * @return the file name string */ public String fileName() { return dataId + "." + fileExtension; }
3.68
querydsl_AliasFactory_createAliasForExpr
/** * Create an alias instance for the given class and Expression * * @param <A> * @param cl type for alias * @param expr underlying expression * @return alias instance */ @SuppressWarnings("unchecked") public <A> A createAliasForExpr(Class<A> cl, Expression<? extends A> expr) { try { final Map<Expression<?>, ManagedObject> expressionCache = proxyCache.computeIfAbsent(cl, a -> Collections.synchronizedMap(new WeakHashMap<>())); return (A) expressionCache.computeIfAbsent(expr, e -> (ManagedObject) createProxy(cl, expr)); } catch (ClassCastException e) { throw new QueryException(e); } }
3.68
flink_ExceptionUtils_tryDeserializeAndThrow
/** * Tries to find a {@link SerializedThrowable} as the cause of the given throwable and throws * its deserialized value. If there is no such throwable, then the original throwable is thrown. * * @param throwable to check for a SerializedThrowable * @param classLoader to be used for the deserialization of the SerializedThrowable * @throws Throwable either the deserialized throwable or the given throwable */ public static void tryDeserializeAndThrow(Throwable throwable, ClassLoader classLoader) throws Throwable { Throwable current = throwable; while (!(current instanceof SerializedThrowable) && current.getCause() != null) { current = current.getCause(); } if (current instanceof SerializedThrowable) { throw ((SerializedThrowable) current).deserializeError(classLoader); } else { throw throwable; } }
3.68
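A hedged usage sketch for tryDeserializeAndThrow: unwrap a SerializedThrowable that arrived wrapped in an ExecutionException from a remote job result (the future is hypothetical):

import java.util.concurrent.ExecutionException;

try {
    jobResultFuture.get(); // hypothetical future that completed exceptionally
} catch (ExecutionException e) {
    // Rethrows the deserialized remote error if one is buried in the cause chain,
    // otherwise rethrows e.getCause() as-is.
    ExceptionUtils.tryDeserializeAndThrow(e.getCause(), Thread.currentThread().getContextClassLoader());
}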
streampipes_PipelineManager_deletePipeline
/** * Deletes the pipeline with the given pipeline id * * @param pipelineId of pipeline to be deleted */ public static void deletePipeline(String pipelineId) { var pipeline = getPipeline(pipelineId); if (Objects.nonNull(pipeline)) { getPipelineStorage().deletePipeline(pipelineId); new NotificationsResourceManager().deleteNotificationsForPipeline(pipeline); } }
3.68
framework_AbstractOrderedLayoutConnector_getState
/* * (non-Javadoc) * * @see com.vaadin.client.ui.AbstractLayoutConnector#getState() */ @Override public AbstractOrderedLayoutState getState() { return (AbstractOrderedLayoutState) super.getState(); }
3.68
hbase_PrivateCellUtil_writeRowSkippingBytes
/** * Writes the row from the given cell to the output stream excluding the common prefix * @param out The dataoutputstream to which the data has to be written * @param cell The cell whose contents have to be written * @param rlength the row length * @param commonPrefix the number of leading row bytes shared with the previous key and therefore skipped */ public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength, int commonPrefix) throws IOException { if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyBufferToStream((DataOutput) out, ((ByteBufferExtendedCell) cell).getRowByteBuffer(), ((ByteBufferExtendedCell) cell).getRowPosition() + commonPrefix, rlength - commonPrefix); } else { out.write(cell.getRowArray(), cell.getRowOffset() + commonPrefix, rlength - commonPrefix); } }
3.68
hbase_BalanceResponse_setBalancerRan
/** * Set true if the balancer ran, otherwise false. The balancer may not run in some * circumstances, such as if a balance is already running or there are regions already in * transition. * @param balancerRan true if balancer ran, false otherwise */ public Builder setBalancerRan(boolean balancerRan) { this.balancerRan = balancerRan; return this; }
3.68
hbase_ReplicationSyncUp_main
/** * Main program */ public static void main(String[] args) throws Exception { int ret = ToolRunner.run(HBaseConfiguration.create(), new ReplicationSyncUp(), args); System.exit(ret); }
3.68
hbase_VersionInfo_getSrcChecksum
/** * Get the checksum of the source files from which HBase was compiled. * @return a string that uniquely identifies the source **/ public static String getSrcChecksum() { return Version.srcChecksum; }
3.68
dubbo_Environment_setLocalMigrationRule
/** * @deprecated only for unit tests */ @Deprecated @DisableInject public void setLocalMigrationRule(String localMigrationRule) { this.localMigrationRule = localMigrationRule; }
3.68
hadoop_DatanodeAdminProperties_setUpgradeDomain
/** * Set the upgrade domain of the datanode. * @param upgradeDomain the upgrade domain of the datanode. */ public void setUpgradeDomain(final String upgradeDomain) { this.upgradeDomain = upgradeDomain; }
3.68
framework_ComboBoxElement_isTextInputAllowed
/** * Checks if text input is allowed for the combo box. * * @return <code>true</code> if text input is allowed, <code>false</code> * otherwise */ public boolean isTextInputAllowed() { return !isReadOnly(getInputField()); }
3.68
morf_InsertStatementDefaulter_getColumnsWithValues
/** * Gets a set of columns for which values have been provided. * * @param statement the statement to parse. * @return a set of columns for which values have been provided. */ private Set<String> getColumnsWithValues(InsertStatement statement) { Set<String> columnsWithValues = new HashSet<>(); addColumns(statement.getValues(), columnsWithValues); if (statement.getSelectStatement() != null) { addColumns(statement.getSelectStatement().getFields(), columnsWithValues); } if (statement.getFromTable() != null) { addColumnsFromSchema(statement.getFromTable(), columnsWithValues); } for (String columnName : statement.getFieldDefaults().keySet()) { columnsWithValues.add(columnName.toUpperCase()); } return columnsWithValues; }
3.68
hbase_RegionServerSnapshotManager_getRegionsToSnapshot
/** * Determine if the snapshot should be handled on this server. NOTE: This is racy -- the master * expects a list of regionservers. This means if a region moves somewhere between the calls we'll * miss some regions. For example, a region move during a snapshot could result in a region being * skipped or done twice. This is manageable because the {@link MasterSnapshotVerifier} will * double check the region lists after the online portion of the snapshot completes and will * explicitly fail the snapshot. * @return the list of online regions. Empty list is returned if no regions are responsible for * the given snapshot. */ private List<HRegion> getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { List<HRegion> onlineRegions = (List<HRegion>) rss.getRegions(TableName.valueOf(snapshot.getTable())); Iterator<HRegion> iterator = onlineRegions.iterator(); // remove the non-default regions while (iterator.hasNext()) { HRegion r = iterator.next(); if (!RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) { iterator.remove(); } } return onlineRegions; }
3.68
hbase_HRegionFileSystem_removeStoreFile
/** * Archives the specified store file from the specified family. * @param familyName Family that contains the store files * @param filePath {@link Path} to the store file to remove * @throws IOException if the archiving fails */ public void removeStoreFile(final String familyName, final Path filePath) throws IOException { HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs, this.tableDir, Bytes.toBytes(familyName), filePath); }
3.68
framework_FieldGroup_setEnabled
/** * Updates the enabled state of all bound fields. * * @param fieldsEnabled * true to enable all bound fields, false to disable them */ public void setEnabled(boolean fieldsEnabled) { enabled = fieldsEnabled; for (Field<?> field : getFields()) { field.setEnabled(fieldsEnabled); } }
3.68
hbase_HDFSBlocksDistribution_getHostAndWeights
/** Returns the hosts and their weights */ public Map<String, HostAndWeight> getHostAndWeights() { return this.hostAndWeights; }
3.68
hbase_ReplicationSourceWALReader_take
/** * Retrieves the next batch of WAL entries from the queue, waiting up to the specified time for a * batch to become available * @return A batch of entries, along with the position in the log after reading the batch * @throws InterruptedException if interrupted while waiting */ public WALEntryBatch take() throws InterruptedException { return entryBatchQueue.take(); }
3.68
hbase_SnapshotManager_isSnapshotCompleted
/** * Check to see if the snapshot is one of the currently completed snapshots. Returns true if the * snapshot exists in the "completed snapshots folder". * @param snapshot expected snapshot to check * @return <tt>true</tt> if the snapshot is stored on the {@link FileSystem}, <tt>false</tt> if it is * not stored * @throws IOException if the filesystem throws an unexpected exception. * @throws IllegalArgumentException if snapshot name is invalid. */ private boolean isSnapshotCompleted(SnapshotDescription snapshot) throws IOException { try { final Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); FileSystem fs = master.getMasterFileSystem().getFileSystem(); // check to see if the snapshot already exists return fs.exists(snapshotDir); } catch (IllegalArgumentException iae) { throw new UnknownSnapshotException("Unexpected exception thrown", iae); } }
3.68
flink_Either_obtainLeft
/** * Utility function for {@link EitherSerializer} to support object reuse. * * <p>To support object reuse both subclasses of Either contain a reference to an instance of * the other type. This method provides access to and initializes the cross-reference. * * @param input container for Left or Right value * @param leftSerializer for creating an instance of the left type * @param <L> the type of Left * @param <R> the type of Right * @return input if Left type else input's Left reference */ @Internal public static <L, R> Left<L, R> obtainLeft( Either<L, R> input, TypeSerializer<L> leftSerializer) { if (input.isLeft()) { return (Left<L, R>) input; } else { Right<L, R> right = (Right<L, R>) input; if (right.left == null) { right.left = Left.of(leftSerializer.createInstance()); right.left.right = right; } return right.left; } }
3.68
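A sketch of the object-reuse pattern obtainLeft supports, as used by EitherSerializer: instead of allocating a new container per record, the serializer fetches (and lazily initializes) the cross-referenced Left instance. The stringSerializer and the setValue mutator are assumptions:

Right<String, Integer> reuse = Right.of(42); // mirror of the Left.of call in the method above
Left<String, Integer> left = Either.obtainLeft(reuse, stringSerializer); // stringSerializer assumed
left.setValue("hello"); // assumed mutator; the reused container now carries the deserialized value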
hbase_RegionPlacementMaintainer_getFavoredNodeList
/** * @param favoredNodesStr The String of favored nodes * @return the list of ServerName parsed from the given favored nodes string. */ public static List<ServerName> getFavoredNodeList(String favoredNodesStr) { String[] favoredNodesArray = StringUtils.split(favoredNodesStr, ","); if (favoredNodesArray == null) return null; List<ServerName> serverList = new ArrayList<>(); for (String hostNameAndPort : favoredNodesArray) { serverList.add(ServerName.valueOf(hostNameAndPort, ServerName.NON_STARTCODE)); } return serverList; }
3.68
hadoop_WordListAnonymizerUtility_needsAnonymization
/** * Checks if the data needs anonymization. Typically, data types which are * numeric in nature don't need anonymization. */ public static boolean needsAnonymization(String data) { // Numeric data doesn't need anonymization // Currently this doesn't support inputs like // - 12.3 // - 12.3f // - 90L // - 1D if (StringUtils.isNumeric(data)) { return false; } return true; // by default return true }
3.68
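The behaviour hinges on Commons Lang's StringUtils.isNumeric, which only accepts pure digit sequences, so every case the comment calls out still reports as needing anonymization:

needsAnonymization("12345");      // false -- purely numeric, safe to keep
needsAnonymization("12.3");       // true  -- '.' is not a digit for isNumeric
needsAnonymization("90L");        // true  -- trailing type suffix defeats the numeric check
needsAnonymization("jobName-42"); // true  -- mixed text is anonymized by default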
flink_JarManifestParser_findFirstManifestAttribute
/** * Returns the value of the first manifest attribute found in the provided JAR file. * * @param jarFile JAR file to parse * @param attributes Attributes to check * @return Optional holding value of first found attribute * @throws IOException If there is an error accessing the JAR */ public static Optional<String> findFirstManifestAttribute(File jarFile, String... attributes) throws IOException { if (attributes.length == 0) { return Optional.empty(); } try (JarFile f = new JarFile(jarFile)) { return findFirstManifestAttribute(f, attributes); } }
3.68
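A hedged usage sketch for findFirstManifestAttribute: probe a user JAR for its entry point, checking Flink's program-class attribute before the standard Main-Class (attribute names assumed here):

import java.io.File;
import java.util.Optional;

Optional<String> entryClass = JarManifestParser.findFirstManifestAttribute(
        new File("/path/to/job.jar"), "program-class", "Main-Class"); // attribute names assumed
entryClass.ifPresent(cls -> System.out.println("Entry point: " + cls));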
hadoop_RoleModel_addResources
/** * Add a list of resources. * @param resources resource list * @return this statement. */ public Statement addResources(Collection<String> resources) { resource.addAll(resources); return this; }
3.68
hbase_RegistryEndpointsRefresher_mainLoop
// The main loop for the refresh thread. private void mainLoop() { long lastRefreshTime = EnvironmentEdgeManager.currentTime(); boolean firstRefresh = true; for (;;) { synchronized (this) { for (;;) { if (stopped) { LOG.info("Registry end points refresher loop exited."); return; } // if refreshNow is true, then we will wait until minTimeBetweenRefreshesMs elapsed, // otherwise wait until periodicRefreshMs elapsed long waitTime = getRefreshIntervalMs(firstRefresh) - (EnvironmentEdgeManager.currentTime() - lastRefreshTime); if (waitTime <= 0) { // we are going to refresh, reset this flag firstRefresh = false; refreshNow = false; break; } try { wait(waitTime); } catch (InterruptedException e) { LOG.warn("Interrupted during wait", e); Thread.currentThread().interrupt(); continue; } } } LOG.debug("Attempting to refresh registry end points"); try { refresher.refresh(); } catch (IOException e) { LOG.warn("Error refreshing registry end points", e); } // We do not think it is a big deal to fail one time, so no matter what the refresh result is, we // just update this refresh time and wait for the next round. If later this becomes critical, we // could change to only update this value when we have done a successful refreshing. lastRefreshTime = EnvironmentEdgeManager.currentTime(); LOG.debug("Finished refreshing registry end points"); } }
3.68
hbase_HBaseTestingUtility_deleteTableIfAny
/** * Drop an existing table * @param tableName existing table */ public void deleteTableIfAny(TableName tableName) throws IOException { try { deleteTable(tableName); } catch (TableNotFoundException e) { // ignore } }
3.68
hadoop_QueueCapacityUpdateContext_getUpdatedClusterResource
/** * Returns the overall cluster resource available for the update phase of empty label. * @return cluster resource */ public Resource getUpdatedClusterResource() { return updatedClusterResource; }
3.68
hbase_SkipFilter_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the filter that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) { return true; } if (!(o instanceof SkipFilter)) { return false; } SkipFilter other = (SkipFilter) o; return getFilter().areSerializedFieldsEqual(other.getFilter()); }
3.68
zxing_PlanarYUVLuminanceSource_getThumbnailHeight
/** * @return height of image from {@link #renderThumbnail()} */ public int getThumbnailHeight() { return getHeight() / THUMBNAIL_SCALE_FACTOR; }
3.68
framework_ComponentLocator_getPathForElement
/** * Generates a String locator which uniquely identifies the target element. * The {@link #getElementByPath(String)} method can be used for the inverse * operation, i.e. locating an element based on the return value from this * method. * <p> * Note that getElementByPath(getPathForElement(element)) == element is not * always true as #getPathForElement(Element) can return a path to another * element if the widget determines an action on the other element will give * the same result as the action on the target element. * </p> * * @since 7.2 * @param targetElement * The element to generate a path for. * @return A String locator that identifies the target element or null if a * String locator could not be created. */ public String getPathForElement(Element targetElement) { if (targetElement != null) { return getPathForElement(DOM.asOld(targetElement)); } return null; }
3.68
hadoop_TimelinePutResponse_setErrorCode
/** * Set the error code to the given error code * * @param errorCode * an error code */ public void setErrorCode(int errorCode) { this.errorCode = errorCode; }
3.68
flink_PeriodicMaterializationManager_close
// task thread and task canceler can access this method public synchronized void close() { LOG.info("Shutting down PeriodicMaterializationManager."); if (!periodicExecutor.isShutdown()) { periodicExecutor.shutdownNow(); } }
3.68
flink_DataStreamUtils_collectUnboundedStream
/** * Triggers execution of the DataStream application and collects the given number of records * from the stream. After the records are received, the execution is canceled. * * @deprecated Please use {@link DataStream#executeAndCollect()}. */ @Deprecated public static <E> List<E> collectUnboundedStream( DataStream<E> stream, int numElements, String jobName) throws Exception { final ClientAndIterator<E> clientAndIterator = collectWithClient(stream, jobName); final List<E> result = collectRecordsFromUnboundedStream(clientAndIterator, numElements); // cancel the job now that we have received enough elements clientAndIterator.client.cancel().get(); return result; }
3.68
hadoop_ReleaseContainerEvent_getContainer
/** * Get RMContainer. * @return RMContainer. */ public RMContainer getContainer() { return container; }
3.68
hbase_ByteBufferUtils_copyFromArrayToBuffer
/** * Copies bytes from given array's offset to length part into the given buffer. Puts the bytes to * buffer's given position. This doesn't affect the position of buffer. * @param out output bytebuffer to copy to * @param outOffset output buffer offset * @param in input array to copy from * @param inOffset input offset to copy from * @param length the number of bytes to copy */ public static void copyFromArrayToBuffer(ByteBuffer out, int outOffset, byte[] in, int inOffset, int length) { if (out.hasArray()) { System.arraycopy(in, inOffset, out.array(), out.arrayOffset() + outOffset, length); } else if (UNSAFE_AVAIL) { UnsafeAccess.copy(in, inOffset, out, outOffset, length); } else { ByteBuffer outDup = out.duplicate(); outDup.position(outOffset); outDup.put(in, inOffset, length); } }
3.68
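As the javadoc above stresses, the copy is positional and leaves the destination buffer's own position untouched; a small sketch:

import java.nio.ByteBuffer;

ByteBuffer out = ByteBuffer.allocateDirect(16);
byte[] in = {1, 2, 3, 4};
ByteBufferUtils.copyFromArrayToBuffer(out, 4, in, 0, in.length); // bytes land at offset 4
assert out.position() == 0; // position unchanged by the positional copy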
flink_AbstractStreamOperatorV2_getProcessingTimeService
/** * Returns the {@link ProcessingTimeService} responsible for getting the current processing time * and registering timers. */ @VisibleForTesting public ProcessingTimeService getProcessingTimeService() { return processingTimeService; }
3.68
hudi_CloudObjectsSelectorCommon_getUrlForFile
/** * Construct a fully qualified URL string to a cloud file from a given Row. Optionally check if the file exists. * Here Row is assumed to have the schema [bucket_name, filepath_relative_to_bucket]. * The checkIfExists logic assumes that the relevant impl classes for the storageUrlSchemePrefix are already present * on the classpath! * * @param storageUrlSchemePrefix Eg: s3:// or gs://. The storage-provider-specific prefix to use within the URL. */ private static Option<String> getUrlForFile(Row row, String storageUrlSchemePrefix, SerializableConfiguration serializableConfiguration, boolean checkIfExists) { final Configuration configuration = serializableConfiguration.newCopy(); String bucket = row.getString(0); String filePath = storageUrlSchemePrefix + bucket + "/" + row.getString(1); try { String filePathUrl = URLDecoder.decode(filePath, StandardCharsets.UTF_8.name()); if (!checkIfExists) { return Option.of(filePathUrl); } boolean exists = checkIfFileExists(storageUrlSchemePrefix, bucket, filePathUrl, configuration); return exists ? Option.of(filePathUrl) : Option.empty(); } catch (Exception exception) { LOG.warn(String.format("Failed to generate path to cloud file %s", filePath), exception); throw new HoodieException(String.format("Failed to generate path to cloud file %s", filePath), exception); } }
3.68
flink_HttpHeader_getValue
/** * Returns the value of this HTTP header. * * @return the value of this HTTP header */ public String getValue() { return value; }
3.68
morf_RenameTable_getName
/** * @see org.alfasoftware.morf.metadata.Table#getName() */ @Override public String getName() { return newName; }
3.68
MagicPlugin_Mage_doSetCurrency
// Returns the change, which may have been capped by min or max private double doSetCurrency(String key, double newValue) { Currency currency = initCurrency(key); return doSetCurrency(currency, key, newValue); }
3.68
hudi_BaseHoodieFunctionalIndexClient_register
/** * Register a functional index. * Index definitions are stored in user-specified path or, by default, in .hoodie/.index_defs/index.json. * On first registration, the index definition file is created if it does not exist; * on subsequent registrations, the existing definition file is updated. * Table Config is updated if necessary. */ public void register(HoodieTableMetaClient metaClient, String indexName, String indexType, Map<String, Map<String, String>> columns, Map<String, String> options) { LOG.info("Registering index {} using {}", indexName, indexType); String indexMetaPath = metaClient.getTableConfig().getIndexDefinitionPath() .orElse(metaClient.getMetaPath() + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FOLDER_NAME + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FILE_NAME); // build HoodieFunctionalIndexMetadata and then add to index definition file metaClient.buildFunctionalIndexDefinition(indexMetaPath, indexName, indexType, columns, options); // update table config if necessary if (!metaClient.getTableConfig().getProps().containsKey(HoodieTableConfig.INDEX_DEFINITION_PATH) || !metaClient.getTableConfig().getIndexDefinitionPath().isPresent()) { metaClient.getTableConfig().setValue(HoodieTableConfig.INDEX_DEFINITION_PATH, indexMetaPath); HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps()); } }
3.68
pulsar_DispatchRateLimiter_isDispatchRateLimitingEnabled
/** * Checks if dispatch-rate limiting is enabled. * * @return true if either the message-rate or the byte-rate limiter is configured */ public boolean isDispatchRateLimitingEnabled() { return dispatchRateLimiterOnMessage != null || dispatchRateLimiterOnByte != null; }
3.68
hbase_CompoundConfiguration_addStringMap
/** * Add String map to config list. This map is generally created by HTableDescriptor or * HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous * ones if there are name collisions. * @return this, for builder pattern */ public CompoundConfiguration addStringMap(final Map<String, String> map) { freezeMutableConf(); // put new map at the front of the list (top priority) this.configs.add(0, new ImmutableConfigMap() { private final Map<String, String> m = map; @Override public Iterator<Map.Entry<String, String>> iterator() { return map.entrySet().iterator(); } @Override public String get(String key) { return m.get(key); } @Override public String getRaw(String key) { return get(key); } @Override public Class<?> getClassByName(String name) throws ClassNotFoundException { return null; } @Override public int size() { return m.size(); } @Override public String toString() { return m.toString(); } }); return this; }
3.68
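Since addStringMap puts the new map at the front of the config list, the most recently added map wins on name collisions. A hedged sketch; the add(Configuration) sibling method is an assumption from the same class:

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;

Configuration base = new Configuration(false);
base.set("hbase.sample.key", "from-base");
CompoundConfiguration cc = new CompoundConfiguration()
    .add(base) // assumed sibling method for plain Configurations
    .addStringMap(Collections.singletonMap("hbase.sample.key", "from-map"));
cc.get("hbase.sample.key"); // "from-map": the string map was added last, so it shadows base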
rocketmq-connect_RecordOffsetManagement_pollOffsetWhile
/** * @param submittedPositions the submitted positions to drain from the head * @return the offset of the last consecutively committable position, or null if none can be committed */ private RecordOffset pollOffsetWhile(Deque<SubmittedPosition> submittedPositions) { RecordOffset offset = null; // Stop polling if there is an uncommitted breakpoint while (canCommitHead(submittedPositions)) { offset = submittedPositions.poll().getPosition().getOffset(); } return offset; }
3.68
hbase_InstancePending_prepare
/** * Associates the given instance for the method {@link #get}. This method should be called once, * and {@code instance} should be non-null. This method is expected to be called as soon as possible * because the method {@code get} is uninterruptibly blocked until this method is called. */ void prepare(T instance) { assert instance != null; instanceHolder = new InstanceHolder<>(instance); pendingLatch.countDown(); }
3.68
hadoop_TimelineReaderWebServicesUtils_createTimelineReaderContext
/** * Parse the passed context information represented as strings and convert * into a {@link TimelineReaderContext} object. * @param clusterId Cluster Id. * @param userId User Id. * @param flowName Flow Name. * @param flowRunId Run id for the flow. * @param appId App Id. * @param entityType Entity Type. * @param entityIdPrefix Entity Id Prefix. * @param entityId Entity Id. * @return a {@link TimelineReaderContext} object. */ static TimelineReaderContext createTimelineReaderContext(String clusterId, String userId, String flowName, String flowRunId, String appId, String entityType, String entityIdPrefix, String entityId) { return new TimelineReaderContext(parseStr(clusterId), parseStr(userId), parseStr(flowName), parseLongStr(flowRunId), parseStr(appId), parseStr(entityType), parseLongStr(entityIdPrefix), parseStr(entityId)); }
3.68
flink_AbstractBytesHashMap_appendRecord
// ----------------------- Append ----------------------- public int appendRecord(LookupInfo<K, BinaryRowData> lookupInfo, BinaryRowData value) throws IOException { final long oldLastPosition = outView.getCurrentOffset(); // serialize the key into the BytesHashMap record area int skip = keySerializer.serializeToPages(lookupInfo.getKey(), outView); long offset = oldLastPosition + skip; // serialize the value into the BytesHashMap record area valueSerializer.serializeToPages(value, outView); if (offset > Integer.MAX_VALUE) { LOG.warn( "We can't handle key area with more than Integer.MAX_VALUE bytes," + " because the pointer is an integer."); throw new EOFException(); } return (int) offset; }
3.68
dubbo_NettyHttpRestServer_getChannelHandlers
/** * Create channel handlers. * * @param url the server url * @return the list of channel handlers to install */ protected List<ChannelHandler> getChannelHandlers(URL url) { List<ChannelHandler> channelHandlers = new ArrayList<>(); return channelHandlers; }
3.68
hadoop_StoragePolicySatisfyManager_stop
/** * This function will do the following logic based on the configured sps mode: * * <p> * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then * it won't do anything. The administrator must stop the external sps service * explicitly, if needed. * * <p> * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the * service is disabled and won't do any action. */ public void stop() { if (!storagePolicyEnabled) { if (LOG.isDebugEnabled()) { LOG.debug("Storage policy is not enabled, ignoring"); } return; } switch (mode) { case EXTERNAL: removeAllPathIds(); if (LOG.isDebugEnabled()) { LOG.debug( "Storage policy satisfier service is running outside namenode" + ", ignoring"); } break; case NONE: if (LOG.isDebugEnabled()) { LOG.debug("Storage policy satisfier is not enabled, ignoring"); } break; default: if (LOG.isDebugEnabled()) { LOG.debug("Invalid mode:{}, ignoring", mode); } break; } }
3.68
hbase_HRegionServer_getOnlineRegionsLocalContext
/** * For tests, web ui and metrics. This method will only work if HRegionServer is in the same JVM * as client; HRegion cannot be serialized to cross an rpc. */ public Collection<HRegion> getOnlineRegionsLocalContext() { Collection<HRegion> regions = this.onlineRegions.values(); return Collections.unmodifiableCollection(regions); }
3.68
framework_VAbstractTextualDate_setPlaceholder
/** * Sets the placeholder for this textual date input. * * @param placeholder * the placeholder to set, or {@code null} to clear */ public void setPlaceholder(String placeholder) { if (placeholder != null) { text.getElement().setAttribute("placeholder", placeholder); } else { text.getElement().removeAttribute("placeholder"); } }
3.68
rocketmq-connect_PositionStorageWriter_writeOffset
/** * write offsets * * @param positions positions */ @Override public void writeOffset(Map<RecordPartition, RecordOffset> positions) { for (Map.Entry<RecordPartition, RecordOffset> offset : positions.entrySet()) { writeOffset(offset.getKey(), offset.getValue()); } }
3.68
hbase_EncryptionUtil_wrapKey
/** * Protect a key by encrypting it with the secret key of the given subject. The configuration must * be set up correctly for key alias resolution. * @param conf configuration * @param subject subject key alias * @param key the key * @return the encrypted key bytes */ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException { // Wrap the key with the configured encryption algorithm. String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); } EncryptionProtos.WrappedKey.Builder builder = EncryptionProtos.WrappedKey.newBuilder(); builder.setAlgorithm(key.getAlgorithm()); byte[] iv = null; if (cipher.getIvLength() > 0) { iv = new byte[cipher.getIvLength()]; Bytes.secureRandom(iv); builder.setIv(UnsafeByteOperations.unsafeWrap(iv)); } byte[] keyBytes = key.getEncoded(); builder.setLength(keyBytes.length); builder.setHashAlgorithm(Encryption.getConfiguredHashAlgorithm(conf)); builder .setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); ByteArrayOutputStream out = new ByteArrayOutputStream(); Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, iv); builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray())); // Build and return the protobuf message out.reset(); builder.build().writeDelimitedTo(out); return out.toByteArray(); }
3.68
framework_VTabsheet_recalculateCaptionWidths
/** * Recalculates the caption widths for all tabs within this tab bar, and * updates the tab width bookkeeping if necessary. */ private void recalculateCaptionWidths() { for (int i = 0; i < getTabCount(); ++i) { getTab(i).recalculateCaptionWidth(); } }
3.68
dubbo_Version_isRelease263OrHigher
/** * Check the framework release version number to decide if it's 2.6.3 or higher * * @param version the sdk version * @return true if the version is 2.6.3 or higher */ public static boolean isRelease263OrHigher(String version) { return getIntVersion(version) >= 2060300; }
3.68
hudi_HoodieTableConfig_populateMetaFields
/** * @return true if meta fields need to be populated, else false. */ public boolean populateMetaFields() { return Boolean.parseBoolean(getStringOrDefault(POPULATE_META_FIELDS)); }
3.68
flink_NettyShuffleEnvironment_getPartitionsOccupyingLocalResources
/** * Report unreleased partitions. * * @return collection of partitions which still occupy some resources locally on this task * executor and have not been released yet. */ @Override public Collection<ResultPartitionID> getPartitionsOccupyingLocalResources() { return resultPartitionManager.getUnreleasedPartitions(); }
3.68
framework_ScrollbarBundle_setScrollPos
/** * Sets the scroll position of the scrollbar in the axis the scrollbar is * representing. * <p> * <em>Note:</em> Even though {@code double} values are used, they are * currently only used as integers as large {@code int} (or small but fast * {@code long}). This means, all values are truncated to zero decimal * places. * * @param px * the new scroll position in pixels */ public final void setScrollPos(double px) { if (isLocked()) { return; } double oldScrollPos = scrollPos; scrollPos = Math.max(0, Math.min(maxScrollPos, truncate(px))); if (!WidgetUtil.pixelValuesEqual(oldScrollPos, scrollPos)) { if (scrollInProgress == null) { // Only used for tracking that there is "workPending" scrollInProgress = addScrollHandler(event -> { scrollInProgress.removeHandler(); scrollInProgress = null; }); } if (isInvisibleScrollbar) { invisibleScrollbarTemporaryResizer.show(); } /* * This is where the value needs to be converted into an integer no * matter how we flip it, since GWT expects an integer value. * There's no point making a JSNI method that accepts doubles as the * scroll position, since the browsers themselves don't support such * large numbers (as of today, 25.3.2014). This double range is only * facilitating future virtual scrollbars. */ internalSetScrollPos(toInt32(scrollPos)); } }
3.68
nifi-maven_NarProvidedDependenciesMojo_getProject
/** * Gets the Maven project used by this mojo. * * @return the Maven project */ public MavenProject getProject() { return project; }
3.68
hbase_Segment_dump
// Debug methods /** * Dumps all cells of the segment into the given log */ void dump(Logger log) { for (Cell cell : getCellSet()) { log.debug(Objects.toString(cell)); } }
3.68
flink_EnvironmentSettings_build
/** Returns an immutable instance of {@link EnvironmentSettings}. */ public EnvironmentSettings build() { if (classLoader == null) { classLoader = Thread.currentThread().getContextClassLoader(); } return new EnvironmentSettings(configuration, classLoader, catalogStore); }
3.68
framework_GridLayout_newLine
/** * Forces the next component to be added at the beginning of the next line. * * <p> * Sets the cursor column to 0 and increments the cursor row by one. * </p> * * <p> * By calling this function you can ensure that no more components are added * to the right of the previous component. * </p> * * @see #space() */ public void newLine() { cursorX = 0; cursorY++; }
3.68
hbase_ZKVisibilityLabelWatcher_writeToZookeeper
/** * Write a labels mirror or user auths mirror into zookeeper * @param labelsOrUserAuths true for writing labels and false for user auths. */ public void writeToZookeeper(byte[] data, boolean labelsOrUserAuths) { String znode = this.labelZnode; if (!labelsOrUserAuths) { znode = this.userAuthsZnode; } try { ZKUtil.updateExistingNodeData(watcher, znode, data, -1); } catch (KeeperException e) { LOG.error("Failed writing to " + znode, e); watcher.abort("Failed writing node " + znode + " to zookeeper", e); } }
3.68
flink_CheckpointStorageLocationReference_isDefaultReference
/** Returns true, if this object is the default reference. */ public boolean isDefaultReference() { return encodedReference == null; }
3.68
flink_TaskManagerExceptionUtils_tryEnrichTaskManagerError
/** * Tries to enrich the passed exception or its causes with additional information. * * <p>This method improves error messages for direct and metaspace {@link OutOfMemoryError}. It * adds descriptions about possible causes and ways of resolution. * * @param root The Throwable of which the cause tree shall be traversed. */ public static void tryEnrichTaskManagerError(@Nullable Throwable root) { tryEnrichOutOfMemoryError( root, TM_METASPACE_OOM_ERROR_MESSAGE, TM_DIRECT_OOM_ERROR_MESSAGE, null); }
3.68
flink_BlockerSync_releaseBlocker
/** Lets the blocked thread continue. */ public void releaseBlocker() { synchronized (lock) { blockerReleased = true; lock.notifyAll(); } }
3.68
hbase_HFileCleaner_countDeletedFiles
// Currently only for testing purpose private void countDeletedFiles(boolean isLargeFile, boolean fromLargeQueue) { if (isLargeFile) { if (deletedLargeFiles.get() == Long.MAX_VALUE) { LOG.debug("Deleted more than Long.MAX_VALUE large files, reset counter to 0"); deletedLargeFiles.set(0L); } deletedLargeFiles.incrementAndGet(); } else { if (deletedSmallFiles.get() == Long.MAX_VALUE) { LOG.debug("Deleted more than Long.MAX_VALUE small files, reset counter to 0"); deletedSmallFiles.set(0L); } if (fromLargeQueue) { LOG.trace("Stolen a small file deletion task in large file thread"); } deletedSmallFiles.incrementAndGet(); } }
3.68
hadoop_LogExactlyOnce_debug
/** * Log at DEBUG if nothing has been logged yet. * @param format format string * @param args arguments */ public void debug(String format, Object...args) { if (!logged.getAndSet(true)) { log.debug(format, args); } }
3.68
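A sketch of the intended use of LogExactlyOnce: keep one instance per noisy message site so only the first occurrence is logged (the Logger-taking constructor is an assumption):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

static final Logger LOG = LoggerFactory.getLogger("sample"); // logger name illustrative
static final LogExactlyOnce DEBUG_ONCE = new LogExactlyOnce(LOG); // constructor shape assumed

DEBUG_ONCE.debug("slow path taken for {}", "key1"); // logged
DEBUG_ONCE.debug("slow path taken for {}", "key2"); // swallowed: already logged once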
hbase_ModifyNamespaceProcedure_prepareModify
/** * Action before any real action of modifying namespace. */ private boolean prepareModify(final MasterProcedureEnv env) throws IOException { if (!getTableNamespaceManager(env).doesNamespaceExist(newNsDescriptor.getName())) { setFailure("master-modify-namespace", new NamespaceNotFoundException(newNsDescriptor.getName())); return false; } getTableNamespaceManager(env).validateTableAndRegionCount(newNsDescriptor); // This is used for rollback oldNsDescriptor = getTableNamespaceManager(env).get(newNsDescriptor.getName()); if ( !Objects.equals(oldNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP), newNsDescriptor.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP)) ) { checkNamespaceRSGroup(env, newNsDescriptor); } return true; }
3.68
flink_DataStream_writeToSocket
/** * Writes the DataStream to a socket as a byte array. The format of the output is specified by a * {@link SerializationSchema}. * * @param hostName host of the socket * @param port port of the socket * @param schema schema for serialization * @return the closed DataStream */ @PublicEvolving public DataStreamSink<T> writeToSocket( String hostName, int port, SerializationSchema<T> schema) { DataStreamSink<T> returnStream = addSink(new SocketClientSink<>(hostName, port, schema, 0)); returnStream.setParallelism( 1); // It would not work if multiple instances would connect to the same port return returnStream; }
3.68
framework_ColorPickerSelect_getValue
/** * Returns the selected value. * <p> * Value can be {@code null} if component is not yet initialized via * {@link #initContent()} * * @see ColorPickerSelect#initContent() * * @return the selected color, may be {@code null} */ @Override public Color getValue() { if (grid == null) { return null; } return grid.getValue(); }
3.68
morf_AbstractSqlDialectTest_testLower
/** * Tests that LOWER functionality works. */ @Test public void testLower() { SelectStatement statement = new SelectStatement(lowerCase(new FieldReference("field1"))).from(new TableReference( "schedule")); String actual = testDialect.convertStatementToSQL(statement); assertEquals("LowerCase script should match expected", expectedLower(), actual); }
3.68
hadoop_ConnectionPool_getMinActiveRatio
/** * Get the minimum ratio of active connections in this pool. * * @return Minimum ratio of active connections. */ protected float getMinActiveRatio() { return this.minActiveRatio; }
3.68
pulsar_MultiTopicsConsumerImpl_subscribeAsync
// subscribe one more given topic, but already know the numberPartitions CompletableFuture<Void> subscribeAsync(String topicName, int numberPartitions) { TopicName topicNameInstance = getTopicName(topicName); if (topicNameInstance == null) { return FutureUtil.failedFuture( new PulsarClientException.AlreadyClosedException("Topic name not valid")); } String fullTopicName = topicNameInstance.toString(); if (consumers.containsKey(fullTopicName) || partitionedTopics.containsKey(topicNameInstance.getPartitionedTopicName())) { return FutureUtil.failedFuture( new PulsarClientException.AlreadyClosedException("Already subscribed to " + topicName)); } if (getState() == State.Closing || getState() == State.Closed) { return FutureUtil.failedFuture( new PulsarClientException.AlreadyClosedException("Topics Consumer was already closed")); } CompletableFuture<Void> subscribeResult = new CompletableFuture<>(); subscribeTopicPartitions(subscribeResult, fullTopicName, numberPartitions, true /* createTopicIfDoesNotExist */); return subscribeResult; }
3.68
hbase_AccessChecker_requireGlobalPermission
/** * Checks that the user has the given global permission. The generated audit log message will * contain context information for the operation being authorized, based on the given parameters. * @param user Active user to which authorization checks should be applied * @param request Request type * @param perm Action being requested * @param namespace The given namespace */ public void requireGlobalPermission(User user, String request, Action perm, String namespace) throws IOException { AuthResult authResult; if (authManager.authorizeUserGlobal(user, perm)) { authResult = AuthResult.allow(request, "Global check allowed", user, perm, null); authResult.getParams().setNamespace(namespace); logResult(authResult); } else { authResult = AuthResult.deny(request, "Global check failed", user, perm, null); authResult.getParams().setNamespace(namespace); logResult(authResult); throw new AccessDeniedException( "Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") + "' (global, action=" + perm.toString() + ")"); } }
3.68
framework_DataCommunicator_dropActiveData
/** * Marks a data object identified by given key string to be dropped. * * @param key * key string */ public void dropActiveData(String key) { if (activeData.contains(key)) { droppedData.add(key); } }
3.68
hadoop_ReplicaInfo_getOriginalBytesReserved
/** * Number of bytes originally reserved for this replica. The actual * reservation is adjusted as data is written to disk. * * @return the number of bytes originally reserved for this replica. */ public long getOriginalBytesReserved() { return 0; }
3.68
framework_FieldGroup_isBuffered
/** * Checks the buffered mode for the bound fields. * <p> * * @see #setBuffered(boolean) for more details on buffered mode * * @see Field#isBuffered() * @return true if buffered mode is on, false otherwise * */ public boolean isBuffered() { return buffered; }
3.68
hbase_CleanerChore_newFileCleaner
/** * A utility method to create new instances of LogCleanerDelegate based on the class name of the * LogCleanerDelegate. * @param className fully qualified class name of the LogCleanerDelegate * @param conf used configuration * @return the new instance */ private T newFileCleaner(String className, Configuration conf) { try { Class<? extends FileCleanerDelegate> c = Class.forName(className).asSubclass(FileCleanerDelegate.class); @SuppressWarnings("unchecked") T cleaner = (T) c.getDeclaredConstructor().newInstance(); cleaner.setConf(conf); cleaner.init(this.params); return cleaner; } catch (Exception e) { LOG.warn("Can NOT create CleanerDelegate={}", className, e); // skipping if can't instantiate return null; } }
3.68
morf_XmlDataSetProducer_isEmptyDatabase
/** * @see org.alfasoftware.morf.metadata.Schema#isEmptyDatabase() */ @Override public boolean isEmptyDatabase() { return xmlStreamProvider.availableStreamNames().isEmpty(); }
3.68
hmily_XaLoadBalancerAutoConfiguration_xaLoadBalancerBeanPostProcessor
/** * Registers the {@link XaLoadBalancerBeanPostProcessor} bean. * * @return {@link XaLoadBalancerBeanPostProcessor} Bean */ @Bean public static XaLoadBalancerBeanPostProcessor xaLoadBalancerBeanPostProcessor() { return new XaLoadBalancerBeanPostProcessor(); }
3.68
morf_AbstractSqlDialectTest_expectedRound
/** * @return The expected SQL for rounding */ protected String expectedRound() { return "SELECT ROUND(field1, 2) FROM " + tableName("schedule"); }
3.68
framework_RpcDataProviderExtension_getActiveItemIds
/** * Gets a collection copy of currently active item ids. * * @return collection of item ids */ public Collection<Object> getActiveItemIds() { return new HashSet<Object>(activeItemMap.keySet()); }
3.68
rocketmq-connect_WorkerSinkTask_closeMessageQueues
/** * remove offset from currentOffsets/lastCommittedOffsets * remove message from messageBatch * * @param queues the message queues to close * @param lost true if the queues were lost rather than cleanly revoked */ private void closeMessageQueues(Set<MessageQueue> queues, boolean lost) { if (!lost) { commitOffsets(System.currentTimeMillis(), true, queues); } else { log.trace("{} Closing the task as partitions have been lost: {}", this, queues); currentOffsets.keySet().removeAll(queues); } lastCommittedOffsets.keySet().removeAll(queues); messageBatch.removeIf(record -> { MessageQueue messageQueue = ConnectUtil.convertToMessageQueue(record.getPosition().getPartition()); return queues.contains(messageQueue); }); }
3.68