name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, range 3.26 to 3.68
flink_FileIOChannel_getPathFile
/** Returns the path to the underlying temporary file as a File. */ public File getPathFile() { return path; }
3.68
hudi_HoodieCommitMetadata_getFileIdToFileStatus
/** * Extract the file status of all affected files from the commit metadata. If a file has * been touched multiple times in the given commits, the return value will keep the one * from the latest commit by file group ID. * * <p>Note: different from {@link #getFullPathToFileStatus(Configuration, String)}, * only the latest commit file for a file group is returned; * this is an optimization for COPY_ON_WRITE tables to eliminate legacy files for the filesystem view. * * @param hadoopConf The hadoop configuration * @param basePath The base path * @return the file ID to file status mapping */ public Map<String, FileStatus> getFileIdToFileStatus(Configuration hadoopConf, String basePath) { Map<String, FileStatus> fileIdToFileStatus = new HashMap<>(); for (List<HoodieWriteStat> stats : getPartitionToWriteStats().values()) { // Iterate through all the written files. for (HoodieWriteStat stat : stats) { String relativeFilePath = stat.getPath(); Path fullPath = relativeFilePath != null ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null; if (fullPath != null) { FileStatus fileStatus = new FileStatus(stat.getFileSizeInBytes(), false, 0, 0, 0, fullPath); fileIdToFileStatus.put(stat.getFileId(), fileStatus); } } } return fileIdToFileStatus; }
3.68
flink_HiveStatsUtil_getPartitionColumnNullCount
/** * Get the null count for the {@code partitionColIndex} partition column in table {@code hiveTable}. * * <p>To get the null count, it will first list all the partitions whose {@code partitionColIndex} partition column is null, and merge the partitions' statistics to get the * total row count, which is exactly the null count for the {@code partitionColIndex} partition column. */ private static Long getPartitionColumnNullCount( HiveMetastoreClientWrapper client, Table hiveTable, int partitionColIndex, String defaultPartitionName) { // get the partial partition values List<String> partialPartitionVals = getPartialPartitionVals(partitionColIndex, defaultPartitionName); try { // list all the partitions that match the partial partition values List<Partition> partitions = client.listPartitions( hiveTable.getDbName(), hiveTable.getTableName(), partialPartitionVals, (short) -1); List<TableStats> catalogTableStatistics = partitions.stream() .map( p -> new TableStats( HiveStatsUtil.createCatalogTableStatistics( p.getParameters()) .getRowCount())) .collect(Collectors.toList()); Set<String> partitionKeys = getFieldNames(hiveTable.getPartitionKeys()); TableStats resultTableStats = catalogTableStatistics.stream() .reduce((s1, s2) -> s1.merge(s2, partitionKeys)) .orElse(TableStats.UNKNOWN); if (resultTableStats == TableStats.UNKNOWN || resultTableStats.getRowCount() < 0) { return null; } else { return resultTableStats.getRowCount(); } } catch (Exception e) { LOG.warn( "Can't list partition for table `{}.{}`, partition value {}.", hiveTable.getDbName(), hiveTable.getTableName(), partialPartitionVals); } return null; }
3.68
framework_VDebugWindow_uidl
/** * Called when a response is received. * * @param ac the application connection * @param uidl the received UIDL */ public void uidl(ApplicationConnection ac, ValueMap uidl) { if (isClosed()) { return; } for (Section s : sections) { s.uidl(ac, uidl); } }
3.68
hadoop_AMRMProxyMetrics_getMetrics
/** * Initialize the singleton instance. * * @return the singleton */ public static AMRMProxyMetrics getMetrics() { synchronized (AMRMProxyMetrics.class) { if (instance == null) { instance = DefaultMetricsSystem.instance() .register("AMRMProxyMetrics", "Metrics for the Yarn AMRMProxy", new AMRMProxyMetrics()); } } return instance; }
3.68
framework_Escalator_convertToVisual
/** * Adjusts the row index and number to be relevant for the current * virtual viewport. * <p> * It converts a logical range of row indexes to the matching visual * range, truncating the resulting range to the viewport. * <p> * <ul> * <li>Escalator contains logical rows 0..100 * <li>Current viewport showing logical rows 20..29 * <li>convertToVisual([20..29]) &rarr; [0..9] * <li>convertToVisual([15..24]) &rarr; [0..4] * <li>convertToVisual([25..29]) &rarr; [5..9] * <li>convertToVisual([26..39]) &rarr; [6..9] * <li>convertToVisual([0..5]) &rarr; [0..-1] <em>(empty)</em> * <li>convertToVisual([35..1]) &rarr; [0..-1] <em>(empty)</em> * <li>convertToVisual([0..100]) &rarr; [0..9] * </ul> * * @return a logical range converted to a visual range, truncated to the * current viewport. The first visual row has the index 0. */ private Range convertToVisual(final Range logicalRange) { if (logicalRange.isEmpty()) { return logicalRange; } else if (visualRowOrder.isEmpty()) { // empty range return Range.withLength(0, 0); } /* * TODO [[spacer]]: these assumptions will be totally broken with * spacers. */ final int maxVisibleRowCount = getMaxVisibleRowCount(); final int currentTopRowIndex = getLogicalRowIndex( visualRowOrder.getFirst()); final Range[] partitions = logicalRange.partitionWith( Range.withLength(currentTopRowIndex, maxVisibleRowCount)); final Range insideRange = partitions[1]; return insideRange.offsetBy(-currentTopRowIndex); }
3.68
flink_HiveCatalog_getFieldNames
/** Get field names from field schemas. */ public static List<String> getFieldNames(List<FieldSchema> fieldSchemas) { List<String> names = new ArrayList<>(fieldSchemas.size()); for (FieldSchema fs : fieldSchemas) { names.add(fs.getName()); } return names; }
3.68
flink_GroupReduceOperatorBase_setGroupOrder
/** * Sets the order of the elements within a reduce group. * * @param order The order for the elements in a reduce group. */ public void setGroupOrder(Ordering order) { this.groupOrder = order; }
3.68
flink_FutureCompletingBlockingQueue_take
/** * <b>Warning:</b> This is a dangerous method and should only be used for testing convenience. A * method that blocks until availability does not go together well with the concept of * asynchronous notifications and non-blocking polling. * * <p>Get and remove the first element from the queue. The call blocks if the queue is empty. * The problem with this method is that it may loop internally until an element is available and * that way eagerly reset the availability future. If a consumer thread is blocked in taking an * element, it will receive availability notifications from {@link #notifyAvailable()} and * immediately reset them by calling {@link #poll()} and finding the queue empty. * * @return the first element in the queue. * @throws InterruptedException when the thread is interrupted. */ @VisibleForTesting public T take() throws InterruptedException { T next; while ((next = poll()) == null) { // use the future to wait for availability to avoid busy waiting try { getAvailabilityFuture().get(); } catch (ExecutionException | CompletionException e) { // this should never happen, but we propagate just in case throw new FlinkRuntimeException("exception in queue future completion", e); } } return next; }
3.68
pulsar_ManagedLedgerConfig_getReadEntryTimeoutSeconds
/** * Ledger read-entry timeout. * * @return the read-entry timeout in seconds */ public long getReadEntryTimeoutSeconds() { return readEntryTimeoutSeconds; }
3.68
hbase_EntryBuffers_appendEntry
/** * Append a log entry into the corresponding region buffer. Blocks if the total heap usage has * crossed the specified threshold. */ void appendEntry(WAL.Entry entry) throws InterruptedException, IOException { WALKey key = entry.getKey(); RegionEntryBuffer buffer; long incrHeap; synchronized (this) { buffer = buffers.get(key.getEncodedRegionName()); if (buffer == null) { buffer = new RegionEntryBuffer(key.getTableName(), key.getEncodedRegionName()); buffers.put(key.getEncodedRegionName(), buffer); } incrHeap = buffer.appendEntry(entry); } // If we crossed the chunk threshold, wait for more space to be available synchronized (controller.dataAvailable) { totalBuffered += incrHeap; while (totalBuffered > maxHeapUsage && controller.thrown.get() == null) { LOG.debug("Used {} bytes of buffered edits, waiting for IO threads", totalBuffered); controller.dataAvailable.wait(2000); } controller.dataAvailable.notifyAll(); } controller.checkForErrors(); }
3.68
hmily_Coordinator_setRollbackOnly
/** * Sets rollback only. */ public void setRollbackOnly() { if (state == XaState.STATUS_ACTIVE) { state = XaState.STATUS_MARKED_ROLLBACK; } }
3.68
hbase_ServerRegionReplicaUtil_isReadOnly
/** * Returns whether this region replica can accept writes. * @param region the HRegion object * @return whether the replica is read only */ public static boolean isReadOnly(HRegion region) { return region.getTableDescriptor().isReadOnly() || !isDefaultReplica(region.getRegionInfo()); }
3.68
starts_RTSUtil_computeAffectedTests
/** * This method computes the affected tests and sets the "changed" field to * contain the set of dependencies that changed since the last run. */ public static Set<String> computeAffectedTests(HashSet<String> allTests, Set<String> nonAffected, Map<String, Set<String>> testDeps) { long start = System.currentTimeMillis(); Set<String> affectedTests = new HashSet<>(allTests); affectedTests.removeAll(nonAffected); long end = System.currentTimeMillis(); LOGGER.log(Level.FINEST, "[TIME]COMPUTING AFFECTED: " + (end - start) + MILLISECOND); return affectedTests; }
3.68
dubbo_TriHttp2RemoteFlowController_cancel
/** * Clears the pending queue and writes errors for each remaining frame. * @param error the {@link Http2Error} to use. * @param cause the {@link Throwable} that caused this method to be invoked. */ void cancel(Http2Error error, Throwable cause) { cancelled = true; // Ensure that the queue can't be modified while we are writing. if (writing) { return; } FlowControlled frame = pendingWriteQueue.poll(); if (frame != null) { // Only create exception once and reuse to reduce overhead of filling in the stacktrace. final Http2Exception exception = streamError(stream.id(), error, cause, "Stream closed before write could take place"); do { writeError(frame, exception); frame = pendingWriteQueue.poll(); } while (frame != null); } streamByteDistributor.updateStreamableBytes(this); monitor.stateCancelled(this); }
3.68
flink_HandlerRequest_resolveParametersAndCreate
/** * Creates a new {@link HandlerRequest} after resolving the given {@link MessageParameters} * against the given query/path parameter maps. * * <p>For tests it is recommended to resolve the parameters manually and use {@link #create}. */ public static <R extends RequestBody, M extends MessageParameters> HandlerRequest<R> resolveParametersAndCreate( R requestBody, M messageParameters, Map<String, String> receivedPathParameters, Map<String, List<String>> receivedQueryParameters, Collection<File> uploadedFiles) throws HandlerRequestException { resolvePathParameters(messageParameters, receivedPathParameters); resolveQueryParameters(messageParameters, receivedQueryParameters); return create(requestBody, messageParameters, uploadedFiles); }
3.68
hudi_LSMTimeline_getVersionFilePath
/** * Returns the full path of the version file under the archive path. */ public static Path getVersionFilePath(HoodieTableMetaClient metaClient) { return new Path(metaClient.getArchivePath(), VERSION_FILE_NAME); }
3.68
framework_CalendarTest_switchToDayView
/* * Switch to day view (week view with a single day visible). */ public void switchToDayView() { viewMode = Mode.DAY; // monthButton.setVisible(true); // weekButton.setVisible(true); }
3.68
hadoop_StageConfig_getJobAttemptTaskSubDir
/** * Get the path to the subdirectory under $jobID where task * attempts are. List this dir to find all task attempt dirs. * @return a path under the job attempt dir. */ public Path getJobAttemptTaskSubDir() { return jobAttemptTaskSubDir; }
3.68
hbase_HRegion_enableInterrupts
/** * If a handler thread was made ineligible for interrupt via {@link #disableInterrupts()}, make * it eligible again. No-op if interrupts are already enabled. */ void enableInterrupts() { regionLockHolders.computeIfPresent(Thread.currentThread(), (t, b) -> true); }
3.68
streampipes_TextDocument_getContent
/** * Returns the {@link TextDocument}'s content. * * @return The content text. */ public String getContent() { return getText(true, false); }
3.68
hadoop_FilePosition_bufferStartOffset
/** * Gets the absolute offset of the start of the current block. * * @return the absolute offset of the start of the current block. */ public long bufferStartOffset() { throwIfInvalidBuffer(); return bufferStartOffset; }
3.68
pulsar_Schema_NATIVE_AVRO
/** * Create a schema instance that accepts a serialized Avro payload * without validating it against the schema specified. * It can be useful when migrating data from existing event or message stores. * * @return the auto schema instance * @since 2.9.0 */ static Schema<byte[]> NATIVE_AVRO(Object schema) { return DefaultImplementation.getDefaultImplementation().newAutoProduceValidatedAvroSchema(schema); }
3.68
hadoop_ActiveAuditManagerS3A_getActiveSpanMap
/** * Get the map of threads to active spans; allows * for testing of weak reference resolution after GC. * @return the span map */ @VisibleForTesting WeakReferenceThreadMap<WrappingAuditSpan> getActiveSpanMap() { return activeSpanMap; }
3.68
hbase_FlushPolicyFactory_create
/** * Create the FlushPolicy configured for the given table. */ public static FlushPolicy create(HRegion region, Configuration conf) throws IOException { Class<? extends FlushPolicy> clazz = getFlushPolicyClass(region.getTableDescriptor(), conf); FlushPolicy policy = ReflectionUtils.newInstance(clazz, conf); policy.configureForRegion(region); return policy; }
3.68
framework_ColorPickerGrid_setValue
/** * Sets the value of this object. If the new value is not equal to * {@code getValue()}, fires a {@link ValueChangeEvent}. Throws * {@code NullPointerException} if the value is null. * * @param color * the new value, not {@code null} * @throws NullPointerException * if {@code color} is {@code null} */ @Override public void setValue(Color color) { Objects.requireNonNull(color, "value cannot be null"); super.setValue(color); }
3.68
flink_EmptyIterator_remove
/** * Throws a {@link java.lang.UnsupportedOperationException}. * * @see java.util.Iterator#remove() */ @Override public void remove() { throw new UnsupportedOperationException(); }
3.68
framework_Page_updateLocation
/** * For internal use only. Used to update the server-side location when the * client-side location changes. * * @since 8.0 * * @param location * the new location URI * @param fireEvents * whether to fire {@link UriFragmentChangedEvent} if the URI * fragment changes * @param firePopstate * whether to fire {@link PopStateEvent} */ public void updateLocation(String location, boolean fireEvents, boolean firePopstate) { try { String oldUriFragment = this.location.getFragment(); this.location = new URI(location); String newUriFragment = this.location.getFragment(); if (fireEvents && !SharedUtil.equals(oldUriFragment, newUriFragment)) { fireEvent(new UriFragmentChangedEvent(this, newUriFragment)); } if (firePopstate) { fireEvent(new PopStateEvent(this, location)); } } catch (URISyntaxException e) { throw new RuntimeException(e); } }
3.68
pulsar_PulsarRegistrationClient_getBookiesThenFreshCache
/** * @throws IllegalArgumentException if parameter path is null or empty. */ private CompletableFuture<Versioned<Set<BookieId>>> getBookiesThenFreshCache(String path) { if (path == null || path.isEmpty()) { return failedFuture( new IllegalArgumentException("parameter [path] can not be null or empty.")); } return store.getChildren(path) .thenComposeAsync(children -> { final Set<BookieId> bookieIds = PulsarRegistrationClient.convertToBookieAddresses(children); final List<CompletableFuture<?>> bookieInfoUpdated = new ArrayList<>(bookieIds.size()); for (BookieId id : bookieIds) { // update the cache for new bookies if (path.equals(bookieReadonlyRegistrationPath) && readOnlyBookieInfo.get(id) == null) { bookieInfoUpdated.add(readBookieInfoAsReadonlyBookie(id)); continue; } if (path.equals(bookieRegistrationPath) && writableBookieInfo.get(id) == null) { bookieInfoUpdated.add(readBookieInfoAsWritableBookie(id)); continue; } if (path.equals(bookieAllRegistrationPath)) { if (writableBookieInfo.get(id) != null || readOnlyBookieInfo.get(id) != null) { // jump to next bookie id continue; } // check writable first final CompletableFuture<?> revalidateAllBookiesFuture = readBookieInfoAsWritableBookie(id) .thenCompose(writableBookieInfo -> writableBookieInfo .<CompletableFuture<Optional<CacheGetResult<BookieServiceInfo>>>>map( bookieServiceInfo -> completedFuture(null)) // check read-only then .orElseGet(() -> readBookieInfoAsReadonlyBookie(id))); bookieInfoUpdated.add(revalidateAllBookiesFuture); } } if (bookieInfoUpdated.isEmpty()) { return completedFuture(bookieIds); } else { return waitForAll(bookieInfoUpdated) .thenApply(___ -> bookieIds); } }) .thenApply(s -> new Versioned<>(s, Version.NEW)); }
3.68
framework_ApplicationConnection_captionSizeUpdated
/** * Use to notify that the given component's caption has changed; layouts may * have to be recalculated. * * @param widget * The Widget whose caption has changed * @deprecated As of 7.0.2, has not had any effect for a long time */ @Deprecated public void captionSizeUpdated(Widget widget) { // This doesn't do anything, it's just kept here for compatibility }
3.68
hadoop_LeveldbIterator_prev
/** * @return the previous element in the iteration, rewinding the iteration by one position. */ public Map.Entry<byte[], byte[]> prev() throws DBException { try { return iter.prev(); } catch (DBException e) { throw e; } catch (RuntimeException e) { throw new DBException(e.getMessage(), e); } }
3.68
dubbo_ValidationFilter_invoke
/** * Performs validation before invoking the actual method, based on the <b>validation</b> attribute value. * * @param invoker service * @param invocation invocation. * @return Method invocation result * @throws RpcException Throws RpcException if validation failed or any other runtime exception occurred. */ @Override public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException { if (needValidate(invoker.getUrl(), invocation.getMethodName())) { try { Validator validator = validation.getValidator(invoker.getUrl()); if (validator != null) { validator.validate( invocation.getMethodName(), invocation.getParameterTypes(), invocation.getArguments()); } } catch (RpcException e) { throw e; } catch (Throwable t) { return AsyncRpcResult.newDefaultAsyncResult(t, invocation); } } return invoker.invoke(invocation); }
3.68
flink_KeyedCoProcessFunction_onTimer
/** * Called when a timer set using {@link TimerService} fires. * * @param timestamp The timestamp of the firing timer. * @param ctx An {@link OnTimerContext} that allows querying the timestamp of the firing timer, * querying the {@link TimeDomain} of the firing timer and getting a {@link TimerService} * for registering timers and querying the time. The context is only valid during the * invocation of this method, do not store it. * @param out The collector for returning result values. * @throws Exception This method may throw exceptions. Throwing an exception will cause the * operation to fail and may trigger recovery. */ public void onTimer(long timestamp, OnTimerContext ctx, Collector<OUT> out) throws Exception {}
3.68
hmily_EventData_setSubscribe
/** * Sets subscribe. * * @param subscribe the subscribe */ public void setSubscribe(final String subscribe) { this.subscribe = subscribe; }
3.68
hbase_AsyncAdmin_mergeRegions
/** * Merge two regions. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b * @param forcible true to do a compulsory merge; otherwise only two adjacent * regions will be merged * @deprecated since 2.3.0 and will be removed in 4.0.0. Use {@link #mergeRegions(List, boolean)} * instead. */ @Deprecated default CompletableFuture<Void> mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB, boolean forcible) { return mergeRegions(Arrays.asList(nameOfRegionA, nameOfRegionB), forcible); }
3.68
framework_DataProvider_fromCallbacks
/** * Creates a new data provider that uses callbacks for fetching and counting * items from any backing store. * <p> * The query that is passed to each callback will not contain any filter * values. * * @param fetchCallback * function that returns a stream of items from the back end for * a query * @param countCallback * function that returns the number of items in the back end for * a query * @return a new callback data provider */ public static <T> CallbackDataProvider<T, Void> fromCallbacks( FetchCallback<T, Void> fetchCallback, CountCallback<T, Void> countCallback) { return fromFilteringCallbacks(fetchCallback, countCallback); }
3.68
flink_MemorySegment_wrap
/** * Wraps the chunk of the underlying memory located between <tt>offset</tt> and <tt>offset + * length</tt> in a NIO ByteBuffer. The ByteBuffer has the full segment as capacity and the * offset and length parameters set the buffer's position and limit. * * @param offset The offset in the memory segment. * @param length The number of bytes to be wrapped as a buffer. * @return A <tt>ByteBuffer</tt> backed by the specified portion of the memory segment. * @throws IndexOutOfBoundsException Thrown, if offset is negative or larger than the memory * segment size, or if the offset plus the length is larger than the segment size. */ public ByteBuffer wrap(int offset, int length) { if (!allowWrap) { throw new UnsupportedOperationException( "Wrap is not supported by this segment. This usually indicates that the underlying memory is unsafe, thus transferring of ownership is not allowed."); } return wrapInternal(offset, length); }
3.68
hadoop_NativeSingleLineParser_aggregateSkyline
/** * Aggregates different jobs' {@link ResourceSkyline}s within the same * pipeline together. * * @param resourceSkyline newly extracted {@link ResourceSkyline}. * @param recurrenceId the {@link RecurrenceId} which the resourceSkyline * belongs to. * @param skylineRecords a {@link Map} which stores the * {@link ResourceSkyline}s for all pipelines during this parsing. */ private void aggregateSkyline(final ResourceSkyline resourceSkyline, final RecurrenceId recurrenceId, final Map<RecurrenceId, List<ResourceSkyline>> skylineRecords) { List<ResourceSkyline> resourceSkylines = skylineRecords.get(recurrenceId); if (resourceSkylines == null) { resourceSkylines = new ArrayList<ResourceSkyline>(); skylineRecords.put(recurrenceId, resourceSkylines); } resourceSkylines.add(resourceSkyline); }
3.68
hbase_RawBytes_decode
/** * Read a {@code byte[]} from the buffer {@code src}. * @param src the {@link PositionedByteRange} to read the {@code byte[]} from * @param length the length to read from the buffer * @return the {@code byte[]} read from the buffer */ public byte[] decode(PositionedByteRange src, int length) { byte[] val = new byte[length]; src.get(val); return val; }
3.68
graphhopper_PbfBlobResult_storeSuccessResult
/** * Stores the results of a successful blob decoding operation. * * @param decodedEntities The entities from the blob. */ public void storeSuccessResult(List<ReaderElement> decodedEntities) { entities = decodedEntities; complete = true; success = true; }
3.68
flink_SnapshotDirectory_temporary
/** * Creates a local temporary snapshot directory for the given path. This will always return * "null" as result of {@link #completeSnapshotAndGetHandle()} and always attempt to delete the * underlying directory in {@link #cleanup()}. */ public static SnapshotDirectory temporary(@Nonnull File directory) throws IOException { return new TemporarySnapshotDirectory(directory); }
3.68
hadoop_AuxiliaryService_initializeContainer
/** * A new container is started on this NodeManager. This is a signal to * this {@link AuxiliaryService} about the container initialization. * This method is called when the NodeManager receives the container launch * command from the ApplicationMaster and before the container process is * launched. * * @param initContainerContext context for the container's initialization */ public void initializeContainer(ContainerInitializationContext initContainerContext) { }
3.68
hbase_PrivateCellUtil_getValueAsInt
/** * Converts the value bytes of the given cell into an int value * @return value as int */ public static int getValueAsInt(Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return ByteBufferUtils.toInt(((ByteBufferExtendedCell) cell).getValueByteBuffer(), ((ByteBufferExtendedCell) cell).getValuePosition()); } return Bytes.toInt(cell.getValueArray(), cell.getValueOffset()); }
3.68
flink_StreamNonDeterministicUpdatePlanVisitor_inputInsertOnly
// helper methods private boolean inputInsertOnly(final StreamPhysicalRel rel) { return ChangelogPlanUtils.inputInsertOnly(rel); }
3.68
hbase_KeyValue_clone
/** * Clones a KeyValue. This creates a copy, re-allocating the buffer. * @return Fully copied clone of this KeyValue * @throws CloneNotSupportedException if cloning of keyValue is not supported */ @Override public KeyValue clone() throws CloneNotSupportedException { KeyValue ret = (KeyValue) super.clone(); ret.bytes = Arrays.copyOf(this.bytes, this.bytes.length); ret.offset = 0; ret.length = ret.bytes.length; // Important to clone the memstoreTS as well - otherwise memstore's // update-in-place methods (eg increment) will end up creating // new entries ret.setSequenceId(seqId); return ret; }
3.68
hbase_MasterObserver_preRemoveReplicationPeer
/** * Called before removing a replication peer. * @param peerId a short name that identifies the peer */ default void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId) throws IOException { }
3.68
hadoop_OBSInputStream_remainingInCurrentRequest
/** * Bytes left in the current request. Only valid if there is an active * request. * * @return how many bytes are left to read in the current GET. */ @InterfaceAudience.Private @InterfaceStability.Unstable public synchronized long remainingInCurrentRequest() { return this.contentRangeFinish - this.streamCurrentPos; }
3.68
hbase_Tag_copyValueTo
/** * Copies the tag's value bytes to the given byte array * @param tag The Tag * @param out The byte array where to copy the Tag value. * @param offset The offset within 'out' array where to copy the Tag value. */ public static void copyValueTo(Tag tag, byte[] out, int offset) { if (tag.hasArray()) { Bytes.putBytes(out, offset, tag.getValueArray(), tag.getValueOffset(), tag.getValueLength()); } else { ByteBufferUtils.copyFromBufferToArray(out, tag.getValueByteBuffer(), tag.getValueOffset(), offset, tag.getValueLength()); } }
3.68
hibernate-validator_MethodValidationConfiguration_isAllowParallelMethodsDefineParameterConstraints
/** * @return {@code true} if constraints on methods in parallel class hierarchy are allowed, {@code false} otherwise. */ public boolean isAllowParallelMethodsDefineParameterConstraints() { return this.allowParallelMethodsDefineParameterConstraints; }
3.68
hudi_HoodieLogFileReader_moveToPrev
/** * Reverses the pointer; it does not read the block. Returns the current position of the log file (in reverse). If the pointer * (inputstream) is moved in any way, it is the job of the client of this class to seek/reset it back to the file * position returned from this method in order to get correct results. */ public long moveToPrev() throws IOException { if (!this.reverseReader) { throw new HoodieNotSupportedException("Reverse log reader has not been enabled"); } inputStream.seek(lastReverseLogFilePosition); long blockSize = inputStream.readLong(); // blocksize should be everything about a block including the length as well inputStream.seek(reverseLogFilePosition - blockSize); reverseLogFilePosition -= blockSize; lastReverseLogFilePosition = reverseLogFilePosition; return reverseLogFilePosition; }
3.68
morf_AbstractSqlDialectTest_expectedLeast
/** * @return The expected SQL statement when performing the ANSI LEAST call */ protected String expectedLeast() { return "SELECT LEAST(NULL, bob) FROM " + tableName("MyTable"); }
3.68
hibernate-validator_ReflectionHelper_isMap
/** * @param type the type to check. * * @return Returns {@code true} if {@code type} is implementing {@code Map}, {@code false} otherwise. */ public static boolean isMap(Type type) { if ( type instanceof Class && Map.class.isAssignableFrom( (Class<?>) type ) ) { return true; } if ( type instanceof ParameterizedType ) { return isMap( ( (ParameterizedType) type ).getRawType() ); } if ( type instanceof WildcardType ) { Type[] upperBounds = ( (WildcardType) type ).getUpperBounds(); return upperBounds.length != 0 && isMap( upperBounds[0] ); } return false; }
3.68
hudi_BufferedRandomAccessFile_flush
/** * If the file is writable, flush any bytes in the buffer that have not yet been written to disk. * @throws IOException if an I/O error occurs while flushing */ public void flush() throws IOException { this.flushBuffer(); }
3.68
hbase_ZkSplitLogWorkerCoordination_grabTask
/** * Try to grab a 'lock' on the task zk node to own and execute the task. * <p> * @param path zk node for the task * @return true when the task is successfully grabbed, false otherwise */ private boolean grabTask(String path) { Stat stat = new Stat(); byte[] data; synchronized (grabTaskLock) { currentTask = path; workerInGrabTask = true; if (Thread.interrupted()) { return false; } } try { try { if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) { SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.increment(); return false; } } catch (KeeperException e) { LOG.warn("Failed to get data for znode " + path, e); SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment(); return false; } SplitLogTask slt; try { slt = SplitLogTask.parseFrom(data); } catch (DeserializationException e) { LOG.warn("Failed parse data for znode " + path, e); SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment(); return false; } if (!slt.isUnassigned()) { SplitLogCounters.tot_wkr_failed_to_grab_task_owned.increment(); return false; } currentVersion = attemptToOwnTask(true, watcher, server.getServerName(), path, stat.getVersion()); if (currentVersion < 0) { SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment(); return false; } if (ZKSplitLog.isRescanNode(watcher, currentTask)) { ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails(); splitTaskDetails.setTaskNode(currentTask); splitTaskDetails.setCurTaskZKVersion(new MutableInt(currentVersion)); endTask(new SplitLogTask.Done(server.getServerName()), SplitLogCounters.tot_wkr_task_acquired_rescan, splitTaskDetails); return false; } LOG.info("worker " + server.getServerName() + " acquired task " + path); SplitLogCounters.tot_wkr_task_acquired.increment(); getDataSetWatchAsync(); submitTask(path, currentVersion, reportPeriod); // after a successful submit, sleep a little bit to allow other RSs to grab the remaining tasks try { int sleepTime = ThreadLocalRandom.current().nextInt(500) + 500; Thread.sleep(sleepTime); } catch (InterruptedException e) { LOG.warn("Interrupted while yielding for other region servers", e); Thread.currentThread().interrupt(); } return true; } finally { synchronized (grabTaskLock) { workerInGrabTask = false; // clear the interrupt from stopTask() otherwise the next task will // suffer Thread.interrupted(); } } }
3.68
hbase_PendingWatcher_prepare
/** * Associates the actual watcher that processes events. This method should be called once, and * {@code watcher} should be non-null. This method is expected to be called as soon as possible, because * the event processing, being invoked by the ZooKeeper event thread, is uninterruptibly blocked * until this method is called. */ void prepare(Watcher watcher) { pending.prepare(watcher); }
3.68
flink_BaseHybridHashTable_close
/** * Closes the hash table. This effectively releases all internal structures and closes all open * files and removes them. The call to this method is valid both as a cleanup after the complete * inputs were properly processed, and as a cancellation call, which cleans up all resources * that are currently held by the hash join. */ public void close() { // make sure that we close only once if (!this.closed.compareAndSet(false, true)) { return; } // clear the current build side channel, if there is one if (this.currentSpilledBuildSide != null) { try { this.currentSpilledBuildSide.getChannel().closeAndDelete(); } catch (Throwable t) { LOG.warn( "Could not close and delete the temp file for the current spilled partition build side.", t); } } // clear the current probe side channel, if there is one if (this.currentSpilledProbeSide != null) { try { this.currentSpilledProbeSide.getChannel().closeAndDelete(); } catch (Throwable t) { LOG.warn( "Could not close and delete the temp file for the current spilled partition probe side.", t); } } // clear the memory in the partitions clearPartitions(); // return the write-behind buffers for (int i = 0; i < this.buildSpillRetBufferNumbers; i++) { try { returnPage(this.buildSpillReturnBuffers.take()); } catch (InterruptedException iex) { throw new RuntimeException("Hashtable closing was interrupted"); } } this.buildSpillRetBufferNumbers = 0; }
3.68
open-banking-gateway_Xs2aAdapterConfiguration_xs2aPkcs12KeyStore
/** * The keystore for QWAC and QSEAL certificates. * @param keystorePath Location of the keystore. * @param keystorePassword Keystore password. */ @Bean @SneakyThrows Pkcs12KeyStore xs2aPkcs12KeyStore( @Value("${" + XS2A_PROTOCOL_CONFIG_PREFIX + "pkcs12.keystore}") String keystorePath, @Value("${" + XS2A_PROTOCOL_CONFIG_PREFIX + "pkcs12.password}") char[] keystorePassword ) { if (Paths.get(keystorePath).toFile().exists()) { return new Pkcs12KeyStore(Paths.get(keystorePath).toAbsolutePath().toString(), keystorePassword); } try (var is = Resources.getResource(keystorePath).openStream()) { return new Pkcs12KeyStore(is, keystorePassword, "default_qwac", "default_qseal"); } }
3.68
hbase_ProtobufMagic_compareTo
/* * Copied from Bytes.java to here. hbase-common now depends on hbase-protocol; referencing * Bytes.java directly would create a circular dependency. */ private static int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, int length2) { // Short circuit equal case if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { return 0; } // Bring WritableComparator code local int end1 = offset1 + length1; int end2 = offset2 + length2; for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) { int a = (buffer1[i] & 0xff); int b = (buffer2[j] & 0xff); if (a != b) { return a - b; } } return length1 - length2; }
3.68
flink_MurmurHashUtil_hashBytes
/** * Hash bytes in MemorySegment. * * @param segment segment. * @param offset offset for MemorySegment * @param lengthInBytes length in MemorySegment * @return hash code */ public static int hashBytes(MemorySegment segment, int offset, int lengthInBytes) { return hashBytes(segment, offset, lengthInBytes, DEFAULT_SEED); }
3.68
hbase_MasterRpcServices_lockHeartbeat
/** * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED. * @throws ServiceException if given proc id is found but it is not a LockProcedure. */ @Override public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request) throws ServiceException { try { if ( server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(), request.getKeepAlive()) ) { return LockHeartbeatResponse.newBuilder() .setTimeoutMs(server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS)) .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build(); } else { return LockHeartbeatResponse.newBuilder() .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build(); } } catch (IOException e) { throw new ServiceException(e); } }
3.68
flink_InPlaceMutableHashTable_noSeekAppendPointerAndRecord
/** * Appends a pointer and a record. Call this function only if the write position is at the * end! * * @param pointer The pointer to write (Note: this is NOT the position to write to!) * @param record The record to write * @return A pointer to the written data * @throws IOException (EOFException specifically, if memory ran out) */ public long noSeekAppendPointerAndRecord(long pointer, T record) throws IOException { final long oldLastPosition = appendPosition; final long oldPositionInSegment = outView.getCurrentPositionInSegment(); final long oldSegmentIndex = outView.currentSegmentIndex; outView.writeLong(pointer); buildSideSerializer.serialize(record, outView); appendPosition += outView.getCurrentPositionInSegment() - oldPositionInSegment + outView.getSegmentSize() * (outView.currentSegmentIndex - oldSegmentIndex); return oldLastPosition; }
3.68
framework_MenuItem_getSubMenu
/** * Gets the sub-menu associated with this item. * * @return this item's sub-menu, or <code>null</code> if none exists */ public MenuBar getSubMenu() { return subMenu; }
3.68
flink_ScalaCsvOutputFormat_setInputType
/** * The purpose of this method is solely to check whether the data type to be processed is in * fact a tuple type. */ @Override public void setInputType(TypeInformation<?> type, ExecutionConfig executionConfig) { if (!type.isTupleType()) { throw new InvalidProgramException( "The " + ScalaCsvOutputFormat.class.getSimpleName() + " can only be used to write tuple data sets."); } }
3.68
zxing_PDF417ResultMetadata_getSegmentCount
/** * @return count of segments, -1 if not set */ public int getSegmentCount() { return segmentCount; }
3.68
hbase_RowModel_setKey
/** * @param key the row key */ public void setKey(byte[] key) { this.key = key; }
3.68
hbase_RegionCoprocessorHost_postScannerNext
/** * @param s the scanner * @param results the result set returned by the region server * @param limit the maximum number of results to return * @param hasMore the 'has more' indication received from the region server * @return 'has more' indication to give to client * @throws IOException if a coprocessor hook fails */ public boolean postScannerNext(final InternalScanner s, final List<Result> results, final int limit, boolean hasMore) throws IOException { if (this.coprocEnvironments.isEmpty()) { return hasMore; } return execOperationWithResult( new ObserverOperationWithResult<RegionObserver, Boolean>(regionObserverGetter, hasMore) { @Override public Boolean call(RegionObserver observer) throws IOException { return observer.postScannerNext(this, s, results, limit, getResult()); } }); }
3.68
morf_AbstractSqlDialectTest_testInsertIntoValuesWithComplexField
/** * Tests an INSERT INTO (...) VALUES (...) statement with a complex field. */ @Test public void testInsertIntoValuesWithComplexField() { Schema schema = schema(table("TableOne").columns(column("id", DataType.INTEGER), column("value", DataType.INTEGER))); InsertStatement testStatement = insert().into(tableRef("TableOne")).values(literal(3).as("id"), literal(1).plus(literal(2)).as("value")); assertEquals(expectedSqlInsertIntoValuesWithComplexField(), testDialect.convertStatementToSQL(testStatement, schema, null)); }
3.68
framework_AbstractColorPicker_setTextfieldVisibility
/** * Sets the visibility of the CSS color code text field. * * @param visible * The visibility */ public void setTextfieldVisibility(boolean visible) { textfieldVisible = visible; if (window != null) { window.setPreviewVisible(visible); } }
3.68
dubbo_PathAndInvokerMapper_removePath
/** * Undeploys the path metadata. * * @param pathMatcher the path to undeploy */ public void removePath(PathMatcher pathMatcher) { InvokerAndRestMethodMetadataPair containPathVariablePair = pathToServiceMapContainPathVariable.remove(pathMatcher); InvokerAndRestMethodMetadataPair unContainPathVariablePair = pathToServiceMapNoPathVariable.remove(pathMatcher); logger.info("dubbo rest undeploy pathMatcher:" + pathMatcher + ", and path variable method is :" + (containPathVariablePair == null ? null : containPathVariablePair.getRestMethodMetadata().getReflectMethod()) + ", and no path variable method is :" + (unContainPathVariablePair == null ? null : unContainPathVariablePair.getRestMethodMetadata().getReflectMethod())); }
3.68
framework_VCalendar_getEventResizeListener
/** * Get the listener that is notified when an event's time limits are being * adjusted. * * @return the event resize listener */ public EventResizeListener getEventResizeListener() { return eventResizeListener; }
3.68
querydsl_GroupBy_sum
/** * Create a new aggregating sum expression. * * @param expression expression for which the accumulated sum will be used in the group by projection * @return wrapper expression */ public static <E extends Number> AbstractGroupExpression<E, E> sum(Expression<E> expression) { return new GSum<E>(expression); }
3.68
hadoop_FindOptions_getIn
/** * Returns the input stream to be used. * * @return input stream to be used */ public InputStream getIn() { return this.in; }
3.68
framework_VMenuBar_hideChildren
/** * Recursively hide all child menus. * * @param animateIn * enable/disable the animate-in animation when hiding the popup * @param animateOut * enable/disable the animate-out animation when hiding the popup * @since 7.3.7 */ public void hideChildren(boolean animateIn, boolean animateOut) { if (visibleChildMenu != null) { visibleChildMenu.menuVisible = false; visibleChildMenu.hideChildren(animateIn, animateOut); popup.hide(false, animateIn, animateOut); } }
3.68
framework_Upload_removeSucceededListener
/** * Removes the upload success event listener. * * @param listener * the Listener to be removed. */ @Deprecated public void removeSucceededListener(SucceededListener listener) { removeListener(SucceededEvent.class, listener, UPLOAD_SUCCEEDED_METHOD); }
3.68
flink_AbstractBytesMultiMap_checkSkipWriteForPointer
/** For a pointer needing update, skip the unaligned part (4 bytes) for convenient updating. */ private int checkSkipWriteForPointer(AbstractPagedOutputView outView) throws IOException { // skip if there is not enough space. int available = outView.getSegmentSize() - outView.getCurrentPositionInSegment(); if (available < ELEMENT_POINT_LENGTH) { outView.advance(); return available; } return 0; }
3.68
hbase_EventHandler_getPriority
/** * Get the priority level for this handler instance. This uses natural ordering so lower numbers * are higher priority. * <p> * Lowest priority is Integer.MAX_VALUE. Highest priority is 0. * <p> * Subclasses should override this method to allow prioritizing handlers. * <p> * Handlers with the same priority are handled in FIFO order. * <p> * @return Integer.MAX_VALUE by default, override to set higher priorities */ public int getPriority() { return Integer.MAX_VALUE; }
3.68
AreaShop_AreaShop_warn
/** * Print a warning to the console. * @param message The message to print */ public static void warn(Object... message) { AreaShop.getInstance().getLogger().warning(StringUtils.join(message, " ")); }
3.68
flink_StringUtils_getRandomString
/** * Creates a random string with a length within the given interval. The string contains only * characters that can be represented as a single code point. * * @param rnd The random used to create the strings. * @param minLength The minimum string length. * @param maxLength The maximum string length (inclusive). * @param minValue The minimum character value to occur. * @param maxValue The maximum character value to occur. * @return A random String. */ public static String getRandomString( Random rnd, int minLength, int maxLength, char minValue, char maxValue) { int len = rnd.nextInt(maxLength - minLength + 1) + minLength; char[] data = new char[len]; int diff = maxValue - minValue + 1; for (int i = 0; i < data.length; i++) { data[i] = (char) (rnd.nextInt(diff) + minValue); } return new String(data); }
3.68
MagicPlugin_BaseSpell_getDisplayCooldown
/** * @return The cooldown to show in UI. Spells can manually set their * "display_cooldown" if they apply cooldown via an action. */ private int getDisplayCooldown() { return displayCooldown != -1 ? displayCooldown : cooldown; }
3.68
flink_DCounter_getMetricValue
/** * Returns the count of events since the last report. * * @return the number of events since the last retrieval */ @Override public Number getMetricValue() { long currentCount = counter.getCount(); long difference = currentCount - lastReportCount; currentReportCount = currentCount; return difference; }
3.68
flink_HiveParserQBParseInfo_getDistributeByForClause
/** Get the Distribute By AST for the clause. */ public HiveParserASTNode getDistributeByForClause(String clause) { return destToDistributeby.get(clause); }
3.68
flink_StateTable_isEmpty
/** * Returns whether this {@link StateTable} is empty. * * @return {@code true} if this {@link StateTable} has no elements, {@code false} otherwise. * @see #size() */ public boolean isEmpty() { return size() == 0; }
3.68
AreaShop_RentRegion_getMaxRentTime
/** * Get the maximum time the player can rent the region in advance (milliseconds). * @return The maximum rent time in milliseconds */ public long getMaxRentTime() { return Utils.getDurationFromMinutesOrStringInput(getStringSetting("rent.maxRentTime")); }
3.68
hbase_DumpReplicationQueues_main
/** * Main entry point. */ public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); int ret = ToolRunner.run(conf, new DumpReplicationQueues(), args); System.exit(ret); }
3.68
flink_StreamExecutionEnvironment_registerType
/** * Registers the given type with the serialization stack. If the type is eventually serialized * as a POJO, then the type is registered with the POJO serializer. If the type ends up being * serialized with Kryo, then it will be registered at Kryo to make sure that only tags are * written. * * @param type The class of the type to register. */ public void registerType(Class<?> type) { if (type == null) { throw new NullPointerException("Cannot register null type class."); } TypeInformation<?> typeInfo = TypeExtractor.createTypeInfo(type); if (typeInfo instanceof PojoTypeInfo) { config.registerPojoType(type); } else { config.registerKryoType(type); } }
3.68
querydsl_StringExpression_matches
/** * Create a {@code this.matches(regex)} expression * * <p>Return true if this String matches the given regular expression</p> * * <p>Some implementations such as Querydsl JPA will try to convert a regex expression into like * form and will throw an Exception when this fails</p> * * @param regex regular expression * @return this.matches(regex) * @see java.lang.String#matches(String) */ public BooleanExpression matches(String regex) { return matches(ConstantImpl.create(regex)); }
3.68
framework_AbstractSingleSelect_getSelectedItem
/** * Returns the currently selected item, or an empty optional if no item is * selected. * * @return an optional of the selected item if any, an empty optional * otherwise */ public Optional<T> getSelectedItem() { return Optional.ofNullable(selectedItem); }
3.68
graphhopper_GraphHopper_getLandmarks
/** * @return a mapping between profile names and according landmark preparations. The map will be empty before loading * or import. */ public Map<String, LandmarkStorage> getLandmarks() { return landmarks; }
3.68
framework_Upload_isButtonCaptionAsHtml
/** * Checks whether the caption of the button that fires uploading is rendered * as HTML * <p> * The default is {@code false}, i.e. to render that caption as plain text. * * @return {@code true} if the caption is rendered as HTML, {@code false} if * rendered as plain text * @since 8.11 */ public boolean isButtonCaptionAsHtml() { return getState(false).buttonCaptionAsHtml; }
3.68
hudi_HoodieRecordPayload_combineAndGetUpdateValue
/** * This method lets you write custom merging/combining logic to produce new values as a function of the current value on storage and what's contained * in this object. Implementations can leverage properties if required. * <p> * e.g.: * 1) You are updating counters, you may want to add counts to currentValue and write back updated counts * 2) You may be reading DB redo logs, and merge them with the current image for a database row on storage * </p> * * @param currentValue Current value in storage, to merge/combine this payload with * @param schema Schema used for record * @param properties Payload related properties. For example pass the ordering field(s) name to extract from value in storage. * @return new combined/merged value to be written back to storage. EMPTY to skip writing this record. */ default Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema, Properties properties) throws IOException { return combineAndGetUpdateValue(currentValue, schema); }
3.68
hudi_TimelineUtils_validateTimestampAsOf
/** * Validate user-specified timestamp of time travel query against incomplete commit's timestamp. * * @throws HoodieException when time travel query's timestamp >= incomplete commit's timestamp */ public static void validateTimestampAsOf(HoodieTableMetaClient metaClient, String timestampAsOf) { Option<HoodieInstant> firstIncompleteCommit = metaClient.getCommitsTimeline() .filterInflightsAndRequested() .filter(instant -> !HoodieTimeline.REPLACE_COMMIT_ACTION.equals(instant.getAction()) || !ClusteringUtils.getClusteringPlan(metaClient, instant).isPresent()) .firstInstant(); if (firstIncompleteCommit.isPresent()) { String incompleteCommitTime = firstIncompleteCommit.get().getTimestamp(); if (compareTimestamps(timestampAsOf, GREATER_THAN_OR_EQUALS, incompleteCommitTime)) { throw new HoodieTimeTravelException(String.format( "Time travel's timestamp '%s' must be earlier than the first incomplete commit timestamp '%s'.", timestampAsOf, incompleteCommitTime)); } } // Also, a timestamp as of query cannot read cleaned up data. Option<HoodieInstant> latestCleanOpt = metaClient.getActiveTimeline().getCleanerTimeline().filterCompletedInstants().lastInstant(); if (latestCleanOpt.isPresent()) { // Ensure the timestamp as of is greater than or equal to the earliest commit to retain try { HoodieCleanMetadata cleanMetadata = CleanerUtils.getCleanerMetadata(metaClient, latestCleanOpt.get()); String earliestCommitToRetain = cleanMetadata.getEarliestCommitToRetain(); if (!StringUtils.isNullOrEmpty(earliestCommitToRetain)) { ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(earliestCommitToRetain, LESSER_THAN_OR_EQUALS, timestampAsOf), "Cleaner cleaned up the timestamp of interest. Please ensure sufficient commits are retained with cleaner " + "for Timestamp as of query to work"); } else { // when cleaner is based on file versions, we may not find value for earliestCommitToRetain. // so, lets check if timestamp of interest is archived based on first entry in active timeline Option<HoodieInstant> firstCompletedInstant = metaClient.getActiveTimeline().getWriteTimeline().filterCompletedInstants().firstInstant(); if (firstCompletedInstant.isPresent()) { ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(firstCompletedInstant.get().getTimestamp(), LESSER_THAN_OR_EQUALS, timestampAsOf), "Please ensure sufficient commits are retained (uncleaned and un-archived) for timestamp as of query to work."); } } } catch (IOException e) { throw new HoodieTimeTravelException("Cleaner cleaned up the timestamp of interest. " + "Please ensure sufficient commits are retained with cleaner for Timestamp as of query to work "); } } }
3.68
flink_JobGraph_isCheckpointingEnabled
/** * Checks if the checkpointing was enabled for this job graph. * * @return true if checkpointing enabled */ public boolean isCheckpointingEnabled() { if (snapshotSettings == null) { return false; } return snapshotSettings.getCheckpointCoordinatorConfiguration().isCheckpointingEnabled(); }
3.68
hbase_HRegionServer_getUseThisHostnameInstead
// HMaster should override this method to load the specific config for master @Override protected String getUseThisHostnameInstead(Configuration conf) throws IOException { String hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); if (conf.getBoolean(UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) { if (!StringUtils.isBlank(hostname)) { String msg = UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + UNSAFE_RS_HOSTNAME_KEY + " are mutually exclusive. Do not set " + UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " to true while " + UNSAFE_RS_HOSTNAME_KEY + " is used"; throw new IOException(msg); } else { return rpcServices.getSocketAddress().getHostName(); } } else { return hostname; } }
3.68
hbase_AuthManager_refreshNamespaceCacheFromWritable
/** * Update acl info for namespace. * @param namespace namespace * @param data updated acl data * @throws IOException exception when deserialize data */ public void refreshNamespaceCacheFromWritable(String namespace, byte[] data) throws IOException { if (data != null && data.length > 0) { try { ListMultimap<String, Permission> perms = PermissionStorage.readPermissions(data, conf); if (perms != null) { updateNamespaceCache(namespace, perms); } } catch (DeserializationException e) { throw new IOException(e); } } else { LOG.debug("Skipping permission cache refresh because writable data is empty"); } }
3.68
hbase_QuotaObserverChore_hasTableQuota
/** * Returns true if the given table has a table quota. */ public boolean hasTableQuota(TableName tn) { return tablesWithTableQuotas.contains(tn); }
3.68
hbase_PermissionStorage_removeUserPermission
/** * Removes a previously granted permission from the stored access control lists. The * {@link TablePermission} being removed must exactly match what is stored -- no wildcard matching * is attempted. I.e., if user "bob" has been granted "READ" access to the "data" table, but only to * column family plus qualifier "info:colA", then trying to call this method with only user "bob" * and the table name "data" (but without specifying the column qualifier "info:colA") will have * no effect. * @param conf the configuration * @param userPerm the details of the permission to be revoked * @param t acl table * @throws IOException if there is an error accessing the metadata table */ public static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t) throws IOException { if ( null == userPerm.getPermission().getActions() || userPerm.getPermission().getActions().length == 0 ) { removePermissionRecord(conf, userPerm, t); } else { // Get all the global user permissions from the acl table List<UserPermission> permsList = getUserPermissions(conf, userPermissionRowKey(userPerm.getPermission()), null, null, null, false); List<Permission.Action> remainingActions = new ArrayList<>(); List<Permission.Action> dropActions = Arrays.asList(userPerm.getPermission().getActions()); for (UserPermission perm : permsList) { // Find the user and remove only the requested permissions if (perm.getUser().equals(userPerm.getUser())) { for (Permission.Action oldAction : perm.getPermission().getActions()) { if (!dropActions.contains(oldAction)) { remainingActions.add(oldAction); } } if (!remainingActions.isEmpty()) { perm.getPermission() .setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()])); addUserPermission(conf, perm, t); } else { removePermissionRecord(conf, userPerm, t); } break; } } } if (LOG.isDebugEnabled()) { LOG.debug("Removed permission " + userPerm.toString()); } }
3.68
hbase_FilterListBase_transformCell
/** * For FilterList, we can consider a filter list as a node in a tree. Sub-filters of the filter * list are children of the relative node. The logic of transforming a cell of a filter list can * be considered as the process of a post-order tree traversal. For a node, before we traverse the * current child, we should set the traversal result (transformed cell) of previous node(s) as * the initial value. (HBASE-18879). * @param c The cell in question. * @return the transformed cell. */ @Override public Cell transformCell(Cell c) throws IOException { if (isEmpty()) { return super.transformCell(c); } Cell transformed = c; for (int i = 0, n = filters.size(); i < n; i++) { if (subFiltersIncludedCell.get(i)) { transformed = filters.get(i).transformCell(transformed); } } return transformed; }
3.68
hbase_ZNodePaths_getMetaReplicaIdFromPath
/** * Parses the meta replicaId from the passed path. * @param path the name of the full path which includes baseZNode. */ public int getMetaReplicaIdFromPath(String path) { // Extract the znode from path. The prefix is of the following format. // baseZNode + PATH_SEPARATOR. int prefixLen = baseZNode.length() + 1; return getMetaReplicaIdFromZNode(path.substring(prefixLen)); }
3.68
flink_StreamExecutionEnvironment_setRuntimeMode
/** * Sets the runtime execution mode for the application (see {@link RuntimeExecutionMode}). This * is equivalent to setting the {@code execution.runtime-mode} in your application's * configuration file. * * <p>We recommend users to NOT use this method but set the {@code execution.runtime-mode} using * the command-line when submitting the application. Keeping the application code * configuration-free allows for more flexibility as the same application will be able to be * executed in any execution mode. * * @param executionMode the desired execution mode. * @return The execution environment of your application. */ @PublicEvolving public StreamExecutionEnvironment setRuntimeMode(final RuntimeExecutionMode executionMode) { checkNotNull(executionMode); configuration.set(ExecutionOptions.RUNTIME_MODE, executionMode); return this; }
3.68
hbase_WALEdit_add
/** * Append the given map of family->edits to a WALEdit data structure. This does not write to the * WAL itself. Note that as an optimization, we will stamp the Set of column families into the * WALEdit to save on our having to calculate column families subsequently down in the actual WAL * writing. * @param familyMap map of family->edits */ public void add(Map<byte[], List<Cell>> familyMap) { for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) { // 'foreach' loop NOT used. See HBASE-12023 "...creates too many iterator objects." int listSize = e.getValue().size(); // Add all Cells first and then at end, add the family rather than call {@link #add(Cell)} // and have it clone family each time. Optimization! for (int i = 0; i < listSize; i++) { addCell(e.getValue().get(i)); } addFamily(e.getKey()); } }
3.68