Columns: name — string (lengths 12 to 178) · code_snippet — string (lengths 8 to 36.5k) · score — float64 (range 3.26 to 3.68)
streampipes_ChangedValueDetectionProcessor_declareModel
//TODO: Change Icon
@Override
public DataProcessorDescription declareModel() {
    return ProcessingElementBuilder.create("org.apache.streampipes.processors.transformation.jvm.changed-value")
        .category(DataProcessorType.VALUE_OBSERVER)
        .withLocales(Locales.EN)
        .withAssets(Assets.DOCUMENTATION)
        .requiredStream(StreamRequirementsBuilder.create()
            .requiredPropertyWithUnaryMapping(EpRequirements.anyProperty(),
                Labels.withId(COMPARE_FIELD_ID), PropertyScope.NONE)
            .build())
        .outputStrategy(OutputStrategies.append(EpProperties.timestampProperty(CHANGE_FIELD_NAME)))
        .build();
}
3.68
pulsar_BrokerService_unblockDispatchersOnUnAckMessages
/** * Unblocks the dispatchers and removes them from the {@link #blockedDispatchers} list. * * @param dispatcherList the dispatchers to unblock */ public void unblockDispatchersOnUnAckMessages(List<PersistentDispatcherMultipleConsumers> dispatcherList) { lock.writeLock().lock(); try { dispatcherList.forEach(dispatcher -> { dispatcher.unBlockDispatcherOnUnackedMsgs(); executor().execute(() -> dispatcher.readMoreEntries()); log.info("[{}] Dispatcher is unblocked", dispatcher.getName()); blockedDispatchers.remove(dispatcher); }); } finally { lock.writeLock().unlock(); } }
3.68
hadoop_RollingFileSystemSink_throwMetricsException
/** * If the sink isn't set to ignore errors, throw a new * {@link MetricsException}. The message parameter will be used as the * new exception's message with the current file name * ({@link #currentFilePath}) appended to it. * * @param message the exception message. The message will have a colon and * the current file name ({@link #currentFilePath}) appended to it. */ private void throwMetricsException(String message) { if (!ignoreError) { throw new MetricsException(message + ": " + currentFilePath); } }
3.68
flink_AbstractAutoCloseableRegistry_removeCloseableInternal
/** Removes a mapping from the registry map, respecting locking. */ protected final boolean removeCloseableInternal(R closeable) { synchronized (getSynchronizationLock()) { return closeableToRef.remove(closeable) != null; } }
3.68
hadoop_LpSolver_generateOverAllocationConstraints
/**
 * Generate over-allocation constraints.
 *
 * @param lpModel the LP model.
 * @param cJobITimeK actual container allocation for job i in time interval k.
 * @param oa container over-allocation.
 * @param x predicted container allocation.
 * @param indexJobITimeK index for job i at time interval k.
 * @param timeK index for time interval k.
 */
private void generateOverAllocationConstraints(
    final ExpressionsBasedModel lpModel, final double cJobITimeK,
    final Variable[] oa, final Variable[] x, final int indexJobITimeK,
    final int timeK) {
    // oa_job_i_timeK >= x_timeK - cJobITimeK
    Expression overAllocExpression =
        lpModel.addExpression("over_alloc_" + indexJobITimeK);
    overAllocExpression.set(oa[indexJobITimeK], 1);
    overAllocExpression.set(x[timeK], -1);
    overAllocExpression.lower(-cJobITimeK); // >=
}
3.68
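A note on the constraint built in the snippet above (a restatement of its comment, not new model semantics): ojAlgo's lower(-cJobITimeK) places a lower bound on the whole expression, so the model encodes

    oa_{i,k} - x_k >= -c_{i,k}   <=>   oa_{i,k} >= x_k - c_{i,k}

i.e. the over-allocation variable must cover however much the predicted allocation x_k exceeds the actual allocation c_{i,k}.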
hbase_ColumnFamilyDescriptorBuilder_setIndexBlockEncoding
/** * Set index block encoding algorithm used in block cache. * @param type What kind of index block encoding will be used. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setIndexBlockEncoding(IndexBlockEncoding type) { return setValue(INDEX_BLOCK_ENCODING_BYTES, type == null ? IndexBlockEncoding.NONE.name() : type.name()); }
3.68
hibernate-validator_ConstraintHelper_findValidatorDescriptors
/** * Returns those validator descriptors for the given constraint annotation * matching the given target. * * @param annotationType The annotation of interest. * @param validationTarget The target, either annotated element or parameters. * @param <A> the type of the annotation * * @return A list with matching validator descriptors. */ public <A extends Annotation> List<ConstraintValidatorDescriptor<A>> findValidatorDescriptors(Class<A> annotationType, ValidationTarget validationTarget) { return getAllValidatorDescriptors( annotationType ).stream() .filter( d -> supportsValidationTarget( d, validationTarget ) ) .collect( Collectors.toList() ); }
3.68
hbase_HRegionServer_abort
/**
 * Cause the server to exit without closing the regions it is serving, the log it is using and
 * without notifying the master. Used for unit testing and on catastrophic events such as HDFS
 * being yanked out from under hbase or we OOME.
 * @param reason the reason we are aborting
 * @param cause the exception that caused the abort, or null
 */
@Override
public void abort(String reason, Throwable cause) {
    if (!setAbortRequested()) {
        // Abort already in progress, ignore the new request.
        LOG.debug("Abort already in progress. Ignoring the current request with reason: {}", reason);
        return;
    }
    String msg = "***** ABORTING region server " + this + ": " + reason + " *****";
    if (cause != null) {
        LOG.error(HBaseMarkers.FATAL, msg, cause);
    } else {
        LOG.error(HBaseMarkers.FATAL, msg);
    }
    // HBASE-4014: show list of coprocessors that were loaded to help debug
    // regionserver crashes. Note that we're implicitly using
    // java.util.HashSet's toString() method to print the coprocessor names.
    LOG.error(HBaseMarkers.FATAL,
        "RegionServer abort: loaded coprocessors are: " + CoprocessorHost.getLoadedCoprocessors());
    // Try and dump metrics if abort -- might give clue as to how fatal came about....
    try {
        LOG.info("Dump of metrics as JSON on abort: " + DumpRegionServerMetrics.dumpMetrics());
    } catch (MalformedObjectNameException | IOException e) {
        LOG.warn("Failed dumping metrics", e);
    }
    // Do our best to report our abort to the master, but this may not work
    try {
        if (cause != null) {
            msg += "\nCause:\n" + Throwables.getStackTraceAsString(cause);
        }
        // Report to the master but only if we have already registered with the master.
        RegionServerStatusService.BlockingInterface rss = rssStub;
        if (rss != null && this.serverName != null) {
            ReportRSFatalErrorRequest.Builder builder = ReportRSFatalErrorRequest.newBuilder();
            builder.setServer(ProtobufUtil.toServerName(this.serverName));
            builder.setErrorMessage(msg);
            rss.reportRSFatalError(null, builder.build());
        }
    } catch (Throwable t) {
        LOG.warn("Unable to report fatal error to master", t);
    }
    scheduleAbortTimer();
    // shutdown should be run as the internal user
    stop(reason, true, null);
}
3.68
morf_TableOutputter_outputExampleData
/**
 * Outputs the example data rows.
 *
 * @param numberOfExamples to output
 * @param workSheet to add the data rows to
 * @param table to get metadata from
 * @param startRow to start adding the example rows at
 * @param records to add as examples
 * @return the new row to carry on outputting at
 * @throws WriteException if any of the writes to workSheet fail
 */
private int outputExampleData(final Integer numberOfExamples, WritableSheet workSheet, Table table, final int startRow, Iterable<Record> records) throws WriteException {
    int currentRow = startRow;
    int rowsOutput = 0;
    for (Record record : records) {
        if (currentRow >= MAX_EXCEL_ROWS) {
            continue;
        }
        if (numberOfExamples != null && rowsOutput >= numberOfExamples) {
            // Need to continue the loop rather than break as we need to close
            // the connection which happens at the end of iteration...
            continue;
        }
        record(currentRow, workSheet, table, record);
        rowsOutput++;
        currentRow++;
    }
    if (currentRow >= MAX_EXCEL_ROWS) {
        // This is a fix for WEB-56074. It will be removed if/when WEB-42351 is developed.
        throw new RowLimitExceededException("Output for table '" + table.getName()
            + "' exceeds the maximum number of rows (" + MAX_EXCEL_ROWS
            + ") in an Excel worksheet. It will be truncated.");
    }
    currentRow++;
    return currentRow;
}
3.68
graphhopper_LandmarkStorage_setMaximumWeight
/** * Specify the maximum possible value for your used area. With this maximum weight value you can influence the storage * precision for the weights that help A* find its way to the goal. The same value is used for all subnetworks. * Note: if you pick this value too high, too-similar weights are stored (some bits of the storage capacity will be left unused); * if too low, far-away values will be clamped to the same maximum ("maxed out"). * Both lead to bad performance. * * @param maxWeight use a negative value to automatically determine this value. */ public LandmarkStorage setMaximumWeight(double maxWeight) { if (maxWeight > 0) { this.factor = maxWeight / PRECISION; if (Double.isInfinite(factor) || Double.isNaN(factor)) throw new IllegalStateException("Illegal factor " + factor + " calculated from maximum weight " + maxWeight); } return this; }
3.68
framework_AbstractMedia_pause
/** * Pauses the media. */ public void pause() { getRpcProxy(MediaControl.class).pause(); }
3.68
hmily_PropertyName_isIndexed
/** * Whether the element at the given index is an indexed element (i.e., a list/array index). */ private boolean isIndexed(final int index) { String element = getElement(index); return isIndexed(element); }
3.68
framework_Table_setSortDisabled
/** * Disables the sorting by the user altogether. * * @param sortDisabled * True if sorting is disabled. * @deprecated As of 7.0, use {@link #setSortEnabled(boolean)} instead */ @Deprecated public void setSortDisabled(boolean sortDisabled) { setSortEnabled(!sortDisabled); }
3.68
framework_ApplicationConfiguration_isQuietDebugMode
/** * Checks whether debug logging should be quiet. * * @return <code>true</code> if debug logging should be quiet */ public static boolean isQuietDebugMode() { String debugParameter = Window.Location.getParameter("debug"); return isDebugAvailable() && debugParameter != null && debugParameter.startsWith("q"); }
3.68
hadoop_VersionUtil_compareVersions
/** * Compares two version name strings using maven's ComparableVersion class. * * @param version1 * the first version to compare * @param version2 * the second version to compare * @return a negative integer if version1 precedes version2, a positive * integer if version2 precedes version1, and 0 if and only if the two * versions are equal. */ public static int compareVersions(String version1, String version2) { ComparableVersion v1 = new ComparableVersion(version1); ComparableVersion v2 = new ComparableVersion(version2); return v1.compareTo(v2); }
3.68
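A minimal usage sketch for the compareVersions snippet above (hypothetical, not part of the dataset row): it exercises maven's ComparableVersion directly to show the ordering that compareVersions delegates to, assuming org.apache.maven:maven-artifact is on the classpath.

import org.apache.maven.artifact.versioning.ComparableVersion;

public class VersionCompareDemo {
    public static void main(String[] args) {
        // Numeric segments compare numerically, so "1.10" sorts after "1.9".
        System.out.println(new ComparableVersion("1.10").compareTo(new ComparableVersion("1.9")) > 0); // true
        // A SNAPSHOT precedes the corresponding release.
        System.out.println(new ComparableVersion("2.0-SNAPSHOT").compareTo(new ComparableVersion("2.0")) < 0); // true
    }
}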
hbase_Mutation_isReturnResults
/** Returns current value for returnResults */
// Used by Increment and Append only.
@InterfaceAudience.Private
protected boolean isReturnResults() {
    byte[] v = getAttribute(RETURN_RESULTS);
    return v == null ? true : Bytes.toBoolean(v);
}
3.68
framework_EventHelper_updateFocusHandler
/** * Adds or removes a focus handler depending on if the connector has focus * listeners on the server side or not. * * @param connector * The connector to update. Must implement focusHandler. * @param handlerRegistration * The old registration reference or null if no handler has been * registered previously * @param widget * The widget which emits focus events * @return a new registration handler that can be used to unregister the * handler later */ public static <T extends ComponentConnector & FocusHandler> HandlerRegistration updateFocusHandler( T connector, HandlerRegistration handlerRegistration, Widget widget) { return updateHandler(connector, connector, FOCUS, handlerRegistration, FocusEvent.getType(), widget); }
3.68
querydsl_Expressions_arrayPath
/** * Create a new Path expression * * @param arrayType array type * @param metadata path metadata * @param <A> array type * @param <E> element type * @return path expression */ public static <A, E> ArrayPath<A, E> arrayPath(Class<A> arrayType, PathMetadata metadata) { return new ArrayPath<A, E>(arrayType, metadata); }
3.68
hbase_BlockCache_notifyFileCachingCompleted
/**
 * Notifies the cache implementation that the given file has been fully cached (all its blocks
 * made into the cache).
 * @param fileName the file that has been completely cached.
 * @param totalBlockCount the total number of blocks in the file.
 * @param dataBlockCount the number of data blocks in the file.
 * @param size the size of the file.
 */
default void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount,
    long size) {
    // noop
}
3.68
hbase_MasterObserver_postCompletedTruncateTableAction
/** * Called after {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part * of truncate table procedure and it is async to the truncate RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postCompletedTruncateTableAction( final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName) throws IOException { }
3.68
flink_DataStreamStateTTLTestProgram_setBackendWithCustomTTLTimeProvider
/** * Sets the state backend to a new {@link StubStateBackend} which has a {@link * MonotonicTTLTimeProvider}. * * @param env The {@link StreamExecutionEnvironment} of the job. */ private static void setBackendWithCustomTTLTimeProvider(StreamExecutionEnvironment env) { final MonotonicTTLTimeProvider ttlTimeProvider = new MonotonicTTLTimeProvider(); final StateBackend configuredBackend = env.getStateBackend(); final StateBackend stubBackend = new StubStateBackend(configuredBackend, ttlTimeProvider); env.setStateBackend(stubBackend); }
3.68
framework_BindingValidationStatus_getMessage
/** * Gets the validation error message if the status is {@link Status#ERROR}. * * @return an optional validation error message, or an empty optional if the status is not an error */ public Optional<String> getMessage() { if (getStatus() == Status.OK || result == null) { return Optional.empty(); } return result.getMessage(); }
3.68
hudi_HashFunction_hash
/** * Hashes a specified key into several integers. * * @param k The specified key. * @return The array of hashed values. */ public int[] hash(Key k) { byte[] b = k.getBytes(); if (b == null) { throw new NullPointerException("buffer reference is null"); } if (b.length == 0) { throw new IllegalArgumentException("key length must be > 0"); } int[] result = new int[nbHash]; for (int i = 0, initval = 0; i < nbHash; i++) { initval = hashFunction.hash(b, initval); result[i] = Math.abs(initval % maxValue); } return result; }
3.68
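A self-contained sketch of the chained-hashing pattern in the hash snippet above, using a stand-in hash function (the real code delegates to a pluggable hashFunction; the stub below and all values are assumptions for illustration only).

import java.util.Arrays;

public class ChainedHashDemo {
    // Stand-in for the pluggable hash function; seed-chaining is the point here.
    static int stubHash(byte[] b, int seed) {
        int h = seed;
        for (byte x : b) h = 31 * h + x;
        return h;
    }
    public static void main(String[] args) {
        int nbHash = 3, maxValue = 1 << 20;
        byte[] key = "example-key".getBytes();
        int[] result = new int[nbHash];
        for (int i = 0, initval = 0; i < nbHash; i++) {
            initval = stubHash(key, initval);         // feed the previous hash back in as the seed
            result[i] = Math.abs(initval % maxValue); // clamp into the bit-vector range
        }
        System.out.println(Arrays.toString(result));
    }
}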
hbase_NamespaceAuditor_isInitialized
/** * Checks if namespace auditor is initialized. Used only for testing. * @return true, if is initialized */ public boolean isInitialized() { return stateManager.isInitialized(); }
3.68
framework_DateField_setRangeEnd
/**
 * Sets the end range for this component. If the value is set after this
 * date (taking the resolution into account), the component will not
 * validate. If <code>endDate</code> is set to <code>null</code>, any value
 * after <code>startDate</code> will be accepted by the range.
 *
 * @param endDate
 *            - the allowed range's end date (inclusive, based on the
 *            current resolution)
 */
public void setRangeEnd(Date endDate) {
    if (endDate != null && getState().rangeStart != null
            && getState().rangeStart.after(endDate)) {
        throw new IllegalStateException(
                "endDate cannot be earlier than startDate");
    }

    // Create a defensive copy against issues when using java.sql.Date (and
    // also against mutable Date).
    getState().rangeEnd = endDate != null ? new Date(endDate.getTime()) : null;
    updateRangeValidator();
}
3.68
flink_HiveParserSemanticAnalyzer_processPTFSource
/* * - a partitionTableFunctionSource can be a tableReference, a SubQuery or another * PTF invocation. * - For a TABLEREF: set the source to the alias returned by processTable * - For a SubQuery: set the source to the alias returned by processSubQuery * - For a PTF invocation: recursively call processPTFChain. */ private PTFInputSpec processPTFSource(HiveParserQB qb, HiveParserASTNode inputNode) throws SemanticException { PTFInputSpec qInSpec = null; int type = inputNode.getType(); String alias; switch (type) { case HiveASTParser.TOK_TABREF: alias = processTable(qb, inputNode); qInSpec = new PTFQueryInputSpec(); ((PTFQueryInputSpec) qInSpec).setType(PTFQueryInputType.TABLE); ((PTFQueryInputSpec) qInSpec).setSource(alias); break; case HiveASTParser.TOK_SUBQUERY: alias = processSubQuery(qb, inputNode); qInSpec = new PTFQueryInputSpec(); ((PTFQueryInputSpec) qInSpec).setType(PTFQueryInputType.SUBQUERY); ((PTFQueryInputSpec) qInSpec).setSource(alias); break; case HiveASTParser.TOK_PTBLFUNCTION: qInSpec = processPTFChain(qb, inputNode); break; default: throw new SemanticException( HiveParserUtils.generateErrorMessage( inputNode, "Unknown input type to PTF")); } qInSpec.setAstNode(inputNode); return qInSpec; }
3.68
hbase_Superusers_initialize
/**
 * Should be called only once to pre-load list of super users and super groups from Configuration.
 * This operation is idempotent.
 * @param conf configuration to load users from
 * @throws IOException if unable to initialize lists of superusers or super groups
 * @throws IllegalStateException if current user is null
 */
public static void initialize(Configuration conf) throws IOException {
    ImmutableSet.Builder<String> superUsersBuilder = ImmutableSet.builder();
    ImmutableSet.Builder<String> superGroupsBuilder = ImmutableSet.builder();
    systemUser = User.getCurrent();
    if (systemUser == null) {
        throw new IllegalStateException("Unable to obtain the current user, "
            + "authorization checks for internal operations will not work correctly!");
    }
    String currentUser = systemUser.getShortName();
    LOG.trace("Current user name is {}", currentUser);
    superUsersBuilder.add(currentUser);
    String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
    for (String name : superUserList) {
        if (AuthUtil.isGroupPrincipal(name)) {
            // Let's keep the '@' for distinguishing from user.
            superGroupsBuilder.add(name);
        } else {
            superUsersBuilder.add(name);
        }
    }
    superUsers = superUsersBuilder.build();
    superGroups = superGroupsBuilder.build();
}
3.68
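A minimal sketch of the user/group partitioning performed in the initialize snippet above, with a stand-in isGroupPrincipal check (HBase's AuthUtil treats names starting with '@' as group principals; the class, names, and inputs below are hypothetical).

import java.util.HashSet;
import java.util.Set;

public class SuperusersDemo {
    // Stand-in for AuthUtil.isGroupPrincipal.
    static boolean isGroupPrincipal(String name) { return name.startsWith("@"); }
    public static void main(String[] args) {
        Set<String> users = new HashSet<>(), groups = new HashSet<>();
        for (String name : new String[] {"alice", "@admins", "bob"}) {
            if (isGroupPrincipal(name)) groups.add(name); // keep the '@' to distinguish groups
            else users.add(name);
        }
        System.out.println(users);  // e.g. [alice, bob] (iteration order unspecified)
        System.out.println(groups); // [@admins]
    }
}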
hbase_Get_setCacheBlocks
/** * Set whether blocks should be cached for this Get. * <p> * This is true by default. When true, default settings of the table and family are used (this * will never override caching blocks if the block cache is disabled for that family or entirely). * @param cacheBlocks if false, default settings are overridden and blocks will not be cached */ public Get setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; return this; }
3.68
morf_AbstractSqlDialectTest_expectedLower
/** * @return The expected SQL for the LOWER function. */ protected String expectedLower() { return "SELECT LOWER(field1) FROM " + tableName("schedule"); }
3.68
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_checkPrecondition
/** * TODO maybe add force config to schedule the clustering. It could allow clustering on partitions that are not receiving writes. * Blocks clustering if there are any ongoing concurrent writers. * * @return true if the schedule can proceed */ @Override public boolean checkPrecondition() { HoodieTimeline timeline = getHoodieTable().getActiveTimeline().getDeltaCommitTimeline().filterInflightsAndRequested(); if (!timeline.empty()) { LOG.warn("When using consistent bucket, clustering cannot be scheduled async if there are concurrent writers. " + "Writer instant: {}.", timeline.getInstants()); return false; } return true; }
3.68
framework_VaadinSession_getState
/** * Returns the lifecycle state of this session. * * @since 7.2 * @return the current state */ public State getState() { assert hasLock(); return state; }
3.68
hbase_ClusterStatusTracker_toByteArray
/** Returns Content of the clusterup znode as a serialized pb with the pb magic as prefix. */ static byte[] toByteArray() { ZooKeeperProtos.ClusterUp.Builder builder = ZooKeeperProtos.ClusterUp.newBuilder(); builder.setStartDate(new java.util.Date().toString()); return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); }
3.68
hadoop_CommitUtilsWithMR_getMagicTaskAttemptPath
/** * Compute the path where the output of a task attempt is stored until * that task is committed. * This path is marked as a base path for relocations, so subdirectory * information is preserved. * @param context the context of the task attempt. * @param jobUUID unique Job ID. * @param dest The output path to commit work into * @return the path where a task attempt should be stored. */ public static Path getMagicTaskAttemptPath(TaskAttemptContext context, String jobUUID, Path dest) { return new Path(getBaseMagicTaskAttemptPath(context, jobUUID, dest), BASE); }
3.68
hbase_HFileReaderImpl_getMetaBlock
/**
 * @param cacheBlock Add block to cache, if found
 * @return block wrapped in a ByteBuffer, with header skipped
 */
@Override
public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException {
    if (trailer.getMetaIndexCount() == 0) {
        return null; // there are no meta blocks
    }
    if (metaBlockIndexReader == null) {
        throw new IOException(path + " meta index not loaded");
    }
    byte[] mbname = Bytes.toBytes(metaBlockName);
    int block = metaBlockIndexReader.rootBlockContainingKey(mbname, 0, mbname.length);
    if (block == -1) {
        return null;
    }
    long blockSize = metaBlockIndexReader.getRootBlockDataSize(block);
    // Per meta key from any given file, synchronize reads for said block. This
    // is OK to do for meta blocks because the meta block index is always
    // single-level.
    synchronized (metaBlockIndexReader.getRootBlockKey(block)) {
        // Check cache for block. If found return.
        long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
        BlockCacheKey cacheKey =
            new BlockCacheKey(name, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META);
        cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory());
        HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, false, true, BlockType.META, null);
        if (cachedBlock != null) {
            assert cachedBlock.isUnpacked() : "Packed block leak.";
            // Return a distinct 'shallow copy' of the block,
            // so pos does not get messed by the scanner
            return cachedBlock;
        }
        // Cache Miss, please load.
        HFileBlock compressedBlock =
            fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false, true);
        HFileBlock uncompressedBlock = compressedBlock.unpack(hfileContext, fsBlockReader);
        if (compressedBlock != uncompressedBlock) {
            compressedBlock.release();
        }
        // Cache the block
        if (cacheBlock) {
            cacheConf.getBlockCache().ifPresent(
                cache -> cache.cacheBlock(cacheKey, uncompressedBlock, cacheConf.isInMemory()));
        }
        return uncompressedBlock;
    }
}
3.68
flink_EnrichedRowData_replaceMutableRow
/** * Replaces the mutable {@link RowData} backing this {@link EnrichedRowData}. * * <p>This method replaces the mutable row data in place and does not return a new object. This * is done for performance reasons. */ public EnrichedRowData replaceMutableRow(RowData mutableRow) { this.mutableRow = mutableRow; return this; }
3.68
hbase_DateTieredCompactionPolicy_needsCompaction
/** * Heuristics for guessing whether we need minor compaction. */ @Override @InterfaceAudience.Private public boolean needsCompaction(Collection<HStoreFile> storeFiles, List<HStoreFile> filesCompacting) { ArrayList<HStoreFile> candidates = new ArrayList<>(storeFiles); try { return !selectMinorCompaction(candidates, false, true).getFiles().isEmpty(); } catch (Exception e) { LOG.error("Can not check for compaction: ", e); return false; } }
3.68
hadoop_RegistryPathUtils_getUsername
/** * Return the username found in the ZK path. * * @param recPath the ZK recPath. * @return the user name. */ public static String getUsername(String recPath) { String user = "anonymous"; Matcher matcher = USER_NAME.matcher(recPath); if (matcher.find()) { user = matcher.group(1); } return user; }
3.68
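A sketch of the path-username extraction shown in the getUsername snippet above. The USER_NAME pattern here is an assumption ("/users/<name>/..."-style registry paths), not the actual Hadoop constant; the class and input path are hypothetical.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class UsernameDemo {
    // Assumed stand-in for the real USER_NAME pattern.
    private static final Pattern USER_NAME = Pattern.compile("/users/([a-z][a-z0-9._-]*)");
    public static void main(String[] args) {
        String user = "anonymous"; // same fallback as the snippet
        Matcher matcher = USER_NAME.matcher("/registry/users/alice/services/app");
        if (matcher.find()) {
            user = matcher.group(1);
        }
        System.out.println(user); // alice
    }
}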
hibernate-validator_TypeHelper_getErasedType
/**
 * Gets the erased type of the specified type.
 *
 * @param type the type to perform erasure on
 *
 * @return the erased type, never a parameterized type nor a type variable
 *
 * @see <a href="http://docs.oracle.com/javase/specs/jls/se8/html/jls-4.html#jls-4.6">4.6 Type Erasure</a>
 */
public static Type getErasedType(Type type) {
    // the erasure of a parameterized type G<T1, ... ,Tn> is |G|
    if ( type instanceof ParameterizedType ) {
        Type rawType = ( (ParameterizedType) type ).getRawType();
        return getErasedType( rawType );
    }
    // TODO: the erasure of a nested type T.C is |T|.C
    // the erasure of an array type T[] is |T|[]
    if ( isArray( type ) ) {
        Type componentType = getComponentType( type );
        Type erasedComponentType = getErasedType( componentType );
        return getArrayType( erasedComponentType );
    }
    // the erasure of a type variable is the erasure of its leftmost bound
    if ( type instanceof TypeVariable<?> ) {
        Type[] bounds = ( (TypeVariable<?>) type ).getBounds();
        return getErasedType( bounds[0] );
    }
    // the erasure of a wildcard type is the erasure of its leftmost upper bound
    if ( type instanceof WildcardType ) {
        Type[] upperBounds = ( (WildcardType) type ).getUpperBounds();
        return getErasedType( upperBounds[0] );
    }
    // the erasure of every other type is the type itself
    return type;
}
3.68
pulsar_BaseResource_requestAsync
// do the authentication stage, and once authentication completed return a Builder
public CompletableFuture<Builder> requestAsync(final WebTarget target) {
    CompletableFuture<Builder> builderFuture = new CompletableFuture<>();
    CompletableFuture<Map<String, String>> authFuture = new CompletableFuture<>();
    try {
        AuthenticationDataProvider authData = auth.getAuthData(target.getUri().getHost());
        if (authData.hasDataForHttp()) {
            auth.authenticationStage(target.getUri().toString(), authData, null, authFuture);
        } else {
            authFuture.complete(null);
        }
        // auth complete, return a new Builder
        authFuture.whenComplete((respHeaders, ex) -> {
            if (ex != null) {
                log.warn("[{}] Failed to perform http request at auth stage: {}",
                        target.getUri(), ex.getMessage());
                builderFuture.completeExceptionally(new PulsarClientException(ex));
                return;
            }
            try {
                Builder builder = target.request(MediaType.APPLICATION_JSON);
                if (authData.hasDataForHttp()) {
                    Set<Entry<String, String>> headers =
                            auth.newRequestHeader(target.getUri().toString(), authData, respHeaders);
                    if (headers != null) {
                        headers.forEach(entry -> builder.header(entry.getKey(), entry.getValue()));
                    }
                }
                builderFuture.complete(builder);
            } catch (Throwable t) {
                builderFuture.completeExceptionally(new GettingAuthenticationDataException(t));
            }
        });
    } catch (Throwable t) {
        builderFuture.completeExceptionally(new GettingAuthenticationDataException(t));
    }
    return builderFuture;
}
3.68
AreaShop_TeleportFeature_teleportPlayer
/** * Teleport a player to the region when they have permission for it. * @param player Player that should be teleported * @return true if the teleport succeeded, otherwise false */ public boolean teleportPlayer(Player player) { return teleportPlayer(player, false, true); }
3.68
hbase_HMaster_getLoadedCoprocessors
/** * The set of loaded coprocessors is stored in a static set. Since it's statically allocated, it * does not require that HMaster's cpHost be initialized prior to accessing it. * @return a String representation of the set of names of the loaded coprocessors. */ public static String getLoadedCoprocessors() { return CoprocessorHost.getLoadedCoprocessors().toString(); }
3.68
flink_DeltaIteration_registerAggregationConvergenceCriterion
/** * Registers an {@link Aggregator} for the iteration together with a {@link * ConvergenceCriterion}. For a general description of aggregators, see {@link * #registerAggregator(String, Aggregator)} and {@link Aggregator}. At the end of each * iteration, the convergence criterion takes the aggregator's global aggregate value and * decides whether the iteration should terminate. A typical use case is to have an aggregator * that sums up the total error of change in an iteration step and a convergence * criterion that signals termination as soon as the aggregate value is below a certain * threshold. * * @param name The name under which the aggregator is registered. * @param aggregator The aggregator class. * @param convergenceCheck The convergence criterion. * @return The DeltaIteration itself, to allow chaining function calls. */ @PublicEvolving public <X extends Value> DeltaIteration<ST, WT> registerAggregationConvergenceCriterion( String name, Aggregator<X> aggregator, ConvergenceCriterion<X> convergenceCheck) { this.aggregators.registerAggregationConvergenceCriterion( name, aggregator, convergenceCheck); return this; }
3.68
pulsar_AuthenticationProviderOpenID_validateAllowedAudiences
/** * Validate the configured allow list of allowedAudiences. The allowedAudiences must be set because * JWT must have an audience claim. * See https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation. * @param allowedAudiences * @return the validated audiences */ String[] validateAllowedAudiences(Set<String> allowedAudiences) { if (allowedAudiences == null || allowedAudiences.isEmpty()) { throw new IllegalArgumentException("Missing configured value for: " + ALLOWED_AUDIENCES); } return allowedAudiences.toArray(new String[0]); }
3.68
flink_QuickSort_getMaxDepth
/** Deepest recursion before giving up and doing a heapsort. Returns 4 * ceil(log2(n)). */ protected static int getMaxDepth(int x) { if (x <= 0) { throw new IllegalArgumentException("Undefined for " + x); } return (32 - Integer.numberOfLeadingZeros(x - 1)) << 2; }
3.68
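A self-contained sketch verifying the depth bound computed in the getMaxDepth snippet above (the driver class is hypothetical; the method body is copied from the row).

public class MaxDepthDemo {
    static int getMaxDepth(int x) {
        if (x <= 0) throw new IllegalArgumentException("Undefined for " + x);
        // 32 - numberOfLeadingZeros(x - 1) is ceil(log2(x)); << 2 multiplies by 4
        return (32 - Integer.numberOfLeadingZeros(x - 1)) << 2;
    }
    public static void main(String[] args) {
        System.out.println(getMaxDepth(1));    // 0
        System.out.println(getMaxDepth(2));    // 4
        System.out.println(getMaxDepth(1024)); // 40, i.e. 4 * log2(1024)
    }
}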
morf_NamedParameterPreparedStatement_createFor
/** * Create the prepared statement against the specified connection. * * @param connection the connection * @return the prepared statement. * @throws SQLException if the statement could not be created */ public NamedParameterPreparedStatement createFor(Connection connection) throws SQLException { return new NamedParameterPreparedStatement(connection, query, indexMap, false, this); }
3.68
hbase_MobUtils_createWriter
/** * Creates a writer for the mob file in temp directory. * @param conf The current configuration. * @param fs The current file system. * @param family The descriptor of the current column family. * @param path The path for a temp directory. * @param maxKeyCount The key count. * @param compression The compression algorithm. * @param cacheConfig The current cache config. * @param cryptoContext The encryption context. * @param checksumType The checksum type. * @param bytesPerChecksum The bytes per checksum. * @param blocksize The HFile block size. * @param bloomType The bloom filter type. * @param isCompaction If the writer is used in compaction. * @param writerCreationTracker to track the current writer in the store * @return The writer for the mob file. */ public static StoreFileWriter createWriter(Configuration conf, FileSystem fs, ColumnFamilyDescriptor family, Path path, long maxKeyCount, Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext, ChecksumType checksumType, int bytesPerChecksum, int blocksize, BloomType bloomType, boolean isCompaction, Consumer<Path> writerCreationTracker) throws IOException { if (compression == null) { compression = HFile.DEFAULT_COMPRESSION_ALGORITHM; } final CacheConfig writerCacheConf; if (isCompaction) { writerCacheConf = new CacheConfig(cacheConfig); writerCacheConf.setCacheDataOnWrite(false); } else { writerCacheConf = cacheConfig; } HFileContext hFileContext = new HFileContextBuilder().withCompression(compression) .withIncludesMvcc(true).withIncludesTags(true).withCompressTags(family.isCompressTags()) .withChecksumType(checksumType).withBytesPerCheckSum(bytesPerChecksum) .withBlockSize(blocksize).withHBaseCheckSum(true) .withDataBlockEncoding(family.getDataBlockEncoding()).withEncryptionContext(cryptoContext) .withCreateTime(EnvironmentEdgeManager.currentTime()).build(); StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs).withFilePath(path) .withBloomType(bloomType).withMaxKeyCount(maxKeyCount).withFileContext(hFileContext) .withWriterCreationTracker(writerCreationTracker).build(); return w; }
3.68
dubbo_NetUtils_isInvalidPort
/** * Tells whether the port to test is an invalid port. * * @implNote Numeric comparison only. * @param port port to test * @return true if invalid */ public static boolean isInvalidPort(int port) { return port < MIN_PORT || port > MAX_PORT; }
3.68
framework_DateField_getRangeStart
/** * Returns the precise rangeStart used. * * @return the precise rangeStart used */ public Date getRangeStart() { return getState(false).rangeStart; }
3.68
hadoop_MemoryPlacementConstraintManager_addConstraintToMap
/** * Helper method that adds a constraint to a map for a given source tag. * Assumes there is already a lock on the constraint map. * * @param constraintMap constraint map to which the constraint will be added * @param sourceTags the source tags that will enable this constraint * @param placementConstraint the new constraint to be added * @param replace if true, an existing constraint for these sourceTags will be * replaced with the new one */ private void addConstraintToMap( Map<String, PlacementConstraint> constraintMap, Set<String> sourceTags, PlacementConstraint placementConstraint, boolean replace) { if (validateConstraint(sourceTags, placementConstraint)) { String sourceTag = getValidSourceTag(sourceTags); if (constraintMap.get(sourceTag) == null || replace) { if (replace) { LOG.info("Replacing the constraint associated with tag {} with {}.", sourceTag, placementConstraint); } constraintMap.put(sourceTag, placementConstraint); } else { LOG.info("Constraint {} will not be added. There is already a " + "constraint associated with tag {}.", placementConstraint, sourceTag); } } }
3.68
dubbo_DubboBootstrap_await
/**
 * Block the current thread until {@code awaited} is set.
 *
 * @return {@link DubboBootstrap}
 */
public DubboBootstrap await() {
    // if has been waited, no need to wait again, return immediately
    if (!awaited.get()) {
        if (!isStopped()) {
            executeMutually(() -> {
                while (!awaited.get()) {
                    if (logger.isInfoEnabled()) {
                        logger.info(NAME + " awaiting ...");
                    }
                    try {
                        condition.await();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }
            });
        }
    }
    return this;
}
3.68
Activiti_BaseEntityEventListener_isValidEvent
/** * @return true, if the event is an {@link ActivitiEntityEvent} and (if needed) the entityClass set on this instance is assignable from the entity class in the event. */ protected boolean isValidEvent(ActivitiEvent event) { boolean valid = false; if (event instanceof ActivitiEntityEvent) { if (entityClass == null) { valid = true; } else { valid = entityClass.isAssignableFrom(((ActivitiEntityEvent) event).getEntity().getClass()); } } return valid; }
3.68
hbase_MasterObserver_preRecommissionRegionServer
/** * Called before recommissioning a region server. */ default void preRecommissionRegionServer(ObserverContext<MasterCoprocessorEnvironment> ctx, ServerName server, List<byte[]> encodedRegionNames) throws IOException { }
3.68
hbase_StorageClusterStatusModel_setLiveNodes
/** * @param nodes the list of live node models */ public void setLiveNodes(List<Node> nodes) { this.liveNodes = nodes; }
3.68
querydsl_DateExpression_month
/** * Create a month expression (range 1-12 / JAN-DEC) * * @return month */ public NumberExpression<Integer> month() { if (month == null) { month = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.MONTH, mixin); } return month; }
3.68
hudi_HoodieUnMergedLogRecordScanner_newBuilder
/** * Returns the builder for {@code HoodieUnMergedLogRecordScanner}. */ public static HoodieUnMergedLogRecordScanner.Builder newBuilder() { return new Builder(); }
3.68
hbase_Bytes_toLong
/** * Converts a byte array to a long value. * @param bytes array of bytes * @param offset offset into array * @param length length of data (must be {@link #SIZEOF_LONG}) * @return the long value * @throws IllegalArgumentException if length is not {@link #SIZEOF_LONG} or if there's not enough * room in the array at the offset indicated. */ public static long toLong(byte[] bytes, int offset, final int length) { if (length != SIZEOF_LONG || offset + length > bytes.length) { throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_LONG); } return ConverterHolder.BEST_CONVERTER.toLong(bytes, offset, length); }
3.68
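A stand-alone sketch of what a straightforward byte[]-to-long conversion does. The HBase toLong above dispatches to an optimized converter; the big-endian shift/xor arithmetic below is an assumption about its semantics (it matches HBase's pure-Java fallback), and the demo class is hypothetical.

public class ToLongDemo {
    static long toLong(byte[] bytes, int offset) {
        long l = 0;
        for (int i = offset; i < offset + Long.BYTES; i++) {
            l <<= 8;               // shift in the next big-endian byte
            l ^= bytes[i] & 0xFF;  // mask to avoid sign extension
        }
        return l;
    }
    public static void main(String[] args) {
        byte[] b = {0, 0, 0, 0, 0, 0, 0x3, (byte) 0xE8};
        System.out.println(toLong(b, 0)); // 1000
    }
}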
morf_AbstractSqlDialectTest_testOptimiseForRowCountOnSubquery
/** * Check that we don't allow the use of the optimise for row count hint on a subquery. */ @Test(expected = IllegalArgumentException.class) public void testOptimiseForRowCountOnSubquery() { testDialect.convertStatementToSQL( select().from(select().from("Foo").optimiseForRowCount(1)) ); }
3.68
rocketmq-connect_RecordOffsetManagement_submitRecord
/**
 * Submit a record.
 *
 * @param position the record position
 * @return the submitted position
 */
public SubmittedPosition submitRecord(RecordPosition position) {
    SubmittedPosition submittedPosition = new SubmittedPosition(position);
    records.computeIfAbsent(position.getPartition(), e -> new LinkedList<>()).add(submittedPosition);
    // ensure thread safety in operation
    synchronized (this) {
        numUnackedMessages.incrementAndGet();
    }
    return submittedPosition;
}
3.68
hbase_AuthManager_removeTable
/** * Remove given table from AuthManager's table cache. * @param table table name */ public void removeTable(TableName table) { tableCache.remove(table); }
3.68
flink_StaticFileServerHandler_setDateAndCacheHeaders
/**
 * Sets the "date" and "cache" headers for the HTTP Response.
 *
 * @param response The HTTP response object.
 * @param fileToCache File to extract the modification timestamp from.
 */
public static void setDateAndCacheHeaders(HttpResponse response, File fileToCache) {
    SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US);
    dateFormatter.setTimeZone(GMT_TIMEZONE);

    // date header
    Calendar time = new GregorianCalendar();
    response.headers().set(DATE, dateFormatter.format(time.getTime()));

    // cache headers
    time.add(Calendar.SECOND, HTTP_CACHE_SECONDS);
    response.headers().set(EXPIRES, dateFormatter.format(time.getTime()));
    response.headers().set(CACHE_CONTROL, "private, max-age=" + HTTP_CACHE_SECONDS);
    response.headers()
            .set(LAST_MODIFIED, dateFormatter.format(new Date(fileToCache.lastModified())));
}
3.68
hudi_HoodieHiveUtils_getTimestampWriteable
/** * Get timestamp writeable object from long value. * Hive 3 uses TimestampWritableV2 to build timestamp objects, while Hive 2 uses TimestampWritable, * so the timestamp must be initialized according to the Hive version. */ public static Writable getTimestampWriteable(long value, boolean timestampMillis) { return HIVE_SHIM.getTimestampWriteable(value, timestampMillis); }
3.68
hbase_MetricsMaster_setNumTableInSpaceQuotaViolation
/** * Sets the number of tables in violation of a space quota. * @see MetricsMasterQuotaSource#updateNumTablesInSpaceQuotaViolation(long) */ public void setNumTableInSpaceQuotaViolation(final long numTablesInViolation) { masterQuotaSource.updateNumTablesInSpaceQuotaViolation(numTablesInViolation); }
3.68
flink_PlanReference_fromJsonString
/** Create a reference starting from a JSON string. */ public static PlanReference fromJsonString(String jsonString) { Objects.requireNonNull(jsonString, "Json string cannot be null"); return new ContentPlanReference(jsonString); }
3.68
rocketmq-connect_AvroData_splitName
/** * Split a full dotted-syntax name into a namespace and a single-component name. */ private static String[] splitName(String fullName) { String[] result = new String[2]; int indexLastDot = fullName.lastIndexOf('.'); if (indexLastDot >= 0) { result[0] = fullName.substring(0, indexLastDot); result[1] = fullName.substring(indexLastDot + 1); } else { result[0] = null; result[1] = fullName; } return result; }
3.68
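A minimal usage sketch for the splitName logic above (the driver class is hypothetical; the method body is copied from the row).

public class SplitNameDemo {
    static String[] splitName(String fullName) {
        String[] result = new String[2];
        int indexLastDot = fullName.lastIndexOf('.');
        if (indexLastDot >= 0) {
            result[0] = fullName.substring(0, indexLastDot);   // namespace
            result[1] = fullName.substring(indexLastDot + 1);  // simple name
        } else {
            result[0] = null;
            result[1] = fullName;
        }
        return result;
    }
    public static void main(String[] args) {
        String[] parts = splitName("org.example.MyRecord");
        System.out.println(parts[0]); // org.example
        System.out.println(parts[1]); // MyRecord
    }
}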
hadoop_ConverterUtils_getYarnUrlFromURI
/* * This method is deprecated, use {@link URL#fromURI(URI)} instead. */ @Public @Deprecated public static URL getYarnUrlFromURI(URI uri) { return URL.fromURI(uri); }
3.68
hbase_ColumnFamilyDescriptorBuilder_setEvictBlocksOnClose
/** * Set the setEvictBlocksOnClose flag. * @param value true if we should evict cached blocks from the blockcache on close * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { return setValue(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean.toString(value)); }
3.68
hbase_ReplicationSourceLogQueue_getQueue
/** * Return the queue for the given walGroupId. Please don't add or remove elements from the returned * queue; use {@link #enqueueLog(Path, String)} and {@link #remove(String)} respectively. * @param walGroupId walGroupId */ public PriorityBlockingQueue<Path> getQueue(String walGroupId) { return queues.get(walGroupId); }
3.68
flink_SharingPhysicalSlotRequestBulk_clearPendingRequests
/** * Clear the pending requests. * * <p>The method can be used to make the bulk fulfilled and stop the fulfillability check in * {@link PhysicalSlotRequestBulkChecker}. */ void clearPendingRequests() { pendingRequests.clear(); }
3.68
hadoop_OBSListing_accept
/** * Accept all prefixes except the one for the base path, "self". * * @param keyPath qualified path to the entry * @param prefix common prefix in listing. * @return true if the entry is accepted (i.e. that a status entry should be generated). */ @Override public boolean accept(final Path keyPath, final String prefix) { return !keyPath.equals(qualifiedPath); }
3.68
hbase_SnapshotQuotaObserverChore_getTimeUnit
/** * Extracts the time unit for the chore period and initial delay from the configuration. The * configuration value for {@link #SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY} must correspond to a * {@link TimeUnit} value. * @param conf The configuration object. * @return The configured time unit for the chore period and initial delay or the default value. */ static TimeUnit getTimeUnit(Configuration conf) { return TimeUnit .valueOf(conf.get(SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY, SNAPSHOT_QUOTA_CHORE_TIMEUNIT_DEFAULT)); }
3.68
hadoop_FederationCache_buildGetPoliciesConfigurationsCacheRequest
/** * Build GetPoliciesConfigurations CacheRequest. * * @param cacheKey cacheKey. * @return CacheRequest. * @throws YarnException exceptions from yarn servers. */ protected CacheRequest<String, CacheResponse<SubClusterPolicyConfiguration>> buildGetPoliciesConfigurationsCacheRequest(String cacheKey) throws YarnException { CacheResponse<SubClusterPolicyConfiguration> response = buildSubClusterPolicyConfigurationResponse(); return new CacheRequest<>(cacheKey, response); }
3.68
hbase_MetricsSource_getWALReaderEditsBufferUsage
/** * Returns the amount of memory in bytes used in this RegionServer by edits pending replication. */ public long getWALReaderEditsBufferUsage() { return globalSourceSource.getWALReaderEditsBufferBytes(); }
3.68
hbase_SnapshotManager_takeSnapshot
/** * Take a snapshot based on the enabled/disabled state of the table. * @throws HBaseSnapshotException when a snapshot specific exception occurs. * @throws IOException when some sort of generic IO exception occurs. */ public void takeSnapshot(SnapshotDescription snapshot) throws IOException { this.takingSnapshotLock.readLock().lock(); try { takeSnapshotInternal(snapshot); } finally { this.takingSnapshotLock.readLock().unlock(); } }
3.68
pulsar_ManagedLedgerConfig_setLazyCursorRecovery
/** * Whether to recover cursors lazily when trying to recover a * managed ledger backing a persistent topic. It can improve write availability of topics. * The caveat is that when the recovered ledger is ready for writes, we cannot be sure that every * old consumer's last mark-delete position has been recovered. * @param lazyCursorRecovery whether to enable lazy cursor recovery. */ public ManagedLedgerConfig setLazyCursorRecovery(boolean lazyCursorRecovery) { this.lazyCursorRecovery = lazyCursorRecovery; return this; }
3.68
hadoop_TimelineStateStore_serviceInit
/** * Initialize the state storage * * @param conf the configuration * @throws IOException */ @Override public void serviceInit(Configuration conf) throws IOException { initStorage(conf); }
3.68
hadoop_PathLocation_getDestinations
/** * Get the list of locations found in the mount table. * The first result is the highest priority path. * * @return List of remote locations. */ public List<RemoteLocation> getDestinations() { return Collections.unmodifiableList(this.destinations); }
3.68
flink_ComponentMetricGroup_close
/**
 * Closes the component group by removing and closing all metrics and subgroups (inherited from
 * {@link AbstractMetricGroup}), plus closing and removing all dedicated component subgroups.
 */
@Override
public void close() {
    synchronized (this) {
        if (!isClosed()) {
            // remove all metrics and generic subgroups
            super.close();

            // remove and close all subcomponent metrics
            for (ComponentMetricGroup group : subComponents()) {
                group.close();
            }
        }
    }
}
3.68
hadoop_RLESparseResourceAllocation_getCapacityAtTime
/** * Returns the capacity, i.e. total resources allocated at the specified point * of time. * * @param tick timestamp at which the resource capacity needs to be known * @return the resources allocated at the specified time */ public Resource getCapacityAtTime(long tick) { readLock.lock(); try { Entry<Long, Resource> closestStep = cumulativeCapacity.floorEntry(tick); if (closestStep != null) { return Resources.clone(closestStep.getValue()); } return Resources.clone(ZERO_RESOURCE); } finally { readLock.unlock(); } }
3.68
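A stand-alone sketch of the floorEntry lookup used in getCapacityAtTime above, with a TreeMap as the cumulative-capacity timeline and resource values simplified to ints (the class, timestamps, and values are hypothetical).

import java.util.Map;
import java.util.TreeMap;

public class FloorEntryDemo {
    public static void main(String[] args) {
        TreeMap<Long, Integer> cumulativeCapacity = new TreeMap<>();
        cumulativeCapacity.put(0L, 0);
        cumulativeCapacity.put(10L, 4); // 4 containers allocated from t=10
        cumulativeCapacity.put(20L, 1); // drops back to 1 at t=20
        // floorEntry returns the step in effect at the queried time
        Map.Entry<Long, Integer> closestStep = cumulativeCapacity.floorEntry(15L);
        System.out.println(closestStep.getValue()); // 4
    }
}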
hudi_SimpleBloomFilter_write
// @Override
public void write(DataOutput out) throws IOException {
    out.write(getUTF8Bytes(filter.toString()));
}
3.68
open-banking-gateway_DateTimeFormatConfig_addFormatters
/** * Swagger-codegen is not able to produce @DateTimeFormat annotation: * https://github.com/swagger-api/swagger-codegen/issues/1235 * https://github.com/swagger-api/swagger-codegen/issues/4113 * To fix this - forcing formatters globally. */ @Override public void addFormatters(FormatterRegistry registry) { DateTimeFormatterRegistrar registrar = new DateTimeFormatterRegistrar(); registrar.setUseIsoFormat(true); registrar.registerFormatters(registry); }
3.68
flink_RocksDBStateBackend_setNumberOfTransferThreads
/** * Sets the number of threads used to transfer files while snapshotting/restoring. * * @param numberOfTransferThreads The number of threads used to transfer files while * snapshotting/restoring. */ public void setNumberOfTransferThreads(int numberOfTransferThreads) { rocksDBStateBackend.setNumberOfTransferThreads(numberOfTransferThreads); }
3.68
rocketmq-connect_JsonConverterConfig_cacheSize
/** * Return the cache size. * * @return the cache size */ public int cacheSize() { return cacheSize; }
3.68
streampipes_StreamRequirementsBuilder_requiredProperty
/** * Sets a new property requirement, e.g., a property of a specific data type or with specific semantics * a data stream that is connected to this pipeline element must provide. * * @param propertyRequirement The property requirement. * Use {@link org.apache.streampipes.sdk.helpers.EpRequirements} to * create a new requirement. * @return this */ public StreamRequirementsBuilder requiredProperty(EventProperty propertyRequirement) { this.eventProperties.add(propertyRequirement); return this; }
3.68
framework_CalendarDropHandler_getApplicationConnection
/* * (non-Javadoc) * * @see com.vaadin.terminal.gwt.client.ui.dd.VDropHandler# * getApplicationConnection () */ @Override public ApplicationConnection getApplicationConnection() { return calendarConnector.getClient(); }
3.68
hadoop_PathLocation_getDestinationOrder
/** * Get the order for the destinations. * * @return Order for the destinations. */ public DestinationOrder getDestinationOrder() { return this.destOrder; }
3.68
flink_FileChannelMemoryMappedBoundedData_close
/**
 * Closes the file and unmaps all memory mapped regions. After calling this method, access to
 * any ByteBuffer obtained from this instance will cause a segmentation fault.
 */
public void close() throws IOException {
    IOUtils.closeQuietly(fileChannel);

    for (ByteBuffer bb : memoryMappedRegions) {
        PlatformDependent.freeDirectBuffer(bb);
    }
    memoryMappedRegions.clear();

    // To make this compatible with all versions of Windows, we must wait with
    // deleting the file until it is unmapped.
    // See also
    // https://stackoverflow.com/questions/11099295/file-flag-delete-on-close-and-memory-mapped-files/51649618#51649618
    Files.delete(filePath);
}
3.68
framework_AbstractComponent_writeDesign
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.ui.Component#writeDesign(org.jsoup.nodes.Element,
 * com.vaadin.ui.declarative.DesignContext)
 */
@Override
public void writeDesign(Element design, DesignContext designContext) {
    AbstractComponent def = designContext.getDefaultInstance(this);
    Attributes attr = design.attributes();
    // handle default attributes
    for (String attribute : getDefaultAttributes()) {
        DesignAttributeHandler.writeAttribute(this, attribute, attr, def,
                designContext);
    }
    // handle locale
    if (getLocale() != null && (getParent() == null
            || !getLocale().equals(getParent().getLocale()))) {
        design.attr("locale", getLocale().toString());
    }
    // handle size
    writeSize(attr, def);
    // handle component error
    String errorMsg = getComponentError() != null
            ? getComponentError().getFormattedHtmlMessage()
            : null;
    String defErrorMsg = def.getComponentError() != null
            ? def.getComponentError().getFormattedHtmlMessage()
            : null;
    if (!SharedUtil.equals(errorMsg, defErrorMsg)) {
        attr.put("error", errorMsg);
    }
    // handle tab index
    if (this instanceof Focusable) {
        DesignAttributeHandler.writeAttribute("tabindex", attr,
                ((Focusable) this).getTabIndex(),
                ((Focusable) def).getTabIndex(), Integer.class,
                designContext);
    }
    // handle custom attributes
    Map<String, String> customAttributes = designContext
            .getCustomAttributes(this);
    if (customAttributes != null) {
        for (Entry<String, String> entry : customAttributes.entrySet()) {
            attr.put(entry.getKey(), entry.getValue());
        }
    }
}
3.68
flink_Tuple1_setFields
/** * Sets new values to all fields of the tuple. * * @param f0 The value for field 0 */ public void setFields(T0 f0) { this.f0 = f0; }
3.68
framework_DataCommunicator_onRequestRows
/** * Request the given rows to be available on the client side. * * @param firstRowIndex * the index of the first requested row * @param numberOfRows * the number of requested rows * @param firstCachedRowIndex * the index of the first cached row * @param cacheSize * the number of cached rows * @since 8.0.6 */ protected void onRequestRows(int firstRowIndex, int numberOfRows, int firstCachedRowIndex, int cacheSize) { if (numberOfRows > getMaximumAllowedRows()) { throw new IllegalStateException( "Client tried fetch more rows than allowed. This is denied to prevent denial of service."); } setPushRows(Range.withLength(firstRowIndex, numberOfRows)); markAsDirty(); }
3.68
querydsl_CollectionExpressionBase_isEmpty
/** * Create a {@code this.isEmpty()} expression * * <p>Evaluates to true, if this has no elements.</p> * * @return this.isEmpty() */ public final BooleanExpression isEmpty() { if (empty == null) { empty = Expressions.booleanOperation(Ops.COL_IS_EMPTY, mixin); } return empty; }
3.68
hadoop_TypedBytesInput_readRawMap
/** * Reads the raw bytes following a <code>Type.MAP</code> code. * @return the obtained bytes sequence * @throws IOException */ public byte[] readRawMap() throws IOException { Buffer buffer = new Buffer(); int length = readMapHeader(); buffer.append(new byte[] { (byte) Type.MAP.code, (byte) (0xff & (length >> 24)), (byte) (0xff & (length >> 16)), (byte) (0xff & (length >> 8)), (byte) (0xff & length) }); for (int i = 0; i < length; i++) { buffer.append(readRaw()); buffer.append(readRaw()); } return buffer.get(); }
3.68
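A small sketch of the header byte-packing used in readRawMap above: one type code followed by the length as 4 big-endian bytes. The type-code value and the demo class are assumptions for illustration; only the shift/mask arithmetic is taken from the snippet.

public class HeaderPackDemo {
    public static void main(String[] args) {
        int length = 0x01020304;
        byte[] header = {
            (byte) 0x0A,                     // stand-in type code (assumed value)
            (byte) (0xff & (length >> 24)),  // most significant byte first
            (byte) (0xff & (length >> 16)),
            (byte) (0xff & (length >> 8)),
            (byte) (0xff & length)
        };
        for (byte b : header) System.out.printf("%02x ", b); // 0a 01 02 03 04
    }
}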
flink_InputChannel_increaseBackoff
/**
 * Increases the current backoff and returns whether the operation was successful.
 *
 * @return <code>true</code>, iff the operation was successful. Otherwise, <code>false</code>.
 */
protected boolean increaseBackoff() {
    // Backoff is disabled
    if (initialBackoff == 0) {
        return false;
    }

    if (currentBackoff == 0) {
        // This is the first time backing off
        currentBackoff = initialBackoff;
        return true;
    }
    // Continue backing off
    else if (currentBackoff < maxBackoff) {
        currentBackoff = Math.min(currentBackoff * 2, maxBackoff);
        return true;
    }

    // Reached maximum backoff
    return false;
}
3.68
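A stand-alone sketch of the exponential backoff progression implemented in increaseBackoff above. The field names mirror the snippet; the driver loop, class, and concrete values are hypothetical.

public class BackoffDemo {
    public static void main(String[] args) {
        int initialBackoff = 100, maxBackoff = 2000, currentBackoff = 0;
        while (true) {
            if (initialBackoff == 0) break;                  // backoff disabled
            if (currentBackoff == 0) {
                currentBackoff = initialBackoff;             // first time backing off
            } else if (currentBackoff < maxBackoff) {
                currentBackoff = Math.min(currentBackoff * 2, maxBackoff); // double, capped
            } else {
                break;                                       // reached maximum backoff
            }
            System.out.println(currentBackoff);              // 100, 200, 400, 800, 1600, 2000
        }
    }
}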
hudi_HoodieKeyLookupHandle_getLookupResult
/** * Of all the keys, that were added, return a list of keys that were actually found in the file group. */ public HoodieKeyLookupResult getLookupResult() { if (LOG.isDebugEnabled()) { LOG.debug("#The candidate row keys for " + partitionPathFileIDPair + " => " + candidateRecordKeys); } HoodieBaseFile baseFile = getLatestBaseFile(); List<Pair<String, Long>> matchingKeysAndPositions = HoodieIndexUtils.filterKeysFromFile( new Path(baseFile.getPath()), candidateRecordKeys, hoodieTable.getHadoopConf()); LOG.info( String.format("Total records (%d), bloom filter candidates (%d)/fp(%d), actual matches (%d)", totalKeysChecked, candidateRecordKeys.size(), candidateRecordKeys.size() - matchingKeysAndPositions.size(), matchingKeysAndPositions.size())); return new HoodieKeyLookupResult(partitionPathFileIDPair.getRight(), partitionPathFileIDPair.getLeft(), baseFile.getCommitTime(), matchingKeysAndPositions); }
3.68
flink_StreamElement_asRecord
/** * Casts this element into a StreamRecord. * * @return This element as a stream record. * @throws java.lang.ClassCastException Thrown, if this element is actually not a stream record. */ @SuppressWarnings("unchecked") public final <E> StreamRecord<E> asRecord() { return (StreamRecord<E>) this; }
3.68
pulsar_MessageIdAdv_compareTo
/**
 * The default implementation of {@link Comparable#compareTo(Object)}.
 */
default int compareTo(MessageId o) {
    if (!(o instanceof MessageIdAdv)) {
        throw new UnsupportedOperationException("Unknown MessageId type: "
                + ((o != null) ? o.getClass().getName() : "null"));
    }
    final MessageIdAdv other = (MessageIdAdv) o;
    int result = Long.compare(this.getLedgerId(), other.getLedgerId());
    if (result != 0) {
        return result;
    }
    result = Long.compare(this.getEntryId(), other.getEntryId());
    if (result != 0) {
        return result;
    }
    // TODO: Correct the following compare logics, see https://github.com/apache/pulsar/pull/18981
    result = Integer.compare(this.getPartitionIndex(), other.getPartitionIndex());
    if (result != 0) {
        return result;
    }
    return Integer.compare(this.getBatchIndex(), other.getBatchIndex());
}
3.68
flink_SystemProcessingTimeService_isAlive
/** * @return {@code true} if the status of the service is {@link #STATUS_ALIVE}, {@code false} * otherwise. */ @VisibleForTesting boolean isAlive() { return status.get() == STATUS_ALIVE; }
3.68
flink_CliFrontend_list
/**
 * Executes the list action.
 *
 * @param args Command line arguments for the list action.
 */
protected void list(String[] args) throws Exception {
    LOG.info("Running 'list' command.");

    final Options commandOptions = CliFrontendParser.getListCommandOptions();
    final CommandLine commandLine = getCommandLine(commandOptions, args, false);

    ListOptions listOptions = new ListOptions(commandLine);

    // evaluate help flag
    if (listOptions.isPrintHelp()) {
        CliFrontendParser.printHelpForList(customCommandLines);
        return;
    }

    final boolean showRunning;
    final boolean showScheduled;
    final boolean showAll;

    // print running and scheduled jobs if not option supplied
    if (!listOptions.showRunning() && !listOptions.showScheduled() && !listOptions.showAll()) {
        showRunning = true;
        showScheduled = true;
        showAll = false;
    } else {
        showRunning = listOptions.showRunning();
        showScheduled = listOptions.showScheduled();
        showAll = listOptions.showAll();
    }

    final CustomCommandLine activeCommandLine = validateAndGetActiveCommandLine(commandLine);

    runClusterAction(
            activeCommandLine,
            commandLine,
            (clusterClient, effectiveConfiguration) ->
                    listJobs(clusterClient, showRunning, showScheduled, showAll));
}
3.68
hbase_ProcedureExecutor_isStarted
/** * Return true if the procedure is started. * @param procId the ID of the procedure to check * @return true if the procedure execution is started, otherwise false. */ public boolean isStarted(long procId) { Procedure<?> proc = procedures.get(procId); if (proc == null) { return completed.get(procId) != null; } return proc.wasExecuted(); }
3.68
flink_KubernetesUtils_getNamespacedServiceName
/** Generate namespaced name of the service. */ public static String getNamespacedServiceName(Service service) { return service.getMetadata().getName() + "." + service.getMetadata().getNamespace(); }
3.68
framework_VTree_isGrandParentOf
/** * Travels up the hierarchy looking for this node. * * @param child * The node whose grandparent (or further ancestor) this may or may not be * @return True if this is a grandparent of the child node */ public boolean isGrandParentOf(TreeNode child) { TreeNode currentNode = child; boolean isGrandParent = false; while (currentNode != null) { currentNode = currentNode.getParentNode(); if (currentNode == this) { isGrandParent = true; break; } } return isGrandParent; }
3.68