name (string, length 12-178)
code_snippet (string, length 8-36.5k)
score (float64, 3.26-3.68)
hadoop_ReconfigurableBase_reconfigureProperty
/** * {@inheritDoc} * * This method makes the change to this object's {@link Configuration} * and calls reconfigurePropertyImpl to update internal data structures. * This method cannot be overridden; subclasses should instead override * reconfigurePropertyImpl. */ @Override public final void reconfigureProperty(String property, String newVal) throws ReconfigurationException { if (isPropertyReconfigurable(property)) { LOG.info("changing property " + property + " to " + newVal); synchronized(getConf()) { getConf().get(property); String effectiveValue = reconfigurePropertyImpl(property, newVal); if (newVal != null) { getConf().set(property, effectiveValue); } else { getConf().unset(property); } } } else { throw new ReconfigurationException(property, newVal, getConf().get(property)); } }
3.68
framework_AbstractRemoteDataSource_removeRowData
/** * Informs this data source that the server has removed data. * * @param firstRowIndex * the index of the first removed row * @param count * the number of removed rows, starting from * <code>firstRowIndex</code> */ protected void removeRowData(int firstRowIndex, int count) { Profiler.enter("AbstractRemoteDataSource.removeRowData"); // Cache was not filled since previous insertRowData. The old rows are // no longer useful. if (invalidatedRows != null) { invalidatedRows.clear(); } size -= count; Range removedRange = Range.withLength(firstRowIndex, count); dropFromCache(removedRange); // shift indices to fill the cache correctly int firstMoved = Math.max(firstRowIndex + count, cached.getStart()); for (int i = firstMoved; i < cached.getEnd(); i++) { moveRowFromIndexToIndex(i, i - count); } if (cached.isSubsetOf(removedRange)) { // Whole cache is part of the removal. Empty cache cached = Range.withLength(0, 0); } else if (removedRange.intersects(cached)) { // Removal and cache share some indices. fix accordingly. Range[] partitions = cached.partitionWith(removedRange); Range remainsBefore = partitions[0]; Range transposedRemainsAfter = partitions[2] .offsetBy(-removedRange.length()); // #8840 either can be empty if the removed range was over the // cached range if (remainsBefore.isEmpty()) { cached = transposedRemainsAfter; } else if (transposedRemainsAfter.isEmpty()) { cached = remainsBefore; } else { cached = remainsBefore.combineWith(transposedRemainsAfter); } } else if (removedRange.getEnd() <= cached.getStart()) { // Removal was before the cache. offset the cache. cached = cached.offsetBy(-removedRange.length()); } getHandlers().forEach(dch -> dch.dataRemoved(firstRowIndex, count)); ensureCoverageCheck(); Profiler.leave("AbstractRemoteDataSource.removeRowData"); }
3.68
hbase_RateLimiter_canExecute
/** * Are there enough available resources to allow execution? * @param amount the number of required resources, a non-negative number * @return true if there are enough available resources, otherwise false */ public synchronized boolean canExecute(final long amount) { if (isBypass()) { return true; } long refillAmount = refill(limit); if (refillAmount == 0 && avail < amount) { return false; } // check for positive overflow if (avail <= Long.MAX_VALUE - refillAmount) { avail = Math.min(avail + refillAmount, limit); } else { avail = limit; } if (avail >= amount) { return true; } return false; }
3.68
hadoop_CoderUtil_toBuffers
/** * Convert an array of chunks to an array of ByteBuffers * @param chunks chunks to convert into buffers * @return an array of ByteBuffers */ static ByteBuffer[] toBuffers(ECChunk[] chunks) { ByteBuffer[] buffers = new ByteBuffer[chunks.length]; ECChunk chunk; for (int i = 0; i < chunks.length; i++) { chunk = chunks[i]; if (chunk == null) { buffers[i] = null; } else { buffers[i] = chunk.getBuffer(); if (chunk.isAllZero()) { CoderUtil.resetBuffer(buffers[i], buffers[i].remaining()); } } } return buffers; }
3.68
hudi_SourceFormatAdapter_getInvalidCharMask
/** * Replacement mask for invalid characters encountered in Avro names. * @return the replacement mask. */ private String getInvalidCharMask() { return invalidCharMask; }
3.68
flink_HyperLogLogPlusPlus_merge
/** * Merge the HLL buffers by iterating through the registers in both buffers and select the * maximum number of leading zeros for each register. */ public void merge(HllBuffer buffer1, HllBuffer buffer2) { int idx = 0; int wordOffset = 0; while (wordOffset < numWords) { long word1 = buffer1.array[wordOffset]; long word2 = buffer2.array[wordOffset]; long word = 0L; int i = 0; long mask = REGISTER_WORD_MASK; while (idx < m && i < REGISTERS_PER_WORD) { word |= Math.max(word1 & mask, word2 & mask); mask <<= REGISTER_SIZE; i += 1; idx += 1; } buffer1.array[wordOffset] = word; wordOffset += 1; } }
3.68
hbase_HFileWriterImpl_finishInit
/** Additional initialization steps */ protected void finishInit(final Configuration conf) { if (blockWriter != null) { throw new IllegalStateException("finishInit called twice"); } blockWriter = new HFileBlock.Writer(conf, blockEncoder, hFileContext, cacheConf.getByteBuffAllocator(), conf.getInt(MAX_BLOCK_SIZE_UNCOMPRESSED, hFileContext.getBlocksize() * 10)); // Data block index writer boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(blockWriter, cacheIndexesOnWrite ? cacheConf : null, cacheIndexesOnWrite ? name : null, indexBlockEncoder); dataBlockIndexWriter.setMaxChunkSize(HFileBlockIndex.getMaxChunkSize(conf)); dataBlockIndexWriter.setMinIndexNumEntries(HFileBlockIndex.getMinIndexNumEntries(conf)); inlineBlockWriters.add(dataBlockIndexWriter); // Meta data block index writer metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(); LOG.trace("Initialized with {}", cacheConf); }
3.68
hbase_CompositeImmutableSegment_getCellSet
/** Returns a set of all cells in the segment */ @Override protected CellSet getCellSet() { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); }
3.68
hadoop_Event_getTimestamp
/** * The time when this event occurred, in milliseconds since the epoch. */ public long getTimestamp() { return timestamp; }
3.68
flink_ProcessingTimeTriggers_afterEndOfWindow
/** Creates a trigger that fires when the processing time passes the end of the window. */ public static <W extends Window> AfterEndOfWindow<W> afterEndOfWindow() { return new AfterEndOfWindow<>(); }
3.68
hadoop_LocalSASKeyGeneratorImpl_getSASKeyBasedStorageAccountInstance
/** * Helper method that creates a CloudStorageAccount instance based on * SAS key for accountName * * @param accountName Storage Account Name * @return CloudStorageAccount instance created using SAS key for * the Storage Account. * @throws SASKeyGenerationException */ private CloudStorageAccount getSASKeyBasedStorageAccountInstance( String accountName) throws SASKeyGenerationException { LOG.debug("Creating SAS key from account instance {}", accountName); try { String accountNameWithoutDomain = getAccountNameWithoutDomain(accountName); CloudStorageAccount account = getStorageAccountInstance(accountNameWithoutDomain, AzureNativeFileSystemStore.getAccountKeyFromConfiguration( accountName, getConf())); return new CloudStorageAccount( new StorageCredentialsSharedAccessSignature( account.generateSharedAccessSignature( getDefaultAccountAccessPolicy())), false, account.getEndpointSuffix(), accountNameWithoutDomain); } catch (KeyProviderException keyProviderEx) { throw new SASKeyGenerationException("Encountered KeyProviderException" + " while retrieving Storage key from configuration for account " + accountName, keyProviderEx); } catch (InvalidKeyException invalidKeyEx) { throw new SASKeyGenerationException("Encoutered InvalidKeyException " + "while generating Account level SAS key for account" + accountName, invalidKeyEx); } catch(StorageException storeEx) { throw new SASKeyGenerationException("Encoutered StorageException while " + "generating Account level SAS key for account" + accountName, storeEx); } catch(URISyntaxException uriSyntaxEx) { throw new SASKeyGenerationException("Encountered URISyntaxException for" + " account " + accountName, uriSyntaxEx); } }
3.68
flink_InternalServiceDecorator_getInternalServiceName
/** Generate name of the internal Service. */ public static String getInternalServiceName(String clusterId) { return clusterId; }
3.68
hadoop_BlockBlobAppendStream_setCompactionBlockCount
/** * Set compaction parameters. * It is intended to be used for unit testing purposes only. */ @VisibleForTesting void setCompactionBlockCount(int activationCount) { activateCompactionBlockCount = activationCount; }
3.68
flink_BinaryHashPartition_getNumOccupiedMemorySegments
/** * Gets the number of memory segments used by this partition, which includes build side memory * buffers and overflow memory segments. * * @return The number of occupied memory segments. */ int getNumOccupiedMemorySegments() { // either the number of memory segments, or one for spilling final int numPartitionBuffers = this.partitionBuffers != null ? this.partitionBuffers.length : this.buildSideWriteBuffer.getNumOccupiedMemorySegments(); return numPartitionBuffers + bucketArea.buckets.length + bucketArea.numOverflowSegments; }
3.68
hibernate-validator_ExecutableMetaData_addToExecutablesByDeclaringType
/** * Merges the given executable with the metadata contributed by other * providers for the same executable in the hierarchy. * * @param executable The executable to merge. */ private void addToExecutablesByDeclaringType(ConstrainedExecutable executable) { Class<?> beanClass = executable.getCallable().getDeclaringClass(); ConstrainedExecutable mergedExecutable = executablesByDeclaringType.get( beanClass ); if ( mergedExecutable != null ) { mergedExecutable = mergedExecutable.merge( executable ); } else { mergedExecutable = executable; } executablesByDeclaringType.put( beanClass, mergedExecutable ); }
3.68
framework_AbstractSelect_readItem
/** * Reads an Item from a design and inserts it into the data source. * Hierarchical select components should override this method to recursively * read any child items as well. * * @since 7.5.0 * @param child * a child element representing the item * @param selected * A set accumulating selected items. If the item that is read is * marked as selected, its item id should be added to this set. * @param context * the DesignContext instance used in parsing * @return the item id of the new item * * @throws DesignException * if the tag name of the {@code child} element is not * {@code option}. */ protected Object readItem(Element child, Set<String> selected, DesignContext context) { if (!"option".equals(child.tagName())) { throw new DesignException("Unrecognized child element in " + getClass().getSimpleName() + ": " + child.tagName()); } String itemId; String caption = DesignFormatter.decodeFromTextNode(child.html()); if (child.hasAttr("item-id")) { itemId = child.attr("item-id"); addItem(itemId); setItemCaption(itemId, caption); } else { addItem(itemId = caption); } if (child.hasAttr("icon")) { setItemIcon(itemId, DesignAttributeHandler.readAttribute("icon", child.attributes(), Resource.class)); } if (child.hasAttr("selected")) { selected.add(itemId); } return itemId; }
3.68
graphhopper_VectorTile_getFeaturesOrBuilderList
/** * <pre> * The actual features in this tile. * </pre> * * <code>repeated .vector_tile.Tile.Feature features = 2;</code> */ public java.util.List<? extends vector_tile.VectorTile.Tile.FeatureOrBuilder> getFeaturesOrBuilderList() { if (featuresBuilder_ != null) { return featuresBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(features_); } }
3.68
hbase_HttpServer_setThreads
/** * Set the min, max number of worker threads (simultaneous connections). */ public void setThreads(int min, int max) { QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool(); pool.setMinThreads(min); pool.setMaxThreads(max); }
3.68
morf_SqlDialect_decorateTemporaryTableName
/** * Decorate the table name in an appropriate manner for temporary table in the * relevant database. * * @param undecoratedName core name. * @return decorated version. */ public String decorateTemporaryTableName(String undecoratedName) { return undecoratedName; }
3.68
hudi_HoodieBloomIndex_isGlobal
/** * This is not global, since we depend on the partitionPath to do the lookup. */ @Override public boolean isGlobal() { return false; }
3.68
streampipes_MigrateExtensionsResource_getMigrator
/** * Find and return the corresponding {@link IModelMigrator} instance within the registered migrators. * This allows passing the corresponding model migrator to a {@link ModelMigratorConfig} which is exchanged * between the Core and Extensions services. * * @param modelMigratorConfig config that describes the model migrator to be returned * @return Optional model migrator which is empty in case no appropriate migrator is found among the registered. */ public Optional<MmT> getMigrator(ModelMigratorConfig modelMigratorConfig) { return DeclarersSingleton.getInstance().getServiceDefinition().getMigrators() .stream() .filter(modelMigrator -> modelMigrator.config().equals(modelMigratorConfig)) .map(modelMigrator -> (MmT) modelMigrator) .findFirst(); }
3.68
flink_RestoredCheckpointStats_getExternalPath
/** * Returns the external path if this checkpoint was persisted externally. * * @return External path of this checkpoint or <code>null</code>. */ @Nullable public String getExternalPath() { return externalPath; }
3.68
flink_RocksDBMemoryConfiguration_fromOtherAndConfiguration
/** * Derives a RocksDBMemoryConfiguration from another object and a configuration. The values set * on the other object take precedence, and the values from the configuration are used if no * values are set on the other config object. */ public static RocksDBMemoryConfiguration fromOtherAndConfiguration( RocksDBMemoryConfiguration other, ReadableConfig config) { final RocksDBMemoryConfiguration newConfig = new RocksDBMemoryConfiguration(); newConfig.useManagedMemory = other.useManagedMemory != null ? other.useManagedMemory : config.get(RocksDBOptions.USE_MANAGED_MEMORY); newConfig.fixedMemoryPerSlot = other.fixedMemoryPerSlot != null ? other.fixedMemoryPerSlot : config.get(RocksDBOptions.FIX_PER_SLOT_MEMORY_SIZE); newConfig.writeBufferRatio = other.writeBufferRatio != null ? other.writeBufferRatio : config.get(RocksDBOptions.WRITE_BUFFER_RATIO); newConfig.highPriorityPoolRatio = other.highPriorityPoolRatio != null ? other.highPriorityPoolRatio : config.get(RocksDBOptions.HIGH_PRIORITY_POOL_RATIO); newConfig.usePartitionedIndexFilters = other.usePartitionedIndexFilters != null ? other.usePartitionedIndexFilters : config.get(RocksDBOptions.USE_PARTITIONED_INDEX_FILTERS); return newConfig; }
3.68
framework_AbstractSingleSelect_getValue
/** * Returns the current value of this object which is the currently selected * item. * <p> * The call is delegated to {@link #getSelectedItem()} * * @return the current selection, may be {@code null} * * @see #getSelectedItem() * @see Single#getSelectedItem */ @Override public T getValue() { return getSelectedItem().orElse(null); }
3.68
hbase_AbstractFSWAL_main
/** * Pass one or more log file names and it will either dump out a text version on * <code>stdout</code> or split the specified log files. */ public static void main(String[] args) throws IOException { if (args.length < 2) { usage(); System.exit(-1); } // either dump using the WALPrettyPrinter or split, depending on args if (args[0].compareTo("--dump") == 0) { WALPrettyPrinter.run(Arrays.copyOfRange(args, 1, args.length)); } else if (args[0].compareTo("--perf") == 0) { LOG.error(HBaseMarkers.FATAL, "Please use the WALPerformanceEvaluation tool instead. i.e.:"); LOG.error(HBaseMarkers.FATAL, "\thbase org.apache.hadoop.hbase.wal.WALPerformanceEvaluation --iterations " + args[1]); System.exit(-1); } else if (args[0].compareTo("--split") == 0) { Configuration conf = HBaseConfiguration.create(); for (int i = 1; i < args.length; i++) { try { Path logPath = new Path(args[i]); CommonFSUtils.setFsDefault(conf, logPath); split(conf, logPath); } catch (IOException t) { t.printStackTrace(System.err); System.exit(-1); } } } else { usage(); System.exit(-1); } }
3.68
hbase_NamespacePermission_implies
/** * check if given action is granted in given namespace. * @param namespace namespace's name * @param action action to be checked * @return true if granted, false otherwise */ public boolean implies(String namespace, Action action) { return namespace.equals(this.namespace) && implies(action); }
3.68
hbase_AsyncConnectionImpl_getLocator
// we will override this method for testing retry caller, so do not remove this method. AsyncRegionLocator getLocator() { return locator; }
3.68
hudi_ConsistentBucketIndexBulkInsertPartitionerWithRows_prepareRepartition
/** * Prepare consistent hashing metadata for repartition * * @param rows input records */ private void prepareRepartition(JavaRDD<Row> rows) { this.partitionToIdentifier = initializeBucketIdentifier(rows); this.partitionToFileIdPfxIdxMap = ConsistentBucketIndexUtils.generatePartitionToFileIdPfxIdxMap(partitionToIdentifier); partitionToIdentifier.values().forEach(identifier -> { fileIdPfxList.addAll(identifier.getNodes().stream().map(ConsistentHashingNode::getFileIdPrefix).collect(Collectors.toList())); }); }
3.68
morf_TableOutputter_createBlankWriteableCell
/** * Creates a blank {@link WritableCell} for a given column number and row index. * * @param columnNumber the column number * @param rowIndex the row index * @return a blank {@link WritableCell} */ private WritableCell createBlankWriteableCell(int columnNumber, int rowIndex) { return new jxl.write.Blank(columnNumber, rowIndex); }
3.68
pulsar_ConsumerConfiguration_getNegativeAckRedeliveryBackoff
/** * @return the configured {@link RedeliveryBackoff} for the consumer */ public RedeliveryBackoff getNegativeAckRedeliveryBackoff() { return conf.getNegativeAckRedeliveryBackoff(); }
3.68
hadoop_NoopAuditManagerS3A_createNewSpan
/** * A static source of no-op spans, using the same span ID * source as managed spans. * @param name operation name. * @param path1 first path of operation * @param path2 second path of operation * @return a span for the audit */ public static AuditSpanS3A createNewSpan( final String name, final String path1, final String path2) { return NoopSpan.INSTANCE; }
3.68
pulsar_AuthenticationProvider_authenticate
/** * Validate the authentication for the given credentials with the specified authentication data. * This method is useful for one-stage authn; if you're not doing one-stage authn, or if you're providing * your own state implementation for one-stage authn, it should throw an exception. * * @param authData * provider specific authentication data * @return the "role" string for the authenticated connection, if the authentication was successful * @throws AuthenticationException * if the credentials are not valid * @deprecated use and implement {@link AuthenticationProvider#authenticateAsync(AuthenticationDataSource)} instead. */ @Deprecated default String authenticate(AuthenticationDataSource authData) throws AuthenticationException { throw new AuthenticationException("Not supported"); }
3.68
rocketmq-connect_MetricsReporter_onHistogramRemoved
/** * Called when a {@link Histogram} is removed from the registry. * * @param name the histogram's name */ public void onHistogramRemoved(String name) { this.onCounterRemoved(MetricUtils.stringToMetricName(name)); }
3.68
flink_KubernetesJobGraphStoreUtil_jobIDToName
/** * Convert a {@link JobID} to config map key. We will add prefix {@link * Constants#JOB_GRAPH_STORE_KEY_PREFIX}. * * @param jobID job id * @return a key to store job graph in the ConfigMap */ public String jobIDToName(JobID jobID) { return JOB_GRAPH_STORE_KEY_PREFIX + jobID; }
3.68
hbase_Bytes_tail
/** * Make a new byte array from a subset of bytes at the tail of another. * @param a array * @param length amount of bytes to snarf * @return Last <code>length</code> bytes from <code>a</code> */ public static byte[] tail(final byte[] a, final int length) { if (a.length < length) { return null; } byte[] result = new byte[length]; System.arraycopy(a, a.length - length, result, 0, length); return result; }
3.68
hadoop_Chunk_checkEOF
/** * Check whether we reach the end of the stream. * * @return false if the chunk encoded stream has more data to read (in which * case available() will be greater than 0); true otherwise. * @throws java.io.IOException * on I/O errors. */ private boolean checkEOF() throws IOException { if (isClosed()) return true; while (true) { if (remain > 0) return false; if (lastChunk) return true; readLength(); } }
3.68
hudi_CloudObjectsSelector_getSqsQueueAttributes
/** * Get SQS queue attributes. * * @param sqsClient AWSClient for sqsClient * @param queueUrl queue full url * @return map of attributes needed */ protected Map<String, String> getSqsQueueAttributes(SqsClient sqsClient, String queueUrl) { GetQueueAttributesResponse queueAttributesResult = sqsClient.getQueueAttributes( GetQueueAttributesRequest.builder() .queueUrl(queueUrl) .attributeNames(QueueAttributeName.fromValue(SQS_ATTR_APPROX_MESSAGES)) .build() ); return queueAttributesResult.attributesAsStrings(); }
3.68
framework_FocusableHTML_addKeyDownHandler
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.HasKeyDownHandlers#addKeyDownHandler( * com.google.gwt.event.dom.client.KeyDownHandler) */ @Override public HandlerRegistration addKeyDownHandler(KeyDownHandler handler) { return addDomHandler(handler, KeyDownEvent.getType()); }
3.68
hbase_MasterProcedureScheduler_waitTableSharedLock
/** * Suspend the procedure if the specified table is already locked. Other "read" operations in the * table-queue may be executed concurrently. * @param procedure the procedure trying to acquire the lock * @param table Table to lock * @return true if the procedure has to wait for the table to be available */ public boolean waitTableSharedLock(final Procedure<?> procedure, final TableName table) { return waitTableQueueSharedLock(procedure, table) == null; }
3.68
hadoop_AggregationOperation_getAggregationOperation
/** * returns the AggregationOperation enum that represents that string. * @param aggOpStr Aggregation operation. * @return the AggregationOperation enum that represents that string */ public static AggregationOperation getAggregationOperation(String aggOpStr) { for (AggregationOperation aggOp : AggregationOperation.values()) { if (aggOp.name().equals(aggOpStr)) { return aggOp; } } return null; }
3.68
hbase_IndexBlockEncoding_getId
/** Returns The id of a data block encoder. */ public short getId() { return id; }
3.68
flink_FutureUtils_thenApplyAsyncIfNotDone
/** * This function takes a {@link CompletableFuture} and a function to apply to this future. If * the input future is already done, this function returns {@link * CompletableFuture#thenApply(Function)}. Otherwise, the return value is {@link * CompletableFuture#thenApplyAsync(Function, Executor)} with the given executor. * * @param completableFuture the completable future for which we want to apply. * @param executor the executor to run the apply function if the future is not yet done. * @param applyFun the function to apply. * @param <IN> type of the input future. * @param <OUT> type of the output future. * @return a completable future that is applying the given function to the input future. */ public static <IN, OUT> CompletableFuture<OUT> thenApplyAsyncIfNotDone( CompletableFuture<IN> completableFuture, Executor executor, Function<? super IN, ? extends OUT> applyFun) { return completableFuture.isDone() ? completableFuture.thenApply(applyFun) : completableFuture.thenApplyAsync(applyFun, executor); }
3.68
flink_RowUtils_createRowWithNamedPositions
/** Internal utility for creating a row in static named-position field mode. */ @Internal public static Row createRowWithNamedPositions( RowKind kind, Object[] fieldByPosition, LinkedHashMap<String, Integer> positionByName) { return new Row(kind, fieldByPosition, null, positionByName); }
3.68
hadoop_ActiveOperationContext_newOperationId
/** * Create an operation ID. The nature of it should be opaque. * @return an ID for the constructor. */ protected static long newOperationId() { return NEXT_OPERATION_ID.incrementAndGet(); }
3.68
framework_PointerEventSupport_getNativeEventName
/** * @param eventType * @return the native event name of the given event */ public static String getNativeEventName(EventType eventType) { return IMPL.getNativeEventName(eventType); }
3.68
hbase_PreemptiveFastFailException_isGuaranteedClientSideOnly
/** Returns true if we know no mutation made it to the server, false otherwise. */ public boolean isGuaranteedClientSideOnly() { return guaranteedClientSideOnly; }
3.68
hbase_Procedure_setTimeout
// ========================================================================== // runtime state - timeout related // ========================================================================== /** * @param timeout timeout interval in msec */ protected void setTimeout(int timeout) { this.timeout = timeout; }
3.68
framework_ElementResizeEvent_getElement
/** * Returns the resized element. * * @return the element */ public Element getElement() { return element; }
3.68
hbase_Response_getLocation
/** Returns the value of the Location header */ public String getLocation() { return getHeader("Location"); }
3.68
pulsar_AbstractTopic_getTopicPolicies
/** * Get {@link TopicPolicies} for this topic. * @return TopicPolicies, if they exist. Otherwise, the value will not be present. */ public Optional<TopicPolicies> getTopicPolicies() { return brokerService.getTopicPolicies(TopicName.get(topic)); }
3.68
framework_MultiSelectionModelImpl_fetchAll
/** * Fetch all items from the given data provider. * * @since 8.1 * @param dataProvider * the data provider to fetch from * @return all items in this data provider */ private Stream<T> fetchAll(DataProvider<T, ?> dataProvider) { return dataProvider.fetch(new Query<>()); }
3.68
hbase_ZKListener_nodeDeleted
/** * Called when a node has been deleted * @param path full path of the deleted node */ public void nodeDeleted(String path) { // no-op }
3.68
pulsar_PulsarClientImplementationBindingImpl_encodeKeyValueSchemaInfo
/** * Encode key & value schemas into a KeyValue schema. * * @param schemaName the final schema name * @param keySchema the key schema * @param valueSchema the value schema * @param keyValueEncodingType the encoding type to encode and decode key value pair * @return the final schema info */ public <K, V> SchemaInfo encodeKeyValueSchemaInfo(String schemaName, Schema<K> keySchema, Schema<V> valueSchema, KeyValueEncodingType keyValueEncodingType) { return KeyValueSchemaInfo.encodeKeyValueSchemaInfo(schemaName, keySchema, valueSchema, keyValueEncodingType); }
3.68
flink_StreamOperatorStateHandler_getPartitionedState
/** * Creates a partitioned state handle, using the state backend configured for this task. * * @throws IllegalStateException Thrown, if the key/value state was already initialized. * @throws Exception Thrown, if the state backend cannot create the key/value state. */ protected <S extends State, N> S getPartitionedState( N namespace, TypeSerializer<N> namespaceSerializer, StateDescriptor<S, ?> stateDescriptor) throws Exception { /* TODO: NOTE: This method does a lot of work caching / retrieving states just to update the namespace. This method should be removed for the sake of namespaces being lazily fetched from the keyed state backend, or being set on the state directly. */ if (keyedStateBackend != null) { return keyedStateBackend.getPartitionedState( namespace, namespaceSerializer, stateDescriptor); } else { throw new RuntimeException( "Cannot create partitioned state. The keyed state " + "backend has not been set. This indicates that the operator is not " + "partitioned/keyed."); } }
3.68
graphhopper_GTFSError_compareTo
/** must be comparable to put into mapdb */ public int compareTo (GTFSError o) { if (this.file == null && o.file != null) return -1; else if (this.file != null && o.file == null) return 1; int file = this.file == null && o.file == null ? 0 : String.CASE_INSENSITIVE_ORDER.compare(this.file, o.file); if (file != 0) return file; int errorType = String.CASE_INSENSITIVE_ORDER.compare(this.errorType, o.errorType); if (errorType != 0) return errorType; int affectedEntityId = this.affectedEntityId == null && o.affectedEntityId == null ? 0 : String.CASE_INSENSITIVE_ORDER.compare(this.affectedEntityId, o.affectedEntityId); if (affectedEntityId != 0) return affectedEntityId; else return Long.compare(this.line, o.line); }
3.68
hbase_SnapshotScannerHDFSAclHelper_removeTableAcl
/** * Remove table acls when modifying a table * @param tableName the table * @param users the table users with READ permission * @return false if an error occurred, otherwise true */ public boolean removeTableAcl(TableName tableName, Set<String> users) { try { long start = EnvironmentEdgeManager.currentTime(); if (users.size() > 0) { handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0), HDFSAclOperation.OperationType.REMOVE); } LOG.info("Set HDFS acl when create or modify table {}, cost {} ms", tableName, EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when create or modify table {}", tableName, e); return false; } }
3.68
framework_VGridLayout_getHorizontalSpacing
/** * Returns the spacing between the cells horizontally in pixels. * * @return the horizontal spacing between cells in pixels */ protected int getHorizontalSpacing() { return LayoutManager.get(client).getOuterWidth(spacingMeasureElement); }
3.68
flink_DecimalData_fromBigDecimal
/** * Creates an instance of {@link DecimalData} from a {@link BigDecimal} and the given precision * and scale. * * <p>The returned decimal value may be rounded to have the desired scale. The precision will be * checked. If the precision overflows, null will be returned. */ public static @Nullable DecimalData fromBigDecimal(BigDecimal bd, int precision, int scale) { bd = bd.setScale(scale, RoundingMode.HALF_UP); if (bd.precision() > precision) { return null; } long longVal = -1; if (precision <= MAX_COMPACT_PRECISION) { longVal = bd.movePointRight(scale).longValueExact(); } return new DecimalData(precision, scale, longVal, bd); }
3.68
flink_Serializers_getContainedGenericTypes
/** * Returns all GenericTypeInfos contained in a composite type. * * @param typeInfo {@link CompositeType} */ private static void getContainedGenericTypes( CompositeType<?> typeInfo, List<GenericTypeInfo<?>> target) { for (int i = 0; i < typeInfo.getArity(); i++) { TypeInformation<?> type = typeInfo.getTypeAt(i); if (type instanceof CompositeType) { getContainedGenericTypes((CompositeType<?>) type, target); } else if (type instanceof GenericTypeInfo) { if (!target.contains(type)) { target.add((GenericTypeInfo<?>) type); } } } }
3.68
morf_SchemaUtils_nullable
/** * @see org.alfasoftware.morf.metadata.SchemaUtils.ColumnBuilder#nullable() */ @Override public ColumnBuilder nullable() { return new ColumnBuilderImpl(this, true, getDefaultValue(), isPrimaryKey(), isAutoNumbered(), getAutoNumberStart()); }
3.68
hadoop_AbfsConfiguration_getTokenProviderClass
/** * Returns account-specific token provider class if it exists, else checks if * an account-agnostic setting is present for token provider class if AuthType * matches with authType passed. * @param authType AuthType effective on the account * @param name Account-agnostic configuration key * @param defaultValue Class returned if none is configured * @param xface Interface shared by all possible values * @param <U> Interface class type * @return Highest-precedence Class object that was found */ public <U> Class<? extends U> getTokenProviderClass(AuthType authType, String name, Class<? extends U> defaultValue, Class<U> xface) { Class<?> tokenProviderClass = getAccountSpecificClass(name, defaultValue, xface); // If there is none set specific for account // fall back to generic setting if Auth Type matches if ((tokenProviderClass == null) && (authType == getAccountAgnosticEnum( FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey))) { tokenProviderClass = getAccountAgnosticClass(name, defaultValue, xface); } return (tokenProviderClass == null) ? null : tokenProviderClass.asSubclass(xface); }
3.68
framework_CalendarConnector_getActions
/** * Returns ALL currently registered events. Use {@link #getActions(Date)} to * get the actions for a specific date */ @Override public Action[] getActions() { List<Action> actions = new ArrayList<Action>(); for (int i = 0; i < actionKeys.size(); i++) { final String actionKey = actionKeys.get(i); final VCalendarAction a = new VCalendarAction(this, rpc, actionKey); a.setCaption(getActionCaption(actionKey)); a.setIconUrl(getActionIcon(actionKey)); try { a.setActionStartDate(getActionStartDate(actionKey)); a.setActionEndDate(getActionEndDate(actionKey)); } catch (ParseException pe) { getLogger().log(Level.SEVERE, pe.getMessage() == null ? "" : pe.getMessage(), pe); } actions.add(a); } return actions.toArray(new Action[actions.size()]); }
3.68
morf_SqlScriptExecutor_withMaxRows
/** * @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.QueryBuilder#withMaxRows(java.util.Optional) */ @Override public QueryBuilder withMaxRows(Optional<Integer> maxRows) { this.maxRows = maxRows; return this; }
3.68
AreaShop_Utils_toUniqueId
/** * Conversion from name to uuid. * @param name The name of the player * @return The uuid of the player */ @SuppressWarnings("deprecation") // Fake deprecation by Bukkit to inform developers, method will stay public static String toUniqueId(String name) { if(name == null) { return null; } else { return Bukkit.getOfflinePlayer(name).getUniqueId().toString(); } }
3.68
hbase_RegionCoprocessorHost_preCompact
/** * Called prior to rewriting the store files selected for compaction * @param store the store being compacted * @param scanner the scanner used to read store data during compaction * @param scanType type of Scan * @param tracker used to track the life cycle of a compaction * @param request the compaction request * @param user the user * @return Scanner to use (cannot be null!) */ public InternalScanner preCompact(final HStore store, final InternalScanner scanner, final ScanType scanType, final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user) throws IOException { InternalScanner defaultResult = scanner; if (coprocEnvironments.isEmpty()) { return defaultResult; } return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, InternalScanner>( regionObserverGetter, defaultResult, user) { @Override public InternalScanner call(RegionObserver observer) throws IOException { InternalScanner scanner = observer.preCompact(this, store, getResult(), scanType, tracker, request); if (scanner == null) { throw new CoprocessorException("Null Scanner return disallowed!"); } return scanner; } }); }
3.68
hadoop_DeSelectFields_initFields
/** * Initial DeSelectFields with unselected fields. * @param unselectedFields a set of unselected field. */ public void initFields(Set<String> unselectedFields) { if (unselectedFields == null) { return; } for (String field : unselectedFields) { if (!field.trim().isEmpty()) { String[] literalsArray = field.split(","); for (String literals : literalsArray) { if (literals != null && !literals.trim().isEmpty()) { DeSelectType type = DeSelectType.obtainType(literals); if (type == null) { LOG.warn("Invalid deSelects string " + literals.trim()); DeSelectType[] typeArray = DeSelectType.values(); String allSupportLiterals = Arrays.toString(typeArray); throw new BadRequestException("Invalid deSelects string " + literals.trim() + " specified. It should be one of " + allSupportLiterals); } else { this.types.add(type); } } } } } }
3.68
framework_LegacyWindow_open
/** * Opens the given resource in a window with the given size, border and * name. For more information on the meaning of {@code windowName}, see * {@link #open(Resource, String)}. * <p> * As of Vaadin 7.0.0, the functionality for opening a Resource in a Page * has been replaced with similar methods based on a String URL. This is * because the usage of Resource is problematic with memory management and * with security features in some browsers. Is is recommended to instead use * {@link Link} for starting downloads. * </p> * * @param resource * the resource. * @param windowName * the name of the window. * @param width * the width of the window in pixels * @param height * the height of the window in pixels * @param border * the border style of the window. * @deprecated As of 7.0, use getPage().open instead */ @Deprecated public void open(Resource resource, String windowName, int width, int height, BorderStyle border) { getPage().open(resource, windowName, width, height, border); }
3.68
hbase_Table_exists
/** * Test for the existence of columns in the table, as specified by the Gets. * <p> * This will return an array of booleans. Each value will be true if the related Get matches one * or more keys, false if not. * <p> * This is a server-side call so it prevents any data from being transferred to the client. * @param gets the Gets * @return Array of boolean. True if the specified Get matches one or more keys, false if not. * @throws IOException e */ default boolean[] exists(List<Get> gets) throws IOException { throw new NotImplementedException("Add an implementation!"); }
3.68
hbase_BitSetNode_isEmpty
/** Returns true, if there are no active procedures in this BitSetNode, else false. */ public boolean isEmpty() { // TODO: cache the value for (int i = 0; i < deleted.length; ++i) { if (deleted[i] != WORD_MASK) { return false; } } return true; }
3.68
druid_MySQL8DateTimeSqlTypeFilter_resultSet_getMetaData
/** * When MyBatis maps a query result to a map, it performs type mapping automatically. Changing the Java type mapped in ResultSetMetaData only takes effect if done before that automatic mapping. * @param chain * @param resultSet * @return * @throws SQLException */ @Override public ResultSetMetaData resultSet_getMetaData(FilterChain chain, ResultSetProxy resultSet) throws SQLException { return new MySQL8DateTimeResultSetMetaData(chain.resultSet_getMetaData(resultSet)); }
3.68
flink_FailureEnricherUtils_getFailureEnrichers
/** * Returns a set of validated FailureEnrichers for a given configuration. * * @param configuration the configuration for the job * @return a collection of validated FailureEnrichers */ public static Collection<FailureEnricher> getFailureEnrichers( final Configuration configuration) { final PluginManager pluginManager = PluginUtils.createPluginManagerFromRootFolder(configuration); return getFailureEnrichers(configuration, pluginManager); }
3.68
hbase_HMaster_setCatalogJanitorEnabled
/** * Switch for the background CatalogJanitor thread. Used for testing. The thread will continue to * run. It will just be a noop if disabled. * @param b If false, the catalog janitor won't do anything. */ public void setCatalogJanitorEnabled(final boolean b) { this.catalogJanitorChore.setEnabled(b); }
3.68
framework_VAbstractCalendarPanel_focusNextMonth
/** * Selects the next month */ @SuppressWarnings("deprecation") private void focusNextMonth() { if (focusedDate == null) { return; } // Trying to request next month Date requestedNextMonthDate = (Date) focusedDate.clone(); addOneMonth(requestedNextMonthDate); if (!isDateInsideRange(requestedNextMonthDate, getResolution(this::isMonth))) { return; } // Now also checking whether the day is inside the range or not. If not // inside, correct it if (!isDateInsideRange(requestedNextMonthDate, getResolution(this::isDay))) { requestedNextMonthDate = adjustDateToFitInsideRange( requestedNextMonthDate); } focusedDate.setTime(requestedNextMonthDate.getTime()); displayedMonth.setMonth(displayedMonth.getMonth() + 1); renderCalendar(); }
3.68
graphhopper_VectorTile_clearFloatValue
/** * <code>optional float float_value = 2;</code> */ public Builder clearFloatValue() { bitField0_ = (bitField0_ & ~0x00000002); floatValue_ = 0F; onChanged(); return this; }
3.68
framework_LayoutManager_getBorderBottom
/** * Gets the bottom border of the given element, provided that it has been * measured. These elements are guaranteed to be measured: * <ul> * <li>ManagedLayouts and their child Connectors * <li>Elements for which there is at least one ElementResizeListener * <li>Elements for which at least one ManagedLayout has registered a * dependency * </ul> * * A negative number is returned if the element has not been measured. If 0 * is returned, it might indicate that the element is not attached to the * DOM. * * @param element * the element to get the measured size for * @return the measured bottom border of the element in pixels. */ public int getBorderBottom(Element element) { assert needsMeasure( element) : "Getting measurement for element that is not measured"; return getMeasuredSize(element, nullSize).getBorderBottom(); }
3.68
hbase_MetaFixer_isOverlap
/** * @return True if an overlap is found between the passed in <code>ri</code> and the <code>pair</code>. * Does NOT check whether the pair's regions overlap each other. */ static boolean isOverlap(RegionInfo ri, Pair<RegionInfo, RegionInfo> pair) { if (ri == null || pair == null) { // Can't be an overlap in either of these cases. return false; } return ri.isOverlap(pair.getFirst()) || ri.isOverlap(pair.getSecond()); }
3.68
framework_ServerRpcQueue_add
/** * Adds an explicit RPC method invocation to the send queue. * * @param invocation * RPC method invocation * @param lastOnly * <code>true</code> to remove all previously delayed invocations * of the same method that were also enqueued with lastonly set * to <code>true</code>. <code>false</code> to add invocation to * the end of the queue without touching previously enqueued * invocations. */ public void add(MethodInvocation invocation, boolean lastOnly) { if (!connection.isApplicationRunning()) { getLogger().warning( "Trying to invoke method on not yet started or stopped application"); return; } String tag; if (lastOnly) { tag = invocation.getLastOnlyTag(); assert !tag.matches( "\\d+") : "getLastOnlyTag value must have at least one non-digit character"; pendingInvocations.remove(tag); } else { tag = Integer.toString(lastInvocationTag++); } pendingInvocations.put(tag, invocation); }
3.68
dubbo_MetricsGlobalRegistry_getCompositeRegistry
/** * Use CompositeMeterRegistry according to the following priority * 1. If useGlobalRegistry is configured, use the micrometer global CompositeMeterRegistry * 2. If there is a spring actuator, use spring's CompositeMeterRegistry * 3. Dubbo's own CompositeMeterRegistry is used by default */ public static CompositeMeterRegistry getCompositeRegistry(ApplicationModel applicationModel) { Optional<MetricsConfig> configOptional = applicationModel.getApplicationConfigManager().getMetrics(); if (configOptional.isPresent() && configOptional.get().getUseGlobalRegistry() != null && configOptional.get().getUseGlobalRegistry()) { return Metrics.globalRegistry; } else { return compositeRegistry; } }
3.68
hadoop_DefaultStringifier_store
/** * Stores the item in the configuration with the given keyName. * * @param <K> the class of the item * @param conf the configuration to store * @param item the object to be stored * @param keyName the name of the key to use * @throws IOException : forwards Exceptions from the underlying * {@link Serialization} classes. */ public static <K> void store(Configuration conf, K item, String keyName) throws IOException { DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, GenericsUtil.getClass(item)); conf.set(keyName, stringifier.toString(item)); stringifier.close(); }
3.68
flink_ExecutionVertexInputInfo_getSubpartitionIndexRange
/** Get the subpartition range this subtask should consume. */ public IndexRange getSubpartitionIndexRange() { return subpartitionIndexRange; }
3.68
hibernate-validator_ClassVisitor_visitAllMyElements
/** * Visits all inner elements of provided {@link TypeElement}. * * @param typeElement inner elements of which you want to visit */ private void visitAllMyElements(TypeElement typeElement) { Name qualifiedName = typeElement.getQualifiedName(); if ( !processedTypes.contains( qualifiedName ) ) { processedTypes.add( qualifiedName ); for ( Element element : elementUtils.getAllMembers( typeElement ) ) { visit( element ); } } }
3.68
morf_UpgradeTestHelper_validateStepsArePackageVisible
/** * Validate that the upgrades are package-visible. * @param upgradeSteps The sequence of upgrade steps */ public void validateStepsArePackageVisible(Iterable<Class<? extends UpgradeStep>> upgradeSteps) { for (Class<? extends UpgradeStep> upgradeStepClass : upgradeSteps) { // Upgrade steps classes should be package-visible (default) - not public if (Modifier.isPublic(upgradeStepClass.getModifiers())) { fail(String.format("Upgrade class [%s] is public and should be package visible", upgradeStepClass.getSimpleName())); } } }
3.68
hudi_CompactionUtils_getCompactionPlansByTimeline
/** * Util method to get compaction plans by action_type(COMPACT or LOG_COMPACT) * @param metaClient HoodieTable's metaclient * @param filteredTimelineSupplier gives a timeline object, this can be either filtered to return pending compactions or log compaction instants. * @param requestedInstantWrapper function that gives a requested Hoodie instant. * @return List of pair of HoodieInstant and it's corresponding compaction plan. * Note here the compaction plan can be related to a compaction instant or log compaction instant. */ private static List<Pair<HoodieInstant, HoodieCompactionPlan>> getCompactionPlansByTimeline( HoodieTableMetaClient metaClient, Function<HoodieTableMetaClient, HoodieTimeline> filteredTimelineSupplier, Function<String, HoodieInstant> requestedInstantWrapper) { List<HoodieInstant> filteredInstants = filteredTimelineSupplier.apply(metaClient).getInstants(); return filteredInstants.stream() .map(instant -> Pair.of(instant, getCompactionPlan(metaClient, requestedInstantWrapper.apply(instant.getTimestamp())))) .collect(Collectors.toList()); }
3.68
flink_ConfigurationUtils_hideSensitiveValues
/** * Replaces values whose keys are sensitive according to {@link * GlobalConfiguration#isSensitive(String)} with {@link GlobalConfiguration#HIDDEN_CONTENT}. * * <p>This can be useful when displaying configuration values. * * @param keyValuePairs for which to hide sensitive values * @return A map where all sensitive value are hidden */ @Nonnull public static Map<String, String> hideSensitiveValues(Map<String, String> keyValuePairs) { final HashMap<String, String> result = new HashMap<>(); for (Map.Entry<String, String> keyValuePair : keyValuePairs.entrySet()) { if (GlobalConfiguration.isSensitive(keyValuePair.getKey())) { result.put(keyValuePair.getKey(), GlobalConfiguration.HIDDEN_CONTENT); } else { result.put(keyValuePair.getKey(), keyValuePair.getValue()); } } return result; }
3.68
morf_H2Dialect_addPrimaryKeyConstraintStatement
/** * @param table The table to add the constraint for * @param primaryKeyColumnNames List of the column names of the primary key * @return The statement */ private String addPrimaryKeyConstraintStatement(Table table, List<String> primaryKeyColumnNames) { return "ALTER TABLE " + schemaNamePrefix() + table.getName() + " ADD CONSTRAINT " + table.getName() + "_PK PRIMARY KEY (" + Joiner.on(", ").join(primaryKeyColumnNames) + ")"; }
3.68
hbase_Subprocedure_waitForLocallyCompleted
/** * Waits until the entire procedure has globally completed, or has been aborted. */ public void waitForLocallyCompleted() throws ForeignException, InterruptedException { Procedure.waitForLatch(releasedLocalBarrier, monitor, wakeFrequency, barrierName + ":completed"); }
3.68
framework_AbstractSplitPanel_setLocked
/** * Lock the SplitPanels position, disabling the user from dragging the split * handle. * * @param locked * Set <code>true</code> if locked, <code>false</code> otherwise. */ public void setLocked(boolean locked) { getSplitterState().locked = locked; }
3.68
flink_DeclarativeSlotManager_checkResourceRequirements
/** * Matches resource requirements against available resources. In a first round requirements are * matched against free slot, and any match results in a slot allocation. The remaining * unfulfilled requirements are matched against pending slots, allocating more workers if no * matching pending slot could be found. If the requirements for a job could not be fulfilled * then a notification is sent to the job master informing it as such. * * <p>Performance notes: At it's core this method loops, for each job, over all free/pending * slots for each required slot, trying to find a matching slot. One should generally go in with * the assumption that this runs in numberOfJobsRequiringResources * numberOfRequiredSlots * * numberOfFreeOrPendingSlots. This is especially important when dealing with pending slots, as * matches between requirements and pending slots are not persisted and recomputed on each call. * This may required further refinements in the future; e.g., persisting the matches between * requirements and pending slots, or not matching against pending slots at all. * * <p>When dealing with unspecific resource profiles (i.e., {@link ResourceProfile#ANY}/{@link * ResourceProfile#UNKNOWN}), then the number of free/pending slots is not relevant because we * only need exactly 1 comparison to determine whether a slot can be fulfilled or not, since * they are all the same anyway. * * <p>When dealing with specific resource profiles things can be a lot worse, with the classical * cases where either no matches are found, or only at the very end of the iteration. In the * absolute worst case, with J jobs, requiring R slots each with a unique resource profile such * each pair of these profiles is not matching, and S free/pending slots that don't fulfill any * requirement, then this method does a total of J*R*S resource profile comparisons. * * <p>DO NOT call this method directly. Use {@link #checkResourceRequirementsWithDelay()} * instead. */ private void checkResourceRequirements() { final Map<JobID, Collection<ResourceRequirement>> missingResources = resourceTracker.getMissingResources(); if (missingResources.isEmpty()) { taskExecutorManager.clearPendingTaskManagerSlots(); return; } final Map<JobID, ResourceCounter> unfulfilledRequirements = new LinkedHashMap<>(); for (Map.Entry<JobID, Collection<ResourceRequirement>> resourceRequirements : missingResources.entrySet()) { final JobID jobId = resourceRequirements.getKey(); final ResourceCounter unfulfilledJobRequirements = tryAllocateSlotsForJob(jobId, resourceRequirements.getValue()); if (!unfulfilledJobRequirements.isEmpty()) { unfulfilledRequirements.put(jobId, unfulfilledJobRequirements); } } if (unfulfilledRequirements.isEmpty()) { return; } ResourceCounter freePendingSlots = ResourceCounter.withResources( taskExecutorManager.getPendingTaskManagerSlots().stream() .collect( Collectors.groupingBy( PendingTaskManagerSlot::getResourceProfile, Collectors.summingInt(x -> 1)))); for (Map.Entry<JobID, ResourceCounter> unfulfilledRequirement : unfulfilledRequirements.entrySet()) { freePendingSlots = tryFulfillRequirementsWithPendingSlots( unfulfilledRequirement.getKey(), unfulfilledRequirement.getValue().getResourcesWithCount(), freePendingSlots); } if (!freePendingSlots.isEmpty()) { taskExecutorManager.removePendingTaskManagerSlots(freePendingSlots); } }
3.68
hbase_CellModel_setValue
/** * @param value the value to set */ public void setValue(byte[] value) { this.value = value; }
3.68
hbase_Bytes_bytesToVint
/** * Reads a zero-compressed encoded long from input buffer and returns it. * @param buffer buffer to convert * @return vint bytes as an integer. */ public static long bytesToVint(final byte[] buffer) { int offset = 0; byte firstByte = buffer[offset++]; int len = WritableUtils.decodeVIntSize(firstByte); if (len == 1) { return firstByte; } long i = 0; for (int idx = 0; idx < len - 1; idx++) { byte b = buffer[offset++]; i = i << 8; i = i | (b & 0xFF); } return (WritableUtils.isNegativeVInt(firstByte) ? ~i : i); }
3.68
framework_ColumnProperty_isRowIdentifier
/** * Returns whether or not this property is used as a row identifier. * * @return true if the property is a row identifier, false otherwise. */ public boolean isRowIdentifier() { return isPrimaryKey() || isVersionColumn(); }
3.68
flink_SolutionSetUpdateBarrierBroker_instance
/** @return singleton instance */ public static Broker<SolutionSetUpdateBarrier> instance() { return INSTANCE; }
3.68
morf_H2Dialect_getDeleteLimitSuffix
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getDeleteLimitSuffix(int) */ @Override protected Optional<String> getDeleteLimitSuffix(int limit) { return Optional.of("LIMIT " + limit); }
3.68
flink_ResourceManager_closeJobManagerConnection
/** * This method should be called by the framework once it detects that a currently registered job * manager has failed. * * @param jobId identifying the job whose leader shall be disconnected. * @param resourceRequirementHandling indicating how existing resource requirements for the * corresponding job should be handled * @param cause The exception which cause the JobManager failed. */ protected void closeJobManagerConnection( JobID jobId, ResourceRequirementHandling resourceRequirementHandling, Exception cause) { JobManagerRegistration jobManagerRegistration = jobManagerRegistrations.remove(jobId); if (jobManagerRegistration != null) { final ResourceID jobManagerResourceId = jobManagerRegistration.getJobManagerResourceID(); final JobMasterGateway jobMasterGateway = jobManagerRegistration.getJobManagerGateway(); final JobMasterId jobMasterId = jobManagerRegistration.getJobMasterId(); log.info( "Disconnect job manager {}@{} for job {} from the resource manager.", jobMasterId, jobMasterGateway.getAddress(), jobId); jobManagerHeartbeatManager.unmonitorTarget(jobManagerResourceId); jmResourceIdRegistrations.remove(jobManagerResourceId); blocklistHandler.deregisterBlocklistListener(jobMasterGateway); if (resourceRequirementHandling == ResourceRequirementHandling.CLEAR) { slotManager.clearResourceRequirements(jobId); } // tell the job manager about the disconnect jobMasterGateway.disconnectResourceManager(getFencingToken(), cause); } else { log.debug("There was no registered job manager for job {}.", jobId); } }
3.68
graphhopper_DAType_isInMemory
/** * @return true if data resides in the JVM heap. */ public boolean isInMemory() { return memRef == MemRef.HEAP; }
3.68
framework_TableWidthItemRemove_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "The table should retain the correct width on item remove and add."; }
3.68
morf_CompositeSchema_tableExists
/** * @see org.alfasoftware.morf.metadata.Schema#tableExists(java.lang.String) */ @Override public boolean tableExists(String name) { for (Schema schema : delegates) if (schema.tableExists(name)) return true; return false; }
3.68
hbase_TsvImporterTextMapper_doSetup
/** * Handles common parameter initialization that a subclass might want to leverage. */ protected void doSetup(Context context) { Configuration conf = context.getConfiguration(); // If a custom separator has been used, // decode it back from Base64 encoding. separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY); if (separator == null) { separator = ImportTsv.DEFAULT_SEPARATOR; } else { separator = new String(Base64.getDecoder().decode(separator)); } skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true); logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false); badLineCount = context.getCounter("ImportTsv", "Bad Lines"); }
3.68
hbase_QuotaFilter_getNamespaceFilter
/** Returns the Namespace filter regex */ public String getNamespaceFilter() { return namespaceRegex; }
3.68
MagicPlugin_BaseSpell_initialize
/** * Used internally to initialize the Spell, do not call. * * @param instance The spells instance */ @Override public void initialize(MageController instance) { this.controller = instance; }
3.68