Columns: name (string, length 12 to 178); code_snippet (string, length 8 to 36.5k); score (float64, 3.26 to 3.68)
morf_MergeStatement_merge
/** * Constructs a Merge Statement which either inserts or updates * a record into a table depending on whether a condition exists in * the table. * * @return Statement builder. */ public static MergeStatementBuilder merge() { return new MergeStatementBuilder(); }
3.68
morf_DrawIOGraphPrinter_print
/** * Prints the given graph to a String instance in a format suitable for importing into Draw.io, using the * default layout (HORIZONTAL_TREE). * @param graph - {@link MutableGraph} of {@link Node} */ default String print(PrintableGraph<Node> graph) { return print(graph, LayoutFormat.HORIZONTAL_TREE); }
3.68
hbase_AbstractFSWAL_trySetReadyForRolling
// return whether we have successfully set readyForRolling to true. private boolean trySetReadyForRolling() { // Check without holding lock first. Usually we will just return here. // waitingRoll is volatile and unackedAppends is only accessed inside event loop so it is safe to // check them outside the consumeLock. if (!waitingRoll(epochAndState) || !unackedAppends.isEmpty()) { return false; } consumeLock.lock(); try { // 1. a roll is requested // 2. all out-going entries have been acked (we have confirmed above). if (waitingRoll(epochAndState)) { readyForRolling = true; readyForRollingCond.signalAll(); return true; } else { return false; } } finally { consumeLock.unlock(); } }
3.68
flink_ProducerMergedPartitionFileIndex_getRegion
/** * Get the subpartition's {@link FixedSizeRegion} containing the specific buffer index. * * @param subpartitionId the subpartition id * @param bufferIndex the buffer index * @return the region containing the buffer index, or an empty Optional if the region is not found. */ Optional<FixedSizeRegion> getRegion( TieredStorageSubpartitionId subpartitionId, int bufferIndex) { synchronized (lock) { return indexCache.get(subpartitionId.getSubpartitionId(), bufferIndex); } }
3.68
hmily_PropertyName_of
/** * Converts a property key into a PropertyName object. * * @param name the property key * @return the resulting PropertyName, or EMPTY if the key is invalid */ public static PropertyName of(final String name) { return Optional.ofNullable(name) .filter(n -> n.length() > 1) .filter(n -> n.charAt(0) != NAME_JOIN && n.charAt(n.length() - 1) != NAME_JOIN) .map(n -> { List<String> elements = new ArrayList<>(16); process(n, (e, indexed) -> { String element = e.get(); if (element.length() > 0) { elements.add(element); } }); return new PropertyName(elements.toArray(new String[0])); }).orElse(EMPTY); }
3.68
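A minimal standalone sketch of the validation guards in the snippet above, assuming NAME_JOIN is the '.' separator (the class and constant names come from the snippet; everything else here is illustrative):

    import java.util.Optional;

    public class PropertyNameGuardSketch {
        private static final char NAME_JOIN = '.'; // assumption: hmily joins elements with '.'

        // Mirrors the filters in PropertyName.of: reject null, single-character keys,
        // and keys that start or end with the join character.
        static boolean isValidPropertyKey(String name) {
            return Optional.ofNullable(name)
                    .filter(n -> n.length() > 1)
                    .filter(n -> n.charAt(0) != NAME_JOIN && n.charAt(n.length() - 1) != NAME_JOIN)
                    .isPresent();
        }

        public static void main(String[] args) {
            System.out.println(isValidPropertyKey("hmily.config.server")); // true
            System.out.println(isValidPropertyKey(".hmily.config"));       // false: leading join char
            System.out.println(isValidPropertyKey("x"));                   // false: too short
        }
    }

Invalid keys fall through to EMPTY in the original rather than throwing, which keeps config binding lenient.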
morf_DeleteStatement_deepCopy
/** * @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation) */ @Override public DeleteStatementBuilder deepCopy(DeepCopyTransformation transformer) { return new DeleteStatementBuilder(this, transformer); }
3.68
framework_CellReference_getRowReference
/** * Gets the RowReference for this CellReference. * * @return the row reference */ protected RowReference<T> getRowReference() { return rowReference; }
3.68
framework_EditorImpl_doClose
/** * Handles clean up for closing the Editor. */ protected void doClose() { edited = null; for (Component c : columnFields.values()) { removeComponentFromGrid(c); } columnFields.clear(); getState().columnFields.clear(); }
3.68
hmily_HmilyAutoConfiguration_refererAnnotationBeanPostProcessor
/** * Creates the referer annotation bean post processor. * * @return the referer annotation bean post processor */ @Bean @ConditionalOnProperty(value = "hmily.support.rpc.annotation", havingValue = "true") public BeanPostProcessor refererAnnotationBeanPostProcessor() { return new RefererAnnotationBeanPostProcessor(); }
3.68
hadoop_RouterAuditLogger_start
/** * Adds the first key-val pair to the passed builder in the following format * key=value. */ static void start(Keys key, String value, StringBuilder b) { b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value); }
3.68
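A self-contained sketch of the audit-line format this helper starts. The Keys enum, the separators, and the add(...) companion below are hypothetical stand-ins for the real RouterAuditLogger and AuditConstants members:

    public class AuditLineSketch {
        static final String KEY_VAL_SEPARATOR = "=";  // assumption: mirrors AuditConstants
        static final String PAIR_SEPARATOR = ", ";    // hypothetical separator for later pairs

        enum Keys { USER, OPERATION, TARGET }

        // Mirrors start(): append the first key=value pair.
        static void start(Keys key, String value, StringBuilder b) {
            b.append(key.name()).append(KEY_VAL_SEPARATOR).append(value);
        }

        // A plausible companion for appending subsequent pairs.
        static void add(Keys key, String value, StringBuilder b) {
            b.append(PAIR_SEPARATOR).append(key.name()).append(KEY_VAL_SEPARATOR).append(value);
        }

        public static void main(String[] args) {
            StringBuilder b = new StringBuilder();
            start(Keys.USER, "alice", b);
            add(Keys.OPERATION, "getClusterMetrics", b);
            System.out.println(b); // USER=alice, OPERATION=getClusterMetrics
        }
    }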
flink_PartitionSpec_getFieldIndices
/** Gets the field indices of all fields in the input. */ @JsonIgnore public int[] getFieldIndices() { return fields; }
3.68
flink_SSLUtils_createRestNettySSLContext
/** * Creates an SSL context for the external REST SSL. If mutual authentication is configured the * client and the server side configuration are identical. */ @Nullable public static SslContext createRestNettySSLContext( Configuration config, boolean clientMode, ClientAuth clientAuth, SslProvider provider) throws Exception { checkNotNull(config, "config"); if (!SecurityOptions.isRestSSLEnabled(config)) { return null; } String[] sslProtocols = getEnabledProtocols(config); List<String> ciphers = Arrays.asList(getEnabledCipherSuites(config)); final SslContextBuilder sslContextBuilder; if (clientMode) { sslContextBuilder = SslContextBuilder.forClient(); if (clientAuth != ClientAuth.NONE) { KeyManagerFactory kmf = getKeyManagerFactory(config, false, provider); sslContextBuilder.keyManager(kmf); } } else { KeyManagerFactory kmf = getKeyManagerFactory(config, false, provider); sslContextBuilder = SslContextBuilder.forServer(kmf); } if (clientMode || clientAuth != ClientAuth.NONE) { Optional<TrustManagerFactory> tmf = getTrustManagerFactory(config, false); tmf.map( // Use specific ciphers and protocols if SSL is configured with self-signed // certificates (user-supplied truststore) tm -> sslContextBuilder .trustManager(tm) .protocols(sslProtocols) .ciphers(ciphers) .clientAuth(clientAuth)); } return sslContextBuilder.sslProvider(provider).build(); }
3.68
hbase_BloomFilterFactory_isDeleteFamilyBloomEnabled
/** Returns true if Delete Family Bloom filters are enabled in the given configuration */ public static boolean isDeleteFamilyBloomEnabled(Configuration conf) { return conf.getBoolean(IO_STOREFILE_DELETEFAMILY_BLOOM_ENABLED, true); }
3.68
flink_DefaultConfigurableOptionsFactory_setInternal
/** * Sets the configuration with (key, value) if the key is predefined, otherwise throws * IllegalArgumentException. * * @param key The configuration key; if the key is not predefined, an IllegalArgumentException * is thrown. * @param value The configuration value. */ private void setInternal(String key, String value) { Preconditions.checkArgument( value != null && !value.isEmpty(), "The configuration value must not be empty."); configuredOptions.put(key, value); }
3.68
flink_StreamGraphHasherV2_generateUserSpecifiedHash
/** Generates a hash from a user-specified ID. */ private byte[] generateUserSpecifiedHash(StreamNode node, Hasher hasher) { hasher.putString(node.getTransformationUID(), Charset.forName("UTF-8")); return hasher.hash().asBytes(); }
3.68
flink_KeyGroupPartitioner_reportKeyGroupOfElementAtIndex
/** * This method reports in the bookkeeping data that the element at the given index belongs to * the given key-group. */ protected void reportKeyGroupOfElementAtIndex(int index, int keyGroup) { final int keyGroupIndex = keyGroup - firstKeyGroup; elementKeyGroups[index] = keyGroupIndex; ++counterHistogram[keyGroupIndex]; }
3.68
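A runnable sketch of the bookkeeping this method performs, with made-up key-group numbers; firstKeyGroup normalizes global key-group ids into zero-based histogram slots:

    import java.util.Arrays;

    public class KeyGroupHistogramSketch {
        public static void main(String[] args) {
            int firstKeyGroup = 32;                 // assumed start of this operator's range
            int numKeyGroups = 4;                   // assumed range size
            int[] elementKeyGroups = new int[6];    // per-element key-group index
            int[] counterHistogram = new int[numKeyGroups]; // element count per key-group

            int[] incoming = {32, 33, 33, 35, 32, 34};
            for (int i = 0; i < incoming.length; i++) {
                // Mirrors reportKeyGroupOfElementAtIndex.
                int keyGroupIndex = incoming[i] - firstKeyGroup;
                elementKeyGroups[i] = keyGroupIndex;
                ++counterHistogram[keyGroupIndex];
            }
            System.out.println(Arrays.toString(counterHistogram)); // [2, 2, 1, 1]
        }
    }

The histogram later lets the partitioner compute per-key-group output offsets in a single pass, much like a counting sort.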
framework_AriaHelper_bindCaption
/** * Binds a caption (label in HTML speak) to the form element as required by * the WAI-ARIA specification. * * @param widget * Widget that should be bound to the caption * @param captionElement * Element of the caption to bind */ public static void bindCaption(Widget widget, Element captionElement) { assert widget != null : "Valid Widget required"; if (widget instanceof HandlesAriaCaption) { // Let the widget handle special cases itself if (captionElement == null) { ((HandlesAriaCaption) widget).bindAriaCaption(null); } else { ensureHasId(captionElement); ((HandlesAriaCaption) widget) .bindAriaCaption(DOM.asOld(captionElement)); } } else if (captionElement != null) { // Handle the default case ensureHasId(captionElement); String ownerId = ensureHasId(widget.getElement()); captionElement.setAttribute("for", ownerId); Roles.getTextboxRole().setAriaLabelledbyProperty( widget.getElement(), Id.of(captionElement)); } else { clearCaption(widget); } }
3.68
pulsar_SinkContext_seek
/** * Reset the subscription associated with this topic and partition to a specific message id. * * @param topic - topic name * @param partition - partition id (0 for non-partitioned topics) * @param messageId the message id to reset to * @throws PulsarClientException if the reset fails */ default void seek(String topic, int partition, MessageId messageId) throws PulsarClientException { throw new UnsupportedOperationException("not implemented"); }
3.68
hbase_HBaseTestingUtility_enableDebug
/** * Switches the logger for the given class to DEBUG level. * @param clazz The class for which to switch to debug logging. * @deprecated In 2.3.0, will be removed in 4.0.0. Only supports changing the log level on log4j * now, as HBase only uses log4j. You should do this on your own; since you know which * log framework you are using, setting the log level to debug is easy. */ @Deprecated public void enableDebug(Class<?> clazz) { Log4jUtils.enableDebug(clazz); }
3.68
hibernate-validator_MethodInheritanceTree_hasOverriddenMethods
/** * Checks if there are any overridden methods in the hierarchy. * * @return {@code true} if there are any overridden methods found, {@code false} otherwise */ public boolean hasOverriddenMethods() { return !overriddenMethods.isEmpty(); }
3.68
hadoop_DoubleValueSum_getCombinerOutput
/** * @return an array of one element. The element is a string * representation of the aggregated value. The return value is * expected to be used by a combiner. */ public ArrayList<String> getCombinerOutput() { ArrayList<String> retv = new ArrayList<String>(1); retv.add("" + sum); return retv; }
3.68
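A small round-trip sketch showing why a string representation is enough here: the combiner on the other side can recover the partial sum with Double.parseDouble (the field name sum is from the snippet; the rest is illustrative):

    import java.util.ArrayList;

    public class CombinerOutputSketch {
        public static void main(String[] args) {
            double sum = 12.5 + 7.5; // the aggregated value
            // Mirrors getCombinerOutput: a single-element string representation.
            ArrayList<String> out = new ArrayList<>(1);
            out.add("" + sum);
            // The combiner can recover the partial sum and keep aggregating.
            double recovered = Double.parseDouble(out.get(0));
            System.out.println(recovered); // 20.0
        }
    }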
flink_FlinkContainersSettings_flinkDistLocation
/** * Sets the {@code flinkDistLocation} and returns a reference to this Builder enabling * method chaining. * * @param flinkDistLocation The {@code flinkDistLocation} to set. * @return A reference to this Builder. */ public Builder flinkDistLocation(String flinkDistLocation) { this.flinkDistLocation = flinkDistLocation; this.buildFromFlinkDist = true; return this; }
3.68
pulsar_SecurityUtility_processConscryptTrustManagers
/*** * Conscrypt TrustManager instances will be configured to use the Pulsar {@link TlsHostnameVerifier} * class. * This method is used as a workaround for https://github.com/google/conscrypt/issues/1015 * when Conscrypt / OpenSSL is used as the TLS security provider. * * @param trustManagers the array of TrustManager instances to process. * @return same instance passed as parameter */ @InterfaceAudience.Private public static TrustManager[] processConscryptTrustManagers(TrustManager[] trustManagers) { for (TrustManager trustManager : trustManagers) { processConscryptTrustManager(trustManager); } return trustManagers; }
3.68
hbase_KeyValueUtil_copyKeyToNewByteBuffer
/** * The position will be set to the beginning of the new ByteBuffer. * @return the ByteBuffer containing the key part of the cell */ public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) { byte[] bytes = new byte[keyLength(cell)]; appendKeyTo(cell, bytes, 0); ByteBuffer buffer = ByteBuffer.wrap(bytes); return buffer; }
3.68
druid_PoolUpdater_removeDataSources
/** * Remove unused DataSources. */ public void removeDataSources() { if (nodesToDel == null || nodesToDel.isEmpty()) { return; } try { lock.lock(); Map<String, DataSource> map = highAvailableDataSource.getDataSourceMap(); Set<String> copySet = new HashSet<String>(nodesToDel); for (String nodeName : copySet) { LOG.info("Start removing Node " + nodeName + "."); if (!map.containsKey(nodeName)) { LOG.info("Node " + nodeName + " does NOT exist in the map."); cancelBlacklistNode(nodeName); continue; } DataSource ds = map.get(nodeName); if (ds instanceof DruidDataSource) { DruidDataSource dds = (DruidDataSource) ds; int activeCount = dds.getActiveCount(); // CAUTION: activeCount MAY have changed! if (activeCount > 0) { LOG.warn("Node " + nodeName + " is still running [activeCount=" + activeCount + "], try next time."); continue; } else { LOG.info("Close Node " + nodeName + " and remove it."); try { dds.close(); } catch (Exception e) { LOG.error("Exception occurred while closing Node " + nodeName + ", just remove it.", e); } } } map.remove(nodeName); // Remove the node directly if it is NOT a DruidDataSource. cancelBlacklistNode(nodeName); } } catch (Exception e) { LOG.error("Exception occurred while removing DataSources.", e); } finally { lock.unlock(); } }
3.68
framework_VTextField_valueChange
/** * Called when the field value might have changed and/or the field was * blurred. These are combined so the blur event is sent in the same batch * as a possible value change event (these are often connected). * * @param blurred * true if the field was blurred */ public void valueChange(boolean blurred) { if (client != null && paintableId != null) { boolean sendBlurEvent = false; boolean sendValueChange = false; if (blurred && client.hasEventListeners(this, EventId.BLUR)) { sendBlurEvent = true; client.updateVariable(paintableId, EventId.BLUR, "", false); } String newText = prompting ? "" : getText(); if (newText != null && !newText.equals(valueBeforeEdit)) { sendValueChange = immediate; client.updateVariable(paintableId, "text", newText, false); valueBeforeEdit = newText; valueBeforeEditIsSynced = true; } /* * also send cursor position, no public api yet but for easier * extension */ updateCursorPosition(); if (sendBlurEvent || sendValueChange) { /* * Avoid sending text change event as we will simulate it on the * server side before value change events. */ textChangeEventTrigger.cancel(); scheduled = false; client.sendPendingVariableChanges(); } } }
3.68
graphhopper_Helper_staticHashCode
/** * Produces a static hashcode for a string that is platform independent and still compatible with the default * of OpenJDK. Do not use for performance critical applications. * * @see String#hashCode() */ public static int staticHashCode(String str) { int len = str.length(); int val = 0; for (int idx = 0; idx < len; ++idx) { val = 31 * val + str.charAt(idx); } return val; }
3.68
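A quick check of the compatibility claim: the loop is Horner's rule for the same 31-based polynomial that String.hashCode uses, so on OpenJDK the two values agree:

    public class StaticHashCodeSketch {
        // Same polynomial as the snippet: h = 31 * h + charAt(i).
        static int staticHashCode(String str) {
            int val = 0;
            for (int idx = 0; idx < str.length(); ++idx) {
                val = 31 * val + str.charAt(idx);
            }
            return val;
        }

        public static void main(String[] args) {
            String s = "graphhopper";
            System.out.println(staticHashCode(s) == s.hashCode()); // true on OpenJDK
        }
    }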
hbase_HMaster_constructMaster
/** * Utility for constructing an instance of the passed HMaster class. * @return HMaster instance. */ public static HMaster constructMaster(Class<? extends HMaster> masterClass, final Configuration conf) { try { Constructor<? extends HMaster> c = masterClass.getConstructor(Configuration.class); return c.newInstance(conf); } catch (Exception e) { Throwable error = e; if ( e instanceof InvocationTargetException && ((InvocationTargetException) e).getTargetException() != null ) { error = ((InvocationTargetException) e).getTargetException(); } throw new RuntimeException("Failed construction of Master: " + masterClass.toString() + ". ", error); } }
3.68
hadoop_RouterFedBalance_main
/** * Main function of the RouterFedBalance program. Parses the input arguments * and invokes the RouterFedBalance::run() method, via the ToolRunner. * @param argv Command-line arguments sent to RouterFedBalance. */ public static void main(String[] argv) { Configuration conf = getDefaultConf(); RouterFedBalance fedBalance = new RouterFedBalance(); fedBalance.setConf(conf); int exitCode; try { exitCode = ToolRunner.run(fedBalance, argv); } catch (Exception e) { LOG.warn("Couldn't complete RouterFedBalance operation.", e); exitCode = -1; } System.exit(exitCode); }
3.68
framework_VColorPickerArea_setColor
/** * Sets the color for the area. * * @param color the color to set */ public void setColor(String color) { this.color = color; }
3.68
hadoop_LoggedJob_setJobProperties
/** * Set the configuration properties of the job. */ void setJobProperties(Properties conf) { this.jobProperties = new JobProperties(conf); }
3.68
hbase_TraceUtil_createRemoteSpan
/** * Create a span whose parent is from remote, i.e., passed through rpc. * <p> * We will set the kind of the returned span to {@link SpanKind#SERVER}, as this should be the top * most span at server side. */ public static Span createRemoteSpan(String name, Context ctx) { return getGlobalTracer().spanBuilder(name).setParent(ctx).setSpanKind(SpanKind.SERVER) .startSpan(); }
3.68
rocketmq-connect_Worker_allocatedConnectors
/** * Get the names of the connectors allocated to this worker. * * @return the allocated connector names */ public Set<String> allocatedConnectors() { return new HashSet<>(connectors.keySet()); }
3.68
hadoop_RBFMetrics_getNameserviceAggregatedInt
/** * Get the aggregated value for a method for all nameservices. * @param f Method reference * @return Aggregated integer. */ private int getNameserviceAggregatedInt(ToIntFunction<MembershipStats> f) { try { return getActiveNamenodeRegistrations().stream() .map(MembershipState::getStats) .collect(Collectors.summingInt(f)); } catch (IOException e) { LOG.error("Unable to extract metrics: {}", e.getMessage()); return 0; } }
3.68
hudi_HoodieTableConfig_getArchivelogFolder
/** * Get the relative path of archive log folder under metafolder, for this table. */ public String getArchivelogFolder() { return getStringOrDefault(ARCHIVELOG_FOLDER); }
3.68
hadoop_S3AReadOpContext_withChangeDetectionPolicy
/** * Set builder value. * @param value new value * @return the builder */ public S3AReadOpContext withChangeDetectionPolicy( final ChangeDetectionPolicy value) { changeDetectionPolicy = value; return this; }
3.68
framework_DragSourceExtension_setDragData
/** * Set server side drag data. This data is available in the drop event and * can be used to transfer data between drag source and drop target if they * are in the same UI. * * @param data * Data to transfer to drop event. */ public void setDragData(Object data) { dragData = data; }
3.68
hbase_TableRecordReaderImpl_restart
/** * Restart from survivable exceptions by creating a new scanner. */ public void restart(byte[] firstRow) throws IOException { Scan currentScan; if ((endRow != null) && (endRow.length > 0)) { if (trrRowFilter != null) { Scan scan = new Scan().withStartRow(firstRow).withStopRow(endRow); TableInputFormat.addColumns(scan, trrInputColumns); scan.setFilter(trrRowFilter); scan.setCacheBlocks(false); this.scanner = this.htable.getScanner(scan); currentScan = scan; } else { LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", endRow: " + Bytes.toStringBinary(endRow)); Scan scan = new Scan().withStartRow(firstRow).withStopRow(endRow); TableInputFormat.addColumns(scan, trrInputColumns); this.scanner = this.htable.getScanner(scan); currentScan = scan; } } else { LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", no endRow"); Scan scan = new Scan().withStartRow(firstRow); TableInputFormat.addColumns(scan, trrInputColumns); scan.setFilter(trrRowFilter); this.scanner = this.htable.getScanner(scan); currentScan = scan; } if (logScannerActivity) { LOG.info("Current scan=" + currentScan.toString()); timestamp = EnvironmentEdgeManager.currentTime(); rowcount = 0; } }
3.68
flink_HiveServer2Endpoint_GetQueryId
// CHECKSTYLE.OFF: MethodName /** To be compatible with Hive3, add a default implementation. */ public TGetQueryIdResp GetQueryId(TGetQueryIdReq tGetQueryIdReq) throws TException { throw new TException( new UnsupportedOperationException( String.format(UNSUPPORTED_ERROR_MESSAGE, "GetQueryId"))); }
3.68
hbase_MetaTableAccessor_getTableState
/** * Fetch table state for given table from META table * @param conn connection to use * @param tableName table to fetch state for */ @Nullable public static TableState getTableState(Connection conn, TableName tableName) throws IOException { if (tableName.equals(TableName.META_TABLE_NAME)) { return new TableState(tableName, TableState.State.ENABLED); } Table metaHTable = getMetaHTable(conn); Get get = new Get(tableName.getName()).addColumn(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER); Result result = metaHTable.get(get); return CatalogFamilyFormat.getTableState(result); }
3.68
flink_LongHybridHashTable_tryDenseMode
/** After build end, try to use dense mode. */ private void tryDenseMode() { // if some partitions have spilled to disk, always use hash mode if (numSpillFiles != 0) { return; } long minKey = Long.MAX_VALUE; long maxKey = Long.MIN_VALUE; long recordCount = 0; for (LongHashPartition p : this.partitionsBeingBuilt) { long partitionRecords = p.getBuildSideRecordCount(); recordCount += partitionRecords; if (partitionRecords > 0) { if (p.getMinKey() < minKey) { minKey = p.getMinKey(); } if (p.getMaxKey() > maxKey) { maxKey = p.getMaxKey(); } } } if (buildSpillRetBufferNumbers != 0) { throw new RuntimeException( "buildSpillRetBufferNumbers should be 0: " + buildSpillRetBufferNumbers); } long range = maxKey - minKey + 1; // 1. a negative range means the span is too big and has overflowed // 2. a zero range can happen when the max is Long.MAX_VALUE and the min is Long.MIN_VALUE, // so we should not use dense mode then either. if (range > 0 && (range <= recordCount * 4 || range <= segmentSize / 8)) { // try to request memory. int buffers = (int) Math.ceil(((double) (range * 8)) / segmentSize); // TODO MemoryManager needs to support flexible larger segment, so that the index area // of the build side is placed on a segment to avoid the overhead of addressing. MemorySegment[] denseBuckets = new MemorySegment[buffers]; for (int i = 0; i < buffers; i++) { MemorySegment seg = getNextBuffer(); if (seg == null) { returnAll(Arrays.asList(denseBuckets)); return; } denseBuckets[i] = seg; for (int j = 0; j < segmentSize; j += 8) { seg.putLong(j, INVALID_ADDRESS); } } denseMode = true; LOG.info("LongHybridHashTable: Use dense mode!"); this.minKey = minKey; this.maxKey = maxKey; List<MemorySegment> segments = new ArrayList<>(); buildSpillReturnBuffers.drainTo(segments); returnAll(segments); ArrayList<MemorySegment> dataBuffers = new ArrayList<>(); long addressOffset = 0; for (LongHashPartition p : this.partitionsBeingBuilt) { p.iteratorToDenseBucket(denseBuckets, addressOffset, minKey); p.updateDenseAddressOffset(addressOffset); dataBuffers.addAll(Arrays.asList(p.getPartitionBuffers())); addressOffset += (p.getPartitionBuffers().length << segmentSizeBits); returnAll(Arrays.asList(p.getBuckets())); } this.denseBuckets = denseBuckets; this.densePartition = new LongHashPartition( this, buildSideSerializer, dataBuffers.toArray(new MemorySegment[0])); freeCurrent(); } }
3.68
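The decision arithmetic in isolation, with made-up numbers (segmentSize here is an assumption; the condition and the segment-count formula are from the snippet). Dense mode spends 8 bytes per key in [minKey, maxKey], so it only pays off when the key space is dense relative to the record count, or small enough to fit one segment of longs:

    public class DenseModeDecisionSketch {
        public static void main(String[] args) {
            long segmentSize = 32 * 1024; // assumption: 32 KB memory segments
            long minKey = 1_000, maxKey = 9_000, recordCount = 5_000;

            long range = maxKey - minKey + 1; // 8001
            boolean dense = range > 0
                    && (range <= recordCount * 4 || range <= segmentSize / 8);
            int buffers = (int) Math.ceil(((double) (range * 8)) / segmentSize);
            System.out.println("dense=" + dense + ", segmentsNeeded=" + buffers);
            // dense=true, segmentsNeeded=2
        }
    }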
druid_SQLBinaryOpExpr_getMergedList
/** * only for parameterized output * * @return the merged list */ public List<SQLObject> getMergedList() { return mergedList; }
3.68
hbase_SchemaLocking_getLockResource
/** * @return {@link LockedResource} for resource of specified type & name. null if resource is not * locked. */ LockedResource getLockResource(LockedResourceType resourceType, String resourceName) { LockAndQueue queue; switch (resourceType) { case SERVER: queue = serverLocks.get(ServerName.valueOf(resourceName)); break; case NAMESPACE: queue = namespaceLocks.get(resourceName); break; case TABLE: queue = tableLocks.get(TableName.valueOf(resourceName)); break; case REGION: queue = regionLocks.get(resourceName); break; case PEER: queue = peerLocks.get(resourceName); break; case META: queue = metaLock; break; case GLOBAL: queue = globalLocks.get(resourceName); break; default: queue = null; break; } return queue != null ? createLockedResource(resourceType, resourceName, queue) : null; }
3.68
flink_RichInputFormat_openInputFormat
/** * Opens this InputFormat instance. This method is called once per parallel instance. Resources * should be allocated in this method. (e.g. database connections, cache, etc.) * * @see InputFormat * @throws IOException in case allocating the resources failed. */ @PublicEvolving public void openInputFormat() throws IOException { // do nothing here, just for subclasses }
3.68
hbase_AbstractFSWAL_doReplaceWriter
/** * Notice that you need to clear the {@link #rollRequested} flag in this method, as the new writer * will begin to work before returning from this method. If we clear the flag after returning from * this call, we may miss a roll request. The implementation class should choose a proper place to * clear the {@link #rollRequested} flag so we do not miss a roll request, typically before you * start writing to the new writer. */ protected void doReplaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException { Preconditions.checkNotNull(nextWriter); waitForSafePoint(); /** * For {@link FSHLog}, this is where we shut down {@link FSHLog.SyncRunner}. */ doCleanUpResources(); // we will call rollWriter in init method, where we want to create the first writer and // obviously the previous writer is null, so here we need this null check. The reason we must call // logRollAndSetupWalProps before closeWriter is that we will call markClosedAndClean after // closing the writer asynchronously, so we need to make sure the WALProps is put into // walFile2Props before we call markClosedAndClean if (writer != null) { long oldFileLen = writer.getLength(); logRollAndSetupWalProps(oldPath, newPath, oldFileLen); closeWriter(writer, oldPath); } else { logRollAndSetupWalProps(oldPath, newPath, 0); } this.writer = nextWriter; /** * This is used for {@link AsyncFSWAL} and {@link FSHLog} to set the underlying filesystem * output after the writer is replaced. */ onWriterReplaced(nextWriter); this.fileLengthAtLastSync = nextWriter.getLength(); this.highestProcessedAppendTxidAtLastSync = 0L; consumeLock.lock(); try { consumerScheduled.set(true); int currentEpoch = epochAndState >>> 2; int nextEpoch = currentEpoch == MAX_EPOCH ? 0 : currentEpoch + 1; // set a new epoch and also clear waitingRoll and writerBroken this.epochAndState = nextEpoch << 2; // Reset rollRequested status rollRequested.set(false); consumeExecutor.execute(consumer); } finally { consumeLock.unlock(); } }
3.68
hbase_StorageClusterStatusModel_getStorefileSizeMB
/** Returns the total size of store files, in MB */ @XmlAttribute public int getStorefileSizeMB() { return storefileSizeMB; }
3.68
AreaShop_Utils_isDouble
/** * Check if a string is a double. * @param input The input * @return true if the input is a double, otherwise false */ @SuppressWarnings("ResultOfMethodCallIgnored") public static boolean isDouble(String input) { try { Double.parseDouble(input); return true; } catch(NumberFormatException e) { return false; } }
3.68
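Worth noting what Double.parseDouble actually accepts, since the helper inherits its lenient grammar; a runnable sketch:

    public class IsDoubleSketch {
        static boolean isDouble(String input) {
            try {
                Double.parseDouble(input);
                return true;
            } catch (NumberFormatException e) {
                return false;
            }
        }

        public static void main(String[] args) {
            System.out.println(isDouble("3.14"));     // true
            System.out.println(isDouble("1e-5"));     // true: scientific notation
            System.out.println(isDouble("Infinity")); // true, which callers may not expect
            System.out.println(isDouble("1,5"));      // false: comma is not a decimal point
        }
    }

Also note the original throws NullPointerException, not NumberFormatException, on null input, so null-checking stays the caller's job.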
framework_TabSheet_removeTab
/** * Removes a {@link Tab} and the component associated with it, as previously * added with {@link #addTab(Component)}, * {@link #addTab(Component, String, Resource)} or * {@link #addComponent(Component)}. * <p> * If the tab was selected, the first eligible (visible and enabled) * remaining tab is selected. * </p> * * @see #addTab(Component) * @see #addTab(Component, String, Resource) * @see #addComponent(Component) * @see #removeComponent(Component) * @param tab * the Tab to remove */ public void removeTab(Tab tab) { removeComponent(tab.getComponent()); }
3.68
hadoop_EncryptionSecrets_toString
/** * String function returns the encryption mode but not any other * secrets. * @return a string safe for logging. */ @Override public String toString() { return S3AEncryptionMethods.NONE.equals(encryptionMethod) ? "(no encryption)" : encryptionMethod.getMethod(); }
3.68
flink_FlinkImageBuilder_setFlinkHome
/** * Sets flink home. * * @param flinkHome The flink home. * @return A reference to this Builder enabling method chaining. */ public FlinkImageBuilder setFlinkHome(String flinkHome) { this.flinkHome = flinkHome; return this; }
3.68
hbase_TableHFileArchiveTracker_clearTables
/** * Remove the currently archived tables. * <p> * Does some intelligent checking to make sure we don't prematurely create an archive tracker. */ private void clearTables() { getMonitor().clearArchive(); }
3.68
hbase_VersionModel_getJerseyVersion
/** Returns the version of the embedded Jersey framework */ @XmlAttribute(name = "Jersey") public String getJerseyVersion() { return jerseyVersion; }
3.68
hadoop_OBSListing_hasNext
/** * Declare that the iterator has data if it is either the initial * iteration or a later one and the last listing obtained was * incomplete. */ @Override public boolean hasNext() { return firstListing || objects.isTruncated(); }
3.68
hibernate-validator_AnnotationTypeMemberCheck_validateWildcardBounds
/** * Returns true, if the given type mirror is a wildcard type with the given extends and super bounds, false otherwise. * * @param type The type to check. * @param expectedExtendsBound A mirror representing the expected extends bound. * @param expectedSuperBound A mirror representing the expected super bound. * * @return True, if the given type mirror is a wildcard type with the given extends and super bounds, false otherwise. */ private boolean validateWildcardBounds(TypeMirror type, final TypeMirror expectedExtendsBound, final TypeMirror expectedSuperBound) { Boolean theValue = type.accept( new TypeKindVisitor8<Boolean, Void>() { @Override public Boolean visitWildcard(WildcardType t, Void p) { boolean extendsBoundMatches = ( t.getExtendsBound() == null ? expectedExtendsBound == null : expectedExtendsBound != null && typeUtils .isSameType( t.getExtendsBound(), expectedExtendsBound ) ); boolean superBoundMatches = ( t.getSuperBound() == null ? expectedSuperBound == null : expectedSuperBound != null && typeUtils .isSameType( t.getSuperBound(), expectedSuperBound ) ); return extendsBoundMatches && superBoundMatches; } }, null ); return Boolean.TRUE.equals( theValue ); }
3.68
flink_TableDescriptor_build
/** Returns an immutable instance of {@link TableDescriptor}. */ public TableDescriptor build() { return new TableDescriptor(schema, options, partitionKeys, comment); }
3.68
querydsl_PathMetadataFactory_forMapAccess
/** * Create a new PathMetadata instance for key-based map access * * @param parent parent path * @param key key for map access * @return map access path */ public static <KT> PathMetadata forMapAccess(Path<?> parent, KT key) { return new PathMetadata(parent, key, PathType.MAPVALUE_CONSTANT); }
3.68
flink_BufferBuilder_appendAndCommit
/** Same as {@link #append(ByteBuffer)} but additionally {@link #commit()} the appending. */ public int appendAndCommit(ByteBuffer source) { int writtenBytes = append(source); commit(); return writtenBytes; }
3.68
querydsl_GeometryExpression_crosses
/** * Returns 1 (TRUE) if this geometric object “spatially crosses” anotherGeometry. * * @param geometry other geometry * @return true, if crosses */ public BooleanExpression crosses(Expression<? extends Geometry> geometry) { return Expressions.booleanOperation(SpatialOps.CROSSES, mixin, geometry); }
3.68
morf_SpreadsheetDataSetProducer_getSchema
/** * {@inheritDoc} * * @see org.alfasoftware.morf.dataset.DataSetProducer#getSchema() */ @Override public Schema getSchema() { return new Schema() { @Override public Table getTable(String name) { throw new UnsupportedOperationException("Cannot get the metadata of a table for a spreadsheet"); } @Override public boolean isEmptyDatabase() { return tables.isEmpty(); } @Override public boolean tableExists(String name) { return tables.containsKey(name); } @Override public Collection<String> tableNames() { return tables.keySet(); } @Override public Collection<Table> tables() { throw new UnsupportedOperationException("Cannot get the metadata of a table for a spreadsheet"); } @Override public boolean viewExists(String name) { return false; } @Override public View getView(String name) { throw new IllegalArgumentException("Invalid view [" + name + "]. Views are not supported in spreadsheets"); } @Override public Collection<String> viewNames() { return Collections.emptySet(); } @Override public Collection<View> views() { return Collections.emptySet(); } }; }
3.68
flink_Path_depth
/** * Returns the number of elements in this path. * * @return the number of elements in this path */ public int depth() { String path = uri.getPath(); int depth = 0; int slash = path.length() == 1 && path.charAt(0) == '/' ? -1 : 0; while (slash != -1) { depth++; slash = path.indexOf(SEPARATOR, slash + 1); } return depth; }
3.68
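The counting loop extracted into a standalone sketch; the root path "/" is special-cased to depth 0, and every further separator found advances the depth:

    public class PathDepthSketch {
        static final char SEPARATOR = '/'; // assumption: same value as the Path.SEPARATOR constant

        static int depth(String path) {
            int depth = 0;
            int slash = path.length() == 1 && path.charAt(0) == '/' ? -1 : 0;
            while (slash != -1) {
                depth++;
                slash = path.indexOf(SEPARATOR, slash + 1);
            }
            return depth;
        }

        public static void main(String[] args) {
            System.out.println(depth("/"));      // 0
            System.out.println(depth("/a"));     // 1
            System.out.println(depth("/a/b/c")); // 3
        }
    }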
hadoop_ResourceCalculatorProcessTree_getVirtualMemorySize
/** * Get the virtual memory used by all the processes in the * process-tree that are older than the passed in age. * * @param olderThanAge processes above this age are included in the * memory addition * @return virtual memory used by the process-tree in bytes for * processes older than the specified age, {@link #UNAVAILABLE} if it * cannot be calculated. */ public long getVirtualMemorySize(int olderThanAge) { return UNAVAILABLE; }
3.68
hbase_FileSystemUtilizationChore_getPeriod
/** * Extracts the period for the chore from the configuration. * @param conf The configuration object. * @return The configured chore period or the default value. */ static int getPeriod(Configuration conf) { return conf.getInt(FS_UTILIZATION_CHORE_PERIOD_KEY, FS_UTILIZATION_CHORE_PERIOD_DEFAULT); }
3.68
hbase_RegionNormalizerFactory_getRegionNormalizer
/** * Create a region normalizer from the given conf. * @param conf configuration * @return {@link RegionNormalizer} implementation */ private static RegionNormalizer getRegionNormalizer(Configuration conf) { // Create instance of Region Normalizer Class<? extends RegionNormalizer> balancerKlass = conf.getClass(HConstants.HBASE_MASTER_NORMALIZER_CLASS, SimpleRegionNormalizer.class, RegionNormalizer.class); return ReflectionUtils.newInstance(balancerKlass, conf); }
3.68
framework_SelectorPredicate_isWildcard
/** * @return the wildcard */ public boolean isWildcard() { return wildcard; }
3.68
graphhopper_Entity_writeDoubleField
/** * Write a double value, with precision 10^-7. NaN is written as "". */ protected void writeDoubleField (double val) throws IOException { // NaN's represent missing values if (Double.isNaN(val)) writeStringField(""); // control file size: don't use unnecessary precision // This is usually used for coordinates; one ten-millionth of a degree at the equator is 1.1cm, // and smaller elsewhere on earth, plenty precise enough. // On Jupiter, however, it's a different story. // Use the US locale so that . is used as the decimal separator else writeStringField(String.format(Locale.US, "%.7f", val)); }
3.68
dubbo_Version_isSupportResponseAttachment
/** * Dubbo 2.x protocol version numbers are limited to 2.0.2/2000200 ~ 2.0.99/2009900; other versions are considered * invalid or not from an official release. * * @param version the protocol version. * @return true if the protocol version supports response attachments */ public static boolean isSupportResponseAttachment(String version) { if (StringUtils.isEmpty(version)) { return false; } int iVersion = getIntVersion(version); return iVersion >= LOWEST_VERSION_FOR_RESPONSE_ATTACHMENT && iVersion <= HIGHEST_PROTOCOL_VERSION; }
3.68
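A sketch of the integer encoding implied by the 2.0.2/2000200 pairing in the Javadoc; getIntVersion itself is not shown, so this mapping is an assumption read off those examples:

    public class VersionRangeSketch {
        static final int LOWEST = 2000200;  // 2.0.2
        static final int HIGHEST = 2009900; // 2.0.99

        // Assumed encoding: major * 1_000_000 + minor * 10_000 + micro * 100.
        static int toIntVersion(String v) {
            String[] parts = v.split("\\.");
            return Integer.parseInt(parts[0]) * 1_000_000
                    + Integer.parseInt(parts[1]) * 10_000
                    + Integer.parseInt(parts[2]) * 100;
        }

        public static void main(String[] args) {
            int v = toIntVersion("2.0.5"); // 2000500
            System.out.println(v >= LOWEST && v <= HIGHEST); // true
        }
    }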
flink_CheckpointConfig_enableUnalignedCheckpoints
/** * Enables unaligned checkpoints, which greatly reduce checkpointing times under backpressure. * * <p>Unaligned checkpoints contain data stored in buffers as part of the checkpoint state, * which allows checkpoint barriers to overtake these buffers. Thus, the checkpoint duration * becomes independent of the current throughput as checkpoint barriers are effectively not * embedded into the stream of data anymore. * * <p>Unaligned checkpoints can only be enabled if {@link * ExecutionCheckpointingOptions#CHECKPOINTING_MODE} is {@link CheckpointingMode#EXACTLY_ONCE}. */ @PublicEvolving public void enableUnalignedCheckpoints() { enableUnalignedCheckpoints(true); }
3.68
flink_StatusWatermarkValve_markWatermarkAligned
/** * Mark the {@link InputChannelStatus} as watermark-aligned and add it to the {@link * #alignedChannelStatuses}. * * @param inputChannelStatus the input channel status to be marked */ private void markWatermarkAligned(InputChannelStatus inputChannelStatus) { if (!inputChannelStatus.isWatermarkAligned) { inputChannelStatus.isWatermarkAligned = true; inputChannelStatus.addTo(alignedChannelStatuses); } }
3.68
flink_LambdaUtil_withContextClassLoader
/** * Runs the given supplier with the given ClassLoader as the thread's {@link * Thread#setContextClassLoader(ClassLoader) context class loader}. * * <p>The method will make sure to set the context class loader of the calling thread back to * what it was before after the supplier completed. */ public static <R, E extends Throwable> R withContextClassLoader( final ClassLoader cl, final SupplierWithException<R, E> s) throws E { try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(cl)) { return s.get(); } }
3.68
flink_StringUtils_showControlCharacters
/** * Replaces control characters by their escape-coded version. For example, if the string * contains a line break character ('\n'), this character will be replaced by the two characters * backslash '\' and 'n'. As a consequence, the resulting string will not contain any more * control characters. * * @param str The string in which to replace the control characters. * @return The string with the replaced characters. */ public static String showControlCharacters(String str) { int len = str.length(); StringBuilder sb = new StringBuilder(); for (int i = 0; i < len; i += 1) { char c = str.charAt(i); switch (c) { case '\b': sb.append("\\b"); break; case '\t': sb.append("\\t"); break; case '\n': sb.append("\\n"); break; case '\f': sb.append("\\f"); break; case '\r': sb.append("\\r"); break; default: sb.append(c); } } return sb.toString(); }
3.68
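A short demo of the escaping on a string with embedded tab and newline characters; the switch is the same mapping as the snippet:

    public class ControlCharSketch {
        public static void main(String[] args) {
            String in = "col1\tcol2\nrow";
            StringBuilder sb = new StringBuilder();
            for (char c : in.toCharArray()) {
                switch (c) {
                    case '\b': sb.append("\\b"); break;
                    case '\t': sb.append("\\t"); break;
                    case '\n': sb.append("\\n"); break;
                    case '\f': sb.append("\\f"); break;
                    case '\r': sb.append("\\r"); break;
                    default: sb.append(c);
                }
            }
            // Prints a single printable line: col1\tcol2\nrow
            System.out.println(sb);
        }
    }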
framework_MenuBarTooltipsNearEdge_setup
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server. * VaadinRequest) */ @Override protected void setup(VaadinRequest request) { VerticalLayout vlayout = new VerticalLayout(); vlayout.setSizeFull(); vlayout.addComponent(buildMenu("Menu")); vlayout.setComponentAlignment(vlayout.getComponent(0), Alignment.BOTTOM_RIGHT); setContent(vlayout); getTooltipConfiguration().setOpenDelay(0); getTooltipConfiguration().setQuickOpenDelay(0); getTooltipConfiguration().setCloseTimeout(1000); }
3.68
framework_VAbstractCalendarPanel_handleNavigationYearMode
/** * Handles the keyboard navigation when the resolution is set to years. * * @param keycode * The keycode to process * @param ctrl * Is ctrl pressed? * @param shift * Is shift pressed? * @return Returns true if the keycode was processed, else false */ protected boolean handleNavigationYearMode(int keycode, boolean ctrl, boolean shift) { // Ctrl and Shift selection not supported if (ctrl || shift) { return false; } else if (keycode == getPreviousKey()) { focusNextYear(10); // Add 10 years return true; } else if (keycode == getForwardKey()) { focusNextYear(1); // Add 1 year return true; } else if (keycode == getNextKey()) { focusPreviousYear(10); // Subtract 10 years return true; } else if (keycode == getBackwardKey()) { focusPreviousYear(1); // Subtract 1 year return true; } else if (keycode == getSelectKey()) { value = (Date) focusedDate.clone(); onSubmit(); return true; } else if (keycode == getResetKey()) { // Restore the shown value to the selected value focusedDate.setTime(value.getTime()); renderCalendar(); return true; } else if (keycode == getCloseKey()) { // TODO fire listener, on users responsibility?? onCancel(); return true; } return false; }
3.68
hbase_HRegionServer_startReplicationService
/** * Start up replication source and sink handlers. */ private void startReplicationService() throws IOException { if (sameReplicationSourceAndSink && this.replicationSourceHandler != null) { this.replicationSourceHandler.startReplicationService(); } else { if (this.replicationSourceHandler != null) { this.replicationSourceHandler.startReplicationService(); } if (this.replicationSinkHandler != null) { this.replicationSinkHandler.startReplicationService(); } } }
3.68
flink_HiveParserDDLSemanticAnalyzer_convertShowFunctions
/** * Add the task according to the parsed command tree. This is used for the CLI command "SHOW * FUNCTIONS;". * * @param ast The parsed command tree. */ private Operation convertShowFunctions(HiveParserASTNode ast) { if (ast.getChildCount() == 2) { assert (ast.getChild(0).getType() == HiveASTParser.KW_LIKE); throw new ValidationException("SHOW FUNCTIONS LIKE is not supported yet"); } return new ShowFunctionsOperation(); }
3.68
flink_ExecutionConfig_enableGenericTypes
/** * Enables the use of generic types which are serialized via Kryo. * * <p>Generic types are enabled by default. * * @see #disableGenericTypes() */ public void enableGenericTypes() { setGenericTypes(true); }
3.68
flink_StatsSummary_createSnapshot
/** * Returns a snapshot of the current state. * * @return A snapshot of the current state. */ public StatsSummarySnapshot createSnapshot() { return new StatsSummarySnapshot( min, max, sum, count, histogram == null ? null : histogram.getStatistics()); }
3.68
framework_VTreeTable_addAndRemoveRows
/* * Overridden to allow animation of expands and collapses of nodes. */ @Override public void addAndRemoveRows(UIDL partialRowAdditions) { if (partialRowAdditions == null) { return; } if (animationsEnabled) { if (partialRowAdditions.hasAttribute("hide")) { scrollBody.unlinkRowsAnimatedAndUpdateCacheWhenFinished( partialRowAdditions.getIntAttribute("firstprowix"), partialRowAdditions.getIntAttribute("numprows")); } else { scrollBody.insertRowsAnimated(partialRowAdditions, partialRowAdditions.getIntAttribute("firstprowix"), partialRowAdditions.getIntAttribute("numprows")); discardRowsOutsideCacheWindow(); } } else { super.addAndRemoveRows(partialRowAdditions); } }
3.68
flink_WindowReader_aggregate
/** * Reads window state generated using an {@link AggregateFunction}. * * @param uid The uid of the operator. * @param aggregateFunction The aggregate function used to create the window. * @param readerFunction The window reader function. * @param keyType The key type of the window. * @param accType The type information of the accumulator function. * @param outputType The output type of the reader function. * @param <K> The type of the key. * @param <T> The type of the values that are aggregated. * @param <ACC> The type of the accumulator (intermediate aggregate state). * @param <R> The type of the aggregated result. * @param <OUT> The output type of the reader function. * @return A {@code DataSet} of objects read from keyed state. * @throws IOException If savepoint does not contain the specified uid. */ public <K, T, ACC, R, OUT> DataSource<OUT> aggregate( String uid, AggregateFunction<T, ACC, R> aggregateFunction, WindowReaderFunction<R, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<ACC> accType, TypeInformation<OUT> outputType) throws IOException { WindowReaderOperator<?, K, R, W, OUT> operator = WindowReaderOperator.aggregate( aggregateFunction, readerFunction, keyType, windowSerializer, accType); return readWindowOperator(uid, outputType, operator); }
3.68
flink_AbstractBinaryWriter_grow
/** Increases the capacity to ensure that it can hold at least the minimum capacity argument. */ private void grow(int minCapacity) { int oldCapacity = segment.size(); int newCapacity = oldCapacity + (oldCapacity >> 1); if (newCapacity - minCapacity < 0) { newCapacity = minCapacity; } segment = MemorySegmentFactory.wrap(Arrays.copyOf(segment.getArray(), newCapacity)); afterGrow(); }
3.68
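The growth policy in isolation: at least 1.5x the old capacity, or exactly the requested minimum when 1.5x still falls short (e.g. one very large record). A runnable sketch with made-up sizes:

    public class GrowPolicySketch {
        public static void main(String[] args) {
            int capacity = 64; // assumed starting segment size
            int[] requests = {65, 100, 500};
            for (int minCapacity : requests) {
                int newCapacity = capacity + (capacity >> 1); // 1.5x
                if (newCapacity - minCapacity < 0) {
                    newCapacity = minCapacity; // jump straight to the requested size
                }
                System.out.println(capacity + " -> " + newCapacity);
                capacity = newCapacity;
            }
            // 64 -> 96, 96 -> 144, 144 -> 500
        }
    }

The overflow-conscious comparison newCapacity - minCapacity < 0 matches what java.util.ArrayList uses in its own grow path.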
hbase_AsyncRegionLocationCache_add
/** * Add the given locations to the cache, merging with existing if necessary. Also cleans out any * previously cached locations which may have been superseded by this one (i.e. in case of merged * regions). See {@link #cleanProblematicOverlappedRegions(RegionLocations)} * @param locs the locations to cache * @return the final location (possibly merged) that was added to the cache */ public synchronized RegionLocations add(RegionLocations locs) { byte[] startKey = locs.getRegionLocation().getRegion().getStartKey(); RegionLocations oldLocs = cache.putIfAbsent(startKey, locs); if (oldLocs == null) { cleanProblematicOverlappedRegions(locs); return locs; } // check whether the regions are the same, this usually happens when table is split/merged, // or deleted and recreated again. RegionInfo region = locs.getRegionLocation().getRegion(); RegionInfo oldRegion = oldLocs.getRegionLocation().getRegion(); if (region.getEncodedName().equals(oldRegion.getEncodedName())) { RegionLocations mergedLocs = oldLocs.mergeLocations(locs); if (isEqual(mergedLocs, oldLocs)) { // the merged one is the same as the old one, give up LOG.trace("Will not add {} to cache because the old value {} " + " is newer than us or has the same server name." + " Maybe it is updated before we replace it", locs, oldLocs); return oldLocs; } locs = mergedLocs; } else { // the region is different, here we trust the one we fetched. This may be wrong, but eventually // the upper layer can detect this and trigger removal of the wrong locations if (LOG.isDebugEnabled()) { LOG.debug("The newly fetched region {} is different from the old one {} for row '{}'," + " try replacing the old one...", region, oldRegion, Bytes.toStringBinary(startKey)); } } cache.put(startKey, locs); cleanProblematicOverlappedRegions(locs); return locs; }
3.68
dubbo_LFUCache_pollFirst
/** * Retrieves and removes the first node of this deque. * * @return removed node */ CacheNode<K, V> pollFirst() { CacheNode<K, V> node = null; if (first.prev != last) { node = first.prev; first.prev = node.prev; first.prev.next = first; node.prev = null; node.next = null; } return node; }
3.68
flink_StreamProjection_projectTuple2
/** * Projects a {@link Tuple} {@link DataStream} to the previously selected fields. * * @return The projected DataStream. * @see Tuple * @see DataStream */ public <T0, T1> SingleOutputStreamOperator<Tuple2<T0, T1>> projectTuple2() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple2<T0, T1>> tType = new TupleTypeInfo<Tuple2<T0, T1>>(fTypes); return dataStream.transform( "Projection", tType, new StreamProject<IN, Tuple2<T0, T1>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
3.68
flink_ProjectOperator_projectTuple15
/** * Projects a {@link Tuple} {@link DataSet} to the previously selected fields. * * @return The projected DataSet. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> ProjectOperator< T, Tuple15< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> projectTuple15() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType()); TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>> tType = new TupleTypeInfo< Tuple15< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(fTypes); return new ProjectOperator< T, Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>( this.ds, this.fieldIndexes, tType); }
3.68
flink_FlinkRelMetadataQuery_reuseOrCreate
/** * Reuses the input metadataQuery instance if it can be cast to the FlinkRelMetadataQuery class, or creates * a new one if not. * * @param mq the metadataQuery to try to reuse * @return a FlinkRelMetadataQuery instance */ public static FlinkRelMetadataQuery reuseOrCreate(RelMetadataQuery mq) { if (mq instanceof FlinkRelMetadataQuery) { return (FlinkRelMetadataQuery) mq; } else { return instance(); } }
3.68
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageBandwidthOut
/** * Percentage of outbound bandwidth allocated to bundle's quota. */ public double getAllocatedLoadPercentageBandwidthOut() { return this.allocatedLoadPercentageBandwidthOut; }
3.68
framework_CompositeValidator_getMode
/** * Gets the mode of the validator. * * @return Operation mode of the validator: {@link CombinationMode#AND} or * {@link CombinationMode#OR}. */ public final CombinationMode getMode() { return mode; }
3.68
hbase_TableInfoModel_setName
/** * @param name the table name */ public void setName(String name) { this.name = name; }
3.68
framework_DragSourceExtensionConnector_removeDraggable
/** * Removes draggable and class name from the given element. * * @param element * Element to remove draggable from. */ protected void removeDraggable(Element element) { element.setDraggable(Element.DRAGGABLE_FALSE); element.removeClassName( getStylePrimaryName(element) + STYLE_SUFFIX_DRAGSOURCE); element.removeClassName(STYLE_NAME_DRAGGABLE); }
3.68
graphhopper_VectorTile_getVersion
/** * <pre> * Any compliant implementation must first read the version * number encoded in this message and choose the correct * implementation for this version number before proceeding to * decode other parts of this message. * </pre> * * <code>required uint32 version = 15 [default = 1];</code> */ public int getVersion() { return version_; }
3.68
hbase_RSGroupAdminClient_balanceRSGroup
/** * Balance regions in the given RegionServer group. * @return BalanceResponse details about the balancer run */ public BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException { try { BalanceRSGroupRequest req = ProtobufUtil.createBalanceRSGroupRequest(groupName, request); return ProtobufUtil.toBalanceResponse(stub.balanceRSGroup(null, req)); } catch (ServiceException e) { throw ProtobufUtil.handleRemoteException(e); } }
3.68
hadoop_ApplicationPlacementAllocatorFactory_getAppPlacementAllocator
/** * Get AppPlacementAllocator related to the placement type requested. * * @param appPlacementAllocatorName * allocator class name. * @param appSchedulingInfo app SchedulingInfo. * @param schedulerRequestKey scheduler RequestKey. * @param rmContext RMContext. * @return Specific AppPlacementAllocator instance based on type */ public static AppPlacementAllocator<SchedulerNode> getAppPlacementAllocator( String appPlacementAllocatorName, AppSchedulingInfo appSchedulingInfo, SchedulerRequestKey schedulerRequestKey, RMContext rmContext) { Class<?> policyClass; try { if (StringUtils.isEmpty(appPlacementAllocatorName)) { policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS; } else { policyClass = Class.forName(appPlacementAllocatorName); } } catch (ClassNotFoundException e) { policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS; } if (!AppPlacementAllocator.class.isAssignableFrom(policyClass)) { policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS; } @SuppressWarnings("unchecked") AppPlacementAllocator<SchedulerNode> placementAllocatorInstance = (AppPlacementAllocator<SchedulerNode>) ReflectionUtils .newInstance(policyClass, null); placementAllocatorInstance.initialize(appSchedulingInfo, schedulerRequestKey, rmContext); return placementAllocatorInstance; }
3.68
hadoop_NamenodeStatusReport_getNumBlocks
/** * Get the number of blocks. * * @return The number of blocks. */ public long getNumBlocks() { return this.numOfBlocks; }
3.68
framework_Tree_isSelectable
/** * Returns the current selectable state. Selectable determines if a node * can be selected on the client side. Selectable does not affect * {@link #setValue(Object)} or {@link #select(Object)}. * * <p> * The tree is selectable by default. * </p> * * @return the current selectable state. */ public boolean isSelectable() { return selectable; }
3.68
framework_AdjacentElementsWithTooltips_setup
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server. * VaadinRequest) */ @Override protected void setup(VaadinRequest request) { TooltipConfiguration ttc = super.getTooltipConfiguration(); ttc.setMaxWidth(350); ttc.setOpenDelay(200); ttc.setCloseTimeout(200); ttc.setQuickOpenDelay(1000); ttc.setQuickOpenTimeout(1000); HorizontalLayout layout = new HorizontalLayout(); layout.addComponent(makeButton("first")); layout.addComponent(makeButton("second")); addComponent(layout); }
3.68
MagicPlugin_PreciousStonesManager_getAllWarps
/** * Return all warps. * * @return A list of warps */ @Nullable @Override public Collection<PlayerWarp> getAllWarps() { if (!enabled || api == null) return null; return api.getAllFieldLocations(); }
3.68
hadoop_AzureNativeFileSystemStore_finalize
// Finalizer to ensure complete shutdown @Override protected void finalize() throws Throwable { LOG.debug("finalize() called"); close(); super.finalize(); }
3.68
hbase_BucketAllocator_getLeastFilledBuckets
/** * Returns a set of indices of the buckets that are least filled, excluding the given offsets; we also exclude * the fully free buckets for the BucketSizes where everything is empty and they only have one * completely free bucket, which is kept as a reserve * @param excludedBuckets the buckets that need to be excluded due to currently being in use * @param bucketCount max number of buckets to return * @return set of bucket indices which could be used for eviction */ public Set<Integer> getLeastFilledBuckets(Set<Integer> excludedBuckets, int bucketCount) { Queue<Integer> queue = MinMaxPriorityQueue.<Integer> orderedBy(new Comparator<Integer>() { @Override public int compare(Integer left, Integer right) { // We will always get instantiated buckets return Float.compare(((float) buckets[left].usedCount) / buckets[left].itemCount, ((float) buckets[right].usedCount) / buckets[right].itemCount); } }).maximumSize(bucketCount).create(); for (int i = 0; i < buckets.length; i++) { if (!excludedBuckets.contains(i) && !buckets[i].isUninstantiated() && // Avoid the buckets that are the only buckets for a sizeIndex bucketSizeInfos[buckets[i].sizeIndex()].bucketList.size() != 1 ) { queue.add(i); } } Set<Integer> result = new HashSet<>(bucketCount); result.addAll(queue); return result; }
3.68
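The selection order in isolation: buckets sort ascending by fill ratio usedCount/itemCount, and the bounded queue keeps only the bucketCount smallest. A JDK-only sketch (the original uses Guava's MinMaxPriorityQueue; the numbers here are made up):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class LeastFilledSketch {
        public static void main(String[] args) {
            int[] used  = {10, 2, 7, 0};    // hypothetical usedCount per bucket
            int[] items = {10, 10, 10, 10}; // hypothetical itemCount per bucket
            int bucketCount = 2;

            List<Integer> indices = new ArrayList<>();
            for (int i = 0; i < used.length; i++) {
                indices.add(i);
            }
            // Same ordering as the snippet's comparator: ascending fill ratio.
            indices.sort(Comparator.comparingDouble(i -> ((double) used[i]) / items[i]));
            System.out.println(indices.subList(0, bucketCount)); // [3, 1]
        }
    }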
hadoop_ConverterUtils_toNodeId
/* * This method is deprecated, use {@link NodeId#fromString(String)} instead. */ @Public @Deprecated public static NodeId toNodeId(String nodeIdStr) { return NodeId.fromString(nodeIdStr); }
3.68
flink_Channel_swapUnionNodes
/** Utility method used while swapping binary union nodes for n-ary union nodes. */ public void swapUnionNodes(PlanNode newUnionNode) { if (!(this.source instanceof BinaryUnionPlanNode)) { throw new IllegalStateException(); } else { this.source = newUnionNode; } }
3.68
hadoop_VolumeAMSProcessor_aggregateVolumesFrom
// Currently only scheduling request is supported. private List<Volume> aggregateVolumesFrom(AllocateRequest request) throws VolumeException { List<Volume> volumeList = new ArrayList<>(); List<SchedulingRequest> requests = request.getSchedulingRequests(); if (requests != null) { for (SchedulingRequest req : requests) { Resource totalResource = req.getResourceSizing().getResources(); List<ResourceInformation> resourceList = totalResource.getAllResourcesListCopy(); for (ResourceInformation resourceInformation : resourceList) { List<VolumeMetaData> volumes = VolumeMetaData.fromResource(resourceInformation); for (VolumeMetaData vs : volumes) { if (vs.getVolumeCapabilityRange().getMinCapacity() <= 0) { // capacity not specified, ignore continue; } else if (vs.isProvisionedVolume()) { volumeList.add(checkAndGetVolume(vs)); } else { throw new InvalidVolumeException("Only pre-provisioned volume" + " is supported now, volumeID must exist."); } } } } } return volumeList; }
3.68