name: string (lengths 12–178)
code_snippet: string (lengths 8–36.5k)
score: float64 (range 3.26–3.68)
hbase_ClientSnapshotDescriptionUtils_toString
/** * Returns a single line (no \n) representation of snapshot metadata. Use this instead of the * {@code toString} method of * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}. * We don't replace * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription}'s * {@code toString}, because it is auto-generated by protoc. * @param snapshot description of the snapshot * @return single line string with a summary of the snapshot parameters */ public static String toString(SnapshotProtos.SnapshotDescription snapshot) { if (snapshot == null) { return null; } return new StringBuilder("{ ss=").append(snapshot.getName()).append(" table=") .append(snapshot.hasTable() ? TableName.valueOf(snapshot.getTable()) : "").append(" type=") .append(snapshot.getType()).append(" ttl=").append(snapshot.getTtl()).append(" }").toString(); }
3.68
hadoop_DynoInfraUtils_triggerDataNodeBlockReport
/** * Trigger a block report on a given DataNode. * * @param conf Configuration * @param dataNodeTarget The target; should be like {@code <host>:<port>} */ private static void triggerDataNodeBlockReport(Configuration conf, String dataNodeTarget) throws IOException { InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(dataNodeTarget); ClientDatanodeProtocol dnProtocol = DFSUtilClient .createClientDatanodeProtocolProxy(datanodeAddr, UserGroupInformation.getCurrentUser(), conf, NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class)); dnProtocol.triggerBlockReport(new BlockReportOptions.Factory().build()); }
3.68
flink_AbstractStreamOperatorV2_getOperatorName
/** * Return the operator name. If the runtime context has been set, then the task name with * subtask index is returned. Otherwise, the simple class name is returned. * * @return If runtime context is set, then return task name with subtask index. Otherwise return * simple class name. */ protected String getOperatorName() { if (runtimeContext != null) { return runtimeContext.getTaskNameWithSubtasks(); } else { return getClass().getSimpleName(); } }
3.68
hadoop_FileIoProvider_transferToSocketFully
/** * Transfer data from a FileChannel to a SocketOutputStream. * * @param volume target volume. null if unavailable. * @param sockOut SocketOutputStream to write the data. * @param fileCh FileChannel from which to read data. * @param position position within the channel where the transfer begins. * @param count number of bytes to transfer. * @param waitTime returns the nanoseconds spent waiting for the socket * to become writable. * @param transferTime returns the nanoseconds spent transferring data. * @throws IOException */ public void transferToSocketFully( @Nullable FsVolumeSpi volume, SocketOutputStream sockOut, FileChannel fileCh, long position, int count, LongWritable waitTime, LongWritable transferTime) throws IOException { final long begin = profilingEventHook.beforeFileIo(volume, TRANSFER, count); try { faultInjectorEventHook.beforeFileIo(volume, TRANSFER, count); sockOut.transferToFully(fileCh, position, count, waitTime, transferTime); profilingEventHook.afterFileIo(volume, TRANSFER, begin, count); } catch (Exception e) { String em = e.getMessage(); if (em != null) { if (!em.startsWith("Broken pipe") && !em.startsWith("Connection reset")) { onFailure(volume, begin); } } else { onFailure(volume, begin); } throw e; } }
3.68
hbase_HttpServer_setPort
/** * @see #addEndpoint(URI) * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. */ @Deprecated public Builder setPort(int port) { this.port = port; return this; }
3.68
framework_SerializerHelper_writeClassArray
/** * Serializes the class references so * {@link #readClassArray(ObjectInputStream)} can deserialize it. Supports * null class arrays. * * @param out * The {@link ObjectOutputStream} to serialize to. * @param classes * An array containing class references or null. * @throws IOException * Rethrows any IOExceptions from the ObjectOutputStream */ public static void writeClassArray(ObjectOutputStream out, Class<?>[] classes) throws IOException { if (classes == null) { out.writeObject(null); } else { String[] classNames = new String[classes.length]; for (int i = 0; i < classes.length; i++) { classNames[i] = classes[i].getName(); } out.writeObject(classNames); } }
3.68
dubbo_ApplicationModel_reset
// only for unit test @Deprecated public static void reset() { if (FrameworkModel.defaultModel().getDefaultAppModel() != null) { FrameworkModel.defaultModel().getDefaultAppModel().destroy(); } }
3.68
hbase_MasterWalManager_archiveMetaLog
/** * The hbase:meta region may OPEN and CLOSE without issue on a server and then move elsewhere. On * CLOSE, the WAL for the hbase:meta table may not be archived yet (The WAL is only needed if * hbase:meta did not close cleanly). Since the meta region is no longer on this server, the * ServerCrashProcedure won't split these leftover hbase:meta WALs, just leaving them in the WAL * splitting dir. If we try to delete the WAL splitting dir for the server, it fails since the dir is * not totally empty. We can safely archive these hbase:meta logs; then the WAL dir can be deleted. * @param serverName the server to archive meta log */ public void archiveMetaLog(final ServerName serverName) { try { Path logDir = new Path(this.rootDir, AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); if (fs.exists(splitDir)) { FileStatus[] logfiles = CommonFSUtils.listStatus(fs, splitDir, META_FILTER); if (logfiles != null) { for (FileStatus status : logfiles) { if (!status.isDir()) { Path newPath = AbstractFSWAL.getWALArchivePath(this.oldLogDir, status.getPath()); if (!CommonFSUtils.renameAndSetModifyTime(fs, status.getPath(), newPath)) { LOG.warn("Unable to move " + status.getPath() + " to " + newPath); } else { LOG.debug("Archived meta log " + status.getPath() + " to " + newPath); } } } } if (!fs.delete(splitDir, false)) { LOG.warn("Unable to delete log dir. Ignoring. " + splitDir); } } } catch (IOException ie) { LOG.warn("Failed archiving meta log for server " + serverName, ie); } }
3.68
hadoop_AllocateResponse_responseId
/** * Set the <code>responseId</code> of the response. * @see AllocateResponse#setResponseId(int) * @param responseId <code>responseId</code> of the response * @return {@link AllocateResponseBuilder} */ @Private @Unstable public AllocateResponseBuilder responseId(int responseId) { allocateResponse.setResponseId(responseId); return this; }
3.68
morf_AbstractSqlDialectTest_expectedSelectFirstOrderByNullsLastDesc
/** * @return Expected SQL for {@link #testSelectOrderByTwoFields()} */ protected String expectedSelectFirstOrderByNullsLastDesc() { return "SELECT stringField FROM " + tableName(ALTERNATE_TABLE) + " ORDER BY stringField DESC NULLS LAST LIMIT 0,1"; }
3.68
framework_Range_partitionWith
/** * Overlay this range with another one, and partition the ranges according * to how they position relative to each other. * <p> * The three partitions are returned as a three-element Range array: * <ul> * <li>Elements in this range that occur before elements in * <code>other</code>. * <li>Elements that are shared between the two ranges. * <li>Elements in this range that occur after elements in * <code>other</code>. * </ul> * * @param other * the other range to act as delimiters. * @return a three-element Range array of partitions depicting the elements * before (index 0), shared/inside (index 1) and after (index 2). */ public Range[] partitionWith(final Range other) { final Range[] splitBefore = splitAt(other.getStart()); final Range rangeBefore = splitBefore[0]; final Range[] splitAfter = splitBefore[1].splitAt(other.getEnd()); final Range rangeInside = splitAfter[0]; final Range rangeAfter = splitAfter[1]; return new Range[] { rangeBefore, rangeInside, rangeAfter }; }
3.68
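A minimal usage sketch for partitionWith above, assuming Vaadin's half-open Range.between(start, end) factory; the concrete bounds are illustrative:
// Partition [0, 10) around [3, 7).
Range whole = Range.between(0, 10); // elements 0..9
Range other = Range.between(3, 7);  // elements 3..6
Range[] parts = whole.partitionWith(other);
// parts[0] == [0, 3)  -> elements before `other`
// parts[1] == [3, 7)  -> elements shared with `other`
// parts[2] == [7, 10) -> elements after `other`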
morf_Criterion_getOperator
/** * Get the operator associated with the criterion * * @return the operator */ public Operator getOperator() { return operator; }
3.68
hudi_HoodieWriteHandle_write
/** * Perform the actual writing of the given record into the backing file. */ public void write(HoodieRecord record, Schema schema, TypedProperties props) { doWrite(record, schema, props); }
3.68
flink_RocksDBStateBackend_setDbStoragePaths
/** * Sets the directories in which the local RocksDB database puts its files (like SST and * metadata files). These directories do not need to be persistent, they can be ephemeral, * meaning that they are lost on a machine failure, because state in RocksDB is persisted in * checkpoints. * * <p>If nothing is configured, these directories default to the TaskManager's local temporary * file directories. * * <p>Each distinct state will be stored in one path, but when the state backend creates * multiple states, they will store their files on different paths. * * <p>Passing {@code null} to this function restores the default behavior, where the configured * temp directories will be used. * * @param paths The paths across which the local RocksDB database files will be spread. */ public void setDbStoragePaths(String... paths) { rocksDBStateBackend.setDbStoragePaths(paths); }
3.68
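A hedged usage sketch for setDbStoragePaths above; the checkpoint URI and local disk paths are made-up placeholders, and the constructor declares IOException:
// Spread the local RocksDB working files across two local disks.
RocksDBStateBackend backend =
    new RocksDBStateBackend("hdfs://namenode:8020/flink/checkpoints"); // may throw IOException
backend.setDbStoragePaths("/data1/flink/rocksdb", "/data2/flink/rocksdb");
// Passing null instead restores the default TaskManager temp directories.
env.setStateBackend(backend); // env is an assumed StreamExecutionEnvironment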
hadoop_NamenodeStatusReport_getWebAddress
/** * Get the web address. * * @return The web address. */ public String getWebAddress() { return this.webAddress; }
3.68
flink_KvStateRegistry_unregisterListener
/** * Unregisters the listener with the registry. * * @param jobId for which to unregister the {@link KvStateRegistryListener} */ public void unregisterListener(JobID jobId) { listeners.remove(jobId); }
3.68
hadoop_TaskInfo_getInputRecords
/** * @return Number of records input to this task. */ public int getInputRecords() { return recsIn; }
3.68
framework_HierarchicalContainer_removeItemRecursively
/** * Removes the Item identified by given itemId and all its children from the * given Container. * * @param container * the container from which the item is to be removed * @param itemId * the identifier of the Item to be removed * @return true if the operation succeeded */ public static boolean removeItemRecursively( Container.Hierarchical container, Object itemId) { boolean success = true; Collection<?> children2 = container.getChildren(itemId); if (children2 != null) { for (Object o : children2.toArray()) { boolean removeItemRecursively = removeItemRecursively(container, o); if (!removeItemRecursively) { success = false; } } } // remove the root of the subtree if the children were successfully removed if (success) { success = container.removeItem(itemId); } return success; }
3.68
hbase_ScanQueryMatcher_clearCurrentRow
/** * Make {@link #currentRow()} return null. */ public void clearCurrentRow() { currentRow = null; }
3.68
hadoop_TypedBytesOutput_writeBool
/** * Writes a boolean as a typed bytes sequence. * * @param b the boolean to be written * @throws IOException */ public void writeBool(boolean b) throws IOException { out.write(Type.BOOL.code); out.writeBoolean(b); }
3.68
flink_EventTimeSessionWindows_mergeWindows
/** Merge overlapping {@link TimeWindow}s. */ @Override public void mergeWindows( Collection<TimeWindow> windows, MergingWindowAssigner.MergeCallback<TimeWindow> c) { TimeWindow.mergeWindows(windows, c); }
3.68
framework_VCalendarPanel_focusDay
/** * Sets the focus to given date in the current view. Used when moving in the * calendar with the keyboard. * * @param date * A Date representing the day of month to be focused. Must be * one of the days currently visible. */ private void focusDay(Date date) { // Only used when calendar body is present if (resolution.getCalendarField() > Resolution.MONTH .getCalendarField()) { if (focusedDay != null) { focusedDay.removeStyleDependentName(CN_FOCUSED); } if (date != null && focusedDate != null) { focusedDate.setTime(date.getTime()); int rowCount = days.getRowCount(); for (int i = 0; i < rowCount; i++) { int cellCount = days.getCellCount(i); for (int j = 0; j < cellCount; j++) { Widget widget = days.getWidget(i, j); if (widget != null && widget instanceof Day) { Day curday = (Day) widget; if (curday.getDate().equals(date)) { curday.addStyleDependentName(CN_FOCUSED); focusedDay = curday; return; } } } } } } }
3.68
hadoop_RouterMetricsService_getNamenodeMetrics
/** * Get the Namenode metrics. * * @return Namenode metrics. */ public NamenodeBeanMetrics getNamenodeMetrics() { return this.nnMetrics; }
3.68
hudi_Pipelines_boundedBootstrap
/** * Constructs bootstrap pipeline for batch execution mode. * The indexing data set is loaded before the actual data write * in order to support batch UPSERT. */ private static DataStream<HoodieRecord> boundedBootstrap( Configuration conf, RowType rowType, DataStream<RowData> dataStream) { final RowDataKeyGen rowDataKeyGen = RowDataKeyGen.instance(conf, rowType); // shuffle by partition keys dataStream = dataStream .keyBy(rowDataKeyGen::getPartitionPath); return rowDataToHoodieRecord(conf, rowType, dataStream) .transform( "batch_index_bootstrap", TypeInformation.of(HoodieRecord.class), new BatchBootstrapOperator<>(conf)) .setParallelism(conf.getOptional(FlinkOptions.INDEX_BOOTSTRAP_TASKS).orElse(dataStream.getParallelism())) .uid(opUID("batch_index_bootstrap", conf)); }
3.68
flink_ProducerMergedPartitionFileReader_getReadStartAndEndOffset
/** * Return a tuple of the start and end file offset, or return null if the buffer is not found in * the data index. */ @Nullable private Tuple2<Long, Long> getReadStartAndEndOffset( TieredStorageSubpartitionId subpartitionId, int bufferIndex, @Nullable ReadProgress currentReadProgress, @Nullable CompositeBuffer partialBuffer) { ProducerMergedReadProgress readProgress = convertToCurrentReadProgress(currentReadProgress); long readStartOffset; long readEndOffset; if (readProgress == null || readProgress.getCurrentBufferOffset() == readProgress.getEndOfRegionOffset()) { Optional<ProducerMergedPartitionFileIndex.FixedSizeRegion> regionOpt = dataIndex.getRegion(subpartitionId, bufferIndex); if (!regionOpt.isPresent()) { return null; } readStartOffset = regionOpt.get().getRegionStartOffset(); readEndOffset = regionOpt.get().getRegionEndOffset(); } else { readStartOffset = readProgress.getCurrentBufferOffset() + getPartialBufferReadBytes(partialBuffer); readEndOffset = readProgress.getEndOfRegionOffset(); } checkState(readStartOffset <= readEndOffset); return Tuple2.of(readStartOffset, readEndOffset); }
3.68
framework_TableQuery_getFullTableName
/** * Returns the complete table name obtained by concatenation of the catalog * and schema names (if any) and the table name. * * This method can be overridden if customization is needed. * * @return table name in the form it should be used in query and update * statements * @since 7.1 */ protected String getFullTableName() { if (fullTableName == null) { StringBuilder sb = new StringBuilder(); if (catalogName != null) { sb.append(catalogName).append('.'); } if (schemaName != null) { sb.append(schemaName).append('.'); } sb.append(tableName); fullTableName = sb.toString(); } return fullTableName; }
3.68
hbase_CoprocessorHost_loadSystemCoprocessors
/** * Load system coprocessors once only. Read the class names from configuration. Called by * constructor. */ protected void loadSystemCoprocessors(Configuration conf, String confKey) { boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, DEFAULT_COPROCESSORS_ENABLED); if (!coprocessorsEnabled) { return; } Class<?> implClass; // load default coprocessors from configure file String[] defaultCPClasses = conf.getStrings(confKey); if (defaultCPClasses == null || defaultCPClasses.length == 0) return; int currentSystemPriority = Coprocessor.PRIORITY_SYSTEM; for (String className : defaultCPClasses) { // After HBASE-23710 and HBASE-26714 when configuring for system coprocessor, we accept // an optional format of className|priority|path String[] classNameToken = className.split("\\|"); boolean hasPriorityOverride = false; boolean hasPath = false; className = classNameToken[0]; int overridePriority = Coprocessor.PRIORITY_SYSTEM; Path path = null; if (classNameToken.length > 1 && !Strings.isNullOrEmpty(classNameToken[1])) { overridePriority = Integer.parseInt(classNameToken[1]); hasPriorityOverride = true; } if (classNameToken.length > 2 && !Strings.isNullOrEmpty(classNameToken[2])) { path = new Path(classNameToken[2].trim()); hasPath = true; } className = className.trim(); if (findCoprocessor(className) != null) { // If already loaded will just continue LOG.warn("Attempted duplicate loading of " + className + "; skipped"); continue; } ClassLoader cl = this.getClass().getClassLoader(); try { // override the class loader if a path for the system coprocessor is provided. if (hasPath) { cl = CoprocessorClassLoader.getClassLoader(path, this.getClass().getClassLoader(), pathPrefix, conf); } Thread.currentThread().setContextClassLoader(cl); implClass = cl.loadClass(className); int coprocPriority = hasPriorityOverride ? overridePriority : currentSystemPriority; // Add coprocessors as we go to guard against case where a coprocessor is specified twice // in the configuration E env = checkAndLoadInstance(implClass, coprocPriority, conf); if (env != null) { this.coprocEnvironments.add(env); LOG.info("System coprocessor {} loaded, priority={}.", className, coprocPriority); if (!hasPriorityOverride) { ++currentSystemPriority; } } } catch (Throwable t) { // We always abort if system coprocessors cannot be loaded abortServer(className, t); } } }
3.68
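A hedged configuration sketch for the className|priority|path format parsed above; the observer class, priority value, and jar location are hypothetical:
// Register a system coprocessor with an explicit priority and an external jar.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.coprocessor.master.classes",
    "org.example.MyMasterObserver|1001|hdfs:///hbase/cp/my-observer.jar");
// With no optional fields, only the class name is needed:
// conf.set("hbase.coprocessor.regionserver.classes", "org.example.MyRegionServerObserver");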
dubbo_RpcServiceContext_getMethodName
/** * get method name. * * @return method name. */ @Override public String getMethodName() { return methodName; }
3.68
pulsar_ConfigValidationUtils_listFv
/** * Returns a new NestableFieldValidator for a List where each item is validated by validator. * * @param validator used to validate each item in the list * @param notNull whether or not a value of null is valid * @return a NestableFieldValidator for a list with each item validated by {@code validator}. */ public static NestableFieldValidator listFv(final NestableFieldValidator validator, final boolean notNull) { return new NestableFieldValidator() { @Override public void validateField(String pd, String name, Object field) throws IllegalArgumentException { if (field == null) { if (notNull) { throw new IllegalArgumentException("Field " + name + " must not be null"); } else { return; } } if (field instanceof Iterable) { for (Object e : (Iterable) field) { validator.validateField(pd + "Each element of the list ", name, e); } return; } throw new IllegalArgumentException( "Field " + name + " must be an Iterable but was a " + field.getClass()); } }; }
3.68
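A small usage sketch for listFv above; the element validator is built inline and the field name and values are illustrative:
// Validate that "topics" is a non-null list of Strings.
NestableFieldValidator stringValidator = new NestableFieldValidator() {
  @Override
  public void validateField(String pd, String name, Object field) {
    if (!(field instanceof String)) {
      throw new IllegalArgumentException(pd + name + " must be a String, got " + field);
    }
  }
};
NestableFieldValidator topicsValidator = ConfigValidationUtils.listFv(stringValidator, true);
topicsValidator.validateField("", "topics", Arrays.asList("persistent://t/ns/a", "persistent://t/ns/b"));
// A null value (notNull=true) or a non-Iterable value throws IllegalArgumentException.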
hadoop_TFile_getLastKey
/** * Get the last key in the TFile. * * @return The last key in the TFile. * @throws IOException raised on errors performing I/O. */ public RawComparable getLastKey() throws IOException { checkTFileDataIndex(); return tfileIndex.getLastKey(); }
3.68
hadoop_BondedS3AStatisticsContext_addValueToQuantiles
/** * Add a value to a quantiles statistic. No-op if the quantile * isn't found. * @param op operation to look up. * @param value value to add. * @throws ClassCastException if the metric is not a Quantiles. */ @Override public void addValueToQuantiles(Statistic op, long value) { getInstrumentation().addValueToQuantiles(op, value); }
3.68
framework_PureGWTTestApplication_getTestedWidget
/** * Gets the tested widget. * * @return tested widget */ public T getTestedWidget() { return testedWidget; }
3.68
rocketmq-connect_RecordOffsetManagement_ack
/** * Acknowledge this record; signals that its offset may be safely committed. */ public void ack() { if (this.acked.compareAndSet(false, true)) { messageAcked(); } }
3.68
pulsar_MetaStoreImpl_compressManagedInfo
/** * Compress Managed Info data such as LedgerInfo, CursorInfo. * * compression data structure * [MAGIC_NUMBER](2) + [METADATA_SIZE](4) + [METADATA_PAYLOAD] + [MANAGED_LEDGER_INFO_PAYLOAD] */ private byte[] compressManagedInfo(byte[] info, byte[] metadata, int metadataSerializedSize, MLDataFormats.CompressionType compressionType) { if (compressionType == null || compressionType.equals(CompressionType.NONE)) { return info; } ByteBuf metadataByteBuf = null; ByteBuf encodeByteBuf = null; try { metadataByteBuf = PulsarByteBufAllocator.DEFAULT.buffer(metadataSerializedSize + 6, metadataSerializedSize + 6); metadataByteBuf.writeShort(MAGIC_MANAGED_INFO_METADATA); metadataByteBuf.writeInt(metadataSerializedSize); metadataByteBuf.writeBytes(metadata); encodeByteBuf = getCompressionCodec(compressionType) .encode(Unpooled.wrappedBuffer(info)); CompositeByteBuf compositeByteBuf = PulsarByteBufAllocator.DEFAULT.compositeBuffer(); compositeByteBuf.addComponent(true, metadataByteBuf); compositeByteBuf.addComponent(true, encodeByteBuf); byte[] dataBytes = new byte[compositeByteBuf.readableBytes()]; compositeByteBuf.readBytes(dataBytes); return dataBytes; } finally { if (metadataByteBuf != null) { metadataByteBuf.release(); } if (encodeByteBuf != null) { encodeByteBuf.release(); } } }
3.68
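A hedged sketch of how a reader would walk the layout documented above ([MAGIC_NUMBER](2) + [METADATA_SIZE](4) + metadata + compressed payload), using Netty's ByteBuf; variable names are illustrative:
ByteBuf buf = Unpooled.wrappedBuffer(storedBytes); // bytes produced by compressManagedInfo
short magic = buf.readShort();                     // [MAGIC_NUMBER], 2 bytes
int metadataSize = buf.readInt();                  // [METADATA_SIZE], 4 bytes
byte[] metadata = new byte[metadataSize];
buf.readBytes(metadata);                           // [METADATA_PAYLOAD]
byte[] compressedInfo = new byte[buf.readableBytes()];
buf.readBytes(compressedInfo);                     // [MANAGED_LEDGER_INFO_PAYLOAD], still compressed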
flink_WatermarkStrategy_forGenerator
/** Creates a watermark strategy based on an existing {@link WatermarkGeneratorSupplier}. */ static <T> WatermarkStrategy<T> forGenerator(WatermarkGeneratorSupplier<T> generatorSupplier) { return generatorSupplier::createWatermarkGenerator; }
3.68
flink_FutureUtils_forward
/** * Forwards the value from the source future to the target future. * * @param source future to forward the value from * @param target future to forward the value to * @param <T> type of the value */ public static <T> void forward(CompletableFuture<T> source, CompletableFuture<T> target) { source.whenComplete(forwardTo(target)); }
3.68
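A brief usage sketch for forward above; the futures are placeholders:
CompletableFuture<String> source = new CompletableFuture<>();
CompletableFuture<String> target = new CompletableFuture<>();
FutureUtils.forward(source, target);
source.complete("done"); // target now completes with "done"
// source.completeExceptionally(new RuntimeException("boom")) would fail target the same way.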
framework_ConverterUtil_canConverterHandle
/** * Checks if the given converter can handle conversion between the given * presentation and model type. Does strict type checking and only returns * true if the converter claims it can handle exactly the given types. * * @see #canConverterPossiblyHandle(Converter, Class, Class) * * @param converter * The converter to check. If this is null the result is always * false. * @param presentationType * The presentation type * @param modelType * The model type * @return true if the converter supports conversion between the given * presentation and model type, false otherwise */ public static boolean canConverterHandle(Converter<?, ?> converter, Class<?> presentationType, Class<?> modelType) { if (converter == null) { return false; } if (modelType != converter.getModelType()) { return false; } if (presentationType != converter.getPresentationType()) { return false; } return true; }
3.68
framework_PointerEventSupportImpl_init
/** * Initializes event support. */ protected void init() { }
3.68
querydsl_SimpleExpression_ne
/** * Create a {@code this <> right} expression * * @param right rhs of the comparison * @return this != right */ public BooleanExpression ne(Expression<? super T> right) { return Expressions.booleanOperation(Ops.NE, mixin, right); }
3.68
flink_StreamExecutionEnvironment_isForceCheckpointing
/** * Returns whether checkpointing is force-enabled. * * @deprecated Forcing checkpoints will be removed in future version. */ @Deprecated @SuppressWarnings("deprecation") @PublicEvolving public boolean isForceCheckpointing() { return checkpointCfg.isForceCheckpointing(); }
3.68
hadoop_OBSListing_createLocatedFileStatusIterator
/** * Create a located status iterator over a file status iterator. * * @param statusIterator an iterator over the remote status entries * @return a new remote iterator */ LocatedFileStatusIterator createLocatedFileStatusIterator( final RemoteIterator<FileStatus> statusIterator) { return new LocatedFileStatusIterator(statusIterator); }
3.68
hadoop_NamenodeStatusReport_getHighestPriorityLowRedundancyECBlocks
/** * Gets the total number of erasure coded low redundancy blocks on the cluster * with the highest risk of loss. * * @return the total number of low redundancy blocks on the cluster * with the highest risk of loss. */ public long getHighestPriorityLowRedundancyECBlocks() { return this.highestPriorityLowRedundancyECBlocks; }
3.68
hbase_RegionServerSpaceQuotaManager_getActivePoliciesAsMap
/** * Converts a map of table to {@link SpaceViolationPolicyEnforcement}s into a map of table to * {@link SpaceQuotaSnapshot}s. */ public Map<TableName, SpaceQuotaSnapshot> getActivePoliciesAsMap() { final Map<TableName, SpaceViolationPolicyEnforcement> enforcements = copyActiveEnforcements(); final Map<TableName, SpaceQuotaSnapshot> policies = new HashMap<>(); for (Entry<TableName, SpaceViolationPolicyEnforcement> entry : enforcements.entrySet()) { final SpaceQuotaSnapshot snapshot = entry.getValue().getQuotaSnapshot(); if (snapshot != null) { policies.put(entry.getKey(), snapshot); } } return policies; }
3.68
hadoop_RegistryDNSServer_main
/** * Launches the server instance. * @param args the command line args. * @throws IOException if command line options can't be parsed */ public static void main(String[] args) throws IOException { StringUtils.startupShutdownMessage(RegistryDNSServer.class, args, LOG); Configuration conf = new RegistryConfiguration(); new GenericOptionsParser(conf, args); launchDNSServer(conf, null); }
3.68
hadoop_NamenodeStatusReport_getNumInMaintenanceLiveDataNodes
/** * Get the number of live in maintenance nodes. * * @return The number of live in maintenance nodes. */ public int getNumInMaintenanceLiveDataNodes() { return this.inMaintenanceLiveDataNodes; }
3.68
hibernate-validator_StringHelper_toShortString
/** * Creates a compact string representation of the given type, useful for debugging or toString() methods. Package * names are shortened, e.g. "org.hibernate.validator.internal.engine" becomes "o.h.v.i.e". Not to be used for * user-visible log messages. */ public static String toShortString(Type type) { if ( type instanceof Class ) { return toShortString( (Class<?>) type ); } else if ( type instanceof ParameterizedType ) { return toShortString( (ParameterizedType) type ); } else { return type.toString(); } }
3.68
framework_Criterion_setKey
/** * Sets the key of the payload to be compared. * * @param key * key of the payload to be compared */ public void setKey(String key) { this.key = key; }
3.68
zxing_DetectionResult_adjustRowNumber
/** * @return true, if row number was adjusted, false otherwise */ private static boolean adjustRowNumber(Codeword codeword, Codeword otherCodeword) { if (otherCodeword == null) { return false; } if (otherCodeword.hasValidRowNumber() && otherCodeword.getBucket() == codeword.getBucket()) { codeword.setRowNumber(otherCodeword.getRowNumber()); return true; } return false; }
3.68
pulsar_PushPulsarSource_consume
/** * Attach a consumer function to this Source. This is invoked by the implementation * to pass messages whenever there is data to be pushed to Pulsar. * * @param record next message from source which should be sent to a Pulsar topic */ public void consume(Record<T> record) { try { queue.put(record); } catch (InterruptedException e) { throw new RuntimeException(e); } }
3.68
hadoop_IOStatisticsStore_incrementCounter
/** * Increment a counter by one. * * No-op if the counter is unknown. * @param key statistics key * @return old value or, if the counter is unknown: 0 */ default long incrementCounter(String key) { return incrementCounter(key, 1); }
3.68
hbase_ExampleMasterObserverWithMetrics_getTotalMemory
/** Returns the total memory of the process. We will use this to define a gauge metric */ private long getTotalMemory() { return Runtime.getRuntime().totalMemory(); }
3.68
hadoop_ZStandardCompressor_reinit
/** * Prepare the compressor to be used in a new stream with settings defined in * the given Configuration. It will reset the compressor's compression level * and compression strategy. * * @param conf Configuration storing new settings */ @Override public void reinit(Configuration conf) { if (conf == null) { return; } level = ZStandardCodec.getCompressionLevel(conf); reset(); LOG.debug("Reinit compressor with new compression configuration"); }
3.68
hbase_WALSplitter_split
/** * Split a folder of WAL files. Delete the directory when done. Used by tools and unit tests. It * should be package private. It is public only because TestWALObserver is in a different package, * which uses this method to do log splitting. * @return List of output files created by the split. */ public static List<Path> split(Path walRootDir, Path walsDir, Path archiveDir, FileSystem walFS, Configuration conf, final WALFactory factory) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem rootFS = rootDir.getFileSystem(conf); WALSplitter splitter = new WALSplitter(factory, conf, walRootDir, walFS, rootDir, rootFS); final List<FileStatus> wals = SplitLogManager.getFileList(conf, Collections.singletonList(walsDir), null); List<Path> splits = new ArrayList<>(); if (!wals.isEmpty()) { for (FileStatus wal : wals) { SplitWALResult splitWALResult = splitter.splitWAL(wal, null); if (splitWALResult.isFinished()) { WALSplitUtil.archive(wal.getPath(), splitWALResult.isCorrupt(), archiveDir, walFS, conf); // splitter.outputSink.splits is marked as final, so no null check is needed splits.addAll(splitter.outputSink.splits); } } } if (!walFS.delete(walsDir, true)) { throw new IOException("Unable to delete src dir " + walsDir); } return splits; }
3.68
framework_WebBrowser_isTouchDevice
/** * @return true if the browser is detected to support touch events */ public boolean isTouchDevice() { return touchDevice; }
3.68
hadoop_ReencryptionStatus_addZoneIfNecessary
/** * @param zoneId * @return true if the zone was added. */ private boolean addZoneIfNecessary(final Long zoneId, final String name, final ReencryptionInfoProto reProto) { if (!zoneStatuses.containsKey(zoneId)) { LOG.debug("Adding zone {} for re-encryption status", zoneId); Preconditions.checkNotNull(reProto); final ZoneReencryptionStatus.Builder builder = new ZoneReencryptionStatus.Builder(); builder.id(zoneId).zoneName(name) .ezKeyVersionName(reProto.getEzKeyVersionName()) .submissionTime(reProto.getSubmissionTime()) .canceled(reProto.getCanceled()) .filesReencrypted(reProto.getNumReencrypted()) .fileReencryptionFailures(reProto.getNumFailures()); if (reProto.hasCompletionTime()) { builder.completionTime(reProto.getCompletionTime()); builder.state(State.Completed); zonesReencrypted++; } else { builder.state(State.Submitted); } if (reProto.hasLastFile()) { builder.lastCheckpointFile(reProto.getLastFile()); } return zoneStatuses.put(zoneId, builder.build()) == null; } return false; }
3.68
hbase_HBaseCluster_restoreClusterMetrics
/** * Restores the cluster to given state if this is a real cluster, otherwise does nothing. This is * a best effort restore. If the servers are not reachable, or insufficient permissions, etc. * restoration might be partial. * @return whether restoration is complete */ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOException { return true; }
3.68
hbase_HBaseTestingUtility_truncateTable
/** * Truncate a table using the admin command. Effectively disables, deletes, and recreates the * table. For previous behavior of issuing row deletes, see deleteTableData. Expressly does not * preserve regions of existing table. * @param tableName table which must exist. * @return HTable for the new table */ public Table truncateTable(final TableName tableName) throws IOException { return truncateTable(tableName, false); }
3.68
dubbo_LoadingStrategy_includedPackages
/** * Restricts which implementation classes can be loaded for SPI types declared in the `org.apache.dubbo` package. * For example, it can be used so that only implementation classes under the `org.xxx.xxx` package * can be loaded as SPI implementations. * * @return packages whose classes can be loaded for `org.apache.dubbo`'s SPI types */ default String[] includedPackages() { // default match all return null; }
3.68
morf_SqlDialect_getLeastFunctionName
/** * @return The name of the LEAST function */ protected String getLeastFunctionName() { return "LEAST"; }
3.68
hbase_HFileBlock_getPrevBlockOffset
/** Returns the offset of the previous block of the same type in the file, or -1 if unknown */ long getPrevBlockOffset() { return prevBlockOffset; }
3.68
hadoop_AbfsCountersImpl_createCounter
/** * Create a counter in the registry. * * @param stats AbfsStatistic whose counter needs to be made. * @return counter or null. */ private MutableCounterLong createCounter(AbfsStatistic stats) { return registry.newCounter(stats.getStatName(), stats.getStatDescription(), 0L); }
3.68
framework_VCalendar_updateWeekGrid
/** * Re-render the week grid. * * @param daysCount * The number of days to include in the week * @param days * The days * @param today * Today's date * @param realDayNames * The names of the days */ @SuppressWarnings("deprecation") public void updateWeekGrid(int daysCount, List<CalendarDay> days, Date today, String[] realDayNames) { weekGrid.setFirstHour(getFirstHourOfTheDay()); weekGrid.setLastHour(getLastHourOfTheDay()); weekGrid.getTimeBar().updateTimeBar(is24HFormat()); dayToolbar.clear(); dayToolbar.addBackButton(); dayToolbar.setVerticalSized(isHeightUndefined); dayToolbar.setHorizontalSized(isWidthUndefined); weekGrid.clearDates(); weekGrid.setDisabled(isDisabledOrReadOnly()); for (CalendarDay day : days) { String date = day.getDate(); String localizedDateFormat = day.getLocalizedDateFormat(); Date d = dateformat_date.parse(date); int dayOfWeek = day.getDayOfWeek(); if (dayOfWeek < getFirstDayNumber() || dayOfWeek > getLastDayNumber()) { continue; } boolean isToday = false; int dayOfMonth = d.getDate(); if (today.getDate() == dayOfMonth && today.getYear() == d.getYear() && today.getMonth() == d.getMonth()) { isToday = true; } dayToolbar.add(realDayNames[dayOfWeek - 1], date, localizedDateFormat, isToday ? "today" : null); weeklyLongEvents.addDate(d); weekGrid.addDate(d); if (isToday) { weekGrid.setToday(d, today); } } dayToolbar.addNextButton(); }
3.68
hmily_LogUtil_getInstance
/** * Gets instance. * * @return the instance */ public static LogUtil getInstance() { return LOG_UTIL; }
3.68
flink_DoubleHashSet_add
/** See {@link Double#equals(Object)}. */ public boolean add(final double k) { long longKey = Double.doubleToLongBits(k); if (longKey == 0L) { if (this.containsZero) { return false; } this.containsZero = true; } else { double[] key = this.key; int pos; long curr; if ((curr = Double.doubleToLongBits( key[pos = (int) MurmurHashUtil.fmix(longKey) & this.mask])) != 0L) { if (curr == longKey) { return false; } while ((curr = Double.doubleToLongBits(key[pos = pos + 1 & this.mask])) != 0L) { if (curr == longKey) { return false; } } } key[pos] = k; } if (this.size++ >= this.maxFill) { this.rehash(OptimizableHashSet.arraySize(this.size + 1, this.f)); } return true; }
3.68
hudi_HoodieBaseParquetWriter_handleParquetBloomFilters
/** * Once all engines are on Parquet version >= 1.12 we can clean up the reflection hack. * * @param parquetWriterbuilder * @param hadoopConf */ protected void handleParquetBloomFilters(ParquetWriter.Builder parquetWriterbuilder, Configuration hadoopConf) { // inspired from https://github.com/apache/parquet-mr/blob/master/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/ParquetOutputFormat.java#L458-L464 hadoopConf.forEach(conf -> { String key = conf.getKey(); if (key.startsWith(BLOOM_FILTER_ENABLED)) { String column = key.substring(BLOOM_FILTER_ENABLED.length() + 1, key.length()); try { Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterEnabled", String.class, boolean.class); method.invoke(parquetWriterbuilder, column, Boolean.valueOf(conf.getValue()).booleanValue()); } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { // skip } } if (key.startsWith(BLOOM_FILTER_EXPECTED_NDV)) { String column = key.substring(BLOOM_FILTER_EXPECTED_NDV.length() + 1, key.length()); try { Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterNDV", String.class, long.class); method.invoke(parquetWriterbuilder, column, Long.valueOf(conf.getValue()).longValue()); } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { // skip } } }); }
3.68
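A hedged example of the Hadoop configuration keys this reflection hack reacts to; they follow Parquet's parquet.bloom.filter.enabled#<column> / parquet.bloom.filter.expected.ndv#<column> convention, and the column name is made up:
Configuration hadoopConf = new Configuration();
hadoopConf.set("parquet.bloom.filter.enabled#user_id", "true");
hadoopConf.set("parquet.bloom.filter.expected.ndv#user_id", "500000");
// handleParquetBloomFilters(builder, hadoopConf) then calls withBloomFilterEnabled /
// withBloomFilterNDV on the ParquetWriter.Builder via reflection, if those methods exist.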
pulsar_RangeEntryCacheImpl_readFromStorage
/** * Reads the entries from Storage. * @param lh the handle * @param firstEntry the first entry * @param lastEntry the last entry * @param shouldCacheEntry if we should put the entry into the cache * @return a handle to the operation */ CompletableFuture<List<EntryImpl>> readFromStorage(ReadHandle lh, long firstEntry, long lastEntry, boolean shouldCacheEntry) { final int entriesToRead = (int) (lastEntry - firstEntry) + 1; CompletableFuture<List<EntryImpl>> readResult = lh.readAsync(firstEntry, lastEntry) .thenApply( ledgerEntries -> { requireNonNull(ml.getName()); requireNonNull(ml.getExecutor()); try { // We got the entries, we need to transform them to a List<> type long totalSize = 0; final List<EntryImpl> entriesToReturn = Lists.newArrayListWithExpectedSize(entriesToRead); for (LedgerEntry e : ledgerEntries) { EntryImpl entry = RangeEntryCacheManagerImpl.create(e, interceptor); entriesToReturn.add(entry); totalSize += entry.getLength(); if (shouldCacheEntry) { EntryImpl cacheEntry = EntryImpl.create(entry); insert(cacheEntry); cacheEntry.release(); } } ml.getMbean().recordReadEntriesOpsCacheMisses(entriesToReturn.size(), totalSize); manager.mlFactoryMBean.recordCacheMiss(entriesToReturn.size(), totalSize); ml.getMbean().addReadEntriesSample(entriesToReturn.size(), totalSize); return entriesToReturn; } finally { ledgerEntries.close(); } }); // handle LH invalidation readResult.exceptionally(exception -> { if (exception instanceof BKException && ((BKException) exception).getCode() == BKException.Code.TooManyRequestsException) { } else { ml.invalidateLedgerHandle(lh); pendingReadsManager.invalidateLedger(lh.getId()); } return null; }); return readResult; }
3.68
flink_BatchTask_openUserCode
/** * Opens the given stub using its {@link * org.apache.flink.api.common.functions.RichFunction#open(OpenContext)} method. If the open * call produces an exception, a new exception with a standard error message is created, using * the encountered exception as its cause. * * @param stub The user code instance to be opened. * @param parameters The parameters supplied to the user code. * @throws Exception Thrown, if the user code's open method produces an exception. */ public static void openUserCode(Function stub, Configuration parameters) throws Exception { try { FunctionUtils.openFunction(stub, DefaultOpenContext.INSTANCE); } catch (Throwable t) { throw new Exception( "The user defined 'open(Configuration)' method in " + stub.getClass().toString() + " caused an exception: " + t.getMessage(), t); } }
3.68
framework_AbstractClientConnector_getListeners
/** * Returns all listeners that are registered for the given event type or one * of its subclasses. * * @param eventType * The type of event to return listeners for. * @return A collection with all registered listeners. Empty if no listeners * are found. */ public Collection<?> getListeners(Class<?> eventType) { if (eventRouter == null) { return Collections.emptyList(); } return eventRouter.getListeners(eventType); }
3.68
open-banking-gateway_HbciConsentInfo_noAccountsConsentPresent
/** * Checks whether no usable accounts consent is present (the consent is incompatible or no cached account list exists). */ public boolean noAccountsConsentPresent(AccountListHbciContext ctx) { if (ctx.isConsentIncompatible()) { return true; } Optional<HbciResultCache> cached = cachedResultAccessor.resultFromCache(ctx); return cached.map(hbciResultCache -> null == hbciResultCache.getAccounts()).orElse(true); }
3.68
flink_DagConnection_getSource
/** * Gets the source of the connection. * * @return The source Node. */ public OptimizerNode getSource() { return this.source; }
3.68
open-banking-gateway_Xs2aTransactionParameters_toParameters
// TODO - MapStruct? @Override public RequestParams toParameters() { var requestParamsMap = RequestParams.builder() .withBalance(super.getWithBalance()) .bookingStatus(bookingStatus) .dateFrom(dateFrom) .dateTo(dateTo) .build() .toMap(); Optional.ofNullable(page).ifPresent(p -> requestParamsMap.put(PAGE_INDEX_QUERY_PARAMETER_NAME, p.toString())); Optional.ofNullable(pageSize).ifPresent(ps -> requestParamsMap.put(PAGE_SIZE_QUERY_PARAMETER_NAME, ps.toString())); return RequestParams.fromMap(requestParamsMap); }
3.68
flink_ResourceSpec_setExtendedResources
/** * Add the given extended resources. This will discard all the previous added extended * resources. */ public Builder setExtendedResources(Collection<ExternalResource> extendedResources) { this.extendedResources = extendedResources.stream() .collect( Collectors.toMap( ExternalResource::getName, Function.identity())); return this; }
3.68
shardingsphere-elasticjob_ClassPathJobScanner_doScan
/** * Calls the parent search that will search and register all the candidates by {@code ElasticJobConfiguration}. * * @param basePackages the packages to check for annotated classes */ @Override protected Set<BeanDefinitionHolder> doScan(final String... basePackages) { addIncludeFilter(new AnnotationTypeFilter(ElasticJobConfiguration.class)); Set<BeanDefinitionHolder> beanDefinitions = super.doScan(basePackages); if (!beanDefinitions.isEmpty()) { processBeanDefinitions(beanDefinitions); } return beanDefinitions; }
3.68
flink_SkipListUtils_helpGetValueVersion
/** * Returns the version of the value. * * @param valuePointer the pointer to the value. * @param spaceAllocator the space allocator. */ static int helpGetValueVersion(long valuePointer, Allocator spaceAllocator) { Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(valuePointer)); int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(valuePointer); MemorySegment segment = chunk.getMemorySegment(offsetInChunk); int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk); return getValueVersion(segment, offsetInByteBuffer); }
3.68
framework_DragAndDropWrapper_getHorizontalDropLocation
/** * @return a detail about the drag's horizontal position over the * wrapper. */ public HorizontalDropLocation getHorizontalDropLocation() { return HorizontalDropLocation .valueOf((String) getData("horizontalLocation")); }
3.68
hbase_UserProvider_load
// Since UGI's don't hash based on the user id // The cache needs to be keyed on the same thing that Hadoop's Groups class // uses. So this cache uses shortname. @Override public String[] load(String ugi) throws Exception { return getGroupStrings(ugi); }
3.68
flink_ProcessingTimeSessionWindows_withDynamicGap
/** * Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions * based on the current processing time. * * @param sessionWindowTimeGapExtractor The extractor to use to extract the time gap from the * input elements * @return The policy. */ @PublicEvolving public static <T> DynamicProcessingTimeSessionWindows<T> withDynamicGap( SessionWindowTimeGapExtractor<T> sessionWindowTimeGapExtractor) { return new DynamicProcessingTimeSessionWindows<>(sessionWindowTimeGapExtractor); }
3.68
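A short usage sketch for withDynamicGap above; MyEvent, its isPremium() accessor, and the keyed stream are hypothetical:
keyedEvents
    .window(ProcessingTimeSessionWindows.<MyEvent>withDynamicGap(
        event -> event.isPremium() ? 30_000L : 5_000L)) // per-element gap in milliseconds
    .reduce((a, b) -> b);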
AreaShop_RentRegion_getMaxExtends
/** * Get the max number of extends of this region. * @return -1 if infinite, otherwise the maximum number */ public int getMaxExtends() { return getIntegerSetting("rent.maxExtends"); }
3.68
streampipes_DataStreamApi_all
/** * Get all available data streams * * @return {@link org.apache.streampipes.model.SpDataStream} A list of all data streams owned by the user. */ @Override public List<SpDataStream> all() { return getAll(getBaseResourcePath()); }
3.68
flink_BackgroundTask_initialBackgroundTask
/** * Creates an initial background task. This means that this background task has no predecessor. * * @param task task to run * @param executor executor to run the task * @param <V> type of the result * @return initial {@link BackgroundTask} representing the task to execute */ static <V> BackgroundTask<V> initialBackgroundTask( SupplierWithException<? extends V, ? extends Exception> task, Executor executor) { return new BackgroundTask<>(FutureUtils.completedVoidFuture(), task, executor); }
3.68
flink_Schema_newBuilder
/** Builder for configuring and creating instances of {@link Schema}. */ public static Schema.Builder newBuilder() { return new Builder(); }
3.68
hbase_HFilePrettyPrinter_outputTo
/** * Write to the given {@link PrintStream}. * @param output a {@link PrintStream} instance. * @return {@code this} */ public Builder outputTo(PrintStream output) { this.output = output; return this; }
3.68
hbase_CellUtil_toString
/** Returns a string representation of the cell */ public static String toString(Cell cell, boolean verbose) { if (cell == null) { return ""; } StringBuilder builder = new StringBuilder(); String keyStr = getCellKeyAsString(cell); String tag = null; String value = null; if (verbose) { // TODO: pretty print tags as well if (cell.getTagsLength() > 0) { tag = Bytes.toStringBinary(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); } if (!(cell instanceof KeyValue.KeyOnlyKeyValue)) { value = Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); } } builder.append(keyStr); if (tag != null && !tag.isEmpty()) { builder.append("/").append(tag); } if (value != null) { builder.append("/").append(value); } return builder.toString(); }
3.68
querydsl_NumberExpression_lt
/** * Create a {@code this < right} expression * * @param <A> * @param right rhs of the comparison * @return {@code this < right} * @see java.lang.Comparable#compareTo(Object) */ public final <A extends Number & Comparable<?>> BooleanExpression lt(Expression<A> right) { return Expressions.booleanOperation(Ops.LT, this, right); }
3.68
framework_AbstractErrorMessage_toString
/* Documented in superclass */ @Override public String toString() { return getMessage(); }
3.68
framework_FileUploadHandler_matchForBoundary
/** * Reads the input to expect a boundary string. Expects that the first * character has already been matched. * * @return -1 if the boundary was matched, else returns the first byte * from boundary * @throws IOException */ private int matchForBoundary() throws IOException { matchedCount = 0; /* * Going to "buffered mode". Read until full boundary match or a * different character. */ while (true) { matchedCount++; if (matchedCount == boundary.length) { /* * The whole boundary matched so we have reached the end of * file */ atTheEnd = true; return -1; } int fromActualStream = realInputStream.read(); if (fromActualStream != boundary[matchedCount]) { /* * Did not find full boundary, cache the mismatching byte * and start returning the partially matched boundary. */ bufferedByte = fromActualStream; return getBuffered(); } } }
3.68
MagicPlugin_MapController_getURLItem
/** * Get a new ItemStack for the specified url with a specific cropping. * */ @Override public ItemStack getURLItem(String world, String url, String name, int x, int y, int width, int height, Integer priority) { MapView mapView = getURL(world, url, name, x, y, null, null, width, height, priority); return getMapItem(name, mapView); }
3.68
flink_HadoopUtils_paramsFromGenericOptionsParser
/** * Returns {@link ParameterTool} for the arguments parsed by {@link GenericOptionsParser}. * * @param args Input array arguments. It should be parsable by {@link GenericOptionsParser} * @return A {@link ParameterTool} * @throws IOException If arguments cannot be parsed by {@link GenericOptionsParser} * @see GenericOptionsParser */ public static ParameterTool paramsFromGenericOptionsParser(String[] args) throws IOException { Option[] options = new GenericOptionsParser(args).getCommandLine().getOptions(); Map<String, String> map = new HashMap<String, String>(); for (Option option : options) { String[] split = option.getValue().split("="); map.put(split[0], split[1]); } return ParameterTool.fromMap(map); }
3.68
framework_DesignContext_readPackageMappings
/** * Reads and stores the mappings from prefixes to package names from meta * tags located under <head> in the html document. * * @param doc * the document */ protected void readPackageMappings(Document doc) { Element head = doc.head(); if (head == null) { return; } for (Node child : head.childNodes()) { if (child instanceof Element) { Element childElement = (Element) child; if ("meta".equals(childElement.tagName())) { Attributes attributes = childElement.attributes(); if (attributes.hasKey("name") && attributes.hasKey("content") && "package-mapping" .equals(attributes.get("name"))) { String contentString = attributes.get("content"); String[] parts = contentString.split(":"); if (parts.length != 2) { throw new DesignException("The meta tag '" + child + "' cannot be parsed."); } String prefixName = parts[0]; String packageName = parts[1]; addPackagePrefix(prefixName, packageName); } } } } }
3.68
hbase_HRegionServer_submitRegionProcedure
/** * Will ignore the open/close region procedures which were already submitted or executed. When the * master had an unfinished open/close region procedure and restarted, the new active master may send a * duplicate open/close region request to the regionserver. The open/close request is submitted to a thread pool * and executed, so we first need a cache for submitted open/close region procedures. After the * open/close region request has executed and the region transition has been reported successfully, it is cached in the executed * region procedures cache. See {@link #finishRegionProcedure(long)}. After the region transition is reported * successfully, the master will not send the open/close region request to the regionserver again. * We assume that an ongoing duplicate open/close region request will not be delayed more * than 600 seconds, so the executed region procedures cache will expire after 600 seconds. See * HBASE-22404 for more details. * @param procId the id of the open/close region procedure * @return true if the procedure can be submitted. */ boolean submitRegionProcedure(long procId) { if (procId == -1) { return true; } // Ignore the region procedures which were already submitted. Long previous = submittedRegionProcedures.putIfAbsent(procId, procId); if (previous != null) { LOG.warn("Received procedure pid={}, which already submitted, just ignore it", procId); return false; } // Ignore the region procedures which were already executed. if (executedRegionProcedures.getIfPresent(procId) != null) { LOG.warn("Received procedure pid={}, which already executed, just ignore it", procId); return false; } return true; }
3.68
hudi_RocksDBDAO_loadManagedColumnFamilies
/** * Helper to load managed column family descriptors. */ private List<ColumnFamilyDescriptor> loadManagedColumnFamilies(DBOptions dbOptions) throws RocksDBException { final List<ColumnFamilyDescriptor> managedColumnFamilies = new ArrayList<>(); final Options options = new Options(dbOptions, new ColumnFamilyOptions()); List<byte[]> existing = RocksDB.listColumnFamilies(options, rocksDBBasePath); if (existing.isEmpty()) { LOG.info("No column family found. Loading default"); managedColumnFamilies.add(getColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); } else { LOG.info("Loading column families :" + existing.stream().map(String::new).collect(Collectors.toList())); managedColumnFamilies .addAll(existing.stream().map(RocksDBDAO::getColumnFamilyDescriptor).collect(Collectors.toList())); } return managedColumnFamilies; }
3.68
framework_MultiSelect_select
/** * Adds the given items to the set of currently selected items. * <p> * By default this does not clear any previous selection. To do that, use * {@link #deselectAll()}. * <p> * If all the items were already selected, this is a NO-OP. * <p> * This is a short-hand for {@link #updateSelection(Set, Set)} with nothing * to deselect. * * @param items * to add to selection, not {@code null} */ public default void select(T... items) { Objects.requireNonNull(items); Stream.of(items).forEach(Objects::requireNonNull); updateSelection(new LinkedHashSet<>(Arrays.asList(items)), Collections.emptySet()); }
3.68
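A tiny usage sketch for the varargs select above, using CheckBoxGroup as one common MultiSelect implementation; the item values are placeholders:
CheckBoxGroup<String> group = new CheckBoxGroup<>("Toppings");
group.setItems("cheese", "ham", "mushroom");
group.select("cheese", "ham"); // both become selected
group.select("cheese");        // already selected -> NO-OP
group.deselectAll();           // the documented way to clear the selection first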
hadoop_QueueCapacityUpdateContext_addUpdateWarning
/** * Adds an update warning to the context. * @param warning warning during update phase */ public void addUpdateWarning(QueueUpdateWarning warning) { warnings.add(warning); }
3.68
open-banking-gateway_FintechSecureStorage_psuAspspKeyFromPrivate
/** * Reads PSU/Fintechs' user private key from FinTechs' private storage. * @param session Service session with which consent is associated. * @param fintech Owner of the private storage. * @param password FinTechs' Datasafe/KeyStore password. * @return PSU/Fintechs' user consent protection key. */ @SneakyThrows public PubAndPrivKey psuAspspKeyFromPrivate(ServiceSession session, Fintech fintech, Supplier<char[]> password) { try (InputStream is = datasafeServices.privateService().read( ReadRequest.forDefaultPrivate( fintech.getUserIdAuth(password), new FintechPsuAspspTuple(session).toDatasafePathWithoutParent())) ) { return serde.readKey(is); } }
3.68
hadoop_DynoInfraUtils_getNameNodeTrackingUri
/** * Get the URI that can be used to access the tracking interface for the * NameNode, i.e. the web UI of the NodeManager hosting the NameNode * container. * * @param nameNodeProperties The set of properties representing the * information about the launched NameNode. * @return The tracking URI. */ static URI getNameNodeTrackingUri(Properties nameNodeProperties) throws IOException { return URI.create(String.format("http://%s:%s/node/containerlogs/%s/%s/", nameNodeProperties.getProperty(DynoConstants.NN_HOSTNAME), nameNodeProperties.getProperty(Environment.NM_HTTP_PORT.name()), nameNodeProperties.getProperty(Environment.CONTAINER_ID.name()), UserGroupInformation.getCurrentUser().getShortUserName())); }
3.68
hudi_CloudObjectsSelector_deleteProcessedMessages
/** * Delete queue messages after a Hudi commit. This method will be invoked by source.onCommit. */ public void deleteProcessedMessages(SqsClient sqs, String queueUrl, List<Message> processedMessages) { if (!processedMessages.isEmpty()) { // create batches for deletion; SQS DeleteMessageBatchRequest accepts at most 10 entries List<List<Message>> deleteBatches = createListPartitions(processedMessages, 10); for (List<Message> deleteBatch : deleteBatches) { deleteBatchOfMessages(sqs, queueUrl, deleteBatch); } } }
3.68
morf_TableLoader_builder
/** * @return A new {@link TableLoaderBuilder}. */ public static TableLoaderBuilder builder() { return new TableLoaderBuilderImpl(); }
3.68
hudi_HoodieHiveUtils_getIncrementalTableNames
/** * Returns a list of tableNames for which hoodie.<tableName>.consume.mode is set to incremental else returns empty List * * @param job * @return */ public static List<String> getIncrementalTableNames(JobContext job) { Map<String, String> tablesModeMap = job.getConfiguration() .getValByRegex(HOODIE_CONSUME_MODE_PATTERN_STRING.pattern()); List<String> result = tablesModeMap.entrySet().stream().map(s -> { if (s.getValue().trim().toUpperCase().equals(INCREMENTAL_SCAN_MODE)) { Matcher matcher = HOODIE_CONSUME_MODE_PATTERN_STRING.matcher(s.getKey()); return (!matcher.find() ? null : matcher.group(1)); } return null; }).filter(Objects::nonNull).collect(Collectors.toList()); if (result == null) { // Returns an empty list instead of null. result = new ArrayList<>(); } return result; }
3.68
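A hedged sketch of the job configuration this helper scans; the key follows the hoodie.<tableName>.consume.mode pattern described above, the table name is made up, and the start-timestamp key is an assumed companion setting:
Configuration conf = new Configuration();
conf.set("hoodie.trips.consume.mode", "INCREMENTAL");
conf.set("hoodie.trips.consume.start.timestamp", "20231001000000"); // assumed companion key
// getIncrementalTableNames(jobContext) would then return ["trips"] for a job using this conf.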
hadoop_PathLocation_hasMultipleDestinations
/** * Check if this location supports multiple clusters/paths. * * @return If it has multiple destinations. */ public boolean hasMultipleDestinations() { return this.destinations.size() > 1; }
3.68
flink_ThriftObjectConversions_toTTableSchema
/** Similar logic in the {@code org.apache.hive.service.cli.ColumnDescriptor}. */ public static TTableSchema toTTableSchema(ResolvedSchema schema) { TTableSchema tSchema = new TTableSchema(); for (int i = 0; i < schema.getColumnCount(); i++) { Column column = schema.getColumns().get(i); TColumnDesc desc = new TColumnDesc(); desc.setColumnName(column.getName()); column.getComment().ifPresent(desc::setComment); desc.setPosition(i); TTypeDesc typeDesc = new TTypeDesc(); // Hive uses the TPrimitiveTypeEntry only. Please refer to TypeDescriptor#toTTypeDesc. DataType columnType = column.getDataType(); TPrimitiveTypeEntry typeEntry = new TPrimitiveTypeEntry( Type.getType(HiveTypeUtil.toHiveTypeInfo(columnType, false)).toTType()); if (hasTypeQualifiers(columnType.getLogicalType())) { typeEntry.setTypeQualifiers(toTTypeQualifiers(columnType.getLogicalType())); } typeDesc.addToTypes(TTypeEntry.primitiveEntry(typeEntry)); desc.setTypeDesc(typeDesc); tSchema.addToColumns(desc); } return tSchema; }
3.68