name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, range 3.26 to 3.68
hadoop_Cluster_getActiveTaskTrackers
/** * Get all active trackers in the cluster. * * @return array of {@link TaskTrackerInfo} * @throws IOException * @throws InterruptedException */ public TaskTrackerInfo[] getActiveTaskTrackers() throws IOException, InterruptedException { return client.getActiveTrackers(); }
3.68
morf_ConnectionResourcesBean_setFetchSizeForBulkSelects
/** * @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setFetchSizeForBulkSelects(Integer) */ @Override public void setFetchSizeForBulkSelects(Integer fetchSizeForBulkSelects) { this.fetchSizeForBulkSelects = fetchSizeForBulkSelects; }
3.68
hbase_MobUtils_hasValidMobRefCellValue
/** * Indicates whether the current mob ref cell has a valid value. A mob ref cell has a mob * reference tag. The value of a mob ref cell consists of two parts, real mob value length and mob * file name. The real mob value length takes 4 bytes. The remaining part is the mob file name. * @param cell The mob ref cell. * @return True if the cell has a valid value. */ public static boolean hasValidMobRefCellValue(Cell cell) { return cell.getValueLength() > Bytes.SIZEOF_INT; }
3.68
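The value layout documented in the snippet above (a 4-byte length prefix followed by the mob file name) can be illustrated with a small self-contained sketch. The encode helper below is hypothetical and only mirrors the documented layout; it is not HBase's actual mob encoding, and Bytes.SIZEOF_INT is assumed to be 4.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class MobRefValueSketch {
    // Hypothetical encoder mirroring the documented layout:
    // 4 bytes for the real mob value length, followed by the mob file name.
    static byte[] encode(int realValueLength, String mobFileName) {
        byte[] name = mobFileName.getBytes(StandardCharsets.UTF_8);
        return ByteBuffer.allocate(4 + name.length)
                .putInt(realValueLength)
                .put(name)
                .array();
    }

    // The validity check then reduces to "longer than the 4-byte length prefix".
    static boolean hasValidValue(byte[] value) {
        return value.length > 4;
    }

    public static void main(String[] args) {
        byte[] value = encode(1024, "mobfile-0001");
        System.out.println(hasValidValue(value));       // true
        System.out.println(hasValidValue(new byte[4])); // false: no file name part
    }
}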
querydsl_GenericExporter_setEmbeddableAnnotation
/** * Set the embeddable annotation * * @param embeddableAnnotation embeddable annotation */ public void setEmbeddableAnnotation( Class<? extends Annotation> embeddableAnnotation) { this.embeddableAnnotation = embeddableAnnotation; }
3.68
hudi_HoodieAvroUtils_rewriteRecordWithNewSchema
/** * Given Avro records, rewrites them with the new schema. * * @param oldRecords records to be rewritten * @param newSchema schema used to rewrite each old record * @param renameCols a map storing all renamed columns, (k, v) -> (colNameFromNewSchema, colNameFromOldSchema) * @return an iterator of rewritten GenericRecords */ public static Iterator<GenericRecord> rewriteRecordWithNewSchema(Iterator<GenericRecord> oldRecords, Schema newSchema, Map<String, String> renameCols, boolean validate) { if (oldRecords == null || newSchema == null) { return Collections.emptyIterator(); } return new Iterator<GenericRecord>() { @Override public boolean hasNext() { return oldRecords.hasNext(); } @Override public GenericRecord next() { return rewriteRecordWithNewSchema(oldRecords.next(), newSchema, renameCols, validate); } }; }
3.68
flink_HiveParserDDLSemanticAnalyzer_getFullyQualifiedName
// Get the fully qualified name in the ast. e.g. the ast of the form ^(DOT^(DOT a b) c) will // generate a name of the form a.b.c public static String getFullyQualifiedName(HiveParserASTNode ast) { if (ast.getChildCount() == 0) { return ast.getText(); } else if (ast.getChildCount() == 2) { return getFullyQualifiedName((HiveParserASTNode) ast.getChild(0)) + "." + getFullyQualifiedName((HiveParserASTNode) ast.getChild(1)); } else if (ast.getChildCount() == 3) { return getFullyQualifiedName((HiveParserASTNode) ast.getChild(0)) + "." + getFullyQualifiedName((HiveParserASTNode) ast.getChild(1)) + "." + getFullyQualifiedName((HiveParserASTNode) ast.getChild(2)); } else { return null; } }
3.68
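The comment above describes how an AST of the form ^(DOT ^(DOT a b) c) collapses to a.b.c. A minimal sketch of that recursion over a hypothetical node type (standing in for HiveParserASTNode, which is not used here) looks like this:

import java.util.Arrays;
import java.util.List;

public class DottedNameSketch {
    // Hypothetical minimal AST node; only text and children are needed.
    static class Node {
        final String text;
        final List<Node> children;
        Node(String text, Node... children) {
            this.text = text;
            this.children = Arrays.asList(children);
        }
    }

    // Leaves return their text; inner nodes join their children with dots,
    // matching the behaviour of the snippet above for 2- and 3-child nodes.
    static String fullyQualifiedName(Node ast) {
        if (ast.children.isEmpty()) {
            return ast.text;
        }
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < ast.children.size(); i++) {
            if (i > 0) {
                sb.append('.');
            }
            sb.append(fullyQualifiedName(ast.children.get(i)));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // ^(DOT ^(DOT a b) c)
        Node ast = new Node("DOT", new Node("DOT", new Node("a"), new Node("b")), new Node("c"));
        System.out.println(fullyQualifiedName(ast)); // a.b.c
    }
}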
framework_AbstractTextField_setSelection
/** * Sets the range of text to be selected. * <p> * As a side effect the field will become focused. * * @param start * the position of the first character to be selected * @param length * the number of characters to be selected */ public void setSelection(int start, int length) { getRpcProxy(AbstractTextFieldClientRpc.class).selectRange(start, length); focus(); }
3.68
framework_UIConnector_findStylesheetTag
/** * Finds a link tag for a style sheet with the given URL * * @since 7.3 * @param url * the URL of the style sheet * @return the link tag or null if no matching link tag was found */ private LinkElement findStylesheetTag(String url) { NodeList<Element> linkTags = getHead() .getElementsByTagName(LinkElement.TAG); for (int i = 0; i < linkTags.getLength(); i++) { final LinkElement link = LinkElement.as(linkTags.getItem(i)); if ("stylesheet".equals(link.getRel()) && "text/css".equals(link.getType()) && url.equals(link.getHref())) { return link; } } return null; }
3.68
hbase_AbstractMultiFileWriter_commitWriters
/** * Commit all writers. * <p> * Notice that here we use the same <code>maxSeqId</code> for all output files since we haven't * found an easy way to find enough sequence ids for different output files in some corner cases. See * comments in HBASE-15400 for more details. */ public List<Path> commitWriters(long maxSeqId, boolean majorCompaction) throws IOException { return commitWriters(maxSeqId, majorCompaction, Collections.emptyList()); }
3.68
hadoop_ConfigurationWithLogging_set
/** * See {@link Configuration#set(String, String, String)}. */ @Override public void set(String name, String value, String source) { log.info("Set {} to '{}'{}", name, redactor.redact(name, value), source == null ? "" : " from " + source); super.set(name, value, source); }
3.68
hbase_DynamicMetricsRegistry_snapshot
/** * Sample all the mutable metrics and put the snapshot in the builder * @param builder to contain the metrics snapshot * @param all get all the metrics even if the values are not changed. */ public void snapshot(MetricsRecordBuilder builder, boolean all) { for (MetricsTag tag : tags()) { builder.add(tag); } for (MutableMetric metric : metrics()) { metric.snapshot(builder, all); } }
3.68
hadoop_SnappyCodec_createDirectDecompressor
/** * {@inheritDoc} */ @Override public DirectDecompressor createDirectDecompressor() { return new SnappyDirectDecompressor(); }
3.68
flink_MemorySegmentFactory_wrapCopy
/** * Copies the given heap memory region and creates a new memory segment wrapping it. * * @param bytes The heap memory region. * @param start starting position, inclusive * @param end end position, exclusive * @return A new memory segment that targets a copy of the given heap memory region. * @throws IllegalArgumentException if start > end or end > bytes.length */ public static MemorySegment wrapCopy(byte[] bytes, int start, int end) throws IllegalArgumentException { checkArgument(end >= start); checkArgument(end <= bytes.length); MemorySegment copy = allocateUnpooledSegment(end - start); copy.put(0, bytes, start, copy.size()); return copy; }
3.68
hbase_CoprocessorClassLoader_isClassExempt
/** * Determines whether the given class should be exempt from being loaded by this ClassLoader. * @param name the name of the class to test. * @return true if the class should *not* be loaded by this ClassLoader; false otherwise. */ protected boolean isClassExempt(String name, String[] includedClassPrefixes) { if (includedClassPrefixes != null) { for (String clsName : includedClassPrefixes) { if (name.startsWith(clsName)) { return false; } } } for (String exemptPrefix : CLASS_PREFIX_EXEMPTIONS) { if (name.startsWith(exemptPrefix)) { return true; } } return false; }
3.68
hudi_OptionsResolver_getIndexKeyField
/** * Returns the index key field. */ public static String getIndexKeyField(Configuration conf) { return conf.getString(FlinkOptions.INDEX_KEY_FIELD, conf.getString(FlinkOptions.RECORD_KEY_FIELD)); }
3.68
hbase_ByteBuffAllocator_allocateOneBuffer
/** * Allocate a buffer of the configured buffer size from the ByteBuffAllocator. Note to call * {@link ByteBuff#release()} when it is no longer needed, otherwise memory leaks happen in the NIO * ByteBuffer pool. * @return a ByteBuff with the buffer size. */ public SingleByteBuff allocateOneBuffer() { if (isReservoirEnabled()) { ByteBuffer bb = getBuffer(); if (bb != null) { return new SingleByteBuff(() -> putbackBuffer(bb), bb); } } // Allocated from heap, let the JVM free its memory. return (SingleByteBuff) ByteBuff.wrap(allocateOnHeap(bufSize)); }
3.68
pulsar_ManagedLedgerImpl_advanceCursorsIfNecessary
/** * Non-durable cursors have to be moved forward when data is trimmed since they do not retain that data. * This is to make sure that the `consumedEntries` counter is correctly updated with the number of skipped * entries and the stats are reported correctly. */ @VisibleForTesting void advanceCursorsIfNecessary(List<LedgerInfo> ledgersToDelete) throws LedgerNotExistException { if (ledgersToDelete.isEmpty()) { return; } // Just ack messages like a consumer. Normally, consumers will not confirm a position that does not exist, so // find the latest existing position to ack. PositionImpl highestPositionToDelete = calculateLastEntryInLedgerList(ledgersToDelete); if (highestPositionToDelete == null) { log.warn("[{}] The ledgers to be trim are all empty, skip to advance non-durable cursors: {}", name, ledgersToDelete); return; } cursors.forEach(cursor -> { // move the mark delete position to the highestPositionToDelete only if it is smaller than the add confirmed // to prevent the edge case where the cursor is caught up to the latest and highestPositionToDelete may be // larger than the last add confirmed if (highestPositionToDelete.compareTo((PositionImpl) cursor.getMarkDeletedPosition()) > 0 && highestPositionToDelete.compareTo((PositionImpl) cursor.getManagedLedger() .getLastConfirmedEntry()) <= 0 && !(!cursor.isDurable() && cursor instanceof NonDurableCursorImpl && ((NonDurableCursorImpl) cursor).isReadCompacted())) { cursor.asyncMarkDelete(highestPositionToDelete, cursor.getProperties(), new MarkDeleteCallback() { @Override public void markDeleteComplete(Object ctx) { } @Override public void markDeleteFailed(ManagedLedgerException exception, Object ctx) { log.warn("[{}] Failed to mark delete while trimming data ledgers: {}", name, exception.getMessage()); } }, null); } }); }
3.68
hbase_ColumnValueFilter_convert
/** Returns A pb instance to represent this instance. */ FilterProtos.ColumnValueFilter convert() { FilterProtos.ColumnValueFilter.Builder builder = FilterProtos.ColumnValueFilter.newBuilder(); builder.setFamily(UnsafeByteOperations.unsafeWrap(this.family)); builder.setQualifier(UnsafeByteOperations.unsafeWrap(this.qualifier)); builder.setCompareOp(HBaseProtos.CompareType.valueOf(this.op.name())); builder.setComparator(ProtobufUtil.toComparator(this.comparator)); return builder.build(); }
3.68
flink_TimeIntervalJoin_calExpirationTime
/** * Calculate the expiration time with the given operator time and relative window size. * * @param operatorTime the operator time * @param relativeSize the relative window size * @return the expiration time for cached rows */ private long calExpirationTime(long operatorTime, long relativeSize) { if (operatorTime < Long.MAX_VALUE) { return operatorTime - relativeSize - allowedLateness - 1; } else { // When operatorTime = Long.MaxValue, it means the stream has reached the end. return Long.MAX_VALUE; } }
3.68
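A worked example of the expiration formula above, as a standalone sketch; the allowedLateness value is a made-up constant, not taken from any Flink configuration.

public class ExpirationTimeSketch {
    static final long ALLOWED_LATENESS = 5_000L; // assumed lateness in ms

    // Same arithmetic as calExpirationTime above, including the end-of-stream sentinel.
    static long calExpirationTime(long operatorTime, long relativeSize) {
        if (operatorTime < Long.MAX_VALUE) {
            return operatorTime - relativeSize - ALLOWED_LATENESS - 1;
        }
        return Long.MAX_VALUE; // stream has ended: nothing expires
    }

    public static void main(String[] args) {
        System.out.println(calExpirationTime(120_000L, 20_000L));       // 94999
        System.out.println(calExpirationTime(Long.MAX_VALUE, 20_000L)); // 9223372036854775807
    }
}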
hadoop_Times_formatISO8601
/** * Given a timestamp, returns an ISO-8601 formatted string in the format * "yyyy-MM-dd'T'HH:mm:ss.SSSZ". * @param ts timestamp to be formatted in ISO format. * @return ISO 8601 formatted string. */ public static String formatISO8601(long ts) { return ISO_OFFSET_DATE_TIME.format(Instant.ofEpochMilli(ts)); }
3.68
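A standalone sketch of the same formatting call. Note that a DateTimeFormatter without a zone cannot format a raw Instant with an offset pattern, so the ISO_OFFSET_DATE_TIME constant used above presumably has a zone attached; that assumption is made explicit below by using UTC.

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public class Iso8601Sketch {
    // Zone attached explicitly; the original Times utility is assumed to do the same internally.
    static final DateTimeFormatter FMT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ").withZone(ZoneOffset.UTC);

    public static void main(String[] args) {
        long ts = 1_700_000_000_000L;
        System.out.println(FMT.format(Instant.ofEpochMilli(ts))); // 2023-11-14T22:13:20.000+0000
    }
}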
framework_AbstractClientConnector_setErrorHandler
/* * (non-Javadoc) * * @see com.vaadin.server.ClientConnector#setErrorHandler(com.vaadin.server. * ErrorHandler) */ @Override public void setErrorHandler(ErrorHandler errorHandler) { this.errorHandler = errorHandler; }
3.68
hadoop_JobACL_getAclName
/** * Get the name of the ACL. Here it is the same as the name of the configuration * property for specifying the ACL for the job. * * @return aclName */ public String getAclName() { return aclName; }
3.68
flink_CliFrontendParser_mergeOptions
/** * Merges the given {@link Options} into a new Options object. * * @param optionsA options to merge, can be null if none * @param optionsB options to merge, can be null if none * @return the merged Options object */ public static Options mergeOptions(@Nullable Options optionsA, @Nullable Options optionsB) { final Options resultOptions = new Options(); if (optionsA != null) { for (Option option : optionsA.getOptions()) { resultOptions.addOption(option); } } if (optionsB != null) { for (Option option : optionsB.getOptions()) { resultOptions.addOption(option); } } return resultOptions; }
3.68
hadoop_OBSFileSystem_getTrashDir
/** * Return trash directory for fast delete. * * @return the trash directory */ String getTrashDir() { return trashDir; }
3.68
hadoop_RouterResolver_getRpcServer
/** * Get the Router RPC server. * * @return Router RPC server. Null if not possible. */ protected RouterRpcServer getRpcServer() { if (this.router == null) { return null; } return router.getRpcServer(); }
3.68
morf_H2Dialect_getSqlForDaysBetween
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDaysBetween(org.alfasoftware.morf.sql.element.AliasedField, * org.alfasoftware.morf.sql.element.AliasedField) */ @Override protected String getSqlForDaysBetween(AliasedField toDate, AliasedField fromDate) { return "DATEDIFF('DAY'," + getSqlFrom(fromDate) + ", " + getSqlFrom(toDate) + ")"; }
3.68
hbase_WALKeyImpl_getNonceGroup
/** Returns The nonce group */ @Override public long getNonceGroup() { return nonceGroup; }
3.68
MagicPlugin_MageData_getLastDeathLocation
/** * Data can be saved asynchronously, and Locations' Worlds can be invalidated if the server unloads a world. * So do not call this method during saving. */ @Nullable public Location getLastDeathLocation() { return lastDeathLocation == null ? null : lastDeathLocation.asLocation(); }
3.68
hbase_RequestConverter_buildMultiRequest
/** * Create a protocol buffer MultiRequest for row mutations * @return a multi request */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { return buildMultiRequest(regionName, rowMutations, null, nonceGroup, nonce); }
3.68
querydsl_BooleanExpression_coalesce
/** * Create a {@code coalesce(this, args...)} expression * * @param args additional arguments * @return coalesce */ @Override public BooleanExpression coalesce(Boolean... args) { Coalesce<Boolean> coalesce = new Coalesce<Boolean>(getType(), mixin); for (Boolean arg : args) { coalesce.add(arg); } return coalesce.asBoolean(); }
3.68
morf_MySqlDialect_getSqlForDaysBetween
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDaysBetween(org.alfasoftware.morf.sql.element.AliasedField, org.alfasoftware.morf.sql.element.AliasedField) */ @Override protected String getSqlForDaysBetween(AliasedField toDate, AliasedField fromDate) { return "TO_DAYS(" + getSqlFrom(toDate) + ") - TO_DAYS("+ getSqlFrom(fromDate) + ")"; }
3.68
hadoop_WorkerId_getWorkerId
/** * Get workerId. * @return workerId : Worker identifier */ public final String getWorkerId() { return this.workerId.toString(); }
3.68
framework_VFormLayout_getOwner
/** * Returns the Paintable to which this Caption belongs. * * @return owner Widget */ public ComponentConnector getOwner() { return owner; }
3.68
framework_ErrorIndicator_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Show tooltip for caption and required indicator"; }
3.68
hudi_AbstractTableFileSystemView_sync
/** * Syncs the file system view from storage to memory. Performs complete reset of file-system * view. Subsequent partition view calls will load file slices against the latest timeline. * <p> * NOTE: The logic MUST BE guarded by the write lock. */ @Override public void sync() { try { writeLock.lock(); HoodieTimeline newTimeline = metaClient.reloadActiveTimeline().filterCompletedOrMajorOrMinorCompactionInstants(); clear(); // Initialize with new Hoodie timeline. init(metaClient, newTimeline); } finally { writeLock.unlock(); } }
3.68
hbase_SplitTableRegionProcedure_updateMeta
/** * Add daughter regions to META * @param env MasterProcedureEnv */ private void updateMeta(final MasterProcedureEnv env) throws IOException { env.getAssignmentManager().markRegionAsSplit(getParentRegion(), getParentRegionServerName(env), daughterOneRI, daughterTwoRI); }
3.68
hbase_ByteBufferIOEngine_write
/** * Transfers data from the given {@link ByteBuff} to the buffer array. Position of source will be * advanced by the {@link ByteBuffer#remaining()}. * @param src the given byte buffer from which bytes are to be read. * @param offset The offset in the ByteBufferArray of the first byte to be written * @throws IOException throws IOException if writing to the array throws exception */ @Override public void write(ByteBuff src, long offset) throws IOException { bufferArray.write(offset, src); }
3.68
dubbo_TripleServerStream_responsePlainTextError
/** * Handles errors that occur before the server stream is created; an HTTP plain-text error response will be returned * * @param code code of error * @param status status of error */ private void responsePlainTextError(int code, TriRpcStatus status) { ChannelFuture checkResult = preCheck(); if (!checkResult.isSuccess()) { return; } Http2Headers headers = new DefaultHttp2Headers(true) .status(String.valueOf(code)) .setInt(TripleHeaderEnum.STATUS_KEY.getHeader(), status.code.code) .set(TripleHeaderEnum.MESSAGE_KEY.getHeader(), status.description) .set(TripleHeaderEnum.CONTENT_TYPE_KEY.getHeader(), TripleConstant.TEXT_PLAIN_UTF8); writeQueue.enqueue(HeaderQueueCommand.createHeaders(tripleStreamChannelFuture, headers, false)); writeQueue.enqueue(TextDataQueueCommand.createCommand(tripleStreamChannelFuture, status.description, true)); }
3.68
hmily_TransactionImpl_doDeList
/** * Do de list. * * @param flag the flag * @throws SystemException the system exception */ public void doDeList(final int flag) throws SystemException { delistResourceList = new ArrayList<>(enlistResourceList); for (XAResource resource : delistResourceList) { delistResource(resource, flag); } }
3.68
hudi_DataSourceUtils_tryOverrideParquetWriteLegacyFormatProperty
/** * Checks whether default value (false) of "hoodie.parquet.writelegacyformat.enabled" should be * overridden in case: * * <ul> * <li>Property has not been explicitly set by the writer</li> * <li>Data schema contains {@code DecimalType} that would be affected by it</li> * </ul> * * If both of the aforementioned conditions are true, will override the default value of the config * (by essentially setting the value) to make sure that the produced Parquet data files could be * read by {@code AvroParquetReader} * * @param properties properties specified by the writer * @param schema schema of the dataset being written */ public static void tryOverrideParquetWriteLegacyFormatProperty(Map<String, String> properties, StructType schema) { if (HoodieDataTypeUtils.hasSmallPrecisionDecimalType(schema) && properties.get(HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED.key()) == null) { // ParquetWriteSupport writes DecimalType to parquet as INT32/INT64 when the scale of decimalType // is less than {@code Decimal.MAX_LONG_DIGITS}, but {@code AvroParquetReader} which is used by // {@code HoodieParquetReader} does not support DecimalType encoded as INT32/INT64 as. // // To work this problem around we're checking whether // - Schema contains any decimals that could be encoded as INT32/INT64 // - {@code HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED} has not been explicitly // set by the writer // // If both of these conditions are true, then we override the default value of {@code // HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED} and set it to "true" LOG.warn("Small Decimal Type found in the persisted schema, reverting default value of 'hoodie.parquet.writelegacyformat.enabled' to true"); properties.put(HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED.key(), "true"); } }
3.68
MagicPlugin_BlockFace_getModY
/** * Get the amount of Y-coordinates to modify to get the represented block * * @return Amount of Y-coordinates to modify */ public int getModY() { return modY; }
3.68
flink_Channel_getLocalStrategyComparator
/** * Gets the local strategy comparator from this Channel. * * @return The local strategy comparator. */ public TypeComparatorFactory<?> getLocalStrategyComparator() { return localStrategyComparator; }
3.68
dubbo_ServiceAnnotationPostProcessor_generateServiceBeanName
/** * Generates the bean name of {@link ServiceBean} * * @param serviceAnnotationAttributes * @param serviceInterface the class of interface annotated {@link Service} * @return ServiceBean@interfaceClassName#annotatedServiceBeanName * @since 2.7.3 */ private String generateServiceBeanName(Map<String, Object> serviceAnnotationAttributes, String serviceInterface) { ServiceBeanNameBuilder builder = create(serviceInterface, environment) .group((String) serviceAnnotationAttributes.get("group")) .version((String) serviceAnnotationAttributes.get("version")); return builder.build(); }
3.68
hmily_SpringCloudXaProxy_getArgs
/** * Get Feign method arguments. * * @return method arguments */ public Object[] getArgs() { return args; }
3.68
morf_AbstractSelectStatement_getAlias
/** * Gets the alias of this select statement. * * @return the alias */ public String getAlias() { return alias; }
3.68
querydsl_Expressions_nullExpression
/** * Create a null expression for the specified path * * @param path path for type cast * @param <T> type of expression * @return null expression */ public static <T> NullExpression<T> nullExpression(Path<T> path) { return nullExpression(); }
3.68
Activiti_AbstractOperation_findFirstParentScopeExecution
/** * Returns the first parent execution of the provided execution that is a scope. */ protected ExecutionEntity findFirstParentScopeExecution(ExecutionEntity executionEntity) { ExecutionEntityManager executionEntityManager = commandContext.getExecutionEntityManager(); ExecutionEntity parentScopeExecution = null; ExecutionEntity currentlyExaminedExecution = executionEntityManager.findById(executionEntity.getParentId()); while (currentlyExaminedExecution != null && parentScopeExecution == null) { if (currentlyExaminedExecution.isScope()) { parentScopeExecution = currentlyExaminedExecution; } else { currentlyExaminedExecution = executionEntityManager.findById(currentlyExaminedExecution.getParentId()); } } return parentScopeExecution; }
3.68
flink_BufferBuilder_createBufferConsumer
/** * This method always creates a {@link BufferConsumer} starting from the current writer offset. * Data written to {@link BufferBuilder} before creation of {@link BufferConsumer} won't be * visible for that {@link BufferConsumer}. * * @return created matching instance of {@link BufferConsumer} to this {@link BufferBuilder}. */ public BufferConsumer createBufferConsumer() { return createBufferConsumer(positionMarker.cachedPosition); }
3.68
morf_Function_random
/** * Helper method to create an instance of the "random" SQL function. * * @return an instance of the random function. */ public static Function random() { return new Function(FunctionType.RANDOM); }
3.68
flink_ConfigurationParserUtils_getPageSize
/** * Parses the configuration to get the page size and validates the value. * * @param configuration configuration object * @return size of memory segment */ public static int getPageSize(Configuration configuration) { final int pageSize = checkedDownCast( configuration.get(TaskManagerOptions.MEMORY_SEGMENT_SIZE).getBytes()); // check page size for minimum size checkConfigParameter( pageSize >= MemoryManager.MIN_PAGE_SIZE, pageSize, TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(), "Minimum memory segment size is " + MemoryManager.MIN_PAGE_SIZE); // check page size for power of two checkConfigParameter( MathUtils.isPowerOf2(pageSize), pageSize, TaskManagerOptions.MEMORY_SEGMENT_SIZE.key(), "Memory segment size must be a power of 2."); return pageSize; }
3.68
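The power-of-two validation above relies on the usual bit trick; MathUtils.isPowerOf2 in Flink is assumed to behave like the sketch below.

public class PageSizeCheckSketch {
    // A positive value is a power of two exactly when it has a single bit set.
    static boolean isPowerOf2(long value) {
        return value > 0 && (value & (value - 1)) == 0;
    }

    public static void main(String[] args) {
        System.out.println(isPowerOf2(32 * 1024)); // true  - acceptable segment size
        System.out.println(isPowerOf2(33 * 1024)); // false - rejected by the validation
    }
}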
pulsar_ConcurrentLongLongPairHashMap_forEach
/** * Iterate over all the entries in the map and apply the processor function to each of them. * <p> * <b>Warning: this method does not guarantee thread safety.</b> * @param processor the processor to process the elements. */ public void forEach(BiConsumerLongPair processor) { for (Section s : sections) { s.forEach(processor); } }
3.68
flink_ProjectableDecodingFormat_supportsNestedProjection
/** Returns whether this format supports nested projection. */ default boolean supportsNestedProjection() { return false; }
3.68
flink_KeyedStream_countWindow
/** * Windows this {@code KeyedStream} into sliding count windows. * * @param size The size of the windows in number of elements. * @param slide The slide interval in number of elements. */ public WindowedStream<T, KEY, GlobalWindow> countWindow(long size, long slide) { return window(GlobalWindows.create()) .evictor(CountEvictor.of(size)) .trigger(CountTrigger.of(slide)); }
3.68
flink_MemorySegment_isOffHeap
/** * Checks whether this memory segment is backed by off-heap memory. * * @return <tt>true</tt>, if the memory segment is backed by off-heap memory, <tt>false</tt> if * it is backed by heap memory. */ public boolean isOffHeap() { return heapMemory == null; }
3.68
framework_ServerRpcHandler_getIgnoredDisabledError
/** * Generates an error message when the client is trying to do something * ('what') with a connector which is disabled or invisible. * * @since 7.1.8 * @param what * the ignored operation * @param connector * the connector which is disabled (or invisible) * @return an error message */ public static String getIgnoredDisabledError(String what, ClientConnector connector) { String msg = "Ignoring " + what + " for disabled connector " + connector.getClass().getName(); if (connector instanceof Component) { String caption = ((Component) connector).getCaption(); if (caption != null) { msg += ", caption=" + caption; } } return msg; }
3.68
flink_WrappingCollector_setCollector
/** Sets the current collector which is used to emit the final result. */ public void setCollector(Collector<T> collector) { this.collector = collector; }
3.68
hadoop_NativeCrc32_verifyChunkedSums
/** * Verify the given buffers of data and checksums, and throw an exception * if any checksum is invalid. The buffers given to this function should * have their position initially at the start of the data, and their limit * set at the end of the data. The position, limit, and mark are not * modified. * * @param bytesPerSum the chunk size (eg 512 bytes) * @param checksumType the DataChecksum type constant (NULL is not supported) * @param sums the DirectByteBuffer pointing at the beginning of the * stored checksums * @param data the DirectByteBuffer pointing at the beginning of the * data to check * @param basePos the position in the file where the data buffer starts * @param fileName the name of the file being verified * @throws ChecksumException if there is an invalid checksum */ public static void verifyChunkedSums(int bytesPerSum, int checksumType, ByteBuffer sums, ByteBuffer data, String fileName, long basePos) throws ChecksumException { nativeComputeChunkedSums(bytesPerSum, checksumType, sums, sums.position(), data, data.position(), data.remaining(), fileName, basePos, true); }
3.68
hudi_AvroSchemaUtils_resolveNullableSchema
/** * Resolves typical Avro's nullable schema definition: {@code Union(Schema.Type.NULL, <NonNullType>)}, * decomposing union and returning the target non-null type */ public static Schema resolveNullableSchema(Schema schema) { if (schema.getType() != Schema.Type.UNION) { return schema; } List<Schema> innerTypes = schema.getTypes(); if (innerTypes.size() != 2) { throw new AvroRuntimeException( String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema)); } Schema firstInnerType = innerTypes.get(0); Schema secondInnerType = innerTypes.get(1); if ((firstInnerType.getType() != Schema.Type.NULL && secondInnerType.getType() != Schema.Type.NULL) || (firstInnerType.getType() == Schema.Type.NULL && secondInnerType.getType() == Schema.Type.NULL)) { throw new AvroRuntimeException( String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema)); } return firstInnerType.getType() == Schema.Type.NULL ? secondInnerType : firstInnerType; }
3.68
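The decomposition described above can be restated with only the core Avro API; the sketch below mirrors the logic (without Hudi's stricter both-branches validation) and is not Hudi's AvroSchemaUtils itself.

import java.util.List;
import org.apache.avro.Schema;

public class NullableSchemaSketch {
    // Unwrap Union(null, T) to T; non-unions pass through unchanged.
    static Schema resolveNullable(Schema schema) {
        if (schema.getType() != Schema.Type.UNION) {
            return schema;
        }
        List<Schema> types = schema.getTypes();
        if (types.size() != 2) {
            throw new IllegalArgumentException("Expected UNION of a null and one non-null type: " + schema);
        }
        Schema first = types.get(0);
        Schema second = types.get(1);
        return first.getType() == Schema.Type.NULL ? second : first;
    }

    public static void main(String[] args) {
        Schema nullableString = new Schema.Parser().parse("[\"null\", \"string\"]");
        System.out.println(resolveNullable(nullableString)); // prints the non-null branch: "string"
    }
}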
hudi_Types_isWiderThan
/** * Returns whether this DecimalType is wider than `other`. If yes, it means `other` * can be cast into `this` safely without losing any precision or range. */ public boolean isWiderThan(PrimitiveType other) { if (other instanceof DecimalType) { DecimalType dt = (DecimalType) other; return (precision - scale) >= (dt.precision - dt.scale) && scale > dt.scale; } if (other instanceof IntType) { return isWiderThan(get(10, 0)); } return false; }
3.68
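The widening rule above compares integer digits (precision minus scale) and fractional digits (scale). A plain-Java restatement with made-up precision/scale pairs:

public class DecimalWideningSketch {
    // Wider-than check: at least as many integer digits and strictly more fractional digits.
    static boolean isWiderThan(int precision, int scale, int otherPrecision, int otherScale) {
        return (precision - scale) >= (otherPrecision - otherScale) && scale > otherScale;
    }

    public static void main(String[] args) {
        System.out.println(isWiderThan(12, 4, 10, 2)); // true: 8 >= 8 integer digits and 4 > 2
        System.out.println(isWiderThan(10, 2, 12, 4)); // false: fewer integer digits
        // An int corresponds to decimal(10, 0), so any positive scale with enough precision widens it.
        System.out.println(isWiderThan(12, 2, 10, 0)); // true
    }
}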
flink_LogicalTypeChecks_isCompositeType
/** * Checks if the given type is a composite type. * * <p>Use {@link #getFieldCount(LogicalType)}, {@link #getFieldNames(LogicalType)}, {@link * #getFieldTypes(LogicalType)} for unified handling of composite types. * * @param logicalType Logical data type to check * @return True if the type is composite type. */ public static boolean isCompositeType(LogicalType logicalType) { if (logicalType instanceof DistinctType) { return isCompositeType(((DistinctType) logicalType).getSourceType()); } LogicalTypeRoot typeRoot = logicalType.getTypeRoot(); return typeRoot == STRUCTURED_TYPE || typeRoot == ROW; }
3.68
framework_FocusableScrollPanel_moveFocusElementAfterWidget
/** * Helper to keep focus element always in domChild[1]. Aids testing. */ private void moveFocusElementAfterWidget() { getElement().insertAfter(focusElement, getWidget().getElement()); }
3.68
flink_MutableIOMetrics_addIOMetrics
/** * Adds the IO metrics for the given attempt to this object. If the {@link AccessExecution} is * in a terminal state the contained {@link IOMetrics} object is added. Otherwise the given * {@link MetricFetcher} is used to retrieve the required metrics. * * @param attempt Attempt whose IO metrics should be added * @param fetcher MetricFetcher to retrieve metrics for running jobs * @param jobID JobID to which the attempt belongs * @param taskID TaskID to which the attempt belongs */ public void addIOMetrics( AccessExecution attempt, @Nullable MetricFetcher fetcher, String jobID, String taskID) { if (attempt.getState().isTerminal()) { IOMetrics ioMetrics = attempt.getIOMetrics(); if (ioMetrics != null) { // execAttempt is already finished, use final metrics stored in // ExecutionGraph this.numBytesIn += ioMetrics.getNumBytesIn(); this.numBytesOut += ioMetrics.getNumBytesOut(); this.numRecordsIn += ioMetrics.getNumRecordsIn(); this.numRecordsOut += ioMetrics.getNumRecordsOut(); this.accumulateBackPressuredTime += ioMetrics.getAccumulateBackPressuredTime(); this.accumulateIdleTime += ioMetrics.getAccumulateIdleTime(); if (Double.isNaN(ioMetrics.getAccumulateBusyTime())) { this.accumulateBusyTime = Double.NaN; } else { this.accumulateBusyTime += ioMetrics.getAccumulateBusyTime(); } } } else { // execAttempt is still running, use MetricQueryService instead if (fetcher != null) { fetcher.update(); MetricStore.ComponentMetricStore metrics = fetcher.getMetricStore() .getSubtaskAttemptMetricStore( jobID, taskID, attempt.getParallelSubtaskIndex(), attempt.getAttemptNumber()); if (metrics != null) { /** * We want to keep track of missing metrics to be able to make a difference * between 0 as a value and a missing value. In case a metric is missing for a * parallel instance of a task, we set the complete flag as false. */ if (metrics.getMetric(MetricNames.IO_NUM_BYTES_IN) == null) { this.numBytesInComplete = false; } else { this.numBytesIn += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_BYTES_IN)); } if (metrics.getMetric(MetricNames.IO_NUM_BYTES_OUT) == null) { this.numBytesOutComplete = false; } else { this.numBytesOut += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_BYTES_OUT)); } if (metrics.getMetric(MetricNames.IO_NUM_RECORDS_IN) == null) { this.numRecordsInComplete = false; } else { this.numRecordsIn += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_RECORDS_IN)); } if (metrics.getMetric(MetricNames.IO_NUM_RECORDS_OUT) == null) { this.numRecordsOutComplete = false; } else { this.numRecordsOut += Long.valueOf(metrics.getMetric(MetricNames.IO_NUM_RECORDS_OUT)); } if (metrics.getMetric(MetricNames.ACC_TASK_BACK_PRESSURED_TIME) != null) { this.accumulateBackPressuredTime += Long.parseLong( metrics.getMetric( MetricNames.ACC_TASK_BACK_PRESSURED_TIME)); } if (metrics.getMetric(MetricNames.ACC_TASK_IDLE_TIME) != null) { this.accumulateIdleTime += Long.parseLong(metrics.getMetric(MetricNames.ACC_TASK_IDLE_TIME)); } if (metrics.getMetric(MetricNames.ACC_TASK_BUSY_TIME) != null) { double busyTime = Double.parseDouble( metrics.getMetric(MetricNames.ACC_TASK_BUSY_TIME)); if (Double.isNaN(busyTime)) { this.accumulateBusyTime = Double.NaN; } else { this.accumulateBusyTime += busyTime; } } } else { this.numBytesInComplete = false; this.numBytesOutComplete = false; this.numRecordsInComplete = false; this.numRecordsOutComplete = false; } } } }
3.68
flink_FailoverStrategyFactoryLoader_loadFailoverStrategyFactory
/** * Loads a {@link FailoverStrategy.Factory} from the given configuration. * * @param config which specifies the failover strategy factory to load * @return failover strategy factory loaded */ public static FailoverStrategy.Factory loadFailoverStrategyFactory(final Configuration config) { checkNotNull(config); final String strategyParam = config.getString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY); switch (strategyParam.toLowerCase()) { case FULL_RESTART_STRATEGY_NAME: return new RestartAllFailoverStrategy.Factory(); case PIPELINED_REGION_RESTART_STRATEGY_NAME: return new RestartPipelinedRegionFailoverStrategy.Factory(); default: throw new IllegalConfigurationException( "Unknown failover strategy: " + strategyParam); } }
3.68
framework_CSSInjectWithColorpicker_createTextColorSelect
/** * Creates a text color selection dialog */ private Component createTextColorSelect() { // Colorpicker for changing text color ColorPicker textColor = new ColorPicker("Color", Color.BLACK); textColor.setWidth("110px"); textColor.setCaption("Color"); textColor.addValueChangeListener(event -> { // Get the new text color Color color = event.getValue(); // Get the stylesheet of the page Styles styles = Page.getCurrent().getStyles(); // inject the new color as a style styles.add(".v-app .v-textarea.text-label { color:" + color.getCSS() + "; }"); }); return textColor; }
3.68
hbase_RSGroupAdminClient_moveTables
/** * Move given set of tables to the specified target RegionServer group. This will unassign all of * a table's regions so they can be reassigned to the correct group. */ public void moveTables(Set<TableName> tables, String targetGroup) throws IOException { MoveTablesRequest.Builder builder = MoveTablesRequest.newBuilder().setTargetGroup(targetGroup); for (TableName tableName : tables) { builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); if (!admin.tableExists(tableName)) { throw new TableNotFoundException(tableName); } } try { stub.moveTables(null, builder.build()); } catch (ServiceException e) { throw ProtobufUtil.handleRemoteException(e); } }
3.68
flink_ResourceSpec_lessThanOrEqual
/** * Checks whether the current resource is less than or equal to the other resource by comparing all the * fields in the resource. * * @param other The resource to compare * @return True if the current resource is less than or equal to the other resource, otherwise * returns false. */ public boolean lessThanOrEqual(final ResourceSpec other) { checkNotNull(other, "Cannot compare with null resources"); if (this.equals(UNKNOWN) && other.equals(UNKNOWN)) { return true; } else if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) { throw new IllegalArgumentException( "Cannot compare specified resources with UNKNOWN resources."); } int cmp1 = this.cpuCores.getValue().compareTo(other.getCpuCores().getValue()); int cmp2 = this.taskHeapMemory.compareTo(other.taskHeapMemory); int cmp3 = this.taskOffHeapMemory.compareTo(other.taskOffHeapMemory); int cmp4 = this.managedMemory.compareTo(other.managedMemory); if (cmp1 <= 0 && cmp2 <= 0 && cmp3 <= 0 && cmp4 <= 0) { for (ExternalResource resource : extendedResources.values()) { if (!other.extendedResources.containsKey(resource.getName()) || other.extendedResources .get(resource.getName()) .getValue() .compareTo(resource.getValue()) < 0) { return false; } } return true; } return false; }
3.68
hudi_FileBasedInternalSchemaStorageManager_getMetaClient
// make metaClient build lazy private HoodieTableMetaClient getMetaClient() { if (metaClient == null) { metaClient = HoodieTableMetaClient.builder() .setBasePath(baseSchemaPath.getParent().getParent().toString()) .setConf(conf) .setTimeGeneratorConfig(HoodieTimeGeneratorConfig.defaultConfig(baseSchemaPath.getParent().getParent().toString())) .build(); } return metaClient; }
3.68
aws-saas-boost_DynamoTier_updateAttributeNames
// because name is a reserved keyword in DynamoDB update expressions, we need to change the update expression from // something like // SET name=:name,description=:description // to // SET #name=:name,#description=:description public Map<String, String> updateAttributeNames() { Map<String, String> updateAttributeNames = new HashMap<>(); for (String attributeName : attributesWithoutPrimaryKey().keySet()) { updateAttributeNames.put("#" + attributeName, attributeName); } return updateAttributeNames; }
3.68
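A sketch of what the placeholder map above is for: the # names stand in for (possibly reserved) attribute names inside a DynamoDB update expression. The attribute values here are invented, and the expression string is built by hand rather than by the SaaS Boost code.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class UpdateExpressionSketch {
    public static void main(String[] args) {
        // Hypothetical non-key attributes of a tier item.
        Map<String, String> attributes = new LinkedHashMap<>();
        attributes.put("name", "gold");
        attributes.put("description", "highest tier");

        // "#name" -> "name" placeholders, as produced by updateAttributeNames() above.
        Map<String, String> names = attributes.keySet().stream()
                .collect(Collectors.toMap(a -> "#" + a, a -> a, (a, b) -> a, LinkedHashMap::new));

        // The placeholders replace the raw attribute names in the update expression.
        String updateExpression = "SET " + attributes.keySet().stream()
                .map(a -> "#" + a + "=:" + a)
                .collect(Collectors.joining(","));

        System.out.println(names);            // {#name=name, #description=description}
        System.out.println(updateExpression); // SET #name=:name,#description=:description
    }
}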
hadoop_EncryptionSecrets_readObject
/** * For java serialization: read and then call {@link #init()}. * @param in input * @throws IOException IO problem * @throws ClassNotFoundException problem loading inner class. */ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); init(); }
3.68
open-banking-gateway_BaseDatasafeDbStorageService_list
/** * Lists object within Datasafe storage. * @param absoluteLocation Absolute path of the objects' directory including protocol. I.e. {@code db://storage/deadbeef} * @return All objects that have path within {@code absoluteLocation} */ @Override @Transactional public Stream<AbsoluteLocation<ResolvedResource>> list(AbsoluteLocation absoluteLocation) { throw new IllegalStateException("Unsupported operation"); }
3.68
flink_MutableHashTable_releaseTable
/** * Releases the table (the array of buckets) and returns the occupied memory segments to the * list of free segments. */ protected void releaseTable() { // set the counters back this.numBuckets = 0; if (this.buckets != null) { for (MemorySegment bucket : this.buckets) { this.availableMemory.add(bucket); } this.buckets = null; } }
3.68
framework_DateFieldElement_getISOFormatter
/** * Gets a date and time formatter for ISO-8601 dates. * * @return a date formatter for ISO-8601 * @since 8.1.0 */ protected DateTimeFormatter getISOFormatter() { return DateTimeFormatter.ISO_LOCAL_DATE; }
3.68
hudi_PartialUpdateAvroPayload_mergeDisorderRecordsWithMetadata
/** * Merges the given disorder records with metadata. * * @param schema The record schema * @param oldRecord The current record from file * @param updatingRecord The incoming record * @return the merged record option */ protected Option<IndexedRecord> mergeDisorderRecordsWithMetadata( Schema schema, GenericRecord oldRecord, GenericRecord updatingRecord, boolean isPreCombining) { if (isDeleteRecord(oldRecord) && !isPreCombining) { return Option.empty(); } else { final GenericRecordBuilder builder = new GenericRecordBuilder(schema); List<Schema.Field> fields = schema.getFields(); fields.forEach(field -> { final GenericRecord baseRecord; final GenericRecord mergedRecord; if (HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.containsKey(field.name())) { // this is a metadata field baseRecord = updatingRecord; mergedRecord = oldRecord; } else { baseRecord = oldRecord; mergedRecord = updatingRecord; } setField(baseRecord, mergedRecord, builder, field); }); return Option.of(builder.build()); } }
3.68
morf_PortableSqlStatement_getValueOf
/** * Returns the value of the enum. * * @param s The code * @return The DataUpgradeSupported */ public static DataUpgradeSupported getValueOf(String s) { for (DataUpgradeSupported type : DataUpgradeSupported.values()) { if (type.code.equals(s)) { return type; } } throw new IllegalArgumentException(String.format( "Unknown DataUpgradeSupported type [%s]", s)); }
3.68
hbase_MetricsREST_incrementFailedAppendRequests
/** * @param inc How much to add to failedAppendCount. */ public void incrementFailedAppendRequests(final int inc) { source.incrementFailedAppendRequests(inc); }
3.68
rocketmq-connect_ExpressionBuilder_appendIdentifier
/** * Append to this builder's expression the identifier. * * @param name the name to be appended * @param quoted true if the name should be quoted, or false otherwise * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendIdentifier( String name, QuoteMethod quoted ) { appendLeadingQuote(quoted); sb.append(name); appendTrailingQuote(quoted); return this; }
3.68
pulsar_ConcurrentLongPairSet_forEach
/** * Iterate over all the elements in the set and apply the provided function. * <p> * <b>Warning: this method does not guarantee thread safety.</b> * @param processor the processor to process the elements */ public void forEach(LongPairConsumer processor) { for (int i = 0; i < sections.length; i++) { sections[i].forEach(processor); } }
3.68
hbase_MiniHBaseCluster_join
/** * Wait for Mini HBase Cluster to shut down. */ public void join() { this.hbaseCluster.join(); }
3.68
framework_ValueContext_getLocale
/** * Returns an {@code Optional} for the {@code Locale} used in the value * conversion. * * @return the optional of locale */ public Optional<Locale> getLocale() { return Optional.ofNullable(locale); }
3.68
hudi_GenericRecordFullPayloadGenerator_determineExtraEntriesRequired
/** * Helper method to calculate the number of extra entries to add for each complex field. The results * are stored in {@code extraEntriesMap}. */ private void determineExtraEntriesRequired(int numberOfComplexFields, int numberOfBytesToAdd) { for (Schema.Field f : baseSchema.getFields()) { Schema elementSchema = f.schema(); // Find the size of the primitive data type in bytes int primitiveDataTypeSize = 0; if (elementSchema.getType() == Type.ARRAY && isPrimitive(elementSchema.getElementType())) { primitiveDataTypeSize = getSize(elementSchema.getElementType()); } else if (elementSchema.getType() == Type.MAP && isPrimitive(elementSchema.getValueType())) { primitiveDataTypeSize = getSize(elementSchema.getValueType()); } else { continue; } int numEntriesToAdd = numberOfBytesToAdd / primitiveDataTypeSize; // If more than 10 entries are being added for this same complex field and there are still more complex fields to // be visited in the schema, reduce the number of entries to add by a factor of 10 to allow for other complex // fields to pack some entries if (numEntriesToAdd > 10 && numberOfComplexFields > 1) { numEntriesToAdd = 10; numberOfBytesToAdd -= numEntriesToAdd * primitiveDataTypeSize; } else { numberOfBytesToAdd = 0; } extraEntriesMap.put(f.name(), numEntriesToAdd); numberOfComplexFields -= 1; if (numberOfBytesToAdd <= 0) { break; } } }
3.68
framework_Tree_getParent
/** * Gets the ID of the parent Item of the specified Item. * * @see Container.Hierarchical#getParent(Object) */ @Override public Object getParent(Object itemId) { return ((Container.Hierarchical) items).getParent(itemId); }
3.68
morf_MySqlDialect_internalTableDeploymentStatements
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#tableDeploymentStatements(org.alfasoftware.morf.metadata.Table) */ @Override public Collection<String> internalTableDeploymentStatements(Table table) { List<String> statements = new ArrayList<>(); // Create the table deployment statement StringBuilder createTableStatement = new StringBuilder(); createTableStatement.append("CREATE "); if (table.isTemporary()) { createTableStatement.append("TEMPORARY "); } createTableStatement.append("TABLE `"); createTableStatement.append(table.getName()); createTableStatement.append("` ("); List<String> primaryKeys = new ArrayList<>(); boolean first = true; Column autoIncrementColumn = null; int autoNumberStart = -1; for (Column column : table.columns()) { if (!first) { createTableStatement.append(", "); } createTableStatement.append("`"); createTableStatement.append(column.getName()); createTableStatement.append("` "); createTableStatement.append(sqlRepresentationOfColumnType(column)); if (column.isAutoNumbered()) { autoNumberStart = column.getAutoNumberStart() == -1 ? 1 : column.getAutoNumberStart(); createTableStatement.append(" AUTO_INCREMENT COMMENT 'AUTONUMSTART:[").append(autoNumberStart).append("]'"); autoIncrementColumn = column; } if (column.isPrimaryKey()) { primaryKeys.add(column.getName()); } first = false; } // Put on the primary key constraint if (!primaryKeys.isEmpty()) { createTableStatement .append(", ") .append(buildPrimaryKeyConstraint(table)); } createTableStatement.append(") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin"); if (autoIncrementColumn != null && autoIncrementColumn.getAutoNumberStart() != 0) { createTableStatement.append(" AUTO_INCREMENT=").append(autoNumberStart); } statements.add(createTableStatement.toString()); return statements; }
3.68
hudi_FlinkMergeHandle_deleteInvalidDataFile
/** * The Flink checkpoints start in sequence and asynchronously: when one write task finishes checkpoint (A) * (thus the fs view got the written data files, some of which may be invalid), * it immediately goes on with the next round of checkpoint (B) writes. * If it tries to reuse the last small data bucket (small file) of an invalid data file, * then, when the coordinator receives the checkpoint success event of checkpoint (A), * the invalid data file would be cleaned, * and this merger would get a FileNotFoundException when it closes the write file handle. * * <p>To solve this, the invalid data file is deleted eagerly * so that the small bucket of the invalid file would never be reused. * * @param lastAttemptId The last attempt ID */ private void deleteInvalidDataFile(long lastAttemptId) { final String lastWriteToken = FSUtils.makeWriteToken(getPartitionId(), getStageId(), lastAttemptId); final String lastDataFileName = FSUtils.makeBaseFileName(instantTime, lastWriteToken, this.fileId, hoodieTable.getBaseFileExtension()); final Path path = makeNewFilePath(partitionPath, lastDataFileName); if (path.equals(oldFilePath)) { // In some rare cases, the old attempt file is used as the old base file to merge // because the flink index eagerly records that. // // The merge handle has the 'UPSERT' semantics so there is no need to roll over // and the file can still be used as the merge base file. return; } try { if (fs.exists(path)) { LOG.info("Deleting invalid MERGE base file due to task retry: " + lastDataFileName); fs.delete(path, false); } } catch (IOException e) { throw new HoodieException("Error while deleting the MERGE base file due to task retry: " + lastDataFileName, e); } }
3.68
querydsl_AbstractJDOQuery_addFetchGroup
/** * Add the fetch group to the set of active fetch groups. * * @param fetchGroupName fetch group name * @return the current object */ @Override public Q addFetchGroup(String fetchGroupName) { fetchGroups.add(fetchGroupName); return queryMixin.getSelf(); }
3.68
flink_Plan_getDefaultParallelism
/** * Gets the default parallelism for this job. That degree is always used when an operator is not * explicitly given a parallelism. * * @return The default parallelism for the plan. */ public int getDefaultParallelism() { return this.defaultParallelism; }
3.68
hadoop_ClientId_getClientId
/** * @return Return clientId as byte[]. */ public static byte[] getClientId() { UUID uuid = UUID.randomUUID(); ByteBuffer buf = ByteBuffer.wrap(new byte[BYTE_LENGTH]); buf.putLong(uuid.getMostSignificantBits()); buf.putLong(uuid.getLeastSignificantBits()); return buf.array(); }
3.68
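The 16-byte client id above is just a packed UUID; a self-contained round-trip shows the encoding and how it can be decoded again (BYTE_LENGTH is assumed to be 16).

import java.nio.ByteBuffer;
import java.util.UUID;

public class ClientIdSketch {
    public static void main(String[] args) {
        // Same encoding as the snippet above: a random UUID packed into 16 bytes.
        UUID uuid = UUID.randomUUID();
        byte[] clientId = ByteBuffer.wrap(new byte[16])
                .putLong(uuid.getMostSignificantBits())
                .putLong(uuid.getLeastSignificantBits())
                .array();

        // Decoding reverses the two putLong calls and recovers the original UUID.
        ByteBuffer buf = ByteBuffer.wrap(clientId);
        UUID decoded = new UUID(buf.getLong(), buf.getLong());
        System.out.println(decoded.equals(uuid)); // true
    }
}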
hadoop_S3ARemoteInputStream_hasCapability
/** * Indicates whether the given {@code capability} is supported by this stream. * * @param capability the capability to check. * @return true if the given {@code capability} is supported by this stream, false otherwise. */ @Override public boolean hasCapability(String capability) { return capability.equalsIgnoreCase(StreamCapabilities.IOSTATISTICS) || capability.equalsIgnoreCase(StreamCapabilities.READAHEAD); }
3.68
framework_VScrollTable_moveFocusUp
/** * Moves the focus row upwards. * * @return Returns true if succeeded, else false if the selection could not * be moved upwards * */ private boolean moveFocusUp(int offset) { if (isSelectable()) { if (focusedRow == null && scrollBody.iterator().hasNext()) { // FIXME logic is exactly the same as in moveFocusDown, should // be the opposite?? return setRowFocus( (VScrollTableRow) scrollBody.iterator().next()); } else { VScrollTableRow prev = getPreviousRow(focusedRow, offset); if (prev != null) { return setRowFocus(prev); } else { getLogger().info("no previous available"); } } } return false; }
3.68
framework_Form_setValidationVisibleOnCommit
/** * Controls whether validation is made visible implicitly on commit. * * Having commit() call setValidationVisible(true) implicitly is the default * behavior. You can disable the implicit setting by setting this property * to false. * * This is useful because you usually want to start with the form free of * errors and only display them after the user clicks Ok. * * @param makeVisible * If true (default), validation is made visible when commit() is * called. If false, the visibility is left as it is. */ public void setValidationVisibleOnCommit(boolean makeVisible) { validationVisibleOnCommit = makeVisible; }
3.68
hadoop_CounterGroupFactory_version
/** * @return the counter factory version */ public int version() { return VERSION; }
3.68
hadoop_AbstractOperationAuditor_createSpanID
/** * Create a span ID. * @return a unique span ID. */ protected final String createSpanID() { return String.format("%s-%08d", auditorID, SPAN_ID_COUNTER.incrementAndGet()); }
3.68
flink_StreamProjection_projectTuple6
/** * Projects a {@link Tuple} {@link DataStream} to the previously selected fields. * * @return The projected DataStream. * @see Tuple * @see DataStream */ public <T0, T1, T2, T3, T4, T5> SingleOutputStreamOperator<Tuple6<T0, T1, T2, T3, T4, T5>> projectTuple6() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>> tType = new TupleTypeInfo<Tuple6<T0, T1, T2, T3, T4, T5>>(fTypes); return dataStream.transform( "Projection", tType, new StreamProject<IN, Tuple6<T0, T1, T2, T3, T4, T5>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
3.68
framework_SuperDevMode_enableBasedOnParameter
/** * Enables SuperDevMode if the url contains the "superdevmode" parameter. * <p> * The caller should not continue initialization of the application if this * method returns true. The application will be restarted once compilation * is done and then this method will return false. * </p> * * @return true if a recompile operation has started and the page will be * reloaded once it is done, false if no recompilation will be done. */ public static boolean enableBasedOnParameter() { String superDevModeParameter = Location.getParameter("superdevmode"); if (superDevModeParameter != null) { // Need to check the recompile flag also because if we are running // in super dev mode, as a result of the recompile, the enabled // check will fail... if (!isSuperDevModeEnabledInModule()) { showError( "SuperDevMode is disabled for this module/widgetset.<br/>" + "Ensure that your module definition (.gwt.xml) does not contain <br/>" + "&lt;set-configuration-property name=&quot;devModeRedirectEnabled&quot; value=&quot;false&quot; /&gt;<br/>"); return false; } return SuperDevMode.recompileIfNeeded(superDevModeParameter); } return false; }
3.68
hadoop_AbstractS3ACommitter_cleanup
/** * Cleanup the job context, including aborting anything pending * and destroying the thread pool. * @param commitContext commit context * @param suppressExceptions should exceptions be suppressed? * @throws IOException any failure if exceptions were not suppressed. */ protected void cleanup(CommitContext commitContext, boolean suppressExceptions) throws IOException { try (DurationInfo d = new DurationInfo(LOG, "Cleanup job %s", jobIdString(commitContext.getJobContext()))) { abortPendingUploadsInCleanup(suppressExceptions, commitContext); } finally { cleanupStagingDirs(); } }
3.68
zxing_MatrixToImageWriter_writeToPath
/** * As {@link #writeToPath(BitMatrix, String, Path)}, but allows customization of the output. * * @param matrix {@link BitMatrix} to write * @param format image format * @param file file {@link Path} to write image to * @param config output configuration * @throws IOException if writes to the file fail */ public static void writeToPath(BitMatrix matrix, String format, Path file, MatrixToImageConfig config) throws IOException { BufferedImage image = toBufferedImage(matrix, config); if (!ImageIO.write(image, format, file.toFile())) { throw new IOException("Could not write an image of format " + format + " to " + file); } }
3.68
hadoop_ChangeDetectionPolicy_toString
/** * String value for logging. * @return source and mode. */ @Override public String toString() { return "Policy " + getSource() + "/" + getMode(); }
3.68
hudi_IncrSourceCloudStorageHelper_fetchFileData
/** * @param filepaths Files from which to fetch data * @return Data in the given list of files, as a Spark DataSet */ public static Option<Dataset<Row>> fetchFileData(SparkSession spark, List<String> filepaths, TypedProperties props, String fileFormat) { if (filepaths.isEmpty()) { return Option.empty(); } DataFrameReader dfReader = getDataFrameReader(spark, props, fileFormat); Dataset<Row> fileDataDs = dfReader.load(filepaths.toArray(new String[0])); return Option.of(fileDataDs); }
3.68
flink_RestartPipelinedRegionFailoverStrategy_getRegionsToRestart
/** * All 'involved' regions are proposed to be restarted. The 'involved' regions are calculated * with rules below: 1. The region containing the failed task is always involved 2. If an input * result partition of an involved region is not available, i.e. Missing or Corrupted, the * region containing the partition producer task is involved 3. If a region is involved, all of * its consumer regions are involved */ private Set<SchedulingPipelinedRegion> getRegionsToRestart( SchedulingPipelinedRegion failedRegion) { Set<SchedulingPipelinedRegion> regionsToRestart = Collections.newSetFromMap(new IdentityHashMap<>()); Set<SchedulingPipelinedRegion> visitedRegions = Collections.newSetFromMap(new IdentityHashMap<>()); Set<ConsumedPartitionGroup> visitedConsumedResultGroups = Collections.newSetFromMap(new IdentityHashMap<>()); Set<ConsumerVertexGroup> visitedConsumerVertexGroups = Collections.newSetFromMap(new IdentityHashMap<>()); // start from the failed region to visit all involved regions Queue<SchedulingPipelinedRegion> regionsToVisit = new ArrayDeque<>(); visitedRegions.add(failedRegion); regionsToVisit.add(failedRegion); while (!regionsToVisit.isEmpty()) { SchedulingPipelinedRegion regionToRestart = regionsToVisit.poll(); // an involved region should be restarted regionsToRestart.add(regionToRestart); // if a needed input result partition is not available, its producer region is involved for (IntermediateResultPartitionID consumedPartitionId : getConsumedPartitionsToVisit(regionToRestart, visitedConsumedResultGroups)) { if (!resultPartitionAvailabilityChecker.isAvailable(consumedPartitionId)) { SchedulingResultPartition consumedPartition = topology.getResultPartition(consumedPartitionId); SchedulingPipelinedRegion producerRegion = topology.getPipelinedRegionOfVertex( consumedPartition.getProducer().getId()); if (!visitedRegions.contains(producerRegion)) { visitedRegions.add(producerRegion); regionsToVisit.add(producerRegion); } } } // all consumer regions of an involved region should be involved for (ExecutionVertexID consumerVertexId : getConsumerVerticesToVisit(regionToRestart, visitedConsumerVertexGroups)) { SchedulingPipelinedRegion consumerRegion = topology.getPipelinedRegionOfVertex(consumerVertexId); if (!visitedRegions.contains(consumerRegion)) { visitedRegions.add(consumerRegion); regionsToVisit.add(consumerRegion); } } } return regionsToRestart; }
3.68
rocketmq-connect_Serializer_close
/** * Close this serializer. */ @Override default void close() { // intentionally left blank }
3.68
framework_VTooltip_getTooltipFor
/** * Locate the tooltip for given element * * @param element * Element used in search * @return TooltipInfo if connector and tooltip found, null if not */ private TooltipInfo getTooltipFor(Element element) { ApplicationConnection ac = getApplicationConnection(); ComponentConnector connector = Util.getConnectorForElement(ac, RootPanel.get(), element); // Try to find first connector with proper tooltip info TooltipInfo info = null; while (connector != null) { info = connector.getTooltipInfo(element); if (info != null && info.hasMessage()) { break; } if (!(connector.getParent() instanceof ComponentConnector)) { connector = null; info = null; break; } connector = (ComponentConnector) connector.getParent(); } if (connector != null && info != null) { assert connector.hasTooltip() : "getTooltipInfo for " + Util.getConnectorString(connector) + " returned a tooltip even though hasTooltip claims there are no tooltips for the connector."; return info; } return null; }
3.68