Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, 3.26 to 3.68)
flink_CommonExecSink_getFieldInfoForLengthEnforcer
/** * Returns a List of {@link ConstraintEnforcer.FieldInfo}, each containing the info needed to * determine whether a string or binary value needs trimming and/or padding. */ private List<ConstraintEnforcer.FieldInfo> getFieldInfoForLengthEnforcer( RowType physicalType, LengthEnforcerType enforcerType) { LogicalTypeRoot staticType = null; LogicalTypeRoot variableType = null; int maxLength = 0; switch (enforcerType) { case CHAR: staticType = LogicalTypeRoot.CHAR; variableType = LogicalTypeRoot.VARCHAR; maxLength = CharType.MAX_LENGTH; break; case BINARY: staticType = LogicalTypeRoot.BINARY; variableType = LogicalTypeRoot.VARBINARY; maxLength = BinaryType.MAX_LENGTH; } final List<ConstraintEnforcer.FieldInfo> fieldsAndLengths = new ArrayList<>(); for (int i = 0; i < physicalType.getFieldCount(); i++) { LogicalType type = physicalType.getTypeAt(i); boolean isStatic = type.is(staticType); // Should trim and possibly pad if ((isStatic && (LogicalTypeChecks.getLength(type) < maxLength)) || (type.is(variableType) && (LogicalTypeChecks.getLength(type) < maxLength))) { fieldsAndLengths.add( new ConstraintEnforcer.FieldInfo( i, LogicalTypeChecks.getLength(type), isStatic)); } else if (isStatic) { // Should pad fieldsAndLengths.add(new ConstraintEnforcer.FieldInfo(i, null, isStatic)); } } return fieldsAndLengths; }
3.68
hadoop_RouterMetricsService_getRouterMetrics
/** * Get the metrics system for the Router. * * @return Router metrics. */ public RouterMetrics getRouterMetrics() { return this.routerMetrics; }
3.68
zxing_ResultHandler_searchMap
/** * Do a geo search using the address as the query. * * @param address The address to find */ final void searchMap(String address) { launchIntent(new Intent(Intent.ACTION_VIEW, Uri.parse("geo:0,0?q=" + Uri.encode(address)))); }
3.68
hmily_HmilyLimitSegment_getOffset
/** * Get offset. * * @return offset */ public Optional<HmilyPaginationValueSegment> getOffset() { return Optional.ofNullable(offset); }
3.68
flink_ComponentClosingUtils_closeAsyncWithTimeout
/** * Close a component with a timeout. * * @param componentName the name of the component. * @param closingSequence the closing logic. * @param closeTimeout the timeout to wait for the component to close. * @return An optional throwable which is non-empty if an error occurred when closing the * component. */ public static CompletableFuture<Void> closeAsyncWithTimeout( String componentName, ThrowingRunnable<Exception> closingSequence, Duration closeTimeout) { final CompletableFuture<Void> future = new CompletableFuture<>(); // Start a dedicate thread to close the component. final Thread t = new Thread( () -> { try { closingSequence.run(); future.complete(null); } catch (Throwable error) { future.completeExceptionally(error); } }); t.start(); // if the future fails due to a timeout, we interrupt the thread future.exceptionally( (error) -> { if (error instanceof TimeoutException && t.isAlive()) { abortThread(t); } return null; }); FutureUtils.orTimeout( future, closeTimeout.toMillis(), TimeUnit.MILLISECONDS, String.format( "Failed to close the %s before timeout of %d ms", componentName, closeTimeout.toMillis())); return future; }
3.68
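As an aside on closeAsyncWithTimeout above: the close-with-timeout pattern (run the closing logic on its own thread, fail the future on timeout, interrupt the thread if the timeout fires) can be reproduced with JDK classes alone. The following is a minimal standalone sketch, not Flink's ComponentClosingUtils; the class and method names are made up for illustration:

    import java.time.Duration;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class CloseWithTimeoutSketch {

        // Runs closeLogic on a dedicated thread; the returned future completes when the logic
        // finishes, fails with its exception, or fails with TimeoutException after the timeout.
        static CompletableFuture<Void> closeAsync(Runnable closeLogic, Duration timeout) {
            CompletableFuture<Void> future = new CompletableFuture<>();
            Thread worker = new Thread(() -> {
                try {
                    closeLogic.run();
                    future.complete(null);
                } catch (Throwable t) {
                    future.completeExceptionally(t);
                }
            }, "component-closer");
            worker.start();
            // JDK 9+ orTimeout completes the future exceptionally if it is still pending.
            future.orTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS)
                    .exceptionally(error -> {
                        if (error instanceof TimeoutException && worker.isAlive()) {
                            worker.interrupt(); // best-effort abort, like abortThread(t) above
                        }
                        return null;
                    });
            return future;
        }

        public static void main(String[] args) {
            closeAsync(() -> System.out.println("closing component"), Duration.ofSeconds(1)).join();
        }
    }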
hudi_BigQuerySchemaResolver_getTableSchema
/** * Get the BigQuery schema for the table. If the BigQuery table is configured with partitioning, the caller must pass in the partition fields so that they are not returned in the schema. * If the partition fields are in the schema, it will cause an error when querying the table since BigQuery will treat it as a duplicate column. * @param metaClient Meta client for the Hudi table * @param partitionFields The fields that are used for partitioning in BigQuery * @return The BigQuery schema for the table */ Schema getTableSchema(HoodieTableMetaClient metaClient, List<String> partitionFields) { try { Schema schema = convertSchema(tableSchemaResolverSupplier.apply(metaClient).getTableAvroSchema()); if (partitionFields.isEmpty()) { return schema; } else { return Schema.of(schema.getFields().stream().filter(field -> !partitionFields.contains(field.getName())).collect(Collectors.toList())); } } catch (Exception e) { throw new HoodieBigQuerySyncException("Failed to get table schema", e); } }
3.68
hadoop_ResourceRequest_nodeLabelExpression
/** * Set the <code>nodeLabelExpression</code> of the request. * @see ResourceRequest#setNodeLabelExpression(String) * @param nodeLabelExpression * <code>nodeLabelExpression</code> of the request * @return {@link ResourceRequestBuilder} */ @Public @Evolving public ResourceRequestBuilder nodeLabelExpression( String nodeLabelExpression) { resourceRequest.setNodeLabelExpression(nodeLabelExpression); return this; }
3.68
framework_ServiceInitEvent_addConnectorIdGenerator
/** * Adds a connector id generator to be used by this service. By default, * the service will fail to deploy if more than one connector id generator * has been registered. * * @param connectorIdGenerator * the connector id generator to add, not <code>null</code> * * @since 8.1 */ public void addConnectorIdGenerator( ConnectorIdGenerator connectorIdGenerator) { Objects.requireNonNull(connectorIdGenerator, "Connector id generator cannot be null"); /* * We're collecting all generators so that a custom service * implementation can pick which one to use even though the default * implementation throws if there are more than one. */ addedConnectorIdGenerators.add(connectorIdGenerator); }
3.68
hbase_AsyncRpcRetryingCallerFactory_single
/** * Create retry caller for single action, such as get, put, delete, etc. */ public <T> SingleRequestCallerBuilder<T> single() { return new SingleRequestCallerBuilder<>(); }
3.68
flink_JoinInputSideSpec_withUniqueKey
/** * Creates a {@link JoinInputSideSpec} whose input has a unique key. * * @param uniqueKeyType type information of the unique key * @param uniqueKeySelector key selector to extract unique key from the input row */ public static JoinInputSideSpec withUniqueKey( InternalTypeInfo<RowData> uniqueKeyType, KeySelector<RowData, RowData> uniqueKeySelector) { checkNotNull(uniqueKeyType); checkNotNull(uniqueKeySelector); return new JoinInputSideSpec(false, uniqueKeyType, uniqueKeySelector); }
3.68
dubbo_FileSystemDynamicConfiguration_registerDubboShutdownHook
/** * Register the Dubbo ShutdownHook * * @since 2.7.8 */ private void registerDubboShutdownHook() { if (!hasRegisteredShutdownHook.compareAndSet(false, true)) { return; } ShutdownHookCallbacks shutdownHookCallbacks = ScopeModelUtil.getApplicationModel(scopeModel).getBeanFactory().getBean(ShutdownHookCallbacks.class); shutdownHookCallbacks.addCallback(() -> { watchService.ifPresent(w -> { try { w.close(); } catch (IOException e) { throw new RuntimeException(e); } }); getWatchEventsLoopThreadPool().shutdown(); }); }
3.68
hadoop_JobConfigurationParser_parse
/** * Parse the job configuration file (as an input stream) and return a * {@link Properties} collection. The input stream will not be closed after * return from the call. * * @param input * The input data. * @return A {@link Properties} collection extracted from the job * configuration xml. * @throws IOException */ static Properties parse(InputStream input) throws IOException { Properties result = new Properties(); try { DocumentBuilderFactory dbf = XMLUtils.newSecureDocumentBuilderFactory(); DocumentBuilder db = dbf.newDocumentBuilder(); Document doc = db.parse(input); Element root = doc.getDocumentElement(); if (!"configuration".equals(root.getTagName())) { System.out.print("root is not a configuration node"); return null; } NodeList props = root.getChildNodes(); for (int i = 0; i < props.getLength(); ++i) { Node propNode = props.item(i); if (!(propNode instanceof Element)) continue; Element prop = (Element) propNode; if (!"property".equals(prop.getTagName())) { System.out.print("bad conf file: element not <property>"); } NodeList fields = prop.getChildNodes(); String attr = null; String value = null; @SuppressWarnings("unused") boolean finalParameter = false; for (int j = 0; j < fields.getLength(); j++) { Node fieldNode = fields.item(j); if (!(fieldNode instanceof Element)) { continue; } Element field = (Element) fieldNode; if ("name".equals(field.getTagName()) && field.hasChildNodes()) { attr = ((Text) field.getFirstChild()).getData().trim(); } if ("value".equals(field.getTagName()) && field.hasChildNodes()) { value = ((Text) field.getFirstChild()).getData(); } if ("final".equals(field.getTagName()) && field.hasChildNodes()) { finalParameter = "true".equals(((Text) field.getFirstChild()).getData()); } } if (attr != null && value != null) { result.put(attr, value); } } } catch (ParserConfigurationException e) { return null; } catch (SAXException e) { return null; } return result; }
3.68
hadoop_TimelineDomain_getId
/** * Get the domain ID * * @return the domain ID */ @XmlElement(name = "id") public String getId() { return id; }
3.68
flink_TaskStateSnapshot_getMapping
/** Returns the only valid mapping as ensured by {@link StateAssignmentOperation}. */ private InflightDataRescalingDescriptor getMapping( Function<OperatorSubtaskState, InflightDataRescalingDescriptor> mappingExtractor) { return Iterators.getOnlyElement( subtaskStatesByOperatorID.values().stream() .map(mappingExtractor) .filter(mapping -> !mapping.equals(NO_RESCALE)) .iterator(), NO_RESCALE); }
3.68
flink_ExecutionEnvironment_getJobListeners
/** Gets the config JobListeners. */ protected List<JobListener> getJobListeners() { return jobListeners; }
3.68
pulsar_ConsumerInterceptors_onAcknowledge
/** * This is called when acknowledge request return from the broker. * <p> * This method calls {@link ConsumerInterceptor#onAcknowledge(Consumer, MessageId, Throwable)} method for each * interceptor. * <p> * This method does not throw exceptions. Exceptions thrown by any of interceptors in the chain are logged, but not * propagated. * * @param consumer the consumer which contains the interceptors * @param messageId message to acknowledge. * @param exception exception returned by broker. */ public void onAcknowledge(Consumer<T> consumer, MessageId messageId, Throwable exception) { for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) { try { interceptors.get(i).onAcknowledge(consumer, messageId, exception); } catch (Throwable e) { log.warn("Error executing interceptor onAcknowledge callback ", e); } } }
3.68
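The loop in onAcknowledge above shows a common callback-chain pattern: every interceptor is invoked and a failure is logged rather than propagated, so one misbehaving interceptor cannot break the others. A generic, self-contained sketch of that pattern (illustrative names only, not Pulsar's API):

    import java.util.List;
    import java.util.function.Consumer;

    public class SafeCallbackChainSketch {

        // Invokes every callback with the event; a throwing callback is logged and skipped.
        static <T> void fireAll(List<Consumer<T>> callbacks, T event) {
            for (Consumer<T> callback : callbacks) {
                try {
                    callback.accept(event);
                } catch (Throwable t) {
                    System.err.println("callback failed, continuing: " + t);
                }
            }
        }

        public static void main(String[] args) {
            List<Consumer<String>> callbacks = List.of(
                    e -> System.out.println("first saw " + e),
                    e -> { throw new IllegalStateException("boom"); },
                    e -> System.out.println("third saw " + e));
            fireAll(callbacks, "ack-42"); // prints first and third, logs the failure in between
        }
    }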
flink_JoinOperatorSetsBase_equalTo
/** * Continues a Join transformation and defines a {@link KeySelector} function for the second * join {@link DataSet}. * * <p>The KeySelector function is called for each element of the second DataSet and extracts * a single key value on which the DataSet is joined. * * <p>The resulting {@link JoinFunctionAssigner} needs to be finished by providing a {@link * JoinFunction} by calling {@link JoinFunctionAssigner#with(JoinFunction)} * * @param keySelector The KeySelector function which extracts the key values from the second * DataSet on which it is joined. * @return A JoinFunctionAssigner. */ public <K> JoinFunctionAssigner<I1, I2> equalTo(KeySelector<I2, K> keySelector) { TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keySelector, input2.getType()); return createJoinFunctionAssigner( new Keys.SelectorFunctionKeys<>(keySelector, input2.getType(), keyType)); }
3.68
flink_FileCompactStrategy_enableCompactionOnCheckpoint
/** * Optional, compaction will be triggered when N checkpoints passed since the last * triggering, -1 by default indicating no compaction on checkpoint. */ public FileCompactStrategy.Builder enableCompactionOnCheckpoint( int numCheckpointsBeforeCompaction) { checkArgument( numCheckpointsBeforeCompaction > 0, "Number of checkpoints before compaction should be more than 0."); this.numCheckpointsBeforeCompaction = numCheckpointsBeforeCompaction; return this; }
3.68
hadoop_DirectoryPolicyImpl_getDirectoryPolicy
/** * Create/Get the policy for this configuration. * @param conf config * @param authoritativeness Callback to evaluate authoritativeness of a * path. * @return a policy */ public static DirectoryPolicy getDirectoryPolicy( final Configuration conf, final Predicate<Path> authoritativeness) { DirectoryPolicy policy; String option = conf.getTrimmed(DIRECTORY_MARKER_POLICY, DEFAULT_DIRECTORY_MARKER_POLICY); switch (option.toLowerCase(Locale.ENGLISH)) { case DIRECTORY_MARKER_POLICY_DELETE: // backwards compatible. LOG.debug("Directory markers will be deleted"); policy = DELETE; break; case DIRECTORY_MARKER_POLICY_KEEP: LOG.debug("Directory markers will be kept"); policy = KEEP; break; case DIRECTORY_MARKER_POLICY_AUTHORITATIVE: LOG.debug("Directory markers will be kept on authoritative" + " paths"); policy = new DirectoryPolicyImpl(MarkerPolicy.Authoritative, authoritativeness); break; default: throw new IllegalArgumentException(UNKNOWN_MARKER_POLICY + option); } return policy; }
3.68
pulsar_ConsumerHandler_handleEndOfTopic
// Check and notify consumer if reached end of topic. private void handleEndOfTopic() { if (log.isDebugEnabled()) { log.debug("[{}/{}] Received check reach the end of topic request from {} ", consumer.getTopic(), subscription, getRemote().getInetSocketAddress().toString()); } try { String msg = objectWriter().writeValueAsString( new EndOfTopicResponse(consumer.hasReachedEndOfTopic())); getSession().getRemote() .sendString(msg, new WriteCallback() { @Override public void writeFailed(Throwable th) { log.warn("[{}/{}] Failed to send end of topic msg to {} due to {}", consumer.getTopic(), subscription, getRemote().getInetSocketAddress().toString(), th.getMessage()); } @Override public void writeSuccess() { if (log.isDebugEnabled()) { log.debug("[{}/{}] End of topic message is delivered successfully to {} ", consumer.getTopic(), subscription, getRemote().getInetSocketAddress().toString()); } } }); } catch (JsonProcessingException e) { log.warn("[{}] Failed to generate end of topic response: {}", consumer.getTopic(), e.getMessage()); } catch (Exception e) { log.warn("[{}] Failed to send end of topic response: {}", consumer.getTopic(), e.getMessage()); } }
3.68
flink_BinaryExternalSorter_go
/** Entry point of the thread. */ public void go() throws IOException { final Queue<CircularElement> cache = new ArrayDeque<>(); CircularElement element; boolean cacheOnly = false; // ------------------- In-Memory Cache ------------------------ // fill cache while (isRunning()) { // take next currWriteBuffer from queue try { element = this.queues.spill.take(); } catch (InterruptedException iex) { throw new IOException("The spilling thread was interrupted."); } if (element == SPILLING_MARKER) { break; } else if (element == EOF_MARKER) { cacheOnly = true; break; } cache.add(element); } // check whether the thread was canceled if (!isRunning()) { return; } // ------------------- In-Memory Merge ------------------------ if (cacheOnly) { List<MutableObjectIterator<BinaryRowData>> iterators = new ArrayList<>(cache.size()); for (CircularElement cached : cache) { iterators.add(cached.buffer.getIterator()); } // set lazy iterator List<BinaryRowData> reusableEntries = new ArrayList<>(); for (int i = 0; i < iterators.size(); i++) { reusableEntries.add(serializer.createInstance()); } setResultIterator( iterators.isEmpty() ? EmptyMutableObjectIterator.get() : iterators.size() == 1 ? iterators.get(0) : new BinaryMergeIterator<>( iterators, reusableEntries, comparator::compare)); releaseEmptyBuffers(); // signal merging thread to exit (because there is nothing to merge externally) this.queues.merge.add(FINAL_MERGE_MARKER); return; } // ------------------- Spilling Phase ------------------------ final FileIOChannel.Enumerator enumerator = this.ioManager.createChannelEnumerator(); // loop as long as the thread is marked alive and we do not see the final // currWriteBuffer while (isRunning()) { try { element = cache.isEmpty() ? queues.spill.take() : cache.poll(); } catch (InterruptedException iex) { if (isRunning()) { LOG.error( "Spilling thread was interrupted (without being shut down) while grabbing a buffer. " + "Retrying to grab buffer..."); continue; } else { return; } } // check if we are still running if (!isRunning()) { return; } // check if this is the end-of-work buffer if (element == EOF_MARKER) { break; } if (element.buffer.getOccupancy() > 0) { // open next channel FileIOChannel.ID channel = enumerator.next(); channelManager.addChannel(channel); AbstractChannelWriterOutputView output = null; int bytesInLastBuffer; int blockCount; try { numSpillFiles++; output = FileChannelUtil.createOutputView( ioManager, channel, compressionEnabled, compressionCodecFactory, compressionBlockSize, memorySegmentSize); element.buffer.writeToOutput(output); spillInBytes += output.getNumBytes(); spillInCompressedBytes += output.getNumCompressedBytes(); bytesInLastBuffer = output.close(); blockCount = output.getBlockCount(); LOG.info( "here spill the {}th sort buffer data with {} bytes and {} compressed bytes", numSpillFiles, spillInBytes, spillInCompressedBytes); } catch (IOException e) { if (output != null) { output.close(); output.getChannel().deleteChannel(); } throw e; } // pass spill file meta to merging thread this.queues.merge.add( new ChannelWithMeta(channel, blockCount, bytesInLastBuffer)); } // pass empty sort-buffer to reading thread element.buffer.reset(); this.queues.empty.add(element); } // clear the sort buffers, as both sorting and spilling threads are done. releaseSortMemory(); // signal merging thread to begin the final merge this.queues.merge.add(FINAL_MERGE_MARKER); // Spilling thread done. }
3.68
hadoop_OBSLoginHelper_buildFSURI
/** * Build the filesystem URI. This can include stripping down of part of the * URI. * * @param uri filesystem uri * @return the URI to use as the basis for FS operation and qualifying paths. * @throws IllegalArgumentException if the URI is in some way invalid. */ public static URI buildFSURI(final URI uri) { Objects.requireNonNull(uri, "null uri"); Objects.requireNonNull(uri.getScheme(), "null uri.getScheme()"); if (uri.getHost() == null && uri.getAuthority() != null) { Objects.requireNonNull( uri.getHost(), "null uri host." + " This can be caused by unencoded / in the " + "password string"); } Objects.requireNonNull(uri.getHost(), "null uri host."); return URI.create(uri.getScheme() + "://" + uri.getHost()); }
3.68
dubbo_ServiceAddressURL_equals
/** * ignore consumer url compare. * It's only meaningful for comparing two address urls related to the same consumerURL. * * @param obj * @return */ @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (!(obj instanceof ServiceAddressURL)) { return false; } return super.equals(obj); }
3.68
hbase_BlockType_isData
/** Returns whether this block type is encoded or unencoded data block */ public final boolean isData() { return this == DATA || this == ENCODED_DATA; }
3.68
flink_FutureUtils_assertNoException
/** * Asserts that the given {@link CompletableFuture} is not completed exceptionally. If the * future is completed exceptionally, then it will call the {@link FatalExitExceptionHandler}. * * @param completableFuture to assert for no exceptions */ public static void assertNoException(CompletableFuture<?> completableFuture) { handleUncaughtException(completableFuture, FatalExitExceptionHandler.INSTANCE); }
3.68
morf_Function_blobLength
/** * Helper method to create an instance of the "length-of-BLOB" SQL function. * * @param fieldToEvaluate the field to evaluate in the length function. This can be any expression resulting in a single column of data. * @return an instance of the length function. */ public static Function blobLength(AliasedField fieldToEvaluate) { return new Function(FunctionType.BLOB_LENGTH, fieldToEvaluate); }
3.68
morf_SchemaValidator_hashCode
/** * @see java.lang.Object#hashCode() */ @Override public int hashCode() { return index.columnNames().hashCode() + Boolean.valueOf(index.isUnique()).hashCode(); }
3.68
querydsl_Expressions_datePath
/** * Create a new Path expression * * @param type type of expression * @param metadata path metadata * @param <T> type of expression * @return new path instance */ public static <T extends Comparable<?>> DatePath<T> datePath(Class<? extends T> type, PathMetadata metadata) { return new DatePath<T>(type, metadata); }
3.68
hbase_CatalogFamilyFormat_getRegionInfo
/** * Returns RegionInfo object from the column * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog table Result. * @param data a Result object from the catalog table scan * @return RegionInfo or null */ public static RegionInfo getRegionInfo(Result data) { return getRegionInfo(data, HConstants.REGIONINFO_QUALIFIER); }
3.68
hbase_HFileBlock_readBlockDataInternal
/** * Reads a version 2 block. * @param offset the offset in the stream to read at. * @param onDiskSizeWithHeaderL the on-disk size of the block, including the header and * checksums if present or -1 if unknown (as a long). Can be -1 if * we are doing raw iteration of blocks as when loading up file * metadata; i.e. the first read of a new file. Usually non-null * gotten from the file index. * @param pread whether to use a positional read * @param verifyChecksum Whether to use HBase checksums. If HBase checksum is switched * off, then use HDFS checksum. Can also flip on/off reading same * file if we hit a troublesome patch in an hfile. * @param updateMetrics whether need to update the metrics. * @param intoHeap allocate the ByteBuff of block from heap or off-heap. * @return the HFileBlock or null if there is a HBase checksum mismatch */ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset, long onDiskSizeWithHeaderL, boolean pread, boolean verifyChecksum, boolean updateMetrics, boolean intoHeap) throws IOException { final Span span = Span.current(); final AttributesBuilder attributesBuilder = Attributes.builder(); Optional.of(Context.current()).map(val -> val.get(CONTEXT_KEY)) .ifPresent(c -> c.accept(attributesBuilder)); if (offset < 0) { throw new IOException("Invalid offset=" + offset + " trying to read " + "block (onDiskSize=" + onDiskSizeWithHeaderL + ")"); } if (!checkCallerProvidedOnDiskSizeWithHeader(onDiskSizeWithHeaderL)) { LOG.trace("Caller provided invalid onDiskSizeWithHeaderL={}", onDiskSizeWithHeaderL); onDiskSizeWithHeaderL = -1; } int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL; // Try to use the cached header. Will serve us in rare case where onDiskSizeWithHeaderL==-1 // and will save us having to seek the stream backwards to reread the header we // read the last time through here. ByteBuff headerBuf = getCachedHeader(offset); LOG.trace( "Reading {} at offset={}, pread={}, verifyChecksum={}, cachedHeader={}, " + "onDiskSizeWithHeader={}", this.fileContext.getHFileName(), offset, pread, verifyChecksum, headerBuf, onDiskSizeWithHeader); // This is NOT same as verifyChecksum. This latter is whether to do hbase // checksums. Can change with circumstances. The below flag is whether the // file has support for checksums (version 2+). boolean checksumSupport = this.fileContext.isUseHBaseChecksum(); long startTime = EnvironmentEdgeManager.currentTime(); if (onDiskSizeWithHeader == -1) { // The caller does not know the block size. Need to get it from the header. If header was // not cached (see getCachedHeader above), need to seek to pull it in. This is costly // and should happen very rarely. Currently happens on open of a hfile reader where we // read the trailer blocks to pull in the indices. Otherwise, we are reading block sizes // out of the hfile index. To check, enable TRACE in this file and you'll get an exception // in a LOG every time we seek. See HBASE-17072 for more detail. if (headerBuf == null) { if (LOG.isTraceEnabled()) { LOG.trace("Extra seek to get block size!", new RuntimeException()); } span.addEvent("Extra seek to get block size!", attributesBuilder.build()); headerBuf = HEAP.allocate(hdrSize); readAtOffset(is, headerBuf, hdrSize, false, offset, pread); headerBuf.rewind(); } onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport); } // The common case is that onDiskSizeWithHeader was produced by a read without checksum // validation, so give it a sanity check before trying to use it. 
if (!checkOnDiskSizeWithHeader(onDiskSizeWithHeader)) { if (verifyChecksum) { invalidateNextBlockHeader(); span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build()); return null; } else { throw new IOException("Invalid onDiskSizeWithHeader=" + onDiskSizeWithHeader); } } int preReadHeaderSize = headerBuf == null ? 0 : hdrSize; // Allocate enough space to fit the next block's header too; saves a seek next time through. // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header; // onDiskSizeWithHeader is header, body, and any checksums if present. preReadHeaderSize // says where to start reading. If we have the header cached, then we don't need to read // it again and we can likely read from last place we left off w/o need to backup and reread // the header we read last time through here. ByteBuff onDiskBlock = this.allocate(onDiskSizeWithHeader + hdrSize, intoHeap); boolean initHFileBlockSuccess = false; try { if (headerBuf != null) { onDiskBlock.put(0, headerBuf, 0, hdrSize).position(hdrSize); } boolean readNextHeader = readAtOffset(is, onDiskBlock, onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread); onDiskBlock.rewind(); // in case of moving position when copying a cached header // the call to validateChecksum for this block excludes the next block header over-read, so // no reason to delay extracting this value. int nextBlockOnDiskSize = -1; if (readNextHeader) { int parsedVal = getNextBlockOnDiskSize(onDiskBlock, onDiskSizeWithHeader); if (checkOnDiskSizeWithHeader(parsedVal)) { nextBlockOnDiskSize = parsedVal; } } if (headerBuf == null) { headerBuf = onDiskBlock.duplicate().position(0).limit(hdrSize); } ByteBuff curBlock = onDiskBlock.duplicate().position(0).limit(onDiskSizeWithHeader); // Verify checksum of the data before using it for building HFileBlock. if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) { invalidateNextBlockHeader(); span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build()); return null; } // TODO: is this check necessary or can we proceed with a provided value regardless of // what is in the header? int fromHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport); if (onDiskSizeWithHeader != fromHeader) { if (LOG.isTraceEnabled()) { LOG.trace("Passed in onDiskSizeWithHeader={} != {}, offset={}, fileContext={}", onDiskSizeWithHeader, fromHeader, offset, this.fileContext); } if (checksumSupport && verifyChecksum) { // This file supports HBase checksums and verification of those checksums was // requested. The block size provided by the caller (presumably from the block index) // does not match the block size written to the block header. treat this as // HBase-checksum failure. span.addEvent("Falling back to HDFS checksumming.", attributesBuilder.build()); invalidateNextBlockHeader(); return null; } throw new IOException("Passed in onDiskSizeWithHeader=" + onDiskSizeWithHeader + " != " + fromHeader + ", offset=" + offset + ", fileContext=" + this.fileContext); } // remove checksum from buffer now that it's verified int sizeWithoutChecksum = curBlock.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX); curBlock.limit(sizeWithoutChecksum); long duration = EnvironmentEdgeManager.currentTime() - startTime; if (updateMetrics) { HFile.updateReadLatency(duration, pread); } // The onDiskBlock will become the headerAndDataBuffer for this block. 
// If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already // contains the header of next block, so no need to set next block's header in it. HFileBlock hFileBlock = createFromBuff(curBlock, checksumSupport, offset, nextBlockOnDiskSize, fileContext, intoHeap ? HEAP : allocator); // Run check on uncompressed sizings. if (!fileContext.isCompressedOrEncrypted()) { hFileBlock.sanityCheckUncompressed(); } LOG.trace("Read {} in {} ms", hFileBlock, duration); if (!LOG.isTraceEnabled() && this.readWarnTime >= 0 && duration > this.readWarnTime) { LOG.warn("Read Block Slow: read {} cost {} ms, threshold = {} ms", hFileBlock, duration, this.readWarnTime); } span.addEvent("Read block", attributesBuilder.build()); // Cache next block header if we read it for the next time through here. if (nextBlockOnDiskSize != -1) { cacheNextBlockHeader(offset + hFileBlock.getOnDiskSizeWithHeader(), onDiskBlock, onDiskSizeWithHeader, hdrSize); } initHFileBlockSuccess = true; return hFileBlock; } finally { if (!initHFileBlockSuccess) { onDiskBlock.release(); } } }
3.68
flink_TaskTracker_add
/** @return true, if this checkpoint id need be committed. */ public boolean add(long checkpointId, int task) { Set<Integer> tasks = notifiedTasks.computeIfAbsent(checkpointId, (k) -> new HashSet<>()); tasks.add(task); if (tasks.size() == numberOfTasks) { notifiedTasks.headMap(checkpointId, true).clear(); return true; } return false; }
3.68
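The add method above keeps, per checkpoint id, the set of subtasks that have reported it; once all subtasks have reported, that checkpoint and every older pending one are dropped and the call returns true. A self-contained sketch of the same bookkeeping, with illustrative names rather than Flink's:

    import java.util.HashSet;
    import java.util.Set;
    import java.util.TreeMap;

    public class CheckpointTaskTrackerSketch {

        private final int numberOfTasks;
        private final TreeMap<Long, Set<Integer>> notifiedTasks = new TreeMap<>();

        CheckpointTaskTrackerSketch(int numberOfTasks) {
            this.numberOfTasks = numberOfTasks;
        }

        // Returns true exactly once per checkpoint: when the last task reports it.
        boolean add(long checkpointId, int task) {
            Set<Integer> tasks = notifiedTasks.computeIfAbsent(checkpointId, k -> new HashSet<>());
            tasks.add(task);
            if (tasks.size() == numberOfTasks) {
                // This checkpoint is complete; older pending checkpoints are discarded too.
                notifiedTasks.headMap(checkpointId, true).clear();
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            CheckpointTaskTrackerSketch tracker = new CheckpointTaskTrackerSketch(2);
            System.out.println(tracker.add(1L, 0)); // false: only one of two tasks has reported
            System.out.println(tracker.add(1L, 1)); // true: both tasks reported checkpoint 1
        }
    }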
hadoop_AggregatedLogsPage_preHead
/* (non-Javadoc) * @see org.apache.hadoop.yarn.server.nodemanager.webapp.NMView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ @Override protected void preHead(Page.HTML<__> html) { String logEntity = $(ENTITY_STRING); if (logEntity == null || logEntity.isEmpty()) { logEntity = $(CONTAINER_ID); } if (logEntity == null || logEntity.isEmpty()) { logEntity = "UNKNOWN"; } set(TITLE, join("Logs for ", logEntity)); set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); }
3.68
hudi_HoodieTableMetadataUtil_isIndexingCommit
/** * Checks if a delta commit in metadata table is written by async indexer. * <p> * TODO(HUDI-5733): This should be cleaned up once the proper fix of rollbacks in the * metadata table is landed. * * @param instantTime Instant time to check. * @return {@code true} if from async indexer; {@code false} otherwise. */ public static boolean isIndexingCommit(String instantTime) { return instantTime.length() == MILLIS_INSTANT_ID_LENGTH + OperationSuffix.METADATA_INDEXER.getSuffix().length() && instantTime.endsWith(OperationSuffix.METADATA_INDEXER.getSuffix()); }
3.68
flink_HadoopFileSystem_getHadoopFileSystem
/** * Gets the underlying Hadoop FileSystem. * * @return The underlying Hadoop FileSystem. */ public org.apache.hadoop.fs.FileSystem getHadoopFileSystem() { return this.fs; }
3.68
hbase_CompositeImmutableSegment_getScanner
/** * Creates the scanner for the given read point * @return a scanner for the given read point */ @Override public KeyValueScanner getScanner(long readPoint) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); }
3.68
hadoop_SuccessData_getHostname
/** * @return host which created the file (implicitly: committed the work). */ public String getHostname() { return hostname; }
3.68
hadoop_WorkerId_write
/** {@inheritDoc} */ @Override public final void write(final DataOutput dataOutput) throws IOException { workerId.write(dataOutput); hostname.write(dataOutput); ipAdd.write(dataOutput); }
3.68
hbase_ZKProcedureMemberRpcs_sendMemberAcquired
/** * This attempts to create an acquired state znode for the procedure (snapshot name). It then * looks for the reached znode to trigger in-barrier execution. If not present we have a watcher, * if present then trigger the in-barrier action. */ @Override public void sendMemberAcquired(Subprocedure sub) throws IOException { String procName = sub.getName(); try { LOG.debug("Member: '" + memberName + "' joining acquired barrier for procedure (" + procName + ") in zk"); String acquiredZNode = ZNodePaths .joinZNode(ZKProcedureUtil.getAcquireBarrierNode(zkController, procName), memberName); ZKUtil.createAndFailSilent(zkController.getWatcher(), acquiredZNode); // watch for the complete node for this snapshot String reachedBarrier = zkController.getReachedBarrierNode(procName); LOG.debug("Watch for global barrier reached:" + reachedBarrier); if (ZKUtil.watchAndCheckExists(zkController.getWatcher(), reachedBarrier)) { receivedReachedGlobalBarrier(reachedBarrier); } } catch (KeeperException e) { member.controllerConnectionFailure( "Failed to acquire barrier for procedure: " + procName + " and member: " + memberName, e, procName); } }
3.68
hadoop_MutableCounterLong_incr
/** * Increment the value by a delta * @param delta of the increment */ public void incr(long delta) { value.add(delta); setChanged(); }
3.68
hbase_QuotaTableUtil_getNamespaceSnapshots
/** * Returns a set of the names of all namespaces containing snapshot entries. * @param conn connection to re-use */ public static Set<String> getNamespaceSnapshots(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); ResultScanner rs = quotaTable.getScanner(createScanForNamespaceSnapshotSizes())) { Set<String> snapshots = new HashSet<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { cs.current(); snapshots.add(getNamespaceFromRowKey(r.getRow())); } } return snapshots; } }
3.68
hbase_HStoreFile_initReader
/** * Initialize the reader used for pread. */ public void initReader() throws IOException { if (initialReader == null) { synchronized (this) { if (initialReader == null) { try { open(); } catch (Exception e) { try { boolean evictOnClose = cacheConf != null ? cacheConf.shouldEvictOnClose() : true; this.closeStoreFile(evictOnClose); } catch (IOException ee) { LOG.warn("failed to close reader", ee); } throw e; } } } } }
3.68
hadoop_FilterFileSystem_delete
/** Delete a file */ @Override public boolean delete(Path f, boolean recursive) throws IOException { return fs.delete(f, recursive); }
3.68
querydsl_ComparableExpressionBase_max
/** * Create a {@code max(this)} expression * * <p>Get the maximum value of this expression (aggregation)</p> * * @return max(this) */ public ComparableExpressionBase<T> max() { return Expressions.comparableOperation(getType(), Ops.AggOps.MAX_AGG, mixin); }
3.68
open-banking-gateway_EncryptionKeySerde_write
/** * Write symmetric key with initialization vector to output stream. * @param value Key to write * @param os Output stream to write to */ @SneakyThrows public void write(SecretKeyWithIv value, OutputStream os) { // Mapper may choose to close the stream if using stream interface, we don't want this // as objects are small - this is ok. os.write(mapper.writeValueAsBytes(new SecretKeyWithIvContainer(value))); }
3.68
hudi_SparkBootstrapCommitActionExecutor_fullBootstrap
/** * Perform Full Bootstrap. * @param partitionFilesList List of partitions and files within that partitions */ protected Option<HoodieWriteMetadata<HoodieData<WriteStatus>>> fullBootstrap(List<Pair<String, List<HoodieFileStatus>>> partitionFilesList) { if (null == partitionFilesList || partitionFilesList.isEmpty()) { return Option.empty(); } TypedProperties properties = new TypedProperties(); properties.putAll(config.getProps()); FullRecordBootstrapDataProvider inputProvider = (FullRecordBootstrapDataProvider) ReflectionUtils.loadClass(config.getFullBootstrapInputProvider(), properties, context); JavaRDD<HoodieRecord> inputRecordsRDD = (JavaRDD<HoodieRecord>) inputProvider.generateInputRecords("bootstrap_source", config.getBootstrapSourceBasePath(), partitionFilesList, config); // Start Full Bootstrap String bootstrapInstantTime = HoodieTimeline.FULL_BOOTSTRAP_INSTANT_TS; final HoodieInstant requested = new HoodieInstant( State.REQUESTED, table.getMetaClient().getCommitActionType(), bootstrapInstantTime); table.getActiveTimeline().createNewInstant(requested); // Setup correct schema and run bulk insert. Option<HoodieWriteMetadata<HoodieData<WriteStatus>>> writeMetadataOption = Option.of(getBulkInsertActionExecutor(HoodieJavaRDD.of(inputRecordsRDD)).execute()); // Delete the marker directory for the instant WriteMarkersFactory.get(config.getMarkersType(), table, bootstrapInstantTime) .quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism()); return writeMetadataOption; }
3.68
hudi_TableOptionProperties_createProperties
/** * Initialize the {@link #FILE_NAME} meta file. */ public static void createProperties(String basePath, Configuration hadoopConf, Map<String, String> options) throws IOException { Path propertiesFilePath = getPropertiesFilePath(basePath); FileSystem fs = FSUtils.getFs(basePath, hadoopConf); try (FSDataOutputStream outputStream = fs.create(propertiesFilePath)) { Properties properties = new Properties(); properties.putAll(options); properties.store(outputStream, "Table option properties saved on " + new Date(System.currentTimeMillis())); } LOG.info(String.format("Create file %s success.", propertiesFilePath)); }
3.68
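The persistence step in createProperties above is a plain java.util.Properties write routed through Hadoop's FileSystem. Stripped of the Hadoop layer, the same step looks roughly like the local-filesystem sketch below (illustrative names, not Hudi's code):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Date;
    import java.util.Map;
    import java.util.Properties;

    public class TableOptionPropertiesSketch {

        // Writes the given options as a .properties file with a timestamped header comment.
        static void createProperties(Path propertiesFilePath, Map<String, String> options) throws IOException {
            Properties properties = new Properties();
            properties.putAll(options);
            try (OutputStream out = Files.newOutputStream(propertiesFilePath)) {
                properties.store(out, "Table option properties saved on " + new Date());
            }
        }

        public static void main(String[] args) throws IOException {
            createProperties(Path.of("table_options.properties"), Map.of("hoodie.table.name", "demo"));
        }
    }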
morf_AbstractSqlDialectTest_expectedSelectOrderByTwoFields
/** * @return Expected SQL for {@link #testSelectOrderByTwoFields()} */ protected String expectedSelectOrderByTwoFields() { return "SELECT stringField1, stringField2 FROM " + tableName(ALTERNATE_TABLE) + " ORDER BY stringField1 DESC NULLS FIRST, stringField2 NULLS LAST"; }
3.68
hbase_TableDescriptors_exists
/** * Test whether a given table exists, i.e, has a table descriptor. */ default boolean exists(TableName tableName) throws IOException { return get(tableName) != null; }
3.68
hudi_HoodieMultiTableStreamer_resetTarget
/** * Resets target table name and target path using base-path-prefix. * * @param configuration * @param database * @param tableName * @return */ private static String resetTarget(Config configuration, String database, String tableName) { String basePathPrefix = configuration.basePathPrefix; basePathPrefix = basePathPrefix.charAt(basePathPrefix.length() - 1) == '/' ? basePathPrefix.substring(0, basePathPrefix.length() - 1) : basePathPrefix; String targetBasePath = basePathPrefix + Constants.FILE_DELIMITER + database + Constants.FILE_DELIMITER + tableName; configuration.targetTableName = database + Constants.DELIMITER + tableName; return targetBasePath; }
3.68
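resetTarget above is simple path assembly: strip a trailing slash from the base-path prefix, then join prefix, database and table with the file delimiter. A standalone sketch of that assembly, assuming '/' as the delimiter (Hudi's Constants class is not used here):

    public class TargetPathSketch {

        private static final String FILE_DELIMITER = "/"; // assumed delimiter, for illustration only

        // Builds <prefix>/<database>/<table>, tolerating a trailing slash on the prefix.
        static String targetBasePath(String basePathPrefix, String database, String tableName) {
            String prefix = basePathPrefix.endsWith(FILE_DELIMITER)
                    ? basePathPrefix.substring(0, basePathPrefix.length() - 1)
                    : basePathPrefix;
            return prefix + FILE_DELIMITER + database + FILE_DELIMITER + tableName;
        }

        public static void main(String[] args) {
            // e.g. prefix "s3://bucket/warehouse/" -> "s3://bucket/warehouse/sales/orders"
            System.out.println(targetBasePath("s3://bucket/warehouse/", "sales", "orders"));
        }
    }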
flink_FutureUtils_retry
/** * Retry the given operation the given number of times in case of a failure only when an * exception is retryable. * * @param operation to executed * @param retries if the operation failed * @param retryPredicate Predicate to test whether an exception is retryable * @param executor to use to run the futures * @param <T> type of the result * @return Future containing either the result of the operation or a {@link RetryException} */ public static <T> CompletableFuture<T> retry( final Supplier<CompletableFuture<T>> operation, final int retries, final Predicate<Throwable> retryPredicate, final Executor executor) { final CompletableFuture<T> resultFuture = new CompletableFuture<>(); retryOperation(resultFuture, operation, retries, retryPredicate, executor); return resultFuture; }
3.68
hadoop_FilterFileSystem_makeQualified
/** Make sure that a path specifies a FileSystem. */ @Override public Path makeQualified(Path path) { Path fqPath = fs.makeQualified(path); // swap in our scheme if the filtered fs is using a different scheme if (swapScheme != null) { try { // NOTE: should deal with authority, but too much other stuff is broken fqPath = new Path( new URI(swapScheme, fqPath.toUri().getSchemeSpecificPart(), null) ); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } return fqPath; }
3.68
pulsar_RecordSchemaBuilderImpl_splitName
/** * Split a full dotted-syntax name into a namespace and a single-component name. */ private static String[] splitName(String fullName) { String[] result = new String[2]; int indexLastDot = fullName.lastIndexOf('.'); if (indexLastDot >= 0) { result[0] = fullName.substring(0, indexLastDot); result[1] = fullName.substring(indexLastDot + 1); } else { result[0] = null; result[1] = fullName; } return result; }
3.68
framework_ComponentLocator_getElementsByPathStartingAt
/** * Locates elements using a String locator (path) which identifies DOM * elements. The path starts from the specified root element. * * @see #getElementByPath(String) * * @since 7.2 * @param path * The path of elements to be found * @param root * The root element where the path is anchored * @return The JavaScriptArray of DOM elements identified by {@code path} or * empty array if elements could not be located. */ public JsArray<Element> getElementsByPathStartingAt(String path, Element root) { JsArray<Element> jsElements = JavaScriptObject.createArray().cast(); for (LocatorStrategy strategy : locatorStrategies) { if (strategy.validatePath(path)) { List<Element> elements = strategy .getElementsByPathStartingAt(path, root); if (!elements.isEmpty()) { for (Element e : elements) { jsElements.push(e); } return jsElements; } } } return jsElements; }
3.68
hbase_BitSetNode_alignUp
// ======================================================================== // Helpers // ======================================================================== /** Returns upper boundary (aligned to multiple of BITS_PER_WORD) of bitmap range x belongs to. */ private static long alignUp(final long x) { return (x + (BITS_PER_WORD - 1)) & -BITS_PER_WORD; }
3.68
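The alignUp helper above relies on the standard power-of-two rounding trick: adding BITS_PER_WORD - 1 and masking with -BITS_PER_WORD rounds x up to the next multiple of BITS_PER_WORD. A tiny standalone check, assuming the usual value of 64 bits per word:

    public class AlignUpSketch {

        private static final long BITS_PER_WORD = 64; // assumption: one 64-bit word per bitmap entry

        // Rounds x up to a multiple of BITS_PER_WORD; valid because BITS_PER_WORD is a power of two.
        static long alignUp(long x) {
            return (x + (BITS_PER_WORD - 1)) & -BITS_PER_WORD;
        }

        public static void main(String[] args) {
            System.out.println(alignUp(1));  // 64
            System.out.println(alignUp(64)); // 64
            System.out.println(alignUp(65)); // 128
        }
    }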
hudi_AbstractTableFileSystemView_fetchLatestBaseFile
/** * Default implementation for fetching latest base-file. * * @param partitionPath Partition path * @param fileId File Id * @return base File if present */ protected Option<HoodieBaseFile> fetchLatestBaseFile(String partitionPath, String fileId) { return Option.fromJavaOptional(fetchLatestBaseFiles(partitionPath) .filter(fs -> fs.getFileId().equals(fileId)).findFirst()); }
3.68
hadoop_TimelineEvent_getInfoJAXB
// required by JAXB @InterfaceAudience.Private @XmlElement(name = "info") public HashMap<String, Object> getInfoJAXB() { return info; }
3.68
flink_NullableSerializer_wrapIfNullIsNotSupported
/** * This method tries to serialize {@code null} value with the {@code originalSerializer} and * wraps it in case of {@link NullPointerException}, otherwise it returns the {@code * originalSerializer}. * * @param originalSerializer serializer to wrap and add {@code null} support * @param padNullValueIfFixedLen pad null value to preserve the fixed length of original * serializer * @return serializer which supports {@code null} values */ public static <T> TypeSerializer<T> wrapIfNullIsNotSupported( @Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) { return checkIfNullSupported(originalSerializer) ? originalSerializer : wrap(originalSerializer, padNullValueIfFixedLen); }
3.68
hadoop_S3ListResult_logAtDebug
/** * Dump the result at debug level. * @param log log to use */ public void logAtDebug(Logger log) { Collection<CommonPrefix> prefixes = getCommonPrefixes(); Collection<S3Object> s3Objects = getS3Objects(); log.debug("Prefix count = {}; object count={}", prefixes.size(), s3Objects.size()); for (S3Object s3Object : s3Objects) { log.debug("Summary: {} {}", s3Object.key(), s3Object.size()); } for (CommonPrefix prefix : prefixes) { log.debug("Prefix: {}", prefix.prefix()); } }
3.68
flink_FileStateHandle_getFilePath
/** * Gets the path where this handle's state is stored. * * @return The path where this handle's state is stored. */ public Path getFilePath() { return filePath; }
3.68
hbase_CommonFSUtils_getCurrentFileSystem
/** * Returns the filesystem of the hbase rootdir. * @throws IOException from underlying FileSystem */ public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException { return getRootDir(conf).getFileSystem(conf); }
3.68
framework_VNotification_showNotification
/** * Creates and shows a {@code Notification} with the specified parameters. * * @param client * The client connection, cannot be {@code null}. * @param caption * The Notification caption, can be {@code null}. * @param description * The Notification description, can be {@code null}. * @param htmlContentAllowed * Whether {@code caption} and {@code description} are * interpreted as HTML or not. * @param iconUri * The icon URI, can be {@code null}. * @param styleName * The Notification style name, can be {@code null}. * @param position * The desired {@link Position}, can not be {@code null}. * @param delayMsec * The delay in milliseconds before disappearing, -1 for forever. * * @since 8.2 */ public static VNotification showNotification(ApplicationConnection client, String caption, String description, boolean htmlContentAllowed, String iconUri, String styleName, Position position, int delayMsec) { String html = ""; if (iconUri != null) { html += client.getIcon(iconUri).getElement().getString(); } if (caption != null) { if (!htmlContentAllowed) { caption = WidgetUtil.escapeHTML(caption); caption = caption.replaceAll("\\n", "<br />"); } html += "<h1 class='" + getDependentStyle(client, CAPTION) + "'>" + caption + "</h1>"; } if (description != null) { if (!htmlContentAllowed) { description = WidgetUtil.escapeHTML(description); description = description.replaceAll("\\n", "<br />"); } html += "<p class='" + getDependentStyle(client, DESCRIPTION) + "'>" + description + "</p>"; } VNotification vNotification = createNotification(delayMsec, client.getUIConnector().getWidget()); vNotification.show(html, position, styleName); return vNotification; }
3.68
hudi_DynamoTableUtils_waitUntilExists
/** * Waits up to a specified amount of time for a specified DynamoDB table to * resolve, indicating that it exists. If the table doesn't return a result * after this time, a SdkClientException is thrown. * * @param dynamo * The DynamoDB client to use to make requests. * @param tableName * The name of the table being resolved. * @param timeout * The maximum number of milliseconds to wait. * @param interval * The poll interval in milliseconds. * * @throws SdkClientException * If the specified table does not resolve before this method * times out and stops polling. * @throws InterruptedException * If the thread is interrupted while waiting for the table to * resolve. */ public static void waitUntilExists(final DynamoDbClient dynamo, final String tableName, final int timeout, final int interval) throws InterruptedException { TableDescription table = waitForTableDescription(dynamo, tableName, null, timeout, interval); if (table == null) { throw SdkClientException.builder().message("Table " + tableName + " never returned a result").build(); } }
3.68
hmily_HmilyTacTransactionManager_rollback
/** * Rollback. * * @param currentTransaction the current transaction */ public void rollback(final HmilyTransaction currentTransaction) { if (Objects.isNull(currentTransaction)) { return; } log.debug("TAC-tm-rollback ::: {}", currentTransaction); List<HmilyParticipant> hmilyParticipants = currentTransaction.getHmilyParticipants(); if (CollectionUtils.isEmpty(hmilyParticipants)) { return; } List<Boolean> successList = Lists.newArrayList(); for (HmilyParticipant participant : hmilyParticipants) { try { if (participant.getRole() == HmilyRoleEnum.START.getCode()) { HmilyTacLocalParticipantExecutor.cancel(participant); } else { HmilyReflector.executor(HmilyActionEnum.CANCELING, ExecutorTypeEnum.RPC, participant); } successList.add(true); } catch (Throwable e) { successList.add(false); log.error("HmilyParticipant rollback exception :{} ", participant.toString()); } finally { HmilyContextHolder.remove(); } } if (successList.stream().allMatch(e -> e)) { // remove global HmilyRepositoryStorage.removeHmilyTransaction(currentTransaction); } }
3.68
flink_PrimitiveArrayTypeInfo_getComponentClass
/** * Gets the class that represents the component type. * * @return The class of the component type. */ @PublicEvolving public Class<?> getComponentClass() { return this.arrayClass.getComponentType(); }
3.68
cron-utils_FieldValue_toString
/** * String representation of encapsulated value. * * @return String, never null */ @Override public final String toString() { return String.format("%s", getValue()); }
3.68
rocketmq-connect_AbstractConfigManagementService_deleteConnectorConfig
/** * delete config * * @param connectorName */ @Override public void deleteConnectorConfig(String connectorName) { if (!connectorKeyValueStore.containsKey(connectorName)) { throw new ConnectException("Connector [" + connectorName + "] does not exist"); } // new struct Struct struct = new Struct(CONNECTOR_DELETE_CONFIGURATION_V1); struct.put(FIELD_EPOCH, System.currentTimeMillis()); struct.put(FIELD_DELETED, true); byte[] config = converter.fromConnectData(topic, CONNECTOR_DELETE_CONFIGURATION_V1, struct); notify(TARGET_STATE_KEY(connectorName), config); }
3.68
hbase_Query_getReplicaId
/** * Returns region replica id where Query will fetch data from. * @return region replica id or -1 if not set. */ public int getReplicaId() { return this.targetReplicaId; }
3.68
pulsar_FieldParser_convert
/** * Convert the given object value to the given class. * * @param from * The object value to be converted. * @param to * The type class which the given object should be converted to. * @return The converted object value. * @throws UnsupportedOperationException * If no suitable converter can be found. * @throws RuntimeException * If conversion failed somehow. This can be caused by at least an ExceptionInInitializerError, * IllegalAccessException or InvocationTargetException. */ @SuppressWarnings("unchecked") public static <T> T convert(Object from, Class<T> to) { requireNonNull(to); if (from == null) { return null; } to = (Class<T>) wrap(to); // Can we cast? Then just do it. if (to.isAssignableFrom(from.getClass())) { return to.cast(from); } // Lookup the suitable converter. String converterId = from.getClass().getName() + "_" + to.getName(); Method converter = CONVERTERS.get(converterId); if (to.isEnum()) { // Converting string to enum EnumResolver r = EnumResolver.constructUsingToString((Class<Enum<?>>) to, ANNOTATION_INTROSPECTOR); T value = (T) r.findEnum((String) from); if (value == null) { throw new RuntimeException("Invalid value '" + from + "' for enum " + to); } return value; } if (converter == null) { throw new UnsupportedOperationException("Cannot convert from " + from.getClass().getName() + " to " + to.getName() + ". Requested converter does not exist."); } // Convert the value. try { Object val = converter.invoke(to, from); return to.cast(val); } catch (Exception e) { throw new RuntimeException("Cannot convert from " + from.getClass().getName() + " to " + to.getName() + ". Conversion failed with " + e.getMessage(), e); } }
3.68
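FieldParser.convert above first tries a plain cast, then enum resolution, and otherwise looks up a converter method under a "fromClass_toClass" key. The registry-lookup core of that idea can be shown with a small self-contained sketch (hypothetical class and method names, no Jackson enum handling):

    import java.lang.reflect.Method;
    import java.util.HashMap;
    import java.util.Map;

    public class ConverterRegistrySketch {

        private static final Map<String, Method> CONVERTERS = new HashMap<>();

        static {
            // Index every public "toX(Y)" method of this class under the key "Y_X".
            for (Method method : ConverterRegistrySketch.class.getMethods()) {
                if (method.getName().startsWith("to") && method.getParameterCount() == 1) {
                    CONVERTERS.put(method.getParameterTypes()[0].getName() + "_" + method.getReturnType().getName(),
                            method);
                }
            }
        }

        public static Integer toInteger(String value) {
            return Integer.valueOf(value);
        }

        static <T> T convert(Object from, Class<T> to) throws Exception {
            if (to.isAssignableFrom(from.getClass())) {
                return to.cast(from); // already the right type, just cast
            }
            Method converter = CONVERTERS.get(from.getClass().getName() + "_" + to.getName());
            if (converter == null) {
                throw new UnsupportedOperationException("No converter from " + from.getClass() + " to " + to);
            }
            return to.cast(converter.invoke(null, from)); // static converter, no receiver needed
        }

        public static void main(String[] args) throws Exception {
            Integer port = convert("8080", Integer.class);
            System.out.println(port + 1); // 8081
        }
    }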
morf_SqlUtils_blobLiteral
/** * Constructs a new {@link BlobFieldLiteral} from given String, * which is turned into bytes using a UTF-8 encoding. * * @param value the literal value to use * @return {@link BlobFieldLiteral} */ public static BlobFieldLiteral blobLiteral(String value) { return new BlobFieldLiteral(value.getBytes(StandardCharsets.UTF_8)); }
3.68
flink_CommittableCollector_getSubtaskId
/** * Returns subtask id. * * @return subtask id. */ public int getSubtaskId() { return subtaskId; }
3.68
framework_AbstractTransactionalQuery_beginTransaction
/** * Reserves a connection with auto-commit off if no transaction is in * progress. * * @throws IllegalStateException * if a transaction is already open * @throws SQLException * if a connection could not be obtained or configured */ public void beginTransaction() throws UnsupportedOperationException, SQLException { if (isInTransaction()) { throw new IllegalStateException("A transaction is already active!"); } activeConnection = connectionPool.reserveConnection(); activeConnection.setAutoCommit(false); }
3.68
hudi_HoodieRepairTool_doRepair
/** * Does repair, either in REPAIR or DRY_RUN mode. * * @param startingInstantOption {@link Option} of starting instant for scanning, can be empty. * @param endingInstantOption {@link Option} of ending instant for scanning, can be empty. * @param isDryRun Is dry run. * @throws IOException upon errors. */ boolean doRepair( Option<String> startingInstantOption, Option<String> endingInstantOption, boolean isDryRun) throws IOException { // Scans all partitions to find base and log files in the base path List<Path> allFilesInPartitions = HoodieDataTableUtils.getBaseAndLogFilePathsFromFileSystem(tableMetadata, cfg.basePath); // Buckets the files based on instant time // instant time -> relative paths of base and log files to base path Map<String, List<String>> instantToFilesMap = RepairUtils.tagInstantsOfBaseAndLogFiles( metaClient.getBasePath(), allFilesInPartitions); List<String> instantTimesToRepair = instantToFilesMap.keySet().stream() .filter(instant -> (!startingInstantOption.isPresent() || instant.compareTo(startingInstantOption.get()) >= 0) && (!endingInstantOption.isPresent() || instant.compareTo(endingInstantOption.get()) <= 0) ).collect(Collectors.toList()); HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline(); HoodieArchivedTimeline archivedTimeline = metaClient.getArchivedTimeline(); // This assumes that the archived timeline only has completed instants so this is safe archivedTimeline.loadCompletedInstantDetailsInMemory(); List<ImmutablePair<String, List<String>>> instantFilesToRemove = context.parallelize(instantTimesToRepair) .map(instantToRepair -> new ImmutablePair<>(instantToRepair, RepairUtils.findInstantFilesToRemove(instantToRepair, instantToFilesMap.get(instantToRepair), activeTimeline, archivedTimeline))) .collectAsList(); List<ImmutablePair<String, List<String>>> instantsWithDanglingFiles = instantFilesToRemove.stream().filter(e -> !e.getValue().isEmpty()).collect(Collectors.toList()); printRepairInfo(instantTimesToRepair, instantsWithDanglingFiles); if (!isDryRun) { List<String> relativeFilePathsToDelete = instantsWithDanglingFiles.stream() .flatMap(e -> e.getValue().stream()) .collect(Collectors.toList()); if (relativeFilePathsToDelete.size() > 0) { if (!backupFiles(relativeFilePathsToDelete)) { LOG.error("Error backing up dangling files. Exiting..."); return false; } return deleteFiles(context, cfg.basePath, relativeFilePathsToDelete); } LOG.info(String.format("Table repair on %s is successful", cfg.basePath)); } return true; }
3.68
morf_SchemaHomology_columnsMatch
/** * Compare two columns. * * @param column1 Column 1 * @param column2 Column 2 * @return Whether they match. */ public boolean columnsMatch(Column column1, Column column2) { noDifferences = true; checkColumn(null, column1, column2); return noDifferences; }
3.68
pulsar_ClientCnxIdleState_compareAndSetIdleStat
/** * Compare and switch idle-stat. * @return Whether the update is successful. Because other threads may be competing, it may return false. */ boolean compareAndSetIdleStat(State originalStat, State newStat) { return STATE_UPDATER.compareAndSet(this, originalStat, newStat); }
3.68
flink_AbstractBytesMultiMap_free
/** @param reservedFixedMemory reserved fixed memory or not. */ @Override public void free(boolean reservedFixedMemory) { recordArea.release(); numKeys = 0; super.free(reservedFixedMemory); }
3.68
hudi_FileSystemViewManager_createSpillableMapBasedFileSystemView
/** * Create a spillable Map based file System view for a table. * * @param conf Hadoop Configuration * @param viewConf View Storage Configuration * @param metaClient HoodieTableMetaClient * @return */ private static SpillableMapBasedFileSystemView createSpillableMapBasedFileSystemView(SerializableConfiguration conf, FileSystemViewStorageConfig viewConf, HoodieTableMetaClient metaClient, HoodieCommonConfig commonConfig) { LOG.info("Creating SpillableMap based view for basePath " + metaClient.getBasePath()); HoodieTimeline timeline = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants(); return new SpillableMapBasedFileSystemView(metaClient, timeline, viewConf, commonConfig); }
3.68
flink_DefaultCompletedCheckpointStore_tryRemove
/** * Tries to remove the checkpoint identified by the given checkpoint id. * * @param checkpointId identifying the checkpoint to remove * @return true if the checkpoint could be removed */ private boolean tryRemove(long checkpointId) throws Exception { return checkpointStateHandleStore.releaseAndTryRemove( completedCheckpointStoreUtil.checkpointIDToName(checkpointId)); }
3.68
hmily_HmilyTccTransactionExecutor_globalCancel
/** * cancel transaction. * * @param currentTransaction {@linkplain HmilyTransaction} */ public void globalCancel(final HmilyTransaction currentTransaction) { LogUtil.debug(LOGGER, () -> "tcc cancel ...........start!"); if (Objects.isNull(currentTransaction) || CollectionUtils.isEmpty(currentTransaction.getHmilyParticipants())) { return; } currentTransaction.setStatus(HmilyActionEnum.CANCELING.getCode()); //update cancel HmilyRepositoryStorage.updateHmilyTransactionStatus(currentTransaction); final List<HmilyParticipant> hmilyParticipants = currentTransaction.getHmilyParticipants(); for (HmilyParticipant hmilyParticipant : hmilyParticipants) { try { if (hmilyParticipant.getRole() == HmilyRoleEnum.START.getCode()) { HmilyReflector.executor(HmilyActionEnum.CANCELING, ExecutorTypeEnum.LOCAL, hmilyParticipant); HmilyRepositoryStorage.removeHmilyParticipant(hmilyParticipant); } else { HmilyReflector.executor(HmilyActionEnum.CANCELING, ExecutorTypeEnum.RPC, hmilyParticipant); } } catch (Throwable e) { LOGGER.error("HmilyParticipant cancel exception :{}", hmilyParticipant.toString(), e); } finally { HmilyContextHolder.remove(); } } }
3.68
hudi_HoodieFileGroup_isFileSliceCommitted
/**
 * A FileSlice is considered committed if one of the following is true: there is a committed data file, or there
 * are log files that are based off a commit or delta commit.
 */
private boolean isFileSliceCommitted(FileSlice slice) {
  if (!compareTimestamps(slice.getBaseInstantTime(), LESSER_THAN_OR_EQUALS, lastInstant.get().getTimestamp())) {
    return false;
  }
  return timeline.containsOrBeforeTimelineStarts(slice.getBaseInstantTime());
}
3.68
morf_Criterion_or
/** * Helper method to create a new "OR" expression. * * <blockquote><pre> * Criterion.or(listOfCriterions);</pre></blockquote> * * @param criteria the criteria * @return a new Criterion object */ public static Criterion or(Iterable<Criterion> criteria) { return new Criterion(Operator.OR, criteria); }
3.68
framework_VDateTimeCalendarPanel_buildTime
/** * Constructs the ListBoxes and updates their value * */ @SuppressWarnings("deprecation") private void buildTime() { clear(); hours = createListBox(); if (getDateTimeService().isTwelveHourClock()) { hours.addItem("12"); for (int i = 1; i < 12; i++) { hours.addItem(DateTimeService.asTwoDigits(i)); } } else { for (int i = 0; i < 24; i++) { hours.addItem(DateTimeService.asTwoDigits(i)); } } hours.addChangeHandler(this); if (getDateTimeService().isTwelveHourClock()) { ampm = createListBox(); final String[] ampmText = getDateTimeService().getAmPmStrings(); ampm.addItem(ampmText[0]); ampm.addItem(ampmText[1]); ampm.addChangeHandler(this); } if (getResolution().compareTo(DateTimeResolution.MINUTE) <= 0) { mins = createListBox(); for (int i = 0; i < 60; i++) { mins.addItem(DateTimeService.asTwoDigits(i)); } mins.addChangeHandler(this); } if (getResolution().compareTo(DateTimeResolution.SECOND) <= 0) { sec = createListBox(); for (int i = 0; i < 60; i++) { sec.addItem(DateTimeService.asTwoDigits(i)); } sec.addChangeHandler(this); } // Update times updateTimes(); final String delimiter = getDateTimeService().getClockDelimeter(); if (isReadonly()) { int h = 0; if (getDate() != null) { h = getDate().getHours(); } if (getDateTimeService().isTwelveHourClock()) { h -= h < 12 ? 0 : 12; } add(new VLabel(DateTimeService.asTwoDigits(h))); } else { add(hours); } if (getResolution().compareTo(DateTimeResolution.MINUTE) <= 0) { add(new VLabel(delimiter)); if (isReadonly()) { final int m = mins.getSelectedIndex(); add(new VLabel(DateTimeService.asTwoDigits(m))); } else { add(mins); } } if (getResolution().compareTo(DateTimeResolution.SECOND) <= 0) { add(new VLabel(delimiter)); if (isReadonly()) { final int s = sec.getSelectedIndex(); add(new VLabel(DateTimeService.asTwoDigits(s))); } else { add(sec); } } if (getResolution() == DateTimeResolution.HOUR) { add(new VLabel(delimiter + "00")); // o'clock } if (getDateTimeService().isTwelveHourClock()) { add(new VLabel("&nbsp;")); if (isReadonly()) { int i = 0; if (getDate() != null) { i = (getDate().getHours() < 12) ? 0 : 1; } add(new VLabel(ampm.getItemText(i))); } else { add(ampm); } } if (isReadonly()) { return; } ListBox lastDropDown = getLastDropDown(); lastDropDown.addKeyDownHandler(event -> { boolean shiftKey = event.getNativeEvent().getShiftKey(); if (!shiftKey) { int nativeKeyCode = event.getNativeKeyCode(); if (nativeKeyCode == KeyCodes.KEY_TAB) { onTabOut(event); } } }); }
3.68
graphhopper_PathSimplification_simplify
/** * Convenience method used to obtain the partitions from a calculated path with details and instructions */ public static PointList simplify(ResponsePath responsePath, RamerDouglasPeucker ramerDouglasPeucker, boolean enableInstructions) { final PointList pointList = responsePath.getPoints(); List<Partition> partitions = new ArrayList<>(); // make sure all waypoints are retained in the simplified point list // we copy the waypoint indices into temporary intervals where they will be mutated by the simplification, // afterwards we need to update the way point indices accordingly. List<Interval> intervals = new ArrayList<>(); for (int i = 0; i < responsePath.getWaypointIndices().size() - 1; i++) intervals.add(new Interval(responsePath.getWaypointIndices().get(i), responsePath.getWaypointIndices().get(i + 1))); partitions.add(new Partition() { @Override public int size() { return intervals.size(); } @Override public int getIntervalLength(int index) { return intervals.get(index).end - intervals.get(index).start; } @Override public void setInterval(int index, int start, int end) { intervals.get(index).start = start; intervals.get(index).end = end; } }); // todo: maybe this code can be simplified if path details and instructions would be merged, see #1121 if (enableInstructions) { final InstructionList instructions = responsePath.getInstructions(); partitions.add(new Partition() { @Override public int size() { return instructions.size(); } @Override public int getIntervalLength(int index) { return instructions.get(index).getLength(); } @Override public void setInterval(int index, int start, int end) { Instruction instruction = instructions.get(index); if (instruction instanceof ViaInstruction || instruction instanceof FinishInstruction) { if (start != end) { throw new IllegalStateException("via- and finish-instructions are expected to have zero length"); } // have to make sure that via instructions and finish instructions contain a single point // even though their 'instruction length' is zero. end++; } instruction.setPoints(pointList.shallowCopy(start, end, false)); } }); } for (final Map.Entry<String, List<PathDetail>> entry : responsePath.getPathDetails().entrySet()) { // If the pointList only contains one point, PathDetails have to be empty because 1 point => 0 edges final List<PathDetail> detail = entry.getValue(); if (detail.isEmpty() && pointList.size() > 1) throw new IllegalStateException("PathDetails " + entry.getKey() + " must not be empty"); partitions.add(new Partition() { @Override public int size() { return detail.size(); } @Override public int getIntervalLength(int index) { return detail.get(index).getLength(); } @Override public void setInterval(int index, int start, int end) { PathDetail pd = detail.get(index); pd.setFirst(start); pd.setLast(end); } }); } simplify(responsePath.getPoints(), partitions, ramerDouglasPeucker); List<Integer> simplifiedWaypointIndices = new ArrayList<>(); simplifiedWaypointIndices.add(intervals.get(0).start); for (Interval interval : intervals) simplifiedWaypointIndices.add(interval.end); responsePath.setWaypointIndices(simplifiedWaypointIndices); assertConsistencyOfPathDetails(responsePath.getPathDetails()); if (enableInstructions) assertConsistencyOfInstructions(responsePath.getInstructions(), responsePath.getPoints().size()); return pointList; }
3.68
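The heavy lifting above is delegated to RamerDouglasPeucker while the Partition callbacks keep waypoints, instructions and path details aligned with the simplified points. For reference, a standalone Ramer-Douglas-Peucker over 2D points might look like the sketch below; it is a generic illustration of the algorithm, not GraphHopper's implementation.

import java.util.ArrayList;
import java.util.List;

public class RdpSketch {
    // Recursively drops points whose perpendicular distance to the chord
    // first..last is below the tolerance; endpoints are always kept.
    static List<double[]> simplify(List<double[]> pts, double tolerance) {
        if (pts.size() < 3) {
            return new ArrayList<>(pts);
        }
        int splitIndex = -1;
        double maxDist = 0;
        double[] a = pts.get(0), b = pts.get(pts.size() - 1);
        for (int i = 1; i < pts.size() - 1; i++) {
            double d = perpendicularDistance(pts.get(i), a, b);
            if (d > maxDist) {
                maxDist = d;
                splitIndex = i;
            }
        }
        List<double[]> result = new ArrayList<>();
        if (maxDist > tolerance) {
            List<double[]> left = simplify(pts.subList(0, splitIndex + 1), tolerance);
            List<double[]> right = simplify(pts.subList(splitIndex, pts.size()), tolerance);
            result.addAll(left.subList(0, left.size() - 1)); // avoid duplicating the split point
            result.addAll(right);
        } else {
            result.add(a);
            result.add(b);
        }
        return result;
    }

    private static double perpendicularDistance(double[] p, double[] a, double[] b) {
        double dx = b[0] - a[0], dy = b[1] - a[1];
        double norm = Math.hypot(dx, dy);
        if (norm == 0) {
            return Math.hypot(p[0] - a[0], p[1] - a[1]);
        }
        return Math.abs(dy * p[0] - dx * p[1] + b[0] * a[1] - b[1] * a[0]) / norm;
    }

    public static void main(String[] args) {
        List<double[]> pts = List.of(
                new double[]{0, 0}, new double[]{1, 0.1}, new double[]{2, -0.1},
                new double[]{3, 5}, new double[]{4, 6}, new double[]{5, 7});
        System.out.println(simplify(new ArrayList<>(pts), 1.0).size()); // fewer than the original 6 points
    }
}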
flink_BufferConsumer_skip
/** @param bytesToSkip number of bytes to skip from currentReaderPosition */ void skip(int bytesToSkip) { writerPosition.update(); int cachedWriterPosition = writerPosition.getCached(); int bytesReadable = cachedWriterPosition - currentReaderPosition; checkState(bytesToSkip <= bytesReadable, "bytes to skip beyond readable range"); currentReaderPosition += bytesToSkip; }
3.68
hudi_HFileBootstrapIndex_commit
/** * Commit bootstrap index entries. Appends Metadata and closes write handles. */ private void commit() { try { if (!closed) { HoodieBootstrapIndexInfo partitionIndexInfo = HoodieBootstrapIndexInfo.newBuilder() .setCreatedTimestamp(new Date().getTime()) .setNumKeys(numPartitionKeysAdded) .setBootstrapBasePath(bootstrapBasePath) .build(); LOG.info("Adding Partition FileInfo :" + partitionIndexInfo); HoodieBootstrapIndexInfo fileIdIndexInfo = HoodieBootstrapIndexInfo.newBuilder() .setCreatedTimestamp(new Date().getTime()) .setNumKeys(numFileIdKeysAdded) .setBootstrapBasePath(bootstrapBasePath) .build(); LOG.info("Appending FileId FileInfo :" + fileIdIndexInfo); indexByPartitionWriter.appendFileInfo(INDEX_INFO_KEY, TimelineMetadataUtils.serializeAvroMetadata(partitionIndexInfo, HoodieBootstrapIndexInfo.class).get()); indexByFileIdWriter.appendFileInfo(INDEX_INFO_KEY, TimelineMetadataUtils.serializeAvroMetadata(fileIdIndexInfo, HoodieBootstrapIndexInfo.class).get()); close(); } } catch (IOException ioe) { throw new HoodieIOException(ioe.getMessage(), ioe); } }
3.68
hadoop_ReadWriteDiskValidatorMetrics_getMetric
/** * Get a metric by given directory name. * * @param dirName directory name * @return the metric */ public synchronized static ReadWriteDiskValidatorMetrics getMetric( String dirName) { MetricsSystem ms = DefaultMetricsSystem.instance(); ReadWriteDiskValidatorMetrics metrics = DIR_METRICS.get(dirName); if (metrics == null) { metrics = new ReadWriteDiskValidatorMetrics(); // Register with the MetricsSystems if (ms != null) { metrics = ms.register(sourceName(dirName), "Metrics for directory: " + dirName, metrics); } DIR_METRICS.put(dirName, metrics); } return metrics; }
3.68
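The per-directory metric lookup above is a synchronized check-then-register. A generic sketch of the same lazy register-or-create idea using ConcurrentHashMap.computeIfAbsent (plain JDK, no Hadoop metrics system); the registration callback here is only a stand-in for ms.register(...):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class PerKeyMetricsSketch {
    static final class DirMetrics {
        final String dir;
        DirMetrics(String dir) { this.dir = dir; }
    }

    private final Map<String, DirMetrics> metricsByDir = new ConcurrentHashMap<>();
    private final Function<String, DirMetrics> register;

    PerKeyMetricsSketch(Function<String, DirMetrics> register) {
        this.register = register;
    }

    // The first caller for a directory creates and registers the metrics object;
    // later callers get the cached instance.
    DirMetrics getMetric(String dirName) {
        return metricsByDir.computeIfAbsent(dirName, register);
    }

    public static void main(String[] args) {
        PerKeyMetricsSketch sketch = new PerKeyMetricsSketch(DirMetrics::new);
        System.out.println(sketch.getMetric("/data/dir1") == sketch.getMetric("/data/dir1")); // true
    }
}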
flink_ExpressionUtils_extractValue
/**
 * Extracts the value (excluding null) of a given class from an expression assuming it is a
 * {@link ValueLiteralExpression}.
 *
 * @param expression literal to extract the value from
 * @param targetClass expected class to extract from the literal
 * @param <V> type of extracted value
 * @return the extracted value, or empty if a value of the given type could not be extracted
 */
public static <V> Optional<V> extractValue(Expression expression, Class<V> targetClass) {
    if (expression instanceof ValueLiteralExpression) {
        final ValueLiteralExpression valueLiteral = (ValueLiteralExpression) expression;
        return valueLiteral.getValueAs(targetClass);
    }
    return Optional.empty();
}
3.68
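The method above is essentially an instanceof check wrapped in an Optional. A generic, self-contained version of the same pattern, with a made-up Literal type standing in for ValueLiteralExpression:

import java.util.Optional;

public class ExtractValueSketch {
    interface Expr {}

    static final class Literal implements Expr {
        final Object value;
        Literal(Object value) { this.value = value; }
    }

    // Returns the literal's value if it is assignable to targetClass, otherwise empty.
    static <V> Optional<V> extractValue(Expr expr, Class<V> targetClass) {
        if (expr instanceof Literal) {
            Object v = ((Literal) expr).value;
            if (targetClass.isInstance(v)) {
                return Optional.of(targetClass.cast(v));
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(extractValue(new Literal(42), Integer.class));  // Optional[42]
        System.out.println(extractValue(new Literal("x"), Integer.class)); // Optional.empty
    }
}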
morf_AbstractSqlDialectTest_testSelectAllRecords
/** * Tests the SQL for a simple select statement. */ @Test public void testSelectAllRecords() { SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE)); assertEquals("SQL to select all records", "SELECT * FROM " + tableName(TEST_TABLE), testDialect.convertStatementToSQL(stmt)); }
3.68
hadoop_ActiveAuditManagerS3A_afterExecution
/** * Forward to the inner span. * {@inheritDoc} */ @Override public void afterExecution(Context.AfterExecution context, ExecutionAttributes executionAttributes) { span.afterExecution(context, executionAttributes); }
3.68
flink_HiveParserJoinTypeCheckCtx_getInputRRList
/** @return the inputRR List */ public List<HiveParserRowResolver> getInputRRList() { return inputRRLst; }
3.68
framework_SelectionEvent_setDifference
/**
 * Slightly optimized set difference that returns either the original set or a new set
 * containing the difference.
 *
 * @param set1
 *            original set
 * @param set2
 *            the set to subtract
 * @return the difference set
 */
private static <T> Set<T> setDifference(Set<T> set1, Set<T> set2) {
    if (set2.isEmpty()) {
        return set1;
    } else {
        LinkedHashSet<T> set = new LinkedHashSet<T>(set1);
        set.removeAll(set2);
        return set;
    }
}
3.68
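A quick demonstration of the behaviour above with plain JDK sets; when the subtrahend is empty the original instance is returned without copying, which is the optimization being made:

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class SetDifferenceDemo {
    // Same idea as SelectionEvent.setDifference: avoid copying when there is nothing to remove.
    static <T> Set<T> setDifference(Set<T> set1, Set<T> set2) {
        if (set2.isEmpty()) {
            return set1;
        }
        LinkedHashSet<T> result = new LinkedHashSet<>(set1);
        result.removeAll(set2);
        return result;
    }

    public static void main(String[] args) {
        Set<String> selected = new LinkedHashSet<>(List.of("a", "b", "c"));
        System.out.println(setDifference(selected, Set.of()) == selected); // true, no copy made
        System.out.println(setDifference(selected, Set.of("b")));          // [a, c], insertion order kept
    }
}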
hadoop_AbstractSchedulerPlanFollower_calculateReservationToPlanRatio
/** * Calculates ratio of reservationResources to planResources. */ private float calculateReservationToPlanRatio( ResourceCalculator rescCalculator, Resource clusterResources, Resource planResources, Resource reservationResources) { return Resources.divide(rescCalculator, clusterResources, reservationResources, planResources); }
3.68
graphhopper_VectorTileEncoder_encode
/** * @return a byte array with the vector tile */ public byte[] encode() { VectorTile.Tile.Builder tile = VectorTile.Tile.newBuilder(); for (Map.Entry<String, Layer> e : layers.entrySet()) { String layerName = e.getKey(); Layer layer = e.getValue(); VectorTile.Tile.Layer.Builder tileLayer = VectorTile.Tile.Layer.newBuilder(); tileLayer.setVersion(2); tileLayer.setName(layerName); tileLayer.addAllKeys(layer.keys()); for (Object value : layer.values()) { VectorTile.Tile.Value.Builder tileValue = VectorTile.Tile.Value.newBuilder(); if (value instanceof String) { tileValue.setStringValue((String) value); } else if (value instanceof Integer) { tileValue.setSintValue(((Integer) value).intValue()); } else if (value instanceof Long) { tileValue.setSintValue(((Long) value).longValue()); } else if (value instanceof Float) { tileValue.setFloatValue(((Float) value).floatValue()); } else if (value instanceof Double) { tileValue.setDoubleValue(((Double) value).doubleValue()); } else if (value instanceof BigDecimal) { tileValue.setStringValue(value.toString()); } else if (value instanceof Number) { tileValue.setDoubleValue(((Number) value).doubleValue()); } else if (value instanceof Boolean) { tileValue.setBoolValue(((Boolean) value).booleanValue()); } else { tileValue.setStringValue(value.toString()); } tileLayer.addValues(tileValue.build()); } tileLayer.setExtent(extent); for (Feature feature : layer.features) { Geometry geometry = feature.geometry; VectorTile.Tile.Feature.Builder featureBuilder = VectorTile.Tile.Feature.newBuilder(); featureBuilder.addAllTags(feature.tags); if (feature.id >= 0) { featureBuilder.setId(feature.id); } GeomType geomType = toGeomType(geometry); x = 0; y = 0; List<Integer> commands = commands(geometry); // skip features with no geometry commands if (commands.isEmpty()) { continue; } // Extra step to parse and check validity and try to repair. Probably expensive. if (simplificationDistanceTolerance > 0.0 && geomType == GeomType.POLYGON) { double scale = autoScale ? (extent / 256.0) : 1.0; Geometry decodedGeometry = VectorTileDecoder.decodeGeometry(gf, geomType, commands, scale); if (!isValid(decodedGeometry)) { // Invalid. Try more simplification and without preserving topology. geometry = DouglasPeuckerSimplifier.simplify(geometry, simplificationDistanceTolerance * 2.0); if (geometry.isEmpty()) { continue; } geomType = toGeomType(geometry); x = 0; y = 0; commands = commands(geometry); } } featureBuilder.setType(geomType); featureBuilder.addAllGeometry(commands); tileLayer.addFeatures(featureBuilder.build()); } tile.addLayers(tileLayer.build()); } return tile.build().toByteArray(); }
3.68
AreaShop_GeneralRegion_getLongSetting
/**
 * Get a long setting for this region, resolved as follows:
 * - If the region has the setting in its own file (/regions/regionName.yml), use that
 * - If the region has groups, use the setting defined by the most important group, if any
 * - Otherwise fall back to the default.yml file setting
 * @param path The path to get the setting of
 * @return The value of the setting
 */
public long getLongSetting(String path) {
    if(config.isSet(path)) {
        return config.getLong(path);
    }
    long result = 0;
    int priority = Integer.MIN_VALUE;
    boolean found = false;
    for(RegionGroup group : plugin.getFileManager().getGroups()) {
        if(group.isMember(this) && group.getSettings().isSet(path) && group.getPriority() > priority) {
            result = group.getSettings().getLong(path);
            priority = group.getPriority();
            found = true;
        }
    }
    if(found) {
        return result;
    }
    if(this.getFileManager().getRegionSettings().isSet(path)) {
        return this.getFileManager().getRegionSettings().getLong(path);
    } else {
        return this.getFileManager().getFallbackRegionSettings().getLong(path);
    }
}
3.68
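The lookup order above (region file, then the highest-priority group that defines the key, then defaults, then the fallback file) is a layered-configuration pattern. A self-contained sketch with plain maps rather than AreaShop's YAML configuration objects:

import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;

public class LayeredSettingsSketch {
    static final class Group {
        final int priority;
        final Map<String, Long> settings;
        Group(int priority, Map<String, Long> settings) {
            this.priority = priority;
            this.settings = settings;
        }
    }

    // Region file wins, then the highest-priority group that defines the key,
    // then the default layer, then the fallback layer.
    static long getLongSetting(String key,
                               Map<String, Long> regionFile,
                               List<Group> groups,
                               Map<String, Long> defaults,
                               Map<String, Long> fallback) {
        if (regionFile.containsKey(key)) {
            return regionFile.get(key);
        }
        Optional<Long> fromGroup = groups.stream()
                .filter(g -> g.settings.containsKey(key))
                .max(Comparator.comparingInt((Group g) -> g.priority))
                .map(g -> g.settings.get(key));
        if (fromGroup.isPresent()) {
            return fromGroup.get();
        }
        return defaults.getOrDefault(key, fallback.getOrDefault(key, 0L));
    }

    public static void main(String[] args) {
        long price = getLongSetting("rent.price",
                Map.of(),                                   // nothing in the region's own file
                List.of(new Group(1, Map.of("rent.price", 100L)),
                        new Group(5, Map.of("rent.price", 250L))),
                Map.of("rent.price", 50L),
                Map.of("rent.price", 10L));
        System.out.println(price); // 250, taken from the priority-5 group
    }
}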
hbase_ScheduledChore_triggerNow
/** Returns false when the Chore is not currently scheduled with a ChoreService */ public synchronized boolean triggerNow() { if (choreService == null) { return false; } choreService.triggerNow(this); return true; }
3.68
flink_FutureUtils_waitForAll
/**
 * Creates a future that is complete once all of the given futures have completed. The future
 * fails (completes exceptionally) once one of the given futures fails.
 *
 * <p>The ConjunctFuture gives access to how many Futures have already completed successfully,
 * via {@link ConjunctFuture#getNumFuturesCompleted()}.
 *
 * @param futures The futures to wait on. No null entries are allowed.
 * @return The ConjunctFuture that completes once all given futures are complete (or one fails).
 */
public static ConjunctFuture<Void> waitForAll(
        Collection<? extends CompletableFuture<?>> futures) {
    checkNotNull(futures, "futures");

    return new WaitingConjunctFuture(futures);
}
3.68
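With plain JDK futures, the closest analogue to the behaviour above is CompletableFuture.allOf, with a counter standing in for getNumFuturesCompleted(); note the caveat in the comment, since allOf does not fail fast the way Flink's ConjunctFuture does:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

public class WaitForAllSketch {
    public static void main(String[] args) {
        List<CompletableFuture<String>> futures = List.of(
                CompletableFuture.completedFuture("a"),
                CompletableFuture.supplyAsync(() -> "b"));

        AtomicInteger completed = new AtomicInteger();
        // Count completions on derived stages so the counter is up to date
        // by the time the conjunct future below finishes.
        CompletableFuture<?>[] tracked = futures.stream()
                .map(f -> f.whenComplete((r, t) -> completed.incrementAndGet()))
                .toArray(CompletableFuture[]::new);

        // Unlike Flink's ConjunctFuture, allOf does not fail fast: if an input fails,
        // the combined future completes exceptionally only after all inputs finish.
        CompletableFuture<Void> all = CompletableFuture.allOf(tracked);
        all.join();
        System.out.println("futures completed: " + completed.get()); // 2
    }
}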
dubbo_MeshRuleRouter_getInvokerList
/**
 * For unit tests only.
 */
@Deprecated
public BitList<Invoker<T>> getInvokerList() {
    return invokerList;
}
3.68
flink_HiveInspectors_getObjectInspector
/** Get Hive {@link ObjectInspector} for a Flink {@link LogicalType}. */ public static ObjectInspector getObjectInspector(LogicalType flinkType) { return getObjectInspector(HiveTypeUtil.toHiveTypeInfo(flinkType, true)); }
3.68
hbase_MasterObserver_preTruncateRegion
/** * Called before the truncate region procedure is called. * @param c The environment to interact with the framework and master * @param regionInfo The Region being truncated */ @SuppressWarnings("unused") default void preTruncateRegion(final ObserverContext<MasterCoprocessorEnvironment> c, RegionInfo regionInfo) { }
3.68
flink_StatsSummarySnapshot_getQuantile
/** * Returns the value for the given quantile based on the represented histogram statistics or * {@link Double#NaN} if the histogram was not built. * * @param quantile Quantile to calculate the value for * @return Value for the given quantile */ public double getQuantile(double quantile) { return histogram == null ? Double.NaN : histogram.getQuantile(quantile); }
3.68
hadoop_RegistryOperationsFactory_createKerberosInstance
/**
 * Create a Kerberos registry service client.
 * @param conf configuration
 * @param jaasClientEntry the name of the login config entry
 * @param principal principal of the client.
 * @param keytab location of the keytab file
 * @return a registry service client instance
 */
public static RegistryOperations createKerberosInstance(Configuration conf,
    String jaasClientEntry, String principal, String keytab) {
  Preconditions.checkArgument(conf != null, "Null configuration");
  conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_KERBEROS);
  conf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, jaasClientEntry);
  RegistryOperationsClient operations =
      new RegistryOperationsClient("KerberosRegistryOperations");
  operations.setKerberosPrincipalAndKeytab(principal, keytab);
  operations.init(conf);
  return operations;
}
3.68
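A hedged usage sketch of the factory above; the import paths assume the usual Hadoop registry client packages, and the JAAS entry, principal and keytab path are placeholders to substitute:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;

public class KerberosRegistryClientSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder JAAS entry, principal and keytab -- replace with real values.
        RegistryOperations registry = RegistryOperationsFactory.createKerberosInstance(
                conf,
                "RegistryClient",
                "client/host.example.com@EXAMPLE.COM",
                "/etc/security/keytabs/client.keytab");
        // The factory already init()s the client; start() activates it.
        registry.start();
        try {
            System.out.println(registry.list("/")); // list the registry root
        } finally {
            registry.stop();
        }
    }
}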