Dataset columns:
name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
framework_FieldGroup_setReadOnly
/** * Sets the read-only state to the given value for all fields with a writable * data source. Fields with a read-only data source will always be set to read-only. * * @param fieldsReadOnly * true to set the fields with a writable data source to read-only, * false to set them to read-write */ public void setReadOnly(boolean fieldsReadOnly) { readOnly = fieldsReadOnly; for (Field<?> field : getFields()) { if (field.getPropertyDataSource() == null || !field.getPropertyDataSource().isReadOnly()) { field.setReadOnly(fieldsReadOnly); } else { field.setReadOnly(true); } } }
3.68
hbase_MasterObserver_postSwitchRpcThrottle
/** * Called after switching the rpc throttle enabled state. * @param ctx the coprocessor instance's environment * @param oldValue the previous rpc throttle value * @param newValue the new rpc throttle value */ default void postSwitchRpcThrottle(final ObserverContext<MasterCoprocessorEnvironment> ctx, final boolean oldValue, final boolean newValue) throws IOException { }
3.68
framework_Page_replaceState
/** * Updates the browser's URI without causing an actual page change, in the same * way as {@link #pushState(URI)}, but does not add a new entry to the browser's * history. * * @param uri * the URI to be used for the replaceState operation. The URI is * resolved over the current location. If the given URI is * absolute, it must be of the same origin as the current URI or the * browser will not accept the new value. * @since 8.0 */ public void replaceState(URI uri) { replaceState(uri.toString()); }
3.68
AreaShop_GithubUpdateCheck_debug
/** * Print a debug message if DEBUG is enabled. * @param message Message to print */ private void debug(Object... message) { if(DEBUG) { logger.info("[" + this.getClass().getSimpleName() + "] [DEBUG] " + StringUtils.join(message, " ")); } }
3.68
querydsl_BeanMap_values
/** * Returns the values for the BeanMap. * * @return values for the BeanMap. The returned collection is not * modifiable. */ @Override public Collection<Object> values() { List<Object> answer = new ArrayList<Object>(readMethods.size()); for (Iterator<Object> iter = valueIterator(); iter.hasNext();) { answer.add(iter.next()); } return answer; }
3.68
graphhopper_ResponsePath_getTime
/** * @return time in millis */ public long getTime() { check("getTime"); return time; }
3.68
hadoop_FSDirAppendOp_prepareFileForAppend
/** * Convert current node to under construction. * Recreate in-memory lease record. * * @param fsn namespace * @param iip inodes in the path containing the file * @param leaseHolder identifier of the lease holder on this file * @param clientMachine identifier of the client machine * @param newBlock if the data is appended to a new block * @param writeToEditLog whether to persist this change to the edit log * @param logRetryCache whether to record RPC ids in editlog for retry cache * rebuilding * @return the last block locations if the block is partial or null otherwise * @throws IOException */ static LocatedBlock prepareFileForAppend(final FSNamesystem fsn, final INodesInPath iip, final String leaseHolder, final String clientMachine, final boolean newBlock, final boolean writeToEditLog, final boolean logRetryCache) throws IOException { assert fsn.hasWriteLock(); final INodeFile file = iip.getLastINode().asFile(); final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip); file.recordModification(iip.getLatestSnapshotId()); file.toUnderConstruction(leaseHolder, clientMachine); fsn.getLeaseManager().addLease( file.getFileUnderConstructionFeature().getClientName(), file.getId()); LocatedBlock ret = null; if (!newBlock) { FSDirectory fsd = fsn.getFSDirectory(); ret = fsd.getBlockManager().convertLastBlockToUnderConstruction(file, 0); if (ret != null && delta != null) { Preconditions.checkState(delta.getStorageSpace() >= 0, "appending to" + " a block with size larger than the preferred block size"); fsd.writeLock(); try { fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta); } finally { fsd.writeUnlock(); } } } else { BlockInfo lastBlock = file.getLastBlock(); if (lastBlock != null) { ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock); ret = new LocatedBlock(blk, DatanodeInfo.EMPTY_ARRAY); } } if (writeToEditLog) { final String path = iip.getPath(); if (NameNodeLayoutVersion.supports(Feature.APPEND_NEW_BLOCK, fsn.getEffectiveLayoutVersion())) { fsn.getEditLog().logAppendFile(path, file, newBlock, logRetryCache); } else { fsn.getEditLog().logOpenFile(path, file, false, logRetryCache); } } return ret; }
3.68
flink_SupportsPartitioning_requiresPartitionGrouping
/** * Returns whether data needs to be grouped by partition before it is consumed by the sink. By * default, this is not required from the runtime and records arrive in arbitrary partition * order. * * <p>If this method returns true, the sink can expect that all records will be grouped by the * partition keys before being consumed by the sink. In other words: The sink will receive all * elements of one partition and then all elements of another partition. Elements of different * partitions will not be mixed. For some sinks, this can be used to reduce the number of * partition writers and improve writing performance by writing one partition at a time. * * <p>The given argument indicates whether the current execution mode supports grouping or not. * For example, depending on the execution mode a sorting operation might not be available * during runtime. * * @param supportsGrouping whether the current execution mode supports grouping * @return whether data needs to be grouped by partition before being consumed by the sink. If {@code * supportsGrouping} is false, it should never return true, otherwise the planner will fail. */ @SuppressWarnings("unused") default boolean requiresPartitionGrouping(boolean supportsGrouping) { return false; }
3.68
flink_AbstractPythonFunctionOperator_checkInvokeFinishBundleByCount
/** Checks whether to invoke finishBundle by elements count. Called in processElement. */ protected void checkInvokeFinishBundleByCount() throws Exception { if (elementCount >= maxBundleSize) { invokeFinishBundle(); } }
3.68
flink_ResourceProfile_getOperatorsMemory
/** * Get the memory the operators need. * * @return The operator memory */ public MemorySize getOperatorsMemory() { throwUnsupportedOperationExceptionIfUnknown(); return taskHeapMemory.add(taskOffHeapMemory).add(managedMemory); }
3.68
framework_AbsoluteLayout_internalSetPosition
/** * Updates the position for a component. The caller must ensure the component is a * child of this layout. * * @param component * The component. Must be a child of this layout. Not enforced. * @param position * New position. Must not be null. */ private void internalSetPosition(Component component, ComponentPosition position) { componentToCoordinates.put(component, position); markAsDirty(); }
3.68
zxing_StringUtils_guessEncoding
/** * @param bytes bytes encoding a string, whose encoding should be guessed * @param hints decode hints if applicable * @return name of guessed encoding; at the moment will only guess one of: * "SJIS", "UTF8", "ISO8859_1", or the platform default encoding if none * of these can possibly be correct */ public static String guessEncoding(byte[] bytes, Map<DecodeHintType,?> hints) { Charset c = guessCharset(bytes, hints); if (c.equals(SHIFT_JIS_CHARSET)) { return "SJIS"; } if (c.equals(StandardCharsets.UTF_8)) { return "UTF8"; } if (c.equals(StandardCharsets.ISO_8859_1)) { return "ISO8859_1"; } return c.name(); }
3.68
morf_HumanReadableStatementHelper_generateCriterionString
/** * Generates a string describing a predicate. * * <p>The human-readable form of some of the logical operators, for example IN and LIKE, requires * that an inversion alter the display of the operator rather than use the machine-readable * NOT prefix. This is implemented through the {@code invert} parameter.</p> * * @param criterion the criterion to describe. * @param invert whether the predicate has been inverted. * @return a string containing the human-readable description of the clause. */ private static String generateCriterionString(final Criterion criterion, final boolean invert) { switch (criterion.getOperator()) { case AND: if (invert) { // NOT(AND(a,b,...)) === OR(NOT(A),NOT(B),...) return generateListCriterionString(criterion, " or ", true); } else { return generateListCriterionString(criterion, " and ", false); } case EQ: return generateBinaryOperatorString(criterion, invert ? "is not" : "is"); case EXISTS: if (invert) { return String.format("not exists %s", generateFromAndWhereClause(criterion.getSelectStatement(), false)); } else { return String.format("exists %s", generateFromAndWhereClause(criterion.getSelectStatement(), false)); } case GT: return generateBinaryOperatorString(criterion, invert ? "is less than or equal to" : "is greater than"); case GTE: return generateBinaryOperatorString(criterion, invert ? "is less than" : "is greater than or equal to"); case IN: return generateInCriterionString(criterion, invert); case ISNOTNULL: return String.format("%s is%s null", generateFieldSymbolString(criterion.getField()), invert ? "" : " not"); case ISNULL: return String.format("%s is%s null", generateFieldSymbolString(criterion.getField()), invert ? " not" : ""); case LIKE: return generateBinaryOperatorString(criterion, invert ? "is not like" : "is like"); case LT: return generateBinaryOperatorString(criterion, invert ? "is greater than or equal to" : "is less than"); case LTE: return generateBinaryOperatorString(criterion, invert ? "is greater than" : "is less than or equal to"); case NEQ: return generateBinaryOperatorString(criterion, invert ? "is" : "is not"); case NOT: return generateCriterionString(criterion.getCriteria().get(0), !invert); case OR: if (invert) { // NOT(OR(a,b,...)) === AND(NOT(A),NOT(B),...) return generateListCriterionString(criterion, " and ", true); } else { return generateListCriterionString(criterion, " or ", false); } default: throw new UnsupportedOperationException("Unable to generate data upgrade string for: [" + criterion.getOperator().name() + "]"); } }
3.68
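The AND/OR inversion in the snippet above is De Morgan's law: NOT(AND(a, b, ...)) == OR(NOT(a), NOT(b), ...) and vice versa, implemented by flipping the connective and pushing the inversion into each child. A minimal self-contained sketch of just that rule (hypothetical helper names, not part of Morf's API):

// De Morgan sketch: an inverted AND renders as an OR of inverted children,
// and an inverted OR as an AND of inverted children, as in the method above.
static String describe(String op, boolean invert, String... children) {
    String joiner = ("AND".equals(op) ^ invert) ? " and " : " or ";
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < children.length; i++) {
        if (i > 0) sb.append(joiner);
        sb.append(invert ? "not " + children[i] : children[i]);
    }
    return sb.toString();
}

For example, describe("AND", true, "a is 1", "b is 2") yields "not a is 1 or not b is 2", mirroring the generateListCriterionString(criterion, " or ", true) branch.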
open-banking-gateway_ConsentAccessUtil_getProtocolFacingConsent
/** * Retrieves exactly one consent out of those available, throws if more are available. * @param consents Consents * @return 1st element of the collection. */ @NotNull public Optional<ProtocolFacingConsent> getProtocolFacingConsent(Collection<ProtocolFacingConsent> consents) { if (consents.isEmpty()) { return Optional.empty(); } if (consents.size() > 1) { throw new IllegalStateException("Too many consents"); } return Optional.of(consents.iterator().next()); }
3.68
hbase_HBaseCluster_waitForRegionServerToStart
/** * Wait for the specified region server to join the cluster * @throws IOException if something goes wrong or a timeout occurs */ public void waitForRegionServerToStart(String hostname, int port, long timeout) throws IOException { long start = EnvironmentEdgeManager.currentTime(); while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { for (ServerName server : getClusterMetrics().getLiveServerMetrics().keySet()) { if (server.getHostname().equals(hostname) && server.getPort() == port) { return; } } Threads.sleep(100); } throw new IOException( "timed out after " + timeout + "ms waiting for region server to start: " + hostname); }
3.68
framework_VScrollTable_getWidthWithIndent
/** * This method exists for the needs of {@link VTreeTable} only. * * Returns the pixel width of the header cell. This includes the * indent, if applicable. * * @return The width in pixels */ protected int getWidthWithIndent() { if (scrollBody != null && isHierarchyColumn()) { int maxIndent = scrollBody.getMaxIndent(); if (maxIndent > width) { return maxIndent; } } return width; }
3.68
zxing_LocaleManager_isBookSearchUrl
/** * Does a given URL point to Google Book Search, regardless of domain. * * @param url The address to check. * @return True if this is a Book Search URL. */ public static boolean isBookSearchUrl(String url) { return url.startsWith("http://google.com/books") || url.startsWith("http://books.google."); }
3.68
flink_TaskStateSnapshot_hasState
/** * Returns true if at least one {@link OperatorSubtaskState} in subtaskStatesByOperatorID has * state. */ public boolean hasState() { for (OperatorSubtaskState operatorSubtaskState : subtaskStatesByOperatorID.values()) { if (operatorSubtaskState != null && operatorSubtaskState.hasState()) { return true; } } return isTaskDeployedAsFinished; }
3.68
flink_RuntimeOpenApiSpecGenerator_main
/** * Generates the Runtime REST API OpenAPI spec. * * @param args args[0] contains the directory into which the generated files are placed * @throws IOException if any file operation failed */ public static void main(String[] args) throws IOException, ConfigurationException { String outputDirectory = args[0]; for (final RuntimeRestAPIVersion apiVersion : RuntimeRestAPIVersion.values()) { if (apiVersion == RuntimeRestAPIVersion.V0) { // this version exists only for testing purposes continue; } createDocumentationFile( "Flink JobManager REST API", new DocumentingDispatcherRestEndpoint(), apiVersion, Paths.get( outputDirectory, "rest_" + apiVersion.getURLVersionPrefix() + "_dispatcher.yml")); } }
3.68
morf_CorrectPrimaryKeyColumns_reverse
/** * @see org.alfasoftware.morf.upgrade.ChangePrimaryKeyColumns#reverse(org.alfasoftware.morf.metadata.Schema) */ @Override public Schema reverse(Schema schema) { if (oldPrimaryKeyColumns != null) { return applyChange(schema, newPrimaryKeyColumns, oldPrimaryKeyColumns); } else { return schema; } }
3.68
framework_TreeGridElement_collapseWithClick
/** * Collapses the row at the given index in the grid with the given * hierarchical column index. * * @param rowIndex * 0-based row index to collapse * @param hierarchyColumnIndex * 0-based index of the hierarchy column */ public void collapseWithClick(int rowIndex, int hierarchyColumnIndex) { if (isRowCollapsed(rowIndex, hierarchyColumnIndex)) { throw new IllegalStateException("The element at row " + rowIndex + " was collapsed already"); } getExpandElement(rowIndex, hierarchyColumnIndex).click(); }
3.68
framework_Calendar_setEventSortOrder
/** * Sets the sort order for events. By default the sort order is * {@link EventSortOrder#DURATION_DESC}. * * @param order * sort strategy for events */ public void setEventSortOrder(EventSortOrder order) { if (order == null) { getState().eventSortOrder = EventSortOrder.DURATION_DESC; } else { getState().eventSortOrder = EventSortOrder.values()[order .ordinal()]; } }
3.68
hadoop_SaslParticipant_getNegotiatedQop
/** * After successful SASL negotiation, returns the negotiated quality of * protection. * * @return negotiated quality of protection */ public String getNegotiatedQop() { if (saslClient != null) { return (String) saslClient.getNegotiatedProperty(Sasl.QOP); } else { return (String) saslServer.getNegotiatedProperty(Sasl.QOP); } }
3.68
hbase_TableOutputFormat_write
/** * Writes a key/value pair into the table. * @param key The key. * @param value The value. * @throws IOException When writing fails. * @see RecordWriter#write(Object, Object) */ @Override public void write(KEY key, Mutation value) throws IOException { if (!(value instanceof Put) && !(value instanceof Delete)) { throw new IOException("Pass a Delete or a Put"); } mutator.mutate(value); }
3.68
hadoop_HdfsFileStatus_perm
/** * Set the permission mask of this entity (default = null). * @param permission Permission bitmask * @return This Builder instance */ public Builder perm(FsPermission permission) { this.permission = permission; return this; }
3.68
hbase_HRegion_closeBulkRegionOperation
/** * Closes the lock. This needs to be called in the finally block corresponding to the try block of * #startRegionOperation */ private void closeBulkRegionOperation() { regionLockHolders.remove(Thread.currentThread()); if (lock.writeLock().isHeldByCurrentThread()) lock.writeLock().unlock(); else lock.readLock().unlock(); }
3.68
hbase_Threads_isNonDaemonThreadRunning
/** * Checks whether any non-daemon thread is running. * @return true if there are non daemon threads running, otherwise false */ public static boolean isNonDaemonThreadRunning() { AtomicInteger nonDaemonThreadCount = new AtomicInteger(); Set<Thread> threads = Thread.getAllStackTraces().keySet(); threads.forEach(t -> { // Exclude current thread if (t.getId() != Thread.currentThread().getId() && !t.isDaemon()) { nonDaemonThreadCount.getAndIncrement(); LOG.info("Non daemon thread {} is still alive", t.getName()); LOG.info(printStackTrace(t)); } }); return nonDaemonThreadCount.get() > 0; }
3.68
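The filter in the snippet above can be reproduced with nothing but the JDK; a minimal standalone sketch (class name hypothetical):

import java.util.Set;

public class NonDaemonCheck {
    // Counts live non-daemon threads other than the current one,
    // the same filter used by isNonDaemonThreadRunning() above.
    public static void main(String[] args) {
        Set<Thread> threads = Thread.getAllStackTraces().keySet();
        long count = threads.stream()
            .filter(t -> t.getId() != Thread.currentThread().getId() && !t.isDaemon())
            .count();
        System.out.println("Non-daemon threads still alive: " + count);
    }
}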
flink_SplitAssignmentTracker_recordSplitAssignment
/** * Record a new split assignment. * * @param splitsAssignment the new split assignment. */ public void recordSplitAssignment(SplitsAssignment<SplitT> splitsAssignment) { addSplitAssignment(splitsAssignment, uncheckpointedAssignments); }
3.68
hudi_WriteMarkers_quietDeleteMarkerDir
/** * Quietly deletes the marker directory. * * @param context {@code HoodieEngineContext} instance. * @param parallelism parallelism for deleting the marker files in the directory. */ public void quietDeleteMarkerDir(HoodieEngineContext context, int parallelism) { try { context.setJobStatus(this.getClass().getSimpleName(), "Deleting marker directory: " + basePath); deleteMarkerDir(context, parallelism); } catch (Exception e) { LOG.warn("Error deleting marker directory for instant " + instantTime, e); } }
3.68
flink_CopyOnWriteSkipListStateMap_updateValueWithReplace
/** * Update the value of the node with replace mode. The old value will be unlinked and replaced * by the new value, and cannot be accessed later. Note that the space of the old value is not * freed here, and the caller of this method is responsible for the space management. * * @param node the node whose value will be replaced. * @param value the value. * @return the old value pointer. */ private long updateValueWithReplace(long node, byte[] value) { // a null value indicates this is a removed node int valueSize = value == null ? 0 : value.length; int totalValueLen = SkipListUtils.getValueMetaLen() + valueSize; long valuePointer = allocateSpace(totalValueLen); Node nodeStorage = getNodeSegmentAndOffset(node); MemorySegment nodeSegment = nodeStorage.nodeSegment; int offsetInNodeSegment = nodeStorage.nodeOffset; long oldValuePointer = SkipListUtils.getValuePointer(nodeSegment, offsetInNodeSegment); long nextValuePointer = SkipListUtils.helpGetNextValuePointer(oldValuePointer, spaceAllocator); doWriteValue(valuePointer, value, stateMapVersion, node, nextValuePointer); // update the value pointer in the node after the new value points to the old value, so that // the old value can still be accessed concurrently SkipListUtils.putValuePointer(nodeSegment, offsetInNodeSegment, valuePointer); return oldValuePointer; }
3.68
pulsar_AbstractDispatcherMultipleConsumers_getNextConsumer
/** * <pre> * Broker gives more priority while dispatching messages. Here, the broker follows descending priorities. (e.g. * 0=max-priority, 1, 2, ...) * <p> * The broker will first dispatch messages to the max priority-level consumers if they * have permits, else the broker will consider the next priority-level consumers. * Also, on the same priority-level, it selects a consumer in round-robin manner. * <p> * If a subscription has consumer-A with priorityLevel 1 and consumer-B with priorityLevel 2, * then the broker will dispatch * messages only to consumer-A until it runs out of permits, and then the broker starts dispatching messages to consumer-B. * <p> * Consumer PriorityLevel Permits * C1 0 2 * C2 0 1 * C3 0 1 * C4 1 2 * C5 1 1 * Result of getNextConsumer(): C1, C2, C3, C1, C4, C5, C4 * </pre> * * <pre> * <b>Algorithm:</b> * 1. consumerList: stores consumers in a sorted list: max-priority stored first * 2. currentConsumerRoundRobinIndex: always stores the last served consumer index * * Each time getNextConsumer() is called:<p> * 1. It always starts traversing from the max-priority consumer (first element) of the sorted list * 2. Consumers on the same priority-level are treated equally and it tries to pick one of them in * round-robin manner * 3. Only if no consumer is available on a given priority-level does it go to the next lower priority-level * consumers * 4. Returns null in case it doesn't find any available consumer * </pre> * * @return nextAvailableConsumer */ public Consumer getNextConsumer() { if (consumerList.isEmpty() || IS_CLOSED_UPDATER.get(this) == TRUE) { // abort read if no consumers are connected or if disconnect is initiated return null; } if (currentConsumerRoundRobinIndex >= consumerList.size()) { currentConsumerRoundRobinIndex = 0; } int currentRoundRobinConsumerPriority = consumerList.get(currentConsumerRoundRobinIndex).getPriorityLevel(); // first find an available consumer on a higher level, unless currentIndex is already on the highest level (0) if (currentRoundRobinConsumerPriority != 0) { int higherPriorityConsumerIndex = getConsumerFromHigherPriority(currentRoundRobinConsumerPriority); if (higherPriorityConsumerIndex != -1) { currentConsumerRoundRobinIndex = higherPriorityConsumerIndex + 1; return consumerList.get(higherPriorityConsumerIndex); } } // currentIndex is already on the highest level or no consumer was found on a higher level, so find a consumer on the same // or a lower level int availableConsumerIndex = getNextConsumerFromSameOrLowerLevel(currentConsumerRoundRobinIndex); if (availableConsumerIndex != -1) { currentConsumerRoundRobinIndex = availableConsumerIndex + 1; return consumerList.get(availableConsumerIndex); } // couldn't find an available consumer return null; }
3.68
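The documented dispatch order can be reproduced with a much smaller model. The sketch below (hypothetical types, not Pulsar's API) keeps consumers sorted by ascending priority level, prefers any higher level with permits, and otherwise round-robins forward from the last served index; dispatching consumes one permit:

import java.util.List;

// Hypothetical stand-in for Pulsar's consumer selection; not the real API.
final class PrioritySketch {
    static final class C {
        final String name; final int priority; int permits;
        C(String name, int priority, int permits) { this.name = name; this.priority = priority; this.permits = permits; }
    }

    private final List<C> consumers; // must be sorted by priority ascending (0 = highest)
    private int rrIndex = 0;

    PrioritySketch(List<C> consumers) { this.consumers = consumers; }

    C next() {
        if (consumers.isEmpty()) return null;
        if (rrIndex >= consumers.size()) rrIndex = 0;
        int level = consumers.get(rrIndex).priority;
        // Prefer any consumer on a strictly higher level (lower number) that has permits.
        for (int i = 0; i < consumers.size() && consumers.get(i).priority < level; i++) {
            if (consumers.get(i).permits > 0) return take(i);
        }
        // Otherwise scan round-robin from the last served index, wrapping once.
        for (int step = 0; step < consumers.size(); step++) {
            int i = (rrIndex + step) % consumers.size();
            if (consumers.get(i).permits > 0) return take(i);
        }
        return null; // no consumer has permits
    }

    private C take(int i) {
        rrIndex = i + 1;
        consumers.get(i).permits--; // dispatching one message consumes one permit
        return consumers.get(i);
    }
}

Seeding it with C1(0,2), C2(0,1), C3(0,1), C4(1,2), C5(1,1) and calling next() seven times reproduces the documented sequence C1, C2, C3, C1, C4, C5, C4.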
framework_Table_setColumnAlignment
/** * Sets the specified column's alignment. * * <p> * Throws IllegalArgumentException if the alignment is not one of the * following: {@link Align#LEFT}, {@link Align#CENTER} or * {@link Align#RIGHT} * </p> * * @param propertyId * the propertyID identifying the column. * @param alignment * the desired alignment. */ public void setColumnAlignment(Object propertyId, Align alignment) { if (alignment == null || alignment == Align.LEFT) { columnAlignments.remove(propertyId); } else { columnAlignments.put(propertyId, alignment); } // Assures the visual refresh. No need to reset the page buffer before // as the content has not changed, only the alignments. refreshRenderedCells(); }
3.68
hadoop_ConnectionPool_getLastActiveTime
/** * Get the last time the connection pool was used. * * @return Last time the connection pool was used. */ protected long getLastActiveTime() { return this.lastActiveTime; }
3.68
streampipes_StreamedQueryResultProvider_changeTimestampHeader
/** * Replaces the field 'time' of the data result with the actual timestamp field name of the measurement * * @param measurement contains the actual timestamp name value * @param dataResult the query result of the database with 'time' as timestamp field name */ private void changeTimestampHeader(DataLakeMeasure measurement, SpQueryResult dataResult) { var timeFieldIndex = dataResult.getHeaders().indexOf(TIME_FIELD); if (timeFieldIndex > -1) { dataResult.getHeaders().set(timeFieldIndex, measurement.getTimestampFieldName()); } }
3.68
flink_CheckpointStatsTracker_reportPendingCheckpoint
/** * Creates a new pending checkpoint tracker. * * @param checkpointId ID of the checkpoint. * @param triggerTimestamp Trigger timestamp of the checkpoint. * @param props The checkpoint properties. * @param vertexToDop mapping of {@link JobVertexID} to DOP * @return Tracker for statistics gathering. */ PendingCheckpointStats reportPendingCheckpoint( long checkpointId, long triggerTimestamp, CheckpointProperties props, Map<JobVertexID, Integer> vertexToDop) { PendingCheckpointStats pending = new PendingCheckpointStats(checkpointId, triggerTimestamp, props, vertexToDop); statsReadWriteLock.lock(); try { counts.incrementInProgressCheckpoints(); history.addInProgressCheckpoint(pending); dirty = true; } finally { statsReadWriteLock.unlock(); } return pending; }
3.68
graphhopper_AbstractNonCHBidirAlgo_fillEdgesFromUsingFilter
/** * @param edgeFilter edge filter used to filter edges during {@link #fillEdgesFrom()} */ protected void fillEdgesFromUsingFilter(EdgeFilter edgeFilter) { additionalEdgeFilter = edgeFilter; finishedFrom = !fillEdgesFrom(); additionalEdgeFilter = null; }
3.68
hbase_FileArchiverNotifierImpl_getSizeOfStoreFiles
/** * Computes the size of each store file in {@code storeFileNames} */ long getSizeOfStoreFiles(TableName tn, Set<StoreFileReference> storeFileNames) { return storeFileNames.stream() .collect(Collectors.summingLong((sfr) -> getSizeOfStoreFile(tn, sfr))); }
3.68
framework_VComboBox_setNavigationCallback
/** * Set a callback that is invoked when a page change occurs if there * have not been intervening requests to the server. The callback is * reset when any additional request is made to the server. * * @param callback * method to call after filtering has completed */ public void setNavigationCallback(Runnable callback) { showPopup = true; navigationCallback = callback; }
3.68
hbase_StoreFileWriter_withOutputDir
/** * Use either this method or {@link #withFilePath}, but not both. * @param dir Path to column family directory. The directory is created if it does not exist. The * file is given a unique name within this directory. * @return this (for chained invocation) */ public Builder withOutputDir(Path dir) { Preconditions.checkNotNull(dir); this.dir = dir; return this; }
3.68
flink_TGetQueryIdReq_findByName
/** Find the _Fields constant that matches name, or null if it is not found. */ public static _Fields findByName(java.lang.String name) { return byName.get(name); }
3.68
hbase_ByteArrayComparable_parseFrom
/** * Parse a serialized representation of {@link ByteArrayComparable} * @param pbBytes A pb serialized {@link ByteArrayComparable} instance * @return An instance of {@link ByteArrayComparable} made from <code>bytes</code> * @see #toByteArray */ @SuppressWarnings("DoNotCallSuggester") public static ByteArrayComparable parseFrom(final byte[] pbBytes) throws DeserializationException { throw new DeserializationException( "parseFrom called on base ByteArrayComparable, but should be called on derived type"); }
3.68
pulsar_BlockAwareSegmentInputStreamImpl_readEntries
// read ledger entries. private int readEntries() throws IOException { checkState(bytesReadOffset >= DataBlockHeaderImpl.getDataStartOffset()); checkState(bytesReadOffset < blockSize); // once we reach the end of the entry buffer, read more if there is more if (bytesReadOffset < dataBlockFullOffset && entriesByteBuf.isEmpty() && startEntryId + blockEntryCount <= ledger.getLastAddConfirmed()) { entriesByteBuf = readNextEntriesFromLedger(startEntryId + blockEntryCount, ENTRIES_PER_READ); } if (!entriesByteBuf.isEmpty() && bytesReadOffset + entriesByteBuf.get(0).readableBytes() <= blockSize) { // always read from the first ByteBuf in the list; once all of its content is read, remove it. ByteBuf entryByteBuf = entriesByteBuf.get(0); int ret = entryByteBuf.readUnsignedByte(); bytesReadOffset++; if (entryByteBuf.readableBytes() == 0) { entryByteBuf.release(); entriesByteBuf.remove(0); blockEntryCount++; } return ret; } else { // no space for a new entry or there are no more entries: // mark the data block as full and return end padding if (dataBlockFullOffset == blockSize) { dataBlockFullOffset = bytesReadOffset; } return BLOCK_END_PADDING[(bytesReadOffset++ - dataBlockFullOffset) % BLOCK_END_PADDING.length]; } }
3.68
flink_TableDescriptor_partitionedBy
/** Define which columns this table is partitioned by. */ public Builder partitionedBy(String... partitionKeys) { this.partitionKeys.addAll(Arrays.asList(partitionKeys)); return this; }
3.68
dubbo_CacheFilter_setCacheFactory
/** * Dubbo will populate and set the cache factory instance based on the cache attribute value configured on the service/method/consumer/provider. Dubbo will search for the class name implementing the configured <b>cache</b> in the file org.apache.dubbo.cache.CacheFactory * under META-INF subfolders. * * @param cacheFactory instance of CacheFactory based on <b>cache</b> type */ public void setCacheFactory(CacheFactory cacheFactory) { this.cacheFactory = cacheFactory; }
3.68
hudi_AvroSchemaCompatibility_objectsEqual
/** * Borrowed from Guava's Objects.equal(a, b) */ private static boolean objectsEqual(Object obj1, Object obj2) { return Objects.equals(obj1, obj2); }
3.68
framework_ApplicationConnection_getServerRpcQueue
/** * Gets the server RPC queue for this application. * * @since 7.6 * @return the server RPC queue */ public ServerRpcQueue getServerRpcQueue() { return serverRpcQueue; }
3.68
dubbo_ReactorClientCalls_oneToMany
/** * Implements a unary -> stream call as Mono -> Flux * * @param invoker invoker * @param monoRequest the mono with request * @param methodDescriptor the method descriptor * @return the flux with response */ public static <TRequest, TResponse, TInvoker> Flux<TResponse> oneToMany( Invoker<TInvoker> invoker, Mono<TRequest> monoRequest, StubMethodDescriptor methodDescriptor) { try { return monoRequest.flatMapMany(request -> { ClientTripleReactorPublisher<TResponse> clientPublisher = new ClientTripleReactorPublisher<>(); StubInvocationUtil.serverStreamCall(invoker, methodDescriptor, request, clientPublisher); return clientPublisher; }); } catch (Throwable throwable) { return Flux.error(throwable); } }
3.68
flink_SingleInputOperator_setInput
/** * Sets the input to the union of the given operators. * * @param input The operator(s) that form the input. * @deprecated This method will be removed in future versions. Use the {@link Union} operator * instead. */ @Deprecated public void setInput(Operator<IN>... input) { this.input = Operator.createUnionCascade(null, input); }
3.68
flink_FlinkVersion_current
/** Returns the version for the current branch. */ public static FlinkVersion current() { return values()[values().length - 1]; }
3.68
graphhopper_SRTMProvider_init
/** * The URLs are a bit ugly and so we need to find out which area name a certain lat,lon * coordinate has. */ private SRTMProvider init() { try { String strs[] = {"Africa", "Australia", "Eurasia", "Islands", "North_America", "South_America"}; for (String str : strs) { InputStream is = getClass().getResourceAsStream(str + "_names.txt"); for (String line : Helper.readFile(new InputStreamReader(is, Helper.UTF_CS))) { int lat = Integer.parseInt(line.substring(1, 3)); if (line.substring(0, 1).charAt(0) == 'S') lat = -lat; int lon = Integer.parseInt(line.substring(4, 7)); if (line.substring(3, 4).charAt(0) == 'W') lon = -lon; int intKey = calcIntKey(lat, lon); String key = areas.put(intKey, str); if (key != null) throw new IllegalStateException("do not overwrite existing! key " + intKey + " " + key + " vs. " + str); } } return this; } catch (Exception ex) { throw new IllegalStateException("Cannot load area names from classpath", ex); } }
3.68
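The substring positions in the snippet above decode SRTM tile ids of the form N47E008 / S45W170 (hemisphere letter, two-digit latitude, hemisphere letter, three-digit longitude). A standalone sketch of that parsing step (hypothetical helper, same index math):

// Decodes "S45W170" -> { -45, -170 }; positions match the init() loop above.
static int[] parseTile(String line) {
    int lat = Integer.parseInt(line.substring(1, 3));
    if (line.charAt(0) == 'S') lat = -lat;
    int lon = Integer.parseInt(line.substring(4, 7));
    if (line.charAt(3) == 'W') lon = -lon;
    return new int[]{lat, lon};
}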
hudi_HoodieCombineHiveInputFormat_readFields
/** * Writable interface. */ @Override public void readFields(DataInput in) throws IOException { inputFormatClassName = Text.readString(in); if (HoodieParquetRealtimeInputFormat.class.getName().equals(inputFormatClassName)) { String inputShimClassName = Text.readString(in); inputSplitShim = ReflectionUtils.loadClass(inputShimClassName); inputSplitShim.readFields(in); } else { inputSplitShim.readFields(in); } }
3.68
flink_CoGroupedStreams_evictor
/** * Sets the {@code Evictor} that should be used to evict elements from a window before * emission. * * <p>Note: When using an evictor, window performance will degrade significantly, since * pre-aggregation of window results cannot be used. */ @PublicEvolving public WithWindow<T1, T2, KEY, W> evictor( Evictor<? super TaggedUnion<T1, T2>, ? super W> newEvictor) { return new WithWindow<>( input1, input2, keySelector1, keySelector2, keyType, windowAssigner, trigger, newEvictor, allowedLateness); }
3.68
hadoop_AzureNativeFileSystemStore_connectToAzureStorageInSecureMode
/** * Method to set up the Storage Interaction layer in Secure mode. * @param accountName - Storage account provided in the initializer * @param containerName - Container name provided in the initializer * @param sessionUri - URI provided in the initializer */ private void connectToAzureStorageInSecureMode(String accountName, String containerName, URI sessionUri) throws AzureException, StorageException, URISyntaxException { LOG.debug("Connecting to Azure storage in Secure Mode"); // Assertion: storageInteractionLayer instance has to be a SecureStorageInterfaceImpl if (!(this.storageInteractionLayer instanceof SecureStorageInterfaceImpl)) { throw new AssertionError("connectToAzureStorageInSecureMode() should be called only" + " for SecureStorageInterfaceImpl instances"); } ((SecureStorageInterfaceImpl) this.storageInteractionLayer). setStorageAccountName(accountName); connectingUsingSAS = true; container = storageInteractionLayer.getContainerReference(containerName); rootDirectory = container.getDirectoryReference(""); canCreateOrModifyContainer = true; }
3.68
morf_AbstractSqlDialectTest_expectedCoalesce
/** * @return The expected SQL statement when performing the ANSI COALESCE call */ protected String expectedCoalesce() { return "SELECT COALESCE(NULL, bob) FROM " + tableName("MyTable"); }
3.68
hbase_AsyncAggregationClient_sumByRegion
// the map key is the startRow of the region private static <R, S, P extends Message, Q extends Message, T extends Message> CompletableFuture<NavigableMap<byte[], S>> sumByRegion(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) { CompletableFuture<NavigableMap<byte[], S>> future = new CompletableFuture<NavigableMap<byte[], S>>(); AggregateRequest req; try { req = validateArgAndGetPB(scan, ci, false); } catch (IOException e) { future.completeExceptionally(e); return future; } int firstPartIndex = scan.getFamilyMap().get(scan.getFamilies()[0]).size() - 1; AbstractAggregationCallback<NavigableMap<byte[], S>> callback = new AbstractAggregationCallback<NavigableMap<byte[], S>>(future) { private final NavigableMap<byte[], S> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); @Override protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { if (resp.getFirstPartCount() > 0) { map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex)); } } @Override protected NavigableMap<byte[], S> getFinalResult() { return map; } }; table .<AggregateService, AggregateResponse> coprocessorService(AggregateService::newStub, (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()) .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute(); return future; }
3.68
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_buildClusteringGroupsForPartition
/** * Generate cluster group based on split, merge and sort rules */ @Override protected Stream<HoodieClusteringGroup> buildClusteringGroupsForPartition(String partitionPath, List<FileSlice> fileSlices) { Option<HoodieConsistentHashingMetadata> metadata = ConsistentBucketIndexUtils.loadMetadata(getHoodieTable(), partitionPath); ValidationUtils.checkArgument(metadata.isPresent(), "Metadata is empty for partition: " + partitionPath); ConsistentBucketIdentifier identifier = new ConsistentBucketIdentifier(metadata.get()); // Apply split rule int splitSlot = getWriteConfig().getBucketIndexMaxNumBuckets() - identifier.getNumBuckets(); Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> splitResult = buildSplitClusteringGroups(identifier, fileSlices, splitSlot); List<HoodieClusteringGroup> ret = new ArrayList<>(splitResult.getLeft()); List<FileSlice> remainedSlices = splitResult.getRight(); if (isBucketClusteringMergeEnabled()) { // Apply merge rule int mergeSlot = identifier.getNumBuckets() - getWriteConfig().getBucketIndexMinNumBuckets() + splitResult.getMiddle(); Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> mergeResult = buildMergeClusteringGroup(identifier, remainedSlices, mergeSlot); ret.addAll(mergeResult.getLeft()); remainedSlices = mergeResult.getRight(); } if (isBucketClusteringSortEnabled()) { // Apply sort only to the remaining file groups ret.addAll(remainedSlices.stream().map(fs -> { ConsistentHashingNode oldNode = identifier.getBucketByFileId(fs.getFileId()); ConsistentHashingNode newNode = new ConsistentHashingNode(oldNode.getValue(), FSUtils.createNewFileIdPfx(), ConsistentHashingNode.NodeTag.REPLACE); return HoodieClusteringGroup.newBuilder() .setSlices(getFileSliceInfo(Collections.singletonList(fs))) .setNumOutputFileGroups(1) .setMetrics(buildMetrics(Collections.singletonList(fs))) .setExtraMetadata(constructExtraMetadata(fs.getPartitionPath(), Collections.singletonList(newNode), identifier.getMetadata().getSeqNo())) .build(); }).collect(Collectors.toList())); } return ret.stream(); }
3.68
hmily_ExtensionLoaderFactory_load
/** * Load t. * * @param <T> the type parameter * @param service the service * @param name the name * @param argsType the args type * @param args the args * @return the t */ public static <T> T load(final Class<T> service, final String name, final Class<?>[] argsType, final Object[] args) { return ExtensionLoader.getExtensionLoader(service).load(name, argsType, args, findClassLoader()); }
3.68
flink_TableOperatorWrapperGenerator_calculateManagedMemoryFraction
/** calculate managed memory fraction for each operator wrapper. */ private void calculateManagedMemoryFraction() { for (Map.Entry<Transformation<?>, TableOperatorWrapper<?>> entry : visitedTransforms.entrySet()) { double fraction = 0; if (managedMemoryWeight != 0) { fraction = entry.getKey() .getManagedMemoryOperatorScopeUseCaseWeights() .getOrDefault(ManagedMemoryUseCase.OPERATOR, 0) * 1.0 / this.managedMemoryWeight; } entry.getValue().setManagedMemoryFraction(fraction); } }
3.68
framework_AbstractComponent_setWidth
/* * (non-Javadoc) * * @see com.vaadin.server.Sizeable#setWidth(java.lang.String) */ @Override public void setWidth(String width) { SizeWithUnit size = SizeWithUnit.parseStringSize(width); if (size != null) { setWidth(size.getSize(), size.getUnit()); } else { setWidth(-1, Unit.PIXELS); } }
3.68
morf_DataSetUtils_of
/** * Creates a new record decorator, which initially contains the values in * the fallback record, but allows values to be added or overridden. * * @param fallback The record to override. * @return A new {@link RecordBuilder}. */ public static RecordBuilder of(Record fallback) { return ofWithInitialCapacity(fallback, DEFAULT_CAPACITY); }
3.68
framework_AbstractBeanContainer_addListener
/** * @deprecated As of 7.0, replaced by {@link #addPropertySetChangeListener} */ @Deprecated @Override public void addListener(Container.PropertySetChangeListener listener) { addPropertySetChangeListener(listener); }
3.68
pulsar_WindowManager_add
/** * Tracks a window event. * * @param windowEvent the window event to track */ public void add(Event<T> windowEvent) { // watermark events are not added to the queue. if (windowEvent.isWatermark()) { if (log.isDebugEnabled()) { log.debug("Got watermark event with ts {}", windowEvent.getTimestamp()); } } else { queue.add(windowEvent); } track(windowEvent); compactWindow(); }
3.68
framework_VaadinPortlet_createVaadinRequest
/** * Wraps the request in a (possibly portal specific) Vaadin portlet request. * * @param request * The original PortletRequest * @return A wrapped version of the PortletRequest */ protected VaadinPortletRequest createVaadinRequest(PortletRequest request) { PortalContext portalContext = request.getPortalContext(); String portalInfo = portalContext.getPortalInfo() .toLowerCase(Locale.ROOT).trim(); VaadinPortletService service = getService(); if (portalInfo.contains("gatein")) { return new VaadinGateInRequest(request, service); } if (portalInfo.contains("liferay")) { return new VaadinLiferayRequest(request, service); } if (portalInfo.contains("websphere portal") || portalInfo.contains("hcl digital experience")) { return new VaadinWebSpherePortalRequest(request, service); } if (portalInfo.contains("weblogic portal")) { return new VaadinWebLogicPortalRequest(request, service); } return new VaadinPortletRequest(request, service); }
3.68
framework_ColumnProperty_getValue
/** * Returns the current value for this property. To get the previous value * (if one exists) for a modified property use {@link #getOldValue()}. * * @return the current value of the property */ @Override public Object getValue() { if (isModified()) { return changedValue; } return value; }
3.68
rocketmq-connect_JsonSchemaData_toConnectData
/** * to connect data * * @param schema * @param jsonValue * @return */ public static Object toConnectData(Schema schema, JsonNode jsonValue) { final FieldType schemaType; if (schema != null) { schemaType = schema.getFieldType(); if (jsonValue == null || jsonValue.isNull()) { if (schema.getDefaultValue() != null) { // any logical type conversions should already have been applied return schema.getDefaultValue(); } if (jsonValue == null || schema.isOptional()) { return null; } throw new ConnectException("Invalid null value for required " + schemaType + " field"); } } else { if (jsonValue == null) { return null; } switch (jsonValue.getNodeType()) { case NULL: return null; case BOOLEAN: schemaType = FieldType.BOOLEAN; break; case NUMBER: if (jsonValue.isIntegralNumber()) { schemaType = FieldType.INT64; } else { schemaType = FieldType.FLOAT64; } break; case ARRAY: schemaType = FieldType.ARRAY; break; case OBJECT: schemaType = FieldType.MAP; break; case STRING: schemaType = FieldType.STRING; break; case BINARY: case MISSING: case POJO: default: schemaType = null; break; } } final JsonToConnectTypeConverter typeConverter = TO_CONNECT_CONVERTERS.get(schemaType); if (typeConverter == null) { throw new ConnectException("Unknown schema type: " + schemaType); } if (schema != null && schema.getName() != null) { JsonToConnectLogicalTypeConverter logicalConverter = TO_CONNECT_LOGICAL_CONVERTERS.get(schema.getName()); if (logicalConverter != null) { return logicalConverter.convert(schema, jsonValue); } } return typeConverter.convert(schema, jsonValue); }
3.68
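In the schemaless branch above, the inferred FieldType tracks Jackson's JsonNodeType: integral NUMBER maps to INT64, floating NUMBER to FLOAT64, ARRAY to ARRAY, OBJECT to MAP, and so on. A small probe of those node types (Jackson only; illustrative, not the converter itself):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class NodeTypeProbe {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Prints BOOLEAN, NUMBER (integral), NUMBER, ARRAY, OBJECT, STRING
        for (String json : new String[]{"true", "42", "4.2", "[1,2]", "{\"a\":1}", "\"s\""}) {
            JsonNode node = mapper.readTree(json);
            System.out.println(json + " -> " + node.getNodeType() + (node.isIntegralNumber() ? " (integral)" : ""));
        }
    }
}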
hbase_ReplicationSourceManager_acquireBufferQuota
/** * Add the size to {@link ReplicationSourceManager#totalBufferUsed} and check if it exceeds * {@link ReplicationSourceManager#totalBufferLimit}. * @return true if {@link ReplicationSourceManager#totalBufferUsed} exceeds * {@link ReplicationSourceManager#totalBufferLimit}; we should then stop increasing the buffer and * ship all. */ boolean acquireBufferQuota(long size) { if (size < 0) { throw new IllegalArgumentException("size should not be less than 0"); } long newBufferUsed = addTotalBufferUsed(size); return newBufferUsed >= totalBufferLimit; }
3.68
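Stripped of the HBase types, the quota check above is an atomic add compared against a cap. A minimal sketch (hypothetical class, not the HBase implementation):

import java.util.concurrent.atomic.AtomicLong;

// Accumulates buffer usage; returns true when the cap is reached,
// signalling the caller to stop buffering and ship what it has.
final class BufferQuota {
    private final AtomicLong used = new AtomicLong();
    private final long limit;

    BufferQuota(long limit) { this.limit = limit; }

    boolean acquire(long size) {
        if (size < 0) {
            throw new IllegalArgumentException("size should not be less than 0");
        }
        return used.addAndGet(size) >= limit;
    }
}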
framework_NativeButtonIconAndText_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Click the buttons to toggle icon alternate text"; }
3.68
framework_VScrollTable_toggleShiftSelection
/** * Is called when a user clicks an item when holding SHIFT key down. * This will select a new range from the last focused row * * @param deselectPrevious * Should the previous selected range be deselected */ private void toggleShiftSelection(boolean deselectPrevious) { /* * Ensures that we are in multiselect mode and that we have a * previous selection which was not a deselection */ if (isSingleSelectMode()) { // No previous selection found deselectAll(); toggleSelection(); return; } // Set the selectable range VScrollTableRow endRow = this; VScrollTableRow startRow = selectionRangeStart; if (startRow == null) { startRow = focusedRow; selectionRangeStart = focusedRow; // If start row is null then we have a multipage selection // from above if (startRow == null) { startRow = (VScrollTableRow) scrollBody.iterator() .next(); setRowFocus(endRow); } } else if (!startRow.isSelected()) { // The start row is no longer selected (probably removed) // and so we select from above startRow = (VScrollTableRow) scrollBody.iterator().next(); setRowFocus(endRow); } // Deselect previous items if so desired if (deselectPrevious) { deselectAll(); } // we'll ensure GUI state from top down even though selection // was the opposite way if (!startRow.isBefore(endRow)) { VScrollTableRow tmp = startRow; startRow = endRow; endRow = tmp; } SelectionRange range = new SelectionRange(startRow, endRow); for (Widget w : scrollBody) { VScrollTableRow row = (VScrollTableRow) w; if (range.inRange(row)) { if (!row.isSelected()) { row.toggleSelection(); } selectedRowKeys.add(row.getKey()); } } // Add range if (startRow != endRow) { selectedRowRanges.add(range); } }
3.68
flink_BloomFilter_optimalNumOfHashFunctions
/** * Computes the optimal number of hash functions for the given number of expected entries and bit size, * which minimizes the false positive probability. * * @param expectEntries number of expected entries * @param bitSize size of the bit array in bits * @return optimal number of hash functions */ static int optimalNumOfHashFunctions(long expectEntries, long bitSize) { return Math.max(1, (int) Math.round((double) bitSize / expectEntries * Math.log(2))); }
3.68
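The expression above is the textbook optimum k = (m/n) * ln 2 for a Bloom filter with m bits and n expected entries. A quick standalone check of what it yields (illustrative only):

public class OptimalK {
    public static void main(String[] args) {
        long expectEntries = 1_000_000L;
        long bitSize = 8 * expectEntries; // 8 bits per entry
        // k = round(m/n * ln 2) = round(8 * 0.693...) = 6
        int k = Math.max(1, (int) Math.round((double) bitSize / expectEntries * Math.log(2)));
        System.out.println("optimal hash functions: " + k); // prints 6
    }
}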
shardingsphere-elasticjob_TriggerNode_getTriggerPath
/** * Get trigger path. * * @param instanceId instance id * @return trigger path */ public String getTriggerPath(final String instanceId) { return String.format(TRIGGER, instanceId); }
3.68
streampipes_TrailingHeadlineToBoilerplateFilter_getInstance
/** * Returns the singleton instance for TrailingHeadlineToBoilerplateFilter. */ public static TrailingHeadlineToBoilerplateFilter getInstance() { return INSTANCE; }
3.68
dubbo_ClassUtils_isSimpleType
/** * The specified type is simple type or not * * @param type the type to test * @return if <code>type</code> is one element of {@link #SIMPLE_TYPES}, return <code>true</code>, or <code>false</code> * @see #SIMPLE_TYPES * @since 2.7.6 */ public static boolean isSimpleType(Class<?> type) { return SIMPLE_TYPES.contains(type); }
3.68
graphhopper_GHLongLongBTree_put
/** * @return the old value which was associated with the specified key, or noNumberValue if * there was no update */ ReturnValue put(long key, long newValue) { int index = binarySearch(keys, 0, entrySize, key); if (index >= 0) { // update byte[] oldValue = new byte[bytesPerValue]; System.arraycopy(values, index * bytesPerValue, oldValue, 0, bytesPerValue); // copy newValue to values fromLong(values, newValue, index * bytesPerValue); return new ReturnValue(oldValue); } index = ~index; ReturnValue downTreeRV; if (isLeaf || children[index] == null) { // insert downTreeRV = new ReturnValue(null); downTreeRV.tree = checkSplitEntry(); if (downTreeRV.tree == null) { insertKeyValue(index, key, fromLong(newValue)); } else if (index <= splitIndex) { downTreeRV.tree.children[0].insertKeyValue(index, key, fromLong(newValue)); } else { downTreeRV.tree.children[1].insertKeyValue(index - splitIndex - 1, key, fromLong(newValue)); } return downTreeRV; } downTreeRV = children[index].put(key, newValue); if (downTreeRV.oldValue != null) // only update return downTreeRV; if (downTreeRV.tree != null) { // split this treeEntry if it is too big BTreeEntry returnTree, downTree = returnTree = checkSplitEntry(); if (downTree == null) { insertTree(index, downTreeRV.tree); } else if (index <= splitIndex) { downTree.children[0].insertTree(index, downTreeRV.tree); } else { downTree.children[1].insertTree(index - splitIndex - 1, downTreeRV.tree); } downTreeRV.tree = returnTree; } return downTreeRV; }
3.68
flink_FlinkImageBuilder_asTaskManager
/** Use this image for building a TaskManager. */ public FlinkImageBuilder asTaskManager() { checkStartupCommandNotSet(); this.startupCommand = "bin/taskmanager.sh start-foreground && tail -f /dev/null"; this.imageNameSuffix = "taskmanager"; return this; }
3.68
hbase_LongComparator_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the comparator that are serialized are equal to the * corresponding fields in other. Used for testing. */ boolean areSerializedFieldsEqual(LongComparator other) { if (other == this) { return true; } if (other == null) { return false; } return super.areSerializedFieldsEqual(other); }
3.68
hadoop_Server_initConfig
/** * Loads and initializes the server configuration. * * @throws ServerException thrown if the configuration could not be loaded/initialized. */ protected void initConfig() throws ServerException { verifyDir(configDir); File file = new File(configDir); Configuration defaultConf; String defaultConfig = name + "-default.xml"; ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); InputStream inputStream = classLoader.getResourceAsStream(defaultConfig); if (inputStream == null) { log.warn("Default configuration file not available in classpath [{}]", defaultConfig); defaultConf = new Configuration(false); } else { try { defaultConf = new Configuration(false); ConfigurationUtils.load(defaultConf, inputStream); } catch (Exception ex) { throw new ServerException(ServerException.ERROR.S03, defaultConfig, ex.getMessage(), ex); } } if (config == null) { Configuration siteConf; File siteFile = new File(file, name + "-site.xml"); if (!siteFile.exists()) { log.warn("Site configuration file [{}] not found in config directory", siteFile); siteConf = new Configuration(false); } else { if (!siteFile.isFile()) { throw new ServerException(ServerException.ERROR.S05, siteFile.getAbsolutePath()); } try { log.debug("Loading site configuration from [{}]", siteFile); inputStream = Files.newInputStream(siteFile.toPath()); siteConf = new Configuration(false); ConfigurationUtils.load(siteConf, inputStream); } catch (IOException ex) { throw new ServerException(ServerException.ERROR.S06, siteFile, ex.getMessage(), ex); } } config = new Configuration(false); ConfigurationUtils.copy(siteConf, config); } ConfigurationUtils.injectDefaults(defaultConf, config); ConfigRedactor redactor = new ConfigRedactor(config); for (String name : System.getProperties().stringPropertyNames()) { String value = System.getProperty(name); if (name.startsWith(getPrefix() + ".")) { config.set(name, value); String redacted = redactor.redact(name, value); log.info("System property sets {}: {}", name, redacted); } } log.debug("Loaded Configuration:"); log.debug("------------------------------------------------------"); for (Map.Entry<String, String> entry : config) { String name = entry.getKey(); String value = config.get(entry.getKey()); String redacted = redactor.redact(name, value); log.debug(" {}: {}", entry.getKey(), redacted); } log.debug("------------------------------------------------------"); }
3.68
hbase_AsyncAdminBuilder_setMaxRetries
/** * Set the max retry times for an admin operation. Usually it is the max attempt times minus 1. * Operation timeout and max attempt times(or max retry times) are both limitations for retrying, * we will stop retrying when we reach any of the limitations. * @return this for invocation chaining */ default AsyncAdminBuilder setMaxRetries(int maxRetries) { return setMaxAttempts(retries2Attempts(maxRetries)); }
3.68
AreaShop_Utils_formatCurrency
/** * Format the currency amount with the configured characters before and after it. * @param amount Amount of money to format * @return Formatted currency string */ public static String formatCurrency(double amount) { String before = config.getString("moneyCharacter"); before = before.replace(AreaShop.currencyEuro, "€"); String after = config.getString("moneyCharacterAfter"); after = after.replace(AreaShop.currencyEuro, "€"); String result; // Check for infinite and NaN if(Double.isInfinite(amount)) { result = "\u221E"; // Infinity symbol } else if(Double.isNaN(amount)) { result = "NaN"; } else { BigDecimal bigDecimal = BigDecimal.valueOf(amount); boolean stripTrailingZeros = false; int fractionalNumber = config.getInt("fractionalNumbers"); // Add metric suffix if necessary if(config.getDouble("metricSuffixesAbove") != -1) { String suffix = null; double divider = 1; for(Double number : suffixes.keySet()) { if(amount >= number && number > divider) { divider = number; suffix = suffixes.get(number); } } if(suffix != null) { bigDecimal = BigDecimal.valueOf(amount / divider); after = suffix + after; fractionalNumber = config.getInt("fractionalNumbersShort"); stripTrailingZeros = true; } } // Round if necessary if(fractionalNumber >= 0) { bigDecimal = bigDecimal.setScale(fractionalNumber, RoundingMode.HALF_UP); } result = bigDecimal.toString(); if(config.getBoolean("hideEmptyFractionalPart")) { // Strip zero fractional: 12.00 -> 12 if(bigDecimal.remainder(BigDecimal.ONE).compareTo(BigDecimal.ZERO) == 0 && result.contains(".")) { result = result.substring(0, result.indexOf('.')); } // Strip zeros from suffixed numbers: 1.20M -> 1.2M if(stripTrailingZeros && result.contains(".")) { result = result.replaceAll("0+$", ""); } } } result = result.replace(".", config.getString("decimalMark")); Message resultMessage = Message.fromString(result); resultMessage.prepend(before); resultMessage.append(after); return resultMessage.getSingle(); }
3.68
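The metric-suffix branch above picks the largest divider that does not exceed the amount, divides, and re-rounds. A reduced sketch of just that step (hypothetical suffix table; AreaShop reads its suffixes, scales and markers from config):

import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.LinkedHashMap;
import java.util.Map;

public class SuffixSketch {
    // Hypothetical suffix table; the real plugin builds this from its config.
    static final Map<Double, String> SUFFIXES = new LinkedHashMap<>();
    static {
        SUFFIXES.put(1_000D, "k");
        SUFFIXES.put(1_000_000D, "M");
    }

    // Pick the largest divider <= amount, divide, round to one decimal.
    static String shorten(double amount) {
        double divider = 1;
        String suffix = "";
        for (Map.Entry<Double, String> e : SUFFIXES.entrySet()) {
            if (amount >= e.getKey() && e.getKey() > divider) {
                divider = e.getKey();
                suffix = e.getValue();
            }
        }
        BigDecimal value = BigDecimal.valueOf(amount / divider)
            .setScale(1, RoundingMode.HALF_UP)
            .stripTrailingZeros();
        return value.toPlainString() + suffix;
    }

    public static void main(String[] args) {
        System.out.println(shorten(1_200_000)); // 1.2M
        System.out.println(shorten(500));       // 500
    }
}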
hudi_HoodieTableMetadataUtil_convertMetadataToRollbackRecords
/** * Convert rollback action metadata to files partition records. * Consider only new log files added. */ private static List<HoodieRecord> convertMetadataToRollbackRecords(HoodieRollbackMetadata rollbackMetadata, String instantTime, HoodieTableMetaClient dataTableMetaClient) { Map<String, Map<String, Long>> partitionToAppendedFiles = new HashMap<>(); processRollbackMetadata(rollbackMetadata, partitionToAppendedFiles); reAddLogFilesFromRollbackPlan(dataTableMetaClient, instantTime, partitionToAppendedFiles); return convertFilesToFilesPartitionRecords(Collections.emptyMap(), partitionToAppendedFiles, instantTime, "Rollback"); }
3.68
hbase_SpaceLimitingException_getViolationPolicy
/** * Returns the violation policy in effect. * @return The violation policy in effect. */ public String getViolationPolicy() { return this.policyName; }
3.68
hadoop_FlowRunCoprocessor_getCellTimestamp
/** * Determines if the current cell's timestamp is to be used or a new unique * cell timestamp is to be used. The reason this is done is to avoid inadvertently * overwriting cells when writes come in very fast. But for metric cells, the * cell timestamp signifies the metric timestamp. Hence we don't want to * overwrite it. * * @param timestamp the timestamp of the incoming cell * @param tags the tags of the cell * @return cell timestamp */ private long getCellTimestamp(long timestamp, List<Tag> tags) { // if ts not set (hbase sets to HConstants.LATEST_TIMESTAMP by default) // then use the generator if (timestamp == HConstants.LATEST_TIMESTAMP) { return timestampGenerator.getUniqueTimestamp(); } else { return timestamp; } }
3.68
flink_TestcontainersSettings_build
/** * Returns a {@code TestContainersSettings} built from the parameters previously set. * * @return A {@code TestContainersSettings} built with parameters of this {@code * TestContainersSettings.Builder} */ public TestcontainersSettings build() { return new TestcontainersSettings(this); }
3.68
framework_Upload_changeVariables
/** * Invoked when the value of a variable has changed. * * @see com.vaadin.ui.LegacyComponent#changeVariables(java.lang.Object, * java.util.Map) */ @Override public void changeVariables(Object source, Map<String, Object> variables) { if (variables.containsKey("pollForStart")) { int id = (Integer) variables.get("pollForStart"); if (!isUploading && id == nextid) { notStarted = true; markAsDirty(); } else { } } }
3.68
framework_AbstractSelect_getCaptionChangeListener
// Caption change listener protected CaptionChangeListener getCaptionChangeListener() { if (captionChangeListener == null) { captionChangeListener = new CaptionChangeListener(); } return captionChangeListener; }
3.68
hudi_StreamWriteFunction_trace
/** * Trace the given record size {@code recordSize}. * * @param recordSize The record size * @return true if the buffer size exceeds the maximum buffer size */ boolean trace(long recordSize) { this.bufferSize += recordSize; return this.bufferSize > this.maxBufferSize; }
3.68
hbase_DynamicMetricsRegistry_get
/** * Get a metric by name * @param name of the metric * @return the metric object */ public MutableMetric get(String name) { return metricsMap.get(name); }
3.68
framework_VMenuBar_getNavigationSelectKey
/** * Get the key that selects a menu item. By default it is the Enter key but * by overriding this you can change the key to whatever you want. * * @deprecated use {@link #isNavigationSelectKey(int)} instead * @return the keycode of the key that selects a menu item */ @Deprecated protected int getNavigationSelectKey() { return KeyCodes.KEY_ENTER; }
3.68
framework_LayoutDependencyTree_getHorizontalLayoutTargets
/** * @return array of managed layouts waiting for horizontal layouting * @deprecated As of 7.0.1, use {@link #getHorizontalLayoutTargetsJsArray()} * for improved performance. */ @Deprecated public ManagedLayout[] getHorizontalLayoutTargets() { return asManagedLayoutArray(getHorizontalLayoutTargetsJsArray()); }
3.68
flink_InPlaceMutableHashTable_updateMatch
/**
 * This method can be called after getMatchFor returned a match. It will overwrite the
 * record that was found by getMatchFor. Warning: the new record should have the same key
 * as the old one! Also, don't do any modifications to the table between getMatchFor and
 * updateMatch!
 *
 * @param newRecord The record to override the old record with.
 * @throws IOException (EOFException specifically, if memory ran out)
 */
@Override
public void updateMatch(T newRecord) throws IOException {
    if (closed) {
        return;
    }
    if (curElemPtr == END_OF_LIST) {
        throw new RuntimeException("updateMatch was called after getMatchFor returned no match");
    }

    try {
        // determine the new size
        stagingSegmentsOutView.reset();
        buildSideSerializer.serialize(newRecord, stagingSegmentsOutView);
        final int newRecordSize = (int) stagingSegmentsOutView.getWritePosition();
        stagingSegmentsInView.setReadPosition(0);

        // Determine the size of the place of the old record.
        final int oldRecordSize = (int) (recordEnd - (curElemPtr + RECORD_OFFSET_IN_LINK));

        if (newRecordSize == oldRecordSize) {
            // overwrite record at its original place
            recordArea.overwriteRecordAt(curElemPtr + RECORD_OFFSET_IN_LINK, stagingSegmentsInView, newRecordSize);
        } else {
            // The new record has a different size than the old one, so append the new one at
            // the end of the record area.
            // Note: we have to do this even if the new record is smaller, because otherwise
            // EntryIterator wouldn't know the size of this place, and wouldn't know where the
            // next record starts.
            final long pointerToAppended =
                    recordArea.appendPointerAndCopyRecord(nextPtr, stagingSegmentsInView, newRecordSize);

            // modify the pointer in the previous link
            if (prevElemPtr == INVALID_PREV_POINTER) {
                // list had only one element, so prev is in the bucketSegments
                bucketSegments[bucketSegmentIndex].putLong(bucketOffset, pointerToAppended);
            } else {
                recordArea.overwritePointerAt(prevElemPtr, pointerToAppended);
            }

            // Write the negated size of the hole to the place where the next pointer was, so
            // that EntryIterator will know the size of the place without reading the old
            // record. The negative sign marks the record as abandoned, and the -1 avoids
            // trouble in case of a record having 0 size (though this should never actually
            // happen).
            // Note: the last record in the record area can't be abandoned. (EntryIterator
            // makes use of this fact.)
            recordArea.overwritePointerAt(curElemPtr, -oldRecordSize - 1);

            holes += oldRecordSize;
        }
    } catch (EOFException ex) {
        compactOrThrow();
        insertOrReplaceRecord(newRecord);
    }
}
3.68
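The subtlest step in updateMatch is the hole marker: when the record is relocated, the slot's old pointer field is overwritten with -oldRecordSize - 1, so a later scan can skip the abandoned bytes without deserializing them, and even a 0-byte record still yields a negative marker. A tiny self-contained illustration of that encoding, with hypothetical names:

/** Hypothetical illustration of the abandoned-slot encoding used in updateMatch. */
public class HoleMarkerDemo {
    // encode: negate and subtract 1 so that even a 0-byte record yields a negative marker
    static long encodeHole(int oldRecordSize) {
        return -oldRecordSize - 1;
    }

    // decode: a negative header means "abandoned slot of this many bytes"
    static int decodeHoleSize(long header) {
        if (header >= 0) {
            throw new IllegalArgumentException("not a hole marker: " + header);
        }
        return (int) (-header - 1);
    }

    public static void main(String[] args) {
        long marker = encodeHole(0);                         // -1: still distinct from any valid pointer >= 0
        System.out.println(decodeHoleSize(marker));          // 0
        System.out.println(decodeHoleSize(encodeHole(42)));  // 42
    }
}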
pulsar_BrokerService_setupBrokerPublishRateLimiterMonitor
/**
 * Schedules and monitors publish-throttling for a broker that has publish-throttling configured.
 * It also disables and shuts down the publish-rate-limiter monitor if the broker disables
 * publish throttling.
 */
public void setupBrokerPublishRateLimiterMonitor() {
    // set broker PublishRateLimiterMonitor
    long brokerTickTimeMs = pulsar().getConfiguration().getBrokerPublisherThrottlingTickTimeMillis();
    if (brokerTickTimeMs > 0) {
        brokerPublishRateLimiterMonitor.startOrUpdate(brokerTickTimeMs,
                this::checkBrokerPublishThrottlingRate, this::refreshBrokerPublishRate);
    } else {
        // disable publish-throttling for broker.
        brokerPublishRateLimiterMonitor.stop();
    }
}
3.68
framework_GridElement_getFooterRow
/**
 * Get a footer row by index.
 *
 * @param rowIndex
 *            Row index
 * @return The tr element of the row
 */
public TestBenchElement getFooterRow(int rowIndex) {
    return getSubPart("#footer[" + rowIndex + "]");
}
3.68
hbase_User_addToken
/**
 * Adds the given Token to the user's credentials.
 * @param token the token to add
 */
public void addToken(Token<? extends TokenIdentifier> token) {
  ugi.addToken(token);
}
3.68
hudi_HiveMetastoreBasedLockProvider_acquireLock
// This API is exposed for tests and not intended to be used elsewhere
public boolean acquireLock(long time, TimeUnit unit, final LockComponent component)
    throws InterruptedException, ExecutionException, TimeoutException, TException {
  ValidationUtils.checkArgument(this.lock == null, ALREADY_ACQUIRED.name());
  acquireLockInternal(time, unit, component);
  return this.lock != null && this.lock.getState() == LockState.ACQUIRED;
}
3.68
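For context, a hedged sketch of how a test might call acquireLock, assuming the Hive metastore thrift types (LockComponent, LockType, LockLevel, DataOperationType) and a provider instance obtained elsewhere; the setup lines are assumptions for illustration, not Hudi's documented API surface:

// Sketch only: constructor/setter usage of the thrift types is assumed, not verified here.
LockComponent component = new LockComponent(LockType.EXCLUSIVE, LockLevel.TABLE, "my_db");
component.setTablename("my_table");
component.setOperationType(DataOperationType.NO_TXN);
boolean acquired = lockProvider.acquireLock(30, TimeUnit.SECONDS, component);
if (!acquired) {
    // lock is held elsewhere: back off and retry, or fail the write
}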
graphhopper_OSMReader_preprocessWay
/**
 * This method is called for each way during the second pass and before the way is split into edges.
 * We currently use it to parse road names and calculate the distance of a way to determine the speed based on
 * the duration tag when it is present. The latter cannot be done on a per-edge basis, because the duration tag
 * refers to the duration of the entire way.
 */
protected void preprocessWay(ReaderWay way, WaySegmentParser.CoordinateSupplier coordinateSupplier) {
    // storing the road name does not yet depend on the flagEncoder so manage it directly
    List<KVStorage.KeyValue> list = new ArrayList<>();
    if (config.isParseWayNames()) {
        // http://wiki.openstreetmap.org/wiki/Key:name
        String name = "";
        if (!config.getPreferredLanguage().isEmpty())
            name = fixWayName(way.getTag("name:" + config.getPreferredLanguage()));
        if (name.isEmpty())
            name = fixWayName(way.getTag("name"));
        if (!name.isEmpty())
            list.add(new KVStorage.KeyValue(STREET_NAME, name));

        // http://wiki.openstreetmap.org/wiki/Key:ref
        String refName = fixWayName(way.getTag("ref"));
        if (!refName.isEmpty())
            list.add(new KVStorage.KeyValue(STREET_REF, refName));

        if (way.hasTag("destination:ref")) {
            list.add(new KVStorage.KeyValue(STREET_DESTINATION_REF, fixWayName(way.getTag("destination:ref"))));
        } else {
            if (way.hasTag("destination:ref:forward"))
                list.add(new KVStorage.KeyValue(STREET_DESTINATION_REF, fixWayName(way.getTag("destination:ref:forward")), true, false));
            if (way.hasTag("destination:ref:backward"))
                list.add(new KVStorage.KeyValue(STREET_DESTINATION_REF, fixWayName(way.getTag("destination:ref:backward")), false, true));
        }
        if (way.hasTag("destination")) {
            list.add(new KVStorage.KeyValue(STREET_DESTINATION, fixWayName(way.getTag("destination"))));
        } else {
            if (way.hasTag("destination:forward"))
                list.add(new KVStorage.KeyValue(STREET_DESTINATION, fixWayName(way.getTag("destination:forward")), true, false));
            if (way.hasTag("destination:backward"))
                list.add(new KVStorage.KeyValue(STREET_DESTINATION, fixWayName(way.getTag("destination:backward")), false, true));
        }
    }
    way.setTag("key_values", list);

    if (!isCalculateWayDistance(way))
        return;

    double distance = calcDistance(way, coordinateSupplier);
    if (Double.isNaN(distance)) {
        // Some nodes were missing, and we cannot determine the distance. This can happen when ways are only
        // included partially in an OSM extract. In this case we cannot calculate the speed either, so we return.
        LOGGER.warn("Could not determine distance for OSM way: " + way.getId());
        return;
    }
    way.setTag("way_distance", distance);

    // For ways with a duration tag we determine the average speed. This is needed for e.g. ferry routes, because
    // the duration tag is only valid for the entire way, and it would be wrong to use it after splitting the way
    // into edges.
    String durationTag = way.getTag("duration");
    if (durationTag == null) {
        // no duration tag -> we cannot derive the speed. This happens very frequently for short ferries, but
        // also for some long ones, see: #2532
        if (isFerry(way) && distance > 500_000)
            OSM_WARNING_LOGGER.warn("Long ferry OSM way without duration tag: " + way.getId()
                    + ", distance: " + Math.round(distance / 1000.0) + " km");
        return;
    }
    long durationInSeconds;
    try {
        durationInSeconds = OSMReaderUtility.parseDuration(durationTag);
    } catch (Exception e) {
        OSM_WARNING_LOGGER.warn("Could not parse duration tag '" + durationTag + "' in OSM way: " + way.getId());
        return;
    }

    double speedInKmPerHour = distance / 1000 / (durationInSeconds / 60.0 / 60.0);
    if (speedInKmPerHour < 0.1d) {
        // Often there are mapping errors like duration=30:00 (30h) instead of duration=00:30 (30min). In this
        // case we ignore the duration tag. If no such cases show up anymore because they were fixed, maybe
        // raise the limit to find some more.
        OSM_WARNING_LOGGER.warn("Unrealistically low speed calculated from duration. Maybe the duration is too long,"
                + " or it is applied to a way that only represents a part of the connection? OSM way: "
                + way.getId() + ". duration=" + durationTag + " (= " + Math.round(durationInSeconds / 60.0)
                + " minutes), distance=" + distance + " m");
        return;
    }
    // This tag will be present if 1) isCalculateWayDistance was true for this way, 2) no OSM nodes were missing
    // such that the distance could actually be calculated, 3) there was a duration tag we could parse, and 4) the
    // derived speed was not unrealistically slow.
    way.setTag("speed_from_duration", speedInKmPerHour);
}
3.68
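The speed derivation in preprocessWay is plain arithmetic: meters and seconds give km/h via distance / 1000 / (seconds / 3600). A self-contained check of the formula and of the 0.1 km/h sanity threshold, with invented example values:

public class DurationSpeedDemo {
    public static void main(String[] args) {
        double distanceMeters = 1_500;          // short ferry leg
        long durationSeconds = 30 * 60;         // duration=00:30 -> 1800 s
        double speed = distanceMeters / 1000 / (durationSeconds / 60.0 / 60.0);
        System.out.println(speed);              // 3.0 km/h -> plausible, kept

        // typical mapping error: duration=30:00 misread as 30 hours instead of 30 minutes
        long wrongDuration = 30 * 60 * 60;
        double wrongSpeed = distanceMeters / 1000 / (wrongDuration / 60.0 / 60.0);
        System.out.println(wrongSpeed < 0.1);   // true (0.05 km/h) -> duration tag ignored
    }
}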
morf_MySqlDialect_innerJoinKeyword
/**
 * If using {@link SelectStatement#useImplicitJoinOrder()}, we switch inner joins to STRAIGHT_JOINs.
 *
 * @see org.alfasoftware.morf.jdbc.SqlDialect#innerJoinKeyword(org.alfasoftware.morf.sql.AbstractSelectStatement)
 */
@Override
protected String innerJoinKeyword(AbstractSelectStatement<?> stmt) {
  if (stmt instanceof SelectStatement) {
    List<Hint> hints = ((SelectStatement) stmt).getHints();
    if (tryFind(hints, instanceOf(UseImplicitJoinOrder.class)).isPresent()) {
      return "STRAIGHT_JOIN";
    }
  }
  return super.innerJoinKeyword(stmt);
}
3.68
hadoop_CSQueueStore_getFullNameQueues
/**
 * Returns an immutable map of all queues, keyed by full queue path.
 * @return an immutable map containing all queues, keyed by full path
 */
Map<String, CSQueue> getFullNameQueues() {
  return ImmutableMap.copyOf(fullNameQueues);
}
3.68
hibernate-validator_AnnotationMessageCheck_checkMessage
/**
 * Verifies that the message passed as parameter is valid (passes a regexp check).
 *
 * @param message a message to verify
 *
 * @return {@code true} if message is valid, {@code false} otherwise
 */
protected boolean checkMessage(String message) {
    return MESSAGE_PATTERN.matcher( message ).matches();
}
3.68
hudi_InternalSchemaChangeApplier_applyAddChange
/**
 * Add columns to table.
 *
 * @param colName col name to be added. If we want to add a col to a nested field, the full name should be specified
 * @param colType col type to be added.
 * @param doc col doc to be added.
 * @param position col position to be added
 * @param positionType col position change type. Now supports three change types: first/after/before
 */
public InternalSchema applyAddChange(
    String colName,
    Type colType,
    String doc,
    String position,
    TableChange.ColumnPositionChange.ColumnPositionType positionType) {
  TableChanges.ColumnAddChange add = TableChanges.ColumnAddChange.get(latestSchema);
  String parentName = TableChangesHelper.getParentName(colName);
  String leafName = TableChangesHelper.getLeafName(colName);
  add.addColumns(parentName, leafName, colType, doc);
  if (positionType != null) {
    switch (positionType) {
      case NO_OPERATION:
        break;
      case FIRST:
        add.addPositionChange(colName, "", positionType);
        break;
      case AFTER:
      case BEFORE:
        if (position == null || position.isEmpty()) {
          throw new IllegalArgumentException("position should not be null/empty when positionType is set to after/before");
        }
        String referParentName = TableChangesHelper.getParentName(position);
        if (!parentName.equals(referParentName)) {
          throw new IllegalArgumentException("cannot reorder two columns that have different parents");
        }
        add.addPositionChange(colName, position, positionType);
        break;
      default:
        throw new IllegalArgumentException(String.format("only support first/before/after but found: %s", positionType));
    }
  } else {
    throw new IllegalArgumentException("positionType should be specified");
  }
  return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, add);
}
3.68
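A hedged usage sketch for applyAddChange, adding a nested column after an existing sibling. The constructor signature and the Types.StringType.get() type factory are assumptions about Hudi's internal-schema API, shown for illustration only:

// Sketch only: constructor and type-factory calls are assumptions, not verified API.
InternalSchemaChangeApplier applier = new InternalSchemaChangeApplier(currentSchema);
// add user.nickname right after user.name inside the nested 'user' struct
InternalSchema evolved = applier.applyAddChange(
    "user.nickname",
    Types.StringType.get(),
    "display name chosen by the user",
    "user.name",
    TableChange.ColumnPositionChange.ColumnPositionType.AFTER);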
flink_PythonStreamGroupTableAggregateOperator_getUserDefinedFunctionsProto
/**
 * Gets the proto representation of the Python user-defined table aggregate function to be
 * executed.
 */
@Override
public FlinkFnApi.UserDefinedAggregateFunctions getUserDefinedFunctionsProto() {
    FlinkFnApi.UserDefinedAggregateFunctions.Builder builder =
            super.getUserDefinedFunctionsProto().toBuilder();
    return builder.build();
}
3.68
hibernate-validator_MessagerAdapter_report
/**
 * Reports the given issue. Message parameters will be put into the template
 * retrieved from the resource bundle if applicable.
 *
 * @param issue The issue to report.
 * @param kind Kind of diagnostics to be used for reporting a given issue.
 */
private void report(ConstraintCheckIssue issue, Kind kind) {
    String message = errorMessages.getString( issue.getMessageKey() );

    if ( issue.getMessageParameters() != null ) {
        MessageFormat messageFormat = new MessageFormat( message, Locale.getDefault() );
        message = messageFormat.format( issue.getMessageParameters() );
    }

    messager.printMessage( kind, message, issue.getElement(), issue.getAnnotationMirror() );
}
3.68
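The parameter substitution in report is plain java.text.MessageFormat. A self-contained example of that step, with an invented template and parameters:

import java.text.MessageFormat;
import java.util.Locale;

public class MessageFormatDemo {
    public static void main(String[] args) {
        String template = "Constraint {0} is not allowed on element {1}.";
        MessageFormat format = new MessageFormat(template, Locale.getDefault());
        // the Object[] plays the role of issue.getMessageParameters()
        String message = format.format(new Object[] {"@NotNull", "static field"});
        System.out.println(message); // Constraint @NotNull is not allowed on element static field.
    }
}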