Dataset columns:
    name          string  (lengths 12 to 178)
    code_snippet  string  (lengths 8 to 36.5k)
    score         float64 (range 3.26 to 3.68)
framework_VAbstractCalendarPanel_setRangeStart
/**
 * Sets the start range for this component. The start range is inclusive;
 * the current resolution determines what is considered inside the range.
 *
 * @param newRangeStart
 *            the allowed range's start date
 */
public void setRangeStart(String newRangeStart) {
    if (!SharedUtil.equals(rangeStart, newRangeStart)) {
        rangeStart = newRangeStart;
        if (initialRenderDone) {
            // Dynamic updates to the range need to re-render the calendar to
            // update the element stylenames
            renderCalendar();
        }
    }
}
3.68
framework_StringToShortConverter_convertToModel
/*
 * (non-Javadoc)
 *
 * @see
 * com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object,
 * java.lang.Class, java.util.Locale)
 */
@Override
public Short convertToModel(String value, Class<? extends Short> targetType,
        Locale locale) throws ConversionException {
    Number n = convertToNumber(value, targetType, locale);
    if (n == null) {
        return null;
    }
    short shortValue = n.shortValue();
    if (shortValue == n.longValue()) {
        return shortValue;
    }
    throw new ConversionException("Could not convert '" + value + "' to "
            + Short.class.getName() + ": value out of range");
}
3.68
flink_ExecutionEnvironment_createLocalEnvironmentWithWebUI
/**
 * Creates a {@link LocalEnvironment} for local program execution that also starts the web
 * monitoring UI.
 *
 * <p>The local execution environment will run the program in a multi-threaded fashion in the
 * same JVM as the environment was created in. It will use the parallelism specified in the
 * parameter.
 *
 * <p>If the configuration key 'rest.port' was set in the configuration, that particular port
 * will be used for the web UI. Otherwise, the default port (8081) will be used.
 */
@PublicEvolving
public static ExecutionEnvironment createLocalEnvironmentWithWebUI(Configuration conf) {
    checkNotNull(conf, "conf");
    if (!conf.contains(RestOptions.PORT)) {
        // explicitly set this option so that it's not set to 0 later
        conf.setInteger(RestOptions.PORT, RestOptions.PORT.defaultValue());
    }
    return createLocalEnvironment(conf, -1);
}
3.68
hadoop_SelectTool_bandwidthMBs
/**
 * Work out the bandwidth in MB/s.
 * @param bytes bytes
 * @param durationMillisNS duration in milliseconds
 * @return the number of megabytes/second of the recorded operation
 */
public static double bandwidthMBs(long bytes, long durationMillisNS) {
    return durationMillisNS > 0
        ? (bytes / 1048576.0 * 1000 / durationMillisNS)
        : 0;
}
3.68
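Note: a quick standalone check of the arithmetic in bandwidthMBs above (the class and values are illustrative): bytes are scaled to MiB, then the per-millisecond rate is scaled to per-second.

public class BandwidthCheck {
    public static void main(String[] args) {
        // 10 MiB transferred in 2000 ms -> 10 * 1000 / 2000 = 5.0 MB/s
        long bytes = 10L * 1048576L;
        long durationMillis = 2000L;
        double mbs = bytes / 1048576.0 * 1000 / durationMillis;
        System.out.println(mbs); // 5.0
    }
}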
dubbo_StringUtils_stripEnd
/**
 * <p>Strips any of a set of characters from the end of a String.</p>
 *
 * <p>A {@code null} input String returns {@code null}.
 * An empty string ("") input returns the empty string.</p>
 *
 * <p>If the stripChars String is {@code null}, whitespace is
 * stripped as defined by {@link Character#isWhitespace(char)}.</p>
 *
 * <pre>
 * StringUtils.stripEnd(null, *)          = null
 * StringUtils.stripEnd("", *)            = ""
 * StringUtils.stripEnd("abc", "")        = "abc"
 * StringUtils.stripEnd("abc", null)      = "abc"
 * StringUtils.stripEnd("  abc", null)    = "  abc"
 * StringUtils.stripEnd("abc  ", null)    = "abc"
 * StringUtils.stripEnd(" abc ", null)    = " abc"
 * StringUtils.stripEnd("  abcyx", "xyz") = "  abc"
 * StringUtils.stripEnd("120.00", ".0")   = "12"
 * </pre>
 *
 * @param str        the String to remove characters from, may be null
 * @param stripChars the set of characters to remove, null treated as whitespace
 * @return the stripped String, {@code null} if null String input
 */
public static String stripEnd(final String str, final String stripChars) {
    int end;
    if (str == null || (end = str.length()) == 0) {
        return str;
    }

    if (stripChars == null) {
        while (end != 0 && Character.isWhitespace(str.charAt(end - 1))) {
            end--;
        }
    } else if (stripChars.isEmpty()) {
        return str;
    } else {
        while (end != 0 && stripChars.indexOf(str.charAt(end - 1)) != INDEX_NOT_FOUND) {
            end--;
        }
    }
    return str.substring(0, end);
}
3.68
hadoop_DiskBalancerWorkItem_incBlocksCopied
/**
 * Increments the number of blocks copied.
 */
public void incBlocksCopied() {
    blocksCopied++;
}
3.68
hbase_TableState_isDisabling
/** Returns True if table is disabling. */
public boolean isDisabling() {
    return isInStates(State.DISABLING);
}
3.68
hbase_Subprocedure_receiveReachedGlobalBarrier
/**
 * Callback for the member rpcs to call when the global barrier has been reached. This unblocks
 * the main subprocedure execution thread so that the Subprocedure's {@link #insideBarrier()}
 * method can be run.
 */
public void receiveReachedGlobalBarrier() {
    inGlobalBarrier.countDown();
}
3.68
rocketmq-connect_DebeziumOracleConnector_getConnectorClass
/**
 * get connector class
 */
@Override
public String getConnectorClass() {
    return DEFAULT_CONNECTOR;
}
3.68
framework_VAbstractOrderedLayout_removeWidget
/**
 * Remove a slot from the layout.
 *
 * @param widget
 *            the widget whose slot to remove
 */
public void removeWidget(Widget widget) {
    Slot slot = widgetToSlot.remove(widget);
    if (slot != null) {
        removeSlot(slot);
    }
}
3.68
hudi_ExternalSpillableMap_getCurrentInMemoryMapSize
/**
 * Approximate memory footprint of the in-memory map.
 */
public long getCurrentInMemoryMapSize() {
    return currentInMemoryMapSize;
}
3.68
framework_AbstractOrderedLayoutConnector_updateLayoutHeight
/**
 * Re-calculate the layout height.
 */
private void updateLayoutHeight() {
    if (needsFixedHeight()) {
        int h = getMaxHeight();
        if (h < 0) {
            // Postpone change if there are elements that have not yet been
            // measured
            return;
        }
        h += getLayoutManager().getBorderHeight(getWidget().getElement())
                + getLayoutManager()
                        .getPaddingHeight(getWidget().getElement());
        getWidget().getElement().getStyle().setHeight(h, Unit.PX);
        getLayoutManager().setNeedsMeasure(this);
    }
}
3.68
hbase_ConnectionCache_getEffectiveUser
/**
 * Get the current thread local effective user.
 */
public String getEffectiveUser() {
    return effectiveUserNames.get();
}
3.68
framework_Escalator_setStylePrimaryName
/**
 * Sets the primary style name for the container.
 *
 * @param primaryStyleName
 *            the style name to use as prefix for all row and cell style
 *            names.
 */
protected void setStylePrimaryName(String primaryStyleName) {
    String oldStyle = getStylePrimaryName();
    if (SharedUtil.equals(oldStyle, primaryStyleName)) {
        return;
    }
    this.primaryStyleName = primaryStyleName;

    // Update already rendered rows and cells
    Element row = root.getRows().getItem(0);
    while (row != null) {
        UIObject.setStylePrimaryName(row, primaryStyleName + "-row");
        Element cell = TableRowElement.as(row).getCells().getItem(0);
        while (cell != null) {
            assert TableCellElement.is(cell);
            UIObject.setStylePrimaryName(cell, primaryStyleName + "-cell");
            cell = cell.getNextSiblingElement();
        }
        row = row.getNextSiblingElement();
    }
}
3.68
flink_LongHashPartition_updateIndex
/** Update the address in array for given key. */
private void updateIndex(
        long key,
        int hashCode,
        long address,
        int size,
        MemorySegment dataSegment,
        int currentPositionInSegment)
        throws IOException {
    assert (numKeys <= numBuckets / 2);
    int bucketId = findBucket(hashCode);

    // each bucket occupied 16 bytes (long key + long pointer to data address)
    int bucketOffset = bucketId * SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES;
    MemorySegment segment = buckets[bucketOffset >>> segmentSizeBits];
    int segOffset = bucketOffset & segmentSizeMask;
    long currAddress;

    while (true) {
        currAddress = segment.getLong(segOffset + 8);
        if (segment.getLong(segOffset) != key && currAddress != INVALID_ADDRESS) {
            // hash conflicts, the bucket is occupied by another key
            // TODO test Conflict resolution:
            // now: +1 +1 +1... cache friendly but more conflict, so we set factor to 0.5
            // other1: +1 +2 +3... less conflict, factor can be 0.75
            // other2: Secondary hashCode... less and less conflict, but need compute hash again
            bucketId = (bucketId + 1) & numBucketsMask;
            if (segOffset + SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES < segmentSize) {
                // if the new bucket still in current segment, we only need to update offset
                // within this segment
                segOffset += SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES;
            } else {
                // otherwise, we should re-calculate segment and offset
                bucketOffset = bucketId * 16;
                segment = buckets[bucketOffset >>> segmentSizeBits];
                segOffset = bucketOffset & segmentSizeMask;
            }
        } else {
            break;
        }
    }

    if (currAddress == INVALID_ADDRESS) {
        // this is the first value for this key, put the address in array.
        segment.putLong(segOffset, key);
        segment.putLong(segOffset + 8, address);
        numKeys += 1;
        // dataSegment may be null if we only have to rehash bucket area
        if (dataSegment != null) {
            dataSegment.putLong(currentPositionInSegment, toAddrAndLen(INVALID_ADDRESS, size));
        }
        if (numKeys * 2 > numBuckets) {
            resize();
        }
    } else {
        // there are some values for this key, put the address in the front of them.
        dataSegment.putLong(currentPositionInSegment, toAddrAndLen(currAddress, size));
        segment.putLong(segOffset + 8, address);
    }
}
3.68
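Note: the probing loop in updateIndex above is plain linear probing (step +1, load factor capped at 0.5). A minimal sketch of the same idea on a plain long[] table — illustrative only, not Flink code; resizing is omitted and Long.MIN_VALUE serves as the free-slot sentinel.

public class LinearProbeSketch {
    static final long EMPTY = Long.MIN_VALUE; // sentinel: slot unused
    long[] keys = new long[16];   // power-of-two capacity, like the bucket array
    long[] values = new long[16];
    { java.util.Arrays.fill(keys, EMPTY); }

    void put(long key, long value) {
        int mask = keys.length - 1;
        int i = Long.hashCode(key) & mask;
        // step +1 until the key or a free slot is found; cache friendly but
        // clustering-prone, which is why updateIndex caps the load factor at 0.5
        while (keys[i] != EMPTY && keys[i] != key) {
            i = (i + 1) & mask;
        }
        keys[i] = key;   // resize() is omitted in this sketch
        values[i] = value;
    }

    public static void main(String[] args) {
        LinearProbeSketch map = new LinearProbeSketch();
        map.put(1L, 100L);
        map.put(17L, 200L); // collides with 1 on a 16-slot table, probes to the next slot
        System.out.println(map.keys[1] + " " + map.keys[2]); // 1 17
    }
}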
dubbo_AbstractDynamicConfiguration_execute
/**
 * Executes the {@link Callable} with the specified timeout
 *
 * @param task    the {@link Callable task}
 * @param timeout timeout in milliseconds
 * @param <V>     the type of computing result
 * @return the computing result
 */
protected final <V> V execute(Callable<V> task, long timeout) {
    V value = null;
    try {
        if (timeout < 1) { // less or equal 0
            value = task.call();
        } else {
            Future<V> future = workersThreadPool.submit(task);
            value = future.get(timeout, TimeUnit.MILLISECONDS);
        }
    } catch (Exception e) {
        if (logger.isErrorEnabled()) {
            logger.error(COMMON_UNEXPECTED_EXCEPTION, "", "", e.getMessage(), e);
        }
    }
    return value;
}
3.68
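Note: a self-contained sketch of the same bounded-wait pattern used by execute above; the pool and the task here are illustrative stand-ins for workersThreadPool and the real Callable.

import java.util.concurrent.*;

public class BoundedCallDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<String> future = pool.submit(() -> "config-value");
        String value;
        try {
            // wait at most 500 ms; on timeout fall back to null, mirroring
            // how execute() swallows the exception and returns null
            value = future.get(500, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            value = null;
        }
        System.out.println(value);
        pool.shutdown();
    }
}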
hbase_FSDataInputStreamWrapper_close
/** Close stream(s) if necessary. */
@Override
public void close() {
    if (!doCloseStreams) {
        return;
    }
    updateInputStreamStatistics(this.streamNoFsChecksum);
    // we do not care about the close exception as it is for reading, no data loss issue.
    Closeables.closeQuietly(streamNoFsChecksum);

    updateInputStreamStatistics(stream);
    Closeables.closeQuietly(stream);
}
3.68
morf_SqlDialect_prepareBooleanParameter
/**
 * Overridable behaviour for mapping a boolean parameter to a prepared statement.
 *
 * @param statement The statement.
 * @param boolVal The boolean value.
 * @param parameter The parameter to map to.
 * @throws SQLException If an exception occurs setting the parameter.
 */
protected void prepareBooleanParameter(NamedParameterPreparedStatement statement, Boolean boolVal, SqlParameter parameter) throws SQLException {
    if (boolVal == null) {
        statement.setObject(parameter, null);
    } else {
        statement.setBoolean(parameter, boolVal);
    }
}
3.68
hbase_AggregateImplementation_getMedian
/**
 * Gives a List containing sum of values and sum of weights. It is computed for the combination of
 * column family and column qualifier(s) in the given row range as defined in the Scan object. In
 * its current implementation, it takes one column family and two column qualifiers. The first
 * qualifier is for values column and the second qualifier (optional) is for weight column.
 */
@Override
public void getMedian(RpcController controller, AggregateRequest request,
        RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumWeights = null, tempVal = null, tempWeight = null;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] valQualifier = null, weightQualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            valQualifier = qualifiers.pollFirst();
            // if weighted median is requested, get qualifier for the weight column
            weightQualifier = qualifiers.pollLast();
        }
        List<Cell> results = new ArrayList<>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            tempWeight = null;
            hasMoreRows = scanner.next(results);
            int listSize = results.size();
            for (int i = 0; i < listSize; i++) {
                Cell kv = results.get(i);
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv)));
                if (weightQualifier != null) {
                    tempWeight =
                        ci.add(tempWeight, ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv)));
                }
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumWeights = ci.add(sumWeights, tempWeight);
        } while (hasMoreRows);
        ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
        S s = sumWeights == null ? ci.castToReturnType(ci.getMinValue()) : sumWeights;
        ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
        AggregateResponse.Builder pair = AggregateResponse.newBuilder();
        pair.addFirstPart(first_sumVal);
        pair.addFirstPart(first_sumWeights);
        response = pair.build();
    } catch (IOException e) {
        CoprocessorRpcUtils.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            IOUtils.closeQuietly(scanner);
        }
    }
    done.run(response);
}
3.68
AreaShop_GeneralRegion_getMinimumPoint
/**
 * Get the minimum corner of the region.
 * @return Vector
 */
public Vector getMinimumPoint() {
    return plugin.getWorldGuardHandler().getMinimumPoint(getRegion());
}
3.68
framework_UIProvider_getPushTransport
/**
 * Finds the {@link Transport} to use for a specific UI. If no transport is
 * defined, <code>null</code> is returned.
 * <p>
 * The default implementation uses the @{@link Push} annotation if it's
 * defined for the UI class.
 *
 * @param event
 *            the UI create event with information about the UI and the
 *            current request.
 * @return the transport type to use, or <code>null</code> if the default
 *         transport type should be used
 */
public Transport getPushTransport(UICreateEvent event) {
    Push push = getAnnotationFor(event.getUIClass(), Push.class);
    if (push == null) {
        return null;
    } else {
        return push.transport();
    }
}
3.68
querydsl_GroupByBuilder_list
/**
 * Get the results as a list
 *
 * @param expression projection
 * @return new result transformer
 */
public <V> ResultTransformer<List<V>> list(FactoryExpression<V> expression) {
    final FactoryExpression<V> transformation = FactoryExpressionUtils.wrap(expression);
    List<Expression<?>> args = transformation.getArgs();
    return new GroupByList<K, V>(key, args.toArray(new Expression<?>[0])) {
        @Override
        protected V transform(Group group) {
            // XXX Isn't group.toArray() suitable here?
            List<Object> args = new ArrayList<Object>(groupExpressions.size() - 1);
            for (int i = 1; i < groupExpressions.size(); i++) {
                args.add(group.getGroup(groupExpressions.get(i)));
            }
            return transformation.newInstance(args.toArray());
        }
    };
}
3.68
hadoop_FutureDataInputStreamBuilderImpl_builder
/**
 * Get the builder.
 * This must be used after the constructor has been invoked to create
 * the actual builder: it allows for subclasses to do things after
 * construction.
 *
 * @return FutureDataInputStreamBuilder.
 */
public FutureDataInputStreamBuilder builder() {
    return getThisBuilder();
}
3.68
framework_FocusableFlowPanel_addBlurHandler
/*
 * (non-Javadoc)
 *
 * @see
 * com.google.gwt.event.dom.client.HasBlurHandlers#addBlurHandler(com.google
 * .gwt.event.dom.client.BlurHandler)
 */
@Override
public HandlerRegistration addBlurHandler(BlurHandler handler) {
    return addDomHandler(handler, BlurEvent.getType());
}
3.68
flink_FineGrainedSlotManager_start
/**
 * Starts the slot manager with the given leader id and resource manager actions.
 *
 * @param newResourceManagerId to use for communication with the task managers
 * @param newMainThreadExecutor to use to run code in the ResourceManager's main thread
 * @param newResourceAllocator to use for resource (de-)allocations
 * @param newResourceEventListener to use for notifications about resource events
 * @param newBlockedTaskManagerChecker to query whether a task manager is blocked
 */
@Override
public void start(
        ResourceManagerId newResourceManagerId,
        Executor newMainThreadExecutor,
        ResourceAllocator newResourceAllocator,
        ResourceEventListener newResourceEventListener,
        BlockedTaskManagerChecker newBlockedTaskManagerChecker) {
    LOG.info("Starting the slot manager.");

    resourceManagerId = Preconditions.checkNotNull(newResourceManagerId);
    mainThreadExecutor = Preconditions.checkNotNull(newMainThreadExecutor);
    resourceAllocator = Preconditions.checkNotNull(newResourceAllocator);
    resourceEventListener = Preconditions.checkNotNull(newResourceEventListener);
    slotStatusSyncer.initialize(
            taskManagerTracker, resourceTracker, resourceManagerId, mainThreadExecutor);
    blockedTaskManagerChecker = Preconditions.checkNotNull(newBlockedTaskManagerChecker);

    started = true;

    if (resourceAllocator.isSupported()) {
        clusterReconciliationCheck =
                scheduledExecutor.scheduleWithFixedDelay(
                        () -> mainThreadExecutor.execute(this::checkClusterReconciliation),
                        0L,
                        taskManagerTimeout.toMilliseconds(),
                        TimeUnit.MILLISECONDS);
    }

    registerSlotManagerMetrics();
}
3.68
flink_NettyPartitionRequestClient_validateClientAndIncrementReferenceCounter
/**
 * Validate the client and increment the reference counter.
 *
 * <p>Note: the reference counter has to be incremented before returning the instance of this
 * client to ensure correct closing logic.
 *
 * @return whether this client can be used.
 */
boolean validateClientAndIncrementReferenceCounter() {
    if (!clientHandler.hasChannelError()) {
        return closeReferenceCounter.incrementAndGet() > 0;
    }
    return false;
}
3.68
hadoop_FederationMembershipStateStoreInputValidator_checkSubClusterState
/**
 * Validate if the SubCluster State is present or not.
 *
 * @param state the state of the subcluster to be verified
 * @throws FederationStateStoreInvalidInputException if the SubCluster State
 *             is invalid
 */
private static void checkSubClusterState(SubClusterState state)
        throws FederationStateStoreInvalidInputException {
    // check sub-cluster state is not empty
    if (state == null) {
        String message = "Missing SubCluster State information."
                + " Please try again by specifying SubCluster State information.";
        LOG.warn(message);
        throw new FederationStateStoreInvalidInputException(message);
    }
}
3.68
hadoop_ManifestCommitter_setupJob
/**
 * Set up a job through a {@link SetupJobStage}.
 * @param jobContext Context of the job whose output is being written.
 * @throws IOException IO Failure.
 */
@Override
public void setupJob(final JobContext jobContext) throws IOException {
    ManifestCommitterConfig committerConfig = enterCommitter(false, jobContext);
    StageConfig stageConfig =
        committerConfig
            .createStageConfig()
            .withOperations(createManifestStoreOperations())
            .build();
    // set up the job.
    new SetupJobStage(stageConfig)
        .apply(committerConfig.getCreateJobMarker());
    logCommitterStatisticsAtDebug();
}
3.68
hudi_DateTimeUtils_microsToInstant
/**
 * Converts provided microseconds (from epoch) to {@link Instant}
 */
public static Instant microsToInstant(long microsFromEpoch) {
    long epochSeconds = microsFromEpoch / (1_000_000L);
    long nanoAdjustment = (microsFromEpoch % (1_000_000L)) * 1_000L;
    return Instant.ofEpochSecond(epochSeconds, nanoAdjustment);
}
3.68
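Note: a worked example for microsToInstant above. Instant.ofEpochSecond normalizes a negative nano adjustment, which is why the same seconds/remainder split also works for pre-epoch timestamps.

import java.time.Instant;

public class MicrosToInstantDemo {
    public static void main(String[] args) {
        long micros = 1_500_000L; // 1.5 s after the epoch
        long epochSeconds = micros / 1_000_000L;              // 1
        long nanoAdjustment = (micros % 1_000_000L) * 1_000L; // 500_000_000
        System.out.println(Instant.ofEpochSecond(epochSeconds, nanoAdjustment));
        // 1970-01-01T00:00:01.500Z

        long preEpoch = -1_500_000L; // 1.5 s before the epoch
        System.out.println(Instant.ofEpochSecond(
                preEpoch / 1_000_000L, (preEpoch % 1_000_000L) * 1_000L));
        // 1969-12-31T23:59:58.500Z
    }
}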
framework_VTabsheetPanel_insert
/**
 * Inserts a widget before the specified index.
 *
 * @param w
 *            the widget to be inserted
 * @param beforeIndex
 *            the index before which it will be inserted
 * @throws IndexOutOfBoundsException
 *             if <code>beforeIndex</code> is out of range
 */
public void insert(Widget w, int beforeIndex) {
    Element el = createContainerElement();
    DOM.insertChild(getElement(), el, beforeIndex);
    super.insert(w, el, beforeIndex, false);
}
3.68
flink_PredefinedOptions_getValue
/**
 * Get an option value according to the pre-defined values. If not defined, return the default
 * value.
 *
 * @param option the option.
 * @param <T> the option value type.
 * @return the value if defined, otherwise return the default value.
 */
@Nullable
@SuppressWarnings("unchecked")
<T> T getValue(ConfigOption<T> option) {
    Object value = options.get(option.key());
    if (value == null) {
        value = option.defaultValue();
    }
    if (value == null) {
        return null;
    }
    return (T) value;
}
3.68
streampipes_SpOpcUaConfigExtractor_extractAdapterConfig
/**
 * Creates {@link OpcUaAdapterConfig} instance in accordance with the given
 * {@link org.apache.streampipes.sdk.extractor.StaticPropertyExtractor}.
 *
 * @param extractor extractor for user inputs
 * @return {@link OpcUaAdapterConfig} instance based on information from {@code extractor}
 */
public static OpcUaAdapterConfig extractAdapterConfig(IStaticPropertyExtractor extractor) {
    var config = extractSharedConfig(extractor, new OpcUaAdapterConfig());
    boolean usePullMode = extractor.selectedAlternativeInternalId(ADAPTER_TYPE.name())
            .equals(PULL_MODE.name());
    if (usePullMode) {
        Integer pullIntervalSeconds =
                extractor.singleValueParameter(PULLING_INTERVAL.name(), Integer.class);
        config.setPullIntervalMilliSeconds(pullIntervalSeconds);
    }
    return config;
}
3.68
hadoop_S3ARemoteObjectReader_read
/**
 * Starts reading at {@code offset} and reads up to {@code size} bytes into {@code buffer}.
 *
 * @param buffer the buffer into which data is returned
 * @param offset the absolute offset into the underlying file where reading starts.
 * @param size the number of bytes to be read.
 *
 * @return number of bytes actually read.
 * @throws IOException if there is an error reading from the file.
 *
 * @throws IllegalArgumentException if buffer is null.
 * @throws IllegalArgumentException if offset is outside of the range [0, file size].
 * @throws IllegalArgumentException if size is zero or negative.
 */
public int read(ByteBuffer buffer, long offset, int size) throws IOException {
    Validate.checkNotNull(buffer, "buffer");
    Validate.checkWithinRange(offset, "offset", 0, this.remoteObject.size());
    Validate.checkPositiveInteger(size, "size");

    if (this.closed) {
        return -1;
    }

    int reqSize = (int) Math.min(size, this.remoteObject.size() - offset);
    return readOneBlockWithRetries(buffer, offset, reqSize);
}
3.68
hadoop_EmptyIOStatistics_getInstance
/**
 * Get the single instance of this class.
 * @return a shared, empty instance.
 */
public static IOStatistics getInstance() {
    return INSTANCE;
}
3.68
hbase_Scan_resetMvccReadPoint
/**
 * Set the mvcc read point to -1 which means do not use it.
 */
Scan resetMvccReadPoint() {
    return setMvccReadPoint(-1L);
}
3.68
zxing_ITFReader_decodeMiddle
/**
 * @param row row of black/white values to search
 * @param payloadStart offset of start pattern
 * @param payloadEnd offset of end pattern
 * @param resultString {@link StringBuilder} to append decoded chars to
 * @throws NotFoundException if decoding could not complete successfully
 */
private static void decodeMiddle(BitArray row,
                                 int payloadStart,
                                 int payloadEnd,
                                 StringBuilder resultString) throws NotFoundException {

    // Digits are interleaved in pairs - 5 black lines for one digit, and the
    // 5 interleaved white lines for the second digit.
    // Therefore, need to scan 10 lines and then
    // split these into two arrays
    int[] counterDigitPair = new int[10];
    int[] counterBlack = new int[5];
    int[] counterWhite = new int[5];

    while (payloadStart < payloadEnd) {
        // Get 10 runs of black/white.
        recordPattern(row, payloadStart, counterDigitPair);
        // Split them into each array
        for (int k = 0; k < 5; k++) {
            int twoK = 2 * k;
            counterBlack[k] = counterDigitPair[twoK];
            counterWhite[k] = counterDigitPair[twoK + 1];
        }

        int bestMatch = decodeDigit(counterBlack);
        resultString.append((char) ('0' + bestMatch));
        bestMatch = decodeDigit(counterWhite);
        resultString.append((char) ('0' + bestMatch));

        for (int counterDigit : counterDigitPair) {
            payloadStart += counterDigit;
        }
    }
}
3.68
hadoop_AzureBlobFileSystem_getFileBlockLocations
/**
 * Return an array containing hostnames, offset and size of
 * portions of the given file. For ABFS we'll just lie and give
 * fake hosts to make sure we get many splits in MR jobs.
 */
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file,
        long start, long len) {
    if (file == null) {
        return null;
    }

    if ((start < 0) || (len < 0)) {
        throw new IllegalArgumentException("Invalid start or len parameter");
    }

    if (file.getLen() < start) {
        return new BlockLocation[0];
    }

    final String blobLocationHost = abfsStore.getAbfsConfiguration().getAzureBlockLocationHost();

    final String[] name = {blobLocationHost};
    final String[] host = {blobLocationHost};
    long blockSize = file.getBlockSize();
    if (blockSize <= 0) {
        throw new IllegalArgumentException(
                "The block size for the given file is not a positive number: " + blockSize);
    }
    int numberOfLocations = (int) (len / blockSize) + ((len % blockSize == 0) ? 0 : 1);
    BlockLocation[] locations = new BlockLocation[numberOfLocations];
    for (int i = 0; i < locations.length; i++) {
        long currentOffset = start + (i * blockSize);
        long currentLength = Math.min(blockSize, start + len - currentOffset);
        locations[i] = new BlockLocation(name, host, currentOffset, currentLength);
    }

    return locations;
}
3.68
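Note: the split count in getFileBlockLocations above is a ceiling division (len / blockSize rounded up), with the final block truncated to the remaining length. A standalone check of that arithmetic; the values are illustrative.

public class SplitCountDemo {
    public static void main(String[] args) {
        long len = 250;       // bytes requested
        long blockSize = 100; // fake block size
        // ceiling division: 250 / 100 -> 2, plus 1 for the 50-byte remainder
        int numberOfLocations = (int) (len / blockSize) + ((len % blockSize == 0) ? 0 : 1);
        System.out.println(numberOfLocations); // 3
        // last block length: min(blockSize, start + len - offset)
        long start = 0;
        long lastOffset = start + (numberOfLocations - 1) * blockSize; // 200
        System.out.println(Math.min(blockSize, start + len - lastOffset)); // 50
    }
}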
flink_TriggerSavepointMode_isTerminalMode
/** Whether the operation will result in a globally terminal job status. */
public boolean isTerminalMode() {
    return this == CANCEL_WITH_SAVEPOINT || this == TERMINATE_WITH_SAVEPOINT;
}
3.68
hadoop_DefaultDataType_getValue
/**
 * Get the value of the attribute.
 */
@Override
public String getValue() {
    return value;
}
3.68
hadoop_NodeStatus_newInstance
/**
 * Create a new {@code NodeStatus}.
 * @param nodeId Identifier for this node.
 * @param responseId Identifier for the response.
 * @param containerStatuses Status of the containers running in this node.
 * @param keepAliveApplications Applications to keep alive.
 * @param nodeHealthStatus Health status of the node.
 * @param containersUtilization Utilization of the containers in this node.
 * @param nodeUtilization Utilization of the node.
 * @param increasedContainers Containers whose resource has been increased.
 * @return New {@code NodeStatus} with the provided information.
 */
public static NodeStatus newInstance(NodeId nodeId, int responseId,
        List<ContainerStatus> containerStatuses,
        List<ApplicationId> keepAliveApplications,
        NodeHealthStatus nodeHealthStatus,
        ResourceUtilization containersUtilization,
        ResourceUtilization nodeUtilization,
        List<Container> increasedContainers) {
    NodeStatus nodeStatus = Records.newRecord(NodeStatus.class);
    nodeStatus.setResponseId(responseId);
    nodeStatus.setNodeId(nodeId);
    nodeStatus.setContainersStatuses(containerStatuses);
    nodeStatus.setKeepAliveApplications(keepAliveApplications);
    nodeStatus.setNodeHealthStatus(nodeHealthStatus);
    nodeStatus.setContainersUtilization(containersUtilization);
    nodeStatus.setNodeUtilization(nodeUtilization);
    nodeStatus.setIncreasedContainers(increasedContainers);
    return nodeStatus;
}
3.68
morf_XmlDataSetProducer_columnNames
/**
 * @see org.alfasoftware.morf.metadata.Index#columnNames()
 */
@Override
public List<String> columnNames() {
    return columnNames;
}
3.68
morf_AddTableFrom_accept
/**
 * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor)
 */
@Override
public void accept(SchemaChangeVisitor visitor) {
    visitor.visit(this);
}
3.68
MagicPlugin_MageDataStore_load
/**
 * Load a single mage by id.
 *
 * <p>If there is no data for this mage, a new empty record should be returned.
 *
 * <p>If the provided callback is non-null, it should be called on completion.
 */
default void load(String id, MageDataCallback callback, boolean lock) {
    load(id, callback);
}
3.68
framework_LoginForm_login
/**
 * Handles the login.
 * <p>
 * In deferred mode, this method is called after the dummy POST request that
 * triggers the password manager has been completed. In direct mode (the
 * default setting), it is called directly when the user hits the enter key
 * or clicks on the login button. In the latter case, you cannot change the
 * URL in the method or the password manager will not be triggered.
 */
private void login() {
    Map<String, String> params = new HashMap<>();
    params.put("username", getUsernameField().getValue());
    params.put("password", getPasswordField().getValue());
    LoginEvent event = new LoginEvent(LoginForm.this, params);
    fireEvent(event);
}
3.68
hbase_FsDelegationToken_getUserToken
/** Returns the delegation token acquired, or null in case it was not acquired */
public Token<?> getUserToken() {
    return userToken;
}
3.68
flink_OptimizedPlan_getAllNodes
/**
 * Gets all the nodes from this OptimizedPlan.
 *
 * @return All nodes.
 */
public Collection<PlanNode> getAllNodes() {
    return allNodes;
}
3.68
framework_WebBrowser_isWindowsPhone
/**
 * Tests whether the user is using Windows Phone.
 *
 * @return true if the user is using Windows Phone, false if the user is not
 *         using Windows Phone or if no information on the browser is
 *         present
 * @since 7.3.2
 */
public boolean isWindowsPhone() {
    return browserDetails.isWindowsPhone();
}
3.68
hbase_Segment_getCellSet
/** Returns a set of all cells in the segment */
protected CellSet getCellSet() {
    return cellSet.get();
}
3.68
hbase_ProcedurePrepareLatch_getNoopLatch
/**
 * Returns the singleton latch which does nothing.
 */
public static ProcedurePrepareLatch getNoopLatch() {
    return noopLatch;
}
3.68
framework_ContainerHierarchicalWrapper_removeItemSetChangeListener
/*
 * Removes an Item set change listener from the object. Don't add a JavaDoc
 * comment here, we use the default documentation from implemented
 * interface.
 */
@Override
public void removeItemSetChangeListener(
        Container.ItemSetChangeListener listener) {
    if (container instanceof Container.ItemSetChangeNotifier) {
        ((Container.ItemSetChangeNotifier) container)
                .removeItemSetChangeListener(
                        new PiggybackListener(listener));
    }
}
3.68
hbase_FSDataInputStreamWrapper_checksumOk
/** Report that checksum was ok, so we may ponder going back to HBase checksum. */
public void checksumOk() {
    if (this.useHBaseChecksumConfigured && !this.useHBaseChecksum
            && (this.hbaseChecksumOffCount.getAndDecrement() < 0)) {
        // The stream we need is already open (because we were using HBase checksum in the past).
        assert this.streamNoFsChecksum != null;
        this.useHBaseChecksum = true;
    }
}
3.68
pulsar_OwnedBundle_handleUnloadRequest
/**
 * It unloads the bundle by closing all topics concurrently under this bundle.
 *
 * <pre>
 * a. disable bundle ownership in memory and not in zk
 * b. close all the topics concurrently
 * c. delete ownership znode from zookeeper.
 * </pre>
 *
 * @param pulsar
 * @param timeout
 *            timeout for unloading bundle. It doesn't throw exception if it times out while
 *            waiting on closing all topics
 * @param timeoutUnit
 * @throws Exception
 */
public CompletableFuture<Void> handleUnloadRequest(PulsarService pulsar, long timeout, TimeUnit timeoutUnit) {
    return handleUnloadRequest(pulsar, timeout, timeoutUnit, true);
}
3.68
hadoop_OBSDataBlocks_hasRemaining
/**
 * Check if there is data left.
 *
 * @return true if there is data remaining in the buffer.
 */
public synchronized boolean hasRemaining() {
    return byteBuffer.hasRemaining();
}
3.68
morf_OracleDialect_addTableFromStatements
/**
 * @see org.alfasoftware.morf.jdbc.SqlDialect#addTableFromStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.sql.SelectStatement)
 */
@Override
public Collection<String> addTableFromStatements(Table table, SelectStatement selectStatement) {
    return internalAddTableFromStatements(table, selectStatement, false);
}
3.68
hbase_Permission_getVersion
/** Returns the object version number */
@Override
public byte getVersion() {
    return VERSION;
}
3.68
querydsl_AbstractHibernateQuery_createQuery
/**
 * Expose the original Hibernate query for the given projection
 *
 * @return query
 */
public Query createQuery() {
    return createQuery(getMetadata().getModifiers(), false);
}
3.68
framework_VLayoutSlot_setCaption
/**
 * Sets the caption element for this slot.
 *
 * @param caption
 *            the caption element, can be {@code null}
 */
public void setCaption(VCaption caption) {
    if (this.caption != null) {
        this.caption.removeFromParent();
    }
    this.caption = caption;
    if (caption != null) {
        // Physical attach.
        DOM.insertBefore(wrapper, caption.getElement(), widget.getElement());
        Style style = caption.getElement().getStyle();
        style.setPosition(Position.ABSOLUTE);
        style.setTop(0, Unit.PX);
    }
}
3.68
hbase_NamespaceDescriptor_getConfigurationValue
/**
 * Getter for accessing the configuration value by key
 */
public String getConfigurationValue(String key) {
    return configuration.get(key);
}
3.68
hbase_IndividualBytesFieldCell_getTypeByte
// 5) Type
@Override
public byte getTypeByte() {
    return type;
}
3.68
druid_StringUtils_subString
/**
 * Example: subString("abcdc","a","c",true)="bcd"
 *
 * @param src the source string
 * @param start the start marker; if null, starts from index 0
 * @param to the end marker; if null, ends at src.length()
 * @param toLast if true, the end index is src.lastIndexOf(to) instead of src.indexOf(to)
 * @return the substring between the markers, or null if a marker is missing or out of order
 */
public static String subString(String src, String start, String to, boolean toLast) {
    int indexFrom = start == null ? 0 : src.indexOf(start);
    int indexTo;
    if (to == null) {
        indexTo = src.length();
    } else {
        indexTo = toLast ? src.lastIndexOf(to) : src.indexOf(to);
    }
    if (indexFrom < 0 || indexTo < 0 || indexFrom > indexTo) {
        return null;
    }

    if (null != start) {
        indexFrom += start.length();
    }

    return src.substring(indexFrom, indexTo);
}
3.68
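Note: usage of subString above, restated as a runnable snippet. The method body is copied locally so the example is self-contained; in real use it lives in druid's StringUtils.

public class SubStringDemo {
    public static void main(String[] args) {
        // the documented example: between the first "a" and the last "c"
        System.out.println(subString("abcdc", "a", "c", true));  // bcd
        // non-greedy end: stops at the first "c"
        System.out.println(subString("abcdc", "a", "c", false)); // b
        // an absent marker yields null rather than an exception
        System.out.println(subString("abcdc", "x", "c", false)); // null
    }

    static String subString(String src, String start, String to, boolean toLast) {
        int indexFrom = start == null ? 0 : src.indexOf(start);
        int indexTo = to == null ? src.length()
                : (toLast ? src.lastIndexOf(to) : src.indexOf(to));
        if (indexFrom < 0 || indexTo < 0 || indexFrom > indexTo) {
            return null;
        }
        if (start != null) {
            indexFrom += start.length();
        }
        return src.substring(indexFrom, indexTo);
    }
}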
framework_VComboBox_isWaitingForInitialData
/**
 * For internal use only - this method will be removed in the future.
 *
 * @return true if the combo box is waiting for initial data from the
 *         server, false otherwise
 */
public boolean isWaitingForInitialData() {
    return initialData;
}
3.68
hbase_Segment_getComparator
/**
 * Returns the Cell comparator used by this segment
 * @return the Cell comparator used by this segment
 */
protected CellComparator getComparator() {
    return comparator;
}
3.68
flink_TieredStorageNettyServiceImpl_trySetChannel
/**
 * Try to set input channel.
 *
 * @param channelIndex the index of channel.
 * @param channelSupplier supplier to provide channel.
 * @return true if the channel is successfully set, or false if the registration already has
 *     an input channel.
 */
public boolean trySetChannel(int channelIndex, Supplier<InputChannel> channelSupplier) {
    if (isChannelSet()) {
        return false;
    }
    checkArgument(channelIndex >= 0);
    this.channelIndex = channelIndex;
    this.channelSupplier = checkNotNull(channelSupplier);
    tryCreateNettyConnectionReader();
    return true;
}
3.68
hbase_TableHFileArchiveTracker_getMonitor
/** Returns the tracker for which tables should be archived. */
public final HFileArchiveTableMonitor getMonitor() {
    return this.monitor;
}
3.68
hbase_MetaTableAccessor_fullScanRegions
/**
 * Performs a full scan of <code>hbase:meta</code> for regions.
 * @param connection connection we're using
 */
public static List<Result> fullScanRegions(Connection connection) throws IOException {
    return fullScan(connection, QueryType.REGION);
}
3.68
flink_FileInputFormat_acceptFile
/**
 * A simple hook to filter files and directories from the input. The method may be overridden.
 * Hadoop's FileInputFormat has a similar mechanism and applies the same filters by default.
 *
 * @param fileStatus The file status to check.
 * @return true, if the given file or directory is accepted
 */
public boolean acceptFile(FileStatus fileStatus) {
    final String name = fileStatus.getPath().getName();
    return !name.startsWith("_")
            && !name.startsWith(".")
            && !filesFilter.filterPath(fileStatus.getPath());
}
3.68
framework_VCalendarPanel_setDate
/**
 * Sets the date of the panel.
 *
 * @param currentDate
 *            The date to set
 */
public void setDate(Date currentDate) {

    // Check that we are not re-rendering an already active date
    if (currentDate == value && currentDate != null) {
        return;
    }

    boolean currentDateWasAdjusted = false;
    // Check that selected date is inside the allowed range
    if (currentDate != null && !isDateInsideRange(currentDate, resolution)) {
        currentDate = adjustDateToFitInsideRange(currentDate);
        currentDateWasAdjusted = true;
    }

    Date oldDisplayedMonth = displayedMonth;
    value = currentDate;

    // If current date was adjusted, we will not select any date,
    // since that will look like a date is selected. Instead we
    // only focus on the adjusted value
    if (value == null || currentDateWasAdjusted) {
        // If ranges enabled, we may need to focus on a different view to
        // potentially not get stuck
        if (rangeStart != null || rangeEnd != null) {
            Date dateThatFitsInsideRange = adjustDateToFitInsideRange(
                    new Date());
            focusedDate = new FocusedDate(dateThatFitsInsideRange.getYear(),
                    dateThatFitsInsideRange.getMonth(),
                    dateThatFitsInsideRange.getDate());
            displayedMonth = new FocusedDate(
                    dateThatFitsInsideRange.getYear(),
                    dateThatFitsInsideRange.getMonth(), 1);
            // value was adjusted. Set selected to null to not cause
            // confusion, but this is only needed (and allowed) when we have
            // a day resolution
            if (getResolution().getCalendarField() >= Resolution.DAY
                    .getCalendarField()) {
                value = null;
            }
        } else {
            focusedDate = displayedMonth = null;
        }
    } else {
        focusedDate = new FocusedDate(value.getYear(), value.getMonth(),
                value.getDate());
        displayedMonth = new FocusedDate(value.getYear(), value.getMonth(),
                1);
    }

    // Re-render calendar if the displayed month is changed,
    // or if a time selector is needed but does not exist.
    if ((isTimeSelectorNeeded() && time == null)
            || oldDisplayedMonth == null || value == null
            || oldDisplayedMonth.getYear() != value.getYear()
            || oldDisplayedMonth.getMonth() != value.getMonth()) {
        renderCalendar();
    } else {
        focusDay(focusedDate);
        selectFocused();
        if (isTimeSelectorNeeded()) {
            time.updateTimes();
        }
    }

    if (!hasFocus) {
        focusDay(null);
    }
}
3.68
flink_DataSet_sortPartition
/**
 * Locally sorts the partitions of the DataSet on the extracted key in the specified order. The
 * DataSet can be sorted on multiple values by returning a tuple from the KeySelector.
 *
 * <p>Note that no additional sort keys can be appended to KeySelector sort keys. To sort the
 * partitions by multiple values using a KeySelector, the KeySelector must return a tuple
 * consisting of the values.
 *
 * @param keyExtractor The KeySelector function which extracts the key values from the DataSet
 *     on which the DataSet is sorted.
 * @param order The order in which the DataSet is sorted.
 * @return The DataSet with sorted local partitions.
 */
public <K> SortPartitionOperator<T> sortPartition(KeySelector<T, K> keyExtractor, Order order) {
    final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
    return new SortPartitionOperator<>(
            this,
            new Keys.SelectorFunctionKeys<>(clean(keyExtractor), getType(), keyType),
            order,
            Utils.getCallLocationName());
}
3.68
hadoop_FilterFileSystem_getUri
/** Returns a URI whose scheme and authority identify this FileSystem. */
@Override
public URI getUri() {
    return fs.getUri();
}
3.68
morf_RemoveTable_apply
/**
 * {@inheritDoc}
 *
 * @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
 */
@Override
public Schema apply(Schema schema) {
    if (!schema.tableExists(tableToBeRemoved.getName().toUpperCase())) {
        throw new IllegalArgumentException("Cannot remove table [" + tableToBeRemoved.getName() + "] as it does not exist.");
    }
    return new FilteredSchema(schema, tableToBeRemoved.getName());
}
3.68
hadoop_LoggedJob_setUnknownAttribute
// for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
    if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
        alreadySeenAnySetterAttributes.add(attributeName);
        System.err.println("In LoggedJob, we saw the unknown attribute "
                + attributeName + ".");
    }
}
3.68
hbase_MetricsMaster_incrementQuotaObserverTime
/**
 * Sets the execution time of a period of the QuotaObserverChore.
 * @param executionTime The execution time in milliseconds.
 * @see MetricsMasterQuotaSource#incrementSpaceQuotaObserverChoreTime(long)
 */
public void incrementQuotaObserverTime(final long executionTime) {
    masterQuotaSource.incrementSpaceQuotaObserverChoreTime(executionTime);
}
3.68
flink_AbstractFsCheckpointStorageAccess_encodePathAsReference
/**
 * Encodes the given path as a reference in bytes. The path is encoded as a UTF-8 string and
 * prefixed with a magic number.
 *
 * @param path The path to encode.
 * @return The location reference.
 */
public static CheckpointStorageLocationReference encodePathAsReference(Path path) {
    byte[] refBytes = path.toString().getBytes(StandardCharsets.UTF_8);
    byte[] bytes = new byte[REFERENCE_MAGIC_NUMBER.length + refBytes.length];

    System.arraycopy(REFERENCE_MAGIC_NUMBER, 0, bytes, 0, REFERENCE_MAGIC_NUMBER.length);
    System.arraycopy(refBytes, 0, bytes, REFERENCE_MAGIC_NUMBER.length, refBytes.length);

    return new CheckpointStorageLocationReference(bytes);
}
3.68
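Note: the inverse of the layout built by encodePathAsReference above — verify the magic prefix, then UTF-8-decode the remainder. A self-contained sketch; the 4-byte magic value here is made up for illustration and is not Flink's actual REFERENCE_MAGIC_NUMBER.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class PathReferenceDemo {
    // illustrative magic number, not Flink's real constant
    static final byte[] MAGIC = {(byte) 0xCA, (byte) 0xFE, 0x42, 0x17};

    static byte[] encode(String path) {
        byte[] ref = path.getBytes(StandardCharsets.UTF_8);
        byte[] out = new byte[MAGIC.length + ref.length];
        System.arraycopy(MAGIC, 0, out, 0, MAGIC.length);
        System.arraycopy(ref, 0, out, MAGIC.length, ref.length);
        return out;
    }

    static String decode(byte[] bytes) {
        // reject references that do not start with the magic prefix
        if (bytes.length < MAGIC.length
                || !Arrays.equals(Arrays.copyOfRange(bytes, 0, MAGIC.length), MAGIC)) {
            throw new IllegalArgumentException("not an encoded path reference");
        }
        return new String(bytes, MAGIC.length, bytes.length - MAGIC.length,
                StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        System.out.println(decode(encode("s3://bucket/checkpoints/job-1")));
    }
}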
hbase_QuotaFilter_setTableFilter
/**
 * Set the table filter regex
 * @param regex the table filter
 * @return the quota filter object
 */
public QuotaFilter setTableFilter(final String regex) {
    this.tableRegex = regex;
    hasFilters |= StringUtils.isNotEmpty(regex);
    return this;
}
3.68
hbase_ByteBufferUtils_toBytes
/**
 * Copy the given number of bytes from specified offset into a new byte[]
 * @param buffer input bytebuffer to read
 * @param offset input offset where Bytes are
 * @param length the number of bytes to read
 * @return a new byte[] containing the bytes in the specified range
 */
public static byte[] toBytes(ByteBuffer buffer, int offset, int length) {
    byte[] output = new byte[length];
    for (int i = 0; i < length; i++) {
        output[i] = buffer.get(offset + i);
    }
    return output;
}
3.68
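Note: toBytes above uses absolute ByteBuffer.get(int), so the copy leaves the buffer's position and limit untouched, unlike a relative bulk get(byte[]). A small standalone check of that behavior:

import java.nio.ByteBuffer;
import java.util.Arrays;

public class AbsoluteGetDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.wrap(new byte[] {10, 20, 30, 40, 50});
        byte[] output = new byte[3];
        // absolute gets: buffer.position() stays 0 throughout
        for (int i = 0; i < output.length; i++) {
            output[i] = buffer.get(1 + i); // copy offsets 1..3
        }
        System.out.println(Arrays.toString(output)); // [20, 30, 40]
        System.out.println(buffer.position());       // 0 - unchanged
    }
}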
hudi_SparkInternalSchemaConverter_convertStringType
/**
 * Convert String type to other Type.
 * Now only supports String -> Decimal/Date.
 * Notice: this conversion may fail!
 * TODO: support more types
 */
private static boolean convertStringType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
    if (newType instanceof DateType || newType instanceof DecimalType) {
        for (int i = 0; i < len; i++) {
            if (oldV.isNullAt(i)) {
                newV.putNull(i);
                continue;
            }
            // to do support rebaseDate
            if (newType instanceof DateType) {
                int days = org.apache.spark.sql.catalyst.util.DateTimeUtils.fromJavaDate(Date.valueOf(oldV.getUTF8String(i).toString()));
                newV.putInt(i, days);
            } else if (newType instanceof DecimalType) {
                DecimalType decimalType = (DecimalType) newType;
                java.math.BigDecimal bigDecimal = new java.math.BigDecimal(oldV.getUTF8String(i).toString().trim());
                Decimal sparkDecimal = Decimal.apply(bigDecimal);
                sparkDecimal.changePrecision(decimalType.precision(), decimalType.scale());
                newV.putDecimal(i, sparkDecimal, decimalType.precision());
            }
        }
        return true;
    }
    return false;
}
3.68
framework_VAbstractSplitPanel_onMouseUp
/**
 * Handle concluding the content resize when dragging the splitter with a
 * mouse. This should only be called if resizing has been successfully
 * initialized via a mouse down event.
 *
 * @param event
 *            the browser event
 */
public void onMouseUp(Event event) {
    DOM.releaseCapture(getElement());
    hideDraggingCurtain();
    stopResize();
    resizing = false;
    if (!WidgetUtil.isTouchEvent(event)) {
        onMouseMove(event);
    }
    fireEvent(new SplitterMoveEvent(this));
}
3.68
hadoop_PendingSet_readObject
/**
 * Deserialize via java Serialization API: deserialize the instance
 * and then call {@link #validate()} to verify that the deserialized
 * data is valid.
 * @param inStream input stream
 * @throws IOException IO problem or validation failure
 * @throws ClassNotFoundException reflection problems
 */
private void readObject(ObjectInputStream inStream) throws IOException,
        ClassNotFoundException {
    inStream.defaultReadObject();
    validate();
}
3.68
flink_FlinkMatchers_containsMessage
/** Checks for a {@link Throwable} that contains the expected error message. */
public static Matcher<Throwable> containsMessage(String errorMessage) {
    return new ContainsMessageMatcher(errorMessage);
}
3.68
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_deserializeFromString
/**
 * Create interceptor from config string. The string should be in
 * replaceresolvedpath:wordToReplace:replaceString
 * Note that we'll assume there's no ':' in the regex for the moment.
 *
 * @param serializedString the serialized interceptor configuration
 * @return Interceptor instance or null on bad config.
 */
public static RegexMountPointResolvedDstPathReplaceInterceptor deserializeFromString(String serializedString) {
    String[] strings = serializedString
        .split(Character.toString(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP));
    // We'll assume there's no ':' in the regex for the moment.
    if (strings.length != 3) {
        return null;
    }
    // The format should be like replaceresolvedpath:wordToReplace:replaceString
    return new RegexMountPointResolvedDstPathReplaceInterceptor(strings[1], strings[2]);
}
3.68
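Note: the three-part config format parsed by deserializeFromString above, shown standalone. The ':' separator and the sample values are illustrative; the real separator comes from RegexMountPoint.INTERCEPTOR_INTERNAL_SEP.

public class InterceptorConfigDemo {
    public static void main(String[] args) {
        String serialized = "replaceresolvedpath:-:_"; // type : wordToReplace : replaceString
        String[] parts = serialized.split(":");
        if (parts.length != 3) {
            System.out.println("bad config -> null interceptor");
            return;
        }
        // parts[0] names the interceptor type; parts[1] and parts[2]
        // become the search word and its replacement
        System.out.println("replace '" + parts[1] + "' with '" + parts[2] + "'");
    }
}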
pulsar_Schema_supportSchemaVersioning
/**
 * Returns whether this schema supports versioning.
 *
 * <p>Most schema implementations don't really support schema versioning, or it just doesn't
 * make any sense to support schema versioning (e.g. primitive schemas). Only schemas returning
 * {@link GenericRecord} should support schema versioning.
 *
 * <p>If a schema implementation returns <tt>false</tt>, it should implement {@link #decode(byte[])};
 * if a schema implementation returns <tt>true</tt>, it should implement {@link #decode(byte[], byte[])}
 * instead.
 *
 * @return true if this schema implementation supports schema versioning; otherwise returns false.
 */
default boolean supportSchemaVersioning() {
    return false;
}
3.68
framework_JsonEncoder_encode
/**
 * Encode a value to a JSON representation for transport from the client to
 * the server.
 *
 * @param value
 *            value to convert
 * @param type
 *            type information, not needed for all encoding tasks, such as
 *            encoding a String
 * @param connection
 *            application connection providing the context, not needed for
 *            all encoding tasks, such as encoding a String
 * @return JSON representation of the value
 */
@SuppressWarnings("unchecked")
public static JsonValue encode(Object value, Type type,
        ApplicationConnection connection) {
    if (null == value) {
        return Json.createNull();
    } else if (value instanceof JsonValue) {
        return (JsonValue) value;
    } else if (value instanceof String[]) {
        String[] array = (String[]) value;
        JsonArray jsonArray = Json.createArray();
        for (int i = 0; i < array.length; ++i) {
            jsonArray.set(i, array[i]);
        }
        return jsonArray;
    } else if (value instanceof String) {
        return Json.create((String) value);
    } else if (value instanceof Boolean) {
        return Json.create((Boolean) value);
    } else if (value instanceof Number) {
        return Json.create(((Number) value).doubleValue());
    } else if (value instanceof Character) {
        return Json.create(String.valueOf(value));
    } else if (value instanceof Object[] && type == null) {
        // Non-legacy arrays handled by generated serializer
        return encodeLegacyObjectArray((Object[]) value, connection);
    } else if (value instanceof Enum) {
        return encodeEnum((Enum<?>) value);
    } else if (value instanceof Map) {
        return encodeMap((Map<Object, Object>) value, type, connection);
    } else if (value instanceof Connector) {
        Connector connector = (Connector) value;
        return Json.create(connector.getConnectorId());
    } else if (value instanceof Collection) {
        return encodeCollection((Collection<?>) value, type, connection);
    } else if (value instanceof UidlValue) {
        return encodeVariableChange((UidlValue) value, connection);
    } else {
        // First see if there's a custom serializer
        JSONSerializer<Object> serializer = null;
        if (type != null) {
            serializer = (JSONSerializer<Object>) type.findSerializer();
            if (serializer != null) {
                return serializer.serialize(value, connection);
            }
        }

        String transportType = getTransportType(value);
        if (transportType != null) {
            // Send the string value for remaining legacy types
            return Json.create(String.valueOf(value));
        } else if (type != null) {
            // And finally try using bean serialization logic
            try {
                JsArrayObject<Property> properties = type
                        .getPropertiesAsArray();

                JsonObject jsonObject = Json.createObject();

                int size = properties.size();
                for (int i = 0; i < size; i++) {
                    Property property = properties.get(i);
                    Object propertyValue = property.getValue(value);
                    Type propertyType = property.getType();
                    JsonValue encodedPropertyValue = encode(propertyValue,
                            propertyType, connection);
                    jsonObject.put(property.getName(),
                            encodedPropertyValue);
                }
                return jsonObject;

            } catch (NoDataException e) {
                throw new RuntimeException(
                        "Can not encode " + type.getSignature(), e);
            }
        } else {
            throw new RuntimeException("Can't encode " + value.getClass()
                    + " without type information");
        }
    }
}
3.68
hbase_ConfigurationManager_deregisterObserver
/**
 * Deregister an observer class
 * @param observer to be deregistered.
 */
public void deregisterObserver(ConfigurationObserver observer) {
    synchronized (configurationObservers) {
        configurationObservers.remove(observer);
        if (observer instanceof PropagatingConfigurationObserver) {
            ((PropagatingConfigurationObserver) observer).deregisterChildren(this);
        }
    }
}
3.68
querydsl_QueryResults_getTotal
/**
 * Get the total number of results
 *
 * @return total rows
 */
public long getTotal() {
    return total;
}
3.68
flink_SourceStreamTask_getCompletionFuture
/**
 * @return future that is completed once this thread completes. If this task {@link
 *     #isFailing()} and this thread is not alive (e.g. not started) returns a normally
 *     completed future.
 */
CompletableFuture<Void> getCompletionFuture() {
    return isFailing() && !isAlive()
            ? CompletableFuture.completedFuture(null)
            : completionFuture;
}
3.68
hbase_MasterObserver_preRevoke
/**
 * Called before revoking user permissions.
 * @param ctx the coprocessor instance's environment
 * @param userPermission the user and permissions
 */
default void preRevoke(ObserverContext<MasterCoprocessorEnvironment> ctx,
        UserPermission userPermission) throws IOException {
}
3.68
hbase_Result_listCells
/**
 * Create a sorted list of the Cell's in this result. Since HBase 0.20.5 this is equivalent to
 * raw().
 * @return sorted List of Cells; can be null if no cells in the result
 */
public List<Cell> listCells() {
    return isEmpty() ? null : Arrays.asList(rawCells());
}
3.68
flink_RequestedLocalProperties_isTrivial
/** Checks, if the properties in this object are trivial, i.e. only standard values. */
public boolean isTrivial() {
    return ordering == null && this.groupedFields == null;
}
3.68
framework_FilesystemContainer_isRoot
/*
 * Tests if the specified Item is the root of the filesystem. Don't add a
 * JavaDoc comment here, we use the default documentation from implemented
 * interface.
 */
@Override
public boolean isRoot(Object itemId) {
    if (!(itemId instanceof File)) {
        return false;
    }
    for (File root : roots) {
        if (root.equals(itemId)) {
            return true;
        }
    }
    return false;
}
3.68
hbase_TableDescriptorBuilder_modifyColumnFamily
/**
 * Modifies the existing column family.
 * @param family to update
 * @return this (for chained invocation)
 */
public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
        throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (!hasColumnFamily(family.getName())) {
        throw new IllegalArgumentException(
                "Column family '" + family.getNameAsString() + "' does not exist");
    }
    return putColumnFamily(family);
}
3.68
flink_MetricRegistryImpl_startQueryService
/**
 * Initializes the MetricQueryService.
 *
 * @param rpcService RpcService to create the MetricQueryService on
 * @param resourceID resource ID used to disambiguate the actor name
 */
public void startQueryService(RpcService rpcService, ResourceID resourceID) {
    synchronized (lock) {
        Preconditions.checkState(
                !isShutdown(), "The metric registry has already been shut down.");

        try {
            metricQueryServiceRpcService = rpcService;
            queryService =
                    MetricQueryService.createMetricQueryService(
                            rpcService, resourceID, maximumFramesize);
            queryService.start();
        } catch (Exception e) {
            LOG.warn(
                    "Could not start MetricDumpActor. No metrics will be submitted to the WebInterface.",
                    e);
        }
    }
}
3.68
hudi_TimelineUtils_getCommitsTimelineAfter
/**
 * Returns a Hudi timeline with commits after the given instant time (exclusive).
 *
 * @param metaClient {@link HoodieTableMetaClient} instance.
 * @param exclusiveStartInstantTime Start instant time (exclusive).
 * @param lastMaxCompletionTime Last commit max completion time synced
 * @return Hudi timeline.
 */
public static HoodieTimeline getCommitsTimelineAfter(
        HoodieTableMetaClient metaClient, String exclusiveStartInstantTime, Option<String> lastMaxCompletionTime) {
    HoodieDefaultTimeline writeTimeline = metaClient.getActiveTimeline().getWriteTimeline();

    HoodieDefaultTimeline timeline = writeTimeline.isBeforeTimelineStarts(exclusiveStartInstantTime)
            ? metaClient.getArchivedTimeline(exclusiveStartInstantTime).mergeTimeline(writeTimeline)
            : writeTimeline;

    HoodieDefaultTimeline timelineSinceLastSync = (HoodieDefaultTimeline) timeline.getCommitsTimeline()
            .findInstantsAfter(exclusiveStartInstantTime, Integer.MAX_VALUE);

    if (lastMaxCompletionTime.isPresent()) {
        // Get 'hollow' instants that have less instant time than exclusiveStartInstantTime but with
        // greater commit completion time
        HoodieDefaultTimeline hollowInstantsTimeline = (HoodieDefaultTimeline) timeline.getCommitsTimeline()
                .filter(s -> compareTimestamps(s.getTimestamp(), LESSER_THAN, exclusiveStartInstantTime))
                .filter(s -> compareTimestamps(s.getCompletionTime(), GREATER_THAN, lastMaxCompletionTime.get()));
        if (!hollowInstantsTimeline.empty()) {
            return timelineSinceLastSync.mergeTimeline(hollowInstantsTimeline);
        }
    }

    return timelineSinceLastSync;
}
3.68
hbase_MetaTableAccessor_scanMeta
/**
 * Performs a scan of META table.
 * @param connection connection we're using
 * @param startRow Where to start the scan. Pass null if want to begin scan at first row.
 * @param stopRow Where to stop the scan. Pass null if want to scan all rows from the start one
 * @param type scanned part of meta
 * @param maxRows maximum rows to return
 * @param visitor Visitor invoked against each row.
 */
public static void scanMeta(Connection connection, @Nullable final byte[] startRow,
        @Nullable final byte[] stopRow, QueryType type, int maxRows,
        final ClientMetaTableAccessor.Visitor visitor) throws IOException {
    scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor);
}
3.68
hmily_RpcMediator_acquire
/**
 * Acquire hmily transaction context.
 *
 * @param <T> the type parameter
 * @param rpcAcquire the rpc acquire
 * @param clazz the clazz
 * @return the hmily transaction context
 */
public <T> T acquire(final RpcAcquire rpcAcquire, final Class<T> clazz) {
    T hmilyTransactionContext = null;
    final String context = rpcAcquire.acquire(CommonConstant.HMILY_TRANSACTION_CONTEXT);
    if (StringUtils.isNoneBlank(context)) {
        hmilyTransactionContext = GsonUtils.getInstance().fromJson(context, clazz);
    }
    return hmilyTransactionContext;
}
3.68
querydsl_AliasFactory_getCurrentAndReset
/**
 * Get the current thread bound expression and reset it
 *
 * @param <A>
 * @return expression
 */
@Nullable
public <A extends Expression<?>> A getCurrentAndReset() {
    A rv = this.getCurrent();
    reset();
    return rv;
}
3.68
streampipes_AbstractConfigurablePipelineElementBuilder_requiredHtmlInputParameter
/**
 * Defines a text-based configuration parameter provided by pipeline developers at pipeline authoring time. The
 * input field generated in the StreamPipes UI allows entering HTML content (and an HTML Wysiwyg editor will be
 * rendered).
 *
 * @param label The {@link org.apache.streampipes.sdk.helpers.Label} that describes why this parameter is needed in a
 *              user-friendly manner.
 * @return this
 */
public K requiredHtmlInputParameter(Label label) {
    FreeTextStaticProperty fsp =
            new FreeTextStaticProperty(label.getInternalId(), label.getLabel(), label.getDescription());
    fsp.setMultiLine(true);
    fsp.setHtmlAllowed(true);
    fsp.setPlaceholdersSupported(true);
    this.staticProperties.add(fsp);
    return me();
}
3.68
hibernate-validator_ValidationConfigStaxBuilder_build
/**
 * Returns an enum set with the executable types corresponding to the given
 * XML configuration, considering the special elements
 * {@link ExecutableType#ALL} and {@link ExecutableType#NONE}.
 *
 * @return An enum set representing the given executable types, or {@code null} if none were given.
 */
public EnumSet<ExecutableType> build() {
    return executableTypes.isEmpty() ? null : executableTypes;
}
3.68
hbase_RequestConverter_buildTruncateTableRequest
/**
 * Creates a protocol buffer TruncateTableRequest
 * @param tableName name of table to truncate
 * @param preserveSplits True if the splits should be preserved
 * @param nonceGroup the nonce group of the operation
 * @param nonce the nonce of the operation
 * @return a TruncateTableRequest
 */
public static TruncateTableRequest buildTruncateTableRequest(final TableName tableName,
        final boolean preserveSplits, final long nonceGroup, final long nonce) {
    TruncateTableRequest.Builder builder = TruncateTableRequest.newBuilder();
    builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
    builder.setPreserveSplits(preserveSplits);
    builder.setNonceGroup(nonceGroup);
    builder.setNonce(nonce);
    return builder.build();
}
3.68
framework_GridLayout_setRows
/**
 * Sets the number of rows in the grid. The number of rows can not be
 * reduced if there are any areas that would be outside of the shrunk grid.
 *
 * @param rows
 *            the new number of rows in the grid.
 */
public void setRows(int rows) {
    // Check the param
    if (rows < 1) {
        throw new IllegalArgumentException(
                "The number of columns and rows in the grid must be at least 1");
    }

    // In case of no change
    if (getRows() == rows) {
        return;
    }

    // Checks for overlaps
    if (getRows() > rows) {
        for (Entry<Connector, ChildComponentData> entry : getState().childData
                .entrySet()) {
            if (entry.getValue().row2 >= rows) {
                throw new OutOfBoundsException(new Area(entry.getValue(),
                        (Component) entry.getKey()));
            }
        }
    }

    // Forget expands for removed rows
    if (rows < getRows()) {
        for (int i = rows; i < getRows(); i++) {
            rowExpandRatio.remove(i);
            getState().explicitRowRatios.remove(i);
        }
    }

    getState().rows = rows;
}
3.68
hbase_QuotaRetriever_open
/**
 * Open a QuotaRetriever with the specified filter.
 * @param conf Configuration object to use.
 * @param filter the QuotaFilter
 * @return the QuotaRetriever
 * @throws IOException if a remote or network exception occurs
 */
public static QuotaRetriever open(final Configuration conf, final QuotaFilter filter)
        throws IOException {
    Scan scan = QuotaTableUtil.makeScan(filter);
    QuotaRetriever scanner = new QuotaRetriever();
    scanner.init(conf, scan);
    return scanner;
}
3.68