Dataset columns:
  name          string  (lengths 12 to 178)
  code_snippet  string  (lengths 8 to 36.5k)
  score         float64 (range 3.26 to 3.68)
hbase_TableDescriptorBuilder_toCoprocessorDescriptor
/** * This method is mostly intended for internal use. However, it is also relied on by hbase-shell * for backwards compatibility. */ private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) { Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec); if (matcher.matches()) { // jar file path can be empty if the cp class can be loaded // from class loader. String path = matcher.group(1).trim().isEmpty() ? null : matcher.group(1).trim(); String className = matcher.group(2).trim(); if (className.isEmpty()) { return Optional.empty(); } String priorityStr = matcher.group(3).trim(); int priority = priorityStr.isEmpty() ? Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr); String cfgSpec = null; try { cfgSpec = matcher.group(4); } catch (IndexOutOfBoundsException ex) { // ignore } Map<String, String> ourConf = new TreeMap<>(); if (cfgSpec != null && !cfgSpec.trim().equals("|")) { cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1); Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec); while (m.find()) { ourConf.put(m.group(1), m.group(2)); } } return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) .setPriority(priority).setProperties(ourConf).build()); } return Optional.empty(); }
3.68
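The spec string parsed above follows HBase's coprocessor table-attribute format; a hedged illustration of the four capture groups, with hypothetical values:

    // Illustrative only -- format: [jarPath] | className | [priority] | [key=value,key=value]
    String spec = "hdfs:///user/hbase/coproc.jar|org.example.MyObserver|1001|arg1=v1,arg2=v2";
    // group(1) jar path  -> "hdfs:///user/hbase/coproc.jar" (empty => load from classloader)
    // group(2) class     -> "org.example.MyObserver"
    // group(3) priority  -> 1001 (empty => Coprocessor.PRIORITY_USER)
    // group(4) config    -> "|arg1=v1,arg2=v2" -> {arg1=v1, arg2=v2}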
framework_VAbstractCalendarPanel_focusNextYear
/** * Moves the focused date by the given number of years. */ @SuppressWarnings("deprecation") private void focusNextYear(int years) { if (focusedDate == null) { return; } Date nextYearDate = (Date) focusedDate.clone(); nextYearDate.setYear(nextYearDate.getYear() + years); // Do not focus if not inside range if (!isDateInsideRange(nextYearDate, getResolution(this::isYear))) { return; } // If we add one year, but have to roll back a bit, fit it // into the calendar. Also the months have to be changed if (!isDateInsideRange(nextYearDate, getResolution(this::isDay))) { nextYearDate = adjustDateToFitInsideRange(nextYearDate); focusedDate.setYear(nextYearDate.getYear()); focusedDate.setMonth(nextYearDate.getMonth()); focusedDate.setDate(nextYearDate.getDate()); displayedMonth.setYear(nextYearDate.getYear()); displayedMonth.setMonth(nextYearDate.getMonth()); } else { int currentMonth = focusedDate.getMonth(); focusedDate.setYear(focusedDate.getYear() + years); displayedMonth.setYear(displayedMonth.getYear() + years); /* * If the focused date was a leap day (Feb 29), the new date becomes * Mar 1 if the new year is not also a leap year. Set it to Feb 28 * instead. */ if (focusedDate.getMonth() != currentMonth) { focusedDate.setDate(0); } } renderCalendar(); }
3.68
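The setDate(0) trick above relies on java.util.Date normalization; a minimal sketch with hypothetical dates:

    import java.util.Date;

    // java.util.Date.setDate(0) rolls back to the last day of the previous month,
    // which is how the snippet turns a normalized "Mar 1" back into "Feb 28".
    Date d = new Date(120, 1, 29); // Feb 29 2020 (deprecated API, as in the snippet)
    d.setYear(d.getYear() + 1);    // normalizes to Mar 1 2021 (2021 is not a leap year)
    d.setDate(0);                  // day 0 of March == Feb 28 2021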
hadoop_IOStatisticsStoreImpl_aggregate
/** * Aggregate those statistics which the store is tracking; * ignore the rest. * * @param source statistics; may be null * @return true if a statistics reference was supplied/aggregated. */ @Override public synchronized boolean aggregate( @Nullable final IOStatistics source) { if (source == null) { return false; } // counters: addition Map<String, Long> sourceCounters = source.counters(); counterMap.entrySet(). forEach(e -> { Long sourceValue = lookupQuietly(sourceCounters, e.getKey()); if (sourceValue != null) { e.getValue().addAndGet(sourceValue); } }); // gauge: add positive values only Map<String, Long> sourceGauges = source.gauges(); gaugeMap.entrySet().forEach(e -> { Long sourceGauge = lookupQuietly(sourceGauges, e.getKey()); if (sourceGauge != null && sourceGauge > 0) { e.getValue().addAndGet(sourceGauge); } }); // min: min of current and source Map<String, Long> sourceMinimums = source.minimums(); minimumMap.entrySet().forEach(e -> { Long sourceValue = lookupQuietly(sourceMinimums, e.getKey()); if (sourceValue != null) { AtomicLong dest = e.getValue(); dest.set(aggregateMinimums(dest.get(), sourceValue)); } }); // max: max of current and source Map<String, Long> sourceMaximums = source.maximums(); maximumMap.entrySet().forEach(e -> { Long sourceValue = lookupQuietly(sourceMaximums, e.getKey()); if (sourceValue != null) { AtomicLong dest = e.getValue(); dest.set(aggregateMaximums(dest.get(), sourceValue)); } }); // the most complex Map<String, MeanStatistic> sourceMeans = source.meanStatistics(); meanStatisticMap.entrySet().forEach(e -> { MeanStatistic current = e.getValue(); MeanStatistic sourceValue = lookupQuietly( sourceMeans, e.getKey()); if (sourceValue != null) { current.add(sourceValue); } }); return true; }
3.68
AreaShop_WorldGuardHandler7_beta_2_buildDomain
/** * Build a DefaultDomain from a RegionAccessSet. * @param regionAccessSet RegionAccessSet to read * @return DefaultDomain containing the entities from the RegionAccessSet */ private DefaultDomain buildDomain(RegionAccessSet regionAccessSet) { DefaultDomain owners = new DefaultDomain(); for(String playerName : regionAccessSet.getPlayerNames()) { owners.addPlayer(playerName); } for(UUID uuid : regionAccessSet.getPlayerUniqueIds()) { owners.addPlayer(uuid); } for(String group : regionAccessSet.getGroupNames()) { owners.addGroup(group); } return owners; }
3.68
querydsl_GeometryExpression_distanceSphere
// TODO maybe move out public NumberExpression<Double> distanceSphere(Expression<? extends Geometry> geometry) { return Expressions.numberOperation(Double.class, SpatialOps.DISTANCE_SPHERE, mixin, geometry); }
3.68
hudi_StreamWriteFunction_getBucketID
/** * Returns the bucket ID for the given record {@code record}. */ private String getBucketID(HoodieRecord<?> record) { final String fileId = record.getCurrentLocation().getFileId(); return StreamerUtil.generateBucketKey(record.getPartitionPath(), fileId); }
3.68
streampipes_StatementHandler_extendPreparedStatement
/** * Extends the given SQL fragments with one column/placeholder pair per (possibly nested) event field. * @param dbDescription description of the target database (engine, quoting rules) * @param event map of field names to values; nested maps are flattened recursively * @param s1 builder receiving the column-name list * @param s2 builder receiving the matching "?" placeholder list * @param index index of the next prepared-statement parameter * @param preProperty prefix prepended to nested field names (joined with '_') * @param prefix separator written before each entry (", " after the first) * @return the next free parameter index */ public int extendPreparedStatement(DbDescription dbDescription, final Map<String, Object> event, StringBuilder s1, StringBuilder s2, int index, String preProperty, String prefix) throws SpRuntimeException { for (Map.Entry<String, Object> pair : event.entrySet()) { if (pair.getValue() instanceof Map) { index = extendPreparedStatement(dbDescription, (Map<String, Object>) pair.getValue(), s1, s2, index, pair.getKey() + "_", prefix); } else { SQLStatementUtils.checkRegEx(pair.getKey(), "Columnname", dbDescription); eventParameterMap.put(pair.getKey(), new ParameterInformation(index, DbDataTypeFactory.getFromObject(pair.getValue(), dbDescription.getEngine()))); if (dbDescription.isColumnNameQuoted()) { s1.append(prefix).append("\"").append(preProperty).append(pair.getKey()).append("\""); } else { s1.append(prefix).append(preProperty).append(pair.getKey()); } s2.append(prefix).append("?"); index++; } prefix = ", "; } return index; }
3.68
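Tracing the recursion above with a hypothetical nested event (map iteration order is illustrative):

    // event = {temp=21.3, pos={lat=48.1, lon=11.6}}, preProperty = "", prefix = ""
    // After the call, with unquoted column names:
    //   s1   -> "temp, pos_lat, pos_lon"  // nested keys flattened with '_'
    //   s2   -> "?, ?, ?"                 // one placeholder per leaf value
    //   return value = index + 3          // one parameter slot per leaf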
hbase_ColumnSchemaModel___setCompression
/** * @param value the desired value of the COMPRESSION attribute */ public void __setCompression(String value) { attrs.put(COMPRESSION, value); }
3.68
hbase_KeyValue_getTagsLength
/** Return the total length of the tag bytes */ @Override public int getTagsLength() { int tagsLen = this.length - (getKeyLength() + getValueLength() + KEYVALUE_INFRASTRUCTURE_SIZE); if (tagsLen > 0) { // There are some tag bytes in the byte[], so subtract the 2 bytes that were added to denote // the tags length tagsLen -= TAGS_LENGTH_SIZE; } return tagsLen; }
3.68
hbase_SimpleRegionNormalizer_isLargeEnoughForMerge
/** * Return {@code true} when {@code regionInfo} has a size that is sufficient to be considered for * a merge operation, {@code false} otherwise. * <p> * Callers beware: for safe concurrency, be sure to pass in the local instance of * {@link NormalizerConfiguration}, don't use {@code this}'s instance. */ private boolean isLargeEnoughForMerge(final NormalizerConfiguration normalizerConfiguration, final NormalizeContext ctx, final RegionInfo regionInfo) { return getRegionSizeMB(regionInfo) >= normalizerConfiguration.getMergeMinRegionSizeMb(ctx); }
3.68
hbase_Scan_getMaxVersions
/** Returns the max number of versions to fetch */ public int getMaxVersions() { return this.maxVersions; }
3.68
flink_FactoryUtil_createModuleFactoryHelper
/** * Creates a utility that helps validating options for a {@link ModuleFactory}. * * <p>Note: This utility checks for left-over options in the final step. */ public static ModuleFactoryHelper createModuleFactoryHelper( ModuleFactory factory, ModuleFactory.Context context) { return new ModuleFactoryHelper(factory, context); }
3.68
hbase_SimplePositionedMutableByteRange_putVLong
// Copied from com.google.protobuf.CodedOutputStream v2.5.0 writeRawVarint64 @Override public int putVLong(int index, long val) { int rPos = 0; while (true) { if ((val & ~0x7F) == 0) { bytes[offset + index + rPos] = (byte) val; break; } else { bytes[offset + index + rPos] = (byte) ((val & 0x7F) | 0x80); val >>>= 7; } rPos++; } clearHashCache(); return rPos + 1; }
3.68
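A self-contained sketch of the LEB128/varint encoding that putVLong copies from protobuf (a standalone helper, not the HBase API):

    import java.util.Arrays;

    static byte[] encodeVarint64(long val) {
      byte[] buf = new byte[10]; // a 64-bit varint needs at most 10 bytes
      int i = 0;
      while ((val & ~0x7FL) != 0) {
        buf[i++] = (byte) ((val & 0x7F) | 0x80); // low 7 bits, continuation bit set
        val >>>= 7;
      }
      buf[i++] = (byte) val; // final byte, continuation bit clear
      return Arrays.copyOf(buf, i);
    }
    // encodeVarint64(300) -> [0xAC, 0x02], matching the loop in putVLong above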
framework_Query_getOffset
/** * Gets the first index of items to fetch. The offset is only used when * fetching items, but not when counting the number of available items. * * @return offset for data request */ public int getOffset() { return offset; }
3.68
hadoop_CacheDirectiveStats_setFilesNeeded
/** * Sets the files needed by this directive. * @param filesNeeded The number of files needed * @return This builder, for call chaining. */ public Builder setFilesNeeded(long filesNeeded) { this.filesNeeded = filesNeeded; return this; }
3.68
querydsl_JTSGeometryExpression_eq
/* (non-Javadoc) * @see com.querydsl.core.types.dsl.SimpleExpression#eq(com.querydsl.core.types.Expression) */ @Override public BooleanExpression eq(Expression<? super T> right) { return Expressions.booleanOperation(SpatialOps.EQUALS, mixin, right); }
3.68
framework_DragSourceExtension_initListeners
/** * Initializes dragstart and -end event listeners for this drag source to * capture the active drag source for the UI. */ private void initListeners() { // Set current extension as active drag source in the UI dragStartListenerHandle = addDragStartListener( event -> getUI().setActiveDragSource(this)); // Remove current extension as active drag source from the UI dragEndListenerHandle = addDragEndListener( event -> getUI().setActiveDragSource(null)); }
3.68
hadoop_BalanceProcedure_nextProcedure
/** * Get the next procedure. */ public String nextProcedure() { return nextProcedure; }
3.68
flink_CopyOnWriteSkipListStateMap_releaseAllResource
/** Release all resource used by the map. */ private void releaseAllResource() { long node = levelIndexHeader.getNextNode(0); while (node != NIL_NODE) { long nextNode = helpGetNextNode(node, 0); long valuePointer = SkipListUtils.helpGetValuePointer(node, spaceAllocator); spaceAllocator.free(node); SkipListUtils.removeAllValues(valuePointer, spaceAllocator); node = nextNode; } totalSize = 0; logicallyRemovedNodes.clear(); }
3.68
hbase_StoreFileWriter_trackTimestamps
/** * Record the earliest Put timestamp. If the timeRangeTracker is not set, update TimeRangeTracker * to include the timestamp of this key */ public void trackTimestamps(final Cell cell) { if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); } timeRangeTracker.includeTimestamp(cell); }
3.68
flink_DefaultCompletedCheckpointStoreUtils_getMaximumNumberOfRetainedCheckpoints
/** * Extracts maximum number of retained checkpoints configuration from the passed {@link * Configuration}. The default value is used as a fallback if the configured value is not larger * than {@code 0}. * * @param config The configuration that is accessed. * @param logger The {@link Logger} used for exposing the warning if the configured value is * invalid. * @return The maximum number of retained checkpoints based on the passed {@code Configuration}. */ public static int getMaximumNumberOfRetainedCheckpoints(Configuration config, Logger logger) { final int maxNumberOfCheckpointsToRetain = config.getInteger(CheckpointingOptions.MAX_RETAINED_CHECKPOINTS); if (maxNumberOfCheckpointsToRetain <= 0) { // warn and use the default value if the setting in // state.checkpoints.max-retained-checkpoints is not greater than 0. logger.warn( "The setting for '{} : {}' is invalid. Using default value of {}", CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.key(), maxNumberOfCheckpointsToRetain, CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue()); return CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue(); } return maxNumberOfCheckpointsToRetain; }
3.68
hbase_ScannerContext_setFields
/** * Set all fields together. */ void setFields(int batch, long dataSize, long heapSize, long blockSize) { setBatch(batch); setDataSize(dataSize); setHeapSize(heapSize); setBlockSize(blockSize); }
3.68
hmily_EventData_setValue
/** * Sets value. * * @param value the value */ public void setValue(final Object value) { this.value = value; }
3.68
flink_CompositeBuffer_getFullBufferData
/** * Returns the full buffer data in one piece of {@link MemorySegment}. If there are multiple * partial buffers, the partial data will be copied to the given target {@link MemorySegment}. */ public Buffer getFullBufferData(MemorySegment segment) { checkState(!partialBuffers.isEmpty()); checkState(currentLength <= segment.size()); if (partialBuffers.size() == 1) { return partialBuffers.get(0); } int offset = 0; for (Buffer buffer : partialBuffers) { segment.put(offset, buffer.getNioBufferReadable(), buffer.readableBytes()); offset += buffer.readableBytes(); } recycleBuffer(); return new NetworkBuffer( segment, BufferRecycler.DummyBufferRecycler.INSTANCE, dataType, isCompressed, currentLength); }
3.68
flink_CheckpointStorageLoader_fromConfig
/** * Loads the checkpoint storage from the configuration, from the parameter * 'state.checkpoint-storage', as defined in {@link CheckpointingOptions#CHECKPOINT_STORAGE}. * * <p>The implementation can be specified either via their shortcut name, or via the class name * of a {@link CheckpointStorageFactory}. If a CheckpointStorageFactory class name is specified, * the factory is instantiated (via its zero-argument constructor) and its {@link * CheckpointStorageFactory#createFromConfig(ReadableConfig, ClassLoader)} method is called. * * <p>Recognized shortcut names are '{@value #JOB_MANAGER_STORAGE_NAME}', and '{@value * #FILE_SYSTEM_STORAGE_NAME}'. * * @param config The configuration to load the checkpoint storage from * @param classLoader The class loader that should be used to load the checkpoint storage * @param logger Optionally, a logger to log actions to (may be null) * @return The instantiated checkpoint storage. * @throws DynamicCodeLoadingException Thrown if a checkpoint storage factory is configured and * the factory class was not found or the factory could not be instantiated * @throws IllegalConfigurationException May be thrown by the CheckpointStorageFactory when * creating / configuring the checkpoint storage in the factory */ public static Optional<CheckpointStorage> fromConfig( ReadableConfig config, ClassLoader classLoader, @Nullable Logger logger) throws IllegalStateException, DynamicCodeLoadingException { Preconditions.checkNotNull(config, "config"); Preconditions.checkNotNull(classLoader, "classLoader"); final String storageName = config.get(CheckpointingOptions.CHECKPOINT_STORAGE); if (storageName == null) { if (logger != null) { logger.debug( "The configuration {} has not been set in the current" + " session's flink-conf.yaml. Falling back to a default CheckpointStorage" + " type. Users are strongly encouraged to explicitly set this configuration" + " so they understand how their applications are checkpointing" + " snapshots for fault-tolerance.", CheckpointingOptions.CHECKPOINT_STORAGE.key()); } return Optional.empty(); } switch (storageName.toLowerCase()) { case JOB_MANAGER_STORAGE_NAME: return Optional.of(createJobManagerCheckpointStorage(config, classLoader, logger)); case FILE_SYSTEM_STORAGE_NAME: return Optional.of(createFileSystemCheckpointStorage(config, classLoader, logger)); default: if (logger != null) { logger.info("Loading checkpoint storage via factory '{}'", storageName); } CheckpointStorageFactory<?> factory; try { @SuppressWarnings("rawtypes") Class<? extends CheckpointStorageFactory> clazz = Class.forName(storageName, false, classLoader) .asSubclass(CheckpointStorageFactory.class); factory = clazz.newInstance(); } catch (ClassNotFoundException e) { throw new DynamicCodeLoadingException( "Cannot find configured checkpoint storage factory class: " + storageName, e); } catch (ClassCastException | InstantiationException | IllegalAccessException e) { throw new DynamicCodeLoadingException( "The class configured under '" + CheckpointingOptions.CHECKPOINT_STORAGE.key() + "' is not a valid checkpoint storage factory (" + storageName + ')', e); } return Optional.of(factory.createFromConfig(config, classLoader)); } }
3.68
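A minimal configuration sketch for driving the loader above; the option names are the standard Flink ones, while the custom factory class is hypothetical:

    Configuration config = new Configuration();
    config.set(CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem"); // shortcut name
    config.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "s3://my-bucket/checkpoints");
    // or resolve a custom factory via its fully-qualified class name:
    // config.set(CheckpointingOptions.CHECKPOINT_STORAGE, "com.example.MyStorageFactory");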
hadoop_UriUtils_maskUrlQueryParameters
/** * Generic function to mask a set of query parameters partially/fully and * return the resultant query string * @param keyValueList List of NameValuePair instances for query keys/values * @param queryParamsForFullMask values for these params will appear as "XXXX" * @param queryParamsForPartialMask values will have their trailing characters (at most * PARTIAL_MASK_VISIBLE_LEN of them) replaced with 'X' * @param queryLen to initialize StringBuilder for the masked query * @return the masked url query part */ public static String maskUrlQueryParameters(List<NameValuePair> keyValueList, Set<String> queryParamsForFullMask, Set<String> queryParamsForPartialMask, int queryLen) { StringBuilder maskedUrl = new StringBuilder(queryLen); for (NameValuePair keyValuePair : keyValueList) { String key = keyValuePair.getName(); if (key.isEmpty()) { throw new IllegalArgumentException("Query param key should not be empty"); } String value = keyValuePair.getValue(); maskedUrl.append(key); maskedUrl.append(EQUAL); if (value != null && !value.isEmpty()) { if (queryParamsForFullMask.contains(key)) { maskedUrl.append(FULL_MASK); } else if (queryParamsForPartialMask.contains(key)) { int valueLen = value.length(); int maskedLen = valueLen > PARTIAL_MASK_VISIBLE_LEN ? PARTIAL_MASK_VISIBLE_LEN : valueLen / 2; maskedUrl.append(value, 0, valueLen - maskedLen); maskedUrl.append(StringUtils.repeat(CHAR_MASK, maskedLen)); } else { // no mask maskedUrl.append(value); } } maskedUrl.append(AND_MARK); } maskedUrl.deleteCharAt(maskedUrl.length() - 1); return maskedUrl.toString(); }
3.68
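A hedged trace of the masking, assuming illustrative constant values (FULL_MASK = "XXXX", CHAR_MASK = 'X', PARTIAL_MASK_VISIBLE_LEN = 4, EQUAL = '=', AND_MARK = '&'):

    // pairs: sig=abcdefgh (full mask), skoid=12345678 (partial mask), ts=2023 (no mask)
    //   sig   -> "sig=XXXX"
    //   skoid -> "skoid=1234XXXX"  // value length 8 > 4, so the trailing 4 chars are masked
    //   ts    -> "ts=2023"
    // result: "sig=XXXX&skoid=1234XXXX&ts=2023"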
framework_Heartbeat_getConnection
/** * @return the application connection */ @Deprecated protected ApplicationConnection getConnection() { return connection; }
3.68
hmily_HmilyLockManager_releaseLocks
/** * Release locks. * * @param hmilyLocks hmily locks */ public void releaseLocks(final Collection<HmilyLock> hmilyLocks) { HmilyRepositoryStorage.releaseHmilyLocks(hmilyLocks); hmilyLocks.forEach(lock -> HmilyLockCacheManager.getInstance().removeByKey(lock.getLockId())); log.debug("TAC-release-lock ::: {}", hmilyLocks); }
3.68
dubbo_ModuleConfigManager_getDefaultProvider
/** * Only allows one default ProviderConfig */ public Optional<ProviderConfig> getDefaultProvider() { List<ProviderConfig> providerConfigs = getDefaultConfigs(getConfigsMap(getTagName(ProviderConfig.class))); if (CollectionUtils.isNotEmpty(providerConfigs)) { return Optional.of(providerConfigs.get(0)); } return Optional.empty(); }
3.68
flink_TimestampData_toLocalDateTime
/** Converts this {@link TimestampData} object to a {@link LocalDateTime}. */ public LocalDateTime toLocalDateTime() { int date = (int) (millisecond / MILLIS_PER_DAY); int time = (int) (millisecond % MILLIS_PER_DAY); if (time < 0) { --date; time += MILLIS_PER_DAY; } long nanoOfDay = time * 1_000_000L + nanoOfMillisecond; LocalDate localDate = LocalDate.ofEpochDay(date); LocalTime localTime = LocalTime.ofNanoOfDay(nanoOfDay); return LocalDateTime.of(localDate, localTime); }
3.68
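A worked trace of the negative-millisecond normalization above (values hypothetical; MILLIS_PER_DAY = 86_400_000):

    // millisecond = -1, nanoOfMillisecond = 0
    //   date = (int) (-1 / 86_400_000) = 0;  time = (int) (-1 % 86_400_000) = -1
    //   time < 0  ->  date = -1, time = 86_399_999
    //   nanoOfDay = 86_399_999 * 1_000_000L
    //   -> LocalDate.ofEpochDay(-1) = 1969-12-31, LocalTime = 23:59:59.999
    // i.e. epoch millisecond -1 maps to the last millisecond of 1969-12-31 UTC.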
pulsar_SchemaHash_of
// Shouldn't be called frequently, otherwise it will cause a performance regression public static SchemaHash of(byte[] schemaBytes, SchemaType schemaType) { return new SchemaHash(hashFunction.hashBytes(schemaBytes == null ? new byte[0] : schemaBytes), schemaType); }
3.68
flink_AbstractPagedInputView_getCurrentSegment
/** * Gets the memory segment that will be used to read the next bytes from. If the segment is * exactly exhausted, meaning that the last byte read was the last byte available in the * segment, then this segment will not serve the next bytes. The segment to serve the next bytes * will be obtained through the {@link #nextSegment(MemorySegment)} method. * * @return The current memory segment. */ public MemorySegment getCurrentSegment() { return this.currentSegment; }
3.68
hadoop_VolumeFailureSummary_getEstimatedCapacityLostTotal
/** * Returns estimate of capacity lost. This is said to be an estimate, because * in some cases it's impossible to know the capacity of the volume, such as if * we never had a chance to query its capacity before the failure occurred. * * @return estimate of capacity lost in bytes */ public long getEstimatedCapacityLostTotal() { return this.estimatedCapacityLostTotal; }
3.68
framework_ScrollbarBundle_forceScrollbar
/** * Force the scrollbar to be visible with CSS. In practice, this means to * set either <code>overflow-x</code> or <code>overflow-y</code> to " * <code>scroll</code>" in the scrollbar's direction. * <p> * This method is an IE8 workaround, since it doesn't always show scrollbars * with <code>overflow: auto</code> enabled. * <p> * Firefox on the other hand loses pending scroll events when the scrollbar * is hidden, so the event must be fired manually. * <p> * When IE8 support is dropped, this should really be simplified. * * @param enable * {@code true} if the scrollbar should be forced to be visible, * {@code false} otherwise. */ protected void forceScrollbar(boolean enable) { if (enable) { root.getStyle().clearDisplay(); } else { if (BrowserInfo.get().isFirefox()) { /* * This is related to the Firefox workaround in setScrollSize * for setScrollPos(0) */ scrollEventFirer.scheduleEvent(); } root.getStyle().setDisplay(Display.NONE); } internalForceScrollbar(enable); }
3.68
hbase_ReplicationSourceLogQueue_getQueueSize
/** * Get the queue size for the given walGroupId. * @param walGroupId walGroupId */ public int getQueueSize(String walGroupId) { Queue<Path> queue = queues.get(walGroupId); if (queue == null) { return 0; } return queue.size(); }
3.68
rocketmq-connect_ProcessingContext_currentContext
/** * A helper method to set both the stage and the class. * * @param stage the stage * @param klass the class which will execute the operation in this stage. */ public void currentContext(ErrorReporter.Stage stage, Class<?> klass) { stage(stage); executingClass(klass); }
3.68
hudi_ClusteringUtils_getAllPendingClusteringPlans
/** * Get all pending clustering plans along with their instants. */ public static Stream<Pair<HoodieInstant, HoodieClusteringPlan>> getAllPendingClusteringPlans( HoodieTableMetaClient metaClient) { List<HoodieInstant> pendingReplaceInstants = metaClient.getActiveTimeline().filterPendingReplaceTimeline().getInstants(); return pendingReplaceInstants.stream().map(instant -> getClusteringPlan(metaClient, instant)) .filter(Option::isPresent).map(Option::get); }
3.68
hbase_ScheduledChore_getInitialDelay
/** Returns initial delay before executing chore in getTimeUnit() units */ public long getInitialDelay() { return initialDelay; }
3.68
hbase_ColumnSchemaModel___setBlocksize
/** * @param value the desired value of the BLOCKSIZE attribute */ public void __setBlocksize(int value) { attrs.put(BLOCKSIZE, Integer.toString(value)); }
3.68
hbase_ZKProcedureMemberRpcs_sendMemberAborted
/** * This should be called by the member and writes a serialized root-cause exception to the abort * znode. */ @Override public void sendMemberAborted(Subprocedure sub, ForeignException ee) { if (sub == null) { LOG.error("Failed due to null subprocedure", ee); return; } String procName = sub.getName(); LOG.debug("Aborting procedure (" + procName + ") in zk"); String procAbortZNode = zkController.getAbortZNode(procName); try { String source = (ee.getSource() == null) ? memberName : ee.getSource(); byte[] errorInfo = ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee)); ZKUtil.createAndFailSilent(zkController.getWatcher(), procAbortZNode, errorInfo); LOG.debug("Finished creating abort znode:" + procAbortZNode); } catch (KeeperException e) { // possible that we get this error for the procedure if we already reset the zk state, but in // that case we should still get an error for that procedure anyways zkController.logZKTree(zkController.getBaseZnode()); member.controllerConnectionFailure( "Failed to post zk node:" + procAbortZNode + " to abort procedure", e, procName); } }
3.68
framework_VCustomLayout_remove
/** Removes given widget from the layout. */ @Override public boolean remove(Widget w) { final String location = getLocation(w); if (location != null) { locationToWidget.remove(location); } final VCaptionWrapper cw = childWidgetToCaptionWrapper.get(w); if (cw != null) { childWidgetToCaptionWrapper.remove(w); return super.remove(cw); } else if (w != null) { return super.remove(w); } return false; }
3.68
morf_ChangePrimaryKeyColumns_applyChange
/** * Applies the change * @param schema The target schema * @param from the old primary key * @param to the new primary key * @return The resulting schema */ protected Schema applyChange(Schema schema, List<String> from, List<String> to) { // Construct a list of column names converted to all upper case - this is to satisfy certain databases (H2). List<String> newPrimaryKeyColumnsUpperCase = toUpperCase(to); // Does the table exist? if (!schema.tableExists(tableName)) { throw new RuntimeException("The table [" + tableName + "] does not exist."); } // Prepare a map of the columns for later re-ordering Table table = schema.getTable(tableName); ImmutableMap<String, Column> columnsMap = Maps.uniqueIndex(table.columns(), new Function<Column, String>() { @Override public String apply(Column input) { return input.getUpperCaseName(); } }); assertExistingPrimaryKey(from, table); verifyNewPrimaryKeyIsNotIndexed(table, newPrimaryKeyColumnsUpperCase); // Do the "to" primary key columns exist? List<String> allColumns = table.columns().stream().map(Column::getUpperCaseName).collect(Collectors.toList()); // Build up the columns in the correct order List<Column> newColumns = new ArrayList<>(); // Remove primaries from the full list so the non-primaries can be added afterwards List<Column> nonPrimaries = Lists.newArrayList(table.columns()); for (String newPrimaryColumn : newPrimaryKeyColumnsUpperCase) { if (allColumns.contains(newPrimaryColumn)) { Column pk = columnsMap.get(newPrimaryColumn); newColumns.add(column(pk).primaryKey()); nonPrimaries.remove(pk); } else { throw new RuntimeException("The column [" + newPrimaryColumn + "] does not exist on [" + table.getName() + "]"); } } // Add in the rest for(Column nonpk : nonPrimaries) { newColumns.add(column(nonpk).notPrimaryKey()); } return new TableOverrideSchema(schema, new AlteredTable(table, SchemaUtils.namesOfColumns(newColumns), newColumns)); }
3.68
flink_Execution_releaseAssignedResource
/** * Releases the assigned resource and completes the release future once the assigned resource * has been successfully released. * * @param cause for the resource release, null if none */ private void releaseAssignedResource(@Nullable Throwable cause) { assertRunningInJobMasterMainThread(); final LogicalSlot slot = assignedResource; if (slot != null) { ComponentMainThreadExecutor jobMasterMainThreadExecutor = getVertex().getExecutionGraphAccessor().getJobMasterMainThreadExecutor(); slot.releaseSlot(cause) .whenComplete( (Object ignored, Throwable throwable) -> { jobMasterMainThreadExecutor.assertRunningInMainThread(); if (throwable != null) { releaseFuture.completeExceptionally(throwable); } else { releaseFuture.complete(null); } }); } else { // no assigned resource --> we can directly complete the release future releaseFuture.complete(null); } }
3.68
hbase_HRegionWALFileSystem_archiveRecoveredEdits
/** * Closes and archives the specified store files from the specified family. * @param familyName Family that contains the store files * @param storeFiles set of store files to remove * @throws IOException if the archiving fails */ public void archiveRecoveredEdits(String familyName, Collection<HStoreFile> storeFiles) throws IOException { HFileArchiver.archiveRecoveredEdits(this.conf, this.fs, this.regionInfoForFs, Bytes.toBytes(familyName), storeFiles); }
3.68
hadoop_HdfsLocatedFileStatus_getLocalNameInBytes
/** * Get the Java UTF8 representation of the local name. * @return the local name in java UTF8 */ @Override public byte[] getLocalNameInBytes() { return uPath; }
3.68
hadoop_ManifestPrinter_println
/** * Print a line to the output stream. * @param format format string * @param args arguments. */ private void println(String format, Object... args) { out.format(format, args); out.println(); }
3.68
rocketmq-connect_JsonSchemaUtils_validate
/** * validate object * * @param schema * @param value * @throws JsonProcessingException * @throws ValidationException */ public static void validate(Schema schema, Object value) throws JsonProcessingException, ValidationException { Object primitiveValue = NONE_MARKER; if (isPrimitive(value)) { primitiveValue = value; } else if (value instanceof BinaryNode) { primitiveValue = ((BinaryNode) value).asText(); } else if (value instanceof BooleanNode) { primitiveValue = ((BooleanNode) value).asBoolean(); } else if (value instanceof NullNode) { primitiveValue = null; } else if (value instanceof NumericNode) { primitiveValue = ((NumericNode) value).numberValue(); } else if (value instanceof TextNode) { primitiveValue = ((TextNode) value).asText(); } if (primitiveValue != NONE_MARKER) { schema.validate(primitiveValue); } else { Object jsonObject; if (value instanceof ArrayNode) { jsonObject = OBJECT_MAPPER.treeToValue((ArrayNode) value, JSONArray.class); } else if (value instanceof JsonNode) { jsonObject = new JSONObject(OBJECT_MAPPER.writeValueAsString(value)); } else if (value.getClass().isArray()) { jsonObject = OBJECT_MAPPER.convertValue(value, JSONArray.class); } else { jsonObject = OBJECT_MAPPER.convertValue(value, JSONObject.class); } schema.validate(jsonObject); } }
3.68
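A hedged usage sketch, assuming the everit-org JSON Schema library that the signature suggests, together with Jackson node types:

    Schema schema = SchemaLoader.load(new JSONObject("{\"type\": \"string\"}"));
    JsonSchemaUtils.validate(schema, TextNode.valueOf("hello")); // passes: unwrapped to a primitive
    JsonSchemaUtils.validate(schema, IntNode.valueOf(42));       // throws ValidationException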
querydsl_AbstractMongodbQuery_setReadPreference
/** * Sets the read preference for this query * * @param readPreference read preference */ public void setReadPreference(ReadPreference readPreference) { this.readPreference = readPreference; }
3.68
hadoop_AllocateRequest_askList
/** * Set the <code>askList</code> of the request. * @see AllocateRequest#setAskList(List) * @param askList <code>askList</code> of the request * @return {@link AllocateRequestBuilder} */ @Public @Stable public AllocateRequestBuilder askList(List<ResourceRequest> askList) { allocateRequest.setAskList(askList); return this; }
3.68
flink_DefaultDelegationTokenManager_obtainDelegationTokens
/** * Obtains new tokens in a one-time fashion and leaves it up to the caller to distribute them. */ @Override public void obtainDelegationTokens(DelegationTokenContainer container) throws Exception { LOG.info("Obtaining delegation tokens"); obtainDelegationTokensAndGetNextRenewal(container); LOG.info("Delegation tokens obtained successfully"); }
3.68
hadoop_IOStatisticsSnapshot_writeObject
/** * Serialize by converting each map to a TreeMap, and saving that * to the stream. * @param s ObjectOutputStream. * @throws IOException raised on errors performing I/O. */ private synchronized void writeObject(ObjectOutputStream s) throws IOException { // Write out the core s.defaultWriteObject(); s.writeObject(new TreeMap<String, Long>(counters)); s.writeObject(new TreeMap<String, Long>(gauges)); s.writeObject(new TreeMap<String, Long>(minimums)); s.writeObject(new TreeMap<String, Long>(maximums)); s.writeObject(new TreeMap<String, MeanStatistic>(meanStatistics)); }
3.68
pulsar_MultiTopicsConsumerImpl_onTopicsExtended
// Check partitions changes of passed in topics, and subscribe new added partitions. @Override public CompletableFuture<Void> onTopicsExtended(Collection<String> topicsExtended) { CompletableFuture<Void> future = new CompletableFuture<>(); if (topicsExtended.isEmpty()) { future.complete(null); return future; } if (log.isDebugEnabled()) { log.debug("[{}] run onTopicsExtended: {}, size: {}", topic, topicsExtended.toString(), topicsExtended.size()); } List<CompletableFuture<Void>> futureList = Lists.newArrayListWithExpectedSize(topicsExtended.size()); topicsExtended.forEach(topic -> futureList.add(subscribeIncreasedTopicPartitions(topic))); FutureUtil.waitForAll(futureList) .thenAccept(finalFuture -> future.complete(null)) .exceptionally(ex -> { log.warn("[{}] Failed to subscribe increased topics partitions: {}", topic, ex.getMessage()); future.completeExceptionally(ex); return null; }); return future; }
3.68
framework_Escalator_scrollToRowAndSpacer
/** * Scrolls vertically to a row and the spacer below it. * <p> * If a spacer is not open at that index, this method behaves like * {@link #scrollToRow(int, ScrollDestination, int)} * * @since 7.5.0 * @param rowIndex * the index of the logical row to scroll to. -1 takes the * topmost spacer into account as well. * @param destination * where the row should be aligned visually after scrolling * @param padding * the number pixels to place between the scrolled-to row and the * viewport edge. * @see #scrollToRow(int, ScrollDestination, int) * @see #scrollToSpacer(int, ScrollDestination, int) * @throws IllegalArgumentException * if {@code destination} is {@link ScrollDestination#MIDDLE} * and {@code padding} is not zero; or if {@code rowIndex} is * not a valid row index, or -1; or if * {@code destination == null}; or if {@code rowIndex == -1} and * there is no spacer open at that index. */ public void scrollToRowAndSpacer(final int rowIndex, final ScrollDestination destination, final int padding) throws IllegalArgumentException { Scheduler.get().scheduleFinally(new ScheduledCommand() { @Override public void execute() { validateScrollDestination(destination, padding); if (rowIndex != -1) { verifyValidRowIndex(rowIndex); } // row range final Range rowRange; if (rowIndex != -1) { int rowTop = (int) Math.floor(body.getRowTop(rowIndex)); int rowHeight = (int) Math.ceil(body.getDefaultRowHeight()); rowRange = Range.withLength(rowTop, rowHeight); } else { rowRange = Range.withLength(0, 0); } // get spacer final SpacerContainer.SpacerImpl spacer = body.spacerContainer .getSpacer(rowIndex); if (rowIndex == -1 && spacer == null) { throw new IllegalArgumentException( "Cannot scroll to row index " + "-1, as there is no spacer open at that index."); } // make into target range final Range targetRange; if (spacer != null) { final int spacerTop = (int) Math.floor(spacer.getTop()); final int spacerHeight = (int) Math .ceil(spacer.getHeight()); Range spacerRange = Range.withLength(spacerTop, spacerHeight); targetRange = rowRange.combineWith(spacerRange); } else { targetRange = rowRange; } // get params int targetStart = targetRange.getStart(); int targetEnd = targetRange.getEnd(); double viewportStart = getScrollTop(); double viewportEnd = viewportStart + body.getHeightOfSection(); double scrollPos = getScrollPos(destination, targetStart, targetEnd, viewportStart, viewportEnd, padding); setScrollTop(scrollPos); } }); }
3.68
hudi_ClusteringUtils_getEarliestInstantToRetainForClustering
/** * Returns the earliest instant to retain. * Make sure the clustering instant won't be archived before it is cleaned, and the earliest inflight clustering instant has a previous commit. * * @param activeTimeline The active timeline * @param metaClient The meta client * @return the earliest instant to retain for clustering */ public static Option<HoodieInstant> getEarliestInstantToRetainForClustering( HoodieActiveTimeline activeTimeline, HoodieTableMetaClient metaClient) throws IOException { Option<HoodieInstant> oldestInstantToRetain = Option.empty(); HoodieTimeline replaceTimeline = activeTimeline.getTimelineOfActions(CollectionUtils.createSet(HoodieTimeline.REPLACE_COMMIT_ACTION)); if (!replaceTimeline.empty()) { Option<HoodieInstant> cleanInstantOpt = activeTimeline.getCleanerTimeline().filterCompletedInstants().lastInstant(); if (cleanInstantOpt.isPresent()) { // The first clustering instant of which timestamp is greater than or equal to the earliest commit to retain of // the clean metadata. HoodieInstant cleanInstant = cleanInstantOpt.get(); HoodieActionInstant earliestInstantToRetain = CleanerUtils.getCleanerPlan(metaClient, cleanInstant.isRequested() ? cleanInstant : HoodieTimeline.getCleanRequestedInstant(cleanInstant.getTimestamp())) .getEarliestInstantToRetain(); String retainLowerBound; if (earliestInstantToRetain != null && !StringUtils.isNullOrEmpty(earliestInstantToRetain.getTimestamp())) { retainLowerBound = earliestInstantToRetain.getTimestamp(); } else { // no earliestInstantToRetain, indicate KEEP_LATEST_FILE_VERSIONS clean policy, // retain first instant after clean instant. // For KEEP_LATEST_FILE_VERSIONS cleaner policy, file versions are only maintained for active file groups // not for replaced file groups. So, last clean instant can be considered as a lower bound, since // the cleaner would have removed all the file groups until then. But there is a catch to this logic, // while cleaner is running if there is a pending replacecommit then those files are not cleaned. // TODO: This case has to be handled. HUDI-6352 retainLowerBound = cleanInstant.getTimestamp(); } oldestInstantToRetain = replaceTimeline.filter(instant -> HoodieTimeline.compareTimestamps( instant.getTimestamp(), HoodieTimeline.GREATER_THAN_OR_EQUALS, retainLowerBound)) .firstInstant(); } else { oldestInstantToRetain = replaceTimeline.firstInstant(); } } return oldestInstantToRetain; }
3.68
flink_CollectionUtil_partition
/** Partition a collection into approximately n buckets. */ public static <T> Collection<List<T>> partition(Collection<T> elements, int numBuckets) { Map<Integer, List<T>> buckets = newHashMapWithExpectedSize(numBuckets); int initialCapacity = elements.size() / numBuckets; int index = 0; for (T element : elements) { int bucket = index % numBuckets; buckets.computeIfAbsent(bucket, key -> new ArrayList<>(initialCapacity)).add(element); index++; } return buckets.values(); }
3.68
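A hedged usage sketch of the round-robin bucketing above; with 10 elements and 3 buckets the sizes come out 4/3/3 (assumes the CollectionUtil class shown):

    import java.util.Collection;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.IntStream;

    List<Integer> elements = IntStream.range(0, 10).boxed().collect(Collectors.toList());
    Collection<List<Integer>> buckets = CollectionUtil.partition(elements, 3);
    // index % 3 picks the bucket: [0,3,6,9], [1,4,7], [2,5,8]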
framework_Navigator_getStateParameterMap
/** * Returns the current navigation state reported by this Navigator's * {@link NavigationStateManager} as a Map<String, String> where each key * represents a parameter in the state. The separator between the state * parameters must be supplied as the {@code separator} argument. * * @param separator * the string (typically one character) used to separate values * from each other * @return The parameters from the navigation state as a map * @see #getStateParameterMap() * @since 8.1 */ public Map<String, String> getStateParameterMap(String separator) { return parseStateParameterMap(Objects.requireNonNull(separator)); }
3.68
hadoop_MawoConfiguration_getJobBuilderClass
/** * Get job builder class. * @return value of mawo.job-builder.class */ public String getJobBuilderClass() { return configsMap.get(JOB_BUILDER_CLASS); }
3.68
flink_DataStreamSink_setResources
/** * Sets the resources for this sink; the minimum and preferred resources are the same by * default. * * @param resources The resources for this sink. * @return The sink with set minimum and preferred resources. */ private DataStreamSink<T> setResources(ResourceSpec resources) { transformation.setResources(resources, resources); return this; }
3.68
hadoop_RMActiveServiceContext_incrTokenSequenceNo
/** * Increment token sequence no. * */ public void incrTokenSequenceNo() { this.tokenSequenceNo.incrementAndGet(); }
3.68
framework_FlyweightCell_setElement
/** * Sets the DOM element for this FlyweightCell, either a <code>TD</code> or * a <code>TH</code>. It is the caller's responsibility to actually insert * the given element to the document when needed. * * @param element * the element corresponding to this cell, cannot be null */ public void setElement(TableCellElement element) { assert element != null; assertSetup(); this.element = element; }
3.68
shardingsphere-elasticjob_JobScheduleController_pauseJob
/** * Pause job. */ public synchronized void pauseJob() { try { if (!scheduler.isShutdown()) { scheduler.pauseAll(); } } catch (final SchedulerException ex) { throw new JobSystemException(ex); } }
3.68
flink_CheckpointRequestDecider_chooseRequestToExecute
/** * Choose the next {@link CheckpointTriggerRequest request} to execute based on the provided * candidate and the current state. Acquires a lock and may update the state. * * @return request that should be executed */ private Optional<CheckpointTriggerRequest> chooseRequestToExecute( boolean isTriggering, long lastCompletionMs) { if (isTriggering || queuedRequests.isEmpty() || numberOfCleaningCheckpointsSupplier.getAsInt() > maxConcurrentCheckpointAttempts) { return Optional.empty(); } if (pendingCheckpointsSizeSupplier.getAsInt() >= maxConcurrentCheckpointAttempts) { return Optional.of(queuedRequests.first()) .filter(CheckpointTriggerRequest::isForce) .map(unused -> queuedRequests.pollFirst()); } CheckpointTriggerRequest first = queuedRequests.first(); if (!first.isForce() && first.isPeriodic) { long currentRelativeTime = clock.relativeTimeMillis(); long nextTriggerDelayMillis = lastCompletionMs - currentRelativeTime + minPauseBetweenCheckpoints; if (nextTriggerDelayMillis > 0) { queuedRequests .pollFirst() .completeExceptionally( new CheckpointException(MINIMUM_TIME_BETWEEN_CHECKPOINTS)); rescheduleTrigger.accept(currentRelativeTime, nextTriggerDelayMillis); return Optional.empty(); } } return Optional.of(queuedRequests.pollFirst()); }
3.68
hbase_CryptoAES_unwrap
/** * Decrypts input data. The input composes of (msg, padding if needed, mac) and sequence num. The * result is msg. * @param data the input byte array * @param offset the offset in input where the input starts * @param len the input length * @return the new decrypted byte array. * @throws SaslException if error happens */ public byte[] unwrap(byte[] data, int offset, int len) throws SaslException { // get plaintext and seqNum byte[] decrypted = new byte[len - 4]; byte[] peerSeqNum = new byte[4]; try { decryptor.update(data, offset, len - 4, decrypted, 0); } catch (ShortBufferException sbe) { // this should not happen throw new SaslException("Error happens during decrypt data", sbe); } System.arraycopy(data, offset + decrypted.length, peerSeqNum, 0, 4); // get msg and mac byte[] msg = new byte[decrypted.length - 10]; byte[] mac = new byte[10]; System.arraycopy(decrypted, 0, msg, 0, msg.length); System.arraycopy(decrypted, msg.length, mac, 0, 10); // check mac integrity and msg sequence if (!integrity.compareHMAC(mac, peerSeqNum, msg, 0, msg.length)) { throw new SaslException("Unmatched MAC"); } if (!integrity.comparePeerSeqNum(peerSeqNum)) { throw new SaslException("Out of order sequencing of messages. Got: " + integrity.byteToInt(peerSeqNum) + " Expected: " + integrity.peerSeqNum); } integrity.incPeerSeqNum(); return msg; }
3.68
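The wire layout the method above consumes, reconstructed from its offset arithmetic (comment sketch only):

    // data[offset       .. offset+len-5]  ciphertext = encrypt( msg || 10-byte HMAC )
    // data[offset+len-4 .. offset+len-1]  4-byte peer sequence number (plaintext)
    //
    // after decryption: decrypted = msg (len-14 bytes) || mac (10 bytes);
    // the MAC is verified over msg together with the peer sequence number.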
querydsl_NumberExpression_intValue
/** * Create a {@code this.intValue()} expression * * <p>Get the int expression of this numeric expression</p> * * @return this.intValue() * @see java.lang.Number#intValue() */ public NumberExpression<Integer> intValue() { return castToNum(Integer.class); }
3.68
Activiti_RootPropertyResolver_isProperty
/** * Test property * * @param property * property name * @return <code>true</code> if the given property is associated with a value */ public boolean isProperty(String property) { return map.containsKey(property); }
3.68
pulsar_SimpleLoadManagerImpl_findBrokerForPlacement
/** * Assign owner for specified ServiceUnit from the given candidates, following these principles: 1) Optimum * distribution: fill up one broker till its load reaches optimum level (defined by underload threshold) before pulling * another idle broker in; 2) Even distribution: once all brokers' loads are above optimum level, maintain all * brokers to have even load; 3) Set the underload threshold to small value (like 1) for pure even distribution, and * high value (like 80) for pure optimum distribution; * <p> * Strategy to select broker: 1) The first choice is the least loaded broker which is underloaded but not idle; 2) The * second choice is an idle broker (if there is any); 3) Otherwise simply select the least loaded broker if it is NOT * overloaded; 4) If all brokers are overloaded, select the broker with maximum available capacity (considering * brokers could have different hardware configuration, this usually means to select the broker with more hardware * resource); * <p> * Broker's load level: 1) Load ranking (triggered by LoadReport update) estimates the load level according to the * resource usage and namespace bundles already loaded by each broker; 2) When the leader broker decides the owner for a * new namespace bundle, it may take time for the real owner to actually load the bundle and refresh LoadReport, * leader broker will store the bundle in a list called preAllocatedBundles, and the quota of all * preAllocatedBundles in preAllocatedQuotas, and re-estimate the broker's load level by putting the * preAllocatedQuota into calculation; 3) Everything (preAllocatedBundles and preAllocatedQuotas) will get reset in * load ranking. */ private synchronized ResourceUnit findBrokerForPlacement(Multimap<Long, ResourceUnit> candidates, ServiceUnitId serviceUnit) { long underloadThreshold = this.getLoadBalancerBrokerUnderloadedThresholdPercentage(); long overloadThreshold = this.getLoadBalancerBrokerOverloadedThresholdPercentage(); ResourceQuota defaultQuota = pulsar.getBrokerService().getBundlesQuotas().getDefaultResourceQuota().join(); double minLoadPercentage = 101.0; long maxAvailability = -1; ResourceUnit idleRU = null; ResourceUnit maxAvailableRU = null; ResourceUnit randomRU = null; ResourceUnit selectedRU = null; ResourceUnitRanking selectedRanking = null; String serviceUnitId = serviceUnit.toString(); // If the ranking is expected to be in the range [0,100] (which is the case for LOADBALANCER_STRATEGY_LLS), // the ranks are bounded. Otherwise (as is the case in LOADBALANCER_STRATEGY_LEAST_MSG), the ranks are simply // the total message rate which is in the range [0,Infinity) so they are unbounded. The // "boundedness" affects how two ranks are compared to see which one is better boolean unboundedRanks = getLoadBalancerPlacementStrategy().equals(LOADBALANCER_STRATEGY_LEAST_MSG); long randomBrokerIndex = (candidates.size() > 0) ? (this.brokerRotationCursor % candidates.size()) : 0; // find the least loaded & not-idle broker for (Map.Entry<Long, ResourceUnit> candidateOwner : candidates.entries()) { ResourceUnit candidate = candidateOwner.getValue(); randomBrokerIndex--; // skip broker which is not ranked. this should never happen except in unit test if (!resourceUnitRankings.containsKey(candidate)) { continue; } ResourceUnitRanking ranking = resourceUnitRankings.get(candidate); // check if this ServiceUnit is already loaded if (ranking.isServiceUnitLoaded(serviceUnitId)) { ranking.removeLoadedServiceUnit(serviceUnitId, this.getResourceQuota(serviceUnitId)); } // record a random broker if (randomBrokerIndex < 0 && randomRU == null) { randomRU = candidate; } // check the available capacity double loadPercentage = ranking.getEstimatedLoadPercentage(); double availablePercentage = Math.max(0, (100 - loadPercentage) / 100); long availability = (long) (ranking.estimateMaxCapacity(defaultQuota) * availablePercentage); if (availability > maxAvailability) { maxAvailability = availability; maxAvailableRU = candidate; } // check the load percentage if (ranking.isIdle()) { if (idleRU == null) { idleRU = candidate; } } else { if (selectedRU == null) { selectedRU = candidate; selectedRanking = ranking; minLoadPercentage = loadPercentage; } else { if ((unboundedRanks ? ranking.compareMessageRateTo(selectedRanking) : ranking.compareTo(selectedRanking)) < 0) { minLoadPercentage = loadPercentage; selectedRU = candidate; selectedRanking = ranking; } } } } if ((minLoadPercentage > underloadThreshold && idleRU != null) || selectedRU == null) { // assign to the idle broker if the least loaded broker already has optimum load (which means NOT // underloaded), or if all brokers are idle selectedRU = idleRU; } else if (minLoadPercentage >= 100.0 && randomRU != null && !unboundedRanks) { // all brokers are full, assign to a random one selectedRU = randomRU; } else if (minLoadPercentage > overloadThreshold && !unboundedRanks) { // assign to the broker with maximum available capacity if all brokers are overloaded selectedRU = maxAvailableRU; } // re-calculate load level for selected broker if (selectedRU != null) { this.brokerRotationCursor = (this.brokerRotationCursor + 1) % 1000000; ResourceUnitRanking ranking = resourceUnitRankings.get(selectedRU); String loadPercentageDesc = ranking.getEstimatedLoadPercentageString(); log.info("Assign {} to {} with ({}).", serviceUnitId, selectedRU.getResourceId(), loadPercentageDesc); if (!ranking.isServiceUnitPreAllocated(serviceUnitId)) { final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(serviceUnitId); final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(serviceUnitId); ResourceQuota quota = this.getResourceQuota(serviceUnitId); // Add preallocated bundle range so incoming bundles from the same namespace are not assigned to the // same broker. brokerToNamespaceToBundleRange .computeIfAbsent(selectedRU.getResourceId().replace("http://", ""), k -> ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<String>>newBuilder() .build()) .computeIfAbsent(namespaceName, k -> ConcurrentOpenHashSet.<String>newBuilder().build()) .add(bundleRange); ranking.addPreAllocatedServiceUnit(serviceUnitId, quota); resourceUnitRankings.put(selectedRU, ranking); } } return selectedRU; }
3.68
flink_DuplicatingFileSystem_of
/** A factory method for creating a simple pair of source/destination. */ static CopyRequest of(Path source, Path destination) { return new CopyRequest() { @Override public Path getSource() { return source; } @Override public Path getDestination() { return destination; } }; }
3.68
pulsar_PulsarAvroRowDecoder_decodeRow
/** * decode ByteBuf by {@link org.apache.pulsar.client.api.schema.GenericSchema}. * @param byteBuf * @return */ @Override public Optional<Map<DecoderColumnHandle, FieldValueProvider>> decodeRow(ByteBuf byteBuf) { GenericRecord avroRecord; try { GenericAvroRecord record = (GenericAvroRecord) genericAvroSchema.decode(byteBuf); avroRecord = record.getAvroRecord(); } catch (Exception e) { e.printStackTrace(); throw new TrinoException(GENERIC_INTERNAL_ERROR, "Decoding avro record failed.", e); } return Optional.of(columnDecoders.entrySet().stream() .collect(toImmutableMap( Map.Entry::getKey, entry -> entry.getValue().decodeField(avroRecord)))); }
3.68
hbase_LruBlockCache_assertCounterSanity
/** * Sanity-checking for parity between actual block cache content and metrics. Intended only for * use with TRACE level logging and -ea JVM. */ private static void assertCounterSanity(long mapSize, long counterVal) { if (counterVal < 0) { LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal + ", mapSize=" + mapSize); return; } if (mapSize < Integer.MAX_VALUE) { double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.); if (pct_diff > 0.05) { LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal + ", mapSize=" + mapSize); } } }
3.68
querydsl_GroupBy_avg
/** * Create a new aggregating avg expression with a user-provided MathContext * * @param expression expression for which the accumulated average value will be used in the group by projection * @param mathContext mathContext for average calculation * @return wrapper expression */ public static <E extends Number> AbstractGroupExpression<E, E> avg(Expression<E> expression, MathContext mathContext) { return new GAvg<E>(expression, mathContext); }
3.68
querydsl_MapExpressionBase_size
/** * Create a {@code this.size()} expression * * @return this.size() */ public final NumberExpression<Integer> size() { if (size == null) { size = Expressions.numberOperation(Integer.class, Ops.MAP_SIZE, mixin); } return size; }
3.68
flink_CheckpointConfig_setAlignedCheckpointTimeout
/** * Only relevant if {@link ExecutionCheckpointingOptions#ENABLE_UNALIGNED} is enabled. * * <p>If {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT} has a value equal to * <code>0</code>, checkpoints will always start unaligned. * * <p>If {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT} has a value greater than * <code>0</code>, checkpoints will start aligned. If during checkpointing the checkpoint start * delay exceeds this {@link ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT}, * alignment will time out and the checkpoint will continue as an unaligned checkpoint. */ @PublicEvolving public void setAlignedCheckpointTimeout(Duration alignedCheckpointTimeout) { configuration.set( ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT, alignedCheckpointTimeout); }
3.68
dubbo_AdaptiveClassCodeGenerator_getUrlTypeIndex
/** * get index of parameter with type URL */ private int getUrlTypeIndex(Method method) { int urlTypeIndex = -1; Class<?>[] pts = method.getParameterTypes(); for (int i = 0; i < pts.length; ++i) { if (pts[i].equals(URL.class)) { urlTypeIndex = i; break; } } return urlTypeIndex; }
3.68
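For a hypothetical extension-method signature, the scan above returns the position of the URL parameter (or -1 if absent):

    // void refer(String key, URL url, Invocation inv)
    //      index:      0         1          2          -> getUrlTypeIndex(...) == 1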
morf_SqlServerDialect_getSqlForLastDayOfMonth
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForLastDayOfMonth */ @Override protected String getSqlForLastDayOfMonth(AliasedField date) { return "DATEADD(s,-1,DATEADD(mm, DATEDIFF(m,0," + getSqlFrom(date) + ")+1,0))"; }
3.68
hbase_RegionServerRpcQuotaManager_getQuota
/** * Returns the quota for an operation. * @param ugi the user that is executing the operation * @param table the table where the operation will be executed * @return the OperationQuota */ public OperationQuota getQuota(final UserGroupInformation ugi, final TableName table) { if (isQuotaEnabled() && !table.isSystemTable() && isRpcThrottleEnabled()) { UserQuotaState userQuotaState = quotaCache.getUserQuotaState(ugi); QuotaLimiter userLimiter = userQuotaState.getTableLimiter(table); boolean useNoop = userLimiter.isBypass(); if (userQuotaState.hasBypassGlobals()) { if (LOG.isTraceEnabled()) { LOG.trace("get quota for ugi=" + ugi + " table=" + table + " userLimiter=" + userLimiter); } if (!useNoop) { return new DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter); } } else { QuotaLimiter nsLimiter = quotaCache.getNamespaceLimiter(table.getNamespaceAsString()); QuotaLimiter tableLimiter = quotaCache.getTableLimiter(table); QuotaLimiter rsLimiter = quotaCache.getRegionServerQuotaLimiter(QuotaTableUtil.QUOTA_REGION_SERVER_ROW_KEY); useNoop &= tableLimiter.isBypass() && nsLimiter.isBypass() && rsLimiter.isBypass(); boolean exceedThrottleQuotaEnabled = quotaCache.isExceedThrottleQuotaEnabled(); if (LOG.isTraceEnabled()) { LOG.trace("get quota for ugi=" + ugi + " table=" + table + " userLimiter=" + userLimiter + " tableLimiter=" + tableLimiter + " nsLimiter=" + nsLimiter + " rsLimiter=" + rsLimiter + " exceedThrottleQuotaEnabled=" + exceedThrottleQuotaEnabled); } if (!useNoop) { if (exceedThrottleQuotaEnabled) { return new ExceedOperationQuota(this.rsServices.getConfiguration(), rsLimiter, userLimiter, tableLimiter, nsLimiter); } else { return new DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter, tableLimiter, nsLimiter, rsLimiter); } } } } return NoopOperationQuota.get(); }
3.68
pulsar_ConsumerImpl_clearReceiverQueue
/** * Clears the internal receiver queue and returns the message id of what was the 1st message in the queue that was * not seen by the application. */ private MessageIdAdv clearReceiverQueue() { List<Message<?>> currentMessageQueue = new ArrayList<>(incomingMessages.size()); incomingMessages.drainTo(currentMessageQueue); resetIncomingMessageSize(); if (duringSeek.compareAndSet(true, false)) { return seekMessageId; } else if (subscriptionMode == SubscriptionMode.Durable) { return startMessageId; } if (!currentMessageQueue.isEmpty()) { MessageIdAdv nextMessageInQueue = (MessageIdAdv) currentMessageQueue.get(0).getMessageId(); MessageIdAdv previousMessage; if (MessageIdAdvUtils.isBatch(nextMessageInQueue)) { // Get the previous message within the current batch previousMessage = new BatchMessageIdImpl(nextMessageInQueue.getLedgerId(), nextMessageInQueue.getEntryId(), nextMessageInQueue.getPartitionIndex(), nextMessageInQueue.getBatchIndex() - 1); } else { // Get the previous message in the previous entry previousMessage = MessageIdAdvUtils.prevMessageId(nextMessageInQueue); } // release messages if they are pooled messages currentMessageQueue.forEach(Message::release); return previousMessage; } else if (!lastDequeuedMessageId.equals(MessageId.earliest)) { // If the queue was empty we need to restart from the message just after the last one that has been dequeued // in the past return new BatchMessageIdImpl((MessageIdImpl) lastDequeuedMessageId); } else { // No message was received or dequeued by this consumer. Next message would still be the startMessageId return startMessageId; } }
3.68
morf_JdbcUrlElements_withSchemaName
/**
 * Sets the schema name. Defaults to null (no schema specified).
 *
 * @param schemaName The schema name
 * @return this
 */
public Builder withSchemaName(String schemaName) {
  this.schemaName = schemaName;
  return this;
}
3.68
flink_StreamOperatorStateContext_isRestored
/**
 * Returns true if the states provided by this context are restored from a checkpoint/savepoint.
 */
default boolean isRestored() {
    return getRestoredCheckpointId().isPresent();
}
3.68
morf_FieldLiteral_deepCopyInternal
/**
 * @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
 */
@Override
protected FieldLiteral deepCopyInternal(final DeepCopyTransformation transformer) {
  return new FieldLiteral(this.getAlias(), this.value, this.dataType);
}
3.68
hadoop_RoleModel_newSid
/**
 * Statement ID factory.
 * @return a statement ID unique for this JVM's life.
 */
public static String newSid() {
  SID_COUNTER.incrementAndGet();
  return SID_COUNTER.toString();
}
3.68
incubator-hugegraph-toolchain_SplicingIdGenerator_splicing
/**
 * Concatenate multiple parts into a single id with ID_SPLITOR
 *
 * @param parts the string id values to be spliced
 * @return spliced id object
 */
public static Id splicing(String... parts) {
    String escaped = IdUtil.escape(ID_SPLITOR, ESCAPE, parts);
    return IdGenerator.of(escaped);
}
3.68
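The splicing above escapes the delimiter inside each part before joining, so the composite id can be split back losslessly. A minimal sketch of that escape-then-join idea; the '>' delimiter and '\' escape below are hypothetical stand-ins for the library's real ID_SPLITOR and ESCAPE constants:

import java.util.StringJoiner;

final class SpliceDemo {
    private static final char SPLITOR = '>';   // assumed delimiter
    private static final char ESCAPE = '\\';   // assumed escape character

    static String splice(String... parts) {
        StringJoiner joiner = new StringJoiner(String.valueOf(SPLITOR));
        for (String part : parts) {
            // Escape the delimiter inside each part so splicing stays reversible.
            joiner.add(part.replace(String.valueOf(SPLITOR), "" + ESCAPE + SPLITOR));
        }
        return joiner.toString();
    }

    public static void main(String[] args) {
        System.out.println(splice("person", "marko")); // person>marko
        System.out.println(splice("a>b", "c"));        // a\>b>c
    }
}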
hadoop_StreamCapabilitiesPolicy_unbuffer
/**
 * Implement the policy for {@link CanUnbuffer#unbuffer()}.
 *
 * @param in the input stream
 */
public static void unbuffer(InputStream in) {
  try {
    if (in instanceof StreamCapabilities
        && ((StreamCapabilities) in).hasCapability(StreamCapabilities.UNBUFFER)) {
      ((CanUnbuffer) in).unbuffer();
    } else {
      LOG.debug(in.getClass().getName() + ":"
          + " does not implement StreamCapabilities"
          + " and the unbuffer capability");
    }
  } catch (ClassCastException e) {
    throw new UnsupportedOperationException(in.getClass().getName() + ": "
        + CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE);
  }
}
3.68
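A hedged usage sketch for the policy above, run against the local filesystem. The file path is illustrative and must exist for the demo to run; on a stream without the unbuffer capability the policy just logs at debug level instead of failing:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilitiesPolicy;

public final class UnbufferDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        try (FSDataInputStream in = fs.open(new Path("/tmp/sample.txt"))) { // hypothetical path
            in.read();
            // Frees buffers/sockets if supported; otherwise a debug log, never an error.
            StreamCapabilitiesPolicy.unbuffer(in);
        }
    }
}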
framework_Calendar_getEventProvider
/**
 * @return the {@link CalendarEventProvider} currently used
 */
public CalendarEventProvider getEventProvider() {
    return calendarEventProvider;
}
3.68
Activiti_CollectionUtil_map
/**
 * Helper method to easily create a map with keys of type String and values of type Object.
 * Null values are allowed.
 *
 * @param objects varargs containing the key1, value1, key2, value2, etc. Note: although an
 *          Object, we will cast the key to String internally
 * @throws ActivitiIllegalArgumentException when objects are not even or key/value are not
 *           expected types
 */
public static Map<String, Object> map(Object... objects) {
    return mapOfClass(Object.class, objects);
}
3.68
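A short usage sketch grounded in the Javadoc above; the import path is an assumption and has moved between Activiti releases:

import java.util.Map;
import org.activiti.engine.impl.util.CollectionUtil; // package may differ by Activiti version

public final class MapDemo {
    public static void main(String[] args) {
        // Varargs alternate key, value; keys are cast to String internally,
        // and an odd argument count throws ActivitiIllegalArgumentException.
        Map<String, Object> vars = CollectionUtil.map("initiator", "kermit", "amount", 42);
        System.out.println(vars); // {initiator=kermit, amount=42}
    }
}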
hbase_EntryBuffers_getChunkToWrite
/** Returns a {@link RegionEntryBuffer} of edits to be written, or null if none is available. */
synchronized RegionEntryBuffer getChunkToWrite() {
  long biggestSize = 0;
  byte[] biggestBufferKey = null;

  for (Map.Entry<byte[], RegionEntryBuffer> entry : buffers.entrySet()) {
    long size = entry.getValue().heapSize();
    if (size > biggestSize && (!currentlyWriting.contains(entry.getKey()))) {
      biggestSize = size;
      biggestBufferKey = entry.getKey();
    }
  }
  if (biggestBufferKey == null) {
    return null;
  }

  RegionEntryBuffer buffer = buffers.remove(biggestBufferKey);
  currentlyWriting.add(biggestBufferKey);
  return buffer;
}
3.68
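The selection loop above implements "largest buffer not currently being written". A standalone sketch of the same rule, with String keys in place of byte[] for simplicity (the real class backs its map with a byte[]-aware comparator):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class BiggestIdleBuffer {
    static String pick(Map<String, Long> sizesByRegion, Set<String> writing) {
        long biggest = 0;
        String key = null;
        for (Map.Entry<String, Long> e : sizesByRegion.entrySet()) {
            // Skip buffers another writer already claimed; keep the largest of the rest.
            if (e.getValue() > biggest && !writing.contains(e.getKey())) {
                biggest = e.getValue();
                key = e.getKey();
            }
        }
        return key; // null when every buffer is empty or busy
    }

    public static void main(String[] args) {
        Map<String, Long> sizes = new HashMap<>();
        sizes.put("regionA", 10L);
        sizes.put("regionB", 40L);
        Set<String> busy = new HashSet<>();
        busy.add("regionB");
        System.out.println(pick(sizes, busy)); // regionA: the biggest *idle* buffer
    }
}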
framework_VCalendar_removeMonthEvent
/**
 * Remove a month event from the view.
 *
 * @param target
 *            The event to remove
 * @param repaintImmediately
 *            Should we repaint after the event was removed?
 */
public void removeMonthEvent(CalendarEvent target, boolean repaintImmediately) {
    if (target != null && target.getSlotIndex() >= 0) {
        // Remove event
        for (int row = 0; row < monthGrid.getRowCount(); row++) {
            for (int cell = 0; cell < monthGrid.getCellCount(row); cell++) {
                SimpleDayCell sdc = (SimpleDayCell) monthGrid.getWidget(row, cell);
                if (sdc == null) {
                    return;
                }
                sdc.removeEvent(target, repaintImmediately);
            }
        }
    }
}
3.68
dubbo_GlobalResourcesRepository_registerGlobalDisposable
/**
 * Register a global reused disposable. The disposable will be executed when all dubbo
 * FrameworkModels are destroyed. Note: the global disposable should be registered in static
 * code; it is reusable and will not be removed when dubbo shuts down.
 *
 * @param disposable the disposable to register
 */
public static void registerGlobalDisposable(Disposable disposable) {
    if (!globalReusedDisposables.contains(disposable)) {
        synchronized (GlobalResourcesRepository.class) {
            if (!globalReusedDisposables.contains(disposable)) {
                globalReusedDisposables.add(disposable);
            }
        }
    }
}
3.68
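The registration above is the check / lock / re-check idiom: the first contains() avoids taking the lock on the common path, and the second check, under the lock, stops two racing callers from both adding the same item. A generic sketch of the same idiom, assuming a concurrency-safe backing list:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

final class Registry<T> {
    private final List<T> items = new CopyOnWriteArrayList<>();

    void registerOnce(T item) {
        if (!items.contains(item)) {          // cheap unsynchronized fast path
            synchronized (this) {
                if (!items.contains(item)) {  // re-check under the lock
                    items.add(item);
                }
            }
        }
    }
}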
hbase_MasterObserver_postRenameRSGroup
/**
 * Called after an rsgroup has been renamed.
 * @param ctx     the environment to interact with the framework and master
 * @param oldName old rsgroup name
 * @param newName new rsgroup name
 */
default void postRenameRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
  final String oldName, final String newName) throws IOException {
}
3.68
hbase_ProcedureStoreTracker_getAllActiveProcIds
/**
 * Will be used when there are too many proc wal files. We will rewrite the states of the active
 * procedures in the oldest proc wal file so that we can delete it.
 * @return all the active procedure ids in this tracker.
 */
public long[] getAllActiveProcIds() {
  return map.values().stream().map(BitSetNode::getActiveProcIds).filter(p -> p.length > 0)
    .flatMapToLong(LongStream::of).toArray();
}
3.68
hadoop_MutableGaugeInt_decr
/**
 * Decrement by delta.
 * @param delta the amount of the decrement
 */
public void decr(int delta) {
  value.addAndGet(-delta);
  setChanged();
}
3.68
MagicPlugin_Targeting_getNextBlock
/** * Move "steps" forward along line of vision and returns the block there * * @return The block at the new location */ @Nullable protected Block getNextBlock() { previousPreviousBlock = previousBlock; previousBlock = currentBlock; if (blockIterator == null || !blockIterator.hasNext()) { currentBlock = null; } else { currentBlock = blockIterator.next(); } return currentBlock; }
3.68
flink_HsMemoryDataManager_append
/**
 * Append record to {@link HsMemoryDataManager}. It will be managed by the {@link
 * HsSubpartitionMemoryDataManager} which it belongs to.
 *
 * @param record to be managed by this class.
 * @param targetChannel target subpartition of this record.
 * @param dataType the type of this record. In other words, is it data or event.
 */
public void append(ByteBuffer record, int targetChannel, Buffer.DataType dataType)
        throws IOException {
    try {
        getSubpartitionMemoryDataManager(targetChannel).append(record, dataType);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}
3.68
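The catch block above converts InterruptedException into IOException for callers that only handle I/O failures. A common variant of the idiom also restores the thread's interrupt status, which the snippet does not; sketched here with a hypothetical functional interface:

import java.io.IOException;
import java.io.InterruptedIOException;

final class InterruptToIo {
    @FunctionalInterface
    interface InterruptibleWrite {
        void run() throws InterruptedException;
    }

    // Wraps an interruptible call for callers that only speak IOException,
    // preserving the interrupt flag (a detail the snippet above omits).
    static void call(InterruptibleWrite write) throws IOException {
        try {
            write.run();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new InterruptedIOException("interrupted during append");
        }
    }

    public static void main(String[] args) throws IOException {
        call(() -> Thread.sleep(10)); // completes normally
        System.out.println("done");
    }
}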
rocketmq-connect_PluginUtils_simpleName
/**
 * Return the simple class name of a plugin as {@code String}.
 *
 * @param plugin the plugin descriptor.
 * @return the plugin's simple class name.
 */
public static String simpleName(PluginWrapper<?> plugin) {
    return plugin.pluginClass().getSimpleName();
}
3.68
graphhopper_VectorTile_getUintValue
/**
 * <code>optional uint64 uint_value = 5;</code>
 */
public long getUintValue() {
    return uintValue_;
}
3.68
flink_BinaryStringDataUtil_trimLeft
/**
 * Walk each character of the current string from the left end, removing the character if it is
 * in the trim string. Stops at the first character which is not in the trim string. Returns the
 * new substring.
 *
 * @param trimStr the trim string
 * @return a substring with all of the characters on the left side that are in the trim string
 *     removed
 */
public static BinaryStringData trimLeft(BinaryStringData str, BinaryStringData trimStr) {
    str.ensureMaterialized();
    if (trimStr == null) {
        return null;
    }
    trimStr.ensureMaterialized();
    if (isSpaceString(trimStr)) {
        return trimLeft(str);
    }
    if (str.inFirstSegment()) {
        int searchIdx = 0;
        while (searchIdx < str.getSizeInBytes()) {
            int charBytes = numBytesForFirstByte(str.getByteOneSegment(searchIdx));
            BinaryStringData currentChar = str.copyBinaryStringInOneSeg(searchIdx, charBytes);
            // try to find a match for the character among the trim string's characters
            if (trimStr.contains(currentChar)) {
                searchIdx += charBytes;
            } else {
                break;
            }
        }
        // empty string
        if (searchIdx >= str.getSizeInBytes()) {
            return EMPTY_UTF8;
        } else {
            return str.copyBinaryStringInOneSeg(searchIdx, str.getSizeInBytes() - searchIdx);
        }
    } else {
        return trimLeftSlow(str, trimStr);
    }
}
3.68
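A simplified, single-segment illustration of the walk above, operating on Java Strings and code points instead of binary segments and UTF-8 byte counts; the method and class names here are illustrative:

final class TrimLeftDemo {
    static String trimLeft(String str, String trimChars) {
        int i = 0;
        while (i < str.length()) {
            int cp = str.codePointAt(i);
            if (trimChars.indexOf(cp) < 0) {
                break; // first character not in the trim set: stop
            }
            i += Character.charCount(cp); // advance by the full code point
        }
        return str.substring(i);
    }

    public static void main(String[] args) {
        System.out.println(trimLeft("xxyhello", "xy")); // hello
        System.out.println(trimLeft("hello", "xy"));    // hello
    }
}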
framework_Criterion_setValue
/**
 * Sets the value of the payload to be compared.
 *
 * @param value
 *            value of the payload to be compared
 */
public void setValue(String value) {
    this.value = value;
}
3.68
flink_HiveParserASTNodeOrigin_getObjectName
/** @return the name of the object from which a HiveParserASTNode originated, e.g. "v". */
public String getObjectName() {
    return objectName;
}
3.68
flink_Catalog_alterTable
/**
 * Modifies an existing table or view. Note that the new and old {@link CatalogBaseTable} must
 * be of the same kind. For example, this doesn't allow altering a regular table to a
 * partitioned table, or altering a view to a table, and vice versa.
 *
 * <p>The framework will make sure to call this method with fully validated {@link
 * ResolvedCatalogTable} or {@link ResolvedCatalogView}. Those instances are easy to serialize
 * for a durable catalog implementation.
 *
 * @param tablePath path of the table or view to be modified
 * @param newTable the new table definition
 * @param tableChanges changes describing the modification between the newTable and the original
 *     table
 * @param ignoreIfNotExists flag to specify behavior when the table or view does not exist: if
 *     set to false, throw an exception, if set to true, do nothing.
 * @throws TableNotExistException if the table does not exist
 * @throws CatalogException in case of any runtime exception
 */
default void alterTable(
        ObjectPath tablePath,
        CatalogBaseTable newTable,
        List<TableChange> tableChanges,
        boolean ignoreIfNotExists)
        throws TableNotExistException, CatalogException {
    alterTable(tablePath, newTable, ignoreIfNotExists);
}
3.68
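The default method above is an API-evolution shim: new callers can pass TableChange details, while old Catalog implementations that only override the three-argument variant keep working unchanged. The same pattern in miniature, with hypothetical names:

import java.util.List;

interface Service {
    void run(String task); // original method every implementation provides

    // Newer overload: defaults to delegating, so old implementations keep compiling.
    default void run(String task, List<String> hints) {
        run(task);
    }
}

final class Evolution {
    public static void main(String[] args) {
        Service legacy = task -> System.out.println("running " + task);
        legacy.run("backfill", List.of("low-priority")); // falls back to run("backfill")
    }
}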
framework_Table_getCellStyleGenerator
/**
 * Get the current cell style generator.
 */
public CellStyleGenerator getCellStyleGenerator() {
    return cellStyleGenerator;
}
3.68
hbase_SimpleRpcServer_getReader
// The method that will return the next reader to work with
// Simplistic implementation of round robin for now
Reader getReader() {
  currentReader = (currentReader + 1) % readers.length;
  return readers[currentReader];
}
3.68
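A generic sketch of the round-robin rotation above. Unlike the plain int field in the snippet, this version uses an AtomicInteger plus floorMod so it stays correct under concurrent callers and counter overflow; both are assumptions beyond the original:

import java.util.concurrent.atomic.AtomicInteger;

final class RoundRobin<T> {
    private final T[] items;
    private final AtomicInteger next = new AtomicInteger();

    RoundRobin(T[] items) {
        this.items = items;
    }

    T next() {
        // floorMod keeps the index non-negative even if the counter overflows.
        int idx = Math.floorMod(next.getAndIncrement(), items.length);
        return items[idx];
    }

    public static void main(String[] args) {
        RoundRobin<String> readers = new RoundRobin<>(new String[] {"r0", "r1", "r2"});
        for (int i = 0; i < 4; i++) {
            System.out.print(readers.next() + " "); // r0 r1 r2 r0
        }
    }
}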