Columns: name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, range 3.26-3.68)
flink_SqlFunctionUtils_regexpReplace
/** * Returns a string resulting from replacing all substrings that match the regular expression * with replacement. */ public static String regexpReplace(String str, String regex, String replacement) { if (str == null || regex == null || replacement == null) { return null; } try { return str.replaceAll(regex, Matcher.quoteReplacement(replacement)); } catch (Exception e) { LOG.error( String.format( "Exception in regexpReplace('%s', '%s', '%s')", str, regex, replacement), e); // return null if exception in regex replace return null; } }
3.68
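A minimal standalone sketch of the replacement semantics in the regexpReplace snippet above, using only the JDK (class and method names here are illustrative, not Flink's):

import java.util.regex.Matcher;

public class RegexpReplaceDemo {
    // Mirrors the snippet: the replacement is quoted, so "$" and "\" are treated literally.
    static String regexpReplace(String str, String regex, String replacement) {
        if (str == null || regex == null || replacement == null) {
            return null;
        }
        return str.replaceAll(regex, Matcher.quoteReplacement(replacement));
    }

    public static void main(String[] args) {
        // Without quoteReplacement, "$1" would be read as a group reference.
        System.out.println(regexpReplace("a1b2", "[0-9]", "$1")); // a$1b$1
    }
}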
hadoop_SubApplicationRowKey_getRowKey
/** * Constructs a row key for the sub app table as follows: * {@code subAppUserId!clusterId!entityType * !entityPrefix!entityId!userId}. * Typically used while querying a specific sub app. * * subAppUserId is usually the doAsUser. * userId is the yarn user that the AM runs as. * * @return byte array with the row key. */ public byte[] getRowKey() { return subAppRowKeyConverter.encode(this); }
3.68
hadoop_AbstractPolicyManager_internalPolicyGetter
/** * Common functionality to instantiate and reinitialize a {@link * ConfigurableFederationPolicy}. */ private ConfigurableFederationPolicy internalPolicyGetter( final FederationPolicyInitializationContext federationPolicyContext, ConfigurableFederationPolicy oldInstance, Class policy) throws FederationPolicyInitializationException { FederationPolicyInitializationContextValidator .validate(federationPolicyContext, this.getClass().getCanonicalName()); if (oldInstance == null || !oldInstance.getClass().equals(policy)) { try { oldInstance = (ConfigurableFederationPolicy) policy.newInstance(); } catch (InstantiationException | IllegalAccessException e) { throw new FederationPolicyInitializationException(e); } } // copying the context to avoid side-effects FederationPolicyInitializationContext modifiedContext = updateContext(federationPolicyContext, oldInstance.getClass().getCanonicalName()); oldInstance.reinitialize(modifiedContext); return oldInstance; }
3.68
flink_RocksDBKeyedStateBackend_tryRegisterKvStateInformation
/** * Registers a k/v state information, which includes its state id, type, RocksDB column family * handle, and serializers. * * <p>When restoring from a snapshot, we don’t restore the individual k/v states, just the * global RocksDB database and the list of k/v state information. When a k/v state is first * requested we check here whether we already have a registered entry for that and return it * (after some necessary state compatibility checks) or create a new one if it does not exist. */ private <N, S extends State, SV, SEV> Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> tryRegisterKvStateInformation( StateDescriptor<S, SV> stateDesc, TypeSerializer<N> namespaceSerializer, @Nonnull StateSnapshotTransformFactory<SEV> snapshotTransformFactory, boolean allowFutureMetadataUpdates) throws Exception { RocksDbKvStateInfo oldStateInfo = kvStateInformation.get(stateDesc.getName()); TypeSerializer<SV> stateSerializer = stateDesc.getSerializer(); RocksDbKvStateInfo newRocksStateInfo; RegisteredKeyValueStateBackendMetaInfo<N, SV> newMetaInfo; if (oldStateInfo != null) { @SuppressWarnings("unchecked") RegisteredKeyValueStateBackendMetaInfo<N, SV> castedMetaInfo = (RegisteredKeyValueStateBackendMetaInfo<N, SV>) oldStateInfo.metaInfo; newMetaInfo = updateRestoredStateMetaInfo( Tuple2.of(oldStateInfo.columnFamilyHandle, castedMetaInfo), stateDesc, namespaceSerializer, stateSerializer); newMetaInfo = allowFutureMetadataUpdates ? newMetaInfo.withSerializerUpgradesAllowed() : newMetaInfo; newRocksStateInfo = new RocksDbKvStateInfo(oldStateInfo.columnFamilyHandle, newMetaInfo); kvStateInformation.put(stateDesc.getName(), newRocksStateInfo); } else { newMetaInfo = new RegisteredKeyValueStateBackendMetaInfo<>( stateDesc.getType(), stateDesc.getName(), namespaceSerializer, stateSerializer, StateSnapshotTransformFactory.noTransform()); newMetaInfo = allowFutureMetadataUpdates ? newMetaInfo.withSerializerUpgradesAllowed() : newMetaInfo; newRocksStateInfo = RocksDBOperationUtils.createStateInfo( newMetaInfo, db, columnFamilyOptionsFactory, ttlCompactFiltersManager, optionsContainer.getWriteBufferManagerCapacity()); RocksDBOperationUtils.registerKvStateInformation( this.kvStateInformation, this.nativeMetricMonitor, stateDesc.getName(), newRocksStateInfo); } StateSnapshotTransformFactory<SV> wrappedSnapshotTransformFactory = wrapStateSnapshotTransformFactory( stateDesc, snapshotTransformFactory, newMetaInfo.getStateSerializer()); newMetaInfo.updateSnapshotTransformFactory(wrappedSnapshotTransformFactory); return Tuple2.of(newRocksStateInfo.columnFamilyHandle, newMetaInfo); }
3.68
hbase_ChunkCreator_numberOfMappedChunks
// the chunks in the chunkIdMap may already be released so we shouldn't rely // on this counting for strong correctness. This method is used only in testing. int numberOfMappedChunks() { return this.chunkIdMap.size(); }
3.68
querydsl_AbstractLuceneQuery_filter
/** * Apply the given Lucene filter to the search results * * @param filter filter * @return the current object */ @SuppressWarnings("unchecked") public Q filter(Filter filter) { if (filters.isEmpty()) { this.filter = filter; filters = Collections.singletonList(filter); } else { this.filter = null; if (filters.size() == 1) { // copy the existing singleton list so the first filter is not lost filters = new ArrayList<Filter>(filters); } filters.add(filter); } return (Q) this; }
3.68
hadoop_Quota_getGlobalQuota
/** * Get global quota for the federation path. * @param path Federation path. * @return global quota for path. * @throws IOException If the quota system is disabled. */ QuotaUsage getGlobalQuota(String path) throws IOException { if (!router.isQuotaEnabled()) { throw new IOException("The quota system is disabled in Router."); } long nQuota = HdfsConstants.QUOTA_RESET; long sQuota = HdfsConstants.QUOTA_RESET; long[] typeQuota = new long[StorageType.values().length]; eachByStorageType(t -> typeQuota[t.ordinal()] = HdfsConstants.QUOTA_RESET); RouterQuotaManager manager = this.router.getQuotaManager(); TreeMap<String, RouterQuotaUsage> pts = manager.getParentsContainingQuota(path); Entry<String, RouterQuotaUsage> entry = pts.lastEntry(); while (entry != null && (nQuota == HdfsConstants.QUOTA_RESET || sQuota == HdfsConstants.QUOTA_RESET || orByStorageType( t -> typeQuota[t.ordinal()] == HdfsConstants.QUOTA_RESET))) { String ppath = entry.getKey(); QuotaUsage quota = entry.getValue(); if (nQuota == HdfsConstants.QUOTA_RESET) { nQuota = quota.getQuota(); } if (sQuota == HdfsConstants.QUOTA_RESET) { sQuota = quota.getSpaceQuota(); } eachByStorageType(t -> { if (typeQuota[t.ordinal()] == HdfsConstants.QUOTA_RESET) { typeQuota[t.ordinal()] = quota.getTypeQuota(t); } }); entry = pts.lowerEntry(ppath); } return new QuotaUsage.Builder().quota(nQuota).spaceQuota(sQuota) .typeQuota(typeQuota).build(); }
3.68
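The ancestor walk in getGlobalQuota relies on TreeMap ordering: lastEntry() starts at the deepest path and lowerEntry() steps toward shallower ones. A self-contained sketch of that traversal pattern, with made-up paths and -1 standing in for QUOTA_RESET:

import java.util.Map;
import java.util.TreeMap;

public class AncestorWalkDemo {
    public static void main(String[] args) {
        TreeMap<String, Long> quotas = new TreeMap<>();
        quotas.put("/a", 100L);
        quotas.put("/a/b", -1L);   // -1 plays the role of QUOTA_RESET (unset)
        quotas.put("/a/b/c", -1L);

        long quota = -1L;
        Map.Entry<String, Long> entry = quotas.lastEntry();
        // Walk from the deepest path upward until a set quota is found.
        while (entry != null && quota == -1L) {
            if (entry.getValue() != -1L) {
                quota = entry.getValue();
            }
            entry = quotas.lowerEntry(entry.getKey());
        }
        System.out.println("effective quota = " + quota); // 100
    }
}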
hadoop_DateTimeUtils_isRecentlyModified
/** * Tries to identify if an operation was recently executed based on the LMT of * a file or folder. The LMT needs to be more recent than the original request * start time. To allow for any clock skew with the server, an LMT within * DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS of the request start time is * considered to qualify as a recent operation. * @param lastModifiedTime File/Folder LMT * @param expectedLMTUpdateTime original request timestamp which should * have updated the LMT on target * @return true if the LMT is within the timespan for a recent operation, else false */ public static boolean isRecentlyModified(final String lastModifiedTime, final Instant expectedLMTUpdateTime) { long lmtEpochTime = DateTimeUtils.parseLastModifiedTime(lastModifiedTime); long currentEpochTime = expectedLMTUpdateTime.toEpochMilli(); return ((lmtEpochTime > currentEpochTime) || ((currentEpochTime - lmtEpochTime) <= DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS)); }
3.68
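A small runnable sketch of the skew-tolerant comparison in isRecentlyModified, with an assumed 5-minute skew constant (the real DEFAULT_CLOCK_SKEW_WITH_SERVER_IN_MS may differ):

import java.time.Instant;

public class RecentlyModifiedDemo {
    private static final long CLOCK_SKEW_MS = 5 * 60 * 1000; // assumed value

    static boolean isRecentlyModified(long lmtEpochMs, Instant requestStart) {
        long requestEpochMs = requestStart.toEpochMilli();
        // Either the LMT is ahead of our clock, or it lags by no more than the allowed skew.
        return lmtEpochMs > requestEpochMs
                || (requestEpochMs - lmtEpochMs) <= CLOCK_SKEW_MS;
    }

    public static void main(String[] args) {
        Instant now = Instant.now();
        System.out.println(isRecentlyModified(now.toEpochMilli() - 1_000, now));   // true
        System.out.println(isRecentlyModified(now.toEpochMilli() - 600_000, now)); // false
    }
}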
hbase_MutableRegionInfo_isSplitParent
/** * @return True if this is a split parent region. * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. * @see <a href="https://issues.apache.org/jira/browse/HBASE-25210">HBASE-25210</a> */ @Override @Deprecated public boolean isSplitParent() { if (!isSplit()) { return false; } if (!isOffline()) { LOG.warn("Region is split but NOT offline: " + getRegionNameAsString()); } return true; }
3.68
hbase_HRegionServer_buildReportAndSend
/** * Builds the region size report and sends it to the master. Upon successful sending of the * report, the region sizes that were sent are marked as sent. * @param rss The stub to send to the Master * @param regionSizeStore The store containing region sizes */ private void buildReportAndSend(RegionServerStatusService.BlockingInterface rss, RegionSizeStore regionSizeStore) throws ServiceException { RegionSpaceUseReportRequest request = buildRegionSpaceUseReportRequest(Objects.requireNonNull(regionSizeStore)); rss.reportRegionSpaceUse(null, request); // Record the number of size reports sent if (metricsRegionServer != null) { metricsRegionServer.incrementNumRegionSizeReportsSent(regionSizeStore.size()); } }
3.68
zxing_AddressBookParsedResult_getAddressTypes
/** * @return optional descriptions of the type of each address. It could be like "WORK", but * there is no guaranteed or standard format. */ public String[] getAddressTypes() { return addressTypes; }
3.68
Activiti_RootPropertyResolver_properties
/** * Get properties * * @return all property names (in no particular order) */ public Iterable<String> properties() { return map.keySet(); }
3.68
hudi_BaseRollbackHelper_maybeDeleteAndCollectStats
/** * Maybe delete the files of interest and collect stats, or collect stats only. * * @param context instance of {@link HoodieEngineContext} to use. * @param instantToRollback {@link HoodieInstant} of interest for which deletion or stats collection is requested. * @param rollbackRequests List of {@link ListingBasedRollbackRequest} to be operated on. * @param doDelete {@code true} if deletion has to be done. {@code false} if only stats are to be collected w/o performing any deletes. * @return stats collected with or w/o actual deletions. */ List<Pair<String, HoodieRollbackStat>> maybeDeleteAndCollectStats(HoodieEngineContext context, HoodieInstant instantToRollback, List<SerializableHoodieRollbackRequest> rollbackRequests, boolean doDelete, int numPartitions) { return context.flatMap(rollbackRequests, (SerializableFunction<SerializableHoodieRollbackRequest, Stream<Pair<String, HoodieRollbackStat>>>) rollbackRequest -> { List<String> filesToBeDeleted = rollbackRequest.getFilesToBeDeleted(); if (!filesToBeDeleted.isEmpty()) { List<HoodieRollbackStat> rollbackStats = deleteFiles(metaClient, filesToBeDeleted, doDelete); List<Pair<String, HoodieRollbackStat>> partitionToRollbackStats = new ArrayList<>(); rollbackStats.forEach(entry -> partitionToRollbackStats.add(Pair.of(entry.getPartitionPath(), entry))); return partitionToRollbackStats.stream(); } else if (!rollbackRequest.getLogBlocksToBeDeleted().isEmpty()) { HoodieLogFormat.Writer writer = null; final Path filePath; try { String fileId = rollbackRequest.getFileId(); writer = HoodieLogFormat.newWriterBuilder() .onParentPath(FSUtils.getPartitionPath(metaClient.getBasePath(), rollbackRequest.getPartitionPath())) .withFileId(fileId) .withDeltaCommit(instantToRollback.getTimestamp()) .withFs(metaClient.getFs()) .withFileExtension(HoodieLogFile.DELTA_EXTENSION).build(); // generate metadata if (doDelete) { Map<HoodieLogBlock.HeaderMetadataType, String> header = generateHeader(instantToRollback.getTimestamp()); // if update belongs to an existing log file // use the log file path from AppendResult in case the file handle may roll over filePath = writer.appendBlock(new HoodieCommandBlock(header)).logFile().getPath(); } else { filePath = writer.getLogFile().getPath(); } } catch (IOException | InterruptedException io) { throw new HoodieRollbackException("Failed to rollback for instant " + instantToRollback, io); } finally { try { if (writer != null) { writer.close(); } } catch (IOException io) { throw new HoodieIOException("Error appending rollback block", io); } } // This step is intentionally done after writer is closed. Guarantees that // getFileStatus would reflect correct stats and FileNotFoundException is not thrown in // cloud-storage : HUDI-168 Map<FileStatus, Long> filesToNumBlocksRollback = Collections.singletonMap( metaClient.getFs().getFileStatus(Objects.requireNonNull(filePath)), 1L ); return Collections.singletonList( Pair.of(rollbackRequest.getPartitionPath(), HoodieRollbackStat.newBuilder() .withPartitionPath(rollbackRequest.getPartitionPath()) .withRollbackBlockAppendResults(filesToNumBlocksRollback) .build())) .stream(); } else { return Collections.singletonList( Pair.of(rollbackRequest.getPartitionPath(), HoodieRollbackStat.newBuilder() .withPartitionPath(rollbackRequest.getPartitionPath()) .build())) .stream(); } }, numPartitions); }
3.68
AreaShop_BuyRegion_getMoneyBackAmount
/** * Get the amount of money that should be paid to the player when selling the region. * @return The amount of money the player should get back */ public double getMoneyBackAmount() { return getPrice() * (getMoneyBackPercentage() / 100.0); }
3.68
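The percentage arithmetic in getMoneyBackAmount, spelled out with illustrative numbers:

public class MoneyBackDemo {
    public static void main(String[] args) {
        double price = 250.0;            // region price
        double moneyBackPercentage = 80; // configured percentage
        // Same formula as getMoneyBackAmount(): price * (percentage / 100.0)
        double refund = price * (moneyBackPercentage / 100.0);
        System.out.println(refund); // 200.0
    }
}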
hadoop_AzureADAuthenticator_getRequestId
/** * Gets the http request id. * @return http request id. */ public String getRequestId() { return this.requestId; }
3.68
flink_PekkoUtils_createLocalActorSystem
/** * Creates a local actor system without remoting. * * @param configuration instance containing the user provided configuration values * @return The created actor system */ public static ActorSystem createLocalActorSystem(Configuration configuration) { return createActorSystem(getConfig(configuration, null)); }
3.68
flink_AccumulatorHelper_deserializeAndUnwrapAccumulators
/** * Takes the serialized accumulator results and tries to deserialize them using the provided * class loader, and then tries to unwrap the values unchecked. * * @param serializedAccumulators The serialized accumulator results. * @param loader The class loader to use. * @return The deserialized and unwrapped accumulator results. */ public static Map<String, Object> deserializeAndUnwrapAccumulators( Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators, ClassLoader loader) throws IOException, ClassNotFoundException { Map<String, OptionalFailure<Object>> deserializedAccumulators = deserializeAccumulators(serializedAccumulators, loader); if (deserializedAccumulators.isEmpty()) { return Collections.emptyMap(); } Map<String, Object> accumulators = CollectionUtil.newHashMapWithExpectedSize(serializedAccumulators.size()); for (Map.Entry<String, OptionalFailure<Object>> entry : deserializedAccumulators.entrySet()) { accumulators.put(entry.getKey(), entry.getValue().getUnchecked()); } return accumulators; }
3.68
AreaShop_FileManager_sendRentExpireWarnings
/** * Send out rent expire warnings. */ public void sendRentExpireWarnings() { Do.forAll( plugin.getConfig().getInt("expireWarning.regionsPerTick"), getRents(), RentRegion::sendExpirationWarnings ); }
3.68
druid_PoolUpdater_update
/** * Process the given NodeEvent[]. Maybe add / delete some nodes. */ @Override public void update(Observable o, Object arg) { if (!(o instanceof NodeListener)) { return; } if (arg == null || !(arg instanceof NodeEvent[])) { return; } NodeEvent[] events = (NodeEvent[]) arg; if (events.length <= 0) { return; } try { LOG.info("Waiting for Lock to start processing NodeEvents."); lock.lock(); LOG.info("Start processing the NodeEvent[" + events.length + "]."); for (NodeEvent e : events) { if (e.getType() == NodeEventTypeEnum.ADD) { addNode(e); } else if (e.getType() == NodeEventTypeEnum.DELETE) { deleteNode(e); } } } catch (Exception e) { LOG.error("Exception occurred while updating Pool.", e); } finally { lock.unlock(); } }
3.68
querydsl_NumberExpression_max
/** * Create a {@code max(this)} expression * * <p>Get the maximum value of this expression (aggregation)</p> * * @return max(this) */ @Override public NumberExpression<T> max() { if (max == null) { max = Expressions.numberOperation(getType(), Ops.AggOps.MAX_AGG, mixin); } return max; }
3.68
framework_DragAndDropService_constructDragDropDetails
/** * Constructs DragDropDetails based on variables from the client drop target. * Uses DragDropDetailsTranslator if available, otherwise a default * DragDropDetails implementation is used. * * @param dropTarget the drop target whose translator is consulted * @param variables the raw variables received from the client * @return the target details for the drop */ @SuppressWarnings("unchecked") private TargetDetails constructDragDropDetails(DropTarget dropTarget, Map<String, Object> variables) { Map<String, Object> rawDragDropDetails = (Map<String, Object>) variables .get("evt"); TargetDetails dropData = dropTarget .translateDropTargetDetails(rawDragDropDetails); if (dropData == null) { // Create a default DragDropDetails with all the raw variables dropData = new TargetDetailsImpl(rawDragDropDetails, dropTarget); } return dropData; }
3.68
hadoop_SnappyCompressor_reset
/** * Resets compressor so that a new set of input data can be processed. */ @Override public void reset() { finish = false; finished = false; uncompressedDirectBuf.clear(); uncompressedDirectBufLen = 0; compressedDirectBuf.clear(); compressedDirectBuf.limit(0); userBufOff = userBufLen = 0; bytesRead = bytesWritten = 0L; }
3.68
morf_AbstractSelectStatement_drive
/** * @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(org.alfasoftware.morf.util.ObjectTreeTraverser) */ @Override public void drive(ObjectTreeTraverser dispatcher) { dispatcher .dispatch(table) .dispatch(fields) .dispatch(joins) .dispatch(fromSelects) .dispatch(whereCriterion) .dispatch(orderBys); }
3.68
querydsl_NumberExpression_notBetween
/** * Create a {@code this not between from and to} expression * * <p>Is equivalent to {@code this < from || this > to}</p> * * @param from inclusive start of range * @param to inclusive end of range * @return this not between from and to */ public final <A extends Number & Comparable<?>> BooleanExpression notBetween(Expression<A> from, Expression<A> to) { return between(from, to).not(); }
3.68
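A plain-Java check of the equivalence documented for notBetween, outside Querydsl's expression tree:

public class NotBetweenDemo {
    static boolean notBetween(int value, int from, int to) {
        // between(from, to).not() with inclusive bounds...
        return !(value >= from && value <= to);
    }

    public static void main(String[] args) {
        // ...is equivalent to value < from || value > to:
        for (int v : new int[] {1, 5, 10}) {
            boolean viaNegation = notBetween(v, 2, 8);
            boolean viaComparison = v < 2 || v > 8;
            System.out.println(v + ": " + viaNegation + " == " + viaComparison);
        }
    }
}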
hadoop_SharedCacheManager_getSCMStore
/** * For testing purposes only. */ @VisibleForTesting SCMStore getSCMStore() { return this.store; }
3.68
flink_ExternalResourceOptions_getExternalResourceDriverFactoryConfigOptionForResource
/** * Generate the config option key for the factory class name of {@link * org.apache.flink.api.common.externalresource.ExternalResourceDriver}. */ public static String getExternalResourceDriverFactoryConfigOptionForResource( String resourceName) { return keyWithResourceNameAndSuffix(resourceName, EXTERNAL_RESOURCE_DRIVER_FACTORY_SUFFIX); }
3.68
flink_AvroWriters_forReflectRecord
/** * Creates an {@link AvroWriterFactory} for the given type. The Avro writers will use reflection * to create the schema for the type and use that schema to write the records. * * @param type The class of the type to write. */ public static <T> AvroWriterFactory<T> forReflectRecord(Class<T> type) { String schemaString = ReflectData.get().getSchema(type).toString(); AvroBuilder<T> builder = (out) -> createAvroDataFileWriter(schemaString, ReflectDatumWriter::new, out); return new AvroWriterFactory<>(builder); }
3.68
hadoop_TFile_flush
// Intentionally avoid forwarding flush calls to the downstream stream. @Override public void flush() { // do nothing }
3.68
hadoop_GlobExpander_expand
/** * Expand globs in the given <code>filePattern</code> into a collection of * file patterns so that in the expanded set no file pattern has a slash * character ("/") in a curly bracket pair. * <p> * Some examples of how the filePattern is expanded:<br> * <pre> * <b> * filePattern - Expanded file pattern </b> * {a/b} - a/b * /}{a/b} - /}a/b * p{a/b,c/d}s - pa/bs, pc/ds * {a/b,c/d,{e,f}} - a/b, c/d, {e,f} * {a/b,c/d}{e,f} - a/b{e,f}, c/d{e,f} * {a,b}/{b,{c/d,e/f}} - {a,b}/b, {a,b}/c/d, {a,b}/e/f * {a,b}/{c/\d} - {a,b}/c/d * </pre> * * @param filePattern file pattern. * @return expanded file patterns * @throws IOException raised on errors performing I/O. */ public static List<String> expand(String filePattern) throws IOException { List<String> fullyExpanded = new ArrayList<String>(); List<StringWithOffset> toExpand = new ArrayList<StringWithOffset>(); toExpand.add(new StringWithOffset(filePattern, 0)); while (!toExpand.isEmpty()) { StringWithOffset path = toExpand.remove(0); List<StringWithOffset> expanded = expandLeftmost(path); if (expanded == null) { fullyExpanded.add(path.string); } else { toExpand.addAll(0, expanded); } } return fullyExpanded; }
3.68
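The expand loop above is a worklist pattern: take the first item, expand its leftmost group once, and push the results back on the front so expansion proceeds left to right. A toy self-contained version (it handles only a single-level {...} group, unlike the real expander):

import java.util.ArrayList;
import java.util.List;

public class WorklistExpandDemo {
    // Expand only the leftmost non-nested {a,b,...} group; null if none.
    static List<String> expandLeftmost(String s) {
        int open = s.indexOf('{');
        int close = s.indexOf('}', open + 1);
        if (open < 0 || close < 0) {
            return null;
        }
        List<String> out = new ArrayList<>();
        for (String alt : s.substring(open + 1, close).split(",")) {
            out.add(s.substring(0, open) + alt + s.substring(close + 1));
        }
        return out;
    }

    public static void main(String[] args) {
        List<String> toExpand = new ArrayList<>();
        List<String> fullyExpanded = new ArrayList<>();
        toExpand.add("p{a,b}s{c,d}");
        while (!toExpand.isEmpty()) {
            String s = toExpand.remove(0);
            List<String> expanded = expandLeftmost(s);
            if (expanded == null) {
                fullyExpanded.add(s);
            } else {
                toExpand.addAll(0, expanded); // keep left-to-right order
            }
        }
        System.out.println(fullyExpanded); // [pasc, pasd, pbsc, pbsd]
    }
}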
flink_ModuleManager_useModules
/** * Enable modules in use with the declared name order. Modules that have been loaded but do * not appear in the names varargs become unused. * * @param names module names to be used * @throws ValidationException when module names contain an unloaded name */ public void useModules(String... names) { checkNotNull(names, "names cannot be null"); Set<String> deduplicateNames = new HashSet<>(); for (String name : names) { if (!loadedModules.containsKey(name)) { throw new ValidationException( String.format("No module with name '%s' exists", name)); } if (!deduplicateNames.add(name)) { throw new ValidationException( String.format("Module '%s' appears more than once", name)); } } usedModules.clear(); usedModules.addAll(Arrays.asList(names)); }
3.68
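The duplicate check in useModules leans on Set.add returning false for repeats; a minimal standalone version of that validation:

import java.util.HashSet;
import java.util.Set;

public class DedupCheckDemo {
    static void validateNoDuplicates(String... names) {
        Set<String> seen = new HashSet<>();
        for (String name : names) {
            // add() returns false if the element was already present.
            if (!seen.add(name)) {
                throw new IllegalArgumentException(
                        String.format("Module '%s' appears more than once", name));
            }
        }
    }

    public static void main(String[] args) {
        validateNoDuplicates("core", "hive"); // ok
        try {
            validateNoDuplicates("core", "hive", "core");
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage()); // Module 'core' appears more than once
        }
    }
}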
morf_AbstractSqlDialectTest_testMergeWhenAllFieldsInPrimaryKey
/** * Ensure that we can merge into a table where all columns are in the primary key. */ @Test public void testMergeWhenAllFieldsInPrimaryKey() { TableReference foo = new TableReference("foo").as("foo"); TableReference somewhere = new TableReference("somewhere"); SelectStatement sourceStmt = new SelectStatement(somewhere.field("newId").as("id")).from(somewhere).alias("somewhere"); MergeStatement stmt = new MergeStatement().into(foo).tableUniqueKey(foo.field("id")).from(sourceStmt); assertEquals("Merge scripts are not the same", expectedMergeForAllPrimaryKeys(), testDialect.convertStatementToSQL(stmt)); }
3.68
flink_RpcEndpoint_internalCallOnStart
/** * Internal method which is called by the RpcService implementation to start the RpcEndpoint. * * @throws Exception indicating that the rpc endpoint could not be started. If an exception * occurs, then the rpc endpoint will automatically terminate. */ public final void internalCallOnStart() throws Exception { validateRunsInMainThread(); isRunning = true; onStart(); }
3.68
morf_UpgradePath_addCommentsToDropUpgradeStatusTable
/** * At the end of the StringBuilder given, add comments to explain how to drop * {@value UpgradeStatusTableService#UPGRADE_STATUS} table if the upgrade is * done manually. */ private void addCommentsToDropUpgradeStatusTable(final StringBuilder sqlOutput) { String separator = System.getProperty("line.separator"); sqlOutput.append("-- WARNING - This upgrade step creates a temporary table " + UpgradeStatusTableService.UPGRADE_STATUS + "." + separator); sqlOutput.append("-- WARNING - If the upgrade is run automatically, the table will be automatically removed at a later point." + separator); sqlOutput.append("-- WARNING - If this step is being applied manually, the table must be manually removed - to do so, uncomment the following SQL lines." + separator); sqlOutput.append("-- WARNING - Manual removal should not be applied during full deployment of the application to an empty database." + separator); for (String statement : connectionResources.sqlDialect().dropStatements(SchemaUtils.table(UpgradeStatusTableService.UPGRADE_STATUS))) { sqlOutput.append("-- " + statement + separator); } }
3.68
hbase_HMaster_switchSnapshotCleanup
/** * Turn on/off Snapshot Cleanup Chore * @param on indicates whether Snapshot Cleanup Chore is to be run */ void switchSnapshotCleanup(final boolean on, final boolean synchronous) throws IOException { if (synchronous) { synchronized (this.snapshotCleanerChore) { switchSnapshotCleanup(on); } } else { switchSnapshotCleanup(on); } }
3.68
framework_BeanItem_setBean
/** * Changes the Java Bean this item is based on. * <p> * This will cause any existing properties to be re-mapped to the new bean. * Any added custom properties which are not of type {@link MethodProperty} * or {@link NestedMethodProperty} will not be updated to reflect the change * of bean. * <p> * Changing the bean will fire value change events for all properties of * type {@link MethodProperty} or {@link NestedMethodProperty}. * * @param bean * The new bean to use for this item, not <code>null</code> * @since 7.7.7 */ public void setBean(BT bean) { if (bean == null) { throw new IllegalArgumentException("Bean cannot be null"); } if (getBean().getClass() != bean.getClass()) { throw new IllegalArgumentException( "The new bean class " + bean.getClass().getName() + " does not match the old bean class " + getBean().getClass()); } // Remap properties for (Object propertyId : getItemPropertyIds()) { Property p = getItemProperty(propertyId); if (p instanceof MethodProperty) { MethodProperty mp = (MethodProperty) p; assert (mp.getInstance() == getBean()); mp.setInstance(bean); } else if (p instanceof NestedMethodProperty) { NestedMethodProperty nmp = (NestedMethodProperty) p; assert (nmp.getInstance() == getBean()); nmp.setInstance(bean); } } this.bean = bean; }
3.68
hudi_StreamReadOperator_consumeAsMiniBatch
/** * Consumes at most {@link #MINI_BATCH_SIZE} number of records * for the given input split {@code split}. * * <p>Note: close the input format and remove the input split for the queue {@link #splits} * if the split reads to the end. * * @param split The input split */ private void consumeAsMiniBatch(MergeOnReadInputSplit split) throws IOException { for (int i = 0; i < MINI_BATCH_SIZE; i++) { if (!format.reachedEnd()) { sourceContext.collect(format.nextRecord(null)); split.consume(); } else { // close the input format format.close(); // remove the split splits.poll(); break; } } }
3.68
dubbo_ServiceInstanceMetadataUtils_setDefaultParams
/** * Set the default parameters via the specified {@link URL providerURL} * * @param params the parameters * @param providerURL the provider's {@link URL} */ private static void setDefaultParams(Map<String, String> params, URL providerURL) { for (String parameterName : DEFAULT_REGISTER_PROVIDER_KEYS) { String parameterValue = providerURL.getParameter(parameterName); if (!isBlank(parameterValue)) { params.put(parameterName, parameterValue); } } }
3.68
framework_ComboBox_changeVariables
/** * Invoked when the value of a variable has changed. * * @see com.vaadin.ui.AbstractComponent#changeVariables(java.lang.Object, * java.util.Map) */ @Override public void changeVariables(Object source, Map<String, Object> variables) { // Not calling super.changeVariables due the history of select // component hierarchy // Selection change if (variables.containsKey("selected")) { final String[] ka = (String[]) variables.get("selected"); // Single select mode if (ka.length == 0) { // Allows deselection only if the deselected item is visible final Object current = getValue(); final Collection<?> visible = getVisibleItemIds(); if (visible != null && visible.contains(current)) { setValue(null, true); } } else { final Object id = itemIdMapper.get(ka[0]); if (id != null && id.equals(getNullSelectionItemId())) { setValue(null, true); } else { setValue(id, true); } } } String newFilter; if ((newFilter = (String) variables.get("filter")) != null) { // this is a filter request currentPage = ((Integer) variables.get("page")).intValue(); filterstring = newFilter; if (filterstring != null) { filterstring = filterstring.toLowerCase(getLocale()); } requestRepaint(); } else if (isNewItemsAllowed()) { // New option entered (and it is allowed) final String newitem = (String) variables.get("newitem"); if (newitem != null && !newitem.isEmpty()) { getNewItemHandler().addNewItem(newitem); // rebuild list filterstring = null; prevfilterstring = null; } } if (variables.containsKey(FocusEvent.EVENT_ID)) { fireEvent(new FocusEvent(this)); } if (variables.containsKey(BlurEvent.EVENT_ID)) { fireEvent(new BlurEvent(this)); } }
3.68
hadoop_ActiveAuditManagerS3A_modifyRequest
/** * Forward to the inner span. * {@inheritDoc} */ @Override public SdkRequest modifyRequest(Context.ModifyRequest context, ExecutionAttributes executionAttributes) { return span.modifyRequest(context, executionAttributes); }
3.68
flink_MapValue_isEmpty
/* * (non-Javadoc) * @see java.util.Map#isEmpty() */ @Override public boolean isEmpty() { return this.map.isEmpty(); }
3.68
hbase_RemoteWithExtrasException_getHostname
/** Returns null if not set */ public String getHostname() { return this.hostname; }
3.68
hbase_ParseFilter_getFilterName
/** * Returns the filter name given a simple filter expression * <p> * @param filterStringAsByteArray a simple filter expression * @return name of filter in the simple filter expression */ public static byte[] getFilterName(byte[] filterStringAsByteArray) { int filterNameStartIndex = 0; int filterNameEndIndex = 0; for (int i = filterNameStartIndex; i < filterStringAsByteArray.length; i++) { if ( filterStringAsByteArray[i] == ParseConstants.LPAREN || filterStringAsByteArray[i] == ParseConstants.WHITESPACE ) { filterNameEndIndex = i; break; } } if (filterNameEndIndex == 0) { throw new IllegalArgumentException("Incorrect Filter Name"); } byte[] filterName = new byte[filterNameEndIndex - filterNameStartIndex]; Bytes.putBytes(filterName, 0, filterStringAsByteArray, 0, filterNameEndIndex - filterNameStartIndex); return filterName; }
3.68
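A self-contained sketch of the scan in getFilterName, stopping at the first '(' or space (plain JDK array copy in place of HBase's Bytes.putBytes):

import java.util.Arrays;

public class FilterNameDemo {
    static byte[] getFilterName(byte[] expr) {
        int end = 0;
        for (int i = 0; i < expr.length; i++) {
            if (expr[i] == '(' || expr[i] == ' ') {
                end = i;
                break;
            }
        }
        if (end == 0) {
            throw new IllegalArgumentException("Incorrect Filter Name");
        }
        // Bytes.putBytes in the snippet is equivalent to copying the prefix.
        return Arrays.copyOfRange(expr, 0, end);
    }

    public static void main(String[] args) {
        byte[] name = getFilterName("PrefixFilter ('row')".getBytes());
        System.out.println(new String(name)); // PrefixFilter
    }
}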
hbase_Procedure_hasLock
/** * This is used in conjunction with {@link #holdLock(Object)}. If {@link #holdLock(Object)} * returns true, the procedure executor will call acquireLock() once and thereafter not call * {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls release/acquire * around each invocation of {@link #execute(Object)}. * @see #holdLock(Object) * @return true if the procedure has the lock, false otherwise. */ public final boolean hasLock() { return locked; }
3.68
hbase_MultiRowRangeFilter_setFoundFirstRange
/** * Sets {@link #foundFirstRange} to {@code true}, indicating that we found a matching row range. */ public void setFoundFirstRange() { this.foundFirstRange = true; }
3.68
hbase_OrderedBytes_decodeNumericAsDouble
/** * Decode a primitive {@code double} value from the Numeric encoding. Numeric encoding is based on * {@link BigDecimal}; in the event the encoded value is larger than can be represented in a * {@code double}, this method performs an implicit narrowing conversion as described in * {@link BigDecimal#doubleValue()}. * @throws NullPointerException when the encoded value is {@code NULL}. * @throws IllegalArgumentException when the encoded value is not a Numeric. * @see #encodeNumeric(PositionedByteRange, double, Order) * @see BigDecimal#doubleValue() */ public static double decodeNumericAsDouble(PositionedByteRange src) { // TODO: should an encoded NULL value throw unexpectedHeader() instead? if (isNull(src)) { throw new NullPointerException("A null value cannot be decoded to a double."); } if (isNumericNaN(src)) { src.get(); return Double.NaN; } if (isNumericZero(src)) { src.get(); return Double.valueOf(0.0); } byte header = -1 == Integer.signum(src.peek()) ? DESCENDING.apply(src.peek()) : src.peek(); if (header == NEG_INF) { src.get(); return Double.NEGATIVE_INFINITY; } else if (header == POS_INF) { src.get(); return Double.POSITIVE_INFINITY; } else { return decodeNumericValue(src).doubleValue(); } }
3.68
hbase_StoreScanner_trySkipToNextRow
/** * See if we should actually SEEK or rather just SKIP to the next Cell (see HBASE-13109). * ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row, or seek to an * arbitrary seek key. This method decides whether a seek is the most efficient _actual_ way to * get us to the requested cell (SEEKs are more expensive than SKIP, SKIP, SKIP inside the * current, loaded block). It does this by looking at the next indexed key of the current HFile. * This key is then compared with the _SEEK_ key, where a SEEK key is an artificial 'last possible * key on the row' (only in here, we avoid actually creating a SEEK key; in the compare we work * with the current Cell but compare as though it were a seek key; see down in * matcher.compareKeyForNextRow, etc). If the compare gets us onto the next block we SEEK, * otherwise we just SKIP to the next requested cell. * <p> * Other notes: * <ul> * <li>Rows can straddle block boundaries</li> * <li>Versions of columns can straddle block boundaries (i.e. column C1 at T1 might be in a * different block than column C1 at T2)</li> * <li>We want to SKIP if the chance is high that we'll find the desired Cell after a few * SKIPs...</li> * <li>We want to SEEK when the chance is high that we'll be able to seek past many Cells, * especially if we know we need to go to the next block.</li> * </ul> * <p> * A good proxy (best effort) to determine whether SKIP is better than SEEK is whether we'll * likely end up seeking to the next block (or past the next block) to get our next column. * Example: * * <pre> * | BLOCK 1 | BLOCK 2 | * | r1/c1, r1/c2, r1/c3 | r1/c4, r1/c5, r2/c1 | * ^ ^ * | | * Next Index Key SEEK_NEXT_ROW (before r2/c1) * * * | BLOCK 1 | BLOCK 2 | * | r1/c1/t5, r1/c1/t4, r1/c1/t3 | r1/c1/t2, r1/c1/t1, r1/c2/t3 | * ^ ^ * | | * Next Index Key SEEK_NEXT_COL * </pre> * * Now imagine we want columns c1 and c3 (see first diagram above), the 'Next Index Key' of r1/c4 * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In the second case, say we only * want one version of c1, after we have it, a SEEK_COL will be issued to get to c2. Looking at * the 'Next Index Key', it would land us in the next block, so we should SEEK. In other scenarios * where the SEEK will not land us in the next block, it is very likely better to issue a series * of SKIPs. * @param cell current cell * @return true means skip to next row, false means not */ protected boolean trySkipToNextRow(Cell cell) throws IOException { Cell nextCell = null; // used to guard against a changed next indexed key by doing an identity comparison // when the identity changes we need to compare the bytes again Cell previousIndexedKey = null; do { Cell nextIndexedKey = getNextIndexedKey(); if ( nextIndexedKey != null && nextIndexedKey != KeyValueScanner.NO_NEXT_INDEXED_KEY && (nextIndexedKey == previousIndexedKey || matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) ) { this.heap.next(); ++kvsScanned; previousIndexedKey = nextIndexedKey; } else { return false; } } while ((nextCell = this.heap.peek()) != null && CellUtil.matchingRows(cell, nextCell)); return true; }
3.68
hbase_HFileOutputFormat2_createFamilyDataBlockEncodingMap
/** * Runs inside the task to deserialize column family to data block encoding type map from the * configuration. * @param conf to read the serialized values from * @return a map from column family to HFileDataBlockEncoder for the configured data block type * for the family */ @InterfaceAudience.Private static Map<byte[], DataBlockEncoding> createFamilyDataBlockEncodingMap(Configuration conf) { Map<byte[], String> stringMap = createFamilyConfValueMap(conf, DATABLOCK_ENCODING_FAMILIES_CONF_KEY); Map<byte[], DataBlockEncoding> encoderMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], String> e : stringMap.entrySet()) { encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); } return encoderMap; }
3.68
flink_TaskStatsRequestCoordinator_handleFailedResponse
/** * Handles the failed stats response by canceling the corresponding unfinished pending request. * * @param requestId ID of the request to cancel. * @param cause Cause of the cancelling (can be <code>null</code>). */ public void handleFailedResponse(int requestId, @Nullable Throwable cause) { synchronized (lock) { if (isShutDown) { return; } PendingStatsRequest<T, V> pendingRequest = pendingRequests.remove(requestId); if (pendingRequest != null) { log.info("Cancelling request {}", requestId, cause); pendingRequest.discard(cause); rememberRecentRequestId(requestId); } } }
3.68
pulsar_TopicsBase_extractException
// Return an error code based on the exception we got, indicating whether the client should retry with the same broker. private void extractException(Exception e, ProducerAck produceMessageResult) { // A single exception cannot be an instance of two unrelated classes, so the original '&&' made // this condition always true; '||' matches the intent of treating either exception as retriable. if (!(e instanceof BrokerServiceException.TopicFencedException || e instanceof ManagedLedgerException)) { produceMessageResult.setErrorCode(2); } else { produceMessageResult.setErrorCode(1); } produceMessageResult.setErrorMsg(e.getMessage()); }
3.68
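Why the original '&&' in extractException could never hold: one object cannot be an instance of two unrelated exception classes. A throwaway check with stand-in class names:

public class InstanceofDemo {
    static class FencedException extends RuntimeException { }
    static class LedgerException extends RuntimeException { }

    public static void main(String[] args) {
        Exception e = new FencedException();
        // Unrelated classes: at most one instanceof test can hold for a given object.
        System.out.println(e instanceof FencedException && e instanceof LedgerException); // false
        System.out.println(e instanceof FencedException || e instanceof LedgerException); // true
    }
}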
hbase_HBaseTrustManager_performHostVerification
/** * Compares peer's hostname with the one stored in the provided client certificate. Performs * verification with the help of provided HostnameVerifier. * @param inetAddress Peer's inet address. * @param certificate Peer's certificate * @throws CertificateException Thrown if the provided certificate doesn't match the peer * hostname. */ private void performHostVerification(InetAddress inetAddress, X509Certificate certificate) throws CertificateException { String hostAddress = ""; String hostName = ""; try { hostAddress = inetAddress.getHostAddress(); hostnameVerifier.verify(hostAddress, certificate); } catch (SSLException addressVerificationException) { // If we fail with hostAddress, we should try the hostname. // The inetAddress may have been created with a hostname, in which case getHostName() will // return quickly below. If not, a reverse lookup will happen, which can be expensive. // We provide the option to skip the reverse lookup if preferring to fail fast. // Handle logging here to aid debugging. The easiest way to check for an existing // hostname is through toString, see javadoc. String inetAddressString = inetAddress.toString(); if (!inetAddressString.startsWith("/")) { LOG.debug( "Failed to verify host address: {}, but inetAddress {} has a hostname, trying that", hostAddress, inetAddressString, addressVerificationException); } else if (allowReverseDnsLookup) { LOG.debug( "Failed to verify host address: {}, attempting to verify host name with reverse dns", hostAddress, addressVerificationException); } else { LOG.debug("Failed to verify host address: {}, but reverse dns lookup is disabled", hostAddress, addressVerificationException); throw new CertificateException( "Failed to verify host address, and reverse lookup is disabled", addressVerificationException); } try { hostName = inetAddress.getHostName(); hostnameVerifier.verify(hostName, certificate); } catch (SSLException hostnameVerificationException) { LOG.error("Failed to verify host address: {}", hostAddress, addressVerificationException); LOG.error("Failed to verify hostname: {}", hostName, hostnameVerificationException); throw new CertificateException("Failed to verify both host address and host name", hostnameVerificationException); } } }
3.68
hibernate-validator_PlatformResourceBundleLocator_getResourceBundle
/** * Search current thread classloader for the resource bundle. If not found, * search validator (this) classloader. * * @param locale The locale of the bundle to load. * * @return the resource bundle or {@code null} if none is found. */ @Override public ResourceBundle getResourceBundle(Locale locale) { if ( !preloadedResourceBundles.isEmpty() ) { // we need to use containsKey() as the cached resource bundle can be null if ( preloadedResourceBundles.containsKey( locale ) ) { return preloadedResourceBundles.get( locale ); } else { throw LOG.uninitializedLocale( locale ); } } return doGetResourceBundle( locale ); }
3.68
flink_InputGateMetrics_refreshAndGetTotal
/** * Iterates over all input channels and collects the total number of queued buffers in a * best-effort way. * * @return total number of queued buffers */ long refreshAndGetTotal() { long total = 0; for (InputChannel channel : inputGate.getInputChannels().values()) { if (channel instanceof RemoteInputChannel) { RemoteInputChannel rc = (RemoteInputChannel) channel; total += rc.unsynchronizedGetNumberOfQueuedBuffers(); } } return total; }
3.68
hadoop_MutableGaugeInt_incr
/** * Increment by delta * @param delta of the increment */ public void incr(int delta) { value.addAndGet(delta); setChanged(); }
3.68
hadoop_ServiceShutdownHook_run
/** * Shutdown handler. * Query the service hook reference -if it is still valid the * {@link Service#stop()} operation is invoked. */ @Override public void run() { shutdown(); }
3.68
dubbo_Stack_peek
/** * peek. * * @return the last element. */ public E peek() { if (mSize == 0) { throw new EmptyStackException(); } return mElements.get(mSize - 1); }
3.68
hadoop_OBSCommonUtils_translateException
/** * Translate an exception raised in an operation into an IOException. The * specific type of IOException depends on the class of {@link ObsException} * passed in, and any status codes included in the operation. That is: HTTP * error codes are examined and can be used to build a more specific * response. * * @param operation operation * @param path path operated on (must not be null) * @param exception obs exception raised * @return an IOE which wraps the caught exception. */ static IOException translateException(final String operation, final Path path, final ObsException exception) { return translateException(operation, path.toString(), exception); }
3.68
hbase_HBaseTestingUtility_createMultiRegionsInMeta
/** * Create rows in hbase:meta for regions of the specified table with the specified start keys. The * first startKey should be a 0 length byte array if you want to form a proper range of regions. * @return list of region info for regions added to meta */ public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf, final TableDescriptor htd, byte[][] startKeys) throws IOException { Table meta = getConnection().getTable(TableName.META_TABLE_NAME); Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List<RegionInfo> newRegions = new ArrayList<>(startKeys.length); MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED); // add custom ones for (int i = 0; i < startKeys.length; i++) { int j = (i + 1) % startKeys.length; RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i]) .setEndKey(startKeys[j]).build(); MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1); newRegions.add(hri); } meta.close(); return newRegions; }
3.68
flink_EvictingWindowReader_process
/** * Reads window state generated without any preaggregation such as {@code WindowedStream#apply} * and {@code WindowedStream#process}. * * @param uid The uid of the operator. * @param readerFunction The window reader function. * @param keyType The key type of the window. * @param stateType The type of records stored in state. * @param outputType The output type of the reader function. * @param <K> The type of the key. * @param <T> The type of the records stored in state. * @param <OUT> The output type of the reader function. * @return A {@code DataSet} of objects read from keyed state. * @throws IOException If the savepoint does not contain the specified uid. */ public <K, T, OUT> DataSet<OUT> process( String uid, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> stateType, TypeInformation<OUT> outputType) throws IOException { WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator = WindowReaderOperator.evictingWindow( new ProcessEvictingWindowReader<>(readerFunction), keyType, windowSerializer, stateType, env.getConfig()); return readWindowOperator(uid, outputType, operator); }
3.68
flink_AvroParquetWriters_forReflectRecord
/** * Creates a ParquetWriterFactory for the given type. The Parquet writers will use Avro to * reflectively create a schema for the type and use that schema to write the columnar data. * * @param type The class of the type to write. */ public static <T> ParquetWriterFactory<T> forReflectRecord(Class<T> type) { final String schemaString = ReflectData.get().getSchema(type).toString(); final ParquetBuilder<T> builder = (out) -> createAvroParquetWriter(schemaString, ReflectData.get(), out); return new ParquetWriterFactory<>(builder); }
3.68
hibernate-validator_ConstraintValidatorFactoryImpl_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
framework_IndexedContainer_getType
/** * Gets the type of a Property stored in the list. * * @param propertyId * the ID of the Property. * @return Type of the requested Property */ @Override public Class<?> getType(Object propertyId) { return types.get(propertyId); }
3.68
flink_ResolveCallByArgumentsRule_getOptionalTypeInference
/** Temporary method until all calls define a type inference. */ private Optional<TypeInference> getOptionalTypeInference(FunctionDefinition definition) { if (definition instanceof ScalarFunctionDefinition || definition instanceof TableFunctionDefinition || definition instanceof AggregateFunctionDefinition || definition instanceof TableAggregateFunctionDefinition) { return Optional.empty(); } final TypeInference inference = definition.getTypeInference(resolutionContext.typeFactory()); if (inference.getOutputTypeStrategy() != TypeStrategies.MISSING) { return Optional.of(inference); } else { return Optional.empty(); } }
3.68
hadoop_WordStandardDeviation_map
/** * Emits 3 key-value pairs for counting the word, its length, and the * square of its length. Outputs are (Text, LongWritable). * * @param value * This will be a line of text coming in from our input file. */ public void map(Object key, Text value, Context context) throws IOException, InterruptedException { StringTokenizer itr = new StringTokenizer(value.toString()); while (itr.hasMoreTokens()) { String string = itr.nextToken(); this.wordLen.set(string.length()); // the square of an integer is an integer... this.wordLenSq.set((long) Math.pow(string.length(), 2.0)); context.write(LENGTH, this.wordLen); context.write(SQUARE, this.wordLenSq); context.write(COUNT, ONE); } }
3.68
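The reason the mapper emits squares: standard deviation reduces to just count, sum, and sum of squares via Var(X) = E[X^2] - E[X]^2. A quick standalone check of that identity:

public class StdDevDemo {
    public static void main(String[] args) {
        int[] lengths = {3, 5, 4, 9};
        long count = 0, sum = 0, sumSq = 0;
        for (int len : lengths) {
            count++;
            sum += len;
            sumSq += (long) len * len; // same quantity the mapper emits under SQUARE
        }
        double mean = (double) sum / count;
        // The three running totals are sufficient; no second pass over the data needed.
        double stddev = Math.sqrt((double) sumSq / count - mean * mean);
        System.out.println(stddev); // ~2.277
    }
}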
flink_UnsortedGrouping_sum
/** * Syntactic sugar for aggregate (SUM, field). * * @param field The index of the Tuple field on which the aggregation function is applied. * @return An AggregateOperator that represents the summed DataSet. * @see org.apache.flink.api.java.operators.AggregateOperator */ public AggregateOperator<T> sum(int field) { return this.aggregate(Aggregations.SUM, field, Utils.getCallLocationName()); }
3.68
hbase_InternalScan_isCheckOnlyMemStore
/** * Returns true if only the MemStore should be checked. False if not. * @return true to only check MemStore */ public boolean isCheckOnlyMemStore() { return (memOnly); }
3.68
hudi_Pair_toString
/** * <p> * Formats the receiver using the given format. * </p> * * <p> * This uses {@link java.util.Formattable} to perform the formatting. Two variables may be used to embed the left and * right elements. Use {@code %1$s} for the left element (key) and {@code %2$s} for the right element (value). The * default format used by {@code toString()} is {@code (%1$s,%2$s)}. * </p> * * @param format the format string, optionally containing {@code %1$s} and {@code %2$s}, not null * @return the formatted string, not null */ public String toString(final String format) { return String.format(format, getLeft(), getRight()); }
3.68
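A quick illustration of the positional format arguments described in Pair.toString(format), using plain String.format (no Hudi types needed; values are made up):

public class PairFormatDemo {
    public static void main(String[] args) {
        String left = "commitTime";
        String right = "20240101";
        // %1$s binds the left element (key), %2$s the right element (value).
        System.out.println(String.format("(%1$s,%2$s)", left, right));  // (commitTime,20240101)
        System.out.println(String.format("%2$s <- %1$s", left, right)); // 20240101 <- commitTime
    }
}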
morf_InsertStatementDefaulter_addColumns
/** * Adds the list of {@code fields} to the {@code columnsWithValues}. * * @param fields the fields to add. * @param columnsWithValues the set to add to. */ private void addColumns(List<AliasedField> fields, Set<String> columnsWithValues) { for (AliasedField field : fields) { columnsWithValues.add(field.getAlias().toUpperCase()); } }
3.68
hudi_TransactionUtils_getInflightAndRequestedInstants
/** * Get the inflight and requested instants. * * @param metaClient the table meta client used to read the active timeline. * @return timestamps of the inflight and requested instants. */ public static Set<String> getInflightAndRequestedInstants(HoodieTableMetaClient metaClient) { // collect InflightAndRequest instants for deltaCommit/commit/compaction/clustering Set<String> timelineActions = CollectionUtils .createImmutableSet(HoodieTimeline.REPLACE_COMMIT_ACTION, HoodieTimeline.COMPACTION_ACTION, HoodieTimeline.DELTA_COMMIT_ACTION, HoodieTimeline.COMMIT_ACTION); return metaClient .getActiveTimeline() .getTimelineOfActions(timelineActions) .filterInflightsAndRequested() .getInstantsAsStream() .map(HoodieInstant::getTimestamp) .collect(Collectors.toSet()); }
3.68
hbase_Procedure_haveSameParent
/** * @param a the first procedure to be compared. * @param b the second procedure to be compared. * @return true if the two procedures have the same parent */ public static boolean haveSameParent(Procedure<?> a, Procedure<?> b) { return a.hasParent() && b.hasParent() && (a.getParentProcId() == b.getParentProcId()); }
3.68
hbase_RegionServerSnapshotManager_initialize
/** * Create a default snapshot handler - uses a zookeeper based member controller. * @param rss region server running the handler * @throws KeeperException if the zookeeper cluster cannot be reached */ @Override public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; ZKWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); // read in the snapshot request configuration properties Configuration conf = rss.getConfiguration(); long keepAlive = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); int opThreads = conf.getInt(SNAPSHOT_REQUEST_THREADS_KEY, SNAPSHOT_REQUEST_THREADS_DEFAULT); // create the actual snapshot procedure member ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new SnapshotSubprocedureBuilder()); }
3.68
hadoop_RegisterApplicationMasterRequest_newInstance
/** * Create a new instance of <code>RegisterApplicationMasterRequest</code>. * If <em>port, trackingUrl</em> is not used, use the following default value: * <ul> * <li>port: -1</li> * <li>trackingUrl: null</li> * </ul> * The port is allowed to be any integer larger than or equal to -1. * @param host host on which the ApplicationMaster is running. * @param port the RPC port on which the ApplicationMaster is responding. * @param trackingUrl tracking URL for the ApplicationMaster. * @return the new instance of <code>RegisterApplicationMasterRequest</code> */ @Public @Stable public static RegisterApplicationMasterRequest newInstance(String host, int port, String trackingUrl) { RegisterApplicationMasterRequest request = Records.newRecord(RegisterApplicationMasterRequest.class); request.setHost(host); request.setRpcPort(port); request.setTrackingUrl(trackingUrl); return request; }
3.68
hbase_MetaTableAccessor_fullScan
/** * Performs a full scan of <code>hbase:meta</code>. * @param connection connection we're using * @param type scanned part of meta * @return List of {@link Result} */ private static List<Result> fullScan(Connection connection, QueryType type) throws IOException { ClientMetaTableAccessor.CollectAllVisitor v = new ClientMetaTableAccessor.CollectAllVisitor(); scanMeta(connection, null, null, type, v); return v.getResults(); }
3.68
hbase_MasterObserver_postClearDeadServers
/** * Called after clearing dead region servers. */ default void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx, List<ServerName> servers, List<ServerName> notClearedServers) throws IOException { }
3.68
Activiti_IntegerToString_primTransform
/** * {@inheritDoc} */ @Override protected Object primTransform(Object anObject) throws Exception { return ((Integer) anObject).toString(); }
3.68
flink_ProcessorArchitecture_getAlternativeNames
/** * Gets the alternative names for the processor architecture. Alternative names are for example * "i586" for "x86", or "x86_64" for "amd64". */ public List<String> getAlternativeNames() { return alternativeNames; }
3.68
hbase_SpaceQuotaRefresherChore_getInitialDelay
/** * Extracts the initial delay for the chore from the configuration. * @param conf The configuration object. * @return The configured chore initial delay or the default value. */ static long getInitialDelay(Configuration conf) { return conf.getLong(POLICY_REFRESHER_CHORE_DELAY_KEY, POLICY_REFRESHER_CHORE_DELAY_DEFAULT); }
3.68
hadoop_SchedulerAppReport_isPending
/** * Is this application pending? * @return true if it is else false. */ public boolean isPending() { return pending; }
3.68
hbase_CommonFSUtils_getDefaultReplication
/** * Get the default replication. * @param fs filesystem object * @param path path of file * @return default replication for the path's filesystem */ public static short getDefaultReplication(final FileSystem fs, final Path path) { return fs.getDefaultReplication(path); }
3.68
hadoop_NvidiaGPUPluginForRuntimeV2_computeCostOfDevices
/** * The cost function used to calculate costs of a subset of devices. * It calculates the link weight of each pair in the non-duplicated * combinations of devices. */ @VisibleForTesting public int computeCostOfDevices(Device[] devices) { int cost = 0; String gpuIndex0; String gpuIndex1; for (int i = 0; i < devices.length; i++) { gpuIndex0 = String.valueOf(devices[i].getMinorNumber()); for (int j = i + 1; j < devices.length; j++) { gpuIndex1 = String.valueOf(devices[j].getMinorNumber()); cost += this.devicePairToWeight.get(gpuIndex0 + "-" + gpuIndex1); } } return cost; }
3.68
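The nested loop in computeCostOfDevices enumerates each unordered pair exactly once because j starts at i + 1, building keys in low-high minor-number order. A self-contained sketch with a made-up weight map:

import java.util.HashMap;
import java.util.Map;

public class PairwiseCostDemo {
    public static void main(String[] args) {
        Map<String, Integer> pairToWeight = new HashMap<>();
        pairToWeight.put("0-1", 2);
        pairToWeight.put("0-2", 4);
        pairToWeight.put("1-2", 1);

        int[] devices = {0, 1, 2};
        int cost = 0;
        // i < j guarantees each pair is counted once and keys stay in "low-high" form.
        for (int i = 0; i < devices.length; i++) {
            for (int j = i + 1; j < devices.length; j++) {
                cost += pairToWeight.get(devices[i] + "-" + devices[j]);
            }
        }
        System.out.println(cost); // 7
    }
}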
flink_SortMergeSubpartitionReader_unsynchronizedGetNumberOfQueuedBuffers
// suppress warning as this method is only for unsafe purpose. @SuppressWarnings("FieldAccessNotGuarded") @Override public int unsynchronizedGetNumberOfQueuedBuffers() { return buffersRead.size(); }
3.68
hudi_TableHeader_indexOf
/** * Index of the field in the table. * * @param fieldName Field Name */ public int indexOf(String fieldName) { return fieldNames.indexOf(fieldName); }
3.68
AreaShop_AreaShop_cleanVersion
/** * Clean up a version number. * @param version Version to clean * @return Cleaned up version (removed prefixes and suffixes) */ private String cleanVersion(String version) { version = version.toLowerCase(); // Strip 'v' as used on Github tags if(version.startsWith("v")) { version = version.substring(1); } // Strip build number as used by Jenkins if(version.contains("#")) { version = version.substring(0, version.indexOf("#")); } return version; }
3.68
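The normalization in cleanVersion, reimplemented standalone and run on representative inputs:

public class CleanVersionDemo {
    static String cleanVersion(String version) {
        version = version.toLowerCase();
        if (version.startsWith("v")) {          // strip Github-style tag prefix
            version = version.substring(1);
        }
        if (version.contains("#")) {            // strip Jenkins-style build number
            version = version.substring(0, version.indexOf("#"));
        }
        return version;
    }

    public static void main(String[] args) {
        System.out.println(cleanVersion("V1.2.3#45")); // 1.2.3
        System.out.println(cleanVersion("2.0"));       // 2.0
    }
}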
framework_VLoadingIndicator_hide
/** * Hides the loading indicator (if visible). Cancels any possibly running * timers. */ public void hide() { firstTimer.cancel(); secondTimer.cancel(); thirdTimer.cancel(); getElement().getStyle().setDisplay(Display.NONE); }
3.68
dubbo_PayloadDropper_getRequestWithoutData
/** * Only log the body in debug mode, for size & security considerations. * * @param message the request or response message * @return the message, with its data cleared unless debug logging is enabled */ public static Object getRequestWithoutData(Object message) { if (logger.isDebugEnabled()) { return message; } if (message instanceof Request) { Request request = (Request) message; request.setData(null); return request; } else if (message instanceof Response) { Response response = (Response) message; response.setResult(null); return response; } return message; }
3.68
hadoop_NullGroupsMapping_getGroups
/** * Returns an empty list. * @param user ignored * @return an empty list */ @Override public List<String> getGroups(String user) { return Collections.emptyList(); }
3.68
framework_AbstractClientConnector_markAsDirty
/* Documentation copied from interface */ @Override public void markAsDirty() { assert getSession() == null || getSession().hasLock() : buildLockAssertMessage( "markAsDirty()"); UI uI = getUI(); if (uI != null) { uI.getConnectorTracker().markDirty(this); } }
3.68
hbase_MasterCoprocessorHost_postCompletedMergeRegionsAction
/** * Invoked after completing merge regions operation * @param regionsToMerge the regions to merge * @param mergedRegion the new merged region * @param user the user */ public void postCompletedMergeRegionsAction(final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion, final User user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override public void call(MasterObserver observer) throws IOException { observer.postCompletedMergeRegionsAction(this, regionsToMerge, mergedRegion); } }); }
3.68
framework_AbstractOrderedLayout_getComponentIndex
/**
 * Returns the index of the given component.
 *
 * @param component
 *            The component to look up.
 * @return The index of the component or -1 if the component is not a child.
 */
public int getComponentIndex(Component component) {
    return components.indexOf(component);
}
3.68
open-banking-gateway_JsonTemplateInterpolation_injectKonto6IfNeeded
// kapott does not handle properly which element to create if 2 entries have the same name,
// like KInfo5 and KInfo6. It simply creates the 1st one (KInfo5), which does not have an IBAN.
@SneakyThrows
private void injectKonto6IfNeeded(Message message, String key, Map<String, String> values, Set<String> kontos6Injected) {
    Pattern konto6Pattern = Pattern.compile("UPD\\.KInfo.*\\.iban");
    Pattern targetPattern = Pattern.compile("(UPD\\.KInfo.*?\\.)");
    Pattern kontoPattern = Pattern.compile("UPD\\.KInfo.*");
    if (!kontoPattern.matcher(key).find()) {
        return;
    }

    Matcher matcher = targetPattern.matcher(key);
    matcher.find();
    String root = matcher.group(1);
    boolean hasKonto6 = values.entrySet().stream()
            .filter(it -> it.getKey().startsWith(root))
            .anyMatch(it -> konto6Pattern.matcher(it.getKey()).matches());
    if (!hasKonto6 || kontos6Injected.contains(root)) {
        return;
    }

    log.info("Injecting Konto6 for {}", key);
    kontos6Injected.add(root);
    SyntaxElement updElem = message.getElement(message.getPath() + ".UPD");
    XPath xPath = XPathFactory.newInstance().newXPath();
    Node konto6 = (Node) xPath.compile("/hbci/SFs/SFdef[@id='UPD']/SEG[@type='KInfo6']")
            .evaluate(ParsingUtil.SYNTAX, XPathConstants.NODE);
    int konto6Idx = ((Double) xPath
            .compile("count(/hbci/SFs/SFdef[@id='UPD']/SEG[@type='KInfo6']/preceding-sibling::*)+1")
            .evaluate(ParsingUtil.SYNTAX, XPathConstants.NUMBER)).intValue();
    Method createNewChildContainer = SyntaxElement.class
            .getDeclaredMethod("createNewChildContainer", Node.class, Document.class);
    createNewChildContainer.setAccessible(true);
    MultipleSyntaxElements newKonto6Elem =
            (MultipleSyntaxElements) createNewChildContainer.invoke(updElem, konto6, ParsingUtil.SYNTAX);
    // Ensure correct element position
    Method setIdx = MultipleSyntaxElements.class.getDeclaredMethod("setSyntaxIdx", int.class);
    setIdx.setAccessible(true);
    setIdx.invoke(newKonto6Elem, konto6Idx);
    updElem.getChildContainers().add(newKonto6Elem);
}
3.68
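The workaround hinges on invoking a library's private factory method via reflection. The core of that technique, with hypothetical class and method names, is:

import java.lang.reflect.Method;

class ReflectiveCallSketch {
    // Calls a private method declared on declaringClass. Passing the declaring class
    // explicitly (as the original does with SyntaxElement.class) matters because
    // getDeclaredMethod does not search superclasses.
    static Object callPrivate(Class<?> declaringClass, Object target, String methodName,
                              Class<?>[] signature, Object... args)
            throws ReflectiveOperationException {
        Method m = declaringClass.getDeclaredMethod(methodName, signature);
        m.setAccessible(true); // bypass the private modifier; may require --add-opens under JPMS
        return m.invoke(target, args);
    }
}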
hmily_HmilyBootstrap_start
/**
 * hmily initialization.
 */
public void start() {
    try {
        ConfigLoaderServer.load();
        HmilyConfig hmilyConfig = ConfigEnv.getInstance().getConfig(HmilyConfig.class);
        check(hmilyConfig);
        registerProvide();
        loadHmilyRepository(hmilyConfig);
        registerAutoCloseable(new HmilyTransactionSelfRecoveryScheduled(), HmilyRepositoryEventPublisher.getInstance());
        initMetrics();
    } catch (Exception e) {
        LOGGER.error(" hmily init exception:", e);
        System.exit(0);
    }
    new HmilyLogo().logo();
}
3.68
morf_HumanReadableStatementHelper_generateDataUpgradeString
/**
 * Generates a human-readable data upgrade description.
 *
 * @param statement the data upgrade statement to describe.
 * @param preferredSQLDialect the dialect to use, by preference, when a human readable description is not available. If
 *          SQL is not available in this dialect, or none was specified, then an arbitrary choice is made from the bundle
 *          of available raw SQL fragments.
 * @return a string containing the human-readable description of the action.
 */
public static String generateDataUpgradeString(final Statement statement, final String preferredSQLDialect) {
  if (statement instanceof DeleteStatement) {
    return generateDeleteStatementString((DeleteStatement)statement);
  } else if (statement instanceof InsertStatement) {
    return generateInsertStatementString((InsertStatement)statement);
  } else if (statement instanceof MergeStatement) {
    return generateMergeStatementString((MergeStatement)statement);
  } else if (statement instanceof PortableSqlStatement) {
    return generatePortableSqlStatementString((PortableSqlStatement)statement, preferredSQLDialect);
  } else if (statement instanceof TruncateStatement) {
    return generateTruncateStatementString((TruncateStatement)statement);
  } else if (statement instanceof UpdateStatement) {
    return generateUpdateStatementString((UpdateStatement)statement);
  } else {
    throw new UnsupportedOperationException("Unable to generate data upgrade string for: [" + statement.getClass().getName() + "]");
  }
}
3.68
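An alternative to the instanceof chain is a registry keyed by statement class; a minimal sketch, with stand-in types:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class DescriberRegistry {
    private final Map<Class<?>, Function<Object, String>> describers = new HashMap<>();

    // Register one describer per concrete statement class.
    <T> void register(Class<T> type, Function<T, String> describer) {
        describers.put(type, o -> describer.apply(type.cast(o)));
    }

    String describe(Object statement) {
        Function<Object, String> d = describers.get(statement.getClass());
        if (d == null) {
            throw new UnsupportedOperationException(
                "Unable to generate data upgrade string for: [" + statement.getClass().getName() + "]");
        }
        return d.apply(statement);
    }
}

Unlike the instanceof chain, a map lookup matches the exact class only, so subclasses would need explicit registration — one reason the chain above is a reasonable choice here.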
framework_VMenuBar_getParentMenu
/**
 * Returns the parent menu of this menu, or null if this is the top-level
 * menu.
 *
 * @return the parent menu, or {@code null} for the top-level menu
 */
public VMenuBar getParentMenu() {
    return parentMenu;
}
3.68
hbase_SpaceQuotaRefresherChore_getTimeUnit
/**
 * Extracts the time unit for the chore period and initial delay from the configuration. The
 * configuration value for {@link #POLICY_REFRESHER_CHORE_TIMEUNIT_KEY} must correspond to a
 * {@link TimeUnit} value.
 * @param conf The configuration object.
 * @return The configured time unit for the chore period and initial delay or the default value.
 */
static TimeUnit getTimeUnit(Configuration conf) {
  return TimeUnit.valueOf(
    conf.get(POLICY_REFRESHER_CHORE_TIMEUNIT_KEY, POLICY_REFRESHER_CHORE_TIMEUNIT_DEFAULT));
}
3.68
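TimeUnit.valueOf is strict, so a misconfigured value fails fast rather than silently defaulting. A standalone sketch of the same parse-with-default idea; the config map and key are hypothetical:

import java.util.Map;
import java.util.concurrent.TimeUnit;

class TimeUnitConfigSketch {
    static TimeUnit timeUnit(Map<String, String> conf, String key, String defaultValue) {
        // valueOf throws IllegalArgumentException if the name is not a TimeUnit constant:
        // "MINUTES" parses, "minutes" does not.
        return TimeUnit.valueOf(conf.getOrDefault(key, defaultValue));
    }

    public static void main(String[] args) {
        System.out.println(timeUnit(Map.of(), "chore.timeunit", "MILLISECONDS")); // MILLISECONDS
    }
}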
flink_Tuple10_of
/**
 * Creates a new tuple and assigns the given values to the tuple's fields. This is more
 * convenient than using the constructor, because the compiler can infer the generic type
 * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
 * Tuple3<Integer, Double, String>(n, x, s)}
 */
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>
        Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> of(
                T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9) {
    return new Tuple10<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
}
3.68
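The convenience comes from Java's type inference for generic methods. The same pattern on a minimal two-field tuple (a sketch, not Flink's Tuple2):

// Minimal two-field analogue of Flink's TupleN.of factories.
class Pair<A, B> {
    final A first;
    final B second;

    Pair(A first, B second) {
        this.first = first;
        this.second = second;
    }

    // The compiler infers <A, B> from the arguments:
    // Pair.of(1, "x") is a Pair<Integer, String> with no explicit type arguments.
    static <A, B> Pair<A, B> of(A first, B second) {
        return new Pair<>(first, second);
    }
}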
hadoop_WriteOperationHelper_toString
/**
 * The toString method is intended to be used in logging/toString calls.
 * @return a string description.
 */
@Override
public String toString() {
  final StringBuilder sb = new StringBuilder(
      "WriteOperationHelper {bucket=").append(bucket);
  sb.append('}');
  return sb.toString();
}
3.68
querydsl_ColumnMetadata_getColumnMetadata
/**
 * Returns this path's column metadata if present. Otherwise returns default
 * metadata where the column name is equal to the path's name.
 */
public static ColumnMetadata getColumnMetadata(Path<?> path) {
    Path<?> parent = path.getMetadata().getParent();
    if (parent instanceof EntityPath) {
        Object columnMetadata = ((EntityPath<?>) parent).getMetadata(path);
        if (columnMetadata instanceof ColumnMetadata) {
            return (ColumnMetadata) columnMetadata;
        }
    }
    return ColumnMetadata.named(path.getMetadata().getName());
}
3.68
hadoop_BooleanWritable_readFields
/**
 * Reads the boolean value from the input stream.
 */
@Override
public void readFields(DataInput in) throws IOException {
  value = in.readBoolean();
}
3.68
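A self-contained round-trip illustrating the Writable-style write/readFields contract; the BoolBox class below is an illustrative stand-in, not Hadoop's BooleanWritable:

import java.io.*;

class BoolBox {
    private boolean value;

    void set(boolean value) { this.value = value; }
    boolean get() { return value; }

    // Symmetric pair: whatever write() emits, readFields() must consume in the same order.
    void write(DataOutput out) throws IOException { out.writeBoolean(value); }
    void readFields(DataInput in) throws IOException { value = in.readBoolean(); }

    public static void main(String[] args) throws IOException {
        BoolBox original = new BoolBox();
        original.set(true);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        BoolBox copy = new BoolBox();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.get()); // true
    }
}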
streampipes_JdbcClient_ensureTableExists
/**
 * If this method returns successfully, a table with the name in
 * {@link JdbcConnectionParameters#getDbTable()} exists in the given database
 * on the server specified by the url.
 *
 * @param url The JDBC url containing the needed information (e.g. "jdbc:iotdb://127.0.0.1:6667/")
 * @param databaseName The database in which the table should exist
 * @throws SpRuntimeException If the table does not exist and could not be created
 */
protected void ensureTableExists(String url, String databaseName) throws SpRuntimeException {
    try {
        // Database should exist by now so we can establish a connection
        connection = DriverManager.getConnection(url + databaseName,
                this.dbDescription.getUsername(), this.dbDescription.getPassword());
        this.statementHandler.setStatement(connection.createStatement());
        ResultSet rs = connection.getMetaData()
                .getTables(null, null, this.tableDescription.getName(), null);
        if (rs.next()) {
            validateTable();
        } else {
            createTable();
        }
        this.tableDescription.setTableExists();
        rs.close();
    } catch (SQLException e) {
        closeAll();
        throw new SpRuntimeException(e.getMessage());
    }
}
3.68
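The check-then-create idiom with DatabaseMetaData.getTables can be sketched on its own; the DDL below is a placeholder, not the client's real schema generation:

import java.sql.*;

class EnsureTableSketch {
    // Creates the table only when the metadata lookup finds no match.
    static void ensureTable(Connection conn, String tableName) throws SQLException {
        // getTables(catalog, schemaPattern, tableNamePattern, types) - nulls mean "any".
        try (ResultSet rs = conn.getMetaData().getTables(null, null, tableName, null)) {
            if (rs.next()) {
                return; // table already present; a real client would validate its schema here
            }
        }
        try (Statement stmt = conn.createStatement()) {
            // Placeholder DDL; the real column list comes from the event description.
            stmt.executeUpdate("CREATE TABLE " + tableName + " (id INT)");
        }
    }
}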
framework_RpcDataProviderExtension_addActiveItems
/**
 * Registers ValueChangeListeners for given item ids.
 * <p>
 * Note: This method will clean up any unneeded listeners and key
 * mappings.
 *
 * @param itemIds
 *            collection of new active item ids
 */
public void addActiveItems(Collection<?> itemIds) {
    for (Object itemId : itemIds) {
        if (!activeItemMap.containsKey(itemId)) {
            activeItemMap.put(itemId,
                    new GridValueChangeListener(itemId, container.getItem(itemId)));
        }
    }

    // Remove still active rows that were "dropped"
    droppedItems.removeAll(itemIds);
    internalDropItems(droppedItems);
    droppedItems.clear();
}
3.68
hbase_MetaTableAccessor_addRegionsToMeta
/**
 * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is
 * CLOSED.
 * @param connection connection we're using
 * @param regionInfos region information list
 * @param regionReplication number of replicas per region, used to add empty locations
 * @param ts desired timestamp
 * @throws IOException if problem connecting or updating meta
 */
public static void addRegionsToMeta(Connection connection, List<RegionInfo> regionInfos,
  int regionReplication, long ts) throws IOException {
  List<Put> puts = new ArrayList<>();
  for (RegionInfo regionInfo : regionInfos) {
    if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
      continue;
    }
    Put put = makePutFromRegionInfo(regionInfo, ts);
    // New regions are added with initial state of CLOSED.
    addRegionStateToPut(put, regionInfo.getReplicaId(), RegionState.State.CLOSED);
    // Add empty locations for region replicas so that number of replicas can be cached
    // whenever the primary region is looked up from meta
    for (int i = 1; i < regionReplication; i++) {
      addEmptyLocation(put, i);
    }
    puts.add(put);
  }
  putsToMetaTable(connection, puts);
  LOG.info("Added {} regions to meta.", puts.size());
}
3.68
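The filter-then-batch shape of the method — skip non-default replicas, accumulate one mutation per region, write the whole batch in a single call — can be sketched generically; every type below is a stand-in:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;

class BatchWriteSketch<T, M> {
    // Builds one mutation per accepted input, then the caller submits the whole
    // batch in a single round trip, like putsToMetaTable above.
    List<M> buildBatch(List<T> inputs, Predicate<T> accept, Function<T, M> toMutation) {
        List<M> batch = new ArrayList<>();
        for (T input : inputs) {
            if (!accept.test(input)) {
                continue; // e.g. skip non-default replicas
            }
            batch.add(toMutation.apply(input));
        }
        return batch;
    }
}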