Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68).
MagicPlugin_Wand_checkSpellLevelsAndInventory
/** * Covers the special case of a wand having spell levels and inventory slots that came from configs, * but now we've modified the spells list and need to figure out if we also need to persist the levels and * slots separately. * * <p>This should all be moved to CasterProperties at some point to handle the same sort of issues with mage class * configs. */ private void checkSpellLevelsAndInventory() { if (!spellLevels.isEmpty()) { MagicProperties storage = getStorage("spell_levels"); if (storage == null || storage == this) { if (!configuration.contains("spell_levels")) { configuration.set("spell_levels", spellLevels); } } } if (!spellInventory.isEmpty()) { MagicProperties storage = getStorage("spell_inventory"); if (storage == null || storage == this) { if (!configuration.contains("spell_inventory")) { configuration.set("spell_inventory", spellInventory); } } } }
3.68
hbase_MobFileCache_getMissCount
/** * Gets the count of misses to the mob file cache. * @return The count of misses to the mob file cache. */ public long getMissCount() { return miss.sum(); }
3.68
hadoop_PlacementConstraints_allocationTagWithNamespace
/** * Constructs a target expression on a set of allocation tags under * a certain namespace. * * @param namespace namespace of the allocation tags * @param allocationTags allocation tags * @return a target expression */ public static TargetExpression allocationTagWithNamespace(String namespace, String... allocationTags) { return new TargetExpression(TargetType.ALLOCATION_TAG, namespace, allocationTags); }
3.68
dubbo_ServiceAnnotationPostProcessor_scanServiceBeans
/** * Scan and registers service beans whose classes was annotated {@link Service} * * @param packagesToScan The base packages to scan * @param registry {@link BeanDefinitionRegistry} */ private void scanServiceBeans(Set<String> packagesToScan, BeanDefinitionRegistry registry) { scanned = true; if (CollectionUtils.isEmpty(packagesToScan)) { if (logger.isWarnEnabled()) { logger.warn( CONFIG_NO_BEANS_SCANNED, "", "", "packagesToScan is empty , ServiceBean registry will be ignored!"); } return; } DubboClassPathBeanDefinitionScanner scanner = new DubboClassPathBeanDefinitionScanner(registry, environment, resourceLoader); BeanNameGenerator beanNameGenerator = resolveBeanNameGenerator(registry); scanner.setBeanNameGenerator(beanNameGenerator); for (Class<? extends Annotation> annotationType : serviceAnnotationTypes) { scanner.addIncludeFilter(new AnnotationTypeFilter(annotationType)); } ScanExcludeFilter scanExcludeFilter = new ScanExcludeFilter(); scanner.addExcludeFilter(scanExcludeFilter); for (String packageToScan : packagesToScan) { // avoid duplicated scans if (servicePackagesHolder.isPackageScanned(packageToScan)) { if (logger.isInfoEnabled()) { logger.info("Ignore package who has already bean scanned: " + packageToScan); } continue; } // Registers @Service Bean first scanner.scan(packageToScan); // Finds all BeanDefinitionHolders of @Service whether @ComponentScan scans or not. Set<BeanDefinitionHolder> beanDefinitionHolders = findServiceBeanDefinitionHolders(scanner, packageToScan, registry, beanNameGenerator); if (!CollectionUtils.isEmpty(beanDefinitionHolders)) { if (logger.isInfoEnabled()) { List<String> serviceClasses = new ArrayList<>(beanDefinitionHolders.size()); for (BeanDefinitionHolder beanDefinitionHolder : beanDefinitionHolders) { serviceClasses.add( beanDefinitionHolder.getBeanDefinition().getBeanClassName()); } logger.info("Found " + beanDefinitionHolders.size() + " classes annotated by Dubbo @Service under package [" + packageToScan + "]: " + serviceClasses); } for (BeanDefinitionHolder beanDefinitionHolder : beanDefinitionHolders) { processScannedBeanDefinition(beanDefinitionHolder); servicePackagesHolder.addScannedClass( beanDefinitionHolder.getBeanDefinition().getBeanClassName()); } } else { if (logger.isWarnEnabled()) { logger.warn( CONFIG_NO_ANNOTATIONS_FOUND, "No annotations were found on the class", "", "No class annotated by Dubbo @DubboService or @Service was found under package [" + packageToScan + "], ignore re-scanned classes: " + scanExcludeFilter.getExcludedCount()); } } servicePackagesHolder.addScannedPackage(packageToScan); } }
3.68
rocketmq-connect_Serdes_String
/** * A serde for nullable {@code String} type. */ static public Serde<String> String() { return new StringSerde(); }
3.68
hbase_OnlineLogRecord_getScan
/** * If {@value org.apache.hadoop.hbase.HConstants#SLOW_LOG_SCAN_PAYLOAD_ENABLED} is enabled then * this value may be present and should represent the Scan that produced the given * {@link OnlineLogRecord} */ public Optional<Scan> getScan() { return scan; }
3.68
framework_AbstractDateField_setRangeEnd
/** * Sets the end range for this component. If the value is set after this * date (taking the resolution into account), the component will not * validate. If {@code endDate} is set to {@code null}, any value after * {@code startDate} will be accepted by the range. * <p> * Note: It's usually recommended to use only one of the following at the * same time: Range validator with Binder or DateField's setRangeEnd check. * * @param endDate * the allowed range's end date (inclusive, based on the current * resolution) */ public void setRangeEnd(T endDate) { String date = convertToDateString(endDate); if (afterDate(convertFromDateString(getState().rangeStart), endDate)) { throw new IllegalStateException( "endDate cannot be earlier than startDate"); } getState().rangeEnd = date; }
3.68
flink_SplitDataProperties_splitsOrderedBy
/** * Defines that the data within an input split is sorted on the fields defined by the field * expressions in the specified orders. Multiple field expressions must be separated by the * semicolon ';' character. All records of an input split must be emitted by the input format in * the defined order. * * <p><b> IMPORTANT: Providing wrong information with SplitDataProperties can cause wrong * results! </b> * * @param orderFields The field expressions of the grouping key. * @param orders The orders of the fields. * @return This SplitDataProperties object. */ public SplitDataProperties<T> splitsOrderedBy(String orderFields, Order[] orders) { if (orderFields == null || orders == null) { throw new InvalidProgramException("OrderFields or Orders may not be null."); } String[] orderKeysA = orderFields.split(";"); if (orderKeysA.length == 0) { throw new InvalidProgramException("OrderFields may not be empty."); } else if (orders.length == 0) { throw new InvalidProgramException("Orders may not be empty"); } else if (orderKeysA.length != orders.length) { throw new InvalidProgramException("Number of OrderFields and Orders must match."); } if (this.splitGroupKeys != null) { throw new InvalidProgramException("DataSource may either be grouped or sorted."); } this.splitOrdering = new Ordering(); for (int i = 0; i < orderKeysA.length; i++) { String keyExp = orderKeysA[i]; Keys.ExpressionKeys<T> ek = new Keys.ExpressionKeys<>(keyExp, this.type); int[] flatKeys = ek.computeLogicalKeyPositions(); for (int key : flatKeys) { // check for duplicates for (int okey : splitOrdering.getFieldPositions()) { if (key == okey) { throw new InvalidProgramException( "Duplicate field in field expression " + keyExp); } } // append key this.splitOrdering.appendOrdering(key, null, orders[i]); } } return this; }
3.68
graphhopper_Helper_intToDegree
/** * Converts back the integer value. * * @return the degree value of the specified integer */ public static double intToDegree(int storedInt) { if (storedInt == Integer.MAX_VALUE) return Double.MAX_VALUE; if (storedInt == -Integer.MAX_VALUE) return -Double.MAX_VALUE; return (double) storedInt / DEGREE_FACTOR; }
3.68
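A minimal JDK-only sketch of the degree/int round trip used by intToDegree above. The DEGREE_FACTOR value (1e7) is an assumption chosen for illustration, not necessarily GraphHopper's actual constant, and degreeToInt is a hypothetical inverse written only to show the idea.

public class IntDegreeSketch {
    private static final double DEGREE_FACTOR = 10_000_000d; // assumed factor, ~1e-7 degree precision

    static int degreeToInt(double deg) {
        if (deg >= Double.MAX_VALUE) return Integer.MAX_VALUE;
        if (deg <= -Double.MAX_VALUE) return -Integer.MAX_VALUE;
        return (int) Math.round(deg * DEGREE_FACTOR);
    }

    static double intToDegree(int storedInt) {
        if (storedInt == Integer.MAX_VALUE) return Double.MAX_VALUE;
        if (storedInt == -Integer.MAX_VALUE) return -Double.MAX_VALUE;
        return (double) storedInt / DEGREE_FACTOR;
    }

    public static void main(String[] args) {
        double lat = 52.520008;
        // Round trip loses at most about 1/DEGREE_FACTOR of precision.
        System.out.println(intToDegree(degreeToInt(lat)));
    }
}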
querydsl_BooleanExpression_isTrue
/** * Create a {@code this == true} expression * * @return this == true */ public BooleanExpression isTrue() { return eq(true); }
3.68
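A short, hedged usage sketch for isTrue(): it builds a "this == true" predicate from a boolean path. The property name "active" is made up for illustration; only Expressions.booleanPath(...) and isTrue() are taken from Querydsl's public API.

import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;

public class IsTrueSketch {
    public static void main(String[] args) {
        // "active" is a hypothetical boolean property used only for illustration.
        BooleanExpression predicate = Expressions.booleanPath("active").isTrue();
        System.out.println(predicate); // renders as: active = true
    }
}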
framework_DragEndEvent_getDropEffect
/** * Get drop effect of the dragend event. The value will be the desired * action, that is the dropEffect value of the last dragenter or dragover * event. The value depends on the effectAllowed parameter of the drag * source, the dropEffect parameter of the drop target, and its drag over * and drop criteria. * <p> * If the drop is not successful, the value will be {@code NONE}. * <p> * In case the desired drop effect is {@code MOVE}, the data being dragged * should be removed from the source. * * @return The {@code DataTransfer.dropEffect} parameter of the client side * dragend event. * @see DragSourceExtension#setEffectAllowed(com.vaadin.shared.ui.dnd.EffectAllowed) * DragSourceExtension#setEffectAllowed(EffectAllowed) * @see DropTargetExtension#setDropEffect(DropEffect) * @see DropTargetExtension#setDropCriteriaScript(String) */ public DropEffect getDropEffect() { return dropEffect; }
3.68
zxing_BitMatrix_clear
/** * Clears all bits (sets to false). */ public void clear() { int max = bits.length; for (int i = 0; i < max; i++) { bits[i] = 0; } }
3.68
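A small sketch showing clear() resetting a ZXing BitMatrix; it assumes the standard square-matrix constructor and the get/set accessors from com.google.zxing.common.BitMatrix.

import com.google.zxing.common.BitMatrix;

public class BitMatrixClearSketch {
    public static void main(String[] args) {
        BitMatrix matrix = new BitMatrix(8);   // 8x8 square matrix
        matrix.set(2, 3);                      // set the bit at x=2, y=3
        System.out.println(matrix.get(2, 3));  // true
        matrix.clear();                        // zeroes every word in the backing array, as in the snippet above
        System.out.println(matrix.get(2, 3));  // false
    }
}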
framework_RowReference_getGrid
/** * Gets the grid that contains the referenced row. * * @return the grid that contains referenced row */ public Grid<T> getGrid() { return grid; }
3.68
flink_ExecutionConfig_getRegisteredTypesWithKryoSerializerClasses
/** Returns the registered types with their Kryo Serializer classes. */ public LinkedHashMap<Class<?>, Class<? extends Serializer<?>>> getRegisteredTypesWithKryoSerializerClasses() { return registeredTypesWithKryoSerializerClasses; }
3.68
hbase_RatioBasedCompactionPolicy_applyCompactionPolicy
/** * -- Default minor compaction selection algorithm: choose CompactSelection from candidates -- * First exclude bulk-load files if indicated in configuration. Start at the oldest file and stop * when you find the first file that meets compaction criteria: (1) a recently-flushed, small file * (i.e. <= minCompactSize) OR (2) within the compactRatio of sum(newer_files) Given normal skew, * any newer files will also meet this criteria * <p/> * Additional Note: If fileSizes.size() >> maxFilesToCompact, we will recurse on compact(). * Consider the oldest files first to avoid a situation where we always compact * [end-threshold,end). Then, the last file becomes an aggregate of the previous compactions. * normal skew: older ----> newer (increasing seqID) _ | | _ | | | | _ --|-|- |-|- * |-|---_-------_------- minCompactSize | | | | | | | | _ | | | | | | | | | | | | | | | | | | | | * | | | | | | * @param candidates pre-filtrate * @return filtered subset */ protected ArrayList<HStoreFile> applyCompactionPolicy(ArrayList<HStoreFile> candidates, boolean mayUseOffPeak, boolean mayBeStuck) throws IOException { if (candidates.isEmpty()) { return candidates; } // we're doing a minor compaction, let's see what files are applicable int start = 0; double ratio = comConf.getCompactionRatio(); if (mayUseOffPeak) { ratio = comConf.getCompactionRatioOffPeak(); LOG.info("Running an off-peak compaction, selection ratio = " + ratio); } // get store file sizes for incremental compacting selection. final int countOfFiles = candidates.size(); long[] fileSizes = new long[countOfFiles]; long[] sumSize = new long[countOfFiles]; for (int i = countOfFiles - 1; i >= 0; --i) { HStoreFile file = candidates.get(i); fileSizes[i] = file.getReader().length(); // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo int tooFar = i + comConf.getMaxFilesToCompact() - 1; sumSize[i] = fileSizes[i] + ((i + 1 < countOfFiles) ? sumSize[i + 1] : 0) - ((tooFar < countOfFiles) ? fileSizes[tooFar] : 0); } while ( countOfFiles - start >= comConf.getMinFilesToCompact() && fileSizes[start] > Math.max(comConf.getMinCompactSize(), (long) (sumSize[start + 1] * ratio)) ) { ++start; } if (start < countOfFiles) { LOG.info("Default compaction algorithm has selected " + (countOfFiles - start) + " files from " + countOfFiles + " candidates"); } else if (mayBeStuck) { // We may be stuck. Compact the latest files if we can. int filesToLeave = candidates.size() - comConf.getMinFilesToCompact(); if (filesToLeave >= 0) { start = filesToLeave; } } candidates.subList(0, start).clear(); return candidates; }
3.68
hadoop_OBSDataBlocks_startUpload
/** * Switch to the upload state and return a stream for uploading. Base class * calls {@link #enterState(DestState, DestState)} to manage the state * machine. * * @return the stream * @throws IOException trouble */ Object startUpload() throws IOException { LOG.debug("Start datablock[{}] upload", index); enterState(DestState.Writing, DestState.Upload); return null; }
3.68
flink_ChannelReaderInputView_sendReadRequest
/** * Sends a new read request, if further requests remain. Otherwise, this method adds the * segment directly to the reader's return queue. * * @param seg The segment to use for the read request. * @throws IOException Thrown, if the reader is in error. */ protected void sendReadRequest(MemorySegment seg) throws IOException { if (this.numRequestsRemaining != 0) { this.reader.readBlock(seg); if (this.numRequestsRemaining != -1) { this.numRequestsRemaining--; } } else { // directly add it to the end of the return queue this.freeMem.add(seg); } }
3.68
flink_InputChannel_setError
/** * Atomically sets an error for this channel and notifies the input gate about available data to * trigger querying this channel by the task thread. */ protected void setError(Throwable cause) { if (this.cause.compareAndSet(null, checkNotNull(cause))) { // Notify the input gate. notifyChannelNonEmpty(); } }
3.68
hbase_TableDescriptorChecker_sanityCheck
/** * Checks whether the table conforms to some sane limits, and configured values (compression, etc) * work. Throws an exception if something is wrong. */ public static void sanityCheck(final Configuration c, final TableDescriptor td) throws IOException { CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues()); // Setting logs to warning instead of throwing exception if sanityChecks are disabled boolean logWarn = !shouldSanityCheck(conf); // check max file size long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit // if not set MAX_FILESIZE in TableDescriptor, and not set HREGION_MAX_FILESIZE in // hbase-site.xml, use maxFileSizeLowerLimit instead to skip this check long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE)); if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + maxFileSize + ") is too small, which might cause over splitting into unmanageable " + "number of regions."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check flush size long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit // if not set MEMSTORE_FLUSHSIZE in TableDescriptor, and not set HREGION_MEMSTORE_FLUSH_SIZE in // hbase-site.xml, use flushSizeLowerLimit instead to skip this check long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE)); if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + ") is too small, which might cause" + " very frequent flushing."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check that coprocessors and other specified plugin classes can be loaded checkClassLoading(conf, td); if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) { // check compression can be loaded checkCompression(conf, td); } if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) { // check encryption can be loaded checkEncryption(conf, td); } // Verify compaction policy checkCompactionPolicy(conf, td); // check that we have at least 1 CF if (td.getColumnFamilyCount() == 0) { String message = "Table should have at least one column family."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check that we have minimum 1 region replicas int regionReplicas = td.getRegionReplication(); if (regionReplicas < 1) { String message = "Table region replication should be at least one."; warnOrThrowExceptionForFailure(logWarn, message, null); } // Meta table shouldn't be set as read only, otherwise it will impact region assignments if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) { warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null); } // check replication scope checkReplicationScope(conf, td); // check bloom filter type checkBloomFilterType(conf, td); for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) { if (hcd.getTimeToLive() <= 0) { String message = "TTL for column family " + 
hcd.getNameAsString() + " must be positive."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check blockSize if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) { String message = "Block size for column family " + hcd.getNameAsString() + " must be between 1K and 16MB."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check versions if (hcd.getMinVersions() < 0) { String message = "Min versions for column family " + hcd.getNameAsString() + " must be positive."; warnOrThrowExceptionForFailure(logWarn, message, null); } // max versions already being checked // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor // does not throw IllegalArgumentException // check minVersions <= maxVerions if (hcd.getMinVersions() > hcd.getMaxVersions()) { String message = "Min versions for column family " + hcd.getNameAsString() + " must be less than the Max versions."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check data replication factor, it can be 0(default value) when user has not explicitly // set the value, in this case we use default replication factor set in the file system. if (hcd.getDFSReplication() < 0) { String message = "HFile Replication for column family " + hcd.getNameAsString() + " must be greater than zero."; warnOrThrowExceptionForFailure(logWarn, message, null); } // check in-memory compaction try { hcd.getInMemoryCompaction(); } catch (IllegalArgumentException e) { warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e); } } }
3.68
hbase_IpcClientSpanBuilder_getRpcPackageAndService
/** * Retrieve the combined {@code $package.$service} value from {@code sd}. */ public static String getRpcPackageAndService(final Descriptors.ServiceDescriptor sd) { // it happens that `getFullName` returns a string in the $package.$service format required by // the otel RPC specification. Use it for now; might have to parse the value in the future. return sd.getFullName(); }
3.68
zxing_WifiConfigManager_changeNetworkUnEncrypted
// Adding an open, unsecured network private static void changeNetworkUnEncrypted(WifiManager wifiManager, WifiParsedResult wifiResult) { WifiConfiguration config = changeNetworkCommon(wifiResult); config.allowedKeyManagement.set(WifiConfiguration.KeyMgmt.NONE); updateNetwork(wifiManager, config); }
3.68
flink_DataSinkNode_getInputConnection
/** * Gets the input of the sink. * * @return The input connection. */ public DagConnection getInputConnection() { return this.input; }
3.68
hbase_ReplicationThrottler_resetStartTick
/** * Reset the cycle start tick to NOW */ public void resetStartTick() { if (this.enabled) { this.cycleStartTick = EnvironmentEdgeManager.currentTime(); } }
3.68
shardingsphere-elasticjob_ElasticJobTracingConfiguration_tracingDataSource
/** * Create a bean of tracing DataSource. * * @param tracingProperties tracing Properties * @return tracing DataSource */ @Bean("tracingDataSource") public DataSource tracingDataSource(final TracingProperties tracingProperties) { DataSourceProperties dataSource = tracingProperties.getDataSource(); if (dataSource == null) { return null; } HikariDataSource tracingDataSource = new HikariDataSource(); tracingDataSource.setJdbcUrl(dataSource.getUrl()); BeanUtils.copyProperties(dataSource, tracingDataSource); return tracingDataSource; }
3.68
flink_SqlLikeUtils_similar
/** SQL {@code SIMILAR} function with escape. */ public static boolean similar(String s, String pattern, String escape) { final String regex = sqlToRegexSimilar(pattern, escape); return Pattern.matches(regex, s); }
3.68
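To make the SIMILAR TO semantics concrete, here is a plain-Java sketch of the kind of pattern-to-regex translation that sqlToRegexSimilar performs, handling only % and _ and no escape character. The real Flink routine covers far more of the SQL grammar, so treat this purely as an illustration.

import java.util.regex.Pattern;

public class SimilarSketch {
    // Minimal translation: '%' -> ".*", '_' -> ".", everything else matched literally.
    static String sqlSimilarToRegex(String pattern) {
        StringBuilder regex = new StringBuilder();
        for (char c : pattern.toCharArray()) {
            if (c == '%') {
                regex.append(".*");
            } else if (c == '_') {
                regex.append('.');
            } else {
                regex.append(Pattern.quote(String.valueOf(c)));
            }
        }
        return regex.toString();
    }

    public static void main(String[] args) {
        System.out.println(Pattern.matches(sqlSimilarToRegex("ab%"), "abcde"));  // true
        System.out.println(Pattern.matches(sqlSimilarToRegex("a_c"), "abc"));    // true
        System.out.println(Pattern.matches(sqlSimilarToRegex("a_c"), "abbc"));   // false
    }
}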
framework_AbstractClientConnector_addMethodInvocationToQueue
/** * For internal use: adds a method invocation to the pending RPC call queue. * * @param interfaceName * RPC interface name * @param method * RPC method * @param parameters * RPC all parameters * * @since 7.0 */ protected void addMethodInvocationToQueue(String interfaceName, Method method, Object[] parameters) { // add to queue pendingInvocations.add(new ClientMethodInvocation(this, interfaceName, method, parameters)); // TODO no need to do full repaint if only RPC calls requestRepaint(); }
3.68
morf_SchemaChangeSequence_tableAdditions
/** * @return The set of all tables which are added by this sequence. */ public Set<String> tableAdditions() { return tableAdditions; }
3.68
flink_DataStreamScanProvider_produceDataStream
/** Creates a scan Java {@link DataStream} from a {@link StreamExecutionEnvironment}. */ @Deprecated default DataStream<RowData> produceDataStream(StreamExecutionEnvironment execEnv) { throw new UnsupportedOperationException( "This method is deprecated. " + "Use produceDataStream(ProviderContext, StreamExecutionEnvironment) instead"); }
3.68
framework_DataCommunicator_cleanUp
/** * Executes the data destruction for dropped data that is not sent to * the client. This method takes most recently sent data objects in a * collection. Doing the clean up like this prevents the * {@link ActiveDataHandler} from creating new keys for rows that were * dropped but got re-requested by the client-side. In the case of * having all data at the client, the collection should be all the data * in the back end. * * @param dataObjects * collection of most recently sent data to the client */ public void cleanUp(Stream<T> dataObjects) { Collection<String> keys = dataObjects.map(getKeyMapper()::key) .collect(Collectors.toSet()); // Remove still active rows that were dropped by the client droppedData.removeAll(keys); // Do data clean up for object no longer needed. dropData(droppedData); droppedData.clear(); }
3.68
flink_TaskLocalStateStoreImpl_discardLocalStateForCheckpoint
/** * Helper method that discards state objects with an executor and reports exceptions to the log. */ private void discardLocalStateForCheckpoint(long checkpointID, Optional<TaskStateSnapshot> o) { if (LOG.isTraceEnabled()) { LOG.trace( "Discarding local task state snapshot of checkpoint {} for subtask ({} - {} - {}).", checkpointID, jobID, jobVertexID, subtaskIndex); } else { LOG.debug( "Discarding local task state snapshot {} of checkpoint {} for subtask ({} - {} - {}).", o, checkpointID, jobID, jobVertexID, subtaskIndex); } o.ifPresent( taskStateSnapshot -> { try { taskStateSnapshot.discardState(); } catch (Exception discardEx) { LOG.warn( "Exception while discarding local task state snapshot of checkpoint {} in subtask ({} - {} - {}).", checkpointID, jobID, jobVertexID, subtaskIndex, discardEx); } }); File checkpointDir = getCheckpointDirectory(checkpointID); LOG.debug( "Deleting local state directory {} of checkpoint {} for subtask ({} - {} - {}).", checkpointDir, checkpointID, jobID, jobVertexID, subtaskIndex); try { deleteDirectory(checkpointDir); } catch (IOException ex) { LOG.warn( "Exception while deleting local state directory of checkpoint {} in subtask ({} - {} - {}).", checkpointID, jobID, jobVertexID, subtaskIndex, ex); } }
3.68
pulsar_MessageIdAdv_getAckSet
/** * Get the BitSet that indicates which messages in the batch have not yet been acknowledged. * * @implNote The message IDs of a batch should share a BitSet. For example, given 3 messages in the same batch whose * size is 3, all message IDs of them should return "111" (i.e. a BitSet whose size is 3 and all bits are 1). If the * 1st message has been acknowledged, the returned BitSet should become "011" (i.e. the 1st bit becomes 0). * * @return null if the message is a non-batched message */ default BitSet getAckSet() { return null; }
3.68
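The @implNote above maps directly onto java.util.BitSet; this JDK-only sketch reproduces the "111" to "011" example for a batch of three messages and does not touch Pulsar's own classes.

import java.util.BitSet;

public class AckSetSketch {
    public static void main(String[] args) {
        int batchSize = 3;
        BitSet ackSet = new BitSet(batchSize);
        ackSet.set(0, batchSize);              // all three messages outstanding -> "111"
        System.out.println(ackSet);            // {0, 1, 2}

        ackSet.clear(0);                       // acknowledge the 1st message -> "011"
        System.out.println(ackSet);            // {1, 2}

        System.out.println(ackSet.isEmpty());  // false: the batch is not fully acknowledged yet
    }
}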
hadoop_StoragePolicySatisfyManager_getNextPathId
/** * @return the next SPS path id, on which path users has invoked to satisfy * storages. */ public Long getNextPathId() { synchronized (pathsToBeTraversed) { return pathsToBeTraversed.poll(); } }
3.68
framework_MarginInfo_hasRight
/** * Checks if this MarginInfo object has the right edge margin enabled. * * @return true if right edge margin is enabled */ public boolean hasRight() { return (bitMask & RIGHT) == RIGHT; }
3.68
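hasRight() is a plain bitmask test; the sketch below shows the same (bitMask & RIGHT) == RIGHT idiom with flag values chosen only for illustration (Vaadin's actual MarginInfo constants may differ).

public class MarginBitSketch {
    // Flag values are assumptions for illustration, not taken from MarginInfo.
    static final int TOP = 1, RIGHT = 2, BOTTOM = 4, LEFT = 8;

    static boolean hasRight(int bitMask) {
        return (bitMask & RIGHT) == RIGHT;
    }

    public static void main(String[] args) {
        int margins = TOP | RIGHT;             // top and right margins enabled
        System.out.println(hasRight(margins)); // true
        System.out.println(hasRight(LEFT));    // false
    }
}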
querydsl_NumberExpression_shortValue
/** * Create a {@code this.shortValue()} expression * * <p>Get the short expression of this numeric expression</p> * * @return this.shortValue() * @see java.lang.Number#shortValue() */ public NumberExpression<Short> shortValue() { return castToNum(Short.class); }
3.68
hbase_HRegionFileSystem_bulkLoadStoreFile
/** * Bulk load: Add a specified store file to the specified family. If the source file is on the * same file-system it is moved from the source location to the destination location, * otherwise it is copied over. * @param familyName Family that will gain the file * @param srcPath {@link Path} to the file to import * @param seqNum Bulk Load sequence number * @return The destination {@link Path} of the bulk loaded file */ Pair<Path, Path> bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException { // Copy the file if it's on another filesystem FileSystem srcFs = srcPath.getFileSystem(conf); srcPath = srcFs.resolvePath(srcPath); FileSystem realSrcFs = srcPath.getFileSystem(conf); FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs; // We can't compare FileSystem instances as equals() includes UGI instance // as part of the comparison and won't work when doing SecureBulkLoad // TODO deal with viewFS if (!FSUtils.isSameHdfs(conf, realSrcFs, desFs)) { LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " + "the destination store. Copying file over to destination filesystem."); Path tmpPath = createTempName(); FileUtil.copy(realSrcFs, srcPath, fs, tmpPath, false, conf); LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath); srcPath = tmpPath; } return new Pair<>(srcPath, preCommitStoreFile(familyName, srcPath, seqNum, true)); }
3.68
flink_BinaryStringData_numChars
/** Returns the number of UTF-8 code points in the string. */ public int numChars() { ensureMaterialized(); if (inFirstSegment()) { int len = 0; for (int i = 0; i < binarySection.sizeInBytes; i += numBytesForFirstByte(getByteOneSegment(i))) { len++; } return len; } else { return numCharsMultiSegs(); } }
3.68
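The fast path of numChars above counts code points by jumping from the first byte of one UTF-8 sequence to the next; this JDK-only sketch shows that counting rule. The numBytesForFirstByte helper here is derived from the standard UTF-8 length table and is an assumption, not Flink's implementation.

import java.nio.charset.StandardCharsets;

public class Utf8NumCharsSketch {
    // Length of a UTF-8 sequence derived from its first byte (standard UTF-8 rule, valid input assumed).
    static int numBytesForFirstByte(byte b) {
        if ((b & 0x80) == 0x00) return 1;  // 0xxxxxxx
        if ((b & 0xE0) == 0xC0) return 2;  // 110xxxxx
        if ((b & 0xF0) == 0xE0) return 3;  // 1110xxxx
        return 4;                          // 11110xxx
    }

    static int numChars(byte[] utf8) {
        int len = 0;
        for (int i = 0; i < utf8.length; i += numBytesForFirstByte(utf8[i])) {
            len++;
        }
        return len;
    }

    public static void main(String[] args) {
        byte[] bytes = "héllo €".getBytes(StandardCharsets.UTF_8);
        System.out.println(numChars(bytes)); // 7 code points
        System.out.println(bytes.length);    // 10 bytes
    }
}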
hbase_SaslClientAuthenticationProvider_canRetry
/** * Returns true if the implementation is capable of performing some action which may allow a * failed authentication to become a successful authentication. Otherwise, returns false */ default boolean canRetry() { return false; }
3.68
framework_Tree_removeExpandListener
/** * Removes the expand listener. * * @param listener * the Listener to be removed. */ public void removeExpandListener(ExpandListener listener) { removeListener(ExpandEvent.class, listener, ExpandListener.EXPAND_METHOD); }
3.68
flink_YarnClusterDescriptor_addShipFiles
/** * Adds the given files to the list of files to ship. * * <p>Note that any file matching "<tt>flink-dist*.jar</tt>" will be excluded from the upload by * {@link YarnApplicationFileUploader#registerMultipleLocalResources(Collection, String, * LocalResourceType)} since we upload the Flink uber jar ourselves and do not need to deploy it * multiple times. * * @param shipFiles files to ship */ public void addShipFiles(List<Path> shipFiles) { checkArgument( !isUsrLibDirIncludedInShipFiles(shipFiles, yarnConfiguration), "User-shipped directories configured via : %s should not include %s.", YarnConfigOptions.SHIP_FILES.key(), ConfigConstants.DEFAULT_FLINK_USR_LIB_DIR); this.shipFiles.addAll(shipFiles); }
3.68
flink_SourceCoordinatorSerdeUtils_writeCoordinatorSerdeVersion
/** Write the current serde version. */ static void writeCoordinatorSerdeVersion(DataOutputStream out) throws IOException { out.writeInt(CURRENT_VERSION); }
3.68
flink_FileOutputFormat_initializeGlobal
/** * Initialization of the distributed file system if it is used. * * @param parallelism The task parallelism. */ @Override public void initializeGlobal(int parallelism) throws IOException { final Path path = getOutputFilePath(); final FileSystem fs = path.getFileSystem(); // only distributed file systems can be initialized at start-up time. if (fs.isDistributedFS()) { final WriteMode writeMode = getWriteMode(); final OutputDirectoryMode outDirMode = getOutputDirectoryMode(); if (parallelism == 1 && outDirMode == OutputDirectoryMode.PARONLY) { // output is not written in parallel and should be written to a single file. // prepare distributed output path if (!fs.initOutPathDistFS(path, writeMode, false)) { // output preparation failed! Cancel task. throw new IOException("Output path could not be initialized."); } } else { // output should be written to a directory // only distributed file systems can be initialized at start-up time. if (!fs.initOutPathDistFS(path, writeMode, true)) { throw new IOException("Output directory could not be created."); } } } }
3.68
flink_ResultInfo_getColumnInfos
/** Get the column info of the data. */ public List<ColumnInfo> getColumnInfos() { return Collections.unmodifiableList(columnInfos); }
3.68
morf_SqlServer_getXADataSource
/** * Returns a SQL Server XA data source. Note that this method may fail at * run-time if {@code SQLServerXADataSource} is not available on the classpath. * * @throws IllegalStateException If the data source cannot be created. * * @see org.alfasoftware.morf.jdbc.DatabaseType#getXADataSource(java.lang.String, * java.lang.String, java.lang.String) */ @Override public XADataSource getXADataSource(String jdbcUrl, String username, String password) { try { log.info("Initialising SQL Server XA data source..."); XADataSource dataSource = (XADataSource) Class.forName("com.microsoft.sqlserver.jdbc.SQLServerXADataSource").newInstance(); dataSource.getClass().getMethod("setURL", String.class).invoke(dataSource, jdbcUrl); dataSource.getClass().getMethod("setUser", String.class).invoke(dataSource, username); dataSource.getClass().getMethod("setPassword", String.class).invoke(dataSource, password); return dataSource; } catch (Exception e) { throw new IllegalStateException("Failed to create SQL Server XA data source", e); } }
3.68
MagicPlugin_MapController_forceReload
/** * Force reload of the specific url and cropping. */ public void forceReload(String worldName, String url, int x, int y, int width, int height) { get(worldName, url, x, y, width, height).reload(); }
3.68
hbase_HRegionLocation_hashCode
/** * @see java.lang.Object#hashCode() */ @Override public int hashCode() { return this.serverName.hashCode(); }
3.68
framework_AbstractStringToNumberConverter_convertToNumber
/** * Convert the value to a Number using the given locale and * {@link #getFormat(Locale)}. * * @param value * The value to convert * @param locale * The locale to use for conversion * @return The converted value * @throws ConversionException * If there was a problem converting the value * @since 7.1 */ protected Number convertToNumber(String value, Class<? extends Number> targetType, Locale locale) throws ConversionException { if (value == null) { return null; } // Remove leading and trailing white space value = value.trim(); // Parse and detect errors. If the full string was not used, it is // an error. ParsePosition parsePosition = new ParsePosition(0); Number parsedValue = getFormat(locale).parse(value, parsePosition); if (parsePosition.getIndex() != value.length()) { throw new ConversionException("Could not convert '" + value + "' to " + getModelType().getName()); } if (parsedValue == null) { // Convert "" to null return null; } return parsedValue; }
3.68
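The key trick in convertToNumber is parsing with a ParsePosition and rejecting the input when the parser stops before the end of the trimmed string; a JDK-only sketch of that strict-parse check, with a generic exception standing in for Vaadin's ConversionException:

import java.text.NumberFormat;
import java.text.ParsePosition;
import java.util.Locale;

public class StrictNumberParseSketch {
    static Number parseStrict(String value, Locale locale) {
        if (value == null) {
            return null;
        }
        value = value.trim();
        ParsePosition parsePosition = new ParsePosition(0);
        Number parsed = NumberFormat.getNumberInstance(locale).parse(value, parsePosition);
        if (parsePosition.getIndex() != value.length()) {
            // Trailing garbage such as "42abc" ends up here.
            throw new IllegalArgumentException("Could not convert '" + value + "'");
        }
        return parsed; // null only for the empty string
    }

    public static void main(String[] args) {
        System.out.println(parseStrict(" 1,234.5 ", Locale.US)); // 1234.5
        try {
            parseStrict("42abc", Locale.US);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}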
hbase_Increment_isReturnResults
/** Returns current setting for returnResults */ // This method makes public the superclasses's protected method. @Override public boolean isReturnResults() { return super.isReturnResults(); }
3.68
hbase_ClientMetaTableAccessor_scanMeta
/** * Performs a scan of META table for given table. * @param metaTable scanner over meta table * @param startRow Where to start the scan * @param stopRow Where to stop the scan * @param type scanned part of meta * @param maxRows maximum rows to return * @param visitor Visitor invoked against each row */ private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable, byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; Scan scan = getMetaScan(metaTable, rowUpperLimit); for (byte[] family : type.getFamilies()) { scan.addFamily(family); } if (startRow != null) { scan.withStartRow(startRow); } if (stopRow != null) { scan.withStopRow(stopRow); } if (LOG.isDebugEnabled()) { LOG.debug("Scanning META" + " starting at row=" + Bytes.toStringBinary(scan.getStartRow()) + " stopping at row=" + Bytes.toStringBinary(scan.getStopRow()) + " for max=" + rowUpperLimit + " with caching=" + scan.getCaching()); } CompletableFuture<Void> future = new CompletableFuture<Void>(); // Get the region locator's meta replica mode. CatalogReplicaMode metaReplicaMode = CatalogReplicaMode.fromString(metaTable.getConfiguration() .get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString())); if (metaReplicaMode == CatalogReplicaMode.LOAD_BALANCE) { addListener(metaTable.getDescriptor(), (desc, error) -> { if (error != null) { LOG.error("Failed to get meta table descriptor, error: ", error); future.completeExceptionally(error); return; } int numOfReplicas = desc.getRegionReplication(); if (numOfReplicas > 1) { int replicaId = ThreadLocalRandom.current().nextInt(numOfReplicas); // When the replicaId is 0, do not set to Consistency.TIMELINE if (replicaId > 0) { scan.setReplicaId(replicaId); scan.setConsistency(Consistency.TIMELINE); } } metaTable.scan(scan, new MetaTableScanResultConsumer(rowUpperLimit, visitor, future)); }); } else { if (metaReplicaMode == CatalogReplicaMode.HEDGED_READ) { scan.setConsistency(Consistency.TIMELINE); } metaTable.scan(scan, new MetaTableScanResultConsumer(rowUpperLimit, visitor, future)); } return future; }
3.68
framework_CustomLayout_getTemplateName
/** Get the name of the template. */ public String getTemplateName() { return getState(false).templateName; }
3.68
MagicPlugin_BaseSpell_onPlayerQuit
/** * Listener method, called on player quit for registered spells. * * @param event The quit event of the player who just quit */ public void onPlayerQuit(PlayerQuitEvent event) { }
3.68
morf_PostgreSQL_getXADataSource
/** * Returns a PostgreSQL XA data source. * * @throws IllegalStateException If the data source cannot be created. * * @see org.alfasoftware.morf.jdbc.DatabaseType#getXADataSource(String, String, String) */ @Override public XADataSource getXADataSource(String jdbcUrl, String username, String password) { try { log.info("Initialising PostgreSQL XA data source..."); XADataSource dataSource = (XADataSource) Class.forName("org.postgresql.xa.PGXADataSource").newInstance(); dataSource.getClass().getMethod("setURL", String.class).invoke(dataSource, jdbcUrl); dataSource.getClass().getMethod("setUser", String.class).invoke(dataSource, username); dataSource.getClass().getMethod("setPassword", String.class).invoke(dataSource, password); return dataSource; } catch (Exception e) { throw new IllegalStateException("Failed to create PostgreSQL XA data source", e); } }
3.68
flink_ConfigOptions_asList
/** Defines that the option's type should be a list of the previously defined atomic type. */ public ListConfigOptionBuilder<T> asList() { return new ListConfigOptionBuilder<>(key, clazz); }
3.68
flink_HiveParserSemanticAnalyzer_processPTFChain
/* * - tree form is * ^(TOK_PTBLFUNCTION name alias? partitionTableFunctionSource partitioningSpec? arguments*) * - a partitionTableFunctionSource can be a tableReference, a SubQuery or another * PTF invocation. */ private PartitionedTableFunctionSpec processPTFChain(HiveParserQB qb, HiveParserASTNode ptf) throws SemanticException { int childCount = ptf.getChildCount(); if (childCount < 2) { throw new SemanticException( HiveParserUtils.generateErrorMessage(ptf, "Not enough Children " + childCount)); } PartitionedTableFunctionSpec ptfSpec = new PartitionedTableFunctionSpec(); ptfSpec.setAstNode(ptf); // name HiveParserASTNode nameNode = (HiveParserASTNode) ptf.getChild(0); ptfSpec.setName(nameNode.getText()); int inputIdx = 1; // alias HiveParserASTNode secondChild = (HiveParserASTNode) ptf.getChild(1); if (secondChild.getType() == HiveASTParser.Identifier) { ptfSpec.setAlias(secondChild.getText()); inputIdx++; } // input HiveParserASTNode inputNode = (HiveParserASTNode) ptf.getChild(inputIdx); ptfSpec.setInput(processPTFSource(qb, inputNode)); int argStartIdx = inputIdx + 1; // partitioning Spec int pSpecIdx = inputIdx + 1; HiveParserASTNode pSpecNode = ptf.getChildCount() > inputIdx ? (HiveParserASTNode) ptf.getChild(pSpecIdx) : null; if (pSpecNode != null && pSpecNode.getType() == HiveASTParser.TOK_PARTITIONINGSPEC) { PartitioningSpec partitioning = processPTFPartitionSpec(pSpecNode); ptfSpec.setPartitioning(partitioning); argStartIdx++; } // arguments for (int i = argStartIdx; i < ptf.getChildCount(); i++) { ptfSpec.addArg((HiveParserASTNode) ptf.getChild(i)); } return ptfSpec; }
3.68
morf_AliasedField_getAlias
/** * Gets the alias of the field. * * @return the alias */ public String getAlias() { return alias; }
3.68
flink_DualInputOperator_clearFirstInput
/** Clears this operator's first input. */ public void clearFirstInput() { this.input1 = null; }
3.68
dubbo_Pane_isTimeInWindow
/** * Check whether given timestamp is in current pane. * * @param timeMillis timestamp in milliseconds. * @return true if the given time is in current pane, otherwise false */ public boolean isTimeInWindow(long timeMillis) { // [) return startInMs <= timeMillis && timeMillis < endInMs; }
3.68
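The half-open [start, end) check used above keeps adjacent panes non-overlapping: a timestamp equal to endInMs belongs to the next pane. A tiny JDK-only sketch:

public class PaneWindowSketch {
    static boolean isTimeInWindow(long startInMs, long endInMs, long timeMillis) {
        return startInMs <= timeMillis && timeMillis < endInMs; // [start, end)
    }

    public static void main(String[] args) {
        long start = 1_000, end = 2_000;                        // a one-second pane
        System.out.println(isTimeInWindow(start, end, 1_000));  // true, start is inclusive
        System.out.println(isTimeInWindow(start, end, 1_999));  // true
        System.out.println(isTimeInWindow(start, end, 2_000));  // false, end belongs to the next pane
    }
}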
framework_AbstractGridRendererConnector_getColumnId
/** * Gets the column id for a column. * <p> * In case this renderer wants to be able to identify a column in such a way * that the server also understands it, the column id is used for that. * Columns are identified by unified ids between the client and the server. * * @param column * the column object * @return the column id for the given column */ protected String getColumnId(Column<?, JsonObject> column) { return getGridConnector().getColumnId(column); }
3.68
hadoop_BlockBlobInputStream_getPos
/** * Gets the read position of the stream. * @return the zero-based byte offset of the read position. * @throws IOException IO failure */ @Override public synchronized long getPos() throws IOException { checkState(); return (streamBuffer != null) ? streamPosition - streamBufferLength + streamBufferPosition : streamPosition; }
3.68
framework_AbstractSelect_isSelected
/** * Tests if an item is selected. * * <p> * In single select mode testing selection status of the item identified by * {@link #getNullSelectionItemId()} returns true if the value of the * property is null. * </p> * * @param itemId * the Id the of the item to be tested. * @see #getNullSelectionItemId() * @see #setNullSelectionItemId(Object) * */ public boolean isSelected(Object itemId) { if (itemId == null) { return false; } if (isMultiSelect()) { return ((Set<?>) getValue()).contains(itemId); } else { final Object value = getValue(); return itemId .equals(value == null ? getNullSelectionItemId() : value); } }
3.68
hbase_RegionLocator_getRegionLocations
/** * Find all the replicas for the region on which the given row is being served. * @param row Row to find. * @return Locations for all the replicas of the row. * @throws IOException if a remote or network exception occurs */ default List<HRegionLocation> getRegionLocations(byte[] row) throws IOException { return getRegionLocations(row, false); }
3.68
flink_DefaultExecutionTopology_containsIntraRegionAllToAllEdge
/** * Check if the {@link DefaultLogicalPipelinedRegion} contains intra-region all-to-all edges or * not. */ private static boolean containsIntraRegionAllToAllEdge( DefaultLogicalPipelinedRegion logicalPipelinedRegion) { for (LogicalVertex vertex : logicalPipelinedRegion.getVertices()) { for (LogicalEdge inputEdge : vertex.getInputs()) { if (inputEdge.getDistributionPattern() == DistributionPattern.ALL_TO_ALL && logicalPipelinedRegion.contains(inputEdge.getProducerVertexId())) { return true; } } } return false; }
3.68
hbase_TableDescriptorBuilder_setNormalizerTargetRegionCount
/** * Setting the target region count of table normalization. * @param regionCount the target region count. * @return the modifyable TD */ public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) { return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount)); }
3.68
framework_NativeButtonIconAndText_buttonClick
/* * (non-Javadoc) * * @see com.vaadin.ui.Button.ClickListener#buttonClick(com.vaadin.ui.Button. * ClickEvent) */ @Override public void buttonClick(ClickEvent event) { Button b = event.getButton(); String was = b.getIconAlternateText(); if (was == null || was.isEmpty()) { b.setIconAlternateText(UPDATED_ALTERNATE_TEXT); } else { b.setIconAlternateText(null); } }
3.68
hbase_CommonFSUtils_getWALRegionDir
/** * Returns the WAL region directory based on the given table name and region name * @param conf configuration to determine WALRootDir * @param tableName Table that the region is under * @param encodedRegionName Region name used for creating the final region directory * @return the region directory used to store WALs under the WALRootDir * @throws IOException if there is an exception determining the WALRootDir */ public static Path getWALRegionDir(final Configuration conf, final TableName tableName, final String encodedRegionName) throws IOException { return new Path(getWALTableDir(conf, tableName), encodedRegionName); }
3.68
hbase_RegionMover_rackManager
/** * Set specific rackManager implementation. This setter method is for testing purpose only. * @param rackManager rackManager impl * @return RegionMoverBuilder object */ @InterfaceAudience.Private public RegionMoverBuilder rackManager(RackManager rackManager) { this.rackManager = rackManager; return this; }
3.68
zilla_WsClientFactory_assembleHeader
// @return number of bytes consumed to assemble the websocket header private int assembleHeader( DirectBuffer buffer, int offset, int length) { int remaining = Math.min(length, MAXIMUM_HEADER_SIZE - headerLength); // may copy more than actual header length (up to max header length), but will adjust at the end header.putBytes(headerLength, buffer, offset, remaining); int consumed = remaining; if (headerLength + remaining >= 2) { int wsHeaderLength = wsHeaderLength(header); // eventual headerLength must not be more than wsHeaderLength if (headerLength + remaining > wsHeaderLength) { consumed = wsHeaderLength - headerLength; } } headerLength += consumed; return consumed; }
3.68
hudi_HoodieTable_getInvalidDataPaths
/** * Returns the possibly invalid data file paths for the given marker files. */ protected Set<String> getInvalidDataPaths(WriteMarkers markers) throws IOException { return markers.createdAndMergedDataPaths(context, config.getFinalizeWriteParallelism()); }
3.68
flink_MapDataUtil_convertToJavaMap
/** * Converts a {@link MapData} into Java {@link Map}, the keys and values of the Java map still * holds objects of internal data structures. */ public static Map<Object, Object> convertToJavaMap( MapData map, LogicalType keyType, LogicalType valueType) { ArrayData keyArray = map.keyArray(); ArrayData valueArray = map.valueArray(); Map<Object, Object> javaMap = new HashMap<>(); ArrayData.ElementGetter keyGetter = ArrayData.createElementGetter(keyType); ArrayData.ElementGetter valueGetter = ArrayData.createElementGetter(valueType); for (int i = 0; i < map.size(); i++) { Object key = keyGetter.getElementOrNull(keyArray, i); Object value = valueGetter.getElementOrNull(valueArray, i); javaMap.put(key, value); } return javaMap; }
3.68
hbase_HRegionFileSystem_createTempName
/** * Generate a unique temporary Path. Used in conjunction with commitStoreFile() to get a safer file * creation. <code> * Path file = fs.createTempName(); * ...StoreFile.Writer(file)... * fs.commitStoreFile("family", file); * </code> * @param suffix extra information to append to the generated name * @return Unique {@link Path} of the temporary file */ public Path createTempName(final String suffix) { return new Path(getTempDir(), generateUniqueName(suffix)); }
3.68
framework_TabsheetBaseConnector_init
/* * (non-Javadoc) * * @see com.vaadin.client.ui.AbstractConnector#init() */ @Override protected void init() { super.init(); getWidget().setClient(getConnection()); }
3.68
framework_CompositeValidator_validate
/** * Validates the given value. * <p> * The value is valid, if: * <ul> * <li><code>MODE_AND</code>: All of the sub-validators are valid * <li><code>MODE_OR</code>: Any of the sub-validators are valid * </ul> * * If the value is invalid, validation error is thrown. If the error message * is set (non-null), it is used. If the error message has not been set, the * first error occurred is thrown. * </p> * * @param value * the value to check. * @throws Validator.InvalidValueException * if the value is not valid. */ @Override public void validate(Object value) throws Validator.InvalidValueException { switch (mode) { case AND: for (Validator validator : validators) { validator.validate(value); } return; case OR: Validator.InvalidValueException first = null; for (Validator v : validators) { try { v.validate(value); return; } catch (final Validator.InvalidValueException e) { if (first == null) { first = e; } } } if (first == null) { return; } final String em = getErrorMessage(); if (em != null) { throw new Validator.InvalidValueException(em); } else { throw first; } } }
3.68
flink_Tuple17_copy
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16> copy() { return new Tuple17<>( this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16); }
3.68
hibernate-validator_MethodValidationConfiguration_allowOverridingMethodAlterParameterConstraint
/** * Define whether overriding methods that override constraints should throw a {@code ConstraintDefinitionException}. * The default value is {@code false}, i.e. do not allow. * * See Section 5.6.5 of the Jakarta Bean Validation Specification, specifically * <pre> * "In sub types (be it sub classes/interfaces or interface implementations), no parameter constraints may * be declared on overridden or implemented methods, nor may parameters be marked for cascaded validation. * This would pose a strengthening of preconditions to be fulfilled by the caller." * </pre> * * @param allow flag determining whether validation will allow overriding to alter parameter constraints. * * @return {@code this} following the chaining method pattern */ public Builder allowOverridingMethodAlterParameterConstraint(boolean allow) { this.allowOverridingMethodAlterParameterConstraint = allow; return this; }
3.68
shardingsphere-elasticjob_RegExceptionHandler_handleException
/** * Handle exception. * * @param cause exception to be handled */ public static void handleException(final Exception cause) { if (null == cause) { return; } if (isIgnoredException(cause) || null != cause.getCause() && isIgnoredException(cause.getCause())) { log.debug("Elastic job: ignored exception for: {}", cause.getMessage()); } else if (cause instanceof InterruptedException) { Thread.currentThread().interrupt(); } else { throw new RegException(cause); } }
3.68
hbase_CacheConfig_shouldCacheBloomsOnWrite
/** * @return true if bloom blocks should be written to the cache when an HFile is written, false if * not */ public boolean shouldCacheBloomsOnWrite() { return this.cacheBloomsOnWrite; }
3.68
hbase_MasterDDLOperationHelper_deleteColumnFamilyFromFileSystem
/** * Remove the column family from the file system **/ public static void deleteColumnFamilyFromFileSystem(final MasterProcedureEnv env, final TableName tableName, final List<RegionInfo> regionInfoList, final byte[] familyName, final boolean hasMob) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); if (LOG.isDebugEnabled()) { LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName); } for (RegionInfo hri : regionInfoList) { // Delete the family directory in FS for all the regions one by one mfs.deleteFamilyFromFS(hri, familyName); } if (hasMob) { // Delete the mob region Path mobRootDir = new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME); RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName); mfs.deleteFamilyFromFS(mobRootDir, mobRegionInfo, familyName); } }
3.68
flink_CopyOnWriteStateMap_makeTable
/** * Allocate a table of the given capacity and set the threshold accordingly. * * @param newCapacity must be a power of two */ private StateMapEntry<K, N, S>[] makeTable(int newCapacity) { if (newCapacity < MAXIMUM_CAPACITY) { threshold = (newCapacity >> 1) + (newCapacity >> 2); // 3/4 capacity } else { if (size() > MAX_ARRAY_SIZE) { throw new IllegalStateException( "Maximum capacity of CopyOnWriteStateMap is reached and the job " + "cannot continue. Please consider scaling-out your job or using a different keyed state backend " + "implementation!"); } else { LOG.warn( "Maximum capacity of 2^30 in StateMap reached. Cannot increase hash map size. This can " + "lead to more collisions and lower performance. Please consider scaling-out your job or using a " + "different keyed state backend implementation!"); threshold = MAX_ARRAY_SIZE; } } @SuppressWarnings("unchecked") StateMapEntry<K, N, S>[] newMap = (StateMapEntry<K, N, S>[]) new StateMapEntry[newCapacity]; return newMap; }
3.68
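The threshold expression (newCapacity >> 1) + (newCapacity >> 2) in makeTable is simply 3/4 of the power-of-two capacity computed without floating point; a quick JDK-only check:

public class ThresholdSketch {
    public static void main(String[] args) {
        for (int capacity = 16; capacity <= 1 << 20; capacity <<= 4) {
            int threshold = (capacity >> 1) + (capacity >> 2); // 1/2 + 1/4 = 3/4 of capacity
            System.out.println(capacity + " -> " + threshold + " (expected " + (capacity * 3 / 4) + ")");
        }
    }
}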
querydsl_Expressions_booleanTemplate
/** * Create a new Template expression * * @param template template * @param args template parameters * @return template expression */ public static BooleanTemplate booleanTemplate(Template template, List<?> args) { return new BooleanTemplate(template, args); }
3.68
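A hedged usage sketch for the template factory above, using the String-based overload of Expressions.booleanTemplate; the "email" path is a hypothetical property introduced only for illustration.

import com.querydsl.core.types.dsl.BooleanTemplate;
import com.querydsl.core.types.dsl.Expressions;

public class BooleanTemplateSketch {
    public static void main(String[] args) {
        // {0} in the template is replaced by the first argument.
        BooleanTemplate predicate =
                Expressions.booleanTemplate("{0} is not null", Expressions.stringPath("email"));
        System.out.println(predicate); // email is not null
    }
}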
hudi_RollbackUtils_getRollbackPlan
/** * Get the latest version of the rollback plan corresponding to a rollback instant. * * @param metaClient Hoodie Table Meta Client * @param rollbackInstant Instant referring to rollback action * @return Rollback plan corresponding to rollback instant * @throws IOException */ public static HoodieRollbackPlan getRollbackPlan(HoodieTableMetaClient metaClient, HoodieInstant rollbackInstant) throws IOException { // TODO: add upgrade step if required. final HoodieInstant requested = HoodieTimeline.getRollbackRequestedInstant(rollbackInstant); return TimelineMetadataUtils.deserializeAvroMetadata( metaClient.getActiveTimeline().readRollbackInfoAsBytes(requested).get(), HoodieRollbackPlan.class); }
3.68
AreaShop_GeneralRegion_getConfigurationSectionSetting
/** * Get a configuration section setting for this region, defined as follows * - If earlyResult is non-null, use that * - Else if the region has the setting in its own file (/regions/regionName.yml), use that * - Else if the region has groups, use the setting defined by the most important group, if any * - Otherwise fallback to the default.yml file setting * @param path The path to get the setting of * @param translateProfileName The name of the profile section in the plugin config file to translate result strings into sections * @param earlyResult Result that should have priority over the rest * @return The value of the setting */ public ConfigurationSection getConfigurationSectionSetting(String path, String translateProfileName, Object earlyResult) { Object result = null; if(earlyResult != null) { result = earlyResult; } else if(config.isSet(path)) { result = config.get(path); } else { boolean found = false; int priority = Integer.MIN_VALUE; for(RegionGroup group : plugin.getFileManager().getGroups()) { if(group.isMember(this) && group.getSettings().isSet(path) && group.getPriority() > priority) { result = group.getSettings().get(path); priority = group.getPriority(); found = true; } } if(!found) { if(this.getFileManager().getRegionSettings().isSet(path)) { result = this.getFileManager().getRegionSettings().get(path); } else { result = this.getFileManager().getFallbackRegionSettings().get(path); } } } // Either result is a ConfigurationSection or is used as key in the plugin config to get a ConfigurationSection if(result == null) { return null; } else if(result instanceof ConfigurationSection) { return (ConfigurationSection)result; } else { return plugin.getConfig().getConfigurationSection(translateProfileName + "." + result.toString()); } }
3.68
flink_Table_getSchema
/** * Returns the schema of this table. * * @deprecated This method has been deprecated as part of FLIP-164. {@link TableSchema} has been * replaced by two more dedicated classes {@link Schema} and {@link ResolvedSchema}. Use * {@link Schema} for declaration in APIs. {@link ResolvedSchema} is offered by the * framework after resolution and validation. */ @Deprecated default TableSchema getSchema() { return TableSchema.fromResolvedSchema(getResolvedSchema()); }
3.68
incubator-hugegraph-toolchain_HugeGraphLoader_executeParseTask
/** * Execute parse task sync */ private void executeParseTask(InputStruct struct, ElementMapping mapping, ParseTaskBuilder.ParseTask task) { long start = System.currentTimeMillis(); // Sync parse List<List<Record>> batches = task.get(); long end = System.currentTimeMillis(); this.context.summary().addTimeRange(mapping.type(), start, end); if (this.context.options().dryRun || CollectionUtils.isEmpty(batches)) { return; } // Async load for (List<Record> batch : batches) { this.manager.submitBatch(struct, mapping, batch); } }
3.68
framework_VFilterSelect_filterOptions
/** * Filters the options at certain page using the given filter * * @param page * The page to filter * @param filter * The filter to apply to the options * @param immediate * Whether to send the options request immediately */ private void filterOptions(int page, String filter, boolean immediate) { debug("VFS: filterOptions(" + page + ", " + filter + ", " + immediate + ")"); if (filter.equals(lastFilter) && currentPage == page) { if (!suggestionPopup.isAttached()) { suggestionPopup.showSuggestions(currentSuggestions, currentPage, totalMatches); } return; } if (!filter.equals(lastFilter)) { // when filtering, let the server decide the page unless we've // set the filter to empty and explicitly said that we want to see // the results starting from page 0. if ("".equals(filter) && page != 0) { // let server decide page = -1; } else { page = 0; } } waitingForFilteringResponse = true; client.updateVariable(paintableId, "filter", filter, false); client.updateVariable(paintableId, "page", page, immediate); afterUpdateClientVariables(); lastFilter = filter; currentPage = page; }
3.68
AreaShop_GeneralRegion_getFeature
/** * Get a feature of this region. * @param clazz The class of the feature to get * @param <T> The feature to get * @return The feature (either just instantiated or cached) */ public <T extends RegionFeature> T getFeature(Class<T> clazz) { RegionFeature result = features.get(clazz); if(result == null) { result = plugin.getFeatureManager().getRegionFeature(this, clazz); features.put(clazz, result); } return clazz.cast(result); }
3.68
graphhopper_Measurement_start
// creates properties file in the format key=value
// Every value is one y-value in a separate diagram with an identical x-value for every Measurement.start call
void start(PMap args) throws IOException {
    final String graphLocation = args.getString("graph.location", "");
    final boolean useJson = args.getBool("measurement.json", false);
    boolean cleanGraph = args.getBool("measurement.clean", false);
    stopOnError = args.getBool("measurement.stop_on_error", false);
    String summaryLocation = args.getString("measurement.summaryfile", "");
    final String timeStamp = new SimpleDateFormat("yyyy-MM-dd_HH:mm:ss").format(new Date());
    put("measurement.timestamp", timeStamp);
    String propFolder = args.getString("measurement.folder", "");
    if (!propFolder.isEmpty()) {
        Files.createDirectories(Paths.get(propFolder));
    }
    String propFilename = args.getString("measurement.filename", "");
    if (isEmpty(propFilename)) {
        if (useJson) {
            // if we start from IDE or otherwise jar was not built using maven the git commit id will be unknown
            String id = Constants.GIT_INFO != null ? Constants.GIT_INFO.getCommitHash().substring(0, 8) : "unknown";
            propFilename = "measurement_" + id + "_" + timeStamp + ".json";
        } else {
            propFilename = "measurement_" + timeStamp + ".properties";
        }
    }
    final String propLocation = Paths.get(propFolder).resolve(propFilename).toString();
    seed = args.getLong("measurement.seed", 123);
    put("measurement.gitinfo", args.getString("measurement.gitinfo", ""));
    int count = args.getInt("measurement.count", 5000);
    put("measurement.name", args.getString("measurement.name", "no_name"));
    put("measurement.map", args.getString("datareader.file", "unknown"));
    final boolean useMeasurementTimeAsRefTime = args.getBool("measurement.use_measurement_time_as_ref_time", false);
    if (useMeasurementTimeAsRefTime && !useJson) {
        throw new IllegalArgumentException("Using measurement time as reference time only works with json files");
    }

    GraphHopper hopper = new GraphHopper() {
        @Override
        protected Map<String, PrepareContractionHierarchies.Result> prepareCH(boolean closeEarly, List<CHConfig> configsToPrepare) {
            StopWatch sw = new StopWatch().start();
            Map<String, PrepareContractionHierarchies.Result> result = super.prepareCH(closeEarly, configsToPrepare);
            // note that we measure the total time of all (possibly edge&node) CH preparations
            put(Parameters.CH.PREPARE + "time", sw.stop().getMillis());
            if (result.get("profile_no_tc") != null) {
                int shortcuts = result.get("profile_no_tc").getCHStorage().getShortcuts();
                put(Parameters.CH.PREPARE + "node.shortcuts", shortcuts);
                put(Parameters.CH.PREPARE + "node.time", result.get("profile_no_tc").getTotalPrepareTime());
            }
            if (result.get("profile_tc") != null) {
                int shortcuts = result.get("profile_tc").getCHStorage().getShortcuts();
                put(Parameters.CH.PREPARE + "edge.shortcuts", shortcuts);
                put(Parameters.CH.PREPARE + "edge.time", result.get("profile_tc").getTotalPrepareTime());
            }
            return result;
        }

        @Override
        protected List<PrepareLandmarks> prepareLM(boolean closeEarly, List<LMConfig> configsToPrepare) {
            List<PrepareLandmarks> prepareLandmarks = super.prepareLM(closeEarly, configsToPrepare);
            for (PrepareLandmarks plm : prepareLandmarks) {
                put(Landmark.PREPARE + "time", plm.getTotalPrepareTime());
            }
            return prepareLandmarks;
        }

        @Override
        protected void cleanUp() {
            StopWatch sw = new StopWatch().start();
            super.cleanUp();
            put("graph.subnetwork_removal_time_ms", sw.stop().getMillis());
        }

        @Override
        protected void importOSM() {
            StopWatch sw = new StopWatch().start();
            super.importOSM();
            sw.stop();
            put("graph.import_time", sw.getSeconds());
            put("graph.import_time_ms", sw.getMillis());
        }
    };

    hopper.init(createConfigFromArgs(args));
    if (cleanGraph) {
        hopper.clean();
    }

    hopper.importOrLoad();

    BaseGraph g = hopper.getBaseGraph();
    EncodingManager encodingManager = hopper.getEncodingManager();
    BooleanEncodedValue accessEnc = encodingManager.getBooleanEncodedValue(VehicleAccess.key(vehicle));
    boolean withTurnCosts = encodingManager.hasTurnEncodedValue(TurnRestriction.key(vehicle));

    StopWatch sw = new StopWatch().start();
    try {
        maxNode = g.getNodes();
        final boolean runSlow = args.getBool("measurement.run_slow_routing", true);
        printGraphDetails(g, vehicle);
        measureGraphTraversal(g, accessEnc, count * 100);
        measureLocationIndex(g, hopper.getLocationIndex(), count);

        if (runSlow) {
            boolean isCH = false;
            boolean isLM = false;
            measureRouting(hopper, new QuerySettings("routing", count / 20, isCH, isLM).
                    withInstructions());
            measureRouting(hopper, new QuerySettings("routing_alt", count / 500, isCH, isLM).
                    alternative());
            if (withTurnCosts) {
                measureRouting(hopper, new QuerySettings("routing_edge", count / 20, isCH, isLM).
                        withInstructions().edgeBased());
                // unfortunately alt routes are so slow that we cannot really afford many iterations
                measureRouting(hopper, new QuerySettings("routing_edge_alt", count / 500, isCH, isLM).
                        edgeBased().alternative());
            }
        }

        if (hopper.getLMPreparationHandler().isEnabled()) {
            gcAndWait();
            boolean isCH = false;
            boolean isLM = true;
            Helper.parseList(args.getString("measurement.lm.active_counts", "[4,8,12]")).stream()
                    .mapToInt(Integer::parseInt).forEach(activeLMCount -> {
                measureRouting(hopper, new QuerySettings("routingLM" + activeLMCount, count / 20, isCH, isLM).
                        withInstructions().activeLandmarks(activeLMCount));
                measureRouting(hopper, new QuerySettings("routingLM" + activeLMCount + "_alt", count / 500, isCH, isLM).
                        activeLandmarks(activeLMCount).alternative());
                if (args.getBool("measurement.lm.edge_based", withTurnCosts)) {
                    measureRouting(hopper, new QuerySettings("routingLM" + activeLMCount + "_edge", count / 20, isCH, isLM).
                            withInstructions().activeLandmarks(activeLMCount).edgeBased());
                    measureRouting(hopper, new QuerySettings("routingLM" + activeLMCount + "_alt_edge", count / 500, isCH, isLM).
                            activeLandmarks(activeLMCount).edgeBased().alternative());
                }
            });
        }

        if (hopper.getCHPreparationHandler().isEnabled()) {
            boolean isCH = true;
            boolean isLM = false;
            gcAndWait();
            RoutingCHGraph nodeBasedCH = hopper.getCHGraphs().get("profile_no_tc");
            if (nodeBasedCH != null) {
                measureGraphTraversalCH(nodeBasedCH, count * 100);
                gcAndWait();
                measureRouting(hopper, new QuerySettings("routingCH", count, isCH, isLM).
                        withInstructions().sod());
                measureRouting(hopper, new QuerySettings("routingCH_alt", count / 100, isCH, isLM).
                        withInstructions().sod().alternative());
                measureRouting(hopper, new QuerySettings("routingCH_with_hints", count, isCH, isLM).
                        withInstructions().sod().withPointHints());
                measureRouting(hopper, new QuerySettings("routingCH_no_sod", count, isCH, isLM).
                        withInstructions());
                measureRouting(hopper, new QuerySettings("routingCH_no_instr", count, isCH, isLM).
                        sod());
                measureRouting(hopper, new QuerySettings("routingCH_full", count, isCH, isLM).
                        withInstructions().withPointHints().sod().simplify().pathDetails());
                // for some strange (jvm optimizations) reason adding these measurements reduced the measured time for routingCH_full... see #2056
                measureRouting(hopper, new QuerySettings("routingCH_via_100", count / 100, isCH, isLM).
                        withPoints(100).sod());
                measureRouting(hopper, new QuerySettings("routingCH_via_100_full", count / 100, isCH, isLM).
                        withPoints(100).sod().withInstructions().simplify().pathDetails());
            }
            RoutingCHGraph edgeBasedCH = hopper.getCHGraphs().get("profile_tc");
            if (edgeBasedCH != null) {
                measureRouting(hopper, new QuerySettings("routingCH_edge", count, isCH, isLM).
                        edgeBased().withInstructions());
                measureRouting(hopper, new QuerySettings("routingCH_edge_alt", count / 100, isCH, isLM).
                        edgeBased().withInstructions().alternative());
                measureRouting(hopper, new QuerySettings("routingCH_edge_no_instr", count, isCH, isLM).
                        edgeBased());
                measureRouting(hopper, new QuerySettings("routingCH_edge_full", count, isCH, isLM).
                        edgeBased().withInstructions().withPointHints().simplify().pathDetails());
                // for some strange (jvm optimizations) reason adding these measurements reduced the measured time for routingCH_edge_full... see #2056
                measureRouting(hopper, new QuerySettings("routingCH_edge_via_100", count / 100, isCH, isLM).
                        withPoints(100).edgeBased().sod());
                measureRouting(hopper, new QuerySettings("routingCH_edge_via_100_full", count / 100, isCH, isLM).
                        withPoints(100).edgeBased().sod().withInstructions().simplify().pathDetails());
            }
        }
        measureCountryAreaIndex(count);
    } catch (Exception ex) {
        logger.error("Problem while measuring " + graphLocation, ex);
        if (stopOnError) System.exit(1);
        put("error", ex.toString());
    } finally {
        put("gh.gitinfo", Constants.GIT_INFO != null ? Constants.GIT_INFO.toString() : "unknown");
        put("measurement.count", count);
        put("measurement.seed", seed);
        put("measurement.time", sw.stop().getMillis());
        gcAndWait();
        put("measurement.totalMB", getTotalMB());
        put("measurement.usedMB", getUsedMB());
        if (!isEmpty(summaryLocation)) {
            writeSummary(summaryLocation, propLocation);
        }
        if (useJson) {
            storeJson(propLocation, useMeasurementTimeAsRefTime);
        } else {
            storeProperties(propLocation);
        }
    }
}
3.68
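A rough sketch of how such a measurement run could be started. The configuration keys mirror the ones read above; the fluent PMap#putObject calls and the direct invocation of the package-private start method are assumptions (the usual entry point is Measurement's main, which parses key=value command-line arguments into a PMap), and the file paths are made up.

import com.graphhopper.util.PMap;

// Hypothetical driver, assumed to live in the same package as Measurement
// because start(PMap) is package-private.
public class MeasurementDriver {
    public static void main(String[] args) throws Exception {
        PMap pmap = new PMap()
                .putObject("graph.location", "measurement-gh")          // graph folder
                .putObject("datareader.file", "berlin-latest.osm.pbf")  // hypothetical OSM extract
                .putObject("measurement.count", 5000)                   // base query count
                .putObject("measurement.json", true)                    // write a JSON result file
                .putObject("measurement.folder", "measurements");
        new Measurement().start(pmap);
    }
}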
framework_BeanValidator_getJavaxBeanValidatorFactory
/**
 * Returns the underlying JSR-303 bean validator factory used. A factory is
 * created using {@link Validation} if necessary.
 *
 * @return the validator factory to use
 */
protected static ValidatorFactory getJavaxBeanValidatorFactory() {
    return LazyFactoryInitializer.FACTORY;
}
3.68
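For context, a minimal sketch of what such a validator factory is used for, built directly via javax.validation.Validation (the same bootstrap the lazy initializer relies on per the javadoc above). The Person bean and its constraint are hypothetical, and a JSR-303 implementation such as Hibernate Validator is assumed to be on the classpath.

import java.util.Set;
import javax.validation.ConstraintViolation;
import javax.validation.Validation;
import javax.validation.Validator;
import javax.validation.constraints.NotNull;

public class ValidationSketch {
    // Hypothetical bean with a single JSR-303 constraint.
    public static class Person {
        @NotNull
        public String name;
    }

    public static void main(String[] args) {
        Validator validator = Validation.buildDefaultValidatorFactory().getValidator();
        // name is null, so one violation is reported
        Set<ConstraintViolation<Person>> violations = validator.validate(new Person());
        violations.forEach(v -> System.out.println(v.getPropertyPath() + ": " + v.getMessage()));
    }
}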
flink_ExecutionEnvironment_registerType
/**
 * Registers the given type with the serialization stack. If the type is eventually serialized
 * as a POJO, then the type is registered with the POJO serializer. If the type ends up being
 * serialized with Kryo, then it will be registered at Kryo to make sure that only tags are
 * written.
 *
 * @param type The class of the type to register.
 */
public void registerType(Class<?> type) {
    if (type == null) {
        throw new NullPointerException("Cannot register null type class.");
    }

    TypeInformation<?> typeInfo = TypeExtractor.createTypeInfo(type);

    if (typeInfo instanceof PojoTypeInfo) {
        config.registerPojoType(type);
    } else {
        config.registerKryoType(type);
    }
}
3.68
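To make the POJO-vs-Kryo distinction above concrete, a sketch of registering a custom type on the batch ExecutionEnvironment. SensorReading is a hypothetical class that satisfies Flink's POJO rules (public no-arg constructor, public fields), so it should end up in registerPojoType rather than the Kryo path.

import org.apache.flink.api.java.ExecutionEnvironment;

public class RegisterTypeSketch {
    // Hypothetical POJO type used only for illustration.
    public static class SensorReading {
        public String sensorId;
        public double temperature;
        public SensorReading() {}
    }

    public static void main(String[] args) {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.registerType(SensorReading.class);
        // ... build and execute the batch job as usual
    }
}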
pulsar_LedgerMetadataUtils_buildMetadataForDelayedIndexBucket
/**
 * Build additional metadata for a delayed message index bucket.
 *
 * @param bucketKey key of the delayed message bucket
 * @param topicName name of the topic
 * @param cursorName name of the cursor
 * @return an immutable map which describes the delayed message index bucket
 */
public static Map<String, byte[]> buildMetadataForDelayedIndexBucket(String bucketKey,
                                                                     String topicName, String cursorName) {
    return Map.of(
            METADATA_PROPERTY_APPLICATION, METADATA_PROPERTY_APPLICATION_PULSAR,
            METADATA_PROPERTY_COMPONENT, METADATA_PROPERTY_COMPONENT_DELAYED_INDEX_BUCKET,
            METADATA_PROPERTY_DELAYED_INDEX_BUCKET_KEY, bucketKey.getBytes(StandardCharsets.UTF_8),
            METADATA_PROPERTY_DELAYED_INDEX_TOPIC, topicName.getBytes(StandardCharsets.UTF_8),
            METADATA_PROPERTY_DELAYED_INDEX_CURSOR, cursorName.getBytes(StandardCharsets.UTF_8)
    );
}
3.68
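A small sketch of calling the helper, assuming LedgerMetadataUtils is visible from the caller's package. The bucket key, topic, and cursor values are made up; "0_1024" merely stands in for whatever key the delayed message tracker actually produces.

import java.nio.charset.StandardCharsets;
import java.util.Map;

public class DelayedIndexMetadataSketch {
    public static void main(String[] args) {
        Map<String, byte[]> metadata = LedgerMetadataUtils.buildMetadataForDelayedIndexBucket(
                "0_1024", "persistent://public/default/orders", "orders-sub");
        // print the bucket metadata that would be attached to the ledger
        metadata.forEach((key, value) ->
                System.out.println(key + " -> " + new String(value, StandardCharsets.UTF_8)));
    }
}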
flink_Schema_fromFields
/** Adopts the given field names and field data types as physical columns of the schema. */
public Builder fromFields(
        List<String> fieldNames, List<? extends AbstractDataType<?>> fieldDataTypes) {
    Preconditions.checkNotNull(fieldNames, "Field names must not be null.");
    Preconditions.checkNotNull(fieldDataTypes, "Field data types must not be null.");
    Preconditions.checkArgument(
            fieldNames.size() == fieldDataTypes.size(),
            "Field names and field data types must have the same length.");
    IntStream.range(0, fieldNames.size())
            .forEach(i -> column(fieldNames.get(i), fieldDataTypes.get(i)));
    return this;
}
3.68
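A brief sketch of how this builder method is typically reached through Schema.newBuilder(); the column names and types are made up for illustration.

import java.util.Arrays;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class SchemaFromFieldsSketch {
    public static void main(String[] args) {
        // Builds a three-column physical schema from parallel name/type lists.
        Schema schema = Schema.newBuilder()
                .fromFields(
                        Arrays.asList("id", "name", "price"),
                        Arrays.asList(DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.DECIMAL(10, 2)))
                .build();
        System.out.println(schema);
    }
}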
flink_ProducerMergedPartitionFileWriter_flush
/** Called in single-threaded ioExecutor. Order is guaranteed. */
private void flush(
        List<SubpartitionBufferContext> toWrite, CompletableFuture<Void> flushSuccessNotifier) {
    try {
        List<ProducerMergedPartitionFileIndex.FlushedBuffer> buffers = new ArrayList<>();
        calculateSizeAndFlushBuffers(toWrite, buffers);
        partitionFileIndex.addBuffers(buffers);
        flushSuccessNotifier.complete(null);
    } catch (IOException exception) {
        ExceptionUtils.rethrow(exception);
    }
}
3.68
hudi_ClientIds_builder
/**
 * Returns the builder.
 */
public static Builder builder() {
    return new Builder();
}
3.68
hudi_SourceFormatAdapter_processErrorEvents
/**
 * Transforms datasets with error events when the error table is enabled.
 *
 * @param eventsRow the incoming dataset, if present
 * @param errorReason the reason attached to the extracted error events
 * @return the dataset with corrupt records routed to the error table and dropped from the result
 */
public Option<Dataset<Row>> processErrorEvents(Option<Dataset<Row>> eventsRow, ErrorEvent.ErrorReason errorReason) {
    return eventsRow.map(dataset -> {
        if (errorTableWriter.isPresent() && Arrays.stream(dataset.columns()).collect(Collectors.toList())
                .contains(ERROR_TABLE_CURRUPT_RECORD_COL_NAME)) {
            errorTableWriter.get().addErrorEvents(dataset.filter(new Column(ERROR_TABLE_CURRUPT_RECORD_COL_NAME).isNotNull())
                    .select(new Column(ERROR_TABLE_CURRUPT_RECORD_COL_NAME)).toJavaRDD()
                    .map(ev -> new ErrorEvent<>(ev.getString(0), errorReason)));
            return dataset.filter(new Column(ERROR_TABLE_CURRUPT_RECORD_COL_NAME).isNull()).drop(ERROR_TABLE_CURRUPT_RECORD_COL_NAME);
        }
        return dataset;
    });
}
3.68
hbase_ZKListener_getWatcher
/** Returns The watcher associated with this listener */
public ZKWatcher getWatcher() {
    return this.watcher;
}
3.68
hbase_MemStore_stopReplayingFromWAL
/**
 * This method intends to inform the MemStore that the replaying of edits from the WAL is done.
 */
default void stopReplayingFromWAL() {
    return;
}
3.68
dubbo_MetadataInfo_getNoProtocolServiceInfo
/**
 * Get service infos of an interface with the specified group and version.
 * There may be several service infos for different protocols; this method simply picks the first one.
 *
 * @param serviceKeyWithoutProtocol key is of format '{group}/{interface name}:{version}'
 * @return the first service info related to serviceKey
 */
public ServiceInfo getNoProtocolServiceInfo(String serviceKeyWithoutProtocol) {
    if (CollectionUtils.isEmptyMap(subscribedServices)) {
        return null;
    }
    Set<ServiceInfo> subServices = subscribedServices.get(serviceKeyWithoutProtocol);
    if (CollectionUtils.isNotEmpty(subServices)) {
        return subServices.iterator().next();
    }
    return null;
}
3.68
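To illustrate the key format the javadoc documents, a hypothetical lookup against an already-populated MetadataInfo instance; the group, interface, and version in the key are invented.

import org.apache.dubbo.metadata.MetadataInfo;
import org.apache.dubbo.metadata.MetadataInfo.ServiceInfo;

public class MetadataLookupSketch {
    // metadataInfo is assumed to have been filled by the registry already;
    // the key follows the documented '{group}/{interface name}:{version}' format.
    static void printFirstServiceInfo(MetadataInfo metadataInfo) {
        ServiceInfo serviceInfo =
                metadataInfo.getNoProtocolServiceInfo("demo-group/com.example.DemoService:1.0.0");
        if (serviceInfo != null) {
            // only the first protocol's ServiceInfo is returned when several exist
            System.out.println(serviceInfo);
        }
    }
}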
hbase_RegionNormalizerManager_getSplitPlanCount
/**
 * Return the number of times a {@link SplitNormalizationPlan} has been submitted.
 */
public long getSplitPlanCount() {
    return worker == null ? 0 : worker.getSplitPlanCount();
}
3.68
querydsl_SQLExpressions_varSamp
/**
 * returns the sample variance of a set of numbers after discarding the nulls in this set.
 *
 * @param expr argument
 * @return var_samp(expr)
 */
public static <T extends Number> WindowOver<T> varSamp(Expression<T> expr) {
    return new WindowOver<T>(expr.getType(), SQLOps.VARSAMP, expr);
}
3.68
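A sketch of using the returned window expression in a querydsl-sql query. QEmployee is an assumed generated query type with a NumberPath<Double> salary and a departmentId path, and the SQLQuery instance is passed in from elsewhere.

import java.util.List;
import com.querydsl.sql.SQLExpressions;
import com.querydsl.sql.SQLQuery;

public class VarSampSketch {
    // Computes VAR_SAMP(salary) OVER (PARTITION BY department_id) for each employee row.
    static List<Double> salaryVariancePerDepartment(SQLQuery<?> query) {
        QEmployee employee = QEmployee.employee; // hypothetical generated query type
        return query
                .select(SQLExpressions.varSamp(employee.salary)
                        .over()
                        .partitionBy(employee.departmentId))
                .from(employee)
                .fetch();
    }
}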
flink_PythonDependencyUtils_addPythonArchive
/**
 * Adds a Python archive file (zip format). The file will be extracted and moved to a
 * dedicated directory under the working directory of the Python UDF workers. The param
 * `targetDir` is the name of the dedicated directory. The Python UDFs and the config option
 * "python.executable" could access the extracted files via relative path.
 *
 * @param archivePath The path of the archive file.
 * @param targetDir The name of the target directory.
 */
private void addPythonArchive(
        Configuration pythonDependencyConfig, String archivePath, String targetDir) {
    Preconditions.checkNotNull(archivePath);

    if (!pythonDependencyConfig.contains(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO)) {
        pythonDependencyConfig.set(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO, new HashMap<>());
    }
    String fileKey =
            generateUniqueFileKey(
                    PYTHON_ARCHIVE_PREFIX, archivePath + PARAM_DELIMITER + targetDir);
    registerCachedFileIfNotExist(archivePath, fileKey);
    pythonDependencyConfig
            .get(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO)
            .put(fileKey, targetDir);
}
3.68
hbase_Increment_numFamilies
/**
 * Method for retrieving the number of families to increment from
 * @return number of families
 */
@Override
public int numFamilies() {
    return this.familyMap.size();
}
3.68
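A small sketch showing what the counter reflects; the row, family, and qualifier names are arbitrary.

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementFamiliesSketch {
    public static void main(String[] args) {
        // Two columns in the same family, so numFamilies() reports 1.
        Increment increment = new Increment(Bytes.toBytes("row-1"));
        increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("clicks"), 1L);
        increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("views"), 1L);
        System.out.println(increment.numFamilies()); // prints 1
    }
}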
flink_CheckpointProperties_getCheckpointType
/** Gets the type of the checkpoint (checkpoint / savepoint). */
public SnapshotType getCheckpointType() {
    return checkpointType;
}
3.68