Dataset columns:
name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, range 3.26 to 3.68
morf_JdbcUrlElements_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { return "JdbcUrlElements [databaseTypeIdentifier=" + databaseType + ", host=" + hostName + ", port=" + port + ", instanceName=" + instanceName + ", databaseName=" + databaseName + ", schemaName=" + schemaName + "]"; }
3.68
morf_AbstractSetOperator_validateNotNull
/** * Don't allow {@code null} references to {@linkplain SelectStatement}. * * @param parentSelect the select statement to be validated. * @param childSelect the select statement to be validated. */ void validateNotNull(SelectStatement parentSelect, SelectStatement childSelect) throws IllegalArgumentException { if (parentSelect == null || childSelect == null) { throw new IllegalArgumentException("Select statements cannot be null"); } }
3.68
flink_TableFunction_setCollector
/** Internal use. Sets the current collector. */ public final void setCollector(Collector<T> collector) { this.collector = collector; }
3.68
framework_AbstractComponentContainer_addComponentAttachListener
/* documented in interface */ @Override public Registration addComponentAttachListener( ComponentAttachListener listener) { return addListener(ComponentAttachEvent.class, listener, ComponentAttachListener.attachMethod); }
3.68
hbase_HBaseTestingUtility_getDataTestDirOnTestFS
/** * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write * temporary test data. Call this method after setting up the mini dfs cluster if the test relies * on it. * @param subdirName name of the subdir to create under the base test dir * @return a unique path in the test filesystem */ public Path getDataTestDirOnTestFS(final String subdirName) throws IOException { return new Path(getDataTestDirOnTestFS(), subdirName); }
3.68
hudi_TimelineLayoutVersion_isNullVersion
/** * For pre-0.5.1 releases, there was no metadata version. This method is used to detect * that case. * @return {@code true} if this is the pre-0.5.1 null version */ public boolean isNullVersion() { return Objects.equals(version, VERSION_0); }
3.68
hbase_FSTableDescriptors_get
/** * Get the current table descriptor for the given table, or null if none exists. * <p/> * Uses a local cache of the descriptor but still checks the filesystem on each call if * {@link #fsvisited} is not {@code true}, i.e., we haven't done a full scan yet, to see if a newer * file has been created since the cached one was read. */ @Override @Nullable public TableDescriptor get(TableName tableName) { invocations++; if (usecache) { // Look in cache of descriptors. TableDescriptor cachedtdm = this.cache.get(tableName); if (cachedtdm != null) { cachehits++; return cachedtdm; } // we do not need to go to fs any more if (fsvisited) { return null; } } TableDescriptor tdmt = null; try { tdmt = getTableDescriptorFromFs(fs, getTableDir(tableName), fsreadonly).map(Pair::getSecond) .orElse(null); } catch (IOException ioe) { LOG.debug("Exception during readTableDescriptor. Current table name = " + tableName, ioe); } // last HTD written wins if (usecache && tdmt != null) { this.cache.put(tableName, tdmt); } return tdmt; }
3.68
pulsar_LinuxInfoUtils_getTotalNicUsage
/** * Get all physical nic usage. * @param nics All nic path * @param type Nic's usage type: transport, receive * @param bitRateUnit Bit rate unit * @return Total nic usage */ public static double getTotalNicUsage(List<String> nics, NICUsageType type, BitRateUnit bitRateUnit) { return bitRateUnit.convert(nics.stream().mapToDouble(nic -> { try { return readDoubleFromFile(getReplacedNICPath(type.template, nic)); } catch (IOException e) { log.error("[LinuxInfo] Failed to read {} bytes for NIC {} ", type, nic, e); return 0d; } }).sum(), BitRateUnit.Byte); }
3.68
flink_ExpandColumnFunctionsRule_resolveArgsOfColumns
/** Expand the columns expression in the input Expression List. */ private List<Expression> resolveArgsOfColumns( List<Expression> args, boolean isReverseProjection) { List<Expression> finalResult = new LinkedList<>(); List<UnresolvedReferenceExpression> result = args.stream() .flatMap(e -> e.accept(this.columnsExpressionExpander).stream()) .collect(Collectors.toList()); if (isReverseProjection) { for (UnresolvedReferenceExpression field : inputFieldReferences) { if (indexOfName(result, field.getName()) == -1) { finalResult.add(field); } } } else { finalResult.addAll(result); } return finalResult; }
3.68
flink_FailureHandlingResult_getError
/** * Returns reason why the restarting cannot be conducted. * * @return reason why the restarting cannot be conducted */ @Nullable public Throwable getError() { return error; }
3.68
framework_TreeGridDropEvent_isDropTargetRowCollapsed
/** * Tells whether the drop target row is collapsed. * * @return {@code true} if the drop target row is collapsed, {@code false} * otherwise */ public Optional<Boolean> isDropTargetRowCollapsed() { return Optional.ofNullable(collapsed); }
3.68
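A hedged usage sketch for the Optional-valued accessor above; the handler body and the reason the value may be absent (a drop that lands on no row) are assumptions for illustration.

// Hypothetical drop-handler fragment: handle the "no target row" case via the Optional.
event.isDropTargetRowCollapsed().ifPresent(collapsed -> {
    if (collapsed) {
        // e.g. expand the target row before adding children (assumed handling)
    }
});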
hibernate-validator_MethodValidationConfiguration_allowMultipleCascadedValidationOnReturnValues
/** * Define whether marking a method return value for cascaded validation more than once in a class hierarchy is allowed. * The default value is {@code false}, i.e. do not allow. * * "One must not mark a method return value for cascaded validation more than once in a line of a class hierarchy. * In other words, overriding methods on sub types (be it sub classes/interfaces or interface implementations) * cannot mark the return value for cascaded validation if the return value has already been marked on the * overridden method of the super type or interface." * * @param allow flag determining whether validation allows a return value to be marked for cascaded validation multiple times. * * @return {@code this} following the chaining method pattern */ public Builder allowMultipleCascadedValidationOnReturnValues(boolean allow) { this.allowMultipleCascadedValidationOnReturnValues = allow; return this; }
3.68
framework_ClassResource_setCacheTime
/** * Sets the length of cache expiration time. * * <p> * This gives the adapter the possibility to cache streams sent to the client. * The caching may be performed in the adapter or at the client if the client * supports caching. A zero or negative value disables the caching of this * stream. * </p> * * @param cacheTime * the cache time in milliseconds. * */ public void setCacheTime(long cacheTime) { this.cacheTime = cacheTime; }
3.68
hadoop_ByteArray_offset
/** * @return the offset in the buffer. */ @Override public int offset() { return offset; }
3.68
dubbo_FrameworkExecutorRepository_getSharedScheduledExecutor
/** * Get the shared scheduled executor * * @return ScheduledExecutorService */ public ScheduledExecutorService getSharedScheduledExecutor() { return sharedScheduledExecutor; }
3.68
hbase_RegionCoprocessorHost_getRegionServerServices
/** * @return An instance of RegionServerServices, an object NOT for general user-space Coprocessor * consumption. */ @Override public RegionServerServices getRegionServerServices() { return this.rsServices; }
3.68
morf_FieldReference_noNullHandling
/** * Sets the null value handling type to none. * @return this */ public Builder noNullHandling() { this.nullValueHandling = Optional.of(NullValueHandling.NONE); return this; }
3.68
framework_BeanUtil_checkBeanValidationAvailable
/** * Returns whether an implementation of JSR-303 version 1.0 or 1.1 is * present on the classpath. If this method returns false, trying to create * a {@code BeanValidator} instance will throw an * {@code IllegalStateException}. If an implementation is not found, logs a * level {@code FINE} message the first time it is run. * * @return {@code true} if bean validation is available, {@code false} * otherwise. */ public static boolean checkBeanValidationAvailable() { return LazyValidationAvailability.BEAN_VALIDATION_AVAILABLE; }
3.68
hadoop_AzureNativeFileSystemStore_delete
/** * API implementation to delete a blob in the back end azure storage. */ @Override public boolean delete(String key) throws IOException { try { return delete(key, null); } catch (IOException e) { Throwable t = e.getCause(); if (t instanceof StorageException) { StorageException se = (StorageException) t; if ("LeaseIdMissing".equals(se.getErrorCode())){ SelfRenewingLease lease = null; try { lease = acquireLease(key); return delete(key, lease); } catch (AzureException e3) { LOG.warn("Got unexpected exception trying to acquire lease on " + key + "." + e3.getMessage()); throw e3; } finally { try { if (lease != null){ lease.free(); } } catch (Exception e4){ LOG.error("Unable to free lease on " + key, e4); } } } else { throw e; } } else { throw e; } } }
3.68
flink_AllWindowedStream_min
/** * Applies an aggregation that gives the minimum value of the pojo data stream at the given * field expression for every window. * * <p>A field expression is either the name of a public field or a getter method with * parentheses of the {@link DataStream}'s underlying type. A dot can be used to drill down into * objects, as in {@code "field1.getInnerField2()" }. * * @param field The field expression based on which the aggregation will be applied. * @return The transformed DataStream. */ public SingleOutputStreamOperator<T> min(String field) { return aggregate( new ComparableAggregator<>( field, input.getType(), AggregationFunction.AggregationType.MIN, false, input.getExecutionConfig())); }
3.68
flink_SingleInputOperator_addInput
/** * Adds to the input the union of the given operators. * * @param inputs The operator(s) that form the input. * @deprecated This method will be removed in future versions. Use the {@link Union} operator * instead. */ @Deprecated @SuppressWarnings("unchecked") public void addInput(List<Operator<IN>> inputs) { this.input = Operator.createUnionCascade( this.input, inputs.toArray(new Operator[inputs.size()])); }
3.68
framework_VFilterSelect_iconEquals
/** * Checks if the icon widgets show the same icon. * * @param icon1 * the first widget * @param icon2 * the second widget * @return <code>true</code> if they show the same icon, <code>false</code> * otherwise */ private static boolean iconEquals(IconWidget icon1, IconWidget icon2) { if (icon1 == null) { return icon2 == null; } else if (icon2 == null) { return false; } else { return icon1.icon.getUri().equals(icon2.icon.getUri()); } }
3.68
hudi_StreamSync_registerAvroSchemas
/** * Register Avro Schemas. * * @param sourceSchema Source Schema * @param targetSchema Target Schema */ private void registerAvroSchemas(Schema sourceSchema, Schema targetSchema) { // register the schemas, so that shuffle does not serialize the full schemas List<Schema> schemas = new ArrayList<>(); if (sourceSchema != null) { schemas.add(sourceSchema); } if (targetSchema != null) { schemas.add(targetSchema); } if (!schemas.isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug("Registering Schema: " + schemas); } // Use the underlying spark context in case the java context is changed during runtime hoodieSparkContext.getJavaSparkContext().sc().getConf().registerAvroSchemas(JavaConversions.asScalaBuffer(schemas).toList()); } }
3.68
hudi_HoodieTableMetaClient_getCommitsAndCompactionTimeline
/** * Get the commit + pending-compaction timeline visible for this table. A RT filesystem view is constructed with this * timeline so that file-slice after pending compaction-requested instant-time is also considered valid. A RT * file-system view for reading must then merge the file-slices before and after pending compaction instant so that * all delta-commits are read. */ public HoodieTimeline getCommitsAndCompactionTimeline() { switch (this.getTableType()) { case COPY_ON_WRITE: return getActiveTimeline().getCommitTimeline(); case MERGE_ON_READ: return getActiveTimeline().getWriteTimeline(); default: throw new HoodieException("Unsupported table type :" + this.getTableType()); } }
3.68
hbase_HFileBlockIndex_addEntry
/** * Add one index entry to the current leaf-level block. When the leaf-level block gets large * enough, it will be flushed to disk as an inline block. * @param firstKey the first key of the data block * @param blockOffset the offset of the data block * @param blockDataSize the on-disk size of the data block ({@link HFile} format version 2), or * the uncompressed size of the data block ( {@link HFile} format version * 1). */ public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize) { curInlineChunk.add(firstKey, blockOffset, blockDataSize); ++totalNumEntries; }
3.68
hbase_HttpServer_addDefaultApps
/** * Add default apps. * @param appDir The application directory */ protected void addDefaultApps(ContextHandlerCollection parent, final String appDir, Configuration conf) { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = this.logDir; if (logDir == null) { logDir = System.getProperty("hadoop.log.dir"); } if (logDir != null) { ServletContextHandler logContext = new ServletContextHandler(parent, "/logs"); logContext.addServlet(AdminAuthorizedServlet.class, "/*"); logContext.setResourceBase(logDir); if ( conf.getBoolean(ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES) ) { Map<String, String> params = logContext.getInitParams(); params.put("org.mortbay.jetty.servlet.Default.aliases", "true"); } logContext.setDisplayName("logs"); setContextAttributes(logContext, conf); addNoCacheFilter(logContext, conf); defaultContexts.put(logContext, true); } // set up the context for "/static/*" ServletContextHandler staticContext = new ServletContextHandler(parent, "/static"); staticContext.setResourceBase(appDir + "/static"); staticContext.addServlet(DefaultServlet.class, "/*"); staticContext.setDisplayName("static"); setContextAttributes(staticContext, conf); defaultContexts.put(staticContext, true); }
3.68
flink_TopNBuffer_removeAll
/** * Removes the record list under the sortKey from the buffer. * * @param sortKey key to remove */ public void removeAll(RowData sortKey) { Collection<RowData> collection = treeMap.get(sortKey); if (collection != null) { currentTopNum -= collection.size(); treeMap.remove(sortKey); } }
3.68
framework_VScrollTable_getNavigationDownKey
/** * Get the key that moves the selection head downwards. By default it is the * down arrow key but by overriding this you can change the key to whatever * you want. * * @return The keycode of the key */ protected int getNavigationDownKey() { return KeyCodes.KEY_DOWN; }
3.68
flink_MutableHashTable_getNumWriteBehindBuffers
/** * Determines the number of buffers to be used for asynchronous write behind. It is currently * computed as the logarithm of the number of buffers to the base 4, rounded up, minus 2. The * upper limit for the number of write behind buffers is however set to six. * * @param numBuffers The number of available buffers. * @return The number of write behind buffers to use. */ public static int getNumWriteBehindBuffers(int numBuffers) { int numIOBufs = (int) (Math.log(numBuffers) / Math.log(4) - 1.5); return numIOBufs > 6 ? 6 : numIOBufs; }
3.68
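A minimal standalone sketch of the formula in the getNumWriteBehindBuffers snippet above, with the method body inlined so it runs without the Flink classes on the classpath; the sample buffer counts are arbitrary.

public class WriteBehindBufferDemo {
    // same computation as MutableHashTable.getNumWriteBehindBuffers above
    static int numWriteBehindBuffers(int numBuffers) {
        int numIOBufs = (int) (Math.log(numBuffers) / Math.log(4) - 1.5);
        return numIOBufs > 6 ? 6 : numIOBufs;
    }
    public static void main(String[] args) {
        // e.g. 256 buffers: log4(256) = 4, 4 - 1.5 = 2.5, truncated to 2
        for (int n : new int[] {16, 64, 256, 1024, 1_000_000}) {
            System.out.println(n + " buffers -> " + numWriteBehindBuffers(n)); // 0, 1, 2, 3, 6
        }
    }
}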
framework_DateToLongConverter_convertToModel
/* * (non-Javadoc) * * @see * com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object, * java.lang.Class, java.util.Locale) */ @Override public Long convertToModel(Date value, Class<? extends Long> targetType, Locale locale) { if (value == null) { return null; } return value.getTime(); }
3.68
framework_VTooltip_replaceCurrentTooltip
/** * Replace current open tooltip with new content. */ public void replaceCurrentTooltip() { if (closing) { closeTimer.cancel(); closeNow(); justClosedTimer.cancel(); justClosed = false; } showTooltip(); opening = false; }
3.68
framework_CssLayout_getComponentCount
/** * Gets the number of contained components. Consistent with the iterator * returned by {@link #getComponentIterator()}. * * @return the number of contained components */ @Override public int getComponentCount() { return components.size(); }
3.68
zxing_GlobalHistogramBinarizer_getBlackRow
// Applies simple sharpening to the row data to improve performance of the 1D Readers. @Override public BitArray getBlackRow(int y, BitArray row) throws NotFoundException { LuminanceSource source = getLuminanceSource(); int width = source.getWidth(); if (row == null || row.getSize() < width) { row = new BitArray(width); } else { row.clear(); } initArrays(width); byte[] localLuminances = source.getRow(y, luminances); int[] localBuckets = buckets; for (int x = 0; x < width; x++) { localBuckets[(localLuminances[x] & 0xff) >> LUMINANCE_SHIFT]++; } int blackPoint = estimateBlackPoint(localBuckets); if (width < 3) { // Special case for very small images for (int x = 0; x < width; x++) { if ((localLuminances[x] & 0xff) < blackPoint) { row.set(x); } } } else { int left = localLuminances[0] & 0xff; int center = localLuminances[1] & 0xff; for (int x = 1; x < width - 1; x++) { int right = localLuminances[x + 1] & 0xff; // A simple -1 4 -1 box filter with a weight of 2. if (((center * 4) - left - right) / 2 < blackPoint) { row.set(x); } left = center; center = right; } } return row; }
3.68
graphhopper_IntsRef_deepCopyOf
/** * Creates a new IntsRef that points to a copy of the ints from * <code>other</code> * <p> * The returned IntsRef will have a length of other.length * and an offset of zero. */ public static IntsRef deepCopyOf(IntsRef other) { return new IntsRef(Arrays.copyOfRange(other.ints, other.offset, other.offset + other.length), 0, other.length); }
3.68
framework_CvalChecker_cacheLicenseInfo
/* * used in tests */ static void cacheLicenseInfo(CvalInfo info) { if (info != null) { Preferences p = Preferences.userNodeForPackage(CvalInfo.class); if (info.toString().length() > Preferences.MAX_VALUE_LENGTH) { // This should never happen since MAX_VALUE_LENGTH is big // enough. // But server could eventually send a very big message, so we // discard it in cache and would use hard-coded messages. info.setMessage(null); } p.put(info.getProduct().getName(), info.toString()); } }
3.68
dubbo_DubboBootstrap_service
// {@link ServiceConfig} correlative methods public <S> Module service(Consumer<ServiceBuilder<S>> consumerBuilder) { return service(null, consumerBuilder); }
3.68
morf_SqlQueryDataSetProducer_close
/** * Closes the connection and any active result sets. */ @Override public void close() { if (connection == null) { return; } try { for (ResultSetIterator resultSetIterator : openResultSets) { resultSetIterator.close(); } openResultSets.clear(); // restore the auto-commit flag. connection.setAutoCommit(wasAutoCommit); connection.close(); connection = null; } catch (SQLException e) { throw new RuntimeException("Error closing result set", e); } }
3.68
dubbo_RpcUtils_isGenericCall
// check parameterTypesDesc to fix CVE-2020-1948 public static boolean isGenericCall(String parameterTypesDesc, String method) { return ($INVOKE.equals(method) || $INVOKE_ASYNC.equals(method)) && GENERIC_PARAMETER_DESC.equals(parameterTypesDesc); }
3.68
hadoop_BlockBlobAppendStream_generateOlderVersionBlockId
/** * Helper method that generates an older (2.2.0) version blockId. * @return String representing the block ID generated. */ private String generateOlderVersionBlockId(long id) { byte[] blockIdInBytes = new byte[8]; for (int m = 0; m < 8; m++) { blockIdInBytes[7 - m] = (byte) ((id >> (8 * m)) & 0xFF); } return new String( Base64.encodeBase64(blockIdInBytes), StandardCharsets.UTF_8); }
3.68
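A hedged sketch showing that the manual loop in the generateOlderVersionBlockId snippet above is simply a big-endian encoding of the id followed by Base64; it uses the JDK's ByteBuffer and Base64 instead of the commons-codec class in the snippet.

import java.nio.ByteBuffer;
import java.util.Base64;

public class BlockIdDemo {
    // byte[7 - m] = (id >> 8m) & 0xFF places the most significant byte first,
    // i.e. big-endian, which is ByteBuffer's default byte order
    static String olderVersionBlockId(long id) {
        byte[] bytes = ByteBuffer.allocate(8).putLong(id).array();
        return Base64.getEncoder().encodeToString(bytes);
    }
    public static void main(String[] args) {
        System.out.println(olderVersionBlockId(1L)); // AAAAAAAAAAE=
    }
}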
flink_AbstractBytesHashMap_append
/** * Append a value into the hash map's record area. * * @return A BinaryRowData mapping to the memory segments in the map's record area belonging to * the newly appended value. * @throws EOFException if the map can't allocate much more memory. */ public BinaryRowData append(LookupInfo<K, BinaryRowData> lookupInfo, BinaryRowData value) throws IOException { try { if (numElements >= growthThreshold) { growAndRehash(); // update info's bucketSegmentIndex and bucketOffset lookup(lookupInfo.key); } BinaryRowData toAppend = hashSetMode ? reusedValue : value; int pointerToAppended = recordArea.appendRecord(lookupInfo, toAppend); bucketSegments .get(lookupInfo.bucketSegmentIndex) .putInt(lookupInfo.bucketOffset, pointerToAppended); bucketSegments .get(lookupInfo.bucketSegmentIndex) .putInt(lookupInfo.bucketOffset + ELEMENT_POINT_LENGTH, lookupInfo.keyHashCode); numElements++; recordArea.setReadPosition(pointerToAppended); ((RecordArea) recordArea).skipKey(); return recordArea.readValue(reusedValue); } catch (EOFException e) { numSpillFiles++; spillInBytes += recordArea.getSegmentsSize(); throw e; } }
3.68
flink_ResultInfo_getRowFormat
/** Get the row format of the data. */ public RowFormat getRowFormat() { return rowFormat; }
3.68
hbase_VisibilityUtils_extractAndPartitionTags
/** * Extracts and partitions the visibility tags and non-visibility tags * @param cell the cell for which we would extract and partition the visibility and non * visibility tags * @param visTags all the visibility tags of type TagType.VISIBILITY_TAG_TYPE * would be added to this list * @param nonVisTags all the non visibility tags would be added to this list * @return the serialization format of the tag. Can be null if no tags are found or if there is * no visibility tag found */ public static Byte extractAndPartitionTags(Cell cell, List<Tag> visTags, List<Tag> nonVisTags) { Byte serializationFormat = null; Iterator<Tag> tagsIterator = PrivateCellUtil.tagsIterator(cell); while (tagsIterator.hasNext()) { Tag tag = tagsIterator.next(); if (tag.getType() == TagType.VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE) { serializationFormat = Tag.getValueAsByte(tag); } else if (tag.getType() == VISIBILITY_TAG_TYPE) { visTags.add(tag); } else { // ignore string encoded visibility expressions, will be added in replication handling nonVisTags.add(tag); } } return serializationFormat; }
3.68
querydsl_MetaDataExporter_setColumnComparatorClass
/** * Set the column comparator class * * @param columnComparatorClass the comparator class used for ordering columns */ public void setColumnComparatorClass(Class<? extends Comparator<Property>> columnComparatorClass) { module.bind(SQLCodegenModule.COLUMN_COMPARATOR, columnComparatorClass); }
3.68
flink_HiveParserUnparseTranslator_applyTranslations
/** * Apply all translations on the given token stream. * * @param tokenRewriteStream rewrite-capable stream */ public void applyTranslations(TokenRewriteStream tokenRewriteStream) { for (Map.Entry<Integer, Translation> entry : translations.entrySet()) { if (entry.getKey() > 0) { // negative means the key didn't exist in the original // stream (i.e.: we changed the tree) tokenRewriteStream.replace( entry.getKey(), entry.getValue().tokenStopIndex, entry.getValue().replacementText); } } for (CopyTranslation copyTranslation : copyTranslations) { String replacementText = tokenRewriteStream.toString( copyTranslation.sourceNode.getTokenStartIndex(), copyTranslation.sourceNode.getTokenStopIndex()); String currentText = tokenRewriteStream.toString( copyTranslation.targetNode.getTokenStartIndex(), copyTranslation.targetNode.getTokenStopIndex()); if (currentText.equals(replacementText)) { // copy is a nop, so skip it--this is important for avoiding spurious overlap // assertions continue; } // Call addTranslation just to get the assertions for overlap checking. addTranslation(copyTranslation.targetNode, replacementText); tokenRewriteStream.replace( copyTranslation.targetNode.getTokenStartIndex(), copyTranslation.targetNode.getTokenStopIndex(), replacementText); } }
3.68
flink_TaskExecutorMemoryConfiguration_getFrameworkHeap
/** Returns the configured heap size used by the framework. */ public Long getFrameworkHeap() { return frameworkHeap; }
3.68
zxing_LuminanceSource_rotateCounterClockwise
/** * Returns a new object with rotated image data by 90 degrees counterclockwise. * Only callable if {@link #isRotateSupported()} is true. * * @return A rotated version of this object. */ public LuminanceSource rotateCounterClockwise() { throw new UnsupportedOperationException("This luminance source does not support rotation by 90 degrees."); }
3.68
flink_ChannelWriterOutputView_getBytesWritten
/** * Gets the number of pay-load bytes already written. This excludes the number of bytes spent on * headers in the segments. * * @return The number of bytes that have been written to this output view. */ public long getBytesWritten() { return this.bytesBeforeSegment + getCurrentPositionInSegment() - HEADER_LENGTH; }
3.68
flink_TableSink_getFieldNames
/** @deprecated Use the field names of {@link #getTableSchema()} instead. */ @Deprecated default String[] getFieldNames() { return null; }
3.68
graphhopper_VectorTile_getLayers
/** * <code>repeated .vector_tile.Tile.Layer layers = 3;</code> */ public vector_tile.VectorTile.Tile.Layer getLayers(int index) { if (layersBuilder_ == null) { return layers_.get(index); } else { return layersBuilder_.getMessage(index); } }
3.68
hbase_ZkSplitLogWorkerCoordination_attemptToOwnTask
/** * Try to own the task by transitioning the zk node data from UNASSIGNED to OWNED. * <p> * This method is also used to periodically heartbeat the task progress by transitioning the node * from OWNED to OWNED. * <p> * @param isFirstTime shows whether it's the first attempt. * @param zkw ZK watcher * @param server this server's name * @param task the task to own * @param taskZKVersion version of the task in zk * @return non-negative integer value when task can be owned by current region server, otherwise -1 */ protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, ServerName server, String task, int taskZKVersion) { int latestZKVersion = FAILED_TO_OWN_TASK; try { SplitLogTask slt = new SplitLogTask.Owned(server); Stat stat = zkw.getRecoverableZooKeeper().setData(task, slt.toByteArray(), taskZKVersion); if (stat == null) { LOG.warn("zk.setData() returned null for path " + task); SplitLogCounters.tot_wkr_task_heartbeat_failed.increment(); return FAILED_TO_OWN_TASK; } latestZKVersion = stat.getVersion(); SplitLogCounters.tot_wkr_task_heartbeat.increment(); return latestZKVersion; } catch (KeeperException e) { if (!isFirstTime) { if (e.code().equals(KeeperException.Code.NONODE)) { LOG.warn("NONODE failed to assert ownership for " + task, e); } else if (e.code().equals(KeeperException.Code.BADVERSION)) { LOG.warn("BADVERSION failed to assert ownership for " + task, e); } else { LOG.warn("failed to assert ownership for " + task, e); } } } catch (InterruptedException e1) { LOG.warn("Interrupted while trying to assert ownership of " + task + " " + StringUtils.stringifyException(e1)); Thread.currentThread().interrupt(); } SplitLogCounters.tot_wkr_task_heartbeat_failed.increment(); return FAILED_TO_OWN_TASK; }
3.68
hbase_MobUtils_isRawMobScan
/** * Indicates whether it's a raw scan. The information is set in the attribute "hbase.mob.scan.raw" * of scan. For a mob cell, in a normal scan the scanner retrieves the mob cell from the mob * file. In a raw scan, the scanner directly returns the cell in HBase without retrieving the one in the * mob file. * @param scan The current scan. * @return True if it's a raw scan. */ public static boolean isRawMobScan(Scan scan) { byte[] raw = scan.getAttribute(MobConstants.MOB_SCAN_RAW); try { return raw != null && Bytes.toBoolean(raw); } catch (IllegalArgumentException e) { return false; } }
3.68
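A hedged usage sketch for the isRawMobScan snippet above: marking a Scan so the method returns true. The import paths are assumed from current HBase layouts, and MobConstants.MOB_SCAN_RAW is the "hbase.mob.scan.raw" attribute named in the Javadoc.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;

public class RawMobScanExample {
    public static void main(String[] args) {
        Scan scan = new Scan();
        // set the "hbase.mob.scan.raw" attribute so mob cells are returned as-is
        scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(true));
        System.out.println(MobUtils.isRawMobScan(scan)); // true
    }
}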
hbase_HRegionServer_closeRegion
/** * Asynchronously close a region; can be called from the master or internally by the regionserver * when stopping. If called from the master, the region will update the status. * <p> * If an opening was in progress, this method will cancel it, but will not start a new close. The * coprocessors are not called in this case. A NotServingRegionException is thrown. * </p> * <p> * If a close was in progress, this new request will be ignored, and an exception thrown. * </p> * <p> * Provides an additional flag to indicate if this region's blocks should be evicted from the cache. * </p> * @param encodedName Region to close * @param abort True if we are aborting * @param destination Where the Region is being moved to; may be null if unknown. * @return True if closed a region. * @throws NotServingRegionException if the region is not online */ protected boolean closeRegion(String encodedName, final boolean abort, final ServerName destination) throws NotServingRegionException { // Check for permissions to close. HRegion actualRegion = this.getRegion(encodedName); // Can be null if we're calling close on a region that's not online if ((actualRegion != null) && (actualRegion.getCoprocessorHost() != null)) { try { actualRegion.getCoprocessorHost().preClose(false); } catch (IOException exp) { LOG.warn("Unable to close region: the coprocessor launched an error ", exp); return false; } } // previous can come back 'null' if not in map. final Boolean previous = this.regionsInTransitionInRS.putIfAbsent(Bytes.toBytes(encodedName), Boolean.FALSE); if (Boolean.TRUE.equals(previous)) { LOG.info("Received CLOSE for the region:" + encodedName + " , which we are already " + "trying to OPEN. Cancelling OPENING."); if (!regionsInTransitionInRS.replace(Bytes.toBytes(encodedName), previous, Boolean.FALSE)) { // The replace failed. That should be an exceptional case, but theoretically it can happen. // We're going to try to do a standard close then. LOG.warn("The opening for region " + encodedName + " was done before we could cancel it." + " Doing a standard close now"); return closeRegion(encodedName, abort, destination); } // Let's get the region from the online region list again actualRegion = this.getRegion(encodedName); if (actualRegion == null) { // If already online, we still need to close it. LOG.info("The opening previously in progress has been cancelled by a CLOSE request."); // The master deletes the znode when it receives this exception. throw new NotServingRegionException( "The region " + encodedName + " was opening but not yet served. Opening is cancelled."); } } else if (previous == null) { LOG.info("Received CLOSE for {}", encodedName); } else if (Boolean.FALSE.equals(previous)) { LOG.info("Received CLOSE for the region: " + encodedName + ", which we are already trying to CLOSE, but not completed yet"); return true; } if (actualRegion == null) { LOG.debug("Received CLOSE for a region which is not online, and we're not opening."); this.regionsInTransitionInRS.remove(Bytes.toBytes(encodedName)); // The master deletes the znode when it receives this exception. throw new NotServingRegionException( "The region " + encodedName + " is not online, and is not opening."); } CloseRegionHandler crh; final RegionInfo hri = actualRegion.getRegionInfo(); if (hri.isMetaRegion()) { crh = new CloseMetaHandler(this, this, hri, abort); } else { crh = new CloseRegionHandler(this, this, hri, abort, destination); } this.executorService.submit(crh); return true; }
3.68
pulsar_ConfigValidationUtils_fv
/** * Returns a new NestableFieldValidator for a given class. * * @param cls the Class the field should be a type of * @param notNull whether a {@code null} value should be rejected * @return a NestableFieldValidator for that class */ public static NestableFieldValidator fv(final Class cls, final boolean notNull) { return new NestableFieldValidator() { @Override public void validateField(String pd, String name, Object field) throws IllegalArgumentException { if (field == null) { if (notNull) { throw new IllegalArgumentException("Field " + name + " must not be null"); } else { return; } } if (!cls.isInstance(field)) { throw new IllegalArgumentException( pd + name + " must be a " + cls.getName() + ". (" + field + ")"); } } }; }
3.68
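A hedged usage sketch of the validator factory in the fv snippet above, showing both the pass and fail paths; the prefix and field names are invented for illustration.

NestableFieldValidator v = ConfigValidationUtils.fv(String.class, true);
v.validateField("conf.", "serviceUrl", "pulsar://localhost:6650"); // a String: passes
try {
    v.validateField("conf.", "serviceUrl", 42); // not a String
} catch (IllegalArgumentException expected) {
    // prints: conf.serviceUrl must be a java.lang.String. (42)
    System.out.println(expected.getMessage());
}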
hbase_Table_getWriteRpcTimeout
/** * Get timeout of each rpc write request in this Table instance. * @param unit the unit of time the timeout to be represented in * @return write rpc timeout in the specified time unit */ default long getWriteRpcTimeout(TimeUnit unit) { throw new NotImplementedException("Add an implementation!"); }
3.68
hbase_ZKProcedureUtil_logZKTree
/** * Helper method to print the current state of the ZK tree. * @see #logZKTree(String) * @throws KeeperException if an unexpected exception occurs */ protected void logZKTree(String root, String prefix) throws KeeperException { List<String> children = ZKUtil.listChildrenNoWatch(watcher, root); if (children == null) return; for (String child : children) { LOG.debug(prefix + child); String node = ZNodePaths.joinZNode(root.equals("/") ? "" : root, child); logZKTree(node, prefix + "---"); } }
3.68
graphhopper_VectorTile_setBoolValue
/** * <code>optional bool bool_value = 7;</code> */ public Builder setBoolValue(boolean value) { bitField0_ |= 0x00000040; boolValue_ = value; onChanged(); return this; }
3.68
hbase_PersistentIOEngine_verifyFileIntegrity
/** * Verify the cache file's integrity * @param persistentChecksum the persisted checksum * @param algorithm the checksum algorithm */ protected void verifyFileIntegrity(byte[] persistentChecksum, String algorithm) throws IOException { byte[] calculateChecksum = calculateChecksum(algorithm); if (!Bytes.equals(persistentChecksum, calculateChecksum)) { throw new IOException( "Mismatch of checksum! The persistent checksum is " + Bytes.toString(persistentChecksum) + ", but the calculated checksum is " + Bytes.toString(calculateChecksum)); } }
3.68
hbase_ReplicationSourceManager_getLogDir
/** * Get the directory where wals are stored by their RSs * @return the directory where wals are stored by their RSs */ public Path getLogDir() { return this.logDir; }
3.68
hmily_HmilyDisruptor_startup
/** * start disruptor. */ @SuppressWarnings("unchecked") public void startup() { Disruptor<DataEvent<T>> disruptor = new Disruptor<>(new DisruptorEventFactory<>(), size, HmilyThreadFactory.create("disruptor_consumer_" + consumer.fixName(), false), ProducerType.MULTI, new BlockingWaitStrategy()); HmilyDisruptorWorkHandler<T>[] workerPool = new HmilyDisruptorWorkHandler[consumerSize]; for (int i = 0; i < consumerSize; i++) { workerPool[i] = new HmilyDisruptorWorkHandler<>(consumer); } disruptor.handleEventsWithWorkerPool(workerPool); disruptor.setDefaultExceptionHandler(new IgnoreExceptionHandler()); disruptor.start(); RingBuffer<DataEvent<T>> ringBuffer = disruptor.getRingBuffer(); provider = new DisruptorProvider<>(ringBuffer, disruptor); }
3.68
flink_TwoInputUdfOperator_returns
/** * Adds a type information hint about the return type of this operator. This method can be used * in cases where Flink cannot determine automatically what the produced type of a function is. * That can be the case if the function uses generic type variables in the return type that * cannot be inferred from the input type. * * <p>In most cases, the methods {@link #returns(Class)} and {@link #returns(TypeHint)} are * preferable. * * @param typeInfo The type information for the returned data type. * @return This operator using the given type information for the return type. */ public O returns(TypeInformation<OUT> typeInfo) { requireNonNull(typeInfo, "TypeInformation must not be null"); fillInType(typeInfo); @SuppressWarnings("unchecked") O returnType = (O) this; return returnType; }
3.68
hbase_Cacheable_retain
/** * Increase its reference count; the object's memory can only be freed once no references remain. */ default Cacheable retain() { return this; }
3.68
graphhopper_NavigateResource_getPointsFromRequest
/** * This method is parsing the request URL String. Unfortunately it seems that there is no better options right now. * See: https://stackoverflow.com/q/51420380/1548788 * <p> * The url looks like: ".../{profile}/1.522438,42.504606;1.527209,42.504776;1.526113,42.505144;1.527218,42.50529?.." */ private List<GHPoint> getPointsFromRequest(HttpServletRequest httpServletRequest, String profile) { String url = httpServletRequest.getRequestURI(); String urlStart = "/navigate/directions/v5/gh/" + profile + "/"; if (!url.startsWith(urlStart)) throw new IllegalArgumentException("Incorrect URL " + url); url = url.substring(urlStart.length()); String[] pointStrings = url.split(";"); List<GHPoint> points = new ArrayList<>(pointStrings.length); for (int i = 0; i < pointStrings.length; i++) { points.add(GHPoint.fromStringLonLat(pointStrings[i])); } return points; }
3.68
framework_SetPageFirstItemLoadsNeededRowsOnly_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return 14135; }
3.68
flink_FutureUtils_handleAsyncIfNotDone
/** * This function takes a {@link CompletableFuture} and a handler function for the result of this * future. If the input future is already done, this function returns {@link * CompletableFuture#handle(BiFunction)}. Otherwise, the return value is {@link * CompletableFuture#handleAsync(BiFunction, Executor)} with the given executor. * * @param completableFuture the completable future for which we want to call #handle. * @param executor the executor to run the handle function if the future is not yet done. * @param handler the handler function to call when the future is completed. * @param <IN> type of the handler input argument. * @param <OUT> type of the handler return value. * @return the new completion stage. */ public static <IN, OUT> CompletableFuture<OUT> handleAsyncIfNotDone( CompletableFuture<IN> completableFuture, Executor executor, BiFunction<? super IN, Throwable, ? extends OUT> handler) { return completableFuture.isDone() ? completableFuture.handle(handler) : completableFuture.handleAsync(handler, executor); }
3.68
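A standalone sketch of the dispatch logic in the handleAsyncIfNotDone snippet above, inlined so it runs without Flink: a completed future is handled synchronously, while a pending one would be handled on the given executor.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class HandleAsyncIfNotDoneDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        CompletableFuture<Integer> done = CompletableFuture.completedFuture(21);
        CompletableFuture<Integer> result = done.isDone()
                ? done.handle((v, t) -> v * 2)                 // already done: no executor hop
                : done.handleAsync((v, t) -> v * 2, executor); // pending: handled on the executor
        System.out.println(result.get()); // 42
        executor.shutdown();
    }
}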
flink_MathUtils_flipSignBit
/** * Flips the sign bit (most-significant-bit) of the input. * * @param in the input value. * @return the input with a flipped sign bit (most-significant-bit). */ public static long flipSignBit(long in) { return in ^ Long.MIN_VALUE; }
3.68
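A standalone sketch replicating the flipSignBit method above. Flipping the most significant bit maps the signed ordering of longs onto the unsigned ordering; that motivation is an assumption for illustration, not stated in the snippet.

public class FlipSignBitDemo {
    static long flipSignBit(long in) {
        return in ^ Long.MIN_VALUE;
    }
    public static void main(String[] args) {
        long a = -5L, b = 3L;
        // signed: a < b, but as raw unsigned bit patterns a > b
        System.out.println(Long.compareUnsigned(a, b) > 0); // true
        // after flipping, unsigned comparison matches the signed order
        System.out.println(Long.compareUnsigned(flipSignBit(a), flipSignBit(b)) < 0); // true
    }
}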
framework_VRichTextArea_swapEditableArea
/** * Swaps html to rta and vice versa. */ private void swapEditableArea() { String value = getValue(); if (html.isAttached()) { if (isReadOnly() || !isEnabled()) { return; } fp.remove(html); if (BrowserInfo.get().isWebkit()) { fp.remove(formatter); createRTAComponents(); // recreate new RTA to bypass #5379 fp.add(formatter); } fp.add(rta); } else { fp.remove(rta); fp.add(html); } setValue(value); }
3.68
dubbo_ClassHelper_forName
/** * Replacement for <code>Class.forName()</code> that also returns Class * instances for primitives (like "int") and array class names (like * "String[]"). * * @param name the name of the Class * @param classLoader the class loader to use (may be <code>null</code>, * which indicates the default class loader) * @return Class instance for the supplied name * @throws ClassNotFoundException if the class was not found * @throws LinkageError if the class file could not be loaded * @see Class#forName(String, boolean, ClassLoader) */ public static Class<?> forName(String name, ClassLoader classLoader) throws ClassNotFoundException, LinkageError { return ClassUtils.forName(name, classLoader); }
3.68
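A hedged usage sketch of the ClassHelper.forName snippet above, exercising the primitive and array-name cases its Javadoc mentions; passing a null loader selects the default class loader.

public class ForNameDemo {
    public static void main(String[] args) throws ClassNotFoundException {
        Class<?> primitive = ClassHelper.forName("int", null);            // primitive name
        Class<?> array = ClassHelper.forName("java.lang.String[]", null); // array class name
        System.out.println(primitive); // int
        System.out.println(array);     // class [Ljava.lang.String;
    }
}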
querydsl_StringExpression_append
/** * Create a {@code concat(this, str)} expression * * <p>Get the concatenation of this and str</p> * * @param str string to append * @return this + str */ public StringExpression append(String str) { return append(ConstantImpl.create(str)); }
3.68
querydsl_TimeExpression_second
/** * Create a seconds expression (range 0-59) * * @return second */ public NumberExpression<Integer> second() { if (seconds == null) { seconds = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.SECOND, mixin); } return seconds; }
3.68
flink_TaskStateSnapshot_getSubtaskStateByOperatorID
/** Returns the subtask state for the given operator id (or null if not contained). */ @Nullable public OperatorSubtaskState getSubtaskStateByOperatorID(OperatorID operatorID) { return subtaskStatesByOperatorID.get(operatorID); }
3.68
hbase_RSGroupAdminClient_removeServers
/** * Remove decommissioned servers from rsgroup. 1. Sometimes we may find the server aborted due to * some hardware failure and we must offline the server for repairing. Or we need to move some * servers to join other clusters. So we need to remove these servers from the rsgroup. 2. * Dead/recovering/live servers will be disallowed. * @param servers set of servers to remove */ public void removeServers(Set<Address> servers) throws IOException { Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet(); for (Address el : servers) { hostPorts.add(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname()) .setPort(el.getPort()).build()); } RemoveServersRequest request = RemoveServersRequest.newBuilder().addAllServers(hostPorts).build(); try { stub.removeServers(null, request); } catch (ServiceException e) { throw ProtobufUtil.handleRemoteException(e); } }
3.68
hbase_QuotaObserverChore_getPeriod
/** * Extracts the period for the chore from the configuration. * @param conf The configuration object. * @return The configured chore period or the default value in the given timeunit. * @see #getTimeUnit(Configuration) */ static int getPeriod(Configuration conf) { return conf.getInt(QUOTA_OBSERVER_CHORE_PERIOD_KEY, QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT); }
3.68
morf_DatabaseMetaDataProvider_readColumnName
/** * Retrieves column name from a result set. * * @param columnResultSet Result set to be read. * @return Name of the column. * @throws SQLException Upon errors. */ protected RealName readColumnName(ResultSet columnResultSet) throws SQLException { String columnName = columnResultSet.getString(COLUMN_NAME); return createRealName(columnName, columnName); }
3.68
framework_Alignment_isMiddle
/** * Checks if component is aligned middle (vertically center) of the * available space. * * @return true if aligned middle */ public boolean isMiddle() { return (bitMask & Bits.ALIGNMENT_VERTICAL_CENTER) == Bits.ALIGNMENT_VERTICAL_CENTER; }
3.68
hadoop_SnappyDecompressor_needsDictionary
/** * Returns <code>false</code>. * * @return <code>false</code>. */ @Override public boolean needsDictionary() { return false; }
3.68
hadoop_KerberosAuthException_getInitialMessage
/** @return The initial message, or null if not set. */ public String getInitialMessage() { return initialMessage; }
3.68
framework_BasicForwardHandler_setDates
/** * Set the start and end dates for the event. * * @param event * The event for which the start and end dates should be set * @param start * The start date * @param end * The end date */ protected void setDates(ForwardEvent event, Date start, Date end) { event.getComponent().setStartDate(start); event.getComponent().setEndDate(end); }
3.68
dubbo_DubboBootstrap_consumer
// {@link ConsumerConfig} correlative methods public Module consumer(Consumer<ConsumerBuilder> builderConsumer) { return consumer(null, builderConsumer); }
3.68
querydsl_DateExpression_max
/** * Get the maximum value of this expression (aggregation) * * @return max(this) */ @Override public DateExpression<T> max() { if (max == null) { max = Expressions.dateOperation(getType(), Ops.AggOps.MAX_AGG, mixin); } return max; }
3.68
flink_CrossOperator_projectTuple1
/** * Projects a pair of crossed elements to a {@link Tuple} with the previously selected * fields. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0> ProjectCross<I1, I2, Tuple1<T0>> projectTuple1() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo<Tuple1<T0>> tType = new TupleTypeInfo<Tuple1<T0>>(fTypes); return new ProjectCross<I1, I2, Tuple1<T0>>( this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint); }
3.68
streampipes_SpGeometryBuilder_createSPGeom
/** * Creates a Geometry from a WKT string. The string has to be valid and is not checked; if it is invalid, an empty point * geometry is returned. The method calls the getPrecisionModel method, creates a JTS geometry factory and a WKT parser object, * and reads the geometry from the WKT string. * * @param wktString Well-known text representation of the input geometry * @param epsg EPSG Code representing SRID * @return {@link org.locationtech.jts.geom.Geometry}. An empty point geometry * is created if a {@link org.locationtech.jts.io.ParseException} occurs due to an invalid WKT string */ public static Geometry createSPGeom(String wktString, Integer epsg) { Geometry geom; PrecisionModel prec = getPrecisionModel(epsg); GeometryFactory geomFactory = new GeometryFactory(prec, epsg); WKTReader wktReader = new WKTReader(geomFactory); try { geom = wktReader.read(wktString); } catch (ParseException e) { // if wktString is invalid, an empty point geometry will be created as returnedGeom geom = geomFactory.createPoint(); } return geom; }
3.68
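A hedged usage sketch for the createSPGeom snippet above; it assumes the StreamPipes SpGeometryBuilder and the JTS library are on the classpath, and 4326 is the usual WGS84 EPSG code.

Geometry point = SpGeometryBuilder.createSPGeom("POINT (8.40 49.01)", 4326);
Geometry fallback = SpGeometryBuilder.createSPGeom("not a wkt string", 4326);
System.out.println(point.isEmpty());    // false
System.out.println(fallback.isEmpty()); // true: invalid WKT yields an empty point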
Activiti_BpmnParse_applyParseHandlers
/** * Parses the 'definitions' root element */ protected void applyParseHandlers() { sequenceFlows = new HashMap<String, SequenceFlow>(); for (Process process : bpmnModel.getProcesses()) { currentProcess = process; if (process.isExecutable()) { bpmnParserHandlers.parseElement(this, process); } } }
3.68
framework_GridDragSourceConnector_getDraggedRowElementStream
/** * Get the dragged table row elements as a stream. * * @return Stream of dragged table row elements. */ private Stream<TableRowElement> getDraggedRowElementStream() { return draggedItems.stream().map( row -> ((AbstractRemoteDataSource<JsonObject>) gridConnector .getDataSource()).indexOf(row)) .map(getGridBody()::getRowElement); }
3.68
framework_VTabsheet_isHiddenOnServer
/** * Returns whether the tab is hidden on server (as opposed to simply * hidden because it's scrolled out of view). * * @return {@code true} if hidden on server, {@code false} otherwise */ public boolean isHiddenOnServer() { return hiddenOnServer; }
3.68
hadoop_TimelineDomain_setModifiedTime
/** * Set the modified time of the domain * * @param modifiedTime the modified time of the domain */ public void setModifiedTime(Long modifiedTime) { this.modifiedTime = modifiedTime; }
3.68
flink_SegmentsUtil_getDouble
/** * get double from segments. * * @param segments target segments. * @param offset value offset. */ public static double getDouble(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 8)) { return segments[0].getDouble(offset); } else { return getDoubleMultiSegments(segments, offset); } }
3.68
hbase_RawShort_encodeShort
/** * Write instance {@code val} into buffer {@code buff}. */ public int encodeShort(byte[] buff, int offset, short val) { return Bytes.putShort(buff, offset, val); }
3.68
dubbo_AbstractMetadataReport_retry
/** * @return whether the retry needs to continue */ public boolean retry() { return doHandleMetadataCollection(failedReports); }
3.68
framework_Calendar_resetVisibleHoursOfDay
/** * Resets the {@link #setFirstVisibleHourOfDay(int)} and * {@link #setLastVisibleHourOfDay(int)} to the default values, 0 and 23 * respectively. * * @see #autoScaleVisibleHoursOfDay() * @see #setFirstVisibleHourOfDay(int) * @see #setLastVisibleHourOfDay(int) */ public void resetVisibleHoursOfDay() { setFirstVisibleHourOfDay(0); setLastVisibleHourOfDay(23); }
3.68
zxing_QRCodeReader_extractPureBits
/** * This method detects a code in a "pure" image -- that is, pure monochrome image * which contains only an unrotated, unskewed, image of a code, with some white border * around it. This is a specialized method that works exceptionally fast in this special * case. */ private static BitMatrix extractPureBits(BitMatrix image) throws NotFoundException { int[] leftTopBlack = image.getTopLeftOnBit(); int[] rightBottomBlack = image.getBottomRightOnBit(); if (leftTopBlack == null || rightBottomBlack == null) { throw NotFoundException.getNotFoundInstance(); } float moduleSize = moduleSize(leftTopBlack, image); int top = leftTopBlack[1]; int bottom = rightBottomBlack[1]; int left = leftTopBlack[0]; int right = rightBottomBlack[0]; // Sanity check! if (left >= right || top >= bottom) { throw NotFoundException.getNotFoundInstance(); } if (bottom - top != right - left) { // Special case, where bottom-right module wasn't black so we found something else in the last row // Assume it's a square, so use height as the width right = left + (bottom - top); if (right >= image.getWidth()) { // Abort if that would not make sense -- off image throw NotFoundException.getNotFoundInstance(); } } int matrixWidth = Math.round((right - left + 1) / moduleSize); int matrixHeight = Math.round((bottom - top + 1) / moduleSize); if (matrixWidth <= 0 || matrixHeight <= 0) { throw NotFoundException.getNotFoundInstance(); } if (matrixHeight != matrixWidth) { // Only possibly decode square regions throw NotFoundException.getNotFoundInstance(); } // Push in the "border" by half the module width so that we start // sampling in the middle of the module. Just in case the image is a // little off, this will help recover. int nudge = (int) (moduleSize / 2.0f); top += nudge; left += nudge; // But careful that this does not sample off the edge // "right" is the farthest-right valid pixel location -- right+1 is not necessarily // This is positive by how much the inner x loop below would be too large int nudgedTooFarRight = left + (int) ((matrixWidth - 1) * moduleSize) - right; if (nudgedTooFarRight > 0) { if (nudgedTooFarRight > nudge) { // Neither way fits; abort throw NotFoundException.getNotFoundInstance(); } left -= nudgedTooFarRight; } // See logic above int nudgedTooFarDown = top + (int) ((matrixHeight - 1) * moduleSize) - bottom; if (nudgedTooFarDown > 0) { if (nudgedTooFarDown > nudge) { // Neither way fits; abort throw NotFoundException.getNotFoundInstance(); } top -= nudgedTooFarDown; } // Now just read off the bits BitMatrix bits = new BitMatrix(matrixWidth, matrixHeight); for (int y = 0; y < matrixHeight; y++) { int iOffset = top + (int) (y * moduleSize); for (int x = 0; x < matrixWidth; x++) { if (image.get(left + (int) (x * moduleSize), iOffset)) { bits.set(x, y); } } } return bits; }
3.68
hudi_QuickstartUtils_generateInsertsStream
/** * Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys. */ public Stream<HoodieRecord> generateInsertsStream(String randomString, Integer n) { int currSize = getNumExistingKeys(); return IntStream.range(0, n).boxed().map(i -> { String partitionPath = partitionPaths[rand.nextInt(partitionPaths.length)]; HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath); existingKeys.put(currSize + i, key); numExistingKeys++; try { return new HoodieAvroRecord(key, generateRandomValue(key, randomString)); } catch (IOException e) { throw new HoodieIOException(e.getMessage(), e); } }); }
3.68
hmily_AutoCommitThreadLocal_remove
/** * Remove. */ public void remove() { CURRENT_LOCAL.remove(); }
3.68
flink_DataStream_clean
/** * Invokes the {@link org.apache.flink.api.java.ClosureCleaner} on the given function if closure * cleaning is enabled in the {@link ExecutionConfig}. * * @return The cleaned Function */ protected <F> F clean(F f) { return getExecutionEnvironment().clean(f); }
3.68
framework_CalendarWeekDropHandler_dragEnter
/* * (non-Javadoc) * * @see * com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragEnter(com * .vaadin.terminal.gwt.client.ui.dd.VDragEvent) */ @Override public void dragEnter(VDragEvent drag) { // NOOP, we determine drag acceptance in dragOver }
3.68
hadoop_OSSListRequest_v2
/** * Restricted constructors to ensure v1 or v2, not both. * @param request v2 request * @return new list request container */ public static OSSListRequest v2(ListObjectsV2Request request) { return new OSSListRequest(null, request); }
3.68
mutate-test-kata_CompanyFixed_everybodyGetsRaiseBy
/** * Increase every employee's salary by the specified fraction * @param incrementAsFraction salary increase as a fraction of the original salary. e.g. if the value of the * parameter is 0.1, everyone at the company gets a 10% raise */ public void everybodyGetsRaiseBy(double incrementAsFraction) { this.employees.forEach(e -> e.setSalary(e.getSalary() * (1 + incrementAsFraction))); }
3.68
AreaShop_AreaShop_messageNoPrefix
/** * Send a message to a target without a prefix. * @param target The target to send the message to * @param key The key of the language string * @param replacements The replacements to insert in the message */ public void messageNoPrefix(Object target, String key, Object... replacements) { Message.fromKey(key).replacements(replacements).send(target); }
3.68
framework_VScrollTable_getRenderedRowByKey
/** * Get a rendered row by its key. * * @param key * The key to search with * @return the row with the given key, or null if no such row is rendered */ public VScrollTableRow getRenderedRowByKey(String key) { if (scrollBody != null) { for (Widget w : scrollBody) { VScrollTableRow r = (VScrollTableRow) w; if (r.getKey().equals(key)) { return r; } } } return null; }
3.68
framework_AbstractRemoteDataSource_setCacheStrategy
/** * Sets the cache strategy that is used to determine how much data is * fetched and cached. * <p> * The new strategy is immediately used to evaluate whether currently cached * rows should be discarded or new rows should be fetched. * * @param cacheStrategy * a cache strategy implementation, not <code>null</code> */ public void setCacheStrategy(CacheStrategy cacheStrategy) { if (cacheStrategy == null) { throw new IllegalArgumentException(); } if (this.cacheStrategy != cacheStrategy) { this.cacheStrategy = cacheStrategy; checkCacheCoverage(); } }
3.68
hadoop_SessionTokenIdentifier_getExpiryTime
/** * Return the expiry time in seconds since 1970-01-01. * @return the time when the AWS credentials expire. */ @Override public long getExpiryTime() { return marshalledCredentials.getExpiration(); }
3.68