Columns:
  name          string   (length 12 to 178)
  code_snippet  string   (length 8 to 36.5k)
  score         float64  (3.26 to 3.68)
hbase_HBaseTestingUtility_unassignRegion
/**
 * Unassign the named region.
 * @param regionName The region to unassign.
 */
public void unassignRegion(byte[] regionName) throws IOException {
  getAdmin().unassign(regionName, true);
}
3.68
hbase_RegionStateStore_getRegionState
/**
 * Pull the region state from a catalog table {@link Result}.
 * @return the region state, or null if unknown.
 */
public static State getRegionState(final Result r, RegionInfo regionInfo) {
  Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(regionInfo.getReplicaId()));
  if (cell == null || cell.getValueLength() == 0) {
    return null;
  }
  String state = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
  try {
    return State.valueOf(state);
  } catch (IllegalArgumentException e) {
    LOG.warn(
      "BAD value {} in hbase:meta info:state column for region {} , "
        + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE",
      state, regionInfo.getEncodedName());
    return null;
  }
}
3.68
hudi_BaseHoodieWriteClient_lazyRollbackFailedIndexing
/**
 * Rolls back the failed delta commits corresponding to the indexing action.
 * <p>
 * TODO(HUDI-5733): This should be cleaned up once the proper fix of rollbacks
 * in the metadata table is landed.
 *
 * @return {@code true} if rollback happens; {@code false} otherwise.
 */
public boolean lazyRollbackFailedIndexing() {
  return tableServiceClient.rollbackFailedIndexingCommits();
}
3.68
graphhopper_LandmarkStorage_getToWeight
/**
 * @return the weight from the specified node to the landmark (specified *as index*)
 */
int getToWeight(int landmarkIndex, int node) {
  int res = (int) landmarkWeightDA.getShort((long) node * LM_ROW_LENGTH + landmarkIndex * 4 + TO_OFFSET)
      & 0x0000FFFF;
  if (res == SHORT_INFINITY)
    return SHORT_MAX;
  return res;
}
3.68
pulsar_RateLimiter_getAvailablePermits
/**
 * Return available permits for this {@link RateLimiter}.
 *
 * @return the number of available permits; 0 if none are available
 */
public long getAvailablePermits() {
  return Math.max(0, this.permits - this.acquiredPermits);
}
3.68
hbase_StochasticLoadBalancer_updateRegionLoad
/**
 * Store the current region loads.
 */
private void updateRegionLoad() {
  // We create a new hashmap so that regions that are no longer there are removed.
  // However we temporarily need the old loads so we can use them to keep the rolling average.
  Map<String, Deque<BalancerRegionLoad>> oldLoads = loads;
  loads = new HashMap<>();
  clusterStatus.getLiveServerMetrics().forEach((ServerName sn, ServerMetrics sm) -> {
    sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
      String regionNameAsString = RegionInfo.getRegionNameAsString(regionName);
      Deque<BalancerRegionLoad> rLoads = oldLoads.get(regionNameAsString);
      if (rLoads == null) {
        rLoads = new ArrayDeque<>(numRegionLoadsToRemember + 1);
      } else if (rLoads.size() >= numRegionLoadsToRemember) {
        rLoads.remove();
      }
      rLoads.add(new BalancerRegionLoad(rm));
      loads.put(regionNameAsString, rLoads);
    });
  });
}
3.68
hudi_KafkaConnectUtils_buildWriteStatuses
/**
 * Build Protobuf message containing the Hudi {@link WriteStatus}.
 *
 * @param writeStatuses The list of Hudi {@link WriteStatus}.
 * @return the protobuf message {@link org.apache.hudi.connect.ControlMessage.ConnectWriteStatus}
 * that wraps the Hudi {@link WriteStatus}.
 * @throws IOException thrown if the conversion failed.
 */
public static ControlMessage.ConnectWriteStatus buildWriteStatuses(List<WriteStatus> writeStatuses) throws IOException {
  return ControlMessage.ConnectWriteStatus.newBuilder()
      .setSerializedWriteStatus(
          ByteString.copyFrom(
              SerializationUtils.serialize(writeStatuses)))
      .build();
}
3.68
framework_DesignContext_fireComponentCreatedEvent
/**
 * Fires a component creation event.
 *
 * @param localId
 *            localId of the component
 * @param component
 *            the component that was created
 */
private void fireComponentCreatedEvent(String localId, Component component) {
  ComponentCreatedEvent event = new ComponentCreatedEvent(localId, component);
  for (ComponentCreationListener listener : listeners) {
    listener.componentCreated(event);
  }
}
3.68
morf_NamedParameterPreparedStatement_setMaxRows
/**
 * Sets the limit for the maximum number of rows that any
 * <code>ResultSet</code> object generated by this <code>Statement</code>
 * object can contain to the given number. If the limit is exceeded, the
 * excess rows are silently dropped.
 *
 * @param maxRows the new max rows limit; zero means there is no limit
 * @exception SQLException if a database access error occurs,
 *            this method is called on a closed <code>Statement</code>
 *            or the condition maxRows &gt;= 0 is not satisfied
 * @see Statement#setMaxRows(int)
 */
public void setMaxRows(Integer maxRows) throws SQLException {
  statement.setMaxRows(maxRows);
}
3.68
querydsl_JTSGeometryExpression_relate
/**
 * Returns 1 (TRUE) if this geometric object is spatially related to anotherGeometry by testing
 * for intersections between the interior, boundary and exterior of the two geometric objects
 * as specified by the values in the intersectionPatternMatrix. This returns FALSE if all the
 * tested intersections are empty except exterior (this) intersect exterior (another).
 *
 * @param geometry other geometry
 * @param matrix matrix
 * @return true, if this geometry is spatially related to the other
 */
public BooleanExpression relate(Expression<? extends Geometry> geometry, String matrix) {
  return Expressions.booleanOperation(SpatialOps.RELATE, mixin, geometry, ConstantImpl.create(matrix));
}
3.68
hbase_MetricsSource_incrLogReadInBytes
/** Increase the number of bytes read by the source from the log file. */
public void incrLogReadInBytes(long readInBytes) {
  singleSourceSource.incrLogReadInBytes(readInBytes);
  globalSourceSource.incrLogReadInBytes(readInBytes);
}
3.68
hmily_HmilyRepositoryEventPublisher_getInstance
/**
 * Gets instance.
 *
 * @return the instance
 */
public static HmilyRepositoryEventPublisher getInstance() {
  return INSTANCE;
}
3.68
flink_BinaryHashBucketArea_findFirstSameBuildRow
/** For distinct build. */
private boolean findFirstSameBuildRow(
    MemorySegment bucket,
    int searchHashCode,
    int bucketInSegmentOffset,
    BinaryRowData buildRowToInsert) {
  int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
  int countInBucket = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
  int numInBucket = 0;
  RandomAccessInputView view = partition.getBuildStageInputView();
  while (countInBucket != 0) {
    while (numInBucket < countInBucket) {
      final int thisCode = bucket.getInt(posInSegment);
      posInSegment += HASH_CODE_LEN;
      if (thisCode == searchHashCode) {
        final int pointer = bucket.getInt(
            bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (numInBucket * POINTER_LEN));
        numInBucket++;
        try {
          view.setReadPosition(pointer);
          BinaryRowData row = table.binaryBuildSideSerializer.mapFromPages(table.reuseBuildRow, view);
          if (buildRowToInsert.equals(row)) {
            return true;
          }
        } catch (IOException e) {
          throw new RuntimeException(
              "Error deserializing key or value from the hashtable: " + e.getMessage(), e);
        }
      } else {
        numInBucket++;
      }
    }
    // this segment is done. check if there is another chained bucket
    final int forwardPointer = bucket.getInt(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
    if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
      return false;
    }
    final int overflowSegIndex = forwardPointer >>> table.segmentSizeBits;
    bucket = overflowSegments[overflowSegIndex];
    bucketInSegmentOffset = forwardPointer & table.segmentSizeMask;
    countInBucket = bucket.getShort(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
    posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
    numInBucket = 0;
  }
  return false;
}
3.68
hadoop_RoleModel_pathToKey
/**
 * Variant of {@link S3AFileSystem#pathToKey(Path)} which doesn't care
 * about working directories, so can be static and stateless.
 * @param path path to map
 * @return key or ""
 */
public static String pathToKey(Path path) {
  if (path.toUri().getScheme() != null && path.toUri().getPath().isEmpty()) {
    return "";
  }
  return path.toUri().getPath().substring(1);
}
3.68
dubbo_TTree_end
/**
 * End a branch node.
 *
 * @return this
 */
public TTree end() {
  if (current.isRoot()) {
    throw new IllegalStateException("current node is root.");
  }
  current.markEnd();
  current = current.parent;
  return this;
}
3.68
hbase_ReplicationProtobufUtil_replicateWALEntry
/**
 * A helper to replicate a list of WAL entries using region server admin
 * @param admin the region server admin
 * @param entries Array of WAL entries to be replicated
 * @param replicationClusterId Id which will uniquely identify source cluster FS client
 *          configurations in the replication configuration directory
 * @param sourceBaseNamespaceDir Path to source cluster base namespace directory
 * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
 */
public static CompletableFuture<ReplicateWALEntryResponse> replicateWALEntry(
  AsyncRegionServerAdmin admin, Entry[] entries, String replicationClusterId,
  Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir, int timeout) {
  Pair<ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(entries, null,
    replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
  return admin.replicateWALEntry(p.getFirst(), p.getSecond(), timeout);
}
3.68
hbase_ZKUtil_asyncCreate
/**
 * Async creates the specified node with the specified data.
 * <p>
 * Throws an exception if the node already exists.
 * <p>
 * The node created is persistent and open access.
 * @param zkw zk reference
 * @param znode path of node to create
 * @param data data of node to create
 * @param cb the callback to use for the creation
 * @param ctx the context to use for the creation
 */
public static void asyncCreate(ZKWatcher zkw, String znode, byte[] data,
  final AsyncCallback.StringCallback cb, final Object ctx) {
  zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data, zkw.createACL(znode),
    CreateMode.PERSISTENT, cb, ctx);
}
3.68
morf_SqlDialect_getSqlForLeftPad
/**
 * Converts the LEFT_PAD function into SQL. This is the same format used for
 * H2, MySQL and Oracle. SqlServer implementation overrides this function.
 *
 * @param field The field to pad
 * @param length The length of the padding
 * @param character The character to use for the padding
 * @return string representation of the SQL.
 */
protected String getSqlForLeftPad(AliasedField field, AliasedField length, AliasedField character) {
  return "LPAD(" + getSqlFrom(field) + ", " + getSqlFrom(length) + ", " + getSqlFrom(character) + ")";
}
3.68
hudi_SparkRecordMergingUtils_getCachedMergedSchema
/**
 * Merges the two schemas so the merged schema contains all the fields from the two schemas,
 * with the same ordering of fields based on the provided reader schema.
 *
 * @param oldSchema    Old schema.
 * @param newSchema    New schema.
 * @param readerSchema Reader schema containing all the fields to read.
 * @return The ID to {@link StructField} instance mapping of the merged schema, and the
 * {@link StructType} and Avro schema of the merged schema.
 */
public static Pair<Map<Integer, StructField>, Pair<StructType, Schema>> getCachedMergedSchema(
    Schema oldSchema, Schema newSchema, Schema readerSchema) {
  return MERGED_SCHEMA_CACHE.computeIfAbsent(
      Pair.of(Pair.of(oldSchema, newSchema), readerSchema), schemaPair -> {
        Schema schema1 = schemaPair.getLeft().getLeft();
        Schema schema2 = schemaPair.getLeft().getRight();
        Schema refSchema = schemaPair.getRight();
        Map<String, Integer> nameToIdMapping1 = getCachedFieldNameToIdMapping(schema1);
        Map<String, Integer> nameToIdMapping2 = getCachedFieldNameToIdMapping(schema2);
        // Mapping of field ID/position to the StructField instance of the readerSchema
        Map<Integer, StructField> refFieldIdToFieldMapping = getCachedFieldIdToFieldMapping(refSchema);
        // This field name set contains all the fields that appear
        // either in the oldSchema and/or the newSchema
        Set<String> fieldNameSet = new HashSet<>();
        fieldNameSet.addAll(nameToIdMapping1.keySet());
        fieldNameSet.addAll(nameToIdMapping2.keySet());
        int fieldId = 0;
        Map<Integer, StructField> mergedMapping = new HashMap<>();
        List<StructField> mergedFieldList = new ArrayList<>();
        // Iterates over the fields based on the original ordering of the fields of the
        // readerSchema using the field ID/position from 0
        for (int i = 0; i < refFieldIdToFieldMapping.size(); i++) {
          StructField field = refFieldIdToFieldMapping.get(i);
          if (fieldNameSet.contains(field.name())) {
            mergedMapping.put(fieldId, field);
            mergedFieldList.add(field);
            fieldId++;
          }
        }
        StructType mergedStructType = new StructType(mergedFieldList.toArray(new StructField[0]));
        Schema mergedSchema = AvroConversionUtils.convertStructTypeToAvroSchema(
            mergedStructType, readerSchema.getName(), readerSchema.getNamespace());
        return Pair.of(mergedMapping, Pair.of(mergedStructType, mergedSchema));
      });
}
3.68
hbase_CheckAndMutate_getValue
/** Returns the expected value */
public byte[] getValue() {
  return value;
}
3.68
querydsl_CollectionExpressionBase_size
/**
 * Create a {@code this.size()} expression
 *
 * <p>Gets the number of elements in this collection</p>
 *
 * @return this.size()
 */
public final NumberExpression<Integer> size() {
  if (size == null) {
    size = Expressions.numberOperation(Integer.class, Ops.COL_SIZE, mixin);
  }
  return size;
}
3.68
framework_UIConnector_onChildSizeChange
/**
 * Ensure the position is calculated correctly. This method should be called
 * whenever the content's height changes for any reason, in case the change
 * has been between a relative and non-relative height to either direction.
 */
protected void onChildSizeChange() {
  ComponentConnector child = getContent();
  if (child == null) {
    return;
  }
  Style childStyle = child.getWidget().getElement().getStyle();
  /*
   * Must set absolute position if the child has relative height and
   * there's a chance of horizontal scrolling as some browsers will
   * otherwise not take the scrollbar into account when calculating the
   * height. Assuming v-ui does not have an undefined width for now, see
   * #8460.
   */
  if (child.isRelativeHeight()) {
    childStyle.setPosition(Position.ABSOLUTE);
  } else {
    childStyle.clearPosition();
  }
}
3.68
graphhopper_VectorTile_getLayersBuilder
/**
 * <code>repeated .vector_tile.Tile.Layer layers = 3;</code>
 */
public vector_tile.VectorTile.Tile.Layer.Builder getLayersBuilder(int index) {
  return getLayersFieldBuilder().getBuilder(index);
}
3.68
hibernate-validator_NotEmptyValidatorForArray_isValid
/**
 * Checks the array is not {@code null} and not empty.
 *
 * @param array the array to validate
 * @param constraintValidatorContext context in which the constraint is evaluated
 * @return {@code true} if the array is not {@code null} and the array is not empty
 */
@Override
public boolean isValid(Object[] array, ConstraintValidatorContext constraintValidatorContext) {
  if ( array == null ) {
    return false;
  }
  return array.length > 0;
}
3.68
hbase_PrivateCellUtil_tagsIterator
/**
 * Utility method to iterate through the tags in the given cell.
 * @param cell The Cell over which tags iterator is needed.
 * @return iterator for the tags
 */
public static Iterator<Tag> tagsIterator(final Cell cell) {
  final int tagsLength = cell.getTagsLength();
  // Save an object allocation where we can
  if (tagsLength == 0) {
    return TagUtil.EMPTY_TAGS_ITR;
  }
  if (cell instanceof ByteBufferExtendedCell) {
    return tagsIterator(((ByteBufferExtendedCell) cell).getTagsByteBuffer(),
      ((ByteBufferExtendedCell) cell).getTagsPosition(), tagsLength);
  }
  return new Iterator<Tag>() {
    private int offset = cell.getTagsOffset();
    private int pos = offset;
    private int endOffset = offset + cell.getTagsLength() - 1;

    @Override
    public boolean hasNext() {
      return this.pos < endOffset;
    }

    @Override
    public Tag next() {
      if (hasNext()) {
        byte[] tags = cell.getTagsArray();
        int curTagLen = Bytes.readAsInt(tags, this.pos, Tag.TAG_LENGTH_SIZE);
        Tag tag = new ArrayBackedTag(tags, pos, curTagLen + TAG_LENGTH_SIZE);
        this.pos += Bytes.SIZEOF_SHORT + curTagLen;
        return tag;
      }
      return null;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
3.68
hbase_ClassSize_is32BitJVM
/**
 * Determines if we are running in a 32-bit JVM. Some unit tests need to know this too.
 */
public static boolean is32BitJVM() {
  final String model = System.getProperty("sun.arch.data.model");
  return model != null && model.equals("32");
}
3.68
hadoop_SubApplicationColumnPrefix_getColumnPrefix
/**
 * @return the column name value
 */
public String getColumnPrefix() {
  return columnPrefix;
}
3.68
framework_VAbstractDropHandler_validate
/**
 * Validates the given drag event when all existing DnD-related tasks are
 * completed, and triggers the callback if the validation was successful.
 *
 * @param cb
 *            the callback that handles acceptance if the target is valid
 * @param event
 *            the drag event
 */
protected void validate(final VAcceptCallback cb, final VDragEvent event) {
  Command checkCriteria = () -> acceptCriteria.accept(event, criterioUIDL, cb);
  VDragAndDropManager.get().executeWhenReady(checkCriteria);
}
3.68
flink_AbstractUdfOperator_setBroadcastVariable
/**
 * Binds the result produced by a plan rooted at {@code root} to a variable used by the UDF
 * wrapped in this operator.
 *
 * @param root The root of the plan producing this input.
 */
public void setBroadcastVariable(String name, Operator<?> root) {
  if (name == null) {
    throw new IllegalArgumentException("The broadcast input name may not be null.");
  }
  if (root == null) {
    throw new IllegalArgumentException("The broadcast input root operator may not be null.");
  }
  this.broadcastInputs.put(name, root);
}
3.68
hbase_Bytes_readAsInt
/**
 * Converts a byte array to an int value
 * @param bytes byte array
 * @param offset offset into array
 * @param length how many bytes should be considered for creating int
 * @return the int value
 * @throws IllegalArgumentException if there's not enough room in the array at the offset
 *           indicated.
 */
public static int readAsInt(byte[] bytes, int offset, final int length) {
  if (offset + length > bytes.length) {
    throw new IllegalArgumentException("offset (" + offset + ") + length (" + length
      + ") exceed the" + " capacity of the array: " + bytes.length);
  }
  int n = 0;
  for (int i = offset; i < (offset + length); i++) {
    n <<= 8;
    n ^= bytes[i] & 0xFF;
  }
  return n;
}
3.68
framework_AbstractValidator_getMessage
/**
 * Returns the error message for the given value.
 *
 * @param value
 *            an invalid value
 * @return the formatted error message
 */
protected String getMessage(T value) {
  return messageProvider.apply(value);
}
3.68
hbase_MemorySizeUtil_getBucketCacheSize
/**
 * @param conf used to read config for bucket cache size.
 * @return the number of bytes to use for bucket cache, negative if disabled.
 */
public static long getBucketCacheSize(final Configuration conf) {
  // Size configured in MBs
  float bucketCacheSize = conf.getFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0F);
  if (bucketCacheSize < 1) {
    throw new IllegalArgumentException("Bucket Cache should be minimum 1 MB in size."
      + "Configure 'hbase.bucketcache.size' with > 1 value");
  }
  return (long) (bucketCacheSize * 1024 * 1024);
}
3.68
flink_ReusingKeyGroupedIterator_nextKey
/**
 * Moves the iterator to the next key. This method may skip any values that have not yet been
 * returned by the iterator created by the {@link #getValues()} method. Hence, if called
 * multiple times it "removes" pairs.
 *
 * @return true if the input iterator has another group of key-value pairs that share the same
 *     key.
 */
@Override
public boolean nextKey() throws IOException {
  // first element (or empty)
  if (this.current == null) {
    if (this.done) {
      this.valuesIterator = null;
      return false;
    }
    this.current = this.reuse;
    if ((this.current = this.iterator.next(this.current)) != null) {
      this.comparator.setReference(this.current);
      this.lookAheadHasNext = false;
      this.valuesIterator = new ValuesIterator();
      this.valuesIterator.currentIsUnconsumed = true;
      return true;
    } else {
      // empty input, set everything null
      this.valuesIterator = null;
      this.current = null;
      this.done = true;
      return false;
    }
  }
  this.valuesIterator.iteratorAvailable = true;
  // Whole value-iterator was read and a new key is available.
  if (this.lookAheadHasNext) {
    this.lookAheadHasNext = false;
    this.current = this.lookahead;
    this.lookahead = null;
    this.comparator.setReference(this.current);
    this.valuesIterator.currentIsUnconsumed = true;
    return true;
  }
  // try to move to next key.
  // Required if user code / reduce() method did not read the whole value iterator.
  while (true) {
    if (!this.done && ((this.current = this.iterator.next(this.current)) != null)) {
      if (!this.comparator.equalToReference(this.current)) {
        // the keys do not match, so we have a new group. store the current keys
        this.comparator.setReference(this.current);
        this.lookAheadHasNext = false;
        this.valuesIterator.currentIsUnconsumed = true;
        return true;
      }
    } else {
      this.valuesIterator = null;
      this.current = null;
      this.done = true;
      return false;
    }
  }
}
3.68
framework_BootstrapHandler_getThemeName
/**
 * Override if required.
 *
 * @param context the bootstrap context
 * @return the name of the theme to use
 */
public String getThemeName(BootstrapContext context) {
  UICreateEvent event = new UICreateEvent(context.getRequest(), context.getUIClass());
  return context.getBootstrapResponse().getUIProvider().getTheme(event);
}
3.68
pulsar_Function_initialize
/**
 * Called once to initialize resources when function instance is started.
 *
 * @param context The Function context
 *
 * @throws Exception if an error occurs
 */
default void initialize(Context context) throws Exception {}
3.68
hadoop_RollingFileSystemSink_checkForErrors
/**
 * If the sink isn't set to ignore errors, throw a {@link MetricsException}
 * if the stream encountered an exception. The message parameter will be used
 * as the new exception's message with the current file name
 * ({@link #currentFilePath}) appended to it.
 *
 * @param message the exception message. The message will have a colon and
 *          the current file name ({@link #currentFilePath}) appended to it.
 * @throws MetricsException thrown if there was an error and the sink isn't
 *           ignoring errors
 */
private void checkForErrors(String message) throws MetricsException {
  if (!ignoreError && currentOutStream.checkError()) {
    throw new MetricsException(message + ": " + currentFilePath);
  }
}
3.68
framework_FieldGroup_bind
/**
 * Binds the field with the given propertyId from the current item. If an
 * item has not been set then the binding is postponed until the item is set
 * using {@link #setItemDataSource(Item)}.
 * <p>
 * This method also adds validators when applicable.
 * </p>
 *
 * @param field
 *            The field to bind
 * @param propertyId
 *            The propertyId to bind to the field
 * @throws BindException
 *             If the field is null or the property id is already bound to
 *             another field by this field binder
 */
public void bind(Field<?> field, Object propertyId) throws BindException {
  throwIfFieldIsNull(field, propertyId);
  throwIfPropertyIdAlreadyBound(field, propertyId);
  fieldToPropertyId.put(field, propertyId);
  propertyIdToField.put(propertyId, field);
  if (itemDataSource == null) {
    clearField(field);
    // Will be bound when data source is set
    return;
  }
  field.setPropertyDataSource(
      wrapInTransactionalProperty(getItemProperty(propertyId)));
  configureField(field);
}
3.68
flink_AbstractFsCheckpointStorageAccess_getDefaultSavepointDirectory
/**
 * Gets the default directory for savepoints. Returns null, if no default savepoint directory is
 * configured.
 */
@Nullable
public Path getDefaultSavepointDirectory() {
  return defaultSavepointDirectory;
}
3.68
hadoop_StateStoreMetrics_getCacheLoadMetrics
/**
 * Retrieve unmodifiable map of cache loading metrics.
 *
 * @return unmodifiable map of cache loading metrics.
 */
@VisibleForTesting
public Map<String, MutableRate> getCacheLoadMetrics() {
  return Collections.unmodifiableMap(cacheLoadMetrics);
}
3.68
hbase_CommonFSUtils_getTableDir
/**
 * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
 * path rootdir
 * @param rootdir qualified path of HBase root directory
 * @param tableName name of table
 * @return {@link org.apache.hadoop.fs.Path} for table
 */
public static Path getTableDir(Path rootdir, final TableName tableName) {
  return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
    tableName.getQualifierAsString());
}
3.68
hbase_HRegion_getRowLock
/**
 * Get an exclusive (write) lock on a given row.
 * @param row Which row to lock.
 * @return A locked RowLock. The lock is exclusive and already acquired.
 */
public RowLock getRowLock(byte[] row) throws IOException {
  return getRowLock(row, false);
}
3.68
framework_IndexedContainer_removeItemProperty
/**
 * Indexed container does not support removing properties. Remove
 * properties at container level. See
 * {@link IndexedContainer#removeContainerProperty(Object)}
 *
 * @see Item#removeProperty(Object)
 */
@Override
public boolean removeItemProperty(Object id) throws UnsupportedOperationException {
  throw new UnsupportedOperationException(
      "Indexed container item does not support property removal");
}
3.68
hbase_PrivateCellUtil_createFirstDeleteFamilyCellOnRow
/**
 * Create a Delete Family Cell for the specified row and family that would be smaller than all
 * other possible Delete Family KeyValues that have the same row and family. Used for seeking.
 * @param row - row key (arbitrary byte array)
 * @param fam - family name
 * @return First Delete Family possible key on passed <code>row</code>.
 */
public static Cell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) {
  return new FirstOnRowDeleteFamilyCell(row, fam);
}
3.68
hbase_PrivateCellUtil_writeRowKeyExcludingCommon
/** Write rowkey excluding the common part. */
public static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonPrefix,
  DataOutputStream out) throws IOException {
  if (commonPrefix == 0) {
    out.writeShort(rLen);
  } else if (commonPrefix == 1) {
    out.writeByte((byte) rLen);
    commonPrefix--;
  } else {
    commonPrefix -= KeyValue.ROW_LENGTH_SIZE;
  }
  if (rLen > commonPrefix) {
    writeRowSkippingBytes(out, cell, rLen, commonPrefix);
  }
}
3.68
hadoop_HAState_prepareToExitState
/**
 * Method to be overridden by subclasses to prepare to exit a state.
 * This method is called <em>without</em> the context being locked.
 * This is used by the standby state to cancel any checkpoints
 * that are going on. It can also be used to check any preconditions
 * for the state transition.
 *
 * This method should not make any destructive changes to the state
 * (eg stopping threads) since {@link #prepareToEnterState(HAContext)}
 * may subsequently cancel the state transition.
 * @param context HA context
 * @throws ServiceFailedException on precondition failure
 */
public void prepareToExitState(final HAContext context) throws ServiceFailedException {}
3.68
flink_MemorySegment_putFloatBigEndian
/**
 * Writes the given single-precision float value (32bit, 4 bytes) to the given position in big
 * endian byte order. This method's speed depends on the system's native byte order, and it is
 * possibly slower than {@link #putFloat(int, float)}. For most cases (such as transient storage
 * in memory or serialization for I/O and network), it suffices to know that the byte order in
 * which the value is written is the same as the one in which it is read, and {@link
 * #putFloat(int, float)} is the preferable choice.
 *
 * @param index The position at which the value will be written.
 * @param value The float value to be written.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 4.
 */
public void putFloatBigEndian(int index, float value) {
  putIntBigEndian(index, Float.floatToRawIntBits(value));
}
3.68
hbase_ReplicationSyncUp_claimReplicationQueues
// When using this tool, usually the source cluster is unhealthy, so we should try to claim the
// replication queues for the dead region servers first and then replicate the data out.
private void claimReplicationQueues(ReplicationSourceManager mgr, Set<ServerName> regionServers)
  throws ReplicationException, KeeperException, IOException {
  // union the region servers from both places, i.e, from the wal directory, and the records in
  // replication queue storage.
  Set<ServerName> replicators = new HashSet<>(regionServers);
  ReplicationQueueStorage queueStorage = mgr.getQueueStorage();
  replicators.addAll(queueStorage.listAllReplicators());
  FileSystem fs = CommonFSUtils.getCurrentFileSystem(getConf());
  Path infoDir = new Path(CommonFSUtils.getRootDir(getConf()), INFO_DIR);
  for (ServerName sn : replicators) {
    List<ReplicationQueueId> replicationQueues = queueStorage.listAllQueueIds(sn);
    System.out.println(sn + " is dead, claim its replication queues: " + replicationQueues);
    // record the rs name, so when master restarting, we will skip claiming its replication queue
    fs.createNewFile(new Path(infoDir, sn.getServerName()));
    for (ReplicationQueueId queueId : replicationQueues) {
      mgr.claimQueue(queueId, true);
    }
  }
}
3.68
graphhopper_VirtualEdgeIteratorState_getOriginalEdgeKey
/**
 * This method returns the original (not virtual!) edge key. I.e. also the direction is
 * already correctly encoded.
 *
 * @see EdgeIteratorState#getEdgeKey()
 */
public int getOriginalEdgeKey() {
  return originalEdgeKey;
}
3.68
flink_Types_OBJECT_ARRAY
/**
 * Returns type information for Java arrays of object types (such as <code>String[]</code>,
 * <code>Integer[]</code>). The array itself must not be null. Null values for elements are
 * supported.
 *
 * @param elementType element type of the array
 */
@SuppressWarnings("unchecked")
public static <E> TypeInformation<E[]> OBJECT_ARRAY(TypeInformation<E> elementType) {
  if (elementType == Types.STRING) {
    return (TypeInformation) BasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO;
  }
  return ObjectArrayTypeInfo.getInfoFor(elementType);
}
3.68
morf_Criterion_eq
/**
 * Helper method to create a new "EQUALS" expression.
 *
 * <blockquote><pre>
 * Criterion.eq(new Field("agreementnumber"), "A0001");</pre></blockquote>
 *
 * @param field the field to evaluate in the expression (the left hand side of the expression)
 * @param value the value to evaluate in the expression (the right hand side)
 * @return a new Criterion object
 */
public static Criterion eq(AliasedField field, Object value) {
  return new Criterion(Operator.EQ, field, value);
}
3.68
flink_OperatorStateCheckpointOutputStream_startNewPartition
/**
 * User code can call this method to signal that it begins to write a new partition of operator
 * state. Each previously written partition is considered final/immutable as soon as this method
 * is called again.
 */
public void startNewPartition() throws IOException {
  partitionOffsets.add(delegate.getPos());
}
3.68
hadoop_TaskManifest_serializer
/**
 * Create a JSON serializer for this class.
 * @return a serializer.
 */
public static JsonSerialization<TaskManifest> serializer() {
  return new JsonSerialization<>(TaskManifest.class, false, true);
}
3.68
flink_ExecutionConfig_setDefaultInputDependencyConstraint
/**
 * This method is deprecated. It was used to set the {@link InputDependencyConstraint} utilized
 * by the old scheduler implementations which got removed as part of FLINK-20589. The current
 * implementation has no effect.
 *
 * @param ignored Ignored parameter.
 * @deprecated due to the deprecation of {@code InputDependencyConstraint}.
 */
@PublicEvolving
@Deprecated
public void setDefaultInputDependencyConstraint(InputDependencyConstraint ignored) {}
3.68
framework_Profiler_getRelativeTimeMillis
/**
 * Returns time relative to the particular page load time. The value should
 * not be used directly but rather the difference between two values returned
 * by this method should be used to compare measurements.
 *
 * @since 7.6
 */
public static double getRelativeTimeMillis() {
  return RELATIVE_TIME_SUPPLIER.getRelativeTime();
}
3.68
flink_DagConnection_markBreaksPipeline
/**
 * Marks that this connection should do a decoupled data exchange (such as batched) rather than
 * pipeline data. Connections are marked as pipeline breakers to avoid deadlock situations.
 */
public void markBreaksPipeline() {
  this.breakPipeline = true;
}
3.68
druid_DruidStatManagerFacade_mergWallStat
/**
 * @return the merged wall statistics
 * @deprecated use {@code mergeWallStat} instead
 */
public static Map mergWallStat(Map mapA, Map mapB) {
  return mergeWallStat(mapA, mapB);
}
3.68
framework_Escalator_updateDecoClip
/**
 * Crop the decorator element so that it doesn't overlap the header
 * and footer sections.
 *
 * @param bodyTop
 *            the top coordinate of the escalator body
 * @param bodyBottom
 *            the bottom coordinate of the escalator body
 * @param decoWidth
 *            width of the deco
 */
private void updateDecoClip(final double bodyTop, final double bodyBottom,
    final double decoWidth) {
  final int top = deco.getAbsoluteTop();
  final int bottom = deco.getAbsoluteBottom();
  /*
   * FIXME
   *
   * Height and its use is a workaround for the issue where
   * coordinates of the deco are not calculated yet. This will
   * prevent a deco from being displayed when it's added to DOM
   */
  final int height = bottom - top;
  if (top < bodyTop || bottom > bodyBottom) {
    final double topClip = Math.max(0.0D, bodyTop - top);
    final double bottomClip = height - Math.max(0.0D, bottom - bodyBottom);
    // TODO [optimize] not sure how GWT compiles this
    final String clip = new StringBuilder("rect(")
        .append(topClip).append("px,").append(decoWidth)
        .append("px,").append(bottomClip).append("px,0)")
        .toString();
    deco.getStyle().setProperty("clip", clip);
  } else {
    deco.getStyle().setProperty("clip", "auto");
  }
}
3.68
morf_TableBean_isTemporary
/**
 * {@inheritDoc}
 *
 * @see org.alfasoftware.morf.metadata.Table#isTemporary()
 */
@Override
public boolean isTemporary() {
  return isTemporary;
}
3.68
flink_DataStreamSink_name
/**
 * Sets the name of this sink. This name is used by the visualization and logging during
 * runtime.
 *
 * @return The named sink.
 */
public DataStreamSink<T> name(String name) {
  transformation.setName(name);
  return this;
}
3.68
graphhopper_GraphHopper_setOSMFile
/**
 * This file can be an osm xml (.osm), a compressed xml (.osm.zip or .osm.gz) or a protobuf file
 * (.pbf).
 */
public GraphHopper setOSMFile(String osmFile) {
  ensureNotLoaded();
  if (isEmpty(osmFile))
    throw new IllegalArgumentException("OSM file cannot be empty.");
  this.osmFile = osmFile;
  return this;
}
3.68
morf_HumanReadableStatementHelper_generateWhereClause
/**
 * Generates a string describing record selection criteria. If there are no selection
 * criteria then an empty string is returned.
 *
 * @param criterion the criterion to describe.
 * @return a string containing the human-readable description of the clause.
 */
private static String generateWhereClause(final Criterion criterion) {
  if (criterion == null) {
    return "";
  } else {
    return " where " + generateCriterionString(criterion, false);
  }
}
3.68
framework_AbstractMedia_isMuted
/**
 * @return true if the audio is muted.
 */
public boolean isMuted() {
  return getState(false).muted;
}
3.68
flink_EnvironmentInformation_getBuildTimeString
/**
 * @return The Instant this version of the software was built as a String using the
 *     Europe/Berlin timezone.
 */
public static String getBuildTimeString() {
  return getVersionsInstance().gitBuildTimeStr;
}
3.68
querydsl_Projections_appending
/**
 * Create an appending factory expression which serializes all the arguments but uses
 * the base value as the return value.
 *
 * @param base first argument
 * @param rest additional arguments
 * @param <T> type of projection
 * @return factory expression
 */
public static <T> AppendingFactoryExpression<T> appending(Expression<T> base, Expression<?>... rest) {
  return new AppendingFactoryExpression<T>(base, rest);
}
3.68
framework_VFilterSelect_updateRootWidth
/**
 * Calculates the width of the select if the select has undefined width.
 * Should be called when the width changes or when the icon changes.
 * <p>
 * For internal use only. May be removed or replaced in the future.
 */
public void updateRootWidth() {
  ComponentConnector paintable = ConnectorMap.get(client).getConnector(this);
  if (paintable.isUndefinedWidth()) {
    /*
     * When the select has an undefined width we need to check that we
     * are only setting the text box width relative to the first page
     * width of the items. If this is not done the text box width will
     * change when the popup is used to view longer items than the text
     * box is wide.
     */
    int w = WidgetUtil.getRequiredWidth(this);
    if ((!initDone || currentPage + 1 < 0) && suggestionPopupMinWidth > w) {
      /*
       * We want to compensate for the paddings just to preserve the
       * exact size as in Vaadin 6.x, but we get here before
       * MeasuredSize has been initialized.
       * Util.measureHorizontalPaddingAndBorder does not work with
       * border-box, so we must do this the hard way.
       */
      Style style = getElement().getStyle();
      String originalPadding = style.getPadding();
      String originalBorder = style.getBorderWidth();
      style.setPaddingLeft(0, Unit.PX);
      style.setBorderWidth(0, Unit.PX);
      style.setProperty("padding", originalPadding);
      style.setProperty("borderWidth", originalBorder);
      // Use util.getRequiredWidth instead of getOffsetWidth here
      int iconWidth = selectedItemIcon == null ? 0
          : WidgetUtil.getRequiredWidth(selectedItemIcon);
      int buttonWidth = popupOpener == null ? 0
          : WidgetUtil.getRequiredWidth(popupOpener);
      /*
       * Instead of setting the width of the wrapper, set the width of
       * the combobox. Subtract the width of the icon and the
       * popupopener.
       */
      tb.setWidth((suggestionPopupMinWidth - iconWidth - buttonWidth) + "px");
    }
    /*
     * Lock the textbox width to its current value if it's not already
     * locked.
     */
    if (!tb.getElement().getStyle().getWidth().endsWith("px")) {
      int iconWidth = selectedItemIcon == null ? 0
          : selectedItemIcon.getOffsetWidth();
      tb.setWidth((tb.getOffsetWidth() - iconWidth) + "px");
    }
  }
}
3.68
framework_DesignContext_setRootComponent
/**
 * Sets the root component of a created component hierarchy.
 *
 * @param rootComponent
 *            the root component of the hierarchy
 */
public void setRootComponent(Component rootComponent) {
  this.rootComponent = rootComponent;
}
3.68
framework_AbstractComponent_getHeightUnits
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.server.Sizeable#getHeightUnits()
 */
@Override
public Unit getHeightUnits() {
  return heightUnit;
}
3.68
hadoop_AbfsClient_getAbfsConfiguration
/**
 * Getter for abfsConfiguration from AbfsClient.
 * @return AbfsConfiguration instance
 */
protected AbfsConfiguration getAbfsConfiguration() {
  return abfsConfiguration;
}
3.68
framework_ComponentTestCase_createCustomActions
/**
 * Override to provide custom actions for the test case.
 *
 * @param actions
 *            Array with default actions. Add custom actions to this. Never
 *            null.
 */
protected void createCustomActions(List<Component> actions) {
}
3.68
flink_EventTimeSessionWindows_withGap
/**
 * Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
 * based on the element timestamp.
 *
 * @param size The session timeout, i.e. the time gap between sessions
 * @return The policy.
 */
public static EventTimeSessionWindows withGap(Time size) {
  return new EventTimeSessionWindows(size.toMilliseconds());
}
3.68
hbase_FuzzyRowFilter_getNextForFuzzyRule
/**
 * @return greater byte array than given (row) which satisfies the fuzzy rule if it exists, null
 *         otherwise
 */
static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int length,
  byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) {
  // To find out the next "smallest" byte array that satisfies fuzzy rule and "greater" than
  // the given one we do the following:
  // 1. setting values on all "fixed" positions to the values from fuzzyKeyBytes
  // 2. if during the first step given row did not increase, then we increase the value at
  // the first "non-fixed" position (where it is not maximum already)
  // It is easier to perform this by using fuzzyKeyBytes copy and setting "non-fixed" position
  // values than otherwise.
  byte[] result =
    Arrays.copyOf(fuzzyKeyBytes, length > fuzzyKeyBytes.length ? length : fuzzyKeyBytes.length);
  if (reverse && length > fuzzyKeyBytes.length) {
    // we need trailing 0xff's instead of trailing 0x00's
    for (int i = fuzzyKeyBytes.length; i < result.length; i++) {
      result[i] = (byte) 0xFF;
    }
  }
  int toInc = -1;
  final Order order = Order.orderFor(reverse);
  boolean increased = false;
  for (int i = 0; i < result.length; i++) {
    if (i >= fuzzyKeyMeta.length || fuzzyKeyMeta[i] == 0 /* non-fixed */) {
      result[i] = row[offset + i];
      if (!order.isMax(row[offset + i])) {
        // this is "non-fixed" position and is not at max value, hence we can increase it
        toInc = i;
      }
    } else if (i < fuzzyKeyMeta.length && fuzzyKeyMeta[i] == -1 /* fixed */) {
      if (order.lt((row[i + offset] & 0xFF), (fuzzyKeyBytes[i] & 0xFF))) {
        // if setting value for any fixed position increased the original array,
        // we are OK
        increased = true;
        break;
      }
      if (order.gt((row[i + offset] & 0xFF), (fuzzyKeyBytes[i] & 0xFF))) {
        // if setting value for any fixed position makes array "smaller", then just stop:
        // in case we found some non-fixed position to increase we will do it, otherwise
        // there's no "next" row key that satisfies fuzzy rule and "greater" than given row
        break;
      }
    }
  }
  if (!increased) {
    if (toInc < 0) {
      return null;
    }
    result[toInc] = order.inc(result[toInc]);
    // Setting all "non-fixed" positions to zeroes to the right of the one we increased so
    // that found "next" row key is the smallest possible
    for (int i = toInc + 1; i < result.length; i++) {
      if (i >= fuzzyKeyMeta.length || fuzzyKeyMeta[i] == 0 /* non-fixed */) {
        result[i] = order.min();
      }
    }
  }
  return reverse ? result : trimTrailingZeroes(result, fuzzyKeyMeta, toInc);
}
3.68
hadoop_BCFile_getOutputStream
/**
 * Get the output stream for BlockAppender's consumption.
 *
 * @return the output stream suitable for writing block data.
 */
OutputStream getOutputStream() {
  return out;
}
3.68
flink_KeyGroupRangeOffsets_getKeyGroupOffset
/**
 * Returns the offset for the given key-group. The key-group must be contained in the range.
 *
 * @param keyGroup Key-group for which we query the offset. Key-group must be contained in the
 *     range.
 * @return The offset for the given key-group which must be contained in the range.
 */
public long getKeyGroupOffset(int keyGroup) {
  return offsets[computeKeyGroupIndex(keyGroup)];
}
3.68
hadoop_FederationMethodWrapper_getTypes
/**
 * Get the calling types for this method.
 *
 * @return An array of calling types.
 */
public Class<?>[] getTypes() {
  return Arrays.copyOf(this.types, this.types.length);
}
3.68
framework_VTabsheet_keyFocusTab
/**
 * Focus the specified tab using left/right key. Selection won't change
 * until the selection key is pressed. Selectability should be checked
 * before calling this method.
 */
private void keyFocusTab(int newTabIndex) {
  Tab tab = tb.getTab(newTabIndex);
  if (tab == null) {
    return;
  }
  // Focus the tab, otherwise the selected one will lose focus and
  // TabSheet will get blurred.
  focusTabAtIndex(newTabIndex);
  tb.navigateTab(focusedTabIndex, newTabIndex);
  focusedTabIndex = newTabIndex;
}
3.68
hbase_AsyncTable_exists
/**
 * Test for the existence of columns in the table, as specified by the Gets.
 * <p>
 * This will return a list of booleans. Each value will be true if the related Get matches one or
 * more keys, false if not.
 * <p>
 * This is a server-side call so it prevents any data from being transferred to the client.
 * @param gets the Gets
 * @return A list of {@link CompletableFuture}s that represent the existence for each get.
 */
default List<CompletableFuture<Boolean>> exists(List<Get> gets) {
  return get(toCheckExistenceOnly(gets)).stream()
    .<CompletableFuture<Boolean>> map(f -> f.thenApply(r -> r.getExists())).collect(toList());
}
3.68
hadoop_LogAggregationWebUtils_getLogEndTime
/**
 * Parse the log end time from the html.
 * @param endStr the end time string
 * @return the parsed end time
 */
public static long getLogEndTime(String endStr) throws NumberFormatException {
  long end = Long.MAX_VALUE;
  if (endStr != null && !endStr.isEmpty()) {
    end = Long.parseLong(endStr);
  }
  return end;
}
3.68
pulsar_ManagedCursorImpl_notifyEntriesAvailable
/**
 * Notify the cursor that new entries are available, and trigger the pending
 * read operation if one is waiting.
 */
void notifyEntriesAvailable() {
  if (log.isDebugEnabled()) {
    log.debug("[{}] [{}] Received ml notification", ledger.getName(), name);
  }
  OpReadEntry opReadEntry = WAITING_READ_OP_UPDATER.getAndSet(this, null);
  if (opReadEntry != null) {
    if (log.isDebugEnabled()) {
      log.debug("[{}] [{}] Received notification of new messages persisted, reading at {} -- last: {}",
          ledger.getName(), name, opReadEntry.readPosition, ledger.lastConfirmedEntry);
      log.debug("[{}] Consumer {} cursor notification: other counters: consumed {} mdPos {} rdPos {}",
          ledger.getName(), name, messagesConsumedCounter, markDeletePosition, readPosition);
    }
    PENDING_READ_OPS_UPDATER.incrementAndGet(this);
    opReadEntry.readPosition = (PositionImpl) getReadPosition();
    ledger.asyncReadEntries(opReadEntry);
  } else {
    // No one is waiting to be notified. Ignore
    if (log.isDebugEnabled()) {
      log.debug("[{}] [{}] Received notification but had no pending read operation",
          ledger.getName(), name);
    }
  }
}
3.68
hadoop_BlockData_isValidOffset
/**
 * Indicates whether the given absolute offset is valid.
 * @param offset absolute offset in the file.
 * @return true if the given absolute offset is valid, false otherwise.
 */
public boolean isValidOffset(long offset) {
  return (offset >= 0) && (offset < fileSize);
}
3.68
hbase_LockManager_tryAcquire
/**
 * Acquire the lock within a wait time.
 * @param timeoutMs The maximum time (in milliseconds) to wait for the lock, 0 to wait
 *          indefinitely
 * @return True if the lock was acquired, false if waiting time elapsed before the lock was
 *         acquired
 * @throws InterruptedException If the thread is interrupted while waiting to acquire the lock
 */
public boolean tryAcquire(final long timeoutMs) throws InterruptedException {
  if (proc != null && proc.isLocked()) {
    return true;
  }
  // Use new condition and procedure every time lock is requested.
  final CountDownLatch lockAcquireLatch = new CountDownLatch(1);
  if (regionInfos != null) {
    proc = new LockProcedure(master.getConfiguration(), regionInfos, type, description,
      lockAcquireLatch);
  } else if (tableName != null) {
    proc = new LockProcedure(master.getConfiguration(), tableName, type, description,
      lockAcquireLatch);
  } else if (namespace != null) {
    proc = new LockProcedure(master.getConfiguration(), namespace, type, description,
      lockAcquireLatch);
  } else {
    throw new UnsupportedOperationException("no namespace/table/region provided");
  }
  // The user of a MasterLock should be 'hbase', the only case where this is not true
  // is if from inside a coprocessor we try to take a master lock (which should be avoided)
  proc.setOwner(master.getMasterProcedureExecutor().getEnvironment().getRequestUser());
  master.getMasterProcedureExecutor().submitProcedure(proc);
  long deadline =
    (timeoutMs > 0) ? EnvironmentEdgeManager.currentTime() + timeoutMs : Long.MAX_VALUE;
  while (deadline >= EnvironmentEdgeManager.currentTime() && !proc.isLocked()) {
    try {
      lockAcquireLatch.await(deadline - EnvironmentEdgeManager.currentTime(),
        TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
      LOG.info("InterruptedException when waiting for lock: " + proc.toString());
      // kind of weird, releasing a lock which is not locked. This is to make the procedure
      // finish immediately whenever it gets scheduled so that it doesn't hold the lock.
      release();
      throw e;
    }
  }
  if (!proc.isLocked()) {
    LOG.info("Timed out waiting to acquire procedure lock: " + proc.toString());
    release();
    return false;
  }
  return true;
}
3.68
framework_FocusableComplexPanel_focus
/**
 * Focus the panel.
 */
@Override
public void focus() {
  setFocus(true);
}
3.68
hbase_SimpleRegionNormalizer_skipForMerge
/**
 * Determine if a {@link RegionInfo} should be considered for a merge operation.
 * <p>
 * Callers beware: for safe concurrency, be sure to pass in the local instance of
 * {@link NormalizerConfiguration}, don't use {@code this}'s instance.
 */
private boolean skipForMerge(final NormalizerConfiguration normalizerConfiguration,
  final NormalizeContext ctx, final RegionInfo regionInfo) {
  final RegionState state = ctx.getRegionStates().getRegionState(regionInfo);
  final String name = regionInfo.getEncodedName();
  return logTraceReason(() -> state == null,
    "skipping merge of region {} because no state information is available.", name)
    || logTraceReason(() -> !Objects.equals(state.getState(), RegionState.State.OPEN),
      "skipping merge of region {} because it is not open.", name)
    || logTraceReason(() -> !isOldEnoughForMerge(normalizerConfiguration, ctx, regionInfo),
      "skipping merge of region {} because it is not old enough.", name)
    || logTraceReason(() -> !isLargeEnoughForMerge(normalizerConfiguration, ctx, regionInfo),
      "skipping merge region {} because it is not large enough.", name);
}
3.68
shardingsphere-elasticjob_ElasticJobTracingConfiguration_tracingConfiguration
/**
 * Create a bean of tracing configuration.
 *
 * @param dataSource required by constructor
 * @param tracingDataSource tracing DataSource
 * @return a bean of tracing configuration
 */
@Bean
@ConditionalOnBean(DataSource.class)
public TracingConfiguration<DataSource> tracingConfiguration(final DataSource dataSource,
    @Nullable final DataSource tracingDataSource) {
  return new TracingConfiguration<>("RDB", null == tracingDataSource ? dataSource : tracingDataSource);
}
3.68
framework_DesignFormatter_addConverter
/**
 * Adds a converter for a given type.
 *
 * @param type
 *            Type to convert to/from.
 * @param converter
 *            Converter.
 * @since 8.0
 */
protected <T> void addConverter(Class<?> type, Converter<String, ?> converter) {
  converterMap.put(type, converter);
}
3.68
morf_SelectFirstStatement_deepCopy
/**
 * @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation)
 */
@Override
public SelectFirstStatementBuilder deepCopy(DeepCopyTransformation transformer) {
  return new SelectFirstStatementBuilder(this, transformer);
}
3.68
flink_OperatorChain_linkOperatorWrappers
/**
 * Links operator wrappers in forward topological order.
 *
 * @param allOperatorWrappers is an operator wrapper list of reverse topological order
 */
private StreamOperatorWrapper<?, ?> linkOperatorWrappers(
    List<StreamOperatorWrapper<?, ?>> allOperatorWrappers) {
  StreamOperatorWrapper<?, ?> previous = null;
  for (StreamOperatorWrapper<?, ?> current : allOperatorWrappers) {
    if (previous != null) {
      previous.setPrevious(current);
    }
    current.setNext(previous);
    previous = current;
  }
  return previous;
}
3.68
framework_VCalendar_setMonthNames
/**
 * Set the names of the months.
 *
 * @param names
 *            The names of the months (January, February,...)
 */
public void setMonthNames(String[] names) {
  assert (names.length == 12);
  monthNames = names;
}
3.68
hbase_ByteBufferOutputStream_write
// OutputStream
@Override
public void write(int b) throws IOException {
  checkSizeAndGrow(Bytes.SIZEOF_BYTE);
  curBuf.put((byte) b);
}
3.68
hudi_TableSchemaResolver_readSchemaFromLogFile
/**
 * Read the schema from the log file on path.
 *
 * @return the schema of the last data block in the log file, or null if no data block is present
 */
public static MessageType readSchemaFromLogFile(FileSystem fs, Path path) throws IOException {
  try (Reader reader = HoodieLogFormat.newReader(fs, new HoodieLogFile(path), null)) {
    HoodieDataBlock lastBlock = null;
    while (reader.hasNext()) {
      HoodieLogBlock block = reader.next();
      if (block instanceof HoodieDataBlock) {
        lastBlock = (HoodieDataBlock) block;
      }
    }
    return lastBlock != null ? new AvroSchemaConverter().convert(lastBlock.getSchema()) : null;
  }
}
3.68
framework_VMenuBar_iLayout
/**
 * @author Jouni Koivuviita / Vaadin Ltd.
 */
public void iLayout() {
  iLayout(false);
  updateSize();
}
3.68
hadoop_Canceler_cancel
/**
 * Requests that the current operation be canceled if it is still running.
 * This does not block until the cancellation is successful.
 * @param reason the reason why cancellation is requested
 */
public void cancel(String reason) {
  this.cancelReason = reason;
}
3.68
morf_H2Dialect_alterTableAddColumnStatements
/**
 * @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableAddColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column)
 */
@Override
public Collection<String> alterTableAddColumnStatements(Table table, Column column) {
  String statement = "ALTER TABLE " + schemaNamePrefix() + table.getName()
      + " ADD COLUMN " + column.getName() + ' ' + sqlRepresentationOfColumnType(column, true);
  return Collections.singletonList(statement);
}
3.68
hudi_HoodieTableMetadataUtil_convertFilesToColumnStatsRecords
/**
 * Convert added and deleted action metadata to column stats index records.
 */
public static HoodieData<HoodieRecord> convertFilesToColumnStatsRecords(
    HoodieEngineContext engineContext,
    Map<String, List<String>> partitionToDeletedFiles,
    Map<String, Map<String, Long>> partitionToAppendedFiles,
    MetadataRecordsGenerationParams recordsGenerationParams) {
  // Find the columns to index
  HoodieTableMetaClient dataTableMetaClient = recordsGenerationParams.getDataMetaClient();
  final List<String> columnsToIndex = getColumnsToIndex(recordsGenerationParams,
      Lazy.lazily(() -> tryResolveSchemaForTable(dataTableMetaClient)));
  if (columnsToIndex.isEmpty()) {
    // In case there are no columns to index, bail
    return engineContext.emptyHoodieData();
  }
  LOG.info(String.format("Indexing %d columns for column stats index", columnsToIndex.size()));
  // Create the tuple (partition, filename, isDeleted) to handle both deletes and appends
  final List<Tuple3<String, String, Boolean>> partitionFileFlagTupleList =
      fetchPartitionFileInfoTriplets(partitionToDeletedFiles, partitionToAppendedFiles);
  // Create records MDT
  int parallelism = Math.max(Math.min(partitionFileFlagTupleList.size(),
      recordsGenerationParams.getColumnStatsIndexParallelism()), 1);
  return engineContext.parallelize(partitionFileFlagTupleList, parallelism).flatMap(partitionFileFlagTuple -> {
    final String partitionName = partitionFileFlagTuple.f0;
    final String filename = partitionFileFlagTuple.f1;
    final boolean isDeleted = partitionFileFlagTuple.f2;
    if (!FSUtils.isBaseFile(new Path(filename)) || !filename.endsWith(HoodieFileFormat.PARQUET.getFileExtension())) {
      LOG.warn(String.format("Ignoring file %s as it is not a PARQUET file", filename));
      return Stream.<HoodieRecord>empty().iterator();
    }
    final String filePathWithPartition = partitionName + "/" + filename;
    final String partitionId = getPartitionIdentifier(partitionName);
    return getColumnStatsRecords(partitionId, filePathWithPartition, dataTableMetaClient,
        columnsToIndex, isDeleted).iterator();
  });
}
3.68
flink_PendingCheckpointStats_reportSubtaskStats
/**
 * Reports statistics for a single subtask.
 *
 * @param jobVertexId ID of the task/operator the subtask belongs to.
 * @param subtask The statistics for the subtask.
 * @return <code>true</code> if successfully reported or <code>false</code> otherwise.
 */
boolean reportSubtaskStats(JobVertexID jobVertexId, SubtaskStateStats subtask) {
  TaskStateStats taskStateStats = taskStats.get(jobVertexId);
  if (taskStateStats != null && taskStateStats.reportSubtaskStats(subtask)) {
    if (subtask.isCompleted()) {
      currentNumAcknowledgedSubtasks++;
      latestAcknowledgedSubtask = subtask;
    }
    currentCheckpointedSize += subtask.getCheckpointedSize();
    currentStateSize += subtask.getStateSize();
    long processedData = subtask.getProcessedData();
    if (processedData > 0) {
      currentProcessedData += processedData;
    }
    long persistedData = subtask.getPersistedData();
    if (persistedData > 0) {
      currentPersistedData += persistedData;
    }
    unalignedCheckpoint |= subtask.getUnalignedCheckpoint();
    return true;
  } else {
    return false;
  }
}
3.68
zxing_ECIStringBuilder_append
/**
 * Append the string representation of {@code value} (short for {@code append(String.valueOf(value))}).
 *
 * @param value int to append as a string
 */
public void append(int value) {
  append(String.valueOf(value));
}
3.68
hadoop_ReadBufferManager_currentTimeMillis
/**
 * Similar to System.currentTimeMillis, except implemented with System.nanoTime().
 * System.currentTimeMillis can go backwards when the system clock is changed (e.g., with NTP
 * time synchronization), making it unsuitable for measuring time intervals. nanoTime is
 * strictly monotonically increasing per CPU core. Note: it is not monotonic across sockets,
 * and even within a CPU, it is only the more recent parts which share a clock across all cores.
 *
 * @return current time in milliseconds
 */
private long currentTimeMillis() {
  return System.nanoTime() / 1000 / 1000;
}
3.68
hbase_ThriftTable_createClosestRowAfter
/**
 * Create the closest row after the specified row
 */
protected byte[] createClosestRowAfter(byte[] row) {
  if (row == null) {
    throw new RuntimeException("The passed row is null");
  }
  return Arrays.copyOf(row, row.length + 1);
}
3.68
hbase_HRegion_doRegionCompactionPrep
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//
// These methods are meant to be called periodically by the HRegionServer for
// upkeep.
//////////////////////////////////////////////////////////////////////////////

/**
 * Do preparation for pending compaction.
 */
protected void doRegionCompactionPrep() throws IOException {
}
3.68
hadoop_S3ListRequest_v2
/**
 * Restricted constructors to ensure v1 or v2, not both.
 * @param request v2 request
 * @return new list request container
 */
public static S3ListRequest v2(ListObjectsV2Request request) {
  return new S3ListRequest(null, request);
}
3.68
hbase_RawBytes_encode
/**
 * Write {@code val} into {@code dst}, respecting {@code voff} and {@code vlen}.
 * @param dst the {@link PositionedByteRange} to write to
 * @param val the value to write to {@code dst}
 * @param voff the offset in {@code dst} where to write {@code val} to
 * @param vlen the length of {@code val}
 * @return number of bytes written
 */
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
  Bytes.putBytes(dst.getBytes(), dst.getOffset() + dst.getPosition(), val, voff, vlen);
  dst.setPosition(dst.getPosition() + vlen);
  return vlen;
}
3.68