name: string (length 12–178)
code_snippet: string (length 8–36.5k)
score: float64 (range 3.26–3.68)
flink_PythonCsvUtils_createRowDataToCsvFormatConverterContext
/** * Util for creating a {@link * RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext}. */ public static RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext createRowDataToCsvFormatConverterContext(CsvMapper mapper, ContainerNode<?> container) { return new RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext( mapper, container); }
3.68
pulsar_ManagedLedgerConfig_getBookKeeperEnsemblePlacementPolicyProperties
/** * Returns properties required by configured bookKeeperEnsemblePlacementPolicy. * * @return properties required by the configured bookKeeperEnsemblePlacementPolicy */ public Map<String, Object> getBookKeeperEnsemblePlacementPolicyProperties() { return bookKeeperEnsemblePlacementPolicyProperties; }
3.68
hibernate-validator_ConstrainedExecutable_isEquallyParameterConstrained
/** * Whether this and the given other executable have the same parameter * constraints. * * @param other The other executable to check. * * @return True if this and the other executable have the same parameter * constraints (including cross- parameter constraints and parameter * cascades), false otherwise. */ public boolean isEquallyParameterConstrained(ConstrainedExecutable other) { if ( !getDescriptors( crossParameterConstraints ).equals( getDescriptors( other.crossParameterConstraints ) ) ) { return false; } int i = 0; for ( ConstrainedParameter parameter : parameterMetaData ) { ConstrainedParameter otherParameter = other.getParameterMetaData( i ); // FIXME: how to deal with method overriding with type overloading of one of the parameters? if ( !parameter.getCascadingMetaDataBuilder().equals( otherParameter.getCascadingMetaDataBuilder() ) || !getDescriptors( parameter.getConstraints() ).equals( getDescriptors( otherParameter.getConstraints() ) ) ) { return false; } i++; } return true; }
3.68
graphhopper_Instruction_getPoints
/* This method returns the points associated with this instruction. Note that it does not include the last point, * i.e. the first point of the next instruction object. */ public PointList getPoints() { return points; }
3.68
flink_FlinkContainers_getJobManagerPort
/** Gets JobManager's port on the host machine. */ public int getJobManagerPort() { return jobManager.getMappedPort(this.conf.get(RestOptions.PORT)); }
3.68
hbase_TagUtil_carryForwardTTLTag
/** Carries forward the TTL tag, replacing any existing TTL tag. */ public static List<Tag> carryForwardTTLTag(final List<Tag> tagsOrNull, final long ttl) { if (ttl == Long.MAX_VALUE) { return tagsOrNull; } List<Tag> tags = tagsOrNull; // If we are making the array in here, given we are the last thing checked, we'll be only thing // in the array so set its size to '1' (I saw this being done in earlier version of // tag-handling). if (tags == null) { tags = new ArrayList<>(1); } else { // Remove existing TTL tags if any Iterator<Tag> tagsItr = tags.iterator(); while (tagsItr.hasNext()) { Tag tag = tagsItr.next(); if (tag.getType() == TagType.TTL_TAG_TYPE) { tagsItr.remove(); break; } } } tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttl))); return tags; }
3.68
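The snippet above is a remove-then-add update: any existing TTL tag is dropped before the fresh one is appended, and `Long.MAX_VALUE` is treated as "no TTL". A minimal, self-contained sketch of the same pattern, using a simplified stand-in for HBase's byte-backed `Tag` type (the `SimpleTag` record and the tag-type constant are illustrative, not the real API):

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class TtlTagDemo {
    static final byte TTL_TAG_TYPE = 8; // assumed constant, for illustration only

    record SimpleTag(byte type, long value) {}

    static List<SimpleTag> carryForwardTtl(List<SimpleTag> tagsOrNull, long ttl) {
        if (ttl == Long.MAX_VALUE) {
            return tagsOrNull; // MAX_VALUE means "no TTL": nothing to carry forward
        }
        List<SimpleTag> tags = tagsOrNull == null ? new ArrayList<>(1) : tagsOrNull;
        // Drop an existing TTL tag, if any, before adding the replacement
        Iterator<SimpleTag> it = tags.iterator();
        while (it.hasNext()) {
            if (it.next().type() == TTL_TAG_TYPE) {
                it.remove();
                break;
            }
        }
        tags.add(new SimpleTag(TTL_TAG_TYPE, ttl));
        return tags;
    }

    public static void main(String[] args) {
        List<SimpleTag> tags = new ArrayList<>(List.of(new SimpleTag(TTL_TAG_TYPE, 1000L)));
        System.out.println(carryForwardTtl(tags, 5000L)); // [SimpleTag[type=8, value=5000]]
    }
}
```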
pulsar_NonPersistentTopic_deleteForcefully
/** * Forcefully closes all producers/consumers/replicators and deletes the topic. * * @return a future that completes once the topic has been deleted */ @Override public CompletableFuture<Void> deleteForcefully() { return delete(false, true); }
3.68
framework_ListDataSource_getSelectAllHandler
/** * Returns a {@link SelectAllHandler} for this ListDataSource. * * @return select all handler */ public SelectAllHandler<T> getSelectAllHandler() { return new SelectAllHandler<T>() { @Override public void onSelectAll(SelectAllEvent<T> event) { event.getSelectionModel().select(asList()); } }; }
3.68
pulsar_ProducerConfiguration_getBatchingEnabled
/** * Returns whether automatic message batching is enabled. * * @return true if message batching is enabled, otherwise false. * @since 2.0.0 <br> * It is enabled by default. */ public boolean getBatchingEnabled() { return conf.isBatchingEnabled(); }
3.68
hudi_ImmutablePair_setValue
/** * <p> * Throws {@code UnsupportedOperationException}. * </p> * * <p> * This pair is immutable, so this operation is not supported. * </p> * * @param value the value to set * @return never * @throws UnsupportedOperationException as this operation is not supported */ @Override public R setValue(final R value) { throw new UnsupportedOperationException(); }
3.68
framework_DesignFormatter_removeConverter
/** * Removes the converter for given type, if it was present. * * @param type * Type to remove converter for. */ protected void removeConverter(Class<?> type) { converterMap.remove(type); }
3.68
hbase_Mutation_getCellVisibility
/** Returns the CellVisibility associated with cells in this Mutation. */ public CellVisibility getCellVisibility() throws DeserializationException { byte[] cellVisibilityBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY); if (cellVisibilityBytes == null) return null; return toCellVisibility(cellVisibilityBytes); }
3.68
flink_SerializedCheckpointData_getCheckpointId
/** * Gets the checkpointId of the checkpoint. * * @return The checkpointId of the checkpoint. */ public long getCheckpointId() { return checkpointId; }
3.68
pulsar_LedgerOffloaderFactory_create
/** * Create a ledger offloader with the provided configuration, user-metadata, schema storage, * scheduler and offloaderStats. * * @param offloadPolicies offload policies * @param userMetadata user metadata * @param schemaStorage used for schema lookup in offloader * @param scheduler scheduler * @param offloaderStats offloaderStats * @return the offloader instance * @throws IOException when fail to create an offloader */ default T create(OffloadPoliciesImpl offloadPolicies, Map<String, String> userMetadata, SchemaStorage schemaStorage, OrderedScheduler scheduler, LedgerOffloaderStats offloaderStats) throws IOException { return create(offloadPolicies, userMetadata, scheduler, offloaderStats); }
3.68
hudi_TableHeader_getNumFields
/** * Get number of fields in the table. */ public int getNumFields() { return fieldNames.size(); }
3.68
hudi_PartitionFilterGenerator_buildMinMaxPartitionExpression
/** * This method will extract the min value and the max value of each field, * and construct GreaterThanOrEqual and LessThanOrEqual to build the expression. * * This method can reduce the Expression tree level a lot if each field has too many values. */ private static Expression buildMinMaxPartitionExpression(List<Partition> partitions, List<FieldSchema> partitionFields) { return extractFieldValues(partitions, partitionFields).stream().map(fieldWithValues -> { FieldSchema fieldSchema = fieldWithValues.getKey(); if (!SUPPORT_TYPES.contains(fieldSchema.getType())) { return null; } String[] values = fieldWithValues.getValue(); if (values.length == 1) { return Predicates.eq(new NameReference(fieldSchema.getName()), buildLiteralExpression(values[0], fieldSchema.getType())); } Arrays.sort(values, new ValueComparator(fieldSchema.getType())); return Predicates.and( Predicates.gteq( new NameReference(fieldSchema.getName()), buildLiteralExpression(values[0], fieldSchema.getType())), Predicates.lteq( new NameReference(fieldSchema.getName()), buildLiteralExpression(values[values.length - 1], fieldSchema.getType()))); }) .filter(Objects::nonNull) .reduce(null, (result, expr) -> { if (result == null) { return expr; } else { return Predicates.and(result, expr); } }); }
3.68
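The method above collapses a potentially large per-value OR filter into a single range per field: sort the values, keep only the bounds. A string-based sketch of that idea (plain strings stand in for Hudi's `Expression` tree; all names here are ours):

```java
import java.util.Arrays;
import java.util.List;

public class MinMaxFilterDemo {
    static String minMaxPredicate(String field, List<String> values) {
        if (values.size() == 1) {
            return field + " = " + values.get(0); // single value: plain equality
        }
        List<String> sorted = values.stream().sorted().toList();
        // Only the min and max survive, shrinking the predicate tree
        return field + " >= " + sorted.get(0)
                + " AND " + field + " <= " + sorted.get(sorted.size() - 1);
    }

    public static void main(String[] args) {
        System.out.println(minMaxPredicate("date",
                Arrays.asList("2022-09-03", "2022-09-01", "2022-09-02")));
        // date >= 2022-09-01 AND date <= 2022-09-03
    }
}
```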
hadoop_BufferData_getState
/** * Gets the state of this block. * * @return the state of this block. */ public State getState() { return this.state; }
3.68
hadoop_SinglePendingCommit_load
/** * Load an instance from a file, then validate it. * @param fs filesystem * @param path path * @param serDeser deserializer * @param status status of file to load or null * @return the loaded instance * @throws IOException IO failure * @throws ValidationFailure if the data is invalid */ public static SinglePendingCommit load(FileSystem fs, Path path, JsonSerialization<SinglePendingCommit> serDeser, @Nullable FileStatus status) throws IOException { JsonSerialization<SinglePendingCommit> jsonSerialization = serDeser != null ? serDeser : serializer(); SinglePendingCommit instance = jsonSerialization.load(fs, path, status); instance.filename = path.toString(); instance.validate(); return instance; }
3.68
hbase_MasterObserver_postDisableReplicationPeer
/** * Called after disabling a replication peer * @param peerId a short name that identifies the peer */ default void postDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId) throws IOException { }
3.68
hadoop_AccessTokenTimer_convertExpiresIn
/** * The expires_in param from OAuth is in seconds-from-now. Convert to * milliseconds-from-epoch. */ static Long convertExpiresIn(Timer timer, String expiresInSecs) { long expiresSecs = Long.parseLong(expiresInSecs); long expiresMs = expiresSecs * 1000; return timer.now() + expiresMs; }
3.68
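The conversion is plain arithmetic: `expires_in` is a relative duration in seconds, and the result is an absolute epoch timestamp in milliseconds. A quick standalone check (class and variable names are ours):

```java
public class ExpiresInDemo {
    // now + expires_in * 1000: seconds-from-now becomes milliseconds-from-epoch
    static long convertExpiresIn(long nowMs, String expiresInSecs) {
        return nowMs + Long.parseLong(expiresInSecs) * 1000;
    }

    public static void main(String[] args) {
        long now = 1_700_000_000_000L;                     // pretend "now" in epoch ms
        System.out.println(convertExpiresIn(now, "3600")); // 1700003600000 (one hour later)
    }
}
```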
rocketmq-connect_LocalConfigManagementServiceImpl_triggerSendMessage
/** * send all connector config */ private void triggerSendMessage() { ConnAndTaskConfigs configs = new ConnAndTaskConfigs(); configs.setConnectorConfigs(connectorKeyValueStore.getKVMap()); connectorKeyValueStore.getKVMap().forEach((connectName, connectKeyValue) -> { Struct struct = new Struct(CONNECTOR_CONFIGURATION_V0) .put(FIELD_EPOCH, connectKeyValue.getEpoch()) .put(FIELD_STATE, connectKeyValue.getTargetState().name()) .put(FIELD_PROPS, connectKeyValue.getProperties()); byte[] body = converter.fromConnectData(topic, CONNECTOR_CONFIGURATION_V0, struct); notify(CONNECTOR_KEY(connectName), body); }); taskKeyValueStore.getKVMap().forEach((connectName, taskConfigs) -> { if (taskConfigs == null || taskConfigs.isEmpty()) { return; } taskConfigs.forEach(taskConfig -> { ConnectorTaskId taskId = new ConnectorTaskId(connectName, taskConfig.getInt(ConnectorConfig.TASK_ID)); Struct struct = new Struct(TASK_CONFIGURATION_V0) .put(FIELD_EPOCH, System.currentTimeMillis()) .put(FIELD_PROPS, taskConfig.getProperties()); byte[] body = converter.fromConnectData(topic, TASK_CONFIGURATION_V0, struct); notify(TASK_KEY(taskId), body); }); }); }
3.68
flink_SpillingThread_getSegmentsForReaders
/** * Divides the given collection of memory buffers among {@code numChannels} sublists. * * @param target The list into which the lists with buffers for the channels are put. * @param memory A list containing the memory buffers to be distributed. The buffers are not * removed from this list. * @param numChannels The number of channels for which to allocate buffers. Must not be zero. */ private void getSegmentsForReaders( List<List<MemorySegment>> target, List<MemorySegment> memory, int numChannels) { // determine the memory to use per channel and the number of buffers final int numBuffers = memory.size(); final int buffersPerChannelLowerBound = numBuffers / numChannels; final int numChannelsWithOneMore = numBuffers % numChannels; final Iterator<MemorySegment> segments = memory.iterator(); // collect memory for the channels that get one segment more for (int i = 0; i < numChannelsWithOneMore; i++) { final ArrayList<MemorySegment> segs = new ArrayList<>(buffersPerChannelLowerBound + 1); target.add(segs); for (int k = buffersPerChannelLowerBound; k >= 0; k--) { segs.add(segments.next()); } } // collect memory for the remaining channels for (int i = numChannelsWithOneMore; i < numChannels; i++) { final ArrayList<MemorySegment> segs = new ArrayList<>(buffersPerChannelLowerBound); target.add(segs); for (int k = buffersPerChannelLowerBound; k > 0; k--) { segs.add(segments.next()); } } }
3.68
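The distribution rule above: with n buffers and k channels, the first n % k channels get floor(n/k) + 1 buffers and the rest get floor(n/k). A dependency-free sketch with integers standing in for memory segments:

```java
import java.util.ArrayList;
import java.util.List;

public class BufferSplitDemo {
    static List<List<Integer>> split(List<Integer> buffers, int channels) {
        int lower = buffers.size() / channels;       // floor(n / k)
        int withOneMore = buffers.size() % channels; // channels receiving one extra buffer
        List<List<Integer>> target = new ArrayList<>();
        int next = 0;
        for (int c = 0; c < channels; c++) {
            int take = lower + (c < withOneMore ? 1 : 0);
            target.add(new ArrayList<>(buffers.subList(next, next + take)));
            next += take;
        }
        return target;
    }

    public static void main(String[] args) {
        System.out.println(split(List.of(1, 2, 3, 4, 5, 6, 7), 3));
        // [[1, 2, 3], [4, 5], [6, 7]]
    }
}
```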
hadoop_WriteOperationHelper_uploadPart
/** * Upload part of a multi-partition file. * @param request the upload part request. * @param body the request body. * @param durationTrackerFactory duration tracker factory for operation * @return the result of the operation. * @throws IOException on problems */ @Retries.RetryTranslated public UploadPartResponse uploadPart(UploadPartRequest request, RequestBody body, final DurationTrackerFactory durationTrackerFactory) throws IOException { return retry("upload part #" + request.partNumber() + " upload ID " + request.uploadId(), request.key(), true, withinAuditSpan(getAuditSpan(), () -> owner.uploadPart(request, body, durationTrackerFactory))); }
3.68
framework_SingleSelectionEvent_getSource
/** * The single select on which the Event initially occurred. * * @return The single select on which the Event initially occurred. */ @Override public SingleSelect<T> getSource() { return (SingleSelect<T>) super.getSource(); }
3.68
hadoop_StateStoreUtils_getRecordClass
/** * Get the base class for a record. If we get an implementation of a record we * will return the real parent record class. * * @param <T> Type of the class of the data record. * @param record Record to check its main class. * @return Base class for the record. */ public static <T extends BaseRecord> Class<? extends BaseRecord> getRecordClass(final T record) { return getRecordClass(record.getClass()); }
3.68
hbase_BucketCache_checkIOErrorIsTolerated
/** * Check whether we tolerate IO error this time. If the duration of IOEngine throwing errors * exceeds ioErrorsDurationTimeTolerated, we will disable the cache */ private void checkIOErrorIsTolerated() { long now = EnvironmentEdgeManager.currentTime(); // Do a single read to a local variable to avoid timing issue - HBASE-24454 long ioErrorStartTimeTmp = this.ioErrorStartTime; if (ioErrorStartTimeTmp > 0) { if (cacheEnabled && (now - ioErrorStartTimeTmp) > this.ioErrorsTolerationDuration) { LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + "ms, disabling cache, please check your IOEngine"); disableCache(); } } else { this.ioErrorStartTime = now; } }
3.68
hadoop_LoggingStateChangeListener_stateChanged
/** * Callback for a state change event: log it * @param service the service that has changed. */ @Override public void stateChanged(Service service) { log.info("Entry to state " + service.getServiceState() + " for " + service.getName()); }
3.68
framework_SortOrder_getColumn
/** * Returns the {@link GridColumn} reference given in the constructor. * * @return a grid column reference */ public Grid.Column<?, ?> getColumn() { return column; }
3.68
morf_ViewChangesDeploymentHelper_createView
/** * Creates SQL statements for creating given view. * * @param view View to be created. * @param updateDeployedViews Whether to update the DeployedViews table. * @return SQL statements to be run to create the view. * @deprecated kept to ensure backwards compatibility. */ @Deprecated List<String> createView(View view, boolean updateDeployedViews) { return createView(view, updateDeployedViews, new UpgradeSchemas(schema(), schema())); }
3.68
hibernate-validator_ClassCheckFactory_getClassChecks
/** * Provides a collection of checks to be performed on a given element. * * @param element an element you'd like to check * * @return The checks to be performed to validate the given element */ public Collection<ClassCheck> getClassChecks(Element element) { switch ( element.getKind() ) { case METHOD: return methodChecks; default: return Collections.emptySet(); } }
3.68
framework_StateChangeEvent_addAllStateFields
/** * Recursively adds the names of all properties in the provided state type. * * @param type * the type to process * @param changedProperties * a set of all currently added properties * @param context * the base name of the current object */ @Deprecated private static void addAllStateFields(com.vaadin.client.metadata.Type type, FastStringSet changedProperties, String context) { try { JsArrayObject<Property> properties = type.getPropertiesAsArray(); int size = properties.size(); for (int i = 0; i < size; i++) { Property property = properties.get(i); String propertyName = context + property.getName(); changedProperties.add(propertyName); com.vaadin.client.metadata.Type propertyType = property .getType(); if (propertyType.hasProperties()) { addAllStateFields(propertyType, changedProperties, propertyName + "."); } } } catch (NoDataException e) { throw new IllegalStateException("No property info for " + type + ". Did you remember to compile the right widgetset?", e); } }
3.68
morf_SchemaUtils_column
/** * Build a {@link Column}. * <p> * This method returns a {@link ColumnBuilder} whose properties are an exact copy of the passed in {@link Column}. * </p> * <p> * Use the methods on {@link ColumnBuilder} to provide optional properties. * </p> * * @param column The column to copy. * @return A new {@link ColumnBuilder} for the column. */ public static ColumnBuilder column(Column column) { return new ColumnBuilderImpl(column, column.isNullable(), column.getDefaultValue(), column.isPrimaryKey(), column.isAutoNumbered(), column.getAutoNumberStart()); }
3.68
hbase_HRegion_replayWALFlushMarker
/** * @deprecated Since 3.0.0, will be removed in 4.0.0. Only to keep compatibility with the old region * replica implementation. */ @Deprecated void replayWALFlushMarker(FlushDescriptor flush, long replaySeqId) throws IOException { checkTargetRegion(flush.getEncodedRegionName().toByteArray(), "Flush marker from WAL ", flush); if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { return; // if primary nothing to do } if (LOG.isDebugEnabled()) { LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying flush marker " + TextFormat.shortDebugString(flush)); } startRegionOperation(Operation.REPLAY_EVENT); // use region close lock to guard against close try { FlushAction action = flush.getAction(); switch (action) { case START_FLUSH: replayWALFlushStartMarker(flush); break; case COMMIT_FLUSH: replayWALFlushCommitMarker(flush); break; case ABORT_FLUSH: replayWALFlushAbortMarker(flush); break; case CANNOT_FLUSH: replayWALFlushCannotFlushMarker(flush, replaySeqId); break; default: LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush event with unknown action, ignoring. " + TextFormat.shortDebugString(flush)); break; } logRegionFiles(); } finally { closeRegionOperation(Operation.REPLAY_EVENT); } }
3.68
querydsl_DateTimeExpression_hour
/** * Creates an hours expression (range 0-23) * * @return hour */ public NumberExpression<Integer> hour() { if (hours == null) { hours = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.HOUR, mixin); } return hours; }
3.68
pulsar_NamespaceIsolationPolicies_deletePolicy
/** * Delete a policy. * * @param policyName */ public void deletePolicy(String policyName) { policies.remove(policyName); }
3.68
framework_PureGWTTestApplication_getChildMenu
/** * Gets a reference to a child menu with a certain title, that is a * direct child of this menu level. * * @param title * a title string * @return a Menu object with the specified title string, or null, if * this menu doesn't have a direct child with the specified * title. */ public Menu getChildMenu(String title) { for (Menu m : children) { if (m.title.equals(title)) { return m; } } return null; }
3.68
graphhopper_TurnCost_create
/** * This creates an EncodedValue specifically for the turn costs */ public static DecimalEncodedValue create(String name, int maxTurnCosts) { int turnBits = BitUtil.countBitValue(maxTurnCosts); return new DecimalEncodedValueImpl(key(name), turnBits, 0, 1, false, false, true); }
3.68
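The encoded value above is sized by the number of bits needed to represent `maxTurnCosts`. A sketch of that bit count, assuming GraphHopper's `BitUtil.countBitValue` behaves like the standard "bits to hold 0..maxValue" calculation:

```java
public class BitCountDemo {
    // Number of bits needed to store any value in 0..maxValue
    static int countBitValue(int maxValue) {
        return 32 - Integer.numberOfLeadingZeros(maxValue);
    }

    public static void main(String[] args) {
        System.out.println(countBitValue(3));   // 2 bits cover 0..3
        System.out.println(countBitValue(100)); // 7 bits cover 0..127
    }
}
```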
framework_MultiSelectionModelConnector_onSelectAllEvent
/** * Handler for selecting / deselecting all grid rows. * * @param event * the select all event from grid */ protected void onSelectAllEvent(SelectAllEvent<JsonObject> event) { final boolean allSelected = event.isAllSelected(); final boolean wasAllSelected = isAllSelected(); assert allSelected != wasAllSelected : "Grid Select All CheckBox had invalid state"; if (allSelected && !wasAllSelected) { getState().allSelected = true; updateAllRowsSelected(true); getRpcProxy(GridMultiSelectServerRpc.class).selectAll(); } else if (!allSelected && wasAllSelected) { getState().allSelected = false; updateAllRowsSelected(false); getRpcProxy(GridMultiSelectServerRpc.class).deselectAll(); } }
3.68
framework_Profiler_getMaxTimeSpent
/** * Gets the maximum time spent for one invocation of this node, * including time spent in sub nodes. * * @return the time spent for the slowest invocation, in milliseconds */ public double getMaxTimeSpent() { return maxTime; }
3.68
hadoop_StartupProgress_createView
/** * Creates a {@link StartupProgressView} containing data cloned from this * StartupProgress. Subsequent updates to this StartupProgress will not be * shown in the view. This gives a consistent, unchanging view for callers * that need to perform multiple related read operations. Calculations that * require aggregation, such as overall percent complete, will not be impacted * by mutations performed in other threads mid-way through the calculation. * * @return StartupProgressView containing cloned data */ public StartupProgressView createView() { return new StartupProgressView(this); }
3.68
hadoop_OBSInputStream_onReadFailure
/** * Handle an IOE on a read by attempting to re-open the stream. The * filesystem's readException count will be incremented. * * @param ioe exception caught. * @param length length of data being attempted to read * @throws IOException any exception thrown on the re-open attempt. */ private void onReadFailure(final IOException ioe, final int length) throws IOException { LOG.debug( "Got exception while trying to read from stream {}" + " trying to recover: " + ioe, uri); int i = 1; while (true) { try { reopen("failure recovery", streamCurrentPos, length); return; } catch (OBSIOException e) { LOG.warn( "OBSIOException occurred in reopen for failure recovery, " + "the {} retry time", i, e); if (i == READ_RETRY_TIME) { throw e; } try { Thread.sleep(DELAY_TIME); } catch (InterruptedException ie) { throw e; } } i++; } }
3.68
hbase_MiniHBaseCluster_getRegionServerThreads
/** * @return List of region server threads. Does not return the master even though it is also a * region server. */ public List<JVMClusterUtil.RegionServerThread> getRegionServerThreads() { return this.hbaseCluster.getRegionServers(); }
3.68
hbase_ByteBufferUtils_toBigDecimal
/** * Reads a BigDecimal value at the given buffer's offset. * @param buffer input bytebuffer to read * @param offset input offset * @param length input length in bytes * @return BigDecimal value at offset */ public static BigDecimal toBigDecimal(ByteBuffer buffer, int offset, int length) { if (buffer == null || length < Bytes.SIZEOF_INT + 1 || (offset + length > buffer.limit())) { return null; } int scale = toInt(buffer, offset); byte[] tcBytes = new byte[length - Bytes.SIZEOF_INT]; copyFromBufferToArray(tcBytes, buffer, offset + Bytes.SIZEOF_INT, 0, length - Bytes.SIZEOF_INT); return new BigDecimal(new BigInteger(tcBytes), scale); }
3.68
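The byte layout read above is a 4-byte scale followed by the two's-complement bytes of the unscaled value. A self-contained round trip over that layout (the codec class is ours, not HBase's):

```java
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;

public class BigDecimalCodecDemo {
    static byte[] encode(BigDecimal value) {
        byte[] unscaled = value.unscaledValue().toByteArray();
        return ByteBuffer.allocate(4 + unscaled.length)
                .putInt(value.scale()) // 4-byte scale prefix
                .put(unscaled)         // two's-complement unscaled value
                .array();
    }

    static BigDecimal decode(byte[] bytes) {
        ByteBuffer buf = ByteBuffer.wrap(bytes);
        int scale = buf.getInt();
        byte[] unscaled = new byte[bytes.length - 4];
        buf.get(unscaled);
        return new BigDecimal(new BigInteger(unscaled), scale);
    }

    public static void main(String[] args) {
        System.out.println(decode(encode(new BigDecimal("123.456")))); // 123.456
    }
}
```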
flink_TypeInference_newBuilder
/** Builder for configuring and creating instances of {@link TypeInference}. */ public static TypeInference.Builder newBuilder() { return new TypeInference.Builder(); }
3.68
morf_SqlDialect_getSqlForGreatest
/** * Converts the greatest function into SQL. * * @param function the function details * @return a string representation of the SQL */ protected String getSqlForGreatest(Function function) { return getGreatestFunctionName() + '(' + Joiner.on(", ").join(function.getArguments().stream().map(f -> getSqlFrom(f)).iterator()) + ')'; }
3.68
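The rendering above is just the function name wrapped around a comma-joined argument list. A dependency-free sketch using `String.join` in place of Guava's `Joiner` (names are illustrative):

```java
import java.util.List;

public class GreatestSqlDemo {
    static String sqlForGreatest(String functionName, List<String> renderedArgs) {
        return functionName + '(' + String.join(", ", renderedArgs) + ')';
    }

    public static void main(String[] args) {
        System.out.println(sqlForGreatest("GREATEST", List.of("a", "b", "c")));
        // GREATEST(a, b, c)
    }
}
```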
flink_MetricGroup_meter
/** * Registers a new {@link Meter} with Flink. * * @param name name of the meter * @param meter meter to register * @param <M> meter type * @return the registered meter */ default <M extends Meter> M meter(int name, M meter) { return meter(String.valueOf(name), meter); }
3.68
hbase_HMaster_createAssignmentManager
// Will be overridden in test to inject customized AssignmentManager @InterfaceAudience.Private protected AssignmentManager createAssignmentManager(MasterServices master, MasterRegion masterRegion) { return new AssignmentManager(master, masterRegion); }
3.68
framework_Panel_setScrollLeft
/* * (non-Javadoc) * * @see com.vaadin.server.Scrollable#setScrollLeft(int) */ @Override public void setScrollLeft(int scrollLeft) { if (scrollLeft < 0) { throw new IllegalArgumentException( "Scroll offset must be at least 0"); } getState().scrollLeft = scrollLeft; }
3.68
graphhopper_AbstractTiffElevationProvider_downloadToFile
/** * Download a file at the provided url and save it as the given downloadFile if the downloadFile does not exist. */ private void downloadToFile(File downloadFile, String url) throws IOException { if (!downloadFile.exists()) { int max = 3; for (int trial = 0; trial < max; trial++) { try { downloader.downloadFile(url, downloadFile.getAbsolutePath()); return; } catch (SocketTimeoutException ex) { if (trial >= max - 1) throw new RuntimeException(ex); try { Thread.sleep(sleep); } catch (InterruptedException ignored) { } } } } }
3.68
flink_CompactingHashTable_resizeHashTable
/** * Attempts to double the number of buckets * * @return true on success * @throws IOException */ @VisibleForTesting boolean resizeHashTable() throws IOException { final int newNumBuckets = 2 * this.numBuckets; final int bucketsPerSegment = this.bucketsPerSegmentMask + 1; final int newNumSegments = (newNumBuckets + (bucketsPerSegment - 1)) / bucketsPerSegment; final int additionalSegments = newNumSegments - this.buckets.length; final int numPartitions = this.partitions.size(); if (this.availableMemory.size() < additionalSegments) { for (int i = 0; i < numPartitions; i++) { compactPartition(i); if (this.availableMemory.size() >= additionalSegments) { break; } } } if (this.availableMemory.size() < additionalSegments || this.closed) { return false; } else { this.isResizing = true; // allocate new buckets final int startOffset = (this.numBuckets * HASH_BUCKET_SIZE) % this.segmentSize; final int oldNumBuckets = this.numBuckets; final int oldNumSegments = this.buckets.length; MemorySegment[] mergedBuckets = new MemorySegment[newNumSegments]; System.arraycopy(this.buckets, 0, mergedBuckets, 0, this.buckets.length); this.buckets = mergedBuckets; this.numBuckets = newNumBuckets; // initialize all new buckets boolean oldSegment = (startOffset != 0); final int startSegment = oldSegment ? (oldNumSegments - 1) : oldNumSegments; for (int i = startSegment, bucket = oldNumBuckets; i < newNumSegments && bucket < this.numBuckets; i++) { MemorySegment seg; int bucketOffset; if (oldSegment) { // the first couple of new buckets may be located on an old // segment seg = this.buckets[i]; for (int k = (oldNumBuckets % bucketsPerSegment); k < bucketsPerSegment && bucket < this.numBuckets; k++, bucket++) { bucketOffset = k * HASH_BUCKET_SIZE; // initialize the header fields seg.put( bucketOffset + HEADER_PARTITION_OFFSET, assignPartition(bucket, (byte) numPartitions)); seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0); seg.putLong( bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET); } } else { seg = getNextBuffer(); // go over all buckets in the segment for (int k = 0; k < bucketsPerSegment && bucket < this.numBuckets; k++, bucket++) { bucketOffset = k * HASH_BUCKET_SIZE; // initialize the header fields seg.put( bucketOffset + HEADER_PARTITION_OFFSET, assignPartition(bucket, (byte) numPartitions)); seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0); seg.putLong( bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET); } } this.buckets[i] = seg; oldSegment = false; // we write on at most one old segment } int hashOffset; int hash; int pointerOffset; long pointer; IntArrayList hashList = new IntArrayList(NUM_ENTRIES_PER_BUCKET); LongArrayList pointerList = new LongArrayList(NUM_ENTRIES_PER_BUCKET); IntArrayList overflowHashes = new IntArrayList(64); LongArrayList overflowPointers = new LongArrayList(64); // go over all buckets and split them between old and new buckets for (int i = 0; i < numPartitions; i++) { InMemoryPartition<T> partition = this.partitions.get(i); final MemorySegment[] overflowSegments = partition.overflowSegments; int posHashCode; for (int j = 0, bucket = i; j < this.buckets.length && bucket < oldNumBuckets; j++) { MemorySegment segment = this.buckets[j]; // go over all buckets in the segment belonging to the partition for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < oldNumBuckets; k += numPartitions, bucket += numPartitions) { int bucketOffset = k * HASH_BUCKET_SIZE; if ((int) segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != i) { 
throw new IOException( "Accessed wrong bucket! wanted: " + i + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET)); } // loop over all segments that are involved in the bucket (original bucket // plus overflow buckets) int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET); int numInSegment = 0; pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET; hashOffset = bucketOffset + BUCKET_HEADER_LENGTH; while (true) { while (numInSegment < countInSegment) { hash = segment.getInt(hashOffset); if ((hash % this.numBuckets) != bucket && (hash % this.numBuckets) != (bucket + oldNumBuckets)) { throw new IOException( "wanted: " + bucket + " or " + (bucket + oldNumBuckets) + " got: " + hash % this.numBuckets); } pointer = segment.getLong(pointerOffset); hashList.add(hash); pointerList.add(pointer); pointerOffset += POINTER_LEN; hashOffset += HASH_CODE_LEN; numInSegment++; } // this segment is done. check if there is another chained bucket final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET); if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) { break; } final int overflowSegNum = (int) (forwardPointer >>> 32); segment = overflowSegments[overflowSegNum]; bucketOffset = (int) forwardPointer; countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET); pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET; hashOffset = bucketOffset + BUCKET_HEADER_LENGTH; numInSegment = 0; } segment = this.buckets[j]; bucketOffset = k * HASH_BUCKET_SIZE; // reset bucket for re-insertion segment.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0); segment.putLong( bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET); // refill table if (hashList.size() != pointerList.size()) { throw new IOException( "Pointer and hash counts do not match. hashes: " + hashList.size() + " pointer: " + pointerList.size()); } int newSegmentIndex = (bucket + oldNumBuckets) / bucketsPerSegment; MemorySegment newSegment = this.buckets[newSegmentIndex]; // we need to avoid overflows in the first run int oldBucketCount = 0; int newBucketCount = 0; while (!hashList.isEmpty()) { hash = hashList.removeLast(); pointer = pointerList.removeLong(pointerList.size() - 1); posHashCode = hash % this.numBuckets; if (posHashCode == bucket && oldBucketCount < NUM_ENTRIES_PER_BUCKET) { bucketOffset = (bucket % bucketsPerSegment) * HASH_BUCKET_SIZE; insertBucketEntryFromStart( segment, bucketOffset, hash, pointer, partition.getPartitionNumber()); oldBucketCount++; } else if (posHashCode == (bucket + oldNumBuckets) && newBucketCount < NUM_ENTRIES_PER_BUCKET) { bucketOffset = ((bucket + oldNumBuckets) % bucketsPerSegment) * HASH_BUCKET_SIZE; insertBucketEntryFromStart( newSegment, bucketOffset, hash, pointer, partition.getPartitionNumber()); newBucketCount++; } else if (posHashCode == (bucket + oldNumBuckets) || posHashCode == bucket) { overflowHashes.add(hash); overflowPointers.add(pointer); } else { throw new IOException( "Accessed wrong bucket. 
Target: " + bucket + " or " + (bucket + oldNumBuckets) + " Hit: " + posHashCode); } } hashList.clear(); pointerList.clear(); } } // reset partition's overflow buckets and reclaim their memory this.availableMemory.addAll(partition.resetOverflowBuckets()); // clear overflow lists int bucketArrayPos; int bucketInSegmentPos; MemorySegment bucket; while (!overflowHashes.isEmpty()) { hash = overflowHashes.removeLast(); pointer = overflowPointers.removeLong(overflowPointers.size() - 1); posHashCode = hash % this.numBuckets; bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits; bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS; bucket = this.buckets[bucketArrayPos]; insertBucketEntryFromStart( bucket, bucketInSegmentPos, hash, pointer, partition.getPartitionNumber()); } overflowHashes.clear(); overflowPointers.clear(); } this.isResizing = false; return true; } }
3.68
hbase_MetricsConnection_decrConnectionCount
/** Decrement the connection count of the metrics within a scope */ private void decrConnectionCount() { connectionCount.dec(); }
3.68
framework_VCalendar_setLastHourOfTheDay
/** * Set the last hour of the day. * * @param hour * The last hour of the day */ public void setLastHourOfTheDay(int hour) { assert (hour >= 0 && hour <= 23); lastHour = hour; }
3.68
morf_RemoveColumn_isApplied
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources) */ @Override public boolean isApplied(Schema schema, ConnectionResources database) { // If we can't find the table assume we are not applied. If the table is removed // in a subsequent step it is up to that step to mark itself dependent on this one. if (!schema.tableExists(tableName)) { return false; } Table table = schema.getTable(tableName); SchemaHomology homology = new SchemaHomology(); for (Column column : table.columns()) { if (homology.columnsMatch(column, columnDefinition)) { return false; } } return true; }
3.68
flink_JoinInputSideSpec_withoutUniqueKey
/** Creates a {@link JoinInputSideSpec} whose input doesn't have any unique keys. */ public static JoinInputSideSpec withoutUniqueKey() { return new JoinInputSideSpec(false, null, null); }
3.68
framework_SQLContainer_refresh
/** * Refreshes the container. If <code>setSizeDirty</code> is * <code>false</code>, assumes that the current size is up to date. This is * used in {@link #updateCount()} to refresh the contents when we know the * size was just updated. * * @param setSizeDirty */ private void refresh(boolean setSizeDirty) { if (setSizeDirty) { sizeDirty = true; } currentOffset = 0; cachedItems.clear(); itemIndexes.clear(); fireContentsChange(); }
3.68
flink_MessageSerializer_serializeRequestFailure
/** * Serializes the exception containing the failure message sent to the {@link * org.apache.flink.queryablestate.network.Client} in case of protocol related errors. * * @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the * message into. * @param requestId The id of the request to which the message refers to. * @param cause The exception thrown at the server. * @return A {@link ByteBuf} containing the serialized message. */ public static ByteBuf serializeRequestFailure( final ByteBufAllocator alloc, final long requestId, final Throwable cause) throws IOException { final ByteBuf buf = alloc.ioBuffer(); // Frame length is set at the end buf.writeInt(0); writeHeader(buf, MessageType.REQUEST_FAILURE); buf.writeLong(requestId); try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf); ObjectOutput out = new ObjectOutputStream(bbos)) { out.writeObject(cause); } // Set frame length int frameLength = buf.readableBytes() - Integer.BYTES; buf.setInt(0, frameLength); return buf; }
3.68
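The framing trick above: write a placeholder length, serialize the payload, then patch the prefix with the real payload size. A sketch of the same length-prefixed frame using plain JDK streams in place of Netty's `ByteBuf` (class and method names are ours):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;

public class FramingDemo {
    static byte[] frame(long requestId, Throwable cause) throws IOException {
        // Build the payload first: request id, then the serialized exception
        ByteArrayOutputStream payload = new ByteArrayOutputStream();
        DataOutputStream data = new DataOutputStream(payload);
        data.writeLong(requestId);
        try (ObjectOutputStream out = new ObjectOutputStream(data)) {
            out.writeObject(cause);
        }
        byte[] body = payload.toByteArray();
        // Prepend the frame length, now that the payload size is known
        ByteArrayOutputStream framed = new ByteArrayOutputStream();
        new DataOutputStream(framed).writeInt(body.length);
        framed.write(body);
        return framed.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(frame(42L, new RuntimeException("boom")).length);
    }
}
```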
AreaShop_ImportJob_execute
/** * Execute the job. */ private void execute() { // Check for RegionForSale data File regionForSaleFolder = new File(plugin.getDataFolder().getParentFile().getAbsolutePath(), "RegionForSale"); if(!regionForSaleFolder.exists()) { message("import-noPluginFolder", regionForSaleFolder.getName()); return; } File worldsFolder = new File(regionForSaleFolder.getAbsolutePath(), "worlds"); if(!worldsFolder.exists()) { message("import-noWorldsFolder"); return; } File[] worldFolders = worldsFolder.listFiles(); if(worldFolders == null) { message("import-noWorldsFolder"); return; } // Import data for each world message("import-start"); // Group with settings for all imported regions RegionGroup regionForSaleGroup = new RegionGroup(plugin, "RegionForSale"); plugin.getFileManager().addGroup(regionForSaleGroup); // Import /RegionForSale/config.yml settings File regionForSaleConfigFile = new File(regionForSaleFolder.getAbsolutePath(), "config.yml"); YamlConfiguration regionForSaleConfig = loadConfiguration(regionForSaleConfigFile); if(regionForSaleConfig == null) { messageNoPrefix("import-loadConfigFailed", regionForSaleConfigFile.getAbsolutePath()); regionForSaleConfig = new YamlConfiguration(); } else { importRegionSettings(regionForSaleConfig, regionForSaleGroup.getSettings(), null, false); regionForSaleGroup.setSetting("priority", 0); } // Import /RegionForSale/general.yml settings File regionForSaleGeneralFile = new File(regionForSaleFolder.getAbsolutePath(), "config.yml"); YamlConfiguration regionForSaleGeneral = loadConfiguration(regionForSaleConfigFile); if(regionForSaleGeneral == null) { messageNoPrefix("import-loadConfigFailed", regionForSaleGeneralFile.getAbsolutePath()); } else { // Collection interval of RegionForSale maps to rent duration String duration = "1 day"; if(regionForSaleGeneral.isLong("interval.collect_money")) { duration = minutesToString(regionForSaleGeneral.getLong("interval.collect_money")); } regionForSaleGroup.setSetting("rent.duration", duration); // Global economy account has an effect close to landlord in AreaShop if(regionForSaleGeneral.isString("global_econ_account")) { regionForSaleGroup.setSetting("general.landlordName", regionForSaleGeneral.getString("global_econ_account")); } } regionForSaleGroup.saveRequired(); ////////// Handle defaults of RegionForSale // Set autoExtend, to keep the same behavior as RegionForSale had regionForSaleGroup.setSetting("rent.autoExtend", true); // Import regions from each world for(File worldFolder : worldFolders) { // Skip files if(!worldFolder.isDirectory() || worldFolder.isHidden()) { continue; } messageNoPrefix("import-doWorld", worldFolder.getName()); // Get the Bukkit world World world = Bukkit.getWorld(worldFolder.getName()); if(world == null) { messageNoPrefix("import-noBukkitWorld"); continue; } // Get the WorldGuard RegionManager RegionManager regionManager = plugin.getRegionManager(world); if(regionManager == null) { messageNoPrefix("import-noRegionManger"); continue; } // Load the /worlds/<world>/regions.yml file File regionsFile = new File(worldFolder.getAbsolutePath(), "regions.yml"); YamlConfiguration regions = loadConfiguration(regionsFile); if(regions == null) { messageNoPrefix("import-loadRegionsFailed", regionsFile.getAbsolutePath()); continue; } // Load /worlds/<world>/config.yml file File worldConfigFile = new File(worldFolder.getAbsolutePath(), "config.yml"); YamlConfiguration worldConfig = loadConfiguration(worldConfigFile); if(worldConfig == null) { messageNoPrefix("import-loadWorldConfigFailed", 
worldConfigFile.getAbsolutePath()); // Simply skip importing the settings, since this is not really fatal worldConfig = new YamlConfiguration(); } else { // RegionGroup with all world settings RegionGroup worldGroup = new RegionGroup(plugin, "RegionForSale-" + worldFolder.getName()); importRegionSettings(worldConfig, worldGroup.getSettings(), null, false); worldGroup.setSetting("priority", 1); worldGroup.addWorld(worldFolder.getName()); plugin.getFileManager().addGroup(regionForSaleGroup); worldGroup.saveRequired(); } // Create groups to hold settings of /worlds/<world>/parent-regions.yml File parentRegionsFile = new File(worldFolder.getAbsolutePath(), "parent-regions.yml"); YamlConfiguration parentRegions = loadConfiguration(parentRegionsFile); if(parentRegions == null) { messageNoPrefix("import-loadParentRegionsFailed", parentRegionsFile.getAbsolutePath()); // Non-fatal, so just continue } else { for(String parentRegionName : parentRegions.getKeys(false)) { // Get WorldGuard region ProtectedRegion worldGuardRegion = regionManager.getRegion(parentRegionName); if(worldGuardRegion == null) { messageNoPrefix("import-noWorldGuardRegionParent", parentRegionName); continue; } // Get settings section ConfigurationSection parentRegionSection = parentRegions.getConfigurationSection(parentRegionName); if(parentRegionSection == null) { messageNoPrefix("import-improperParentRegion", parentRegionName); continue; } // Skip if it does not have any settings if(parentRegionSection.getKeys(false).isEmpty()) { continue; } // Import parent region settings into a RegionGroup RegionGroup parentRegionGroup = new RegionGroup(plugin, "RegionForSale-" + worldFolder.getName() + "-" + parentRegionName); importRegionSettings(parentRegionSection, parentRegionGroup.getSettings(), null, false); parentRegionGroup.setSetting("priority", 2 + parentRegionSection.getLong("info.priority", 0)); parentRegionGroup.saveRequired(); // TODO add all regions that are contained in this parent region // Utils.getWorldEditRegionsInSelection() } } // Read and import regions for(String regionKey : regions.getKeys(false)) { GeneralRegion existingRegion = plugin.getFileManager().getRegion(regionKey); if(existingRegion != null) { if(world.getName().equalsIgnoreCase(existingRegion.getWorldName())) { messageNoPrefix("import-alreadyAdded", regionKey); } else { messageNoPrefix("import-alreadyAddedOtherWorld", regionKey, existingRegion.getWorldName(), world.getName()); } continue; } ConfigurationSection regionSection = regions.getConfigurationSection(regionKey); if(regionSection == null) { messageNoPrefix("import-invalidRegionSection", regionKey); continue; } // Get WorldGuard region ProtectedRegion worldGuardRegion = regionManager.getRegion(regionKey); if(worldGuardRegion == null) { messageNoPrefix("import-noWorldGuardRegion", regionKey); continue; } String owner = regionSection.getString("info.owner", null); boolean isBought = regionSection.getBoolean("info.is-bought"); // TODO: should also take into config settings of parent regions boolean rentable = regionSection.getBoolean("economic-settings.rentable", worldConfig.getBoolean("economic-settings.rentable", regionForSaleConfig.getBoolean("economic-settings.rentable"))); boolean buyable = regionSection.getBoolean("economic-settings.buyable", worldConfig.getBoolean("economic-settings.buyable", regionForSaleConfig.getBoolean("economic-settings.buyable"))); // Can be bought and rented, import as buy if(buyable && rentable) { messageNoPrefix("import-buyAndRent", regionKey); } // Cannot be bought or 
rented, skip if(!buyable && !rentable && owner == null) { messageNoPrefix("import-noBuyAndNoRent", regionKey); continue; } // Create region GeneralRegion region; if(rentable || (owner != null && !isBought)) { region = new RentRegion(regionKey, world); } else { region = new BuyRegion(regionKey, world); } AddingRegionEvent event = plugin.getFileManager().addRegion(region); if (event.isCancelled()) { messageNoPrefix("general-cancelled", event.getReason()); continue; } // Import settings importRegionSettings(regionSection, region.getConfig(), region, !buyable && !rentable); region.getConfig().set("general.importedFrom", "RegionForSale"); // Get existing owners and members List<UUID> existing = new ArrayList<>(); if(owner != null) { @SuppressWarnings("deprecation") OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(owner); if(offlinePlayer != null) { existing.add(offlinePlayer.getUniqueId()); } } for(UUID uuid : plugin.getWorldGuardHandler().getOwners(worldGuardRegion).asUniqueIdList()) { if(!existing.contains(uuid)) { existing.add(uuid); } } for(UUID uuid : plugin.getWorldGuardHandler().getMembers(worldGuardRegion).asUniqueIdList()) { if(!existing.contains(uuid)) { existing.add(uuid); } } // First owner (or if none, the first member) will be the renter/buyer if(!existing.isEmpty()) { region.setOwner(existing.remove(0)); } // Add others as friends for(UUID friend : existing) { region.getFriendsFeature().addFriend(friend, null); } region.saveRequired(); messageNoPrefix("import-imported", regionKey); } } // Update all regions plugin.getFileManager().updateAllRegions(sender); // Write all imported regions and settings to disk plugin.getFileManager().saveRequiredFiles(); }
3.68
hbase_HBaseServerBase_getDataRootDir
/** Returns the data root dir. */ public Path getDataRootDir() { return dataRootDir; }
3.68
hudi_PartitionFilterGenerator_buildPartitionExpression
/** * Build expression from the Partition list. Here we're trying to match all partitions. * * ex. partitionSchema(date, hour) [Partition(2022-09-01, 12), Partition(2022-09-02, 13)] => * Or(And(Equal(Attribute(date), Literal(2022-09-01)), Equal(Attribute(hour), Literal(12))), * And(Equal(Attribute(date), Literal(2022-09-02)), Equal(Attribute(hour), Literal(13)))) */ private static Expression buildPartitionExpression(List<Partition> partitions, List<FieldSchema> partitionFields) { return partitions.stream().map(partition -> { List<String> partitionValues = partition.getValues(); Expression root = null; for (int i = 0; i < partitionFields.size(); i++) { FieldSchema field = partitionFields.get(i); BinaryExpression exp = Predicates.eq(new NameReference(field.getName()), buildLiteralExpression(partitionValues.get(i), field.getType())); if (root != null) { root = Predicates.and(root, exp); } else { root = exp; } } return root; }).reduce(null, (result, expr) -> { if (result == null) { return expr; } else { return Predicates.or(result, expr); } }); }
3.68
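The javadoc above already gives the target shape: one AND per partition, all OR-ed together. A string-based sketch of that reduction (strings stand in for Hudi's `Expression`):

```java
import java.util.List;

public class PartitionExpressionDemo {
    static String build(List<String> fields, List<List<String>> partitions) {
        return partitions.stream().map(values -> {
            StringBuilder and = new StringBuilder();
            for (int i = 0; i < fields.size(); i++) {
                if (and.length() > 0) {
                    and.append(" AND ");
                }
                and.append(fields.get(i)).append(" = ").append(values.get(i));
            }
            return "(" + and + ")";
        }).reduce((a, b) -> a + " OR " + b).orElse(null);
    }

    public static void main(String[] args) {
        System.out.println(build(List.of("date", "hour"),
                List.of(List.of("2022-09-01", "12"), List.of("2022-09-02", "13"))));
        // (date = 2022-09-01 AND hour = 12) OR (date = 2022-09-02 AND hour = 13)
    }
}
```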
hadoop_StagingCommitter_getTaskOutput
/** * Lists the output of a task under the task attempt path. Subclasses can * override this method to change how output files are identified. * <p> * This implementation lists the files that are direct children of the output * path and filters hidden files (file names starting with '.' or '_'). * <p> * The task attempt path is provided by * {@link #getTaskAttemptPath(TaskAttemptContext)} * * @param context this task's {@link TaskAttemptContext} * @return the output files produced by this task in the task attempt path * @throws IOException on a failure */ protected List<LocatedFileStatus> getTaskOutput(TaskAttemptContext context) throws IOException { // get files on the local FS in the attempt path Path attemptPath = requireNonNull(getTaskAttemptPath(context), "No attemptPath path"); LOG.debug("Scanning {} for files to commit", attemptPath); return toList(listAndFilter(getTaskAttemptFilesystem(context), attemptPath, true, HIDDEN_FILE_FILTER)); }
3.68
pulsar_BundleSplitterTask_findBundlesToSplit
/** * Determines which bundles should be split based on various thresholds. * * @param loadData * Load data to base decisions on (does not have benefit of preallocated data since this may not be the * leader broker). * @param pulsar * Service to use. * @return All bundles who have exceeded configured thresholds in number of topics, number of sessions, total * message rates, or total throughput and the brokers on which they reside. */ @Override public Map<String, String> findBundlesToSplit(final LoadData loadData, final PulsarService pulsar) { bundleCache.clear(); namespaceBundleCount.clear(); final ServiceConfiguration conf = pulsar.getConfiguration(); int maxBundleCount = conf.getLoadBalancerNamespaceMaximumBundles(); long maxBundleTopics = conf.getLoadBalancerNamespaceBundleMaxTopics(); long maxBundleSessions = conf.getLoadBalancerNamespaceBundleMaxSessions(); long maxBundleMsgRate = conf.getLoadBalancerNamespaceBundleMaxMsgRate(); long maxBundleBandwidth = conf.getLoadBalancerNamespaceBundleMaxBandwidthMbytes() * LoadManagerShared.MIBI; loadData.getBrokerData().forEach((broker, brokerData) -> { LocalBrokerData localData = brokerData.getLocalData(); for (final Map.Entry<String, NamespaceBundleStats> entry : localData.getLastStats().entrySet()) { final String bundle = entry.getKey(); final NamespaceBundleStats stats = entry.getValue(); if (stats.topics < 2) { if (log.isDebugEnabled()) { log.debug("The count of topics on the bundle {} is less than 2, skip split!", bundle); } continue; } double totalMessageRate = 0; double totalMessageThroughput = 0; // Attempt to consider long-term message data, otherwise effectively ignore. if (loadData.getBundleData().containsKey(bundle)) { final TimeAverageMessageData longTermData = loadData.getBundleData().get(bundle).getLongTermData(); totalMessageRate = longTermData.totalMsgRate(); totalMessageThroughput = longTermData.totalMsgThroughput(); } if (stats.topics > maxBundleTopics || (maxBundleSessions > 0 && (stats.consumerCount + stats.producerCount > maxBundleSessions)) || totalMessageRate > maxBundleMsgRate || totalMessageThroughput > maxBundleBandwidth) { final String namespace = LoadManagerShared.getNamespaceNameFromBundleName(bundle); try { final int bundleCount = pulsar.getNamespaceService() .getBundleCount(NamespaceName.get(namespace)); if ((bundleCount + namespaceBundleCount.getOrDefault(namespace, 0)) < maxBundleCount) { log.info("The bundle {} is considered to be unload. Topics: {}/{}, Sessions: ({}+{})/{}, " + "Message Rate: {}/{} (msgs/s), Message Throughput: {}/{} (MB/s)", bundle, stats.topics, maxBundleTopics, stats.producerCount, stats.consumerCount, maxBundleSessions, totalMessageRate, maxBundleMsgRate, totalMessageThroughput / LoadManagerShared.MIBI, maxBundleBandwidth / LoadManagerShared.MIBI); bundleCache.put(bundle, broker); int bundleNum = namespaceBundleCount.getOrDefault(namespace, 0); namespaceBundleCount.put(namespace, bundleNum + 1); } else { if (log.isDebugEnabled()) { log.debug( "Could not split namespace bundle {} because namespace {} has too many bundles:" + "{}", bundle, namespace, bundleCount); } } } catch (Exception e) { log.warn("Error while getting bundle count for namespace {}", namespace, e); } } } }); return bundleCache; }
3.68
flink_LogicalTypeMerging_findModuloDecimalType
/** Finds the result type of a decimal modulo operation. */ public static DecimalType findModuloDecimalType( int precision1, int scale1, int precision2, int scale2) { final int scale = Math.max(scale1, scale2); int precision = Math.min(precision1 - scale1, precision2 - scale2) + scale; return adjustPrecisionScale(precision, scale); }
3.68
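A worked example of the rule above: scale = max(s1, s2) and precision = min(p1 - s1, p2 - s2) + scale, before `adjustPrecisionScale` caps the result:

```java
public class ModuloDecimalDemo {
    public static void main(String[] args) {
        int p1 = 10, s1 = 2; // DECIMAL(10, 2)
        int p2 = 5,  s2 = 3; // DECIMAL(5, 3)
        int scale = Math.max(s1, s2);                       // 3
        int precision = Math.min(p1 - s1, p2 - s2) + scale; // min(8, 2) + 3 = 5
        System.out.println("DECIMAL(" + precision + ", " + scale + ")"); // DECIMAL(5, 3)
    }
}
```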
Activiti_BpmnActivityBehavior_performOutgoingBehavior
/** * Actual implementation of leaving an activity. * @param execution The current execution context * @param checkConditions Whether or not to check conditions before determining whether or not to take a transition. * @param throwExceptionIfExecutionStuck If true, an {@link ActivitiException} will be thrown in case no transition could be found to leave the activity. */ protected void performOutgoingBehavior(ExecutionEntity execution, boolean checkConditions, boolean throwExceptionIfExecutionStuck) { getAgenda().planTakeOutgoingSequenceFlowsOperation(execution, true); }
3.68
flink_UserDefinedFunctionHelper_getReturnTypeOfAggregateFunction
/** * Tries to infer the TypeInformation of an AggregateFunction's accumulator type. * * @param aggregateFunction The AggregateFunction for which the accumulator type is inferred. * @param scalaType The implicitly inferred type of the accumulator type. * @return The inferred accumulator type of the AggregateFunction. */ public static <T, ACC> TypeInformation<T> getReturnTypeOfAggregateFunction( ImperativeAggregateFunction<T, ACC> aggregateFunction, TypeInformation<T> scalaType) { TypeInformation<T> userProvidedType = aggregateFunction.getResultType(); if (userProvidedType != null) { return userProvidedType; } else if (scalaType != null) { return scalaType; } else { return TypeExtractor.createTypeInfo( aggregateFunction, ImperativeAggregateFunction.class, aggregateFunction.getClass(), 0); } }
3.68
hudi_BaseHoodieTableServiceClient_scheduleClusteringAtInstant
/** * Schedules a new clustering instant with passed-in instant time. * * @param instantTime clustering Instant Time * @param extraMetadata Extra Metadata to be stored */ public boolean scheduleClusteringAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException { return scheduleTableService(instantTime, extraMetadata, TableServiceType.CLUSTER).isPresent(); }
3.68
hbase_MetaBrowser_addParam
/** * Adds {@code value} to {@code encoder} under {@code paramName} when {@code value} is non-null. */ private void addParam(final QueryStringEncoder encoder, final String paramName, final Object value) { if (value != null) { encoder.addParam(paramName, value.toString()); } }
3.68
framework_AbstractComponent_setIcon
/** * Sets the component's icon. * * @param icon * the icon to be shown with the component's caption. */ @Override public void setIcon(Resource icon) { setResource(ComponentConstants.ICON_RESOURCE, icon); }
3.68
hbase_RegionProcedureStore_runWithoutRpcCall
/** * Insert procedure may be called by master's rpc call. There are some check about the rpc call * when mutate region. Here unset the current rpc call and set it back in finally block. See * HBASE-23895 for more details. */ private void runWithoutRpcCall(Runnable runnable) { Optional<RpcCall> rpcCall = RpcServer.unsetCurrentCall(); try { runnable.run(); } finally { rpcCall.ifPresent(RpcServer::setCurrentCall); } }
3.68
flink_SinkFunction_finish
/** * This method is called at the end of data processing. * * <p>The method is expected to flush all remaining buffered data. Exceptions will cause the * pipeline to be recognized as failed, because the last data items are not processed properly. * You may use this method to flush remaining buffered elements in the state into transactions * which you can commit in the last checkpoint. * * <p><b>NOTE:</b>This method does not need to close any resources. You should release external * resources in the {@link RichSinkFunction#close()} method. * * @throws Exception This method may throw exceptions. Throwing an exception will cause the * operation to fail and may trigger recovery. */ default void finish() throws Exception {}
3.68
framework_AbstractRemoteDataSource_unpinHandle
/** * Unpins a previously pinned row with given handle. This function can be * overridden to do specific logic related to unpinning rows. * * @param handle * row handle to unpin * * @throws IllegalStateException * if given row handle has not been pinned before */ protected void unpinHandle(RowHandleImpl handle) throws IllegalStateException { Object key = handle.key; final Integer count = pinnedCounts.get(key); if (count == null) { throw new IllegalStateException("Row " + handle.getRow() + " with key " + key + " was not pinned to begin with"); } else if (count.equals(Integer.valueOf(1))) { pinnedRows.remove(key); pinnedCounts.remove(key); } else { pinnedCounts.put(key, Integer.valueOf(count.intValue() - 1)); } }
3.68
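The unpin logic above is reference counting: a row stays pinned until every pin has been matched by an unpin, and unpinning a row that was never pinned is an error. A minimal sketch of that bookkeeping (this demo class is ours):

```java
import java.util.HashMap;
import java.util.Map;

public class PinCounterDemo {
    private final Map<String, Integer> pinnedCounts = new HashMap<>();

    void pin(String key) {
        pinnedCounts.merge(key, 1, Integer::sum);
    }

    void unpin(String key) {
        Integer count = pinnedCounts.get(key);
        if (count == null) {
            throw new IllegalStateException("Row " + key + " was not pinned to begin with");
        }
        if (count == 1) {
            pinnedCounts.remove(key); // last pin released
        } else {
            pinnedCounts.put(key, count - 1);
        }
    }

    public static void main(String[] args) {
        PinCounterDemo demo = new PinCounterDemo();
        demo.pin("row1");
        demo.pin("row1");
        demo.unpin("row1");
        System.out.println(demo.pinnedCounts); // {row1=1}
    }
}
```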
dubbo_ConfigurationUtils_getProperty
/** * For backward compatibility with the single-instance model. * * @deprecated Replaced by {@link ConfigurationUtils#getProperty(ScopeModel, String, String)} */ @Deprecated public static String getProperty(String property, String defaultValue) { return getProperty(ApplicationModel.defaultModel(), property, defaultValue); }
3.68
hudi_InternalDynamicBloomFilter_addRow
/** * Adds a new row to <i>this</i> dynamic Bloom filter. */ private void addRow() { InternalBloomFilter[] tmp = new InternalBloomFilter[matrix.length + 1]; System.arraycopy(matrix, 0, tmp, 0, matrix.length); tmp[tmp.length - 1] = new InternalBloomFilter(vectorSize, nbHash, hashType); matrix = tmp; }
3.68
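The growth step above copies the filter array and appends one fresh filter; a dynamic Bloom filter does this whenever the current filter saturates. A sketch of the pattern with plain `long[]` rows standing in for `InternalBloomFilter` instances:

```java
import java.util.Arrays;

public class GrowMatrixDemo {
    static long[][] addRow(long[][] matrix, int vectorSize) {
        long[][] tmp = Arrays.copyOf(matrix, matrix.length + 1); // copy existing rows
        tmp[tmp.length - 1] = new long[vectorSize];              // append an empty filter
        return tmp;
    }

    public static void main(String[] args) {
        long[][] matrix = new long[1][4];
        matrix = addRow(matrix, 4);
        System.out.println(matrix.length); // 2
    }
}
```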
flink_AsyncSinkWriter_snapshotState
/** * All in-flight requests that are relevant for the snapshot have been completed, but there may * still be request entries in the internal buffers that are yet to be sent to the endpoint. * These request entries are stored in the snapshot state so that they don't get lost in case of * a failure/restart of the application. */ @Override public List<BufferedRequestState<RequestEntryT>> snapshotState(long checkpointId) { return Collections.singletonList(new BufferedRequestState<>((bufferedRequestEntries))); }
3.68
framework_VTooltip_showTooltip
/**
 * Show a popup containing the currentTooltipInfo.
 */
private void showTooltip() {
    if (currentTooltipInfo.hasMessage()) {
        // Issue #8454: With IE7 the tooltips size is calculated based on
        // the last tooltip's position, causing problems if the last one was
        // in the right or bottom edge. For this reason the tooltip is moved
        // first to 0,0 position so that the calculation goes correctly.
        setPopupPosition(0, 0);
        setPopupPositionAndShow(new PositionCallback() {
            @Override
            public void setPosition(int offsetWidth, int offsetHeight) {
                if (offsetWidth > getMaxWidth()) {
                    setWidth(getMaxWidth() + "px");

                    // Check new height and width with reflowed content
                    offsetWidth = getOffsetWidth();
                    offsetHeight = getOffsetHeight();
                }

                int x = 0;
                int y = 0;
                if (BrowserInfo.get().isTouchDevice()) {
                    setMaxWidth(Window.getClientWidth());
                    offsetWidth = getOffsetWidth();
                    offsetHeight = getOffsetHeight();

                    x = getFinalTouchX(offsetWidth);
                    y = getFinalTouchY(offsetHeight);
                } else {
                    x = getFinalX(offsetWidth);
                    y = getFinalY(offsetHeight);
                }

                setPopupPosition(x, y);
                sinkEvents(Event.ONMOUSEOVER | Event.ONMOUSEOUT);
            }

            /**
             * Return the final X-coordinate of the tooltip based on cursor
             * position, size of the tooltip, size of the page and necessary
             * margins.
             *
             * @param offsetWidth
             * @return The final X-coordinate
             */
            private int getFinalX(int offsetWidth) {
                int x = 0;
                int widthNeeded = 10 + MARGIN + offsetWidth;
                int roomLeft = tooltipEventMouseX;
                int roomRight = Window.getClientWidth() - roomLeft;
                if (roomRight > widthNeeded) {
                    x = tooltipEventMouseX + 10 + Window.getScrollLeft();
                } else {
                    x = tooltipEventMouseX + Window.getScrollLeft() - 10 - offsetWidth;
                }
                if (x + offsetWidth + MARGIN - Window.getScrollLeft() > Window.getClientWidth()) {
                    x = Window.getClientWidth() - offsetWidth - MARGIN + Window.getScrollLeft();
                }

                if (tooltipEventMouseX != EVENT_XY_POSITION_OUTSIDE) {
                    // Do not allow x to be zero, for otherwise the tooltip
                    // does not close when the mouse is moved (see
                    // isTooltipOpen()). #15129
                    int minX = Window.getScrollLeft() + MARGIN;
                    x = Math.max(x, minX);
                }
                return x;
            }

            /**
             * Return the final X-coordinate of the tooltip based on cursor
             * position, size of the tooltip, size of the page and necessary
             * margins.
             *
             * @param offsetWidth
             * @return The final X-coordinate
             */
            private int getFinalTouchX(int offsetWidth) {
                int x = 0;
                int widthNeeded = 10 + offsetWidth;
                int roomLeft = currentElement != null
                        ? currentElement.getAbsoluteLeft()
                        : EVENT_XY_POSITION_OUTSIDE;
                int viewPortWidth = Window.getClientWidth();
                int roomRight = viewPortWidth - roomLeft;
                if (roomRight > widthNeeded) {
                    x = roomLeft;
                } else {
                    x = roomLeft - offsetWidth;
                }
                if (x + offsetWidth - Window.getScrollLeft() > viewPortWidth) {
                    x = viewPortWidth - offsetWidth + Window.getScrollLeft();
                }

                if (roomLeft != EVENT_XY_POSITION_OUTSIDE) {
                    // Do not allow x to be zero, for otherwise the tooltip
                    // does not close when the mouse is moved (see
                    // isTooltipOpen()). #15129
                    int minX = Math.max(1, Window.getScrollLeft());
                    x = Math.max(x, minX);
                }
                return x;
            }

            /**
             * Return the final Y-coordinate of the tooltip based on cursor
             * position, size of the tooltip, size of the page and necessary
             * margins.
             *
             * @param offsetHeight
             * @return The final y-coordinate
             */
            private int getFinalY(int offsetHeight) {
                int y = 0;
                int heightNeeded = 10 + offsetHeight;
                int roomAbove = tooltipEventMouseY;
                int roomBelow = Window.getClientHeight() - roomAbove;

                if (roomBelow > heightNeeded) {
                    y = tooltipEventMouseY + 10 + Window.getScrollTop();
                } else {
                    y = tooltipEventMouseY + Window.getScrollTop() - 10 - offsetHeight;
                }

                if (y + offsetHeight + MARGIN - Window.getScrollTop() > Window.getClientHeight()) {
                    y = tooltipEventMouseY - 5 - offsetHeight + Window.getScrollTop();
                    if (y - Window.getScrollTop() < 0) {
                        // tooltip does not fit on top of the mouse either,
                        // put it at the top of the screen
                        y = Window.getScrollTop();
                    }
                }

                if (tooltipEventMouseY != EVENT_XY_POSITION_OUTSIDE) {
                    // Do not allow y to be zero, for otherwise the tooltip
                    // does not close when the mouse is moved (see
                    // isTooltipOpen()). #15129
                    int minY = Window.getScrollTop() + MARGIN;
                    y = Math.max(y, minY);
                }
                return y;
            }

            /**
             * Return the final Y-coordinate of the tooltip based on cursor
             * position, size of the tooltip, size of the page and necessary
             * margins.
             *
             * @param offsetHeight
             * @return The final y-coordinate
             */
            private int getFinalTouchY(int offsetHeight) {
                int y = 0;
                int heightNeeded = 10 + offsetHeight;
                int roomAbove = currentElement != null
                        ? currentElement.getAbsoluteTop() + currentElement.getOffsetHeight()
                        : EVENT_XY_POSITION_OUTSIDE;
                int roomBelow = Window.getClientHeight() - roomAbove;

                if (roomBelow > heightNeeded) {
                    y = roomAbove;
                } else {
                    y = roomAbove - offsetHeight
                            - (currentElement != null ? currentElement.getOffsetHeight() : 0);
                }

                if (y + offsetHeight - Window.getScrollTop() > Window.getClientHeight()) {
                    y = roomAbove - 5 - offsetHeight + Window.getScrollTop();
                    if (y - Window.getScrollTop() < 0) {
                        // tooltip does not fit on top of the mouse either,
                        // put it at the top of the screen
                        y = Window.getScrollTop();
                    }
                }

                if (roomAbove != EVENT_XY_POSITION_OUTSIDE) {
                    // Do not allow y to be zero, for otherwise the tooltip
                    // does not close when the mouse is moved (see
                    // isTooltipOpen()). #15129
                    int minY = Window.getScrollTop();
                    y = Math.max(y, minY);
                }
                return y;
            }
        });
    } else {
        hide();
    }
}
3.68
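The placement logic above reduces to one heuristic per axis: put the tooltip on the side of the cursor with more room, then clamp it into the viewport. A hypothetical one-dimensional version of that heuristic, detached from GWT:

// Hypothetical 1-D tooltip placement: prefer the side of the cursor with
// more room, then clamp so the tooltip stays inside the viewport.
static int place(int cursor, int size, int viewport, int margin) {
    int needed = 10 + margin + size;
    int roomAfter = viewport - cursor;
    int pos = (roomAfter > needed) ? cursor + 10 : cursor - 10 - size;
    // Clamp to [margin, viewport - size - margin].
    return Math.max(margin, Math.min(pos, viewport - size - margin));
}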
flink_TemplateUtils_asFunctionTemplatesForProcedure
/** Converts {@link ProcedureHint}s to {@link FunctionTemplate}. */
static Set<FunctionTemplate> asFunctionTemplatesForProcedure(
        DataTypeFactory typeFactory, Set<ProcedureHint> hints) {
    return hints.stream()
            .map(
                    hint -> {
                        try {
                            return FunctionTemplate.fromAnnotation(typeFactory, hint);
                        } catch (Throwable t) {
                            throw extractionError(t, "Error in procedure hint annotation.");
                        }
                    })
            .collect(Collectors.toCollection(LinkedHashSet::new));
}
3.68
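The snippet uses a common idiom for mapping inside a stream when the mapper can fail: catch per element, rethrow with context, and collect into a LinkedHashSet to preserve encounter order. A generic sketch of the idiom (all names hypothetical):

import java.util.LinkedHashSet;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

// Map each element, wrapping any per-element failure with context;
// LinkedHashSet keeps the input's encounter order.
static <A, B> Set<B> mapOrdered(Set<A> input, Function<A, B> mapper) {
    return input.stream()
            .map(a -> {
                try {
                    return mapper.apply(a);
                } catch (Throwable t) {
                    throw new IllegalStateException("Error mapping " + a, t);
                }
            })
            .collect(Collectors.toCollection(LinkedHashSet::new));
}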
framework_AbstractContainer_getPropertySetChangeListeners
/**
 * Returns the property set change listener collection. For internal use
 * only.
 */
protected Collection<Container.PropertySetChangeListener> getPropertySetChangeListeners() {
    return propertySetChangeListeners;
}
3.68
framework_Tree_setItemIconAlternateText
/**
 * Set the alternate text for an item.
 *
 * Used when the item has an icon.
 *
 * @param itemId
 *            the id of the item whose icon the alternate text is set for
 * @param altText
 *            the alternative text for the icon
 */
public void setItemIconAlternateText(Object itemId, String altText) {
    if (itemId != null) {
        if (altText == null) {
            throw new IllegalArgumentException(NULL_ALT_EXCEPTION_MESSAGE);
        } else {
            itemIconAlts.put(itemId, altText);
        }
    }
}
3.68
hadoop_SnappyCompressor_setDictionary
/**
 * Does nothing.
 */
@Override
public void setDictionary(byte[] b, int off, int len) {
    // do nothing
}
3.68
hadoop_StagingCommitter_useUniqueFilenames
/**
 * Is this committer using unique filenames?
 * @return true if unique filenames are used.
 */
public Boolean useUniqueFilenames() {
    return uniqueFilenames;
}
3.68
pulsar_PulsarSaslServer_getAuthorizationID
/**
 * Reports the authorization ID in effect for the client of this session.
 * This method can only be called if isComplete() returns true.
 * @return The authorization ID of the client.
 * @exception IllegalStateException if this authentication session has not completed
 */
public String getAuthorizationID() throws IllegalStateException {
    return saslServer.getAuthorizationID();
}
3.68
hadoop_Quota_eachByStorageType
/**
 * Invoke the consumer for each storage type.
 * @param consumer the function consuming the storage type.
 */
public static void eachByStorageType(Consumer<StorageType> consumer) {
    for (StorageType type : StorageType.values()) {
        consumer.accept(type);
    }
}
3.68
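Usage is just supplying a Consumer. A self-contained sketch with a simplified stand-in enum, since the real StorageType lives in HDFS:

import java.util.function.Consumer;

public class QuotaSketch {
    // Simplified stand-in for the HDFS StorageType enum.
    enum StorageType { RAM_DISK, SSD, DISK, ARCHIVE }

    static void eachByStorageType(Consumer<StorageType> consumer) {
        for (StorageType type : StorageType.values()) {
            consumer.accept(type);
        }
    }

    public static void main(String[] args) {
        // Any side-effecting consumer works; here we just print each type.
        eachByStorageType(type -> System.out.println("checking quota for " + type));
    }
}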
morf_SqlDialect_getSqlForLeast
/**
 * Converts the least function into SQL.
 *
 * @param function the function details
 * @return a string representation of the SQL
 */
protected String getSqlForLeast(Function function) {
    return getLeastFunctionName() + '('
        + Joiner.on(", ").join(function.getArguments().stream().map(f -> getSqlFrom(f)).iterator())
        + ')';
}
3.68
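Outside the dialect class the render step is simply a joined argument list. A standalone sketch using java.util.StringJoiner in place of Guava's Joiner; the LEAST literal stands in for getLeastFunctionName():

import java.util.List;
import java.util.StringJoiner;

// Renders e.g. LEAST(a, b, c) from already-rendered argument SQL.
static String leastSql(List<String> renderedArgs) {
    StringJoiner args = new StringJoiner(", ", "LEAST(", ")");
    renderedArgs.forEach(args::add);
    return args.toString();
}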
flink_MemorySegment_putShortBigEndian
/**
 * Writes the given short integer value (16 bit, 2 bytes) to the given position in big-endian
 * byte order. This method's speed depends on the system's native byte order, and it is possibly
 * slower than {@link #putShort(int, short)}. For most cases (such as transient storage in
 * memory or serialization for I/O and network), it suffices to know that the byte order in
 * which the value is written is the same as the one in which it is read, and {@link
 * #putShort(int, short)} is the preferable choice.
 *
 * @param index The position at which the value will be written.
 * @param value The short value to be written.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 2.
 */
public void putShortBigEndian(int index, short value) {
    if (LITTLE_ENDIAN) {
        putShort(index, Short.reverseBytes(value));
    } else {
        putShort(index, value);
    }
}
3.68
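The point of the conditional swap is that one branch plus Short.reverseBytes yields a fixed big-endian layout on any host. The same trick with a plain ByteBuffer kept in native order; this helper is illustrative, not a Flink API:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// The buffer writes in whatever order it was configured with; swapping the
// bytes first on little-endian hosts makes the stored layout big-endian
// on every platform.
static void putShortBigEndianNative(ByteBuffer nativeOrderBuf, int index, short value) {
    boolean littleEndian = nativeOrderBuf.order() == ByteOrder.LITTLE_ENDIAN;
    nativeOrderBuf.putShort(index, littleEndian ? Short.reverseBytes(value) : value);
}

// Usage: ByteBuffer buf = ByteBuffer.allocate(2).order(ByteOrder.nativeOrder());
//        putShortBigEndianNative(buf, 0, (short) 0x1234); // stores 0x12, 0x34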
hbase_ClusterStatusListener_isDeadServer
/**
 * Check if we know if a server is dead.
 * @param sn the server name to check.
 * @return true if we know for sure that the server is dead, false otherwise.
 */
public boolean isDeadServer(ServerName sn) {
  if (sn.getStartcode() <= 0) {
    return false;
  }

  for (ServerName dead : deadServers) {
    if (
      dead.getStartcode() >= sn.getStartcode() && dead.getPort() == sn.getPort()
        && dead.getHostname().equals(sn.getHostname())
    ) {
      return true;
    }
  }

  return false;
}
3.68
hadoop_DefaultNoHARMFailoverProxyProvider_close
/**
 * Close the current proxy.
 * @throws IOException if an I/O error occurs.
 */
@Override
public void close() throws IOException {
    RPC.stopProxy(proxy);
}
3.68
zxing_CameraManager_startPreview
/**
 * Asks the camera hardware to begin drawing preview frames to the screen.
 */
public synchronized void startPreview() {
    OpenCamera theCamera = camera;
    if (theCamera != null && !previewing) {
        theCamera.getCamera().startPreview();
        previewing = true;
        autoFocusManager = new AutoFocusManager(context, theCamera.getCamera());
    }
}
3.68
framework_Table_unregisterComponent
/**
 * This method cleans up a Component that has been generated when Table is
 * in editable mode. The component needs to be detached from its parent and
 * if it is a field, it needs to be detached from its property data source
 * in order to allow garbage collection to take care of removing the unused
 * component from memory.
 *
 * Override this method and getPropertyValue(Object, Object, Property) with
 * custom logic if you need to deal with buffered fields.
 *
 * @see #getPropertyValue(Object, Object, Property)
 *
 * @param component
 *            component that should be unregistered.
 */
protected void unregisterComponent(Component component) {
    getLogger().log(Level.FINEST, "Unregistered {0}: {1}",
            new Object[] { component.getClass().getSimpleName(),
                    component.getCaption() });
    component.setParent(null);
    /*
     * Also remove property data sources to unregister listeners keeping the
     * fields in memory.
     */
    if (component instanceof Field) {
        Field<?> field = (Field<?>) component;
        Property<?> associatedProperty = associatedProperties
                .remove(component);
        if (associatedProperty != null
                && field.getPropertyDataSource() == associatedProperty) {
            // Remove the property data source only if it's the one we
            // added in getPropertyValue
            field.setPropertyDataSource(null);
        }
    }
}
3.68
hbase_SnapshotManifest_getRegionNameFromManifest
/**
 * Extract the region encoded name from the region manifest
 */
static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) {
  byte[] regionName = RegionInfo.createRegionName(
    ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()),
    manifest.getRegionInfo().getStartKey().toByteArray(),
    manifest.getRegionInfo().getRegionId(), true);
  return RegionInfo.encodeRegionName(regionName);
}
3.68
framework_Table_removeColumnCollapseListener
/**
 * Removes a column collapse listener from the Table.
 *
 * @since 7.6
 * @param listener
 *            The listener to remove
 */
public void removeColumnCollapseListener(ColumnCollapseListener listener) {
    removeListener(TableConstants.COLUMN_COLLAPSE_EVENT_ID,
            ColumnCollapseEvent.class, listener);
}
3.68
hadoop_OBSCommonUtils_getMultipartSizeProperty
/**
 * Get a size property from the configuration: this property must be at least
 * equal to {@link OBSConstants#MULTIPART_MIN_SIZE}. If it is too small, it is
 * rounded up to that minimum, and a warning printed.
 *
 * @param conf     configuration
 * @param property property name
 * @param defVal   default value
 * @return the value, guaranteed to be above the minimum size
 */
public static long getMultipartSizeProperty(final Configuration conf,
    final String property, final long defVal) {
    long partSize = conf.getLongBytes(property, defVal);
    if (partSize < OBSConstants.MULTIPART_MIN_SIZE) {
        LOG.warn("{} must be at least 5 MB; configured value is {}",
            property, partSize);
        partSize = OBSConstants.MULTIPART_MIN_SIZE;
    }
    return partSize;
}
3.68
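The guard generalizes to any "floor with a warning" configuration read. A standalone sketch with the minimum passed in explicitly; the property name in the usage comment is illustrative:

// Clamp a configured size to a minimum, warning instead of failing.
static long atLeast(String property, long configured, long minimum) {
    if (configured < minimum) {
        System.err.printf("%s must be at least %d bytes; configured value is %d%n",
                property, minimum, configured);
        return minimum;
    }
    return configured;
}

// e.g. atLeast("fs.obs.multipart.size", parsedValue, 5L * 1024 * 1024)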
hadoop_YarnConfigurationStore_getUser
/**
 * Get user who requested configuration change.
 * @return user who requested configuration change
 */
public String getUser() {
    return user;
}
3.68
framework_CalendarTest_createCalendarEventPopup
/* Initializes a modal window to edit schedule event. */
private void createCalendarEventPopup() {
    VerticalLayout layout = new VerticalLayout();
    // layout.setMargin(true);
    layout.setSpacing(true);

    scheduleEventPopup = new Window(null, layout);
    scheduleEventPopup.setWidth("300px");
    scheduleEventPopup.setModal(true);
    scheduleEventPopup.center();

    scheduleEventFieldLayout.addStyleName(ValoTheme.FORMLAYOUT_LIGHT);
    scheduleEventFieldLayout.setMargin(false);
    layout.addComponent(scheduleEventFieldLayout);

    applyEventButton = new Button("Apply", event -> {
        try {
            commitCalendarEvent();
        } catch (CommitException | ValidationException e) {
            e.printStackTrace();
        }
    });
    applyEventButton.addStyleName(ValoTheme.BUTTON_PRIMARY);
    Button cancel = new Button("Cancel", event -> discardCalendarEvent());

    deleteEventButton = new Button("Delete", event -> deleteCalendarEvent());
    deleteEventButton.addStyleName(ValoTheme.BUTTON_BORDERLESS);
    scheduleEventPopup.addCloseListener(event -> discardCalendarEvent());

    HorizontalLayout buttons = new HorizontalLayout();
    buttons.addStyleName(ValoTheme.WINDOW_BOTTOM_TOOLBAR);
    buttons.setWidth("100%");
    buttons.setSpacing(true);
    buttons.addComponent(deleteEventButton);
    buttons.addComponent(applyEventButton);
    buttons.setExpandRatio(applyEventButton, 1);
    buttons.setComponentAlignment(applyEventButton, Alignment.TOP_RIGHT);
    buttons.addComponent(cancel);
    layout.addComponent(buttons);
}
3.68
druid_RandomDataSourceValidateThread_logSuccessTime
/**
 * Provide a static method to record the last success time of a DataSource
 */
public static void logSuccessTime(DataSourceProxy dataSource) {
    if (dataSource != null && !StringUtils.isEmpty(dataSource.getName())) {
        String name = dataSource.getName();
        long time = System.currentTimeMillis();
        LOG.debug("Log successTime [" + time + "] for " + name);
        successTimes.put(name, time);
    }
}
3.68
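The pattern is a static concurrent map keyed by data source name, overwritten on every success. A minimal self-contained version, including a read side the snippet implies but does not show; millisSinceLastSuccess is hypothetical:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class SuccessTimes {
    private static final Map<String, Long> SUCCESS_TIMES = new ConcurrentHashMap<>();

    // Overwrite the last-success timestamp for this data source name.
    static void logSuccessTime(String dataSourceName) {
        if (dataSourceName != null && !dataSourceName.isEmpty()) {
            SUCCESS_TIMES.put(dataSourceName, System.currentTimeMillis());
        }
    }

    // Hypothetical read side: how stale is this data source?
    static long millisSinceLastSuccess(String name) {
        Long last = SUCCESS_TIMES.get(name);
        return last == null ? Long.MAX_VALUE : System.currentTimeMillis() - last;
    }
}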
flink_Configuration_addAllToProperties
/** Adds all entries in this {@code Configuration} to the given {@link Properties}. */
public void addAllToProperties(Properties props) {
    synchronized (this.confData) {
        for (Map.Entry<String, Object> entry : this.confData.entrySet()) {
            props.put(entry.getKey(), entry.getValue());
        }
    }
}
3.68
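A plausible call site, assuming the Flink Configuration API shown above; the key and value are illustrative only:

import java.util.Properties;
import org.apache.flink.configuration.Configuration;

public class AddAllToPropertiesDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setString("mykey", "myvalue"); // illustrative key/value

        Properties props = new Properties();
        conf.addAllToProperties(props);
        System.out.println(props); // {mykey=myvalue}
    }
}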
hudi_HoodieCombineHiveInputFormat_inputFormatClassName
/**
 * Returns the inputFormat class name for this chunk.
 */
public String inputFormatClassName() {
    return inputFormatClassName;
}
3.68
querydsl_SimpleExpression_isNull
/**
 * Create a {@code this is null} expression
 *
 * @return this is null
 */
public BooleanExpression isNull() {
    if (isnull == null) {
        isnull = Expressions.booleanOperation(Ops.IS_NULL, mixin);
    }
    return isnull;
}
3.68
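The unsynchronized single-field cache is safe here because the derived expression is immutable and every computation yields an equivalent object. The same idiom in a generic form (all names hypothetical):

import java.util.function.Supplier;

// Lazily build and cache an immutable derived value. Races are benign:
// at worst two threads compute equivalent immutable objects and one wins.
class Cached<T> {
    private T value;
    private final Supplier<T> factory;

    Cached(Supplier<T> factory) {
        this.factory = factory;
    }

    T get() {
        if (value == null) {
            value = factory.get();
        }
        return value;
    }
}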
hadoop_TFile_compare
/**
 * Provide a customized comparator for Entries. This is useful if we
 * have a collection of Entry objects. However, if the Entry objects
 * come from different TFiles, users must ensure that those TFiles share
 * the same RawComparator.
 */
@Override
public int compare(Scanner.Entry o1, Scanner.Entry o2) {
    return comparator.compare(o1.getKeyBuffer(), 0, o1.getKeyLength(),
        o2.getKeyBuffer(), 0, o2.getKeyLength());
}
3.68
hadoop_CopyOutputFormat_checkOutputSpecs
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();

    if (getCommitDirectory(conf) == null) {
        throw new IllegalStateException("Commit directory not configured");
    }

    Path workingPath = getWorkingDirectory(conf);
    if (workingPath == null) {
        throw new IllegalStateException("Working directory not configured");
    }

    // get delegation token for outDir's file system
    TokenCache.obtainTokensForNamenodes(context.getCredentials(),
        new Path[] {workingPath}, conf);
}
3.68
hbase_AbstractSaslClientAuthenticationProvider_hashCode
/**
 * Provides a hash code to identify this AuthenticationProvider among others. These two fields
 * must be unique to ensure that authentication methods are clearly separated.
 */
@Override
public final int hashCode() {
    return getSaslAuthMethod().hashCode();
}
3.68
flink_BufferBuilder_append
/**
 * Appends as much data as possible from {@code source}. Not everything might be copied if
 * there is not enough space in the underlying {@link MemorySegment}.
 *
 * @return number of copied bytes
 */
public int append(ByteBuffer source) {
    checkState(!isFinished());

    int needed = source.remaining();
    int available = getMaxCapacity() - positionMarker.getCached();
    int toCopy = Math.min(needed, available);

    memorySegment.put(positionMarker.getCached(), source, toCopy);
    positionMarker.move(toCopy);
    return toCopy;
}
3.68
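Because append may copy only part of the source, callers loop until the source drains, sealing full builders along the way. A hedged sketch of that driver loop; newBuilder stands in for Flink's real builder allocation, which goes through the buffer pool:

import java.nio.ByteBuffer;
import java.util.function.Supplier;

// Hypothetical driver loop around the append contract above: if data is
// left after an append, the builder is full, so seal it and continue
// with a fresh one.
static void writeAll(ByteBuffer source, BufferBuilder builder,
        Supplier<BufferBuilder> newBuilder) {
    while (source.hasRemaining()) {
        builder.append(source); // copies as much as fits
        if (source.hasRemaining()) {
            builder.finish(); // seal the full builder
            builder = newBuilder.get();
        }
    }
}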