name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
hadoop_NullGroupsMapping_getGroupsSet | /**
 * Get all group memberships of a given user.
 * Returns an empty set for a non-existing user.
*
* @param user User's name
* @return set of group memberships of user
* @throws IOException raised on errors performing I/O.
*/
@Override
public Set<String> getGroupsSet(String user) throws IOException {
return Collections.emptySet();
} | 3.68 |
open-banking-gateway_EncryptionKeySerde_read | /**
* Read symmetric key with initialization vector from input stream.
* @param is Stream with key
* @return Read key
*/
@SneakyThrows
public SecretKeyWithIv read(InputStream is) {
SecretKeyWithIvContainer container = mapper.readValue(is, SecretKeyWithIvContainer.class);
return new SecretKeyWithIv(
container.getIv(),
new SecretKeySpec(container.getEncoded(), container.getAlgo())
);
} | 3.68 |
hbase_BlockIOUtils_readWithExtra | /**
 * Read bytes into the ByteBuffers directly; the buffers will contain either necessaryLen + extraLen
 * bytes or only necessaryLen bytes, depending on how many bytes the last read returned.
* @param buf the destination {@link ByteBuff}.
* @param dis input stream to read.
* @param necessaryLen bytes which we must read
* @param extraLen bytes which we may read
 * @return true if the extraLen bytes have been fully read into our ByteBuffers, false if the
 *         extraLen bytes have not been read yet.
* @throws IOException if failed to read the necessary bytes.
*/
public static boolean readWithExtra(ByteBuff buf, FSDataInputStream dis, int necessaryLen,
int extraLen) throws IOException {
if (!isByteBufferReadable(dis)) {
// If InputStream does not support the ByteBuffer read, just read to heap and copy bytes to
// the destination ByteBuff.
byte[] heapBuf = new byte[necessaryLen + extraLen];
boolean ret = readWithExtraOnHeap(dis, heapBuf, 0, necessaryLen, extraLen);
copyToByteBuff(heapBuf, 0, heapBuf.length, buf);
return ret;
}
int directBytesRead = 0, heapBytesRead = 0;
ByteBuffer[] buffers = buf.nioByteBuffers();
int bytesRead = 0;
int remain = necessaryLen + extraLen;
int idx = 0;
ByteBuffer cur = buffers[idx];
try {
while (bytesRead < necessaryLen) {
while (!cur.hasRemaining()) {
if (++idx >= buffers.length) {
throw new IOException(
"Not enough ByteBuffers to read the reminding " + remain + "bytes");
}
cur = buffers[idx];
}
cur.limit(cur.position() + Math.min(remain, cur.remaining()));
int ret = dis.read(cur);
if (ret < 0) {
throw new IOException("Premature EOF from inputStream (read returned " + ret
+ ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen
+ " extra bytes, successfully read " + bytesRead);
}
bytesRead += ret;
remain -= ret;
if (cur.isDirect()) {
directBytesRead += ret;
} else {
heapBytesRead += ret;
}
}
} finally {
final Span span = Span.current();
final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
annotateBytesRead(attributesBuilder, directBytesRead, heapBytesRead);
span.addEvent("BlockIOUtils.readWithExtra", attributesBuilder.build());
}
return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
} | 3.68 |
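The readWithExtra snippet above implements a "read at least necessaryLen, opportunistically up to extraLen more" loop over HBase's ByteBuff abstraction. Below is a minimal, self-contained sketch of the same pattern over a plain InputStream and byte array; the class name and the simplified signature are illustrative only, not part of HBase.

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReadWithExtraSketch {

    /**
     * Reads at least necessaryLen bytes (throwing on premature EOF) and opportunistically
     * up to extraLen more. Returns true only if all necessaryLen + extraLen bytes were read.
     */
    static boolean readWithExtra(InputStream in, byte[] buf, int necessaryLen, int extraLen)
            throws IOException {
        int bytesRead = 0;
        int target = necessaryLen + extraLen;
        while (bytesRead < necessaryLen) {
            int ret = in.read(buf, bytesRead, target - bytesRead);
            if (ret < 0) {
                throw new IOException("Premature EOF: read " + bytesRead
                        + " bytes, needed " + necessaryLen);
            }
            bytesRead += ret;
        }
        return bytesRead == target;
    }

    public static void main(String[] args) throws IOException {
        // 10 bytes available, 8 necessary, 4 extra: the necessary bytes are read,
        // but not all extra bytes, so this prints false.
        byte[] source = new byte[10];
        boolean gotExtra = readWithExtra(new ByteArrayInputStream(source), new byte[12], 8, 4);
        System.out.println("read all extra bytes: " + gotExtra);
    }
}
```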
flink_SlotSharingExecutionSlotAllocator_allocateSlotsForVertices | /**
* Creates logical {@link SlotExecutionVertexAssignment}s from physical shared slots.
*
* <p>The allocation has the following steps:
*
* <ol>
* <li>Map the executions to {@link ExecutionSlotSharingGroup}s using {@link
* SlotSharingStrategy}
 *   <li>Check which {@link ExecutionSlotSharingGroup}s already have a shared slot
* <li>For all involved {@link ExecutionSlotSharingGroup}s which do not have a shared slot
* yet:
* <li>Create a {@link SlotProfile} future using {@link SharedSlotProfileRetriever} and then
* <li>Allocate a physical slot from the {@link PhysicalSlotProvider}
* <li>Create a shared slot based on the returned physical slot futures
* <li>Allocate logical slot futures for the executions from all corresponding shared slots.
* <li>If a physical slot request fails, associated logical slot requests are canceled within
* the shared slot
 *   <li>Generate {@link SlotExecutionVertexAssignment}s based on the logical slot futures and
 *       return the results.
* </ol>
*
* @param executionVertexIds Execution vertices to allocate slots for
*/
private List<SlotExecutionVertexAssignment> allocateSlotsForVertices(
List<ExecutionVertexID> executionVertexIds) {
SharedSlotProfileRetriever sharedSlotProfileRetriever =
sharedSlotProfileRetrieverFactory.createFromBulk(new HashSet<>(executionVertexIds));
Map<ExecutionSlotSharingGroup, List<ExecutionVertexID>> executionsByGroup =
executionVertexIds.stream()
.collect(
Collectors.groupingBy(
slotSharingStrategy::getExecutionSlotSharingGroup));
Map<ExecutionSlotSharingGroup, SharedSlot> slots = new HashMap<>(executionsByGroup.size());
Set<ExecutionSlotSharingGroup> groupsToAssign = new HashSet<>(executionsByGroup.keySet());
Map<ExecutionSlotSharingGroup, SharedSlot> assignedSlots =
tryAssignExistingSharedSlots(groupsToAssign);
slots.putAll(assignedSlots);
groupsToAssign.removeAll(assignedSlots.keySet());
if (!groupsToAssign.isEmpty()) {
Map<ExecutionSlotSharingGroup, SharedSlot> allocatedSlots =
allocateSharedSlots(groupsToAssign, sharedSlotProfileRetriever);
slots.putAll(allocatedSlots);
groupsToAssign.removeAll(allocatedSlots.keySet());
Preconditions.checkState(groupsToAssign.isEmpty());
}
Map<ExecutionVertexID, SlotExecutionVertexAssignment> assignments =
allocateLogicalSlotsFromSharedSlots(slots, executionsByGroup);
// we need to pass the slots map to the createBulk method instead of using the allocator's
// 'sharedSlots'
// because if any physical slots have already failed, their shared slots have been removed
// from the allocator's 'sharedSlots' by failed logical slots.
SharingPhysicalSlotRequestBulk bulk = createBulk(slots, executionsByGroup);
bulkChecker.schedulePendingRequestBulkTimeoutCheck(bulk, allocationTimeout);
return executionVertexIds.stream().map(assignments::get).collect(Collectors.toList());
} | 3.68 |
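The first step of allocateSlotsForVertices is the Collectors.groupingBy call that buckets execution vertices by their slot sharing group. The standalone sketch below shows just that grouping step, with plain strings standing in for ExecutionVertexID and SlotSharingStrategy; all names in it are hypothetical.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class SlotSharingGroupingSketch {
    public static void main(String[] args) {
        // Hypothetical vertex IDs; in Flink these would be ExecutionVertexIDs.
        List<String> vertices = Arrays.asList("map-0", "map-1", "sink-0");

        // Stand-in for SlotSharingStrategy#getExecutionSlotSharingGroup: group by operator name.
        Function<String, String> sharingGroupOf = v -> v.substring(0, v.indexOf('-'));

        Map<String, List<String>> executionsByGroup =
                vertices.stream().collect(Collectors.groupingBy(sharingGroupOf));

        // Prints {map=[map-0, map-1], sink=[sink-0]} (map iteration order is not guaranteed).
        System.out.println(executionsByGroup);
    }
}
```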
framework_TabSheet_removeSelectedTabChangeListener | /**
* Removes a tab selection listener.
*
* @param listener
* the Listener to be removed.
*
 * @deprecated As of 8.0, replaced by {@link Registration#remove()} in the
 *             registration object returned from
 *             {@link #addSelectedTabChangeListener(SelectedTabChangeListener)}.
*/
@Deprecated
public void removeSelectedTabChangeListener(
SelectedTabChangeListener listener) {
removeListener(SelectedTabChangeEvent.class, listener,
SELECTED_TAB_CHANGE_METHOD);
} | 3.68 |
hadoop_RequestFactoryImpl_withRequestPreparer | /**
* Callback to prepare requests.
*
* @param value new value
* @return the builder
*/
public RequestFactoryBuilder withRequestPreparer(
final PrepareRequest value) {
this.requestPreparer = value;
return this;
} | 3.68 |
hadoop_TimelineEntity_getRelatedEntitiesJAXB | // Required by JAXB
@Private
@XmlElement(name = "relatedentities")
public HashMap<String, Set<String>> getRelatedEntitiesJAXB() {
return relatedEntities;
} | 3.68 |
framework_VTree_deselectAll | /**
* Deselects all items in the tree.
*/
public void deselectAll() {
for (String key : selectedIds) {
TreeNode node = keyToNode.get(key);
if (node != null) {
node.setSelected(false);
}
}
selectedIds.clear();
selectionHasChanged = true;
} | 3.68 |
hbase_DynamicMetricsRegistry_getCounter | /**
 * Get a MetricMutableCounterLong from the storage. If it is not there, atomically put it.
* @param counterName Name of the counter to get
* @param potentialStartingValue starting value if we have to create a new counter
*/
public MutableFastCounter getCounter(String counterName, long potentialStartingValue) {
// See getGauge for description on how this works.
MutableMetric counter = metricsMap.get(counterName);
if (counter == null) {
MutableFastCounter newCounter =
new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue);
counter = metricsMap.putIfAbsent(counterName, newCounter);
if (counter == null) {
return newCounter;
}
}
if (!(counter instanceof MutableCounter)) {
throw new MetricsException("Metric already exists in registry for metric name: " + counterName
+ " and not of type MutableCounter");
}
return (MutableFastCounter) counter;
} | 3.68 |
hudi_FormatUtils_getRawValueWithAltKeys | /**
* Gets the raw value for a {@link ConfigProperty} config from Flink configuration. The key and
* alternative keys are used to fetch the config.
*
* @param flinkConf Configs in Flink {@link org.apache.flink.configuration.Configuration}.
* @param configProperty {@link ConfigProperty} config to fetch.
* @return {@link Option} of value if the config exists; empty {@link Option} otherwise.
*/
public static Option<String> getRawValueWithAltKeys(org.apache.flink.configuration.Configuration flinkConf,
ConfigProperty<?> configProperty) {
if (flinkConf.containsKey(configProperty.key())) {
return Option.ofNullable(flinkConf.getString(configProperty.key(), ""));
}
for (String alternative : configProperty.getAlternatives()) {
if (flinkConf.containsKey(alternative)) {
return Option.ofNullable(flinkConf.getString(alternative, ""));
}
}
return Option.empty();
} | 3.68 |
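getRawValueWithAltKeys is a "primary key first, then alternatives" lookup. A minimal sketch of the same idea over a plain Map follows; the config keys are made up, and Java 9+ is assumed for Map.of/List.of.

```java
import java.util.List;
import java.util.Map;
import java.util.Optional;

public class AltKeyLookupSketch {

    /** Looks up a config value by its primary key first, then by each alternative key in order. */
    static Optional<String> getWithAltKeys(Map<String, String> conf, String key, List<String> altKeys) {
        if (conf.containsKey(key)) {
            return Optional.ofNullable(conf.get(key));
        }
        for (String alt : altKeys) {
            if (conf.containsKey(alt)) {
                return Optional.ofNullable(conf.get(alt));
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        // Only the (made-up) deprecated key is present, so the lookup falls through to it.
        Map<String, String> conf = Map.of("example.table.name.deprecated", "trips");
        System.out.println(getWithAltKeys(conf, "example.table.name",
                List.of("example.table.name.deprecated"))); // Optional[trips]
    }
}
```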
hadoop_BlockStorageMovementNeeded_removeItemTrackInfo | /**
 * Decrease the pending child count for a directory once one file's blocks have been moved
 * successfully. Remove the SPS xAttr if the pending child count reaches zero.
*/
public synchronized void removeItemTrackInfo(ItemInfo trackInfo,
boolean isSuccess) throws IOException {
if (trackInfo.isDir()) {
// If track is part of some start inode then reduce the pending
// directory work count.
long startId = trackInfo.getStartPath();
if (!ctxt.isFileExist(startId)) {
// directory deleted just remove it.
this.pendingWorkForDirectory.remove(startId);
} else {
DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
if (pendingWork != null) {
pendingWork.decrementPendingWorkCount();
if (pendingWork.isDirWorkDone()) {
ctxt.removeSPSHint(startId);
pendingWorkForDirectory.remove(startId);
}
}
}
} else {
// Remove xAttr if trackID doesn't exist in
// storageMovementAttemptedItems or file policy satisfied.
ctxt.removeSPSHint(trackInfo.getFile());
}
} | 3.68 |
hmily_DataSourceMetaDataLoader_load | /**
* Load data source meta data.
*
* @param dataSource data source
* @param databaseType database type
* @return datasource metadata
* @throws SQLException SQL exception
*/
public static DataSourceMetaData load(final DataSource dataSource, final DatabaseType databaseType) throws SQLException {
DataSourceMetaData result = new DataSourceMetaData();
try (MetaDataConnectionAdapter connectionAdapter = new MetaDataConnectionAdapter(databaseType, dataSource.getConnection())) {
for (String each : loadAllTableNames(connectionAdapter)) {
Optional<TableMetaData> tableMetaData = TableMetaDataLoader.load(connectionAdapter, each, databaseType);
tableMetaData.ifPresent(meta -> result.getTableMetaDataMap().put(each, meta));
}
}
return result;
} | 3.68 |
framework_Panel_setTabIndex | /**
* {@inheritDoc}
*/
@Override
public void setTabIndex(int tabIndex) {
getState().tabIndex = tabIndex;
} | 3.68 |
flink_RpcInvocation_convertRpcToString | /**
 * Converts an RPC call into its string representation.
 *
 * @param declaringClassName the name of the class declaring the specified RPC
 * @param methodName the method name of the RPC
 * @param parameterTypes the parameter types of the RPC
* @return string representation of the rpc
*/
static String convertRpcToString(
String declaringClassName, String methodName, Class<?>[] parameterTypes) {
final StringBuilder paramTypeStringBuilder = new StringBuilder(parameterTypes.length * 5);
if (parameterTypes.length > 0) {
paramTypeStringBuilder.append(parameterTypes[0].getSimpleName());
for (int i = 1; i < parameterTypes.length; i++) {
paramTypeStringBuilder.append(", ").append(parameterTypes[i].getSimpleName());
}
}
return declaringClassName + '.' + methodName + '(' + paramTypeStringBuilder + ')';
} | 3.68 |
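convertRpcToString simply joins the simple parameter type names into a "Class.method(Type1, Type2)" string. The sketch below reproduces the same formatting from a reflective java.lang.reflect.Method, purely as an illustration; in Flink the inputs come from the RPC invocation itself rather than reflection.

```java
import java.lang.reflect.Method;
import java.util.StringJoiner;

public class RpcToStringSketch {

    /** Builds the same "Class.method(Type1, Type2)" form, but from a reflective Method. */
    static String describe(Method m) {
        StringJoiner params = new StringJoiner(", ");
        for (Class<?> p : m.getParameterTypes()) {
            params.add(p.getSimpleName());
        }
        return m.getDeclaringClass().getSimpleName() + '.' + m.getName() + '(' + params + ')';
    }

    public static void main(String[] args) throws NoSuchMethodException {
        Method m = String.class.getMethod("substring", int.class, int.class);
        System.out.println(describe(m)); // String.substring(int, int)
    }
}
```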
flink_PartitionedFileWriter_writeBuffers | /**
* Writes a list of {@link Buffer}s to this {@link PartitionedFile}. It guarantees that after
* the return of this method, the target buffers can be released. In a data region, all data of
* the same subpartition must be written together.
*
* <p>Note: The caller is responsible for recycling the target buffers and releasing the failed
* {@link PartitionedFile} if any exception occurs.
*/
public void writeBuffers(List<BufferWithChannel> bufferWithChannels) throws IOException {
checkState(!isFinished, "File writer is already finished.");
checkState(!isClosed, "File writer is already closed.");
if (bufferWithChannels.isEmpty()) {
return;
}
numBuffers += bufferWithChannels.size();
long expectedBytes;
ByteBuffer[] bufferWithHeaders = new ByteBuffer[2 * bufferWithChannels.size()];
if (isBroadcastRegion) {
expectedBytes = collectBroadcastBuffers(bufferWithChannels, bufferWithHeaders);
} else {
expectedBytes = collectUnicastBuffers(bufferWithChannels, bufferWithHeaders);
}
totalBytesWritten += expectedBytes;
BufferReaderWriterUtil.writeBuffers(dataFileChannel, expectedBytes, bufferWithHeaders);
} | 3.68 |
framework_VAcceptCriterion_accept | /**
 * Checks if the current drag event has a valid drop target and whether the target accepts
 * the transferable. If the drop target is valid, the callback is used.
*
* @param drag
* the drag event
* @param configuration
* accept criterion UIDL
* @param callback
* the callback that handles acceptance if the target is valid
*/
public void accept(final VDragEvent drag, UIDL configuration,
final VAcceptCallback callback) {
if (needsServerSideCheck(drag, configuration)) {
VDragEventServerCallback acceptCallback = (accepted, response) -> {
if (accepted) {
callback.accepted(drag);
}
};
VDragAndDropManager.get().visitServer(acceptCallback);
} else {
boolean validates = accept(drag, configuration);
if (validates) {
callback.accepted(drag);
}
}
} | 3.68 |
hadoop_StageConfig_withJobIdSource | /**
* Set the Job ID source.
* @param value new value
* @return this
*/
public StageConfig withJobIdSource(final String value) {
checkOpen();
jobIdSource = value;
return this;
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_getJobInstance | /**
* Get job instance.
*
* @param jobName job name
* @return job instance
*/
public JobInstance getJobInstance(final String jobName) {
return jobInstanceMap.get(jobName);
} | 3.68 |
querydsl_JPAExpressions_max | /**
* Create a max(col) expression
*
* @param left collection
* @return max(col)
*/
public static <A extends Comparable<? super A>> ComparableExpression<A> max(CollectionExpression<?,A> left) {
return Expressions.comparableOperation((Class) left.getParameter(0), Ops.QuantOps.MAX_IN_COL, (Expression<?>) left);
} | 3.68 |
hbase_SyncTable_moveToNextBatch | /**
* If there is an open hash batch, complete it and sync if there are diffs. Start a new batch,
 * and seek to read the next source hash.
*/
private void moveToNextBatch(Context context) throws IOException, InterruptedException {
if (targetHasher.isBatchStarted()) {
finishBatchAndCompareHashes(context);
}
targetHasher.startBatch(nextSourceKey);
currentSourceHash = sourceHashReader.getCurrentHash();
findNextKeyHashPair();
} | 3.68 |
hudi_HoodieMultiTableStreamer_populateTableExecutionContextList | // commonProps is passed as a parameter and contains the table-to-config-file mapping
private void populateTableExecutionContextList(TypedProperties properties, String configFolder, FileSystem fs, Config config) throws IOException {
List<String> tablesToBeIngested = getTablesToBeIngested(properties);
logger.info("tables to be ingested via MultiTableDeltaStreamer : " + tablesToBeIngested);
TableExecutionContext executionContext;
for (String table : tablesToBeIngested) {
String[] tableWithDatabase = table.split("\\.");
String database = tableWithDatabase.length > 1 ? tableWithDatabase[0] : "default";
String currentTable = tableWithDatabase.length > 1 ? tableWithDatabase[1] : table;
String configProp = HoodieStreamerConfig.INGESTION_PREFIX + database + Constants.DELIMITER + currentTable + Constants.INGESTION_CONFIG_SUFFIX;
String oldConfigProp = HoodieStreamerConfig.OLD_INGESTION_PREFIX + database + Constants.DELIMITER + currentTable + Constants.INGESTION_CONFIG_SUFFIX;
String configFilePath = getStringWithAltKeys(properties, configProp, oldConfigProp,
Helpers.getDefaultConfigFilePath(configFolder, database, currentTable));
checkIfTableConfigFileExists(configFolder, fs, configFilePath);
TypedProperties tableProperties = UtilHelpers.readConfig(fs.getConf(), new Path(configFilePath), new ArrayList<>()).getProps();
properties.forEach((k, v) -> {
if (tableProperties.get(k) == null) {
tableProperties.setProperty(k.toString(), v.toString());
}
});
final HoodieStreamer.Config cfg = new HoodieStreamer.Config();
//copy all the values from config to cfg
String targetBasePath = resetTarget(config, database, currentTable);
Helpers.deepCopyConfigs(config, cfg);
String overriddenTargetBasePath = getStringWithAltKeys(tableProperties, HoodieStreamerConfig.TARGET_BASE_PATH, true);
cfg.targetBasePath = StringUtils.isNullOrEmpty(overriddenTargetBasePath) ? targetBasePath : overriddenTargetBasePath;
if (cfg.enableMetaSync && StringUtils.isNullOrEmpty(tableProperties.getString(HoodieSyncConfig.META_SYNC_TABLE_NAME.key(), ""))) {
throw new HoodieException("Meta sync table field not provided!");
}
populateTransformerProps(cfg, tableProperties);
populateSchemaProviderProps(cfg, tableProperties);
executionContext = new TableExecutionContext();
executionContext.setProperties(tableProperties);
executionContext.setConfig(cfg);
executionContext.setDatabase(database);
executionContext.setTableName(currentTable);
this.tableExecutionContexts.add(executionContext);
}
} | 3.68 |
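A key detail in populateTableExecutionContextList is the properties.forEach loop that copies every common property the per-table config does not already define, so table-level settings win. A small standalone sketch of that layering with java.util.Properties; the property keys shown are illustrative, not real Hudi config names.

```java
import java.util.Properties;

public class PropertyLayeringSketch {
    public static void main(String[] args) {
        // Common properties shared by all tables (keys are illustrative).
        Properties common = new Properties();
        common.setProperty("example.write.operation", "upsert");
        common.setProperty("example.shuffle.parallelism", "2");

        // Per-table config that overrides one of them.
        Properties tableProps = new Properties();
        tableProps.setProperty("example.write.operation", "bulk_insert");

        // Copy every common property the table config does not already define,
        // mirroring the properties.forEach(...) loop in the method above.
        common.forEach((k, v) -> {
            if (tableProps.get(k) == null) {
                tableProps.setProperty(k.toString(), v.toString());
            }
        });

        System.out.println(tableProps.getProperty("example.write.operation"));    // bulk_insert
        System.out.println(tableProps.getProperty("example.shuffle.parallelism")); // 2
    }
}
```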
hadoop_AzureBlobFileSystem_getAclStatus | /**
* Gets the ACL of a file or directory.
*
* @param path Path to get
* @return AbfsAclStatus describing the ACL of the file or directory
* @throws IOException if an ACL could not be read
*/
@Override
public AclStatus getAclStatus(final Path path) throws IOException {
LOG.debug("AzureBlobFileSystem.getAclStatus path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId,
fileSystemId, FSOperationType.GET_ACL_STATUS, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
throw new UnsupportedOperationException(
"getAclStatus is only supported by storage account with the "
+ "hierarchical namespace enabled.");
}
Path qualifiedPath = makeQualified(path);
try {
return abfsStore.getAclStatus(qualifiedPath, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
return null;
}
} | 3.68 |
framework_Overlay_setFitInWindow | /**
* Sets whether the overlay should be moved or shrunk to fit inside the
* window.
* <p>
* When this is <code>false</code>, the default {@link PopupPanel} behavior
 * is used, which tries to position the popup primarily below and to the
* right of a reference UIObject and, if there is not enough space, above or
* to the left.
* <p>
* When this is <code>true</code>, the popup will be moved up/left in case
* it does not fit on either side. If the popup is larger than the window,
* it will be shrunk to fit and assume that scrolling e.g. using
* <code>overflow:auto</code>, is taken care of by the overlay user.
*
* @since 7.6.6
* @param fitInWindow
* <code>true</code> to ensure that no part of the popup is
* outside the visible view, <code>false</code> to use the
* default {@link PopupPanel} behavior
*/
public void setFitInWindow(boolean fitInWindow) {
this.fitInWindow = fitInWindow;
} | 3.68 |
querydsl_GenericExporter_setGeneratedAnnotationClass | /**
* Set the Generated annotation class. Will default to java {@code @Generated}
*
* @param generatedAnnotationClass the fully qualified class name of the <em>Single-Element Annotation</em> (with {@code String} element) to be used on
* the generated sources, or {@code null} (defaulting to {@code javax.annotation.Generated} or
* {@code javax.annotation.processing.Generated} depending on the java version).
* @see <a href="https://docs.oracle.com/javase/specs/jls/se8/html/jls-9.html#jls-9.7.3">Single-Element Annotation</a>
*/
public void setGeneratedAnnotationClass(@Nullable String generatedAnnotationClass) {
codegenModule.bindInstance(CodegenModule.GENERATED_ANNOTATION_CLASS, GeneratedAnnotationResolver.resolve(generatedAnnotationClass));
} | 3.68 |
framework_VFilterSelect_setSelectionRange | /**
* Overridden to avoid selecting text when text input is disabled.
*/
@Override
public void setSelectionRange(int pos, int length) {
if (textInputEnabled) {
/*
* set selection range with a backwards direction: anchor at the
* back, focus at the front. This means that items that are too
* long to display will display from the start and not the end
* even on Firefox.
*
* We need the JSNI function to set selection range so that we
* can use the optional direction attribute to set the anchor to
* the end and the focus to the start. This makes Firefox work
* the same way as other browsers (#13477)
*/
WidgetUtil.setSelectionRange(getElement(), pos, length,
"backward");
} else {
/*
* Setting the selectionrange for an uneditable textbox leads to
* unwanted behavior when the width of the textbox is narrower
* than the width of the entry: the end of the entry is shown
* instead of the beginning. (see #13477)
*
* To avoid this, we set the caret to the beginning of the line.
*/
super.setSelectionRange(0, 0);
}
} | 3.68 |
flink_WindowsGrouping_advanceWatermarkToTriggerAllWindows | /**
* Advance the watermark to trigger all the possible windows. It is designed to be idempotent.
*/
public void advanceWatermarkToTriggerAllWindows() {
skipEmptyWindow();
advanceWatermark(watermark + windowSize);
} | 3.68 |
flink_CrossOperator_with | /**
* Finalizes a Cross transformation by applying a {@link CrossFunction} to each pair of
* crossed elements.
*
* <p>Each CrossFunction call returns exactly one element.
*
* @param function The CrossFunction that is called for each pair of crossed elements.
* @return An CrossOperator that represents the crossed result DataSet
* @see CrossFunction
* @see DataSet
*/
public <R> CrossOperator<I1, I2, R> with(CrossFunction<I1, I2, R> function) {
if (function == null) {
throw new NullPointerException("Cross function must not be null.");
}
TypeInformation<R> returnType =
TypeExtractor.getCrossReturnTypes(
function,
getInput1().getType(),
getInput2().getType(),
super.getDefaultName(),
true);
return new CrossOperator<I1, I2, R>(
getInput1(),
getInput2(),
clean(function),
returnType,
getCrossHint(),
Utils.getCallLocationName());
} | 3.68 |
hadoop_AzureNativeFileSystemStore_isPageBlobKey | /**
* Checks if the given key in Azure Storage should be stored as a page
* blob instead of block blob.
*/
public boolean isPageBlobKey(String key) {
return isKeyForDirectorySet(key, pageBlobDirs);
} | 3.68 |
hadoop_BufferedIOStatisticsOutputStream_hflush | /**
* If the inner stream is Syncable, flush the buffer and then
* invoke the inner stream's hflush() operation.
*
* Otherwise: throw an exception, unless the stream was constructed with
* {@link #downgradeSyncable} set to true, in which case the stream
* is just flushed.
* @throws IOException IO Problem
* @throws UnsupportedOperationException if the inner class is not syncable
*/
@Override
public void hflush() throws IOException {
if (out instanceof Syncable) {
flush();
((Syncable) out).hflush();
} else {
if (!downgradeSyncable) {
throw new UnsupportedOperationException("hflush not supported by "
+ out);
} else {
flush();
}
}
} | 3.68 |
flink_CompressedSerializedValue_getSize | /** Returns the size of the compressed serialized data. */
public int getSize() {
return getByteArray().length;
} | 3.68 |
graphhopper_Entity_endRecord | /**
* End a row.
* This is just a proxy to the writer, but could be used for hooks in the future.
*/
public void endRecord () throws IOException {
writer.endRecord();
} | 3.68 |
hbase_ByteBuffAllocator_allocate | /**
 * Allocate size bytes from the ByteBufAllocator. Be sure to call {@link ByteBuff#release()} once
 * the buffer is no longer needed, otherwise memory leaks will occur in the NIO ByteBuffer pool.
* @param size to allocate
* @return an ByteBuff with the desired size.
*/
public ByteBuff allocate(int size) {
if (size < 0) {
throw new IllegalArgumentException("size to allocate should >=0");
}
    // If the reservoir is disabled, just allocate from on-heap.
if (!isReservoirEnabled() || size == 0) {
return ByteBuff.wrap(allocateOnHeap(size));
}
    int remainder = size % bufSize;
    int len = size / bufSize + (remainder > 0 ? 1 : 0);
List<ByteBuffer> bbs = new ArrayList<>(len);
// Allocate from ByteBufferPool until the remaining is less than minSizeForReservoirUse or
// reservoir is exhausted.
int remain = size;
while (remain >= minSizeForReservoirUse) {
ByteBuffer bb = this.getBuffer();
if (bb == null) {
break;
}
bbs.add(bb);
remain -= bufSize;
}
int lenFromReservoir = bbs.size();
if (remain > 0) {
// If the last ByteBuffer is too small or the reservoir can not provide more ByteBuffers, we
// just allocate the ByteBuffer from on-heap.
bbs.add(allocateOnHeap(remain));
}
ByteBuff bb;
// we only need a recycler if we successfully pulled from the pool
// this matters for determining whether to add leak detection in RefCnt
if (lenFromReservoir == 0) {
bb = ByteBuff.wrap(bbs);
} else {
bb = ByteBuff.wrap(bbs, () -> {
for (int i = 0; i < lenFromReservoir; i++) {
this.putbackBuffer(bbs.get(i));
}
});
}
bb.limit(size);
return bb;
} | 3.68 |
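The allocate method composes a ByteBuff from fixed-size pooled buffers and falls back to a heap buffer for a small or unpooled tail. Below is a simplified, self-contained sketch of that composition logic using a Deque as the pool; it omits reference counting and, unlike the real allocator, does not cap the final buffer's limit to size. All names and sizes are illustrative.

```java
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class CompositeAllocateSketch {
    static final int BUF_SIZE = 64 * 1024;          // size of each pooled buffer
    static final int MIN_SIZE_FOR_POOL = 8 * 1024;  // below this, allocate the tail on heap

    /** Covers `size` bytes with pooled buffers first, then one heap buffer for the remainder. */
    static List<ByteBuffer> allocate(Deque<ByteBuffer> pool, int size) {
        List<ByteBuffer> buffers = new ArrayList<>();
        int remain = size;
        while (remain >= MIN_SIZE_FOR_POOL && !pool.isEmpty()) {
            buffers.add(pool.poll());
            remain -= BUF_SIZE;
        }
        if (remain > 0) {
            // Remainder too small for the pool, or pool exhausted: fall back to the heap.
            buffers.add(ByteBuffer.allocate(remain));
        }
        // The real allocator would now wrap these in a ByteBuff and cap its limit to `size`.
        return buffers;
    }

    public static void main(String[] args) {
        Deque<ByteBuffer> pool = new ArrayDeque<>();
        pool.add(ByteBuffer.allocateDirect(BUF_SIZE));
        // 100 KB request -> one 64 KB pooled direct buffer + one 36 KB heap buffer.
        System.out.println(allocate(pool, 100 * 1024).size()); // prints 2
    }
}
```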
flink_PartitionedFileWriter_startNewRegion | /**
* Persists the region index of the current data region and starts a new region to write.
*
* <p>Note: The caller is responsible for releasing the failed {@link PartitionedFile} if any
* exception occurs.
*
* @param isBroadcastRegion Whether it's a broadcast region. See {@link #isBroadcastRegion}.
*/
public void startNewRegion(boolean isBroadcastRegion) throws IOException {
checkState(!isFinished, "File writer is already finished.");
checkState(!isClosed, "File writer is already closed.");
writeRegionIndex();
this.isBroadcastRegion = isBroadcastRegion;
} | 3.68 |
flink_ArrowFieldWriter_reset | /** Resets the state of the writer to write the next batch of fields. */
public void reset() {
valueVector.reset();
count = 0;
} | 3.68 |
hadoop_AMRMProxyService_processApplicationStartRequest | /**
* Callback from the ContainerManager implementation for initializing the
* application request processing pipeline.
*
* @param request - encapsulates information for starting an AM
* @throws IOException if fails
* @throws YarnException if fails
*/
public void processApplicationStartRequest(StartContainerRequest request)
throws IOException, YarnException {
this.metrics.incrRequestCount();
long startTime = clock.getTime();
try {
ContainerTokenIdentifier containerTokenIdentifierForKey =
BuilderUtils.newContainerTokenIdentifier(request.getContainerToken());
ApplicationAttemptId appAttemptId =
containerTokenIdentifierForKey.getContainerID()
.getApplicationAttemptId();
ApplicationId applicationID = appAttemptId.getApplicationId();
      // Check whether the application exists in the federation state store only if federation
      // is enabled. If the application was submitted through the router, the router adds it to
      // the state store; if the application is not found in the state store, it was submitted
      // directly to the RM.
if (!checkIfAppExistsInStateStore(applicationID)) {
return;
}
LOG.info("Callback received for initializing request processing pipeline for an AM.");
Credentials credentials = YarnServerSecurityUtils
.parseCredentials(request.getContainerLaunchContext());
Token<AMRMTokenIdentifier> amrmToken =
getFirstAMRMToken(credentials.getAllTokens());
if (amrmToken == null) {
throw new YarnRuntimeException(
"AMRMToken not found in the start container request for application:" + appAttemptId);
}
// Substitute the existing AMRM Token with a local one. Keep the rest of
// the tokens in the credentials intact.
Token<AMRMTokenIdentifier> localToken =
this.secretManager.createAndGetAMRMToken(appAttemptId);
credentials.addToken(localToken.getService(), localToken);
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
request.getContainerLaunchContext()
.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
initializePipeline(appAttemptId,
containerTokenIdentifierForKey.getApplicationSubmitter(), amrmToken,
localToken, null, false, credentials);
long endTime = clock.getTime();
this.metrics.succeededAppStartRequests(endTime - startTime);
} catch (Throwable t) {
this.metrics.incrFailedAppStartRequests();
throw t;
}
} | 3.68 |
hudi_HoodieTable_rollbackInflightClustering | /**
 * Rolls back an inflight clustering instant to the requested clustering instant.
*
* @param inflightInstant Inflight clustering instant
* @param getPendingRollbackInstantFunc Function to get rollback instant
*/
public void rollbackInflightClustering(HoodieInstant inflightInstant,
Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION));
rollbackInflightInstant(inflightInstant, getPendingRollbackInstantFunc);
} | 3.68 |
flink_CheckpointStatsCounts_incrementCompletedCheckpoints | /**
* Increments the number of successfully completed checkpoints.
*
* <p>It is expected that this follows a previous call to {@link
* #incrementInProgressCheckpoints()}.
*/
void incrementCompletedCheckpoints() {
if (canDecrementOfInProgressCheckpointsNumber()) {
numInProgressCheckpoints--;
}
numCompletedCheckpoints++;
} | 3.68 |
flink_OverWindowPartitionedOrdered_preceding | /**
* Set the preceding offset (based on time or row-count intervals) for over window.
*
* @param preceding preceding offset relative to the current row.
* @return an over window with defined preceding
*/
public OverWindowPartitionedOrderedPreceding preceding(Expression preceding) {
return new OverWindowPartitionedOrderedPreceding(partitionBy, orderBy, preceding);
} | 3.68 |
hbase_IndividualBytesFieldCell_clone | /**
* Implement Cloneable interface
*/
@Override
public Object clone() throws CloneNotSupportedException {
return super.clone(); // only a shadow copy
} | 3.68 |
hadoop_S3ClientFactory_withMinimumPartSize | /**
* Set the minimum part size for transfer parts.
* @param value new value
* @return the builder
*/
public S3ClientCreationParameters withMinimumPartSize(
final long value) {
minimumPartSize = value;
return this;
} | 3.68 |
hbase_HMaster_getMaxRegionsInTransition | /** Returns Maximum number of regions in transition */
private int getMaxRegionsInTransition() {
int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
} | 3.68 |
hbase_CellCounter_createSubmittableJob | /**
* Sets up the actual job.
* @param conf The current configuration.
* @param args The command line parameters.
* @return The newly created job.
* @throws IOException When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
String tableName = args[0];
Path outputDir = new Path(args[1]);
String reportSeparatorString = (args.length > 2) ? args[2] : ":";
conf.set("ReportSeparator", reportSeparatorString);
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
job.setJarByClass(CellCounter.class);
Scan scan = getConfiguredScanForJob(conf, args);
TableMapReduceUtil.initTableMapperJob(tableName, scan, CellCounterMapper.class,
ImmutableBytesWritable.class, Result.class, job);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(LongWritable.class);
FileOutputFormat.setOutputPath(job, outputDir);
job.setReducerClass(LongSumReducer.class);
job.setCombinerClass(LongSumReducer.class);
return job;
} | 3.68 |
hbase_Procedure_doAcquireLock | /**
* Internal method called by the ProcedureExecutor that starts the user-level code acquireLock().
*/
final LockState doAcquireLock(TEnvironment env, ProcedureStore store) {
if (waitInitialized(env)) {
return LockState.LOCK_EVENT_WAIT;
}
if (lockedWhenLoading) {
// reset it so we will not consider it anymore
lockedWhenLoading = false;
locked = true;
      // Here we return without persisting the locked state, as lockedWhenLoading being true means
      // that the locked field of the procedure stored in the procedure store is already true, so
      // we do not need to store it again.
return LockState.LOCK_ACQUIRED;
}
LockState state = acquireLock(env);
if (state == LockState.LOCK_ACQUIRED) {
locked = true;
// persist that we have held the lock. This must be done before we actually execute the
// procedure, otherwise when restarting, we may consider the procedure does not have a lock,
// but it may have already done some changes as we have already executed it, and if another
// procedure gets the lock, then the semantic will be broken if the holdLock is true, as we do
// not expect that another procedure can be executed in the middle.
store.update(this);
}
return state;
} | 3.68 |
hudi_BulkInsertWriteFunction_lastPendingInstant | /**
* Returns the last pending instant time.
*/
protected String lastPendingInstant() {
return this.ckpMetadata.lastPendingInstant();
} | 3.68 |
hbase_HFile_getWriterFactory | /**
* Returns the factory to be used to create {@link HFile} writers
*/
public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) {
int version = getFormatVersion(conf);
switch (version) {
case 2:
throw new IllegalArgumentException("This should never happen. "
+ "Did you change hfile.format.version to read v2? This version of the software writes v3"
+ " hfiles only (but it can read v2 files without having to update hfile.format.version "
+ "in hbase-site.xml)");
case 3:
return new HFile.WriterFactory(conf, cacheConf);
default:
throw new IllegalArgumentException(
"Cannot create writer for HFile " + "format version " + version);
}
} | 3.68 |
flink_ExternalResourceUtils_externalResourceDriversFromConfig | /**
 * Instantiate the {@link ExternalResourceDriver ExternalResourceDrivers} for all enabled
 * external resources. {@link ExternalResourceDriver ExternalResourceDrivers} are mapped to their
 * resource names.
*/
@VisibleForTesting
static Map<String, ExternalResourceDriver> externalResourceDriversFromConfig(
Configuration config, PluginManager pluginManager) {
final Set<String> resourceSet = getExternalResourceSet(config);
if (resourceSet.isEmpty()) {
return Collections.emptyMap();
}
final Iterator<ExternalResourceDriverFactory> factoryIterator =
pluginManager.load(ExternalResourceDriverFactory.class);
final Map<String, ExternalResourceDriverFactory> externalResourceFactories =
new HashMap<>();
factoryIterator.forEachRemaining(
externalResourceDriverFactory ->
externalResourceFactories.put(
externalResourceDriverFactory.getClass().getName(),
externalResourceDriverFactory));
final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>();
for (String resourceName : resourceSet) {
final ConfigOption<String> driverClassOption =
key(ExternalResourceOptions
.getExternalResourceDriverFactoryConfigOptionForResource(
resourceName))
.stringType()
.noDefaultValue();
final String driverFactoryClassName = config.getString(driverClassOption);
if (StringUtils.isNullOrWhitespaceOnly(driverFactoryClassName)) {
LOG.warn(
"Could not find driver class name for {}. Please make sure {} is configured.",
resourceName,
driverClassOption.key());
continue;
}
ExternalResourceDriverFactory externalResourceDriverFactory =
externalResourceFactories.get(driverFactoryClassName);
if (externalResourceDriverFactory != null) {
DelegatingConfiguration delegatingConfiguration =
new DelegatingConfiguration(
config,
ExternalResourceOptions
.getExternalResourceParamConfigPrefixForResource(
resourceName));
try {
externalResourceDrivers.put(
resourceName,
externalResourceDriverFactory.createExternalResourceDriver(
delegatingConfiguration));
LOG.info("Add external resources driver for {}.", resourceName);
} catch (Exception e) {
LOG.warn(
"Could not instantiate driver with factory {} for {}. {}",
driverFactoryClassName,
resourceName,
e);
}
} else {
LOG.warn(
"Could not find factory class {} for {}.",
driverFactoryClassName,
resourceName);
}
}
return externalResourceDrivers;
} | 3.68 |
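externalResourceDriversFromConfig indexes the discovered factories by class name and then resolves each configured resource against that index, logging and skipping resources whose factory cannot be found. A compact sketch of that lookup-and-skip pattern with a hypothetical DriverFactory interface; plugin discovery is stubbed out with a hard-coded factory.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DriverDiscoverySketch {

    /** Hypothetical stand-in for ExternalResourceDriverFactory. */
    interface DriverFactory {
        String createDriver(String resourceName);
    }

    public static void main(String[] args) {
        // In Flink the factories come from the PluginManager; here we hard-code one
        // and index it by class name, exactly like the method above does.
        Map<String, DriverFactory> factoriesByClassName = new HashMap<>();
        DriverFactory gpuFactory = resource -> "driver for " + resource;
        factoriesByClassName.put(gpuFactory.getClass().getName(), gpuFactory);

        // Per-resource config maps a resource name to a driver factory class name.
        Map<String, String> configuredFactoryClass = Map.of(
                "gpu", gpuFactory.getClass().getName(),
                "fpga", "com.example.MissingFactory"); // hypothetical, never registered

        for (String resource : List.of("gpu", "fpga")) {
            DriverFactory factory = factoriesByClassName.get(configuredFactoryClass.get(resource));
            if (factory == null) {
                // Unknown factory: warn and continue instead of failing the whole discovery.
                System.out.println("Could not find factory for " + resource + ", skipping.");
                continue;
            }
            System.out.println(factory.createDriver(resource));
        }
    }
}
```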
framework_GridElement_getVerticalScroller | /**
* Get the vertical scroll element.
*
* @return The element representing the vertical scrollbar
*/
public TestBenchElement getVerticalScroller() {
List<WebElement> rootElements = findElements(By.xpath("./div"));
return (TestBenchElement) rootElements.get(0);
} | 3.68 |
hadoop_NativeCrc32_isAvailable | /**
* Return true if the JNI-based native CRC extensions are available.
*/
public static boolean isAvailable() {
if (isSparc) {
return false;
} else {
return NativeCodeLoader.isNativeCodeLoaded();
}
} | 3.68 |
hmily_HmilyExecuteTemplate_beforeSetAutoCommit | /**
* Sets auto commit.
*
* @param connection the connection
*/
public void beforeSetAutoCommit(final Connection connection) {
if (check()) {
return;
}
try {
boolean autoCommit = connection.getAutoCommit();
if (autoCommit) {
connection.setAutoCommit(false);
}
AutoCommitThreadLocal.INSTANCE.set(autoCommit);
} catch (SQLException e) {
e.printStackTrace();
}
} | 3.68 |
framework_MenuBar_onHide | /*
* This method is called when a menu bar is hidden, so that it can hide any
* child popups that are currently being shown.
*/
private void onHide() {
if (shownChildMenu != null) {
shownChildMenu.onHide();
popup.hide();
}
} | 3.68 |
hadoop_TFile_equals | /**
 * Compare whether this and other point to the same key value.
*/
@Override
public boolean equals(Object other) {
if (this == other) return true;
if (!(other instanceof Entry)) return false;
return ((Entry) other).compareTo(keyBuffer, 0, getKeyLength()) == 0;
} | 3.68 |
hbase_EnabledTableSnapshotHandler_snapshotRegions | /**
 * This method kicks off a snapshot procedure and then waits for the various phases
 * to complete.
*/
@Override
protected void snapshotRegions(List<Pair<RegionInfo, ServerName>> regions) throws IOException {
Set<String> regionServers = new HashSet<>(regions.size());
for (Pair<RegionInfo, ServerName> region : regions) {
if (region != null && region.getFirst() != null && region.getSecond() != null) {
RegionInfo hri = region.getFirst();
if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) continue;
regionServers.add(region.getSecond().toString());
}
}
// start the snapshot on the RS
Procedure proc = coordinator.startProcedure(this.monitor, this.snapshot.getName(),
this.snapshot.toByteArray(), Lists.newArrayList(regionServers));
if (proc == null) {
String msg =
"Failed to submit distributed procedure for snapshot '" + snapshot.getName() + "'";
LOG.error(msg);
throw new HBaseSnapshotException(msg);
}
try {
// wait for the snapshot to complete. A timer thread is kicked off that should cancel this
// if it takes too long.
proc.waitForCompleted();
LOG.info("Done waiting - online snapshot for " + this.snapshot.getName());
// Take the offline regions as disabled
for (Pair<RegionInfo, ServerName> region : regions) {
RegionInfo regionInfo = region.getFirst();
if (
regionInfo.isOffline() && (regionInfo.isSplit() || regionInfo.isSplitParent())
&& RegionReplicaUtil.isDefaultReplica(regionInfo)
) {
LOG.info("Take disabled snapshot of offline region=" + regionInfo);
snapshotDisabledRegion(regionInfo);
}
}
// handle the mob files if any.
boolean mobEnabled = MobUtils.hasMobColumns(htd);
if (mobEnabled) {
LOG.info("Taking snapshot for mob files in table " + htd.getTableName());
        // snapshot the mob files as an offline region.
RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(htd.getTableName());
snapshotMobRegion(mobRegionInfo);
}
} catch (InterruptedException e) {
ForeignException ee =
new ForeignException("Interrupted while waiting for snapshot to finish", e);
monitor.receive(ee);
Thread.currentThread().interrupt();
} catch (ForeignException e) {
monitor.receive(e);
}
} | 3.68 |
flink_MetricRegistryImpl_closeAsync | /**
* Shuts down this registry and the associated {@link MetricReporter}.
*
* <p>NOTE: This operation is asynchronous and returns a future which is completed once the
* shutdown operation has been completed.
*
* @return Future which is completed once the {@link MetricRegistryImpl} is shut down.
*/
@Override
public CompletableFuture<Void> closeAsync() {
synchronized (lock) {
if (isShutdown) {
return terminationFuture;
} else {
isShutdown = true;
final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3);
final Time gracePeriod = Time.seconds(1L);
if (metricQueryServiceRpcService != null) {
final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture =
metricQueryServiceRpcService.closeAsync();
terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture);
}
Throwable throwable = null;
for (ReporterAndSettings reporterAndSettings : reporters) {
try {
reporterAndSettings.getReporter().close();
} catch (Throwable t) {
throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
}
}
reporters.clear();
if (throwable != null) {
terminationFutures.add(
FutureUtils.completedExceptionally(
new FlinkException(
"Could not shut down the metric reporters properly.",
throwable)));
}
final CompletableFuture<Void> reporterExecutorShutdownFuture =
ExecutorUtils.nonBlockingShutdown(
gracePeriod.toMilliseconds(),
TimeUnit.MILLISECONDS,
reporterScheduledExecutor);
terminationFutures.add(reporterExecutorShutdownFuture);
final CompletableFuture<Void> viewUpdaterExecutorShutdownFuture =
ExecutorUtils.nonBlockingShutdown(
gracePeriod.toMilliseconds(),
TimeUnit.MILLISECONDS,
viewUpdaterScheduledExecutor);
terminationFutures.add(viewUpdaterExecutorShutdownFuture);
FutureUtils.completeAll(terminationFutures)
.whenComplete(
(Void ignored, Throwable error) -> {
if (error != null) {
terminationFuture.completeExceptionally(error);
} else {
terminationFuture.complete(null);
}
});
return terminationFuture;
}
}
} | 3.68 |
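closeAsync collects one termination future per component and completes its own future only when all of them have finished. A minimal sketch of that aggregation with CompletableFuture.allOf; the component names in the comments are only examples.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class AsyncShutdownSketch {

    /** Completes once every component's termination future completes; fails if any of them failed. */
    static CompletableFuture<Void> closeAll(List<CompletableFuture<Void>> terminationFutures) {
        return CompletableFuture.allOf(terminationFutures.toArray(new CompletableFuture[0]));
    }

    public static void main(String[] args) {
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        futures.add(CompletableFuture.completedFuture(null));                       // e.g. reporter shutdown
        futures.add(CompletableFuture.runAsync(() -> { /* close RPC service */ }));  // e.g. query service

        closeAll(futures).join(); // the demo blocks here; the registry itself just returns the future
        System.out.println("registry shut down");
    }
}
```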
flink_SchedulingPipelinedRegionComputeUtil_mergeRegionsOnCycles | /**
* Merge the regions base on <a
* href="https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm">
* Tarjan's strongly connected components algorithm</a>. For more details please see <a
* href="https://issues.apache.org/jira/browse/FLINK-17330">FLINK-17330</a>.
*/
private static Set<Set<SchedulingExecutionVertex>> mergeRegionsOnCycles(
final Map<SchedulingExecutionVertex, Set<SchedulingExecutionVertex>> vertexToRegion,
final Function<ExecutionVertexID, ? extends SchedulingExecutionVertex>
executionVertexRetriever) {
final List<Set<SchedulingExecutionVertex>> regionList =
new ArrayList<>(uniqueVertexGroups(vertexToRegion));
final List<List<Integer>> outEdges =
buildOutEdgesDesc(vertexToRegion, regionList, executionVertexRetriever);
final Set<Set<Integer>> sccs =
StronglyConnectedComponentsComputeUtils.computeStronglyConnectedComponents(
outEdges.size(), outEdges);
final Set<Set<SchedulingExecutionVertex>> mergedRegions =
Collections.newSetFromMap(new IdentityHashMap<>());
for (Set<Integer> scc : sccs) {
checkState(scc.size() > 0);
Set<SchedulingExecutionVertex> mergedRegion = new HashSet<>();
for (int regionIndex : scc) {
mergedRegion =
mergeVertexGroups(
mergedRegion, regionList.get(regionIndex), vertexToRegion);
}
mergedRegions.add(mergedRegion);
}
return mergedRegions;
} | 3.68 |
hudi_HoodieDropPartitionsTool_readConfigFromFileSystem | /**
* Reads config from the file system.
*
* @param jsc {@link JavaSparkContext} instance.
* @param cfg {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs)
.getProps(true);
} | 3.68 |
hbase_ServerManager_getOnlineServersListWithPredicator | /**
 * @param keys The target server names
* @param idleServerPredicator Evaluates the server on the given load
* @return A copy of the internal list of online servers matched by the predicator
*/
public List<ServerName> getOnlineServersListWithPredicator(List<ServerName> keys,
Predicate<ServerMetrics> idleServerPredicator) {
List<ServerName> names = new ArrayList<>();
if (keys != null && idleServerPredicator != null) {
keys.forEach(name -> {
ServerMetrics load = onlineServers.get(name);
if (load != null) {
if (idleServerPredicator.test(load)) {
names.add(name);
}
}
});
}
return names;
} | 3.68 |
hbase_WALSplitUtil_moveAsideBadEditsFile | /**
* Move aside a bad edits file.
* @param fs the file system used to rename bad edits file.
* @param edits Edits file to move aside.
* @return The name of the moved aside file.
*/
public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits)
throws IOException {
Path moveAsideName =
new Path(edits.getParent(), edits.getName() + "." + EnvironmentEdgeManager.currentTime());
if (!fs.rename(edits, moveAsideName)) {
LOG.warn("Rename failed from {} to {}", edits, moveAsideName);
}
return moveAsideName;
} | 3.68 |
hadoop_OBSCommonUtils_deleteObjects | /**
* Perform a bulk object delete operation. Increments the {@code
* OBJECT_DELETE_REQUESTS} and write operation statistics.
*
* @param owner the owner OBSFileSystem instance
* @param deleteRequest keys to delete on the obs-backend
* @throws IOException on any failure to delete objects
*/
static void deleteObjects(final OBSFileSystem owner,
final DeleteObjectsRequest deleteRequest) throws IOException {
DeleteObjectsResult result;
deleteRequest.setQuiet(true);
try {
result = owner.getObsClient().deleteObjects(deleteRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
} catch (ObsException e) {
LOG.warn("delete objects failed, request [{}], request id [{}] - "
+ "error code [{}] - error message [{}]",
deleteRequest, e.getErrorRequestId(), e.getErrorCode(),
e.getErrorMessage());
for (KeyAndVersion keyAndVersion
: deleteRequest.getKeyAndVersionsList()) {
deleteObject(owner, keyAndVersion.getKey());
}
return;
}
    // delete one by one if there are errors
if (result != null) {
List<DeleteObjectsResult.ErrorResult> errorResults
= result.getErrorResults();
if (!errorResults.isEmpty()) {
LOG.warn("bulk delete {} objects, {} failed, begin to delete "
+ "one by one.",
deleteRequest.getKeyAndVersionsList().size(),
errorResults.size());
for (DeleteObjectsResult.ErrorResult errorResult
: errorResults) {
deleteObject(owner, errorResult.getObjectKey());
}
}
}
} | 3.68 |
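deleteObjects attempts a bulk delete and, when the bulk request fails, falls back to deleting keys one at a time so a single failure cannot block the rest. A generic sketch of that bulk-with-fallback pattern follows; the real code also walks per-key error results, which is omitted here, and the names are illustrative.

```java
import java.util.List;
import java.util.function.Consumer;

public class BulkDeleteFallbackSketch {

    /**
     * Tries the bulk operation first; if it fails as a whole, deletes the keys one by one
     * so that a single problematic key cannot block the rest.
     */
    static void deleteAll(List<String> keys, Consumer<List<String>> bulkDelete,
                          Consumer<String> singleDelete) {
        try {
            bulkDelete.accept(keys);
        } catch (RuntimeException bulkFailure) {
            System.out.println("bulk delete failed, retrying one by one: " + bulkFailure.getMessage());
            keys.forEach(singleDelete);
        }
    }

    public static void main(String[] args) {
        deleteAll(List.of("a", "b"),
                batch -> { throw new RuntimeException("simulated backend error"); },
                key -> System.out.println("deleted " + key));
    }
}
```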
framework_SystemMessagesInfo_getService | /**
* Returns the service this SystemMessages request comes from.
*
 * @return The service which triggered this request or null if not triggered
* from a service.
*/
public VaadinService getService() {
return service;
} | 3.68 |
flink_ChangelogKeyedStateBackend_getKeyGroupRange | // -------------------- CheckpointableKeyedStateBackend --------------------------------
@Override
public KeyGroupRange getKeyGroupRange() {
return keyedStateBackend.getKeyGroupRange();
} | 3.68 |
hmily_CollectionUtils_isNotEmpty | /**
* Is not empty boolean.
*
* @param coll the coll
* @return the boolean
*/
public static boolean isNotEmpty(final Collection<?> coll) {
return !isEmpty(coll);
} | 3.68 |
morf_SpreadsheetDataSetConsumer_createIndex | /**
* Create the index worksheet.
*
* <p>This also creates links back to the index in each of the worksheets.</p>
*/
public void createIndex() {
WritableSheet sheet = workbook.createSheet(spreadsheetifyName("Index"), 0);
createTitle(sheet, "Index");
try {
// Create links for each worksheet, apart from the first sheet which is the
// index we're currently creating
final String[] names = workbook.getSheetNames();
for (int currentSheet = 1; currentSheet < names.length; currentSheet++) {
// Create the link from the index to the table's worksheet
WritableHyperlink link = new WritableHyperlink(0, currentSheet - 1 + NUMBER_OF_ROWS_IN_TITLE, names[currentSheet], workbook.getSheet(currentSheet), 0, 0);
sheet.addHyperlink(link);
//Add the filename in column B (stored in cell B2 of each sheet)
String fileName = workbook.getSheet(currentSheet).getCell(1, 1).getContents();
Label fileNameLabel = new Label(1, currentSheet - 1 + NUMBER_OF_ROWS_IN_TITLE, fileName);
WritableFont fileNameFont = new WritableFont(WritableFont.ARIAL,10,WritableFont.NO_BOLD,false,UnderlineStyle.NO_UNDERLINE,Colour.BLACK);
WritableCellFormat fileNameFormat = new WritableCellFormat(fileNameFont);
fileNameLabel.setCellFormat(fileNameFormat);
sheet.addCell(fileNameLabel);
// Create the link back to the index
link = new WritableHyperlink(0, 1, "Back to index", sheet, 0, currentSheet + NUMBER_OF_ROWS_IN_TITLE - 1);
workbook.getSheet(currentSheet).addHyperlink(link);
//Set column A of each sheet to be wide enough to show "Back to index"
workbook.getSheet(currentSheet).setColumnView(0, 13);
}
// Make Column A fairly wide to show tab names and hide column B
sheet.setColumnView(0, 35);
sheet.setColumnView(1, 0);
} catch (Exception e) {
throw new RuntimeException(e);
}
} | 3.68 |
hbase_ExploringCompactionPolicy_getTotalStoreSize | /**
* Find the total size of a list of store files.
* @param potentialMatchFiles StoreFile list.
* @return Sum of StoreFile.getReader().length();
*/
private long getTotalStoreSize(List<HStoreFile> potentialMatchFiles) {
return potentialMatchFiles.stream().mapToLong(sf -> sf.getReader().length()).sum();
} | 3.68 |
hbase_LogRollBackupSubprocedurePool_submitTask | /**
* Submit a task to the pool.
*/
public void submitTask(final Callable<Void> task) {
Future<Void> f = this.taskPool.submit(task);
futures.add(f);
} | 3.68 |
morf_SqlDialect_buildSpecificValueInsert | /**
* Creates an SQL statement to insert specific values into the columns
* specified.
*
* @param statement The insert statement to build an SQL query for.
* @param metadata the database schema. If null, the SQL statement will be
* treated "as is". If not null, the schema will be used to decorate
* the statement further with the default values from any columns not
* specified.
* @param idTable the ID table. Only required if the table has a
* non-autonumbered id column and the schema has been supplied.
* @return a string containing a specific value insert query for the specified
* table and column values.
*/
protected List<String> buildSpecificValueInsert(InsertStatement statement, Schema metadata, Table idTable) {
List<String> result = new LinkedList<>();
String destinationTableName = statement.getTable().getName();
if (StringUtils.isBlank(destinationTableName)) {
throw new IllegalArgumentException("Cannot create specified value insert SQL for a blank table");
}
StringBuilder sqlBuilder = new StringBuilder();
StringBuilder values = new StringBuilder("VALUES (");
// -- Add the preamble...
//
sqlBuilder.append(getSqlForInsertInto(statement));
sqlBuilder.append(tableNameWithSchemaName(statement.getTable()));
sqlBuilder.append(" (");
Set<String> columnNamesAdded = new HashSet<>();
boolean firstField = true;
for (AliasedField fieldWithValue : statement.getValues()) {
if (!firstField) {
sqlBuilder.append(", ");
values.append(", ");
}
if (StringUtils.isBlank(fieldWithValue.getAlias())) {
throw new IllegalArgumentException("Field value in insert statement does not have an alias");
}
sqlBuilder.append(fieldWithValue.getAlias());
values.append(getSqlFrom(fieldWithValue));
columnNamesAdded.add(fieldWithValue.getAlias().toUpperCase());
firstField = false;
}
// If we have a schema, then we can add defaults for missing column values
if (metadata != null) {
for (Column currentColumn : metadata.getTable(destinationTableName).columns()) {
// Default date columns to null and skip columns we've already added.
if (columnNamesAdded.contains(currentColumn.getName().toUpperCase())) {
continue;
}
// Allow identity columns to be defaulted by the database - nothing to
// do
if (currentColumn.isAutoNumbered()) {
continue;
}
// Non-autonumbered identity columns should be populated using the id
// table
if (currentColumn.getName().equalsIgnoreCase("id")) {
sqlBuilder.append(", ");
values.append(", ");
result.addAll(buildSimpleAutonumberUpdate(statement.getTable(), "id", idTable, ID_INCREMENTOR_TABLE_COLUMN_NAME,
ID_INCREMENTOR_TABLE_COLUMN_VALUE));
String fieldValue = autoNumberId(statement, idTable);
if (StringUtils.isNotEmpty(fieldValue)) {
sqlBuilder.append("id");
values.append(fieldValue);
}
continue;
}
// If there is a default for the field, use it
if (statement.getFieldDefaults().containsKey(currentColumn.getName())) {
AliasedField fieldWithValue = statement.getFieldDefaults().get(currentColumn.getName());
sqlBuilder.append(", ");
values.append(", ");
sqlBuilder.append(fieldWithValue.getAlias());
values.append(literalValue(fieldWithValue));
continue;
}
}
}
sqlBuilder.append(") ");
values.append(")");
sqlBuilder.append(values);
result.add(sqlBuilder.toString());
return result;
} | 3.68 |
hudi_LSMTimeline_listAllManifestFiles | /**
* List all the parquet manifest files.
*/
public static FileStatus[] listAllManifestFiles(HoodieTableMetaClient metaClient) throws IOException {
return metaClient.getFs().listStatus(new Path(metaClient.getArchivePath()), getManifestFilePathFilter());
} | 3.68 |
dubbo_AbstractDynamicConfiguration_getDefaultTimeout | /**
* @return the default timeout
* @since 2.7.8
*/
@Override
public long getDefaultTimeout() {
return getTimeout();
} | 3.68 |
hadoop_ValidateRenamedFilesStage_executeStage | /**
* Validate the task manifests.
* This is done by listing all the directories
* and verifying that every file in the source list
* has a file in the destination of the same size.
* If two tasks have both written the same file or
* a source file was changed after the task was committed,
* then a mismatch will be detected -provided the file
* length is now different.
* @param entryFile path to entry file
* @return list of files committed.
*/
@Override
protected List<FileEntry> executeStage(
final Path entryFile)
throws IOException {
final EntryFileIO entryFileIO = new EntryFileIO(getStageConfig().getConf());
try (SequenceFile.Reader reader = entryFileIO.createReader(entryFile)) {
// iterate over the entries in the file.
TaskPool.foreach(entryFileIO.iterateOver(reader))
.executeWith(getIOProcessors())
.stopOnFailure()
.run(this::validateOneFile);
return getFilesCommitted();
}
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_registerRegistryCenter | /**
* Register registry center.
*
* @param jobName job name
* @param regCenter registry center
*/
public void registerRegistryCenter(final String jobName, final CoordinatorRegistryCenter regCenter) {
regCenterMap.put(jobName, regCenter);
regCenter.addCacheData("/" + jobName);
} | 3.68 |
pulsar_AuthorizationProvider_allowNamespacePolicyOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
default Boolean allowNamespacePolicyOperation(NamespaceName namespaceName,
PolicyName policy,
PolicyOperation operation,
String role,
AuthenticationDataSource authData) {
try {
return allowNamespacePolicyOperationAsync(namespaceName, policy, operation, role, authData).get();
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
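The deprecated allowNamespacePolicyOperation shows the usual sync-over-async bridge: block on the future and unwrap the ExecutionException so callers see the real cause. A standalone sketch of that pattern; RuntimeException stands in for Pulsar's RestException, and the interrupt-flag restoration added here is an extra precaution not present in the original.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class SyncOverAsyncSketch {

    /** Blocks on an async permission check and unwraps the ExecutionException cause. */
    static boolean allowOperation(CompletableFuture<Boolean> asyncCheck) {
        try {
            return asyncCheck.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();       // restore the interrupt flag (extra precaution)
            throw new RuntimeException(e);            // RuntimeException stands in for RestException
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause()); // surface the real failure, not the wrapper
        }
    }

    public static void main(String[] args) {
        System.out.println(allowOperation(CompletableFuture.completedFuture(true))); // true
    }
}
```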
hbase_SimpleRegionNormalizer_skipForSplit | /**
* Determine if a region in {@link RegionState} should be considered for a split operation.
*/
private static boolean skipForSplit(final RegionState state, final RegionInfo regionInfo) {
final String name = regionInfo.getEncodedName();
return logTraceReason(() -> state == null,
"skipping split of region {} because no state information is available.", name)
|| logTraceReason(() -> !Objects.equals(state.getState(), RegionState.State.OPEN),
"skipping merge of region {} because it is not open.", name);
} | 3.68 |
hadoop_StartupProgressMetrics_addCounter | /**
* Adds a counter with a name built by using the specified phase's name as
* prefix and then appending the specified suffix.
*
* @param builder MetricsRecordBuilder to receive counter
* @param phase Phase to add
* @param nameSuffix String suffix of metric name
* @param descSuffix String suffix of metric description
* @param value long counter value
*/
private static void addCounter(MetricsRecordBuilder builder, Phase phase,
String nameSuffix, String descSuffix, long value) {
MetricsInfo metricsInfo = info(phase.getName() + nameSuffix,
phase.getDescription() + descSuffix);
builder.addCounter(metricsInfo, value);
} | 3.68 |
hbase_HbckReport_getCheckingStartTimestamp | /**
* Used for web ui to show when the HBCK checking started.
*/
public Instant getCheckingStartTimestamp() {
return checkingStartTimestamp;
} | 3.68 |
flink_JobEdge_setOperatorLevelCachingDescription | /**
* Sets the operator-level caching description for this input.
*
* @param operatorLevelCachingDescription The description of operator-level caching.
*/
public void setOperatorLevelCachingDescription(String operatorLevelCachingDescription) {
this.operatorLevelCachingDescription = operatorLevelCachingDescription;
} | 3.68 |
hbase_WALSplitter_createWriter | /**
* Create a new {@link WALProvider.Writer} for writing log splits.
   * @param logfile path of the recovered edits file to create
   * @return a new Writer instance; the caller should close it
*/
protected WALProvider.Writer createWriter(Path logfile) throws IOException {
return walFactory.createRecoveredEditsWriter(walFS, logfile);
} | 3.68 |
hadoop_VersionMismatchException_toString | /** Returns a string representation of this object. */
@Override
  public String toString() {
return "A record version mismatch occurred. Expecting v"
+ expectedVersion + ", found v" + foundVersion;
} | 3.68 |
hadoop_WorkerId_getHostname | /**
* Get hostname for Worker.
* @return hostname of worker node
*/
public final Text getHostname() {
return hostname;
} | 3.68 |
hadoop_AbfsCountersImpl_incrementCounter | /**
* {@inheritDoc}
*
* Increment a statistic with some value.
*
* @param statistic AbfsStatistic need to be incremented.
* @param value long value to be incremented by.
*/
@Override
public void incrementCounter(AbfsStatistic statistic, long value) {
ioStatisticsStore.incrementCounter(statistic.getStatName(), value);
MutableCounterLong counter = lookupCounter(statistic.getStatName());
if (counter != null) {
counter.incr(value);
}
} | 3.68 |
framework_VAbsoluteLayout_updateCaptionPosition | /**
     * Updates the caption position using the element's offset left and top.
*/
private void updateCaptionPosition() {
if (caption != null) {
Style style = caption.getElement().getStyle();
style.setProperty("position", "absolute");
style.setPropertyPx("left", getElement().getOffsetLeft());
style.setPropertyPx("top",
getElement().getOffsetTop() - caption.getHeight());
}
} | 3.68 |
flink_DefaultExecutionGraph_getAccumulatorResultsStringified | /**
     * Returns a stringified version of the user-defined accumulators.
     *
     * @return an array containing the StringifiedAccumulatorResult objects
*/
@Override
public StringifiedAccumulatorResult[] getAccumulatorResultsStringified() {
Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap =
aggregateUserAccumulators();
return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);
} | 3.68 |
hbase_PrettyPrinter_valueOf | /**
* Convert a human readable string to its value.
* @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
* @return the value corresponding to the human readable string
*/
public static String valueOf(final String pretty, final Unit unit) throws HBaseException {
StringBuilder value = new StringBuilder();
switch (unit) {
case TIME_INTERVAL:
value.append(humanReadableIntervalToSec(pretty));
break;
case BYTE:
value.append(humanReadableSizeToBytes(pretty));
break;
default:
value.append(pretty);
}
return value.toString();
} | 3.68 |
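A brief usage sketch for the conversion above; the nested Unit enum location, the NONE constant, and the exact human-readable formats accepted are assumptions inferred from the switch cases shown.

// A minimal sketch, assuming Unit is org.apache.hadoop.hbase.util.PrettyPrinter.Unit.
static void demoValueOf() throws HBaseException {
    String seconds = PrettyPrinter.valueOf("1 DAY", PrettyPrinter.Unit.TIME_INTERVAL); // e.g. "86400"
    String bytes = PrettyPrinter.valueOf("10MB", PrettyPrinter.Unit.BYTE);             // e.g. "10485760"
    String passthrough = PrettyPrinter.valueOf("unchanged", PrettyPrinter.Unit.NONE);  // default branch: returned as-is
}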
flink_Task_loadAndInstantiateInvokable | /**
     * Instantiates the given task invokable class, passing the given environment to the task's
     * constructor.
     *
     * <p>The invokable is created via the constructor that accepts only the Environment. If the
     * class does not declare such a constructor, instantiation fails with an exception.
*
* @param classLoader The classloader to load the class through.
* @param className The name of the class to load.
* @param environment The task environment.
* @return The instantiated invokable task object.
* @throws Throwable Forwards all exceptions that happen during initialization of the task. Also
* throws an exception if the task class misses the necessary constructor.
*/
private static TaskInvokable loadAndInstantiateInvokable(
ClassLoader classLoader, String className, Environment environment) throws Throwable {
final Class<? extends TaskInvokable> invokableClass;
try {
invokableClass =
Class.forName(className, true, classLoader).asSubclass(TaskInvokable.class);
} catch (Throwable t) {
throw new Exception("Could not load the task's invokable class.", t);
}
Constructor<? extends TaskInvokable> statelessCtor;
try {
statelessCtor = invokableClass.getConstructor(Environment.class);
} catch (NoSuchMethodException ee) {
throw new FlinkException("Task misses proper constructor", ee);
}
// instantiate the class
try {
//noinspection ConstantConditions --> cannot happen
return statelessCtor.newInstance(environment);
} catch (InvocationTargetException e) {
// directly forward exceptions from the eager initialization
throw e.getTargetException();
} catch (Exception e) {
throw new FlinkException("Could not instantiate the task's invokable class.", e);
}
} | 3.68 |
flink_DefaultConfigurableOptionsFactory_setLogDir | /**
* The directory for RocksDB's logging files.
*
* @param logDir If empty, log files will be in the same directory as data files<br>
* If non-empty, this directory will be used and the data directory's absolute path will be
* used as the prefix of the log file name.
* @return this options factory
*/
public DefaultConfigurableOptionsFactory setLogDir(String logDir) {
Preconditions.checkArgument(
new File(logDir).isAbsolute(),
"Invalid configuration: " + logDir + " does not point to an absolute path.");
setInternal(LOG_DIR.key(), logDir);
return this;
} | 3.68 |
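A short sketch of the setter above; the path is illustrative and must be absolute, or the precondition check shown above fails.

DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory()
        // Absolute path required; a relative path would fail the Preconditions.checkArgument above.
        .setLogDir("/var/log/flink-rocksdb");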
framework_VaadinPortletResponse_escapeHtml | /**
* Perform minimal HTML escaping similar to Guava HtmlEscapers.
*
* @param input
* string to escape
* @return minimally escaped HTML safe string
*/
private static String escapeHtml(String input) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < input.length(); i++) {
char c = input.charAt(i);
switch (c) {
case '"':
sb.append(""");
break;
case '\'':
sb.append("'");
break;
case '&':
sb.append("&");
break;
case '<':
sb.append("<");
break;
case '>':
sb.append(">");
break;
default:
sb.append(c);
}
}
return sb.toString();
} | 3.68 |
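An illustration of the five replacements performed above; since escapeHtml is private, the call is assumed to run inside VaadinPortletResponse.

String raw = "<script>alert(\"x & y\")</script>";
String safe = escapeHtml(raw);
// safe == "&lt;script&gt;alert(&quot;x &amp; y&quot;)&lt;/script&gt;"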
morf_SchemaHomology_tablesMatch | /**
* Compare two tables.
*
* @param table1 Table 1
* @param table2 Table 2
* @return Whether they match.
*/
public boolean tablesMatch(Table table1, Table table2) {
noDifferences = true;
checkTable(table1, table2);
return noDifferences;
} | 3.68 |
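A sketch of comparing two table definitions with the method above; the no-argument constructor and the origin of the two Table instances are assumptions for illustration.

// deployedTable and expectedTable are assumed Table instances obtained elsewhere
// (e.g. one read from the database, one from the application's schema definition).
SchemaHomology homology = new SchemaHomology();
boolean identical = homology.tablesMatch(deployedTable, expectedTable);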
flink_InMemoryPartition_appendRecord | /**
* Inserts the given object into the current buffer. This method returns a pointer that can be
* used to address the written record in this partition.
*
* @param record The object to be written to the partition.
* @return A pointer to the object in the partition.
* @throws IOException Thrown when the write failed.
*/
public final long appendRecord(T record) throws IOException {
long pointer = this.writeView.getCurrentPointer();
try {
this.serializer.serialize(record, this.writeView);
this.recordCounter++;
return pointer;
} catch (EOFException e) {
// we ran out of pages.
// first, reset the pages and then we need to trigger a compaction
// int oldCurrentBuffer =
this.writeView.resetTo(pointer);
// for (int bufNum = this.partitionPages.size() - 1; bufNum > oldCurrentBuffer;
// bufNum--) {
// this.availableMemory.addMemorySegment(this.partitionPages.remove(bufNum));
// }
throw e;
}
} | 3.68 |
framework_DesignAttributeHandler_resolveSupportedAttributes | /**
* Resolves the supported attributes and corresponding getters and setters
* for the class using introspection. After resolving, the information is
     * cached internally by this class.
*
* @param clazz
* the class to resolve the supported attributes for
*/
private static void resolveSupportedAttributes(Class<?> clazz) {
if (clazz == null) {
throw new IllegalArgumentException("The clazz can not be null");
}
if (CACHE.containsKey(clazz)) {
// NO-OP
return;
}
BeanInfo beanInfo;
try {
beanInfo = Introspector.getBeanInfo(clazz);
} catch (IntrospectionException e) {
            throw new RuntimeException(
                    "Could not get supported attributes for class "
                            + clazz.getName(), e);
}
AttributeCacheEntry entry = new AttributeCacheEntry();
for (PropertyDescriptor descriptor : beanInfo
.getPropertyDescriptors()) {
Method getter = descriptor.getReadMethod();
Method setter = descriptor.getWriteMethod();
Class<?> propertyType = descriptor.getPropertyType();
if (getter != null && setter != null && propertyType != null
&& getFormatter().canConvert(propertyType)) {
String attribute = toAttributeName(descriptor.getName());
entry.addAttribute(attribute, getter, setter);
}
}
CACHE.put(clazz, entry);
} | 3.68 |
hudi_HoodieBloomIndex_tagLocationBacktoRecords | /**
   * Tag the {@code <rowKey, filename>} pairs back onto the original HoodieRecord list.
*/
protected <R> HoodieData<HoodieRecord<R>> tagLocationBacktoRecords(
HoodiePairData<HoodieKey, HoodieRecordLocation> keyFilenamePair,
HoodieData<HoodieRecord<R>> records,
HoodieTable hoodieTable) {
HoodiePairData<HoodieKey, HoodieRecord<R>> keyRecordPairs =
records.mapToPair(record -> new ImmutablePair<>(record.getKey(), record));
    // The records may contain more entries than keyFilenamePair (some row keys have no file id),
    // so we do a left outer join.
return keyRecordPairs.leftOuterJoin(keyFilenamePair).values()
.map(v -> HoodieIndexUtils.tagAsNewRecordIfNeeded(v.getLeft(), Option.ofNullable(v.getRight().orElse(null))));
} | 3.68 |
flink_FunctionContext_getJobParameter | /**
* Gets the global job parameter value associated with the given key as a string.
*
* @param key key pointing to the associated value
     * @param defaultValue default value returned if the global job parameters are null or
     *     contain no value for the given key
* @return (default) value associated with the given key
*/
public String getJobParameter(String key, String defaultValue) {
if (context == null && jobParameters == null) {
throw new TableException(
"Calls to FunctionContext.getJobParameter are not available "
+ "at the current location.");
} else if (context == null) {
return jobParameters.getOrDefault(key, defaultValue);
}
final GlobalJobParameters conf = context.getExecutionConfig().getGlobalJobParameters();
if (conf != null) {
return conf.toMap().getOrDefault(key, defaultValue);
}
return defaultValue;
} | 3.68 |
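A sketch of reading a global job parameter from inside a user-defined function; the class name, key, and default value are illustrative.

import org.apache.flink.table.functions.FunctionContext;
import org.apache.flink.table.functions.ScalarFunction;

// Hypothetical UDF that resolves its prefix from the global job parameters.
public class GreetingFunction extends ScalarFunction {
    private String prefix;

    @Override
    public void open(FunctionContext context) throws Exception {
        // Falls back to "hello" when no "greeting.prefix" parameter is configured.
        prefix = context.getJobParameter("greeting.prefix", "hello");
    }

    public String eval(String name) {
        return prefix + ", " + name;
    }
}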
pulsar_LegacyHierarchicalLedgerRangeIterator_nextL1Node | /**
     * Iterates to the next level-1 znode.
     *
     * @return false if all level-1 nodes have been visited
     * @throws ExecutionException/InterruptedException/TimeoutException if an error occurs while
     *     reading zookeeper children
*/
private boolean nextL1Node() throws ExecutionException, InterruptedException, TimeoutException {
l2NodesIter = null;
while (l2NodesIter == null) {
if (l1NodesIter.hasNext()) {
curL1Nodes = l1NodesIter.next();
} else {
return false;
}
// Top level nodes are always exactly 2 digits long. (Don't pick up long hierarchical top level nodes)
if (!isLedgerParentNode(curL1Nodes)) {
continue;
}
List<String> l2Nodes = store.getChildren(ledgersRoot + "/" + curL1Nodes)
.get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
l2NodesIter = l2Nodes.iterator();
if (!l2NodesIter.hasNext()) {
l2NodesIter = null;
continue;
}
}
return true;
} | 3.68 |
hibernate-validator_ResourceLoaderHelper_getResettableInputStreamForPath | /**
* Returns an input stream for the given path, which supports the mark/reset
* contract.
*
* @param path The path of the requested input stream.
*
* @return An input stream for the given path or {@code null} if no such
* resource exists.
*
* @see InputStream#markSupported()
*/
static InputStream getResettableInputStreamForPath(String path, ClassLoader externalClassLoader) {
	//TODO not sure if it's the right thing to remove the leading '/'
String inputPath = path;
if ( inputPath.startsWith( "/" ) ) {
inputPath = inputPath.substring( 1 );
}
InputStream inputStream = null;
if ( externalClassLoader != null ) {
LOG.debug( "Trying to load " + path + " via user class loader" );
inputStream = externalClassLoader.getResourceAsStream( inputPath );
}
if ( inputStream == null ) {
ClassLoader loader = run( GetClassLoader.fromContext() );
if ( loader != null ) {
LOG.debug( "Trying to load " + path + " via TCCL" );
inputStream = loader.getResourceAsStream( inputPath );
}
}
if ( inputStream == null ) {
LOG.debug( "Trying to load " + path + " via Hibernate Validator's class loader" );
ClassLoader loader = ResourceLoaderHelper.class.getClassLoader();
inputStream = loader.getResourceAsStream( inputPath );
}
if ( inputStream == null ) {
return null;
}
else if ( inputStream.markSupported() ) {
return inputStream;
}
else {
return new BufferedInputStream( inputStream );
}
} | 3.68 |
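A sketch of the mark/reset guarantee described above; the method is package-private, so the call is assumed to run from within the same package, and the resource path is illustrative.

static void peekFirstByte() throws java.io.IOException {
    java.io.InputStream in =
            ResourceLoaderHelper.getResettableInputStreamForPath("META-INF/validation.xml", null);
    if (in != null) {
        in.mark(8192);       // mark/reset is always supported, per the contract above
        int firstByte = in.read();
        in.reset();          // rewind to the marked position
    }
}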
flink_TumblingProcessingTimeWindows_of | /**
* Creates a new {@code TumblingProcessingTimeWindows} {@link WindowAssigner} that assigns
     * elements to time windows based on the current processing time, the given offset and a
     * staggering offset, depending on the staggering policy.
*
* @param size The size of the generated windows.
* @param offset The offset which window start would be shifted by.
* @param windowStagger The utility that produces staggering offset in runtime.
     * @return The created window assigner.
*/
@PublicEvolving
public static TumblingProcessingTimeWindows of(
Time size, Time offset, WindowStagger windowStagger) {
return new TumblingProcessingTimeWindows(
size.toMilliseconds(), offset.toMilliseconds(), windowStagger);
} | 3.68 |
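A sketch of building the staggered assigner above; the size, offset, and the WindowStagger.RANDOM choice are illustrative.

import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.assigners.WindowStagger;
import org.apache.flink.streaming.api.windowing.time.Time;

// 5-minute tumbling windows, shifted by 30 seconds, with a random per-operator stagger.
TumblingProcessingTimeWindows assigner =
        TumblingProcessingTimeWindows.of(Time.minutes(5), Time.seconds(30), WindowStagger.RANDOM);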
flink_JoinOperator_projectTuple23 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>
ProjectJoin<
I1,
I2,
Tuple23<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>>
projectTuple23() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple23<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>>
tType =
new TupleTypeInfo<
Tuple23<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>>(fTypes);
return new ProjectJoin<
I1,
I2,
Tuple23<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
hbase_ColumnValueFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
} else if (!(o instanceof ColumnValueFilter)) {
return false;
}
ColumnValueFilter other = (ColumnValueFilter) o;
return Bytes.equals(this.getFamily(), other.getFamily())
&& Bytes.equals(this.getQualifier(), other.getQualifier())
&& this.getCompareOperator().equals(other.getCompareOperator())
&& this.getComparator().areSerializedFieldsEqual(other.getComparator());
} | 3.68 |
hbase_BaseLoadBalancer_balanceCluster | /**
   * Perform the major balance operation for the cluster; this will invoke
   * {@link #balanceTable(TableName, Map)} to do the actual balancing.
   * <p/>
   * This method is marked as final, which means you should not override it. See the javadoc for
   * {@link #balanceTable(TableName, Map)} for more details.
* @param loadOfAllTable region load of servers for all table
* @return a list of regions to be moved, including source and destination, or null if cluster is
* already balanced
* @see #balanceTable(TableName, Map)
*/
@Override
public final List<RegionPlan>
balanceCluster(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
preBalanceCluster(loadOfAllTable);
if (isByTable) {
List<RegionPlan> result = new ArrayList<>();
loadOfAllTable.forEach((tableName, loadOfOneTable) -> {
LOG.info("Start Generate Balance plan for table: " + tableName);
List<RegionPlan> partialPlans = balanceTable(tableName, loadOfOneTable);
if (partialPlans != null) {
result.addAll(partialPlans);
}
});
return result;
} else {
LOG.debug("Start Generate Balance plan for cluster.");
return balanceTable(HConstants.ENSEMBLE_TABLE_NAME, toEnsumbleTableLoad(loadOfAllTable));
}
} | 3.68 |
streampipes_PrimitivePropertyBuilder_scope | /**
* Assigns a property scope to the event property.
*
* @param propertyScope The {@link org.apache.streampipes.model.schema.PropertyScope}.
* @return this
*/
public PrimitivePropertyBuilder scope(PropertyScope propertyScope) {
this.eventProperty.setPropertyScope(propertyScope.name());
return this;
} | 3.68 |
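A sketch of the builder chain around the scope(...) call above; the create(...) and build() steps and the Datatypes constant are assumptions based on the StreamPipes SDK builder pattern.

// Declares a long-valued "timestamp" property scoped as a header property.
EventPropertyPrimitive timestampProperty = PrimitivePropertyBuilder
        .create(Datatypes.Long, "timestamp")   // assumed factory method and datatype constant
        .scope(PropertyScope.HEADER_PROPERTY)
        .build();                              // assumed terminal build step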
hbase_MetricsAssignmentManager_getMoveProcMetrics | /** Returns the common metrics for the move procedure. */
public ProcedureMetrics getMoveProcMetrics() {
return moveProcMetrics;
} | 3.68 |
hbase_RegionInfo_toDelimitedByteArray | /**
   * Use this instead of {@link RegionInfo#toByteArray(RegionInfo)} when writing to a stream and you
   * want to use the pb mergeDelimitedFrom (without the delimiter, pb reads to EOF, which may not be
   * what you want).
   * @param ri the RegionInfo to serialize
   * @return The given RegionInfo serialized as a delimited protobuf with a magic pb prefix.
*/
static byte[] toDelimitedByteArray(RegionInfo ri) throws IOException {
return ProtobufUtil.toDelimitedByteArray(ProtobufUtil.toRegionInfo(ri));
} | 3.68 |
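A sketch of writing a delimited RegionInfo to a stream; the table name and the RegionInfoBuilder usage are illustrative.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

static byte[] writeDelimited() throws IOException {
    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Magic pb prefix followed by the delimited protobuf, ready for a later delimited read.
    out.write(RegionInfo.toDelimitedByteArray(ri));
    return out.toByteArray();
}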
framework_AbstractSelect_getVisibleItemIds | /**
* Gets the visible item ids. In Select, this returns list of all item ids,
* but can be overridden in subclasses if they paint only part of the items
* to the terminal or null if no items is visible.
*/
public Collection<?> getVisibleItemIds() {
return getItemIds();
} | 3.68 |
hbase_RestoreSnapshotHelper_getParentToChildrenPairMap | /**
   * Returns the map from parent region to its pair of children.
* @return the map
*/
public Map<String, Pair<String, String>> getParentToChildrenPairMap() {
return this.parentsMap;
} | 3.68 |