name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
zxing_ErrorCorrectionLevel_forBits | /**
* @param bits int containing the two bits encoding a QR Code's error correction level
* @return ErrorCorrectionLevel representing the encoded error correction level
*/
public static ErrorCorrectionLevel forBits(int bits) {
if (bits < 0 || bits >= FOR_BITS.length) {
throw new IllegalArgumentException();
}
return FOR_BITS[bits];
} | 3.68 |
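A minimal usage sketch for the lookup above; `formatBits` is a hypothetical value assumed to have been extracted from the QR format information elsewhere, and the two-bit mask mirrors the bounds guard in `forBits`.

import com.google.zxing.qrcode.decoder.ErrorCorrectionLevel;

public class ForBitsDemo {
  public static void main(String[] args) {
    // hypothetical two-bit value read from the QR format information
    int formatBits = 0b01;
    // mask to two bits before the lookup, matching the range accepted by forBits
    ErrorCorrectionLevel level = ErrorCorrectionLevel.forBits(formatBits & 0x03);
    System.out.println("Error correction level: " + level);
  }
}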
hadoop_TFile_prepareMetaBlock | /**
* Obtain an output stream for creating a meta block. This function may not
* be called when there is a key append stream or value append stream
* active. No more key-value insertion is allowed after a meta data block
* has been added to TFile. Data will be compressed using the default
* compressor as defined in Writer's constructor.
*
* @param name
* Name of the meta block.
* @return A DataOutputStream that can be used to write Meta Block data.
* Closing the stream would signal the ending of the block.
* @throws IOException raised on errors performing I/O.
* @throws MetaBlockAlreadyExists
* the Meta Block with the same name already exists.
*/
public DataOutputStream prepareMetaBlock(String name) throws IOException,
MetaBlockAlreadyExists {
if (state != State.READY) {
throw new IllegalStateException(
"Incorrect state to start a Meta Block: " + state.name());
}
finishDataBlock(true);
return writerBCF.prepareMetaBlock(name);
} | 3.68 |
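A minimal sketch of the meta-block protocol described in the javadoc above, assuming the method belongs to a `TFile.Writer` that has already been created elsewhere; the block name and payload are illustrative only.

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.file.tfile.TFile;

public final class MetaBlockSketch {
  // Hypothetical helper: writes a single string-valued meta block to an open writer.
  static void writeMetaBlock(TFile.Writer writer, String name, String value) throws IOException {
    // MetaBlockAlreadyExists is an IOException subtype, so it is covered by the throws clause
    try (DataOutputStream metaOut = writer.prepareMetaBlock(name)) {
      metaOut.writeUTF(value); // closing the stream (via try-with-resources) ends the block
    }
    // After the first meta block is added, no further key-value pairs may be appended.
  }
}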
hmily_AbstractConfig_setLoad | /**
* Sets load.
*
* @param load the load
*/
public void setLoad(final boolean load) {
isLoad = load;
} | 3.68 |
flink_CheckpointFailureManager_handleCheckpointSuccess | /**
* Handle checkpoint success.
*
 * @param checkpointId the id of the succeeded checkpoint, used to track the latest successful
 *     checkpoint and reset the continuous failure count based on the checkpoint id sequence.
*/
public void handleCheckpointSuccess(long checkpointId) {
if (checkpointId > lastSucceededCheckpointId) {
lastSucceededCheckpointId = checkpointId;
clearCount();
}
} | 3.68 |
framework_ApplicationConnection_forceLayout | /**
 * This will cause a re-layout of all components. Mainly used for
* development. Published to JavaScript.
*/
public void forceLayout() {
Duration duration = new Duration();
layoutManager.forceLayout();
getLogger().info("forceLayout in " + duration.elapsedMillis() + " ms");
} | 3.68 |
hbase_MiniHBaseCluster_getMaster | /**
* Returns the master at the specified index, if available.
 * @return the HMaster at the specified index, or null if it is not available.
*/
public HMaster getMaster(final int serverNumber) {
return this.hbaseCluster.getMaster(serverNumber);
} | 3.68 |
flink_AbstractKeyedStateBackend_getKeySerializer | /** @see KeyedStateBackend */
@Override
public TypeSerializer<K> getKeySerializer() {
return keySerializer;
} | 3.68 |
hbase_FilterBase_filterRowCells | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc}
*/
@Override
public void filterRowCells(List<Cell> ignored) throws IOException {
} | 3.68 |
hadoop_AbfsConfiguration_set | /**
* Sets String in the underlying Configuration object.
* Provided only as a convenience; does not add any account logic.
* @param key Configuration key
* @param value Configuration value
*/
public void set(String key, String value) {
rawConfig.set(key, value);
} | 3.68 |
framework_VScrollTable_onUnregister | /**
* @since 7.2.6
*/
public void onUnregister() {
if (addCloseHandler != null) {
addCloseHandler.removeHandler();
}
} | 3.68 |
hudi_OptionsResolver_isInsertOverwrite | /**
* Returns whether the operation is INSERT OVERWRITE (table or partition).
*/
public static boolean isInsertOverwrite(Configuration conf) {
return conf.getString(FlinkOptions.OPERATION).equalsIgnoreCase(WriteOperationType.INSERT_OVERWRITE_TABLE.value())
|| conf.getString(FlinkOptions.OPERATION).equalsIgnoreCase(WriteOperationType.INSERT_OVERWRITE.value());
} | 3.68 |
flink_MergeTableLikeUtil_mergePartitions | /**
* Merges the partitions part of {@code CREATE TABLE} statement.
*
* <p>Partitioning is a single property of a Table, thus there can be at most a single instance
* of partitioning. Therefore it is not possible to use {@link MergingStrategy#INCLUDING} with
* partitioning defined in both source and derived table.
*/
public List<String> mergePartitions(
MergingStrategy mergingStrategy,
List<String> sourcePartitions,
List<String> derivedPartitions) {
if (!derivedPartitions.isEmpty()
&& !sourcePartitions.isEmpty()
&& mergingStrategy != MergingStrategy.EXCLUDING) {
throw new ValidationException(
"The base table already has partitions defined. You might want to specify "
+ "EXCLUDING PARTITIONS.");
}
if (!derivedPartitions.isEmpty()) {
return derivedPartitions;
}
return sourcePartitions;
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getSourcePath | /**
* Returns the source path.
*
* @return - Source path
*/
public String getSourcePath() {
return sourcePath;
} | 3.68 |
morf_ResultSetComparer_compareKeyValue | /**
* Given key values from right and left data set, compare and record mismatch.
*
   * @return the mismatch type, {@link MismatchType#MISSING_LEFT} or
   *         {@link MismatchType#MISSING_RIGHT}, or null if the values match
*/
@SuppressWarnings({ "rawtypes" })
private MismatchType compareKeyValue(Optional<? extends Comparable> leftValue, Optional<? extends Comparable> rightValue) {
if (leftValue == null && rightValue == null) {
throw new IllegalStateException("Cannot compare two nonexistent keys.");
}
if (leftValue == null) {
return MISSING_LEFT;
}
if (rightValue == null) {
return MISSING_RIGHT;
}
if (!leftValue.isPresent() || !rightValue.isPresent()) {
throw new IllegalStateException("Cannot compare null keys.");
}
int result = databaseEquivalentStringComparator.get().compare(leftValue.get(), rightValue.get());
return result < 0 ? MISSING_RIGHT : result > 0 ? MISSING_LEFT : null;
} | 3.68 |
pulsar_RawBatchConverter_rebatchMessage | /**
 * Takes a batched message and a filter, and returns a message containing only the sub-messages
 * that match the filter. Returns an empty optional if no messages match.
 *
 * NOTE: this method does not alter the reference count of the RawMessage argument.
*/
public static Optional<RawMessage> rebatchMessage(RawMessage msg,
BiPredicate<String, MessageId> filter,
boolean retainNullKey)
throws IOException {
checkArgument(msg.getMessageIdData().getBatchIndex() == -1);
ByteBuf payload = msg.getHeadersAndPayload();
int readerIndex = payload.readerIndex();
ByteBuf brokerMeta = null;
if (payload.getShort(readerIndex) == magicBrokerEntryMetadata) {
payload.skipBytes(Short.BYTES);
int brokerEntryMetadataSize = payload.readInt();
payload.readerIndex(readerIndex);
brokerMeta = payload.readSlice(brokerEntryMetadataSize + Short.BYTES + Integer.BYTES);
}
MessageMetadata metadata = Commands.parseMessageMetadata(payload);
ByteBuf batchBuffer = PulsarByteBufAllocator.DEFAULT.buffer(payload.capacity());
CompressionType compressionType = metadata.getCompression();
CompressionCodec codec = CompressionCodecProvider.getCompressionCodec(compressionType);
int uncompressedSize = metadata.getUncompressedSize();
ByteBuf uncompressedPayload = codec.decode(payload, uncompressedSize);
try {
int batchSize = metadata.getNumMessagesInBatch();
int messagesRetained = 0;
SingleMessageMetadata emptyMetadata = new SingleMessageMetadata().setCompactedOut(true);
SingleMessageMetadata singleMessageMetadata = new SingleMessageMetadata();
for (int i = 0; i < batchSize; i++) {
ByteBuf singleMessagePayload = Commands.deSerializeSingleMessageInBatch(uncompressedPayload,
singleMessageMetadata,
0, batchSize);
MessageId id = new BatchMessageIdImpl(msg.getMessageIdData().getLedgerId(),
msg.getMessageIdData().getEntryId(),
msg.getMessageIdData().getPartition(),
i);
if (!singleMessageMetadata.hasPartitionKey()) {
if (retainNullKey) {
messagesRetained++;
Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata,
singleMessagePayload, batchBuffer);
} else {
Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata,
Unpooled.EMPTY_BUFFER, batchBuffer);
}
} else if (filter.test(singleMessageMetadata.getPartitionKey(), id)
&& singleMessagePayload.readableBytes() > 0) {
messagesRetained++;
Commands.serializeSingleMessageInBatchWithPayload(singleMessageMetadata,
singleMessagePayload, batchBuffer);
} else {
Commands.serializeSingleMessageInBatchWithPayload(emptyMetadata,
Unpooled.EMPTY_BUFFER, batchBuffer);
}
singleMessagePayload.release();
}
if (messagesRetained > 0) {
int newUncompressedSize = batchBuffer.readableBytes();
ByteBuf compressedPayload = codec.encode(batchBuffer);
metadata.setUncompressedSize(newUncompressedSize);
ByteBuf metadataAndPayload = Commands.serializeMetadataAndPayload(Commands.ChecksumType.Crc32c,
metadata, compressedPayload);
if (brokerMeta != null) {
CompositeByteBuf compositeByteBuf = PulsarByteBufAllocator.DEFAULT.compositeDirectBuffer();
compositeByteBuf.addComponents(true, brokerMeta.retain(), metadataAndPayload);
metadataAndPayload = compositeByteBuf;
}
Optional<RawMessage> result =
Optional.of(new RawMessageImpl(msg.getMessageIdData(), metadataAndPayload));
metadataAndPayload.release();
compressedPayload.release();
return result;
} else {
return Optional.empty();
}
} finally {
uncompressedPayload.release();
batchBuffer.release();
}
} | 3.68 |
hudi_HoodieHeartbeatUtils_getLastHeartbeatTime | /**
 * Use the file's modification time as the last heartbeat time.
*
* @param fs {@link FileSystem} instance.
* @param basePath Base path of the table.
* @param instantTime Instant time.
* @return Last heartbeat timestamp.
 * @throws IOException if the heartbeat file cannot be accessed.
*/
public static Long getLastHeartbeatTime(FileSystem fs, String basePath, String instantTime) throws IOException {
Path heartbeatFilePath = new Path(HoodieTableMetaClient.getHeartbeatFolderPath(basePath) + Path.SEPARATOR + instantTime);
if (fs.exists(heartbeatFilePath)) {
return fs.getFileStatus(heartbeatFilePath).getModificationTime();
} else {
// NOTE : This can happen when a writer is upgraded to use lazy cleaning and the last write had failed
return 0L;
}
} | 3.68 |
hadoop_BlockDispatcher_receiveResponse | /** Receive a reportedBlock copy response from the input stream. */
private static void receiveResponse(DataInputStream in) throws IOException {
BlockOpResponseProto response = BlockOpResponseProto
.parseFrom(vintPrefixed(in));
while (response.getStatus() == Status.IN_PROGRESS) {
// read intermediate responses
response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
}
String logInfo = "reportedBlock move is failed";
DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
} | 3.68 |
flink_SkipListUtils_putKeyPointer | /**
* Puts the pointer of key space.
*
* @param memorySegment memory segment for value space.
* @param offset offset of value space in memory segment.
* @param keyPointer pointer to key space.
*/
public static void putKeyPointer(MemorySegment memorySegment, int offset, long keyPointer) {
memorySegment.putLong(offset + KEY_POINTER_OFFSET, keyPointer);
} | 3.68 |
hadoop_AbfsInputStream_getPos | /**
   * Return the current offset from the start of the file.
   * @throws IOException if there is an error, for example if the stream has been closed
*/
@Override
public synchronized long getPos() throws IOException {
if (closed) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
return nextReadPos < 0 ? 0 : nextReadPos;
} | 3.68 |
framework_FocusableFlowPanel_setFocus | /**
* Sets/Removes the keyboard focus to the panel.
*
* @param focus
* If set to true then the focus is moved to the panel, if set to
* false the focus is removed
*/
public void setFocus(boolean focus) {
if (focus) {
FocusImpl.getFocusImplForPanel().focus(getElement());
} else {
FocusImpl.getFocusImplForPanel().blur(getElement());
}
} | 3.68 |
hbase_HttpServer_getFilterInitializers | /** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
if (conf == null) {
return null;
}
Class<?>[] classes = conf.getClasses(FILTER_INITIALIZERS_PROPERTY);
if (classes == null) {
return null;
}
FilterInitializer[] initializers = new FilterInitializer[classes.length];
for (int i = 0; i < classes.length; i++) {
initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(classes[i]);
}
return initializers;
} | 3.68 |
graphhopper_EncodedValue_next | /**
 * This method reserves space for the specified number of bits and sets shift and dataIndex accordingly
*/
void next(int usedBits) {
shift = nextShift;
if ((shift - 1 + usedBits) / 32 > (shift - 1) / 32) {
dataIndex++;
shift = 0;
}
// we need 1L as otherwise it'll fail for usedBits==32
bitMask = (int) ((1L << usedBits) - 1);
bitMask <<= shift;
nextShift = shift + usedBits;
} | 3.68 |
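A standalone sketch of the mask arithmetic used above, showing why the widening to `1L` matters when `usedBits == 32`; the values are illustrative.

public class BitMaskDemo {
  public static void main(String[] args) {
    int usedBits = 32;
    int wrong = (1 << usedBits) - 1;           // 1 << 32 wraps around to 1, so this is 0
    int right = (int) ((1L << usedBits) - 1);  // all 32 bits set (0xFFFFFFFF)
    System.out.printf("wrong=0x%08X right=0x%08X%n", wrong, right);

    // a 10-bit field starting at bit 5, built the same way as in next(int)
    int shift = 5;
    int mask = (int) ((1L << 10) - 1) << shift;
    System.out.printf("10-bit mask at shift %d = 0x%08X%n", shift, mask);
  }
}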
hbase_TableSchemaModel_getAny | /** Returns the map for holding unspecified (user) attributes */
@XmlAnyAttribute
@JsonAnyGetter
public Map<QName, Object> getAny() {
return attrs;
} | 3.68 |
hadoop_MetricsLoggerTask_hasAppenders | // TODO : hadoop-logging module to hide log4j implementation details, this method
// can directly call utility from hadoop-logging.
private static boolean hasAppenders(Logger logger) {
return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders()
.hasMoreElements();
} | 3.68 |
hadoop_S3APrefetchingInputStream_getIOStatistics | /**
* Gets the internal IO statistics.
*
* @return the internal IO statistics.
*/
@Override
public IOStatistics getIOStatistics() {
if (!isClosed()) {
ioStatistics = inputStream.getIOStatistics();
}
return ioStatistics;
} | 3.68 |
flink_CatalogManager_getBuiltInDatabaseName | /**
* Gets the built-in database name in the built-in catalog. The built-in database is used for
* storing all non-serializable transient meta-objects.
*
* @return the built-in database name
*/
public String getBuiltInDatabaseName() {
// The default database of the built-in catalog is also the built-in database.
return getCatalogOrThrowException(getBuiltInCatalogName()).getDefaultDatabase();
} | 3.68 |
hbase_ProcedureCoordinator_createProcedure | /**
* Exposed for hooking with unit tests.
* @return the newly created procedure
*/
Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs,
List<String> expectedMembers) {
// build the procedure
return new Procedure(this, fed, wakeTimeMillis, timeoutMillis, procName, procArgs,
expectedMembers);
} | 3.68 |
dubbo_MethodConfig_checkDefault | /**
* Set default field values of MethodConfig.
*
* @see org.apache.dubbo.config.annotation.Method
*/
@Override
protected void checkDefault() {
super.checkDefault();
// set default field values
// org.apache.dubbo.config.annotation.Method.isReturn() default true;
if (isReturn() == null) {
setReturn(true);
}
// org.apache.dubbo.config.annotation.Method.sent() default true;
if (getSent() == null) {
setSent(true);
}
} | 3.68 |
flink_BlockerSync_blockNonInterruptible | /**
     * Blocks until {@link #releaseBlocker()} is called. Notifies the thread waiting in
     * {@link #awaitBlocker()}.
*/
public void blockNonInterruptible() {
synchronized (lock) {
blockerReady = true;
lock.notifyAll();
while (!blockerReleased) {
try {
lock.wait();
} catch (InterruptedException ignored) {
}
}
}
} | 3.68 |
flink_StreamingJobGraphGenerator_setChaining | /**
* Sets up task chains from the source {@link StreamNode} instances.
*
* <p>This will recursively create all {@link JobVertex} instances.
*/
private void setChaining(Map<Integer, byte[]> hashes, List<Map<Integer, byte[]>> legacyHashes) {
// we separate out the sources that run as inputs to another operator (chained inputs)
        // from the sources that need to run as the main (head) operator.
final Map<Integer, OperatorChainInfo> chainEntryPoints =
buildChainedInputsAndGetHeadInputs(hashes, legacyHashes);
final Collection<OperatorChainInfo> initialEntryPoints =
chainEntryPoints.entrySet().stream()
.sorted(Comparator.comparing(Map.Entry::getKey))
.map(Map.Entry::getValue)
.collect(Collectors.toList());
// iterate over a copy of the values, because this map gets concurrently modified
for (OperatorChainInfo info : initialEntryPoints) {
createChain(
info.getStartNodeId(),
1, // operators start at position 1 because 0 is for chained source inputs
info,
chainEntryPoints);
}
} | 3.68 |
flink_CliClient_executeInInteractiveMode | /** Opens the interactive CLI shell. */
public void executeInInteractiveMode() {
executeInInteractiveMode(null);
} | 3.68 |
flink_SubtaskGatewayImpl_openGatewayAndUnmarkAllCheckpoint | /** Opens the gateway, releasing all buffered events. */
void openGatewayAndUnmarkAllCheckpoint() {
checkRunsInMainThread();
for (List<BlockedEvent> blockedEvents : blockedEventsMap.values()) {
for (BlockedEvent blockedEvent : blockedEvents) {
callSendAction(blockedEvent.sendAction, blockedEvent.future);
}
}
blockedEventsMap.clear();
currentMarkedCheckpointIds.clear();
} | 3.68 |
querydsl_AbstractSQLInsertClause_populate | /**
* Populate the INSERT clause with the properties of the given bean using
* the given Mapper.
*
* @param obj object to use for population
* @param mapper mapper to use
* @return the current object
*/
@SuppressWarnings("rawtypes")
public <T> C populate(T obj, Mapper<T> mapper) {
Map<Path<?>, Object> values = mapper.createMap(entity, obj);
for (Map.Entry<Path<?>, Object> entry : values.entrySet()) {
set((Path) entry.getKey(), entry.getValue());
}
return (C) this;
} | 3.68 |
hbase_FavoredStochasticBalancer_segregateRegionsAndAssignRegionsWithFavoredNodes | /**
* Return a pair - one with assignments when favored nodes are present and another with regions
* without favored nodes.
*/
private Pair<Map<ServerName, List<RegionInfo>>, List<RegionInfo>>
segregateRegionsAndAssignRegionsWithFavoredNodes(Collection<RegionInfo> regions,
List<ServerName> onlineServers) throws HBaseIOException {
// Since we expect FN to be present most of the time, lets create map with same size
Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes =
new HashMap<>(onlineServers.size());
List<RegionInfo> regionsWithNoFavoredNodes = new ArrayList<>();
for (RegionInfo region : regions) {
List<ServerName> favoredNodes = fnm.getFavoredNodes(region);
ServerName primaryHost = null;
ServerName secondaryHost = null;
ServerName tertiaryHost = null;
if (favoredNodes != null && !favoredNodes.isEmpty()) {
for (ServerName s : favoredNodes) {
ServerName serverWithLegitStartCode = getServerFromFavoredNode(onlineServers, s);
if (serverWithLegitStartCode != null) {
FavoredNodesPlan.Position position =
FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s);
if (Position.PRIMARY.equals(position)) {
primaryHost = serverWithLegitStartCode;
} else if (Position.SECONDARY.equals(position)) {
secondaryHost = serverWithLegitStartCode;
} else if (Position.TERTIARY.equals(position)) {
tertiaryHost = serverWithLegitStartCode;
}
}
}
assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, primaryHost,
secondaryHost, tertiaryHost);
} else {
regionsWithNoFavoredNodes.add(region);
}
}
return new Pair<>(assignmentMapForFavoredNodes, regionsWithNoFavoredNodes);
} | 3.68 |
hudi_SparkSQLQueryNode_execute | /**
   * Helper method that executes a Spark SQL query against a Hive table.
   *
   * @param executionContext Execution context to perform this query.
   * @param curItrCount current iteration count.
   * @throws Exception will be thrown if any error occurred
*/
@Override
public void execute(ExecutionContext executionContext, int curItrCount) throws Exception {
log.info("Executing spark sql query node");
this.hiveServiceProvider.startLocalHiveServiceIfNeeded(executionContext.getHoodieTestSuiteWriter().getConfiguration());
this.hiveServiceProvider.syncToLocalHiveIfNeeded(executionContext.getHoodieTestSuiteWriter());
SparkSession session = SparkSession.builder().sparkContext(executionContext.getJsc().sc()).getOrCreate();
for (String hiveProperty : this.config.getHiveProperties()) {
session.sql(hiveProperty).count();
}
for (Pair<String, Integer> queryAndResult : this.config.getHiveQueries()) {
log.info("Running {}", queryAndResult.getLeft());
Dataset<Row> res = session.sql(queryAndResult.getLeft());
if (res.count() == 0) {
assert 0 == queryAndResult.getRight();
} else {
assert ((Row[]) res.collect())[0].getInt(0) == queryAndResult.getRight();
}
log.info("Successfully validated query!");
}
this.hiveServiceProvider.stopLocalHiveServiceIfNeeded();
this.result = true;
} | 3.68 |
hbase_RegionCoprocessorHost_preAppend | /**
* Supports Coprocessor 'bypass'.
* @param append append object
* @param edit The WALEdit object.
* @return result to return to client if default operation should be bypassed, null otherwise
* @throws IOException if an error occurred on the coprocessor
*/
public Result preAppend(final Append append, final WALEdit edit) throws IOException {
boolean bypassable = true;
Result defaultResult = null;
if (this.coprocEnvironments.isEmpty()) {
return defaultResult;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(
regionObserverGetter, defaultResult, bypassable) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.preAppend(this, append, edit);
}
});
} | 3.68 |
Activiti_BigDecimalToString_primTransform | /**
* {@inheritDoc}
*/
@Override
protected Object primTransform(Object anObject) throws Exception {
return format.format((BigDecimal) anObject);
} | 3.68 |
flink_ParquetSchemaConverter_is32BitDecimal | // From DecimalDataUtils
public static boolean is32BitDecimal(int precision) {
return precision <= 9;
} | 3.68 |
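A quick standalone check of the boundary behind the `precision <= 9` rule: every 9-digit decimal fits in a signed 32-bit int, while 10-digit decimals can overflow it.

public class DecimalWidthDemo {
  public static void main(String[] args) {
    System.out.println(Integer.MAX_VALUE);                    // 2147483647
    System.out.println(999_999_999 <= Integer.MAX_VALUE);     // true  -> precision 9 always fits
    System.out.println(9_999_999_999L <= Integer.MAX_VALUE);  // false -> precision 10 needs 64 bits
  }
}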
flink_AsyncSinkBaseBuilder_setMaxBatchSizeInBytes | /**
     * @param maxBatchSizeInBytes a flush will be attempted if the most recent call to write
     *     introduces an element to the buffer such that the total size of the buffer is greater
     *     than or equal to this threshold value. If this happens, the maximum number of elements
     *     from the head of the buffer whose combined size is still smaller than {@code
     *     maxBatchSizeInBytes} will be flushed.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBatchSizeInBytes(long maxBatchSizeInBytes) {
this.maxBatchSizeInBytes = maxBatchSizeInBytes;
return (ConcreteBuilderT) this;
} | 3.68 |
hmily_RpcResource_getXaProxy | /**
* Gets xa proxy.
*
* @return the xa proxy
*/
public RpcXaProxy getXaProxy() {
return xaProxy;
} | 3.68 |
framework_HorizontalLayoutFullsizeContentWithErrorMsg_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "TextField should remain at same level vertically, horizontally width should adjust to fit error indicator.";
} | 3.68 |
hbase_BitComparator_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof BitComparator)) {
return false;
}
BitComparator comparator = (BitComparator) other;
return super.areSerializedFieldsEqual(other)
&& this.getOperator().equals(comparator.getOperator());
} | 3.68 |
hbase_HRegion_writeCanNotFlushMarkerToWAL | /**
   * This method is only used when we flush but the memstore is empty. If writeFlushWalMarker is
   * true, we write the {@link FlushAction#CANNOT_FLUSH} flush marker to the WAL when the memstore
   * is empty. Ignores exceptions from the WAL. Returns whether the write succeeded.
* @return whether WAL write was successful
*/
private boolean writeCanNotFlushMarkerToWAL(WriteEntry flushOpSeqIdMVCCEntry, WAL wal,
boolean writeFlushWalMarker) {
FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.CANNOT_FLUSH, getRegionInfo(),
-1, new TreeMap<>(Bytes.BYTES_COMPARATOR));
RegionReplicationSink sink = regionReplicationSink.orElse(null);
if (sink != null && !writeFlushWalMarker) {
/**
       * Here, replication to a secondary region replica can use {@link FlushAction#CANNOT_FLUSH}
       * to recover when writeFlushWalMarker is false: we create a {@link WALEdit} for the
       * {@link FlushDescriptor} and attach the {@link RegionReplicationSink#add} to the
       * flushOpSeqIdMVCCEntry, see HBASE-26960 for more details.
*/
this.attachRegionReplicationToFlushOpSeqIdMVCCEntry(flushOpSeqIdMVCCEntry, desc, sink);
return false;
}
if (writeFlushWalMarker && wal != null && !writestate.readOnly) {
try {
WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, true, mvcc,
sink);
return true;
} catch (IOException e) {
LOG.warn(getRegionInfo().getEncodedName() + " : "
+ "Received exception while trying to write the flush request to wal", e);
}
}
return false;
} | 3.68 |
morf_AbstractDatabaseType_canTrace | /**
* @return true if tracing can be enabled on the database type
*/
@Override
public boolean canTrace() {
return false;
} | 3.68 |
graphhopper_VectorTile_clearBoolValue | /**
* <code>optional bool bool_value = 7;</code>
*/
public Builder clearBoolValue() {
bitField0_ = (bitField0_ & ~0x00000040);
boolValue_ = false;
onChanged();
return this;
} | 3.68 |
framework_LayoutManager_getPaddingBottom | /**
* Gets the bottom padding of the given element, provided that it has been
* measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured bottom padding of the element in pixels.
*/
public int getPaddingBottom(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getPaddingBottom();
} | 3.68 |
framework_DesignContext_isLegacyPrefixEnabled | /**
* Check whether the legacy prefix "v" or the default prefix "vaadin" should
* be used when writing designs. The property or context parameter
* {@link Constants#SERVLET_PARAMETER_LEGACY_DESIGN_PREFIX} can be used to
* switch to the legacy prefix.
*
* @since 7.5.7
* @return true to use the legacy prefix, false by default
*/
protected boolean isLegacyPrefixEnabled() {
if (legacyDesignPrefix != null) {
return legacyDesignPrefix.booleanValue();
}
if (VaadinService.getCurrent() == null) {
// This will happen at least in JUnit tests.
return false;
}
DeploymentConfiguration configuration = VaadinService.getCurrent()
.getDeploymentConfiguration();
legacyDesignPrefix = configuration.getApplicationOrSystemProperty(
Constants.SERVLET_PARAMETER_LEGACY_DESIGN_PREFIX, "false")
.equals("true");
return legacyDesignPrefix.booleanValue();
} | 3.68 |
hadoop_TimelineDomains_getDomains | /**
* Get a list of domains
*
* @return a list of domains
*/
@XmlElement(name = "domains")
public List<TimelineDomain> getDomains() {
return domains;
} | 3.68 |
streampipes_SpKafkaProducer_createKafkaTopic | /**
   * Create a new topic and define the number of partitions, replicas, and the retention time
*
* @param settings The settings to connect to a Kafka broker
*/
private void createKafkaTopic(KafkaTransportProtocol settings) throws ExecutionException, InterruptedException {
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl);
AdminClient adminClient = KafkaAdminClient.create(props);
ListTopicsResult topics = adminClient.listTopics();
if (!topicExists(topics)) {
Map<String, String> topicConfig = new HashMap<>();
String retentionTime = Environments.getEnvironment().getKafkaRetentionTimeMs().getValueOrDefault();
topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, retentionTime);
final NewTopic newTopic = new NewTopic(topic, 1, (short) 1);
newTopic.configs(topicConfig);
final CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singleton(newTopic));
createTopicsResult.values().get(topic).get();
LOG.info("Successfully created Kafka topic " + topic);
} else {
      LOG.info("Topic " + topic + " already exists in the broker, skipping topic creation");
}
} | 3.68 |
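A standalone sketch of the same AdminClient flow with made-up broker and topic names; it uses only standard Kafka client calls and, like the snippet above, waits for the topic creation to complete.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;

public class CreateTopicSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker address
    try (AdminClient admin = AdminClient.create(props)) {
      NewTopic topic = new NewTopic("demo-topic", 1, (short) 1);
      topic.configs(Map.of(TopicConfig.RETENTION_MS_CONFIG, "604800000")); // 7 days
      admin.createTopics(Collections.singleton(topic)).all().get();        // block until created
    }
  }
}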
rocketmq-connect_AbstractStateManagementService_get | /**
* Get the current state of the connector.
*
* @param connector the connector name
* @return the state or null if there is none
*/
@Override
public ConnectorStatus get(String connector) {
ConnAndTaskStatus.CacheEntry<ConnectorStatus> cacheEntry = connAndTaskStatus.getConnectors().get(connector);
if (cacheEntry == null) {
return null;
}
return cacheEntry.get();
} | 3.68 |
hadoop_IOStatisticsSnapshot_createMaps | /**
* Create the maps.
*/
private synchronized void createMaps() {
counters = new ConcurrentHashMap<>();
gauges = new ConcurrentHashMap<>();
minimums = new ConcurrentHashMap<>();
maximums = new ConcurrentHashMap<>();
meanStatistics = new ConcurrentHashMap<>();
} | 3.68 |
hudi_HoodieInputFormatUtils_getAffectedPartitions | /**
   * Extract partitions touched by the commitsToCheck.
   *
   * @param commitsToCheck  commit instants whose metadata should be inspected.
   * @param tableMetaClient meta client of the Hoodie table.
   * @param timeline        timeline used to read the instant details.
   * @param inputPaths      original input paths of the query.
   * @return an Option holding the comma-separated affected partition paths, or empty if none match.
   * @throws IOException if the commit metadata cannot be read.
   */
public static Option<String> getAffectedPartitions(List<HoodieInstant> commitsToCheck,
HoodieTableMetaClient tableMetaClient,
HoodieTimeline timeline,
List<Path> inputPaths) throws IOException {
Set<String> partitionsToList = new HashSet<>();
for (HoodieInstant commit : commitsToCheck) {
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(timeline.getInstantDetails(commit).get(),
HoodieCommitMetadata.class);
partitionsToList.addAll(commitMetadata.getPartitionToWriteStats().keySet());
}
if (partitionsToList.isEmpty()) {
return Option.empty();
}
String incrementalInputPaths = partitionsToList.stream()
.map(s -> StringUtils.isNullOrEmpty(s) ? tableMetaClient.getBasePath() : tableMetaClient.getBasePath() + Path.SEPARATOR + s)
.filter(s -> {
/*
* Ensure to return only results from the original input path that has incremental changes
* This check is needed for the following corner case - When the caller invokes
* HoodieInputFormat.listStatus multiple times (with small batches of Hive partitions each
* time. Ex. Hive fetch task calls listStatus for every partition once) we do not want to
* accidentally return all incremental changes for the entire table in every listStatus()
* call. This will create redundant splits. Instead we only want to return the incremental
* changes (if so any) in that batch of input paths.
*
* NOTE on Hive queries that are executed using Fetch task:
* Since Fetch tasks invoke InputFormat.listStatus() per partition, Hoodie metadata can be
* listed in every such listStatus() call. In order to avoid this, it might be useful to
* disable fetch tasks using the hive session property for incremental queries:
* `set hive.fetch.task.conversion=none;`
* This would ensure Map Reduce execution is chosen for a Hive query, which combines
* partitions (comma separated) and calls InputFormat.listStatus() only once with all
* those partitions.
*/
for (Path path : inputPaths) {
if (path.toString().endsWith(s)) {
return true;
}
}
return false;
})
.collect(Collectors.joining(","));
return StringUtils.isNullOrEmpty(incrementalInputPaths) ? Option.empty() : Option.of(incrementalInputPaths);
} | 3.68 |
hbase_TestingHBaseClusterOption_builder | /**
* Returns a new builder.
*/
public static Builder builder() {
return new Builder();
} | 3.68 |
pulsar_ClientCnxIdleState_getIdleStat | /**
* Get idle-stat.
* @return connection idle-stat
*/
public State getIdleStat() {
return STATE_UPDATER.get(this);
} | 3.68 |
flink_ClassLoaderUtil_validateClassLoadable | /**
     * Checks whether the class that was not found in the given exception can be resolved through
     * the given class loader.
*
* @param cnfe The ClassNotFoundException that defines the name of the class.
* @param cl The class loader to use for the class resolution.
* @return True, if the class can be resolved with the given class loader, false if not.
*/
public static boolean validateClassLoadable(ClassNotFoundException cnfe, ClassLoader cl) {
try {
String className = cnfe.getMessage();
Class.forName(className, false, cl);
return true;
} catch (ClassNotFoundException e) {
return false;
} catch (Exception e) {
return false;
}
} | 3.68 |
flink_MultisetTypeInfo_getElementTypeInfo | /** Gets the type information for the elements contained in the Multiset */
public TypeInformation<T> getElementTypeInfo() {
return getKeyTypeInfo();
} | 3.68 |
hadoop_InstantiationIOException_isAbstract | /**
* Class is abstract.
* @param uri URI of filesystem
* @param classname classname.
* @param key configuration key
* @return an exception.
*/
public static InstantiationIOException isAbstract(URI uri, String classname, String key) {
return new InstantiationIOException(Kind.IsAbstract,
uri, classname, key, ABSTRACT_PROVIDER, null);
} | 3.68 |
hbase_ClientMetaTableAccessor_getTableStopRowForMeta | /** Returns stop row for scanning META according to query type */
public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
if (tableName == null) {
return null;
}
final byte[] stopRow;
switch (type) {
case REGION:
case REPLICATION: {
stopRow = new byte[tableName.getName().length + 3];
System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length);
stopRow[stopRow.length - 3] = ' ';
stopRow[stopRow.length - 2] = HConstants.DELIMITER;
stopRow[stopRow.length - 1] = HConstants.DELIMITER;
break;
}
case ALL:
case TABLE:
default: {
stopRow = new byte[tableName.getName().length + 1];
System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length);
stopRow[stopRow.length - 1] = ' ';
break;
}
}
return stopRow;
} | 3.68 |
hbase_MasterObserver_preListSnapshot | /**
* Called before listSnapshots request has been processed.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor of the snapshot to list
*/
default void preListSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot) throws IOException {
} | 3.68 |
framework_CustomizedSystemMessages_setAuthenticationErrorNotificationEnabled | /**
* Enables or disables the notification. If disabled, the set URL (or
* current) is loaded directly.
*
* @param authenticationErrorNotificationEnabled
* true = enabled, false = disabled
*/
public void setAuthenticationErrorNotificationEnabled(
boolean authenticationErrorNotificationEnabled) {
this.authenticationErrorNotificationEnabled = authenticationErrorNotificationEnabled;
} | 3.68 |
framework_VScrollTable_updateHeight | /** For internal use only. May be removed or replaced in the future. */
public void updateHeight() {
setContainerHeight();
if (initializedAndAttached) {
updatePageLength();
}
triggerLazyColumnAdjustment(false);
/*
         * setting height may affect whether the component has scrollbars ->
* needs scrolling or not
*/
setProperTabIndex();
} | 3.68 |
hudi_RowDataToHoodieFunction_toHoodieRecord | /**
   * Converts the given record to a {@link HoodieRecord}.
   *
   * @param record The input record
   * @return HoodieRecord based on the configuration
   * @throws Exception if an error occurs
*/
@SuppressWarnings("rawtypes")
private HoodieRecord toHoodieRecord(I record) throws Exception {
GenericRecord gr = (GenericRecord) this.converter.convert(this.avroSchema, record);
final HoodieKey hoodieKey = keyGenerator.getKey(gr);
HoodieRecordPayload payload = payloadCreation.createPayload(gr);
HoodieOperation operation = HoodieOperation.fromValue(record.getRowKind().toByteValue());
return new HoodieAvroRecord<>(hoodieKey, payload, operation);
} | 3.68 |
hmily_HmilyRepositoryNode_getHmilyTransactionRealPath | /**
* Get hmily transaction real path.
*
* @param transactionId transaction id
* @return hmily transaction real path
*/
public String getHmilyTransactionRealPath(final Long transactionId) {
return Joiner.on("/").join(getHmilyTransactionRootPath(), transactionId);
} | 3.68 |
morf_HumanReadableStatementHelper_generateFieldValueString | /**
* Generates a string describing a field or literal value.
*
* @param field the field to describe.
* @return a string containing the literal value.
*/
private static String generateFieldValueString(final AliasedField field) {
if (field instanceof CaseStatement) {
final StringBuilder sb = new StringBuilder("(");
for (WhenCondition when : ((CaseStatement)field).getWhenConditions()) {
if (sb.length() > 1) {
sb.append("; ");
}
sb.append(String.format("%s if %s", generateFieldValueString(when.getValue()), generateCriterionString(when.getCriterion(), false)));
}
if (sb.length() > 0) {
sb.append("; otherwise ");
}
sb.append(generateFieldValueString(((CaseStatement)field).getDefaultValue()));
return sb.append(')').toString();
} else if (field instanceof Cast) {
return generateFieldValueString(((Cast)field).getExpression());
} else if (field instanceof ConcatenatedField) {
return "the concatenation of " + generateFieldSymbolStrings(((ConcatenatedField)field).getConcatenationFields());
} else if (field instanceof FieldFromSelect) {
return generateSelectStatementString(((FieldFromSelect)field).getSelectStatement(), false);
} else if (field instanceof FieldFromSelectFirst) {
return generateSelectStatementString(((FieldFromSelectFirst)field).getSelectFirstStatement(), false);
} else if (field instanceof FieldLiteral) {
final String value = ((FieldLiteral)field).getValue();
if (NumberUtils.isNumber(value)) {
return value;
} else {
return generateLiteral(value);
}
} else if (field instanceof FieldReference) {
return ((FieldReference)field).getName();
} else if (field instanceof Function) {
final FunctionType type = ((Function)field).getType();
List<AliasedField> args = ((Function)field).getArguments();
if (type == FunctionType.COUNT && args.isEmpty()) {
// Handle COUNT(*) as a special case
return "record count";
} else {
final FunctionTypeMetaData function = functionTypeMetaData.get(type);
final StringBuilder sb = new StringBuilder(function.prefix);
boolean comma = false;
if (function.reverseArgs) {
args = Lists.reverse(args);
}
for (AliasedField arg : args) {
if (comma) {
sb.append(function.sep);
} else {
comma = true;
}
sb.append(generateFieldValueString(arg));
}
return sb.append(function.suffix).toString();
}
} else if (field instanceof MathsField) {
final MathsField maths = (MathsField)field;
return String.format("%s %s %s", generateFieldValueString(maths.getLeftField()), maths.getOperator(), generateFieldValueString(maths.getRightField()));
} else if (field instanceof BracketedExpression) {
final BracketedExpression bracketExpression = (BracketedExpression)field;
return String.format("(%s)", generateFieldValueString(bracketExpression.getInnerExpression()));
} else {
throw new UnsupportedOperationException("Unable to generate data upgrade string for: [" + field.getClass().getName() + "]");
}
} | 3.68 |
hadoop_Preconditions_getDefaultCheckStateMSG | /* @VisibleForTesting */
static String getDefaultCheckStateMSG() {
return CHECK_STATE_EX_MESSAGE;
} | 3.68 |
hadoop_ResourceSkyline_getJobInputDataSize | /**
* Get the job's input data size.
*
* @return job's input data size.
*/
public final double getJobInputDataSize() {
return jobInputDataSize;
} | 3.68 |
hadoop_SubApplicationEntityReader_excludeFieldsFromInfoColFamily | /**
 * Exclude column prefixes via filters which are not required (based on fields
* to retrieve) from info column family. These filters are added to filter
* list which contains a filter for getting info column family.
*
* @param infoColFamilyList filter list for info column family.
*/
private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
// Events not required.
if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
infoColFamilyList.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
SubApplicationColumnPrefix.EVENT));
}
// info not required.
if (!hasField(fieldsToRetrieve, Field.INFO)) {
infoColFamilyList.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
SubApplicationColumnPrefix.INFO));
}
// is related to not required.
if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
infoColFamilyList.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
SubApplicationColumnPrefix.IS_RELATED_TO));
}
// relates to not required.
if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
infoColFamilyList.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
SubApplicationColumnPrefix.RELATES_TO));
}
} | 3.68 |
flink_ObjectColumnSummary_getNonNullCount | /** The number of non-null values in this column. */
@Override
public long getNonNullCount() {
return 0;
} | 3.68 |
framework_AbstractProperty_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addValueChangeListener(Property.ValueChangeListener)}
*/
@Override
@Deprecated
public void addListener(ValueChangeListener listener) {
addValueChangeListener(listener);
} | 3.68 |
starts_AnnotationVisitor_visitEnd | /**
* Visits the end of the annotation.
*/
public void visitEnd() {
if (av != null) {
av.visitEnd();
}
} | 3.68 |
flink_PythonEnvUtils_resetCallbackClientExecutorService | /**
 * Reset a daemon thread to the callback client thread pool so that the callback server can be
 * terminated when the gateway server is shutting down. We need to shut down the non-daemon
 * thread first, then set a new thread created by a daemon thread factory on the ExecutorService.
*
* @param gatewayServer the gateway which creates the callback server.
*/
private static void resetCallbackClientExecutorService(GatewayServer gatewayServer)
throws NoSuchFieldException, IllegalAccessException, NoSuchMethodException,
InvocationTargetException {
CallbackClient callbackClient = (CallbackClient) gatewayServer.getCallbackClient();
// The Java API of py4j does not provide approach to set "daemonize_connections" parameter.
// Use reflect to daemonize the connection thread.
Field executor = CallbackClient.class.getDeclaredField("executor");
executor.setAccessible(true);
((ScheduledExecutorService) executor.get(callbackClient)).shutdown();
executor.set(callbackClient, Executors.newScheduledThreadPool(1, Thread::new));
Method setupCleaner = CallbackClient.class.getDeclaredMethod("setupCleaner");
setupCleaner.setAccessible(true);
setupCleaner.invoke(callbackClient);
} | 3.68 |
shardingsphere-elasticjob_JobFacade_isExecuteMisfired | /**
     * Judge whether the job needs to execute misfired tasks.
*
* @param shardingItems sharding items
* @return need to execute misfire tasks or not
*/
public boolean isExecuteMisfired(final Collection<Integer> shardingItems) {
return configService.load(true).isMisfire() && !isNeedSharding() && !executionService.getMisfiredJobItems(shardingItems).isEmpty();
} | 3.68 |
hbase_SplitLogWorker_processSyncReplicationWAL | // returns whether we need to continue the split work
private static boolean processSyncReplicationWAL(String name, Configuration conf,
RegionServerServices server, FileSystem fs, Path walDir) throws IOException {
Path walFile = new Path(walDir, name);
String filename = walFile.getName();
Optional<String> optSyncPeerId =
AbstractWALProvider.getSyncReplicationPeerIdFromWALName(filename);
if (!optSyncPeerId.isPresent()) {
return true;
}
String peerId = optSyncPeerId.get();
ReplicationPeerImpl peer =
server.getReplicationSourceService().getReplicationPeers().getPeer(peerId);
if (peer == null || !peer.getPeerConfig().isSyncReplication()) {
return true;
}
Pair<SyncReplicationState, SyncReplicationState> stateAndNewState =
peer.getSyncReplicationStateAndNewState();
if (
stateAndNewState.getFirst().equals(SyncReplicationState.ACTIVE)
&& stateAndNewState.getSecond().equals(SyncReplicationState.NONE)
) {
// copy the file to remote and overwrite the previous one
String remoteWALDir = peer.getPeerConfig().getRemoteWALDir();
Path remoteWALDirForPeer = ReplicationUtils.getPeerRemoteWALDir(remoteWALDir, peerId);
Path tmpRemoteWAL = new Path(remoteWALDirForPeer, filename + ".tmp");
FileSystem remoteFs = ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir);
try (FSDataInputStream in = fs.open(walFile);
FSDataOutputStream out = remoteFs.createNonRecursive(tmpRemoteWAL, true,
CommonFSUtils.getDefaultBufferSize(remoteFs),
remoteFs.getDefaultReplication(tmpRemoteWAL), remoteFs.getDefaultBlockSize(tmpRemoteWAL),
null)) {
IOUtils.copy(in, out);
}
Path toCommitRemoteWAL =
new Path(remoteWALDirForPeer, filename + ReplicationUtils.RENAME_WAL_SUFFIX);
// Some FileSystem implementations may not support atomic rename so we need to do it in two
// phases
FSUtils.renameFile(remoteFs, tmpRemoteWAL, toCommitRemoteWAL);
FSUtils.renameFile(remoteFs, toCommitRemoteWAL, new Path(remoteWALDirForPeer, filename));
} else if (
(stateAndNewState.getFirst().equals(SyncReplicationState.ACTIVE)
&& stateAndNewState.getSecond().equals(SyncReplicationState.STANDBY))
|| stateAndNewState.getFirst().equals(SyncReplicationState.STANDBY)
) {
// check whether we still need to process this file
// actually we only write wal file which name is ended with .syncrep in A state, and after
// transiting to a state other than A, we will reopen all the regions so the data in the wal
// will be flushed so the wal file will be archived soon. But it is still possible that there
// is a server crash when we are transiting from A to S, to simplify the logic of the transit
// procedure, here we will also check the remote snapshot directory in state S, so that we do
// not need wait until all the wal files with .syncrep suffix to be archived before finishing
// the procedure.
String remoteWALDir = peer.getPeerConfig().getRemoteWALDir();
Path remoteSnapshotDirForPeer = ReplicationUtils.getPeerSnapshotWALDir(remoteWALDir, peerId);
FileSystem remoteFs = ReplicationUtils.getRemoteWALFileSystem(conf, remoteWALDir);
if (remoteFs.exists(new Path(remoteSnapshotDirForPeer, filename))) {
// the file has been replayed when the remote cluster was transited from S to DA, the
// content will be replicated back to us so give up split it.
LOG.warn("Giveup splitting {} since it has been replayed in the remote cluster and "
+ "the content will be replicated back", filename);
return false;
}
}
return true;
} | 3.68 |
pulsar_ManagedCursorImpl_getNextAvailablePosition | /**
     * Checks whether the given position is part of a deleted range and, if so, returns the next
     * position after the range's upper end, as all the messages up to that point are deleted.
*
* @param position
* @return next available position
*/
public PositionImpl getNextAvailablePosition(PositionImpl position) {
Range<PositionImpl> range = individualDeletedMessages.rangeContaining(position.getLedgerId(),
position.getEntryId());
if (range != null) {
PositionImpl nextPosition = range.upperEndpoint().getNext();
return (nextPosition != null && nextPosition.compareTo(position) > 0) ? nextPosition : position.getNext();
}
return position.getNext();
} | 3.68 |
framework_LocatorUtil_indexOfIgnoringQuoted | /**
     * Finds the first occurrence of a character that is not inside quotes, starting
     * from the beginning of the string.
*
* @param str
* Full string for searching
* @param find
* Character we want to find
* @return Index of character. -1 if character not found
*/
protected static int indexOfIgnoringQuoted(String str, char find) {
return indexOfIgnoringQuoted(str, find, 0);
} | 3.68 |
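A standalone sketch of the behaviour described above (not Vaadin's actual implementation): scan the string, toggle an "inside quotes" flag on single or double quotes, and report the first unquoted match.

public class QuoteAwareIndexOf {
  static int indexOfIgnoringQuoted(String str, char find, int startingAt) {
    boolean quoted = false;
    char quoteChar = 0;
    for (int i = startingAt; i < str.length(); i++) {
      char c = str.charAt(i);
      if (quoted) {
        if (c == quoteChar) {
          quoted = false; // closing quote ends the quoted section
        }
      } else if (c == '"' || c == '\'') {
        quoted = true;    // opening quote starts a quoted section
        quoteChar = c;
      } else if (c == find) {
        return i;         // first match outside quotes
      }
    }
    return -1;
  }

  public static void main(String[] args) {
    // the '/' inside the quoted segment is skipped; prints 10
    System.out.println(indexOfIgnoringQuoted("foo[\"a/b\"]/bar", '/', 0));
  }
}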
flink_JobVertex_getConfiguration | /**
* Returns the vertex's configuration object which can be used to pass custom settings to the
* task at runtime.
*
* @return the vertex's configuration object
*/
public Configuration getConfiguration() {
if (this.configuration == null) {
this.configuration = new Configuration();
}
return this.configuration;
} | 3.68 |
hbase_BaseLoadBalancer_preBalanceCluster | /**
* Called before actually executing balanceCluster. The sub classes could override this method to
* do some initialization work.
*/
protected void
preBalanceCluster(Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) {
} | 3.68 |
hadoop_AbstractDelegationTokenBinding_serviceStart | /**
* Service startup: create the secret manager.
* @throws Exception failure.
*/
@Override
protected void serviceStart() throws Exception {
super.serviceStart();
secretManager = createSecretMananger();
} | 3.68 |
framework_ApplicationConnection_getConfiguration | /**
* Gets the {@link ApplicationConfiguration} for the current application.
*
* @see ApplicationConfiguration
* @return the configuration for this application
*/
public ApplicationConfiguration getConfiguration() {
return configuration;
} | 3.68 |
morf_TableReference_getAlias | /**
* Get the alias for the table
*
* @return the alias
*/
public String getAlias() {
return alias;
} | 3.68 |
hbase_HBaseTestingUtility_getAdmin | /**
   * Returns an Admin instance which is shared between HBaseTestingUtility instance users. Closing
   * it has no effect; it will be closed automatically when the cluster shuts down.
*/
public Admin getAdmin() throws IOException {
if (hbaseAdmin == null) {
this.hbaseAdmin = getConnection().getAdmin();
}
return hbaseAdmin;
} | 3.68 |
framework_TreeTable_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addCollapseListener(CollapseListener)}
*/
@Deprecated
public void addListener(CollapseListener listener) {
addCollapseListener(listener);
} | 3.68 |
framework_LayoutDependencyTree_getVerticalLayoutTargetsJsArray | /**
* Returns a JsArrayString array of connectorIds for managed layouts that
* are waiting for vertical layouting.
*
* @return JsArrayString of connectorIds
*/
public JsArrayString getVerticalLayoutTargetsJsArray() {
return getLayoutQueue(VERTICAL).dump();
} | 3.68 |
framework_VCalendar_setEventCaptionAsHtml | /**
* Sets whether the event captions are rendered as HTML.
* <p>
* If set to true, the captions are rendered in the browser as HTML and the
* developer is responsible for ensuring no harmful HTML is used. If set to
* false, the caption is rendered in the browser as plain text.
* <p>
* The default is false, i.e. to render that caption as plain text.
*
* @param eventCaptionAsHtml
* {@code true} if the captions are rendered as HTML,
* {@code false} if rendered as plain text
*/
public void setEventCaptionAsHtml(boolean eventCaptionAsHtml) {
this.eventCaptionAsHtml = eventCaptionAsHtml;
} | 3.68 |
framework_SortEvent_getSource | /**
* Get access to the Grid that fired this event.
*
* @return the grid instance
*/
@Override
public Grid<T> getSource() {
return grid;
} | 3.68 |
hadoop_Quota_setQuota | /**
* Set quota for the federation path.
* @param path Federation path.
* @param namespaceQuota Name space quota.
* @param storagespaceQuota Storage space quota.
* @param type StorageType that the space quota is intended to be set on.
* @param checkMountEntry whether to check the path is a mount entry.
* @throws AccessControlException If the quota system is disabled or if
* checkMountEntry is true and the path is a mount entry.
*/
public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
StorageType type, boolean checkMountEntry) throws IOException {
if (!router.isQuotaEnabled()) {
throw new IOException("The quota system is disabled in Router.");
}
if (checkMountEntry && isMountEntry(path)) {
throw new AccessControlException(
"Permission denied: " + RouterRpcServer.getRemoteUser()
+ " is not allowed to change quota of " + path);
}
setQuotaInternal(path, null, namespaceQuota, storagespaceQuota, type);
} | 3.68 |
hbase_DefaultHeapMemoryTuner_addToRollingStats | /**
* Add the given context to the rolling tuner stats.
* @param context The tuner context.
*/
private void addToRollingStats(TunerContext context) {
rollingStatsForCacheMisses.insertDataValue(context.getCacheMissCount());
rollingStatsForFlushes
.insertDataValue(context.getBlockedFlushCount() + context.getUnblockedFlushCount());
rollingStatsForEvictions.insertDataValue(context.getEvictCount());
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_getFuncExprNodeDescWithUdfData | /**
     * This function creates an ExprNodeDesc for a UDF function given the children (arguments).
     * It will insert implicit type conversion functions if necessary. Currently this is only
     * used to handle CAST with Hive UDFs, so there is no need to check Flink functions.
*/
public static ExprNodeDesc getFuncExprNodeDescWithUdfData(
String udfName, TypeInfo typeInfo, ExprNodeDesc... children)
throws UDFArgumentException {
FunctionInfo fi;
try {
fi = HiveParserUtils.getFunctionInfo(udfName);
} catch (SemanticException e) {
throw new UDFArgumentException(e);
}
if (fi == null) {
throw new UDFArgumentException(udfName + " not found.");
}
GenericUDF genericUDF = fi.getGenericUDF();
if (genericUDF == null) {
throw new UDFArgumentException(
udfName + " is an aggregation function or a table function.");
}
// Add udfData to UDF if necessary
if (typeInfo != null) {
if (genericUDF instanceof SettableUDF) {
((SettableUDF) genericUDF).setTypeInfo(typeInfo);
}
}
List<ExprNodeDesc> childrenList = new ArrayList<>(children.length);
childrenList.addAll(Arrays.asList(children));
return ExprNodeGenericFuncDesc.newInstance(genericUDF, udfName, childrenList);
} | 3.68 |
querydsl_SimpleExpression_nullif | /**
* Create a {@code nullif(this, other)} expression
*
     * @param other the value to compare against
* @return nullif(this, other)
*/
public SimpleExpression<T> nullif(T other) {
return nullif(ConstantImpl.create(other));
} | 3.68 |
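A hedged usage sketch: `name` is a made-up string path, and the resulting expression mirrors SQL's `NULLIF(name, 'N/A')`, evaluating to NULL when the column equals the constant.

import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.SimpleExpression;
import com.querydsl.core.types.dsl.StringPath;

public class NullifSketch {
  public static void main(String[] args) {
    StringPath name = Expressions.stringPath("name");       // hypothetical column path
    SimpleExpression<String> cleaned = name.nullif("N/A");  // NULL when name = 'N/A'
    System.out.println(cleaned);                            // prints the expression, e.g. nullif(name, N/A)
  }
}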
framework_TreeData_removeItem | /**
* Remove a given item from this structure. Additionally, this will
* recursively remove any descendants of the item.
*
* @param item
* the item to remove, or null to clear all data
* @return this
*
* @throws IllegalArgumentException
* if the item does not exist in this structure
*/
public TreeData<T> removeItem(T item) {
if (!contains(item)) {
throw new IllegalArgumentException(
"Item '" + item + "' not in the hierarchy");
}
new ArrayList<>(getChildren(item)).forEach(child -> removeItem(child));
itemToWrapperMap.get(itemToWrapperMap.get(item).getParent())
.removeChild(item);
if (item != null) {
// remove non root item from backing map
itemToWrapperMap.remove(item);
}
return this;
} | 3.68 |
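A small usage sketch of the recursive removal described above, built on the TreeData API shown here; the item values are illustrative.

import com.vaadin.data.TreeData;

public class TreeDataRemoveDemo {
  public static void main(String[] args) {
    TreeData<String> data = new TreeData<>();
    data.addItem(null, "root");              // null parent = root level
    data.addItem("root", "child");
    data.addItem("child", "grandchild");
    data.removeItem("root");                 // also removes "child" and "grandchild"
    System.out.println(data.getRootItems()); // []
  }
}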
pulsar_NamespaceName_isV2 | /**
* Returns true if this is a V2 namespace prop/namespace-name.
* @return true if v2
*/
public boolean isV2() {
return cluster == null;
} | 3.68 |
framework_ConnectorTracker_hasDirtyConnectors | /**
     * Checks if there are dirty connectors.
*
* @return true if there are dirty connectors, false otherwise
*/
public boolean hasDirtyConnectors() {
return !getDirtyConnectors().isEmpty();
} | 3.68 |
framework_DataCommunicator_fetchItemsWithRange | /**
* Fetches a list of items from the DataProvider.
*
* @param offset
* the starting index of the range
* @param limit
* the max number of results
* @return the list of items in given range
*
* @since 8.1
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public List<T> fetchItemsWithRange(int offset, int limit) {
return (List<T>) getDataProvider().fetch(new Query(offset, limit,
backEndSorting, inMemorySorting, filter))
.collect(Collectors.toList());
} | 3.68 |
hudi_HoodieTimeline_getLogCompactionInflightInstant | // Returns Log compaction inflight instant
static HoodieInstant getLogCompactionInflightInstant(final String timestamp) {
return new HoodieInstant(State.INFLIGHT, LOG_COMPACTION_ACTION, timestamp);
} | 3.68 |
hbase_AsyncAdmin_getBackupMasters | /** Returns current backup master list wrapped by {@link CompletableFuture} */
default CompletableFuture<Collection<ServerName>> getBackupMasters() {
return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS))
.thenApply(ClusterMetrics::getBackupMasterNames);
} | 3.68 |
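A hedged usage sketch, assuming an AsyncAdmin instance has already been obtained elsewhere (for example from AsyncConnection.getAdmin()); it prints the backup masters once the future completes.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public final class BackupMastersSketch {
  // Hypothetical helper: logs the backup masters reported by the given AsyncAdmin.
  static CompletableFuture<Void> printBackupMasters(AsyncAdmin admin) {
    return admin.getBackupMasters()
        .thenAccept(backups ->
            backups.forEach(sn -> System.out.println("backup master: " + sn.getServerName())));
  }
}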
flink_Channel_setLocalStrategyComparator | /**
* Sets the local strategy comparator for this Channel.
*
* @param localStrategyComparator The local strategy comparator to set.
*/
public void setLocalStrategyComparator(TypeComparatorFactory<?> localStrategyComparator) {
this.localStrategyComparator = localStrategyComparator;
} | 3.68 |
dubbo_QosProcessHandler_isHttp | // G for GET, and P for POST
private static boolean isHttp(int magic) {
return magic == 'G' || magic == 'P';
} | 3.68 |
flink_EmbeddedLeaderService_shutdown | /**
* Shuts down this leader election service.
*
     * <p>This method does not perform a clean revocation of the leader status and sends no
     * notification to any leader listeners. It simply notifies all contenders and listeners that
     * the service is no longer available.
*/
public void shutdown() {
synchronized (lock) {
shutdownInternally(new Exception("Leader election service is shutting down"));
}
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_accept | // returns true if the specified path matches the prefix stored
// in this TestFilter.
@Override
public boolean accept(Path path) {
boolean find = false;
while (path != null) {
if (pStrings.contains(path.toUri().getPath())) {
find = true;
break;
}
path = path.getParent();
}
return find;
} | 3.68 |
hadoop_ResourceAllocationRequest_newInstance | /**
* @param startTime The start time that the capability is reserved for.
* @param endTime The end time that the capability is reserved for.
* @param capability {@link Resource} representing the capability of the
* resource allocation.
* @return {ResourceAllocationRequest} which represents the capability of
* the resource allocation for a time interval.
*/
@Public
@Stable
public static ResourceAllocationRequest newInstance(long startTime,
long endTime, Resource capability) {
ResourceAllocationRequest ra = Records.newRecord(
ResourceAllocationRequest.class);
ra.setEndTime(endTime);
ra.setStartTime(startTime);
ra.setCapability(capability);
return ra;
} | 3.68 |
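A hedged sketch of building a request with the factory above; the time window and capability values are illustrative, and `Resource.newInstance(memoryMB, vCores)` is assumed to be the standard YARN records factory.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest;

public class AllocationRequestSketch {
  public static void main(String[] args) {
    long start = System.currentTimeMillis();
    long end = start + 60 * 60 * 1000L; // a one-hour reservation window
    ResourceAllocationRequest request =
        ResourceAllocationRequest.newInstance(start, end, Resource.newInstance(4096, 4));
    System.out.println(request.getCapability());
  }
}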