name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68)
---|---|---|
hbase_RegionCoprocessorHost_preDelete | /**
* Supports Coprocessor 'bypass'.
* @param delete The Delete object
* @param edit The WALEdit object.
* @return true if default processing should be bypassed
* @exception IOException Exception
*/
public boolean preDelete(final Delete delete, final WALEdit edit) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return false;
}
boolean bypassable = true;
return execOperation(new RegionObserverOperationWithoutResult(bypassable) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preDelete(this, delete, edit);
}
});
} | 3.68 |
hbase_BloomFilterUtil_createBySize | /**
* Creates a Bloom filter chunk of the given size.
* @param byteSizeHint the desired number of bytes for the Bloom filter bit array. Will be
* increased so that folding is possible.
* @param errorRate target false positive rate of the Bloom filter
* @param hashType Bloom filter hash function type
* @param foldFactor fold factor; the byte size is adjusted so that folding by this factor remains possible
* @param bloomType the Bloom filter type
* @return the new Bloom filter of the desired size
*/
public static BloomFilterChunk createBySize(int byteSizeHint, double errorRate, int hashType,
int foldFactor, BloomType bloomType) {
BloomFilterChunk bbf = new BloomFilterChunk(hashType, bloomType);
bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor);
long bitSize = bbf.byteSize * 8;
bbf.maxKeys = (int) idealMaxKeys(bitSize, errorRate);
bbf.hashCount = optimalFunctionCount(bbf.maxKeys, bitSize);
// Adjust max keys to bring error rate closer to what was requested,
// because byteSize was adjusted to allow for folding, and hashCount was
// rounded.
bbf.maxKeys = (int) computeMaxKeys(bitSize, errorRate, bbf.hashCount);
return bbf;
} | 3.68 |
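The sizing above follows the standard Bloom filter formulas. The standalone sketch below uses illustrative helper names (not the actual HBase `BloomFilterUtil` internals) to show how the ideal key count and hash-function count fall out of the bit size and target error rate:

```java
public final class BloomSizingSketch {

    // Maximum keys that keep the false-positive rate at or below errorRate
    // for a bit array of bitSize bits: n = m * (ln 2)^2 / -ln(p).
    static long idealMaxKeys(long bitSize, double errorRate) {
        return (long) (bitSize * Math.pow(Math.log(2), 2) / -Math.log(errorRate));
    }

    // Hash-function count that minimizes the false-positive rate: k = (m / n) * ln 2.
    static int optimalFunctionCount(long maxKeys, long bitSize) {
        return (int) Math.ceil(Math.log(2) * ((double) bitSize / maxKeys));
    }

    public static void main(String[] args) {
        long byteSizeHint = 128 * 1024;      // 128 KB bit array
        long bitSize = byteSizeHint * 8L;
        long maxKeys = idealMaxKeys(bitSize, 0.01);
        System.out.printf("maxKeys=%d, hashCount=%d%n",
            maxKeys, optimalFunctionCount(maxKeys, bitSize));
    }
}
```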
dubbo_ClassSourceScanner_adaptiveClasses | /**
* The required adaptive classes.
* For example: LoadBalance$Adaptive.class
* @return the adaptive classes, keyed by class name
*/
public Map<String, Class<?>> adaptiveClasses() {
List<String> res = spiClassesWithAdaptive().stream()
.map((c) -> c.getName() + "$Adaptive")
.collect(Collectors.toList());
return forNames(res);
} | 3.68 |
flink_StreamExecutionEnvironment_getNumberOfExecutionRetries | /**
* Gets the number of times the system will try to re-execute failed tasks. A value of {@code
* -1} indicates that the system default value (as defined in the configuration) should be used.
*
* @return The number of times the system will try to re-execute failed tasks.
* @deprecated This method will be replaced by {@link #getRestartStrategy}.
*/
@Deprecated
@PublicEvolving
public int getNumberOfExecutionRetries() {
return config.getNumberOfExecutionRetries();
} | 3.68 |
framework_BootstrapHandler_setupMainDiv | /**
* Writes the div element into which the actual Vaadin application is
* rendered.
* <p>
* Override this method if you want to add some custom html around the div
* element into which the actual Vaadin application will be rendered.
*
* @param context
* the bootstrap context
* @throws IOException
*/
private void setupMainDiv(BootstrapContext context) throws IOException {
String style = getMainDivStyle(context);
/*- Add classnames;
* .v-app
* .v-app-loading
*- Additionally added from javascript:
* <themeName, remove non-alphanum>
*/
List<Node> fragmentNodes = context.getBootstrapResponse()
.getFragmentNodes();
Element mainDiv = new Element(Tag.valueOf("div"), "");
mainDiv.attr("id", context.getAppId());
mainDiv.addClass("v-app");
mainDiv.addClass(context.getThemeName());
mainDiv.addClass(
context.getUIClass().getSimpleName().toLowerCase(Locale.ROOT));
if (style != null && !style.isEmpty()) {
mainDiv.attr("style", style);
}
mainDiv.appendElement("div").addClass("v-app-loading");
mainDiv.appendElement("noscript").append(
"You have to enable javascript in your browser to use an application built with Vaadin.");
fragmentNodes.add(mainDiv);
VaadinRequest request = context.getRequest();
VaadinService vaadinService = request.getService();
String vaadinLocation = vaadinService.getStaticFileLocation(request)
+ "/VAADIN/";
// Parameter appended to JS to bypass caches after version upgrade.
String versionQueryParam = "?v=" + Version.getFullVersion();
if (context.getPushMode().isEnabled()) {
// Load client-side dependencies for push support
String pushJS = vaadinLocation;
if (context.getRequest().getService().getDeploymentConfiguration()
.isProductionMode()) {
pushJS += ApplicationConstants.VAADIN_PUSH_JS;
} else {
pushJS += ApplicationConstants.VAADIN_PUSH_DEBUG_JS;
}
pushJS += versionQueryParam;
fragmentNodes.add(new Element(Tag.valueOf("script"), "")
.attr("type", "text/javascript").attr("src", pushJS));
}
String bootstrapLocation = vaadinLocation
+ ApplicationConstants.VAADIN_BOOTSTRAP_JS + versionQueryParam;
fragmentNodes.add(new Element(Tag.valueOf("script"), "")
.attr("type", "text/javascript")
.attr("src", bootstrapLocation));
Element mainScriptTag = new Element(Tag.valueOf("script"), "")
.attr("type", "text/javascript");
StringBuilder builder = new StringBuilder();
builder.append("//<![CDATA[\n");
builder.append("if (!window.vaadin) alert(" + JsonUtil.quote(
"Failed to load the bootstrap javascript: " + bootstrapLocation)
+ ");\n");
appendMainScriptTagContents(context, builder);
builder.append("//]]>");
mainScriptTag.appendChild(new DataNode(builder.toString()));
fragmentNodes.add(mainScriptTag);
} | 3.68 |
flink_TopNBuffer_getElement | /**
* Gets the record whose rank equals the given value.
*
* @param rank rank value to search for
* @return the record whose rank equals the given value, or null if no such record exists
*/
public RowData getElement(int rank) {
int curRank = 0;
for (Map.Entry<RowData, Collection<RowData>> entry : treeMap.entrySet()) {
Collection<RowData> collection = entry.getValue();
if (curRank + collection.size() >= rank) {
for (RowData elem : collection) {
curRank += 1;
if (curRank == rank) {
return elem;
}
}
} else {
curRank += collection.size();
}
}
return null;
} | 3.68 |
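The lookup simply walks the sorted buckets, skipping whole buckets until the one containing the target rank is reached. A simplified sketch with String values in place of RowData illustrates the same scan:

```java
import java.util.Collection;
import java.util.List;
import java.util.TreeMap;

public class RankLookupSketch {

    // Returns the element at the given 1-based rank, or null if the rank is out of range.
    static String getElement(TreeMap<Integer, Collection<String>> treeMap, int rank) {
        int curRank = 0;
        for (Collection<String> bucket : treeMap.values()) {
            if (curRank + bucket.size() >= rank) {
                for (String elem : bucket) {
                    if (++curRank == rank) {
                        return elem;
                    }
                }
            } else {
                curRank += bucket.size(); // skip the whole bucket
            }
        }
        return null;
    }

    public static void main(String[] args) {
        TreeMap<Integer, Collection<String>> buffer = new TreeMap<>();
        buffer.put(10, List.of("a", "b"));
        buffer.put(20, List.of("c"));
        System.out.println(getElement(buffer, 3)); // prints "c"
    }
}
```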
hbase_HFileLink_createPath | /**
* Create an HFileLink relative path for the table/region/family/hfile location
* @param table Table name
* @param region Region Name
* @param family Family Name
* @param hfile HFile Name
* @return the relative Path to open the specified table/region/family/hfile link
*/
public static Path createPath(final TableName table, final String region, final String family,
final String hfile) {
if (HFileLink.isHFileLink(hfile)) {
return new Path(family, hfile);
}
return new Path(family, HFileLink.createHFileLinkName(table, region, hfile));
} | 3.68 |
hadoop_YarnRegistryViewForProviders_getSelfRegistrationPath | /**
* Get the path to where the service has registered itself.
* Null until the service is registered
* @return the service registration path.
*/
public String getSelfRegistrationPath() {
return selfRegistrationPath;
} | 3.68 |
morf_AbstractSqlDialectTest_nullOrder | /**
* A database platform may need to specify the null order.
*
* <p>Descendant classes need to override this method if the SQL dialect requires an explicit null order.</p>
*
* @return the null order for an SQL dialect
*/
protected String nullOrder() {
return StringUtils.EMPTY;
} | 3.68 |
hadoop_ManifestCommitterSupport_getEtag | /**
* Get an etag from a FileStatus, provided it is an implementation
* of EtagSource and its etag is non-null/non-empty.
* @param status the status; may be null.
* @return the etag or null if not provided
*/
public static String getEtag(FileStatus status) {
if (status instanceof EtagSource) {
return ((EtagSource) status).getEtag();
} else {
return null;
}
} | 3.68 |
hbase_ScanQueryMatcher_checkColumn | // Used only for testing purposes
static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset, int length,
long ttl, byte type, boolean ignoreCount) throws IOException {
KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0,
HConstants.EMPTY_BYTE_ARRAY, 0, 0, bytes, offset, length);
MatchCode matchCode = columnTracker.checkColumn(kv, type);
if (matchCode == MatchCode.INCLUDE) {
return columnTracker.checkVersions(kv, ttl, type, ignoreCount);
}
return matchCode;
} | 3.68 |
hbase_Bytes_hashCode | /**
* Calculate the hash code for a given range of bytes.
* @param bytes array to hash
* @param offset offset to start from
* @param length length to hash
* @return the hash code for the given range of bytes
*/
public static int hashCode(byte[] bytes, int offset, int length) {
int hash = 1;
for (int i = offset; i < offset + length; i++)
hash = (31 * hash) + bytes[i];
return hash;
} | 3.68 |
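The loop is the same 31-based polynomial that `java.util.Arrays.hashCode(byte[])` uses, so hashing a full array produces an identical value; a quick standalone check (with the method copied locally) illustrates this:

```java
import java.util.Arrays;

public class BytesHashCheck {

    // Same 31-based polynomial over a sub-range, mirroring Bytes.hashCode above.
    static int hashCode(byte[] bytes, int offset, int length) {
        int hash = 1;
        for (int i = offset; i < offset + length; i++) {
            hash = (31 * hash) + bytes[i];
        }
        return hash;
    }

    public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4, 5};
        // Over the full range the result matches java.util.Arrays.hashCode(byte[]).
        System.out.println(hashCode(data, 0, data.length) == Arrays.hashCode(data)); // true
        // A sub-range hashes only the selected bytes, here {2, 3, 4}.
        System.out.println(hashCode(data, 1, 3));
    }
}
```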
framework_Notification_getStyle | /**
* @since 7.2
*
* @return the style name for this notification type.
*/
public String getStyle() {
return style;
} | 3.68 |
flink_CliFrontend_loadCustomCommandLine | /**
* Loads a class from the classpath that implements the CustomCommandLine interface.
*
* @param className The fully-qualified class name to load.
* @param params The constructor parameters
* @return the instantiated custom command line
*/
private static CustomCommandLine loadCustomCommandLine(String className, Object... params)
throws Exception {
Class<? extends CustomCommandLine> customCliClass =
Class.forName(className).asSubclass(CustomCommandLine.class);
// construct class types from the parameters
Class<?>[] types = new Class<?>[params.length];
for (int i = 0; i < params.length; i++) {
checkNotNull(params[i], "Parameters for custom command-lines may not be null.");
types[i] = params[i].getClass();
}
Constructor<? extends CustomCommandLine> constructor = customCliClass.getConstructor(types);
return constructor.newInstance(params);
} | 3.68 |
hbase_WALStreamReader_next | /**
* Read the next entry in WAL.
* <p/>
* In most cases you should just use this method, especially when reading a closed wal file for
* splitting or printing.
*/
default WAL.Entry next() throws IOException {
return next(null);
} | 3.68 |
framework_InMemoryDataProvider_setFilterByValue | /**
* Sets a filter that requires an item property to have a specific value.
* The property value and the provided value are compared using
* {@link Object#equals(Object)}. The filter replaces any filter that has
* been set or added previously.
*
* @see #setFilter(SerializablePredicate)
* @see #setFilter(ValueProvider, SerializablePredicate)
* @see #addFilterByValue(ValueProvider, Object)
*
* @param valueProvider
* value provider that gets the property value, not
* <code>null</code>
* @param requiredValue
* the value that the property must have for the filter to pass
*/
public default <V> void setFilterByValue(ValueProvider<T, V> valueProvider,
V requiredValue) {
setFilter(InMemoryDataProviderHelpers.createEqualsFilter(valueProvider,
requiredValue));
} | 3.68 |
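A minimal usage sketch, assuming a hypothetical Person bean with a getCity() accessor and the Vaadin 8 in-memory data provider API:

```java
import com.vaadin.data.provider.DataProvider;
import com.vaadin.data.provider.ListDataProvider;
import com.vaadin.ui.Grid;

public class FilterByValueExample {

    // Hypothetical bean used only for this illustration.
    public static class Person {
        private final String name;
        private final String city;

        public Person(String name, String city) {
            this.name = name;
            this.city = city;
        }

        public String getName() { return name; }
        public String getCity() { return city; }
    }

    static Grid<Person> buildGrid() {
        ListDataProvider<Person> dataProvider = DataProvider.ofItems(
                new Person("Alice", "Helsinki"),
                new Person("Bob", "Turku"));
        // Keep only items whose city equals "Helsinki"; replaces any previously set filter.
        dataProvider.setFilterByValue(Person::getCity, "Helsinki");
        Grid<Person> grid = new Grid<>(Person.class);
        grid.setDataProvider(dataProvider);
        return grid;
    }
}
```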
framework_VComboBox_getItemOffsetWidth | /*
* Gets the width of one menu item.
*/
int getItemOffsetWidth() {
List<MenuItem> items = getItems();
return items != null && !items.isEmpty()
? items.get(0).getOffsetWidth()
: 0;
} | 3.68 |
hadoop_CommitContext_commitOrFail | /**
* Commit the operation, throwing an exception on any failure.
* See {@code CommitOperations#commitOrFail(SinglePendingCommit)}.
* @param commit commit to execute
* @throws IOException on a failure
*/
public void commitOrFail(SinglePendingCommit commit) throws IOException {
commitOperations.commitOrFail(commit);
} | 3.68 |
hadoop_StorageStatistics_getName | /**
* Get the name of this StorageStatistics object.
* @return name of this StorageStatistics object
*/
public String getName() {
return name;
} | 3.68 |
framework_VaadinService_getDependencyFilters | /**
* Gets the filters which all resource dependencies are passed through
* before being sent to the client for loading.
*
* @see #initDependencyFilters(List)
*
* @since 8.1
* @return the dependency filters to pass resources dependencies through
* before loading
*/
public Iterable<DependencyFilter> getDependencyFilters() {
if (dependencyFilters == null) {
return Collections.emptyList();
}
return dependencyFilters;
} | 3.68 |
flink_RocksDBStateBackend_getNumberOfTransferingThreads | /** @deprecated Typo in method name. Use {@link #getNumberOfTransferThreads} instead. */
@Deprecated
public int getNumberOfTransferingThreads() {
return getNumberOfTransferThreads();
} | 3.68 |
flink_SideOutputDataStream_cache | /**
* Caches the intermediate result of the transformation. Only bounded streams are supported,
* and currently only blocking mode is supported. The cache is generated lazily the first time
* the intermediate result is computed. The cache is cleared when {@link
* CachedDataStream#invalidate()} is called or the {@link StreamExecutionEnvironment} is closed.
*
* @return a CachedDataStream that can be used in a later job to reuse the cached intermediate result.
*/
@PublicEvolving
public CachedDataStream<T> cache() {
return new CachedDataStream<>(this.environment, this.transformation);
} | 3.68 |
hadoop_FTPInputStream_seek | // We don't support seek.
@Override
public void seek(long pos) throws IOException {
throw new IOException("Seek not supported");
} | 3.68 |
flink_SubtaskStateStats_getProcessedData | /** @return the total number of processed bytes during the checkpoint. */
public long getProcessedData() {
return processedData;
} | 3.68 |
hbase_Compression_createPlainCompressionStream | /**
* Creates a compression stream without any additional wrapping into buffering streams.
*/
public CompressionOutputStream createPlainCompressionStream(OutputStream downStream,
Compressor compressor) throws IOException {
CompressionCodec codec = getCodec(conf);
((Configurable) codec).getConf().setInt("io.file.buffer.size", 32 * 1024);
return codec.createOutputStream(downStream, compressor);
} | 3.68 |
flink_FileInputFormat_registerInflaterInputStreamFactory | /**
* Registers a decompression algorithm through a {@link
* org.apache.flink.api.common.io.compression.InflaterInputStreamFactory} with a file extension
* for transparent decompression.
*
* @param fileExtension of the compressed files
* @param factory to create an {@link java.util.zip.InflaterInputStream} that handles the
* decompression format
*/
public static void registerInflaterInputStreamFactory(
String fileExtension, InflaterInputStreamFactory<?> factory) {
synchronized (INFLATER_INPUT_STREAM_FACTORIES) {
if (INFLATER_INPUT_STREAM_FACTORIES.put(fileExtension, factory) != null) {
LOG.warn(
"Overwriting an existing decompression algorithm for \"{}\" files.",
fileExtension);
}
}
} | 3.68 |
hudi_SparkBootstrapCommitActionExecutor_metadataBootstrap | /**
* Perform Metadata Bootstrap.
* @param partitionFilesList List of partitions and the files within those partitions
*/
protected Option<HoodieWriteMetadata<HoodieData<WriteStatus>>> metadataBootstrap(List<Pair<String, List<HoodieFileStatus>>> partitionFilesList) {
if (null == partitionFilesList || partitionFilesList.isEmpty()) {
return Option.empty();
}
HoodieTableMetaClient metaClient = table.getMetaClient();
String bootstrapInstantTime = HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS;
metaClient.getActiveTimeline().createNewInstant(
new HoodieInstant(State.REQUESTED, metaClient.getCommitActionType(), bootstrapInstantTime));
table.getActiveTimeline().transitionRequestedToInflight(new HoodieInstant(State.REQUESTED,
metaClient.getCommitActionType(), bootstrapInstantTime), Option.empty());
HoodieData<BootstrapWriteStatus> bootstrapWriteStatuses = runMetadataBootstrap(partitionFilesList);
HoodieWriteMetadata<HoodieData<WriteStatus>> result = new HoodieWriteMetadata<>();
updateIndexAndCommitIfNeeded(bootstrapWriteStatuses.map(w -> w), result);
// Delete the marker directory for the instant
WriteMarkersFactory.get(config.getMarkersType(), table, bootstrapInstantTime)
.quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism());
return Option.of(result);
} | 3.68 |
hadoop_BlockResolver_resolve | /**
* @param s the external reference.
* @return sequence of blocks that make up the reference.
*/
public Iterable<BlockProto> resolve(FileStatus s) {
List<Long> lengths = blockLengths(s);
ArrayList<BlockProto> ret = new ArrayList<>(lengths.size());
long tot = 0;
for (long l : lengths) {
tot += l;
ret.add(buildBlock(nextId(), l));
}
if (tot != s.getLen()) {
// log a warning?
throw new IllegalStateException(
"Expected " + s.getLen() + " found " + tot);
}
return ret;
} | 3.68 |
hudi_HoodieTableMetaClient_getSchemaFolderName | /**
* @return schema folder path
*/
public String getSchemaFolderName() {
return new Path(metaPath.get(), SCHEMA_FOLDER_NAME).toString();
} | 3.68 |
flink_NumericSummaryAggregator_combine | /** combine two aggregations. */
@Override
public void combine(Aggregator<T, NumericColumnSummary<T>> otherSameType) {
NumericSummaryAggregator<T> other = (NumericSummaryAggregator<T>) otherSameType;
nullCount += other.nullCount;
nanCount += other.nanCount;
infinityCount += other.infinityCount;
if (nonMissingCount == 0) {
nonMissingCount = other.nonMissingCount;
min = other.min;
max = other.max;
sum = other.sum;
mean = other.mean;
m2 = other.m2;
} else if (other.nonMissingCount != 0) {
long combinedCount = nonMissingCount + other.nonMissingCount;
min.combine(other.min);
max.combine(other.max);
sum.combine(other.sum);
double deltaMean = other.mean.value() - mean.value();
mean = mean.add(deltaMean * other.nonMissingCount / combinedCount);
m2 =
m2.add(other.m2)
.add(
deltaMean
* deltaMean
* nonMissingCount
* other.nonMissingCount
/ combinedCount);
nonMissingCount = combinedCount;
}
} | 3.68 |
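The mean and M2 updates above are the standard parallel merge of two partial aggregates (the Chan et al. formulation). A standalone sketch over plain doubles shows the same arithmetic without the aggregator wrappers:

```java
public class VarianceMergeSketch {
    long count;
    double mean;
    double m2; // sum of squared deviations from the current mean

    // Welford's online update for a single value.
    void add(double x) {
        count++;
        double delta = x - mean;
        mean += delta / count;
        m2 += delta * (x - mean);
    }

    // Merge another partial aggregate into this one (same update as combine() above).
    void merge(VarianceMergeSketch other) {
        if (other.count == 0) {
            return;
        }
        if (count == 0) {
            count = other.count;
            mean = other.mean;
            m2 = other.m2;
            return;
        }
        long combined = count + other.count;
        double deltaMean = other.mean - mean;
        mean += deltaMean * other.count / combined;
        m2 += other.m2 + deltaMean * deltaMean * count * other.count / combined;
        count = combined;
    }

    // Sample variance of the merged data.
    double variance() {
        return count > 1 ? m2 / (count - 1) : Double.NaN;
    }

    public static void main(String[] args) {
        VarianceMergeSketch a = new VarianceMergeSketch();
        VarianceMergeSketch b = new VarianceMergeSketch();
        for (double x : new double[] {1, 2, 3}) a.add(x);
        for (double x : new double[] {4, 5, 6}) b.add(x);
        a.merge(b);
        System.out.println(a.mean);       // 3.5
        System.out.println(a.variance()); // 3.5 (sample variance of 1..6)
    }
}
```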
hibernate-validator_Contracts_assertValueNotNull | /**
* Asserts that the given object is not {@code null}.
*
* @param o The object to check.
* @param name The name of the value to check. A message of the form
* "<name> must not be null" will be used as message of
* the resulting exception if the given object is {@code null}.
*
* @throws IllegalArgumentException In case the given object is {@code null}.
*/
public static void assertValueNotNull(Object o, String name) {
if ( o == null ) {
throw LOG.getIllegalArgumentException( MESSAGES.mustNotBeNull( name ) );
}
} | 3.68 |
hbase_HFileReaderImpl_getCurCellSerializedSize | // Returns the #bytes in HFile for the current cell. Used to skip these many bytes in current
// HFile block's buffer so as to position to the next cell.
private int getCurCellSerializedSize() {
int curCellSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen + currMemstoreTSLen;
if (this.reader.getFileContext().isIncludesTags()) {
curCellSize += Bytes.SIZEOF_SHORT + currTagsLen;
}
return curCellSize;
} | 3.68 |
framework_RpcDataSourceConnector_updateRowData | /**
* Updates row data based on row key.
*
* @since 7.6
* @param row
* new row object
*/
protected void updateRowData(JsonObject row) {
int index = indexOfKey(getRowKey(row));
if (index >= 0) {
setRowData(index, Collections.singletonList(row));
}
} | 3.68 |
rocketmq-connect_FieldsMetadata_extractRecordValuePk | /**
* record value
*
* @param tableName
* @param configuredPkFields
* @param valueSchema
* @param headers
* @param allFields
* @param keyFieldNames
*/
private static void extractRecordValuePk(
final String tableName,
final List<String> configuredPkFields,
final Schema valueSchema,
final KeyValue headers,
final Map<String, SinkRecordField> allFields,
final Set<String> keyFieldNames
) {
if (valueSchema == null) {
throw new ConnectException(String.format(
"PK mode for table '%s' is %s, but record value schema is missing",
tableName,
JdbcSinkConfig.PrimaryKeyMode.RECORD_VALUE)
);
}
List<String> pkFields = new ArrayList<>(configuredPkFields);
if (pkFields.isEmpty()) {
for (Field keyField : valueSchema.getFields()) {
keyFieldNames.add(keyField.getName());
}
} else {
for (Field keyField : valueSchema.getFields()) {
keyFieldNames.add(keyField.getName());
}
for (String fieldName : pkFields) {
if (!keyFieldNames.contains(fieldName)) {
throw new ConnectException(String.format(
"PK mode for table '%s' is %s with configured PK fields %s, but record value "
+ "schema does not contain field: %s",
tableName,
JdbcSinkConfig.PrimaryKeyMode.RECORD_VALUE,
pkFields,
fieldName
));
}
}
keyFieldNames.addAll(pkFields);
}
for (String fieldName : keyFieldNames) {
final Schema fieldSchema = valueSchema.getField(fieldName).getSchema();
allFields.put(fieldName, new SinkRecordField(fieldSchema, fieldName, true));
}
} | 3.68 |
framework_DragSourceExtensionConnector_removeDragListeners | /**
* Removes dragstart and dragend event listeners from the given DOM element.
*
* @param element
* DOM element to remove event listeners from.
*/
protected void removeDragListeners(Element element) {
EventTarget target = element.cast();
target.removeEventListener(Event.DRAGSTART, dragStartListener);
target.removeEventListener(Event.DRAGEND, dragEndListener);
} | 3.68 |
hadoop_UriUtils_generateUniqueTestPath | /**
* Generate unique test path for multiple user tests.
*
* @return root test path
*/
public static String generateUniqueTestPath() {
String testUniqueForkId = System.getProperty("test.unique.fork.id");
return testUniqueForkId == null ? "/test" : "/" + testUniqueForkId + "/test";
} | 3.68 |
hudi_BootstrapOperator_waitForBootstrapReady | /**
* Wait for other bootstrap tasks to finish the index bootstrap.
*/
private void waitForBootstrapReady(int taskID) {
int taskNum = getRuntimeContext().getNumberOfParallelSubtasks();
int readyTaskNum = 1;
while (taskNum != readyTaskNum) {
try {
readyTaskNum = aggregateManager.updateGlobalAggregate(BootstrapAggFunction.NAME + conf.getString(FlinkOptions.TABLE_NAME), taskID, new BootstrapAggFunction());
LOG.info("Waiting for other bootstrap tasks to complete, taskId = {}.", taskID);
TimeUnit.SECONDS.sleep(5);
} catch (Exception e) {
LOG.warn("Update global task bootstrap summary error", e);
}
}
} | 3.68 |
hbase_AssignmentVerificationReport_getTotalFavoredAssignments | /**
* Return the number of regions assigned to their favored nodes
* @return number of regions assigned to their favored nodes
*/
int getTotalFavoredAssignments() {
return totalFavoredAssignments;
} | 3.68 |
hadoop_GetAllResourceTypeInfoResponsePBImpl_initResourceTypeInfosList | // Once this is called, resourceTypeInfo will never be null - until a getProto
// is called.
private void initResourceTypeInfosList() {
if (this.resourceTypeInfo != null) {
return;
}
GetAllResourceTypeInfoResponseProtoOrBuilder p = viaProto ? proto : builder;
List<ResourceTypeInfoProto> list = p.getResourceTypeInfoList();
resourceTypeInfo = new ArrayList<ResourceTypeInfo>();
for (ResourceTypeInfoProto a : list) {
resourceTypeInfo.add(convertFromProtoFormat(a));
}
} | 3.68 |
flink_SegmentPartitionFileWriter_writeSegmentFinishFile | /**
* Writes a segment-finish file when the current segment is complete. The downstream can
* determine if the current segment is complete by checking for the existence of the
* segment-finish file.
*
* <p>Note that the method is only called by the flushing thread.
*/
private void writeSegmentFinishFile(
TieredStoragePartitionId partitionId, int subpartitionId, int segmentId) {
try {
WritableByteChannel channel = subpartitionChannels[subpartitionId];
if (channel != null) {
channel.close();
subpartitionChannels[subpartitionId] = null;
}
SegmentPartitionFile.writeSegmentFinishFile(
basePath, partitionId, subpartitionId, segmentId);
} catch (IOException exception) {
ExceptionUtils.rethrow(exception);
}
} | 3.68 |
framework_VTabsheet_scheduleBlur | /**
* Schedule a new blur event for a deferred execution.
*
* @param blurSource
* the source tab
*/
private void scheduleBlur(Tab blurSource) {
if (nextBlurScheduleCancelled) {
// This will set the stopNextBlurCommand back to false as well.
cancelLastBlurSchedule();
// Reset the status.
nextBlurScheduleCancelled = false;
return;
}
cancelLastBlurSchedule();
blurCommand = new BlurCommand(blurSource);
blurCommand.scheduleDeferred();
} | 3.68 |
flink_HardwareDescription_getSizeOfPhysicalMemory | /**
* Returns the size of physical memory in bytes available on the compute node.
*
* @return the size of physical memory in bytes available on the compute node
*/
public long getSizeOfPhysicalMemory() {
return this.sizeOfPhysicalMemory;
} | 3.68 |
framework_ApplicationConnection_getConnector | /**
* Get either an existing ComponentConnector or create a new
* ComponentConnector with the given type and id.
*
* If a ComponentConnector with the given id already exists, returns it.
* Otherwise creates and registers a new ComponentConnector of the given
* type.
*
* @param connectorId
* Id of the paintable
* @param connectorType
* Type of the connector, as passed from the server side
*
* @return Either an existing ComponentConnector or a new ComponentConnector
* of the given type
*/
public ServerConnector getConnector(String connectorId, int connectorType) {
if (!connectorMap.hasConnector(connectorId)) {
return createAndRegisterConnector(connectorId, connectorType);
}
return connectorMap.getConnector(connectorId);
} | 3.68 |
framework_EventCellReference_isHeader | /**
* Checks whether the cell reference is for a cell in the header of the Grid.
*
* @since 7.5
* @return <code>true</code> if referenced cell is in the header,
* <code>false</code> if not
*/
public boolean isHeader() {
return section == Section.HEADER;
} | 3.68 |
flink_NettyPartitionRequestClient_requestSubpartition | /**
* Requests a remote intermediate result partition queue.
*
* <p>The request goes to the remote producer, for which this partition request client instance
* has been created.
*/
@Override
public void requestSubpartition(
final ResultPartitionID partitionId,
final int subpartitionIndex,
final RemoteInputChannel inputChannel,
int delayMs)
throws IOException {
checkNotClosed();
LOG.debug(
"Requesting subpartition {} of partition {} with {} ms delay.",
subpartitionIndex,
partitionId,
delayMs);
clientHandler.addInputChannel(inputChannel);
final PartitionRequest request =
new PartitionRequest(
partitionId,
subpartitionIndex,
inputChannel.getInputChannelId(),
inputChannel.getInitialCredit());
final ChannelFutureListener listener =
future -> {
if (!future.isSuccess()) {
clientHandler.removeInputChannel(inputChannel);
inputChannel.onError(
new LocalTransportException(
String.format(
"Sending the partition request to '%s [%s] (#%d)' failed.",
connectionId.getAddress(),
connectionId
.getResourceID()
.getStringWithMetadata(),
connectionId.getConnectionIndex()),
future.channel().localAddress(),
future.cause()));
sendToChannel(
new ConnectionErrorMessage(
future.cause() == null
? new RuntimeException(
"Cannot send partition request.")
: future.cause()));
}
};
if (delayMs == 0) {
ChannelFuture f = tcpChannel.writeAndFlush(request);
f.addListener(listener);
} else {
final ChannelFuture[] f = new ChannelFuture[1];
tcpChannel
.eventLoop()
.schedule(
() -> {
f[0] = tcpChannel.writeAndFlush(request);
f[0].addListener(listener);
},
delayMs,
TimeUnit.MILLISECONDS);
}
} | 3.68 |
hudi_PreferWriterConflictResolutionStrategy_getCandidateInstantsForTableServicesCommits | /**
* To find which instants are conflicting, we apply the following logic:
* get both completed instants and ingestion inflight commits that have happened since the last successful write.
* We need to check for write conflicts since they may have mutated the same files
* that are being newly created by the current write.
*/
private Stream<HoodieInstant> getCandidateInstantsForTableServicesCommits(HoodieActiveTimeline activeTimeline, HoodieInstant currentInstant) {
// Fetch list of completed commits.
Stream<HoodieInstant> completedCommitsStream =
activeTimeline
.getTimelineOfActions(CollectionUtils.createSet(COMMIT_ACTION, REPLACE_COMMIT_ACTION, COMPACTION_ACTION, DELTA_COMMIT_ACTION))
.filterCompletedInstants()
.findInstantsModifiedAfterByCompletionTime(currentInstant.getTimestamp())
.getInstantsAsStream();
// Fetch list of ingestion inflight commits.
Stream<HoodieInstant> inflightIngestionCommitsStream =
activeTimeline
.getTimelineOfActions(CollectionUtils.createSet(COMMIT_ACTION, DELTA_COMMIT_ACTION))
.filterInflights()
.getInstantsAsStream();
// Merge and sort the instants and return.
List<HoodieInstant> instantsToConsider = Stream.concat(completedCommitsStream, inflightIngestionCommitsStream)
.sorted(Comparator.comparing(o -> o.getCompletionTime()))
.collect(Collectors.toList());
LOG.info(String.format("Instants that may have conflict with %s are %s", currentInstant, instantsToConsider));
return instantsToConsider.stream();
} | 3.68 |
flink_RocksDBProperty_getRocksDBProperty | /**
* @return property string that can be used to query {@link
* RocksDB#getLongProperty(ColumnFamilyHandle, String)}.
*/
public String getRocksDBProperty() {
return String.format(ROCKS_DB_PROPERTY_FORMAT, property);
} | 3.68 |
morf_BaseDataSetReader_open | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider#open()
*/
@Override
public void open() {
// no-op
} | 3.68 |
hadoop_FederationStateStoreFacade_incrementCurrentKeyId | /**
* Increments the current key id in the state store.
*
* @return currentKeyId.
*/
public int incrementCurrentKeyId() {
return stateStore.incrementCurrentKeyId();
} | 3.68 |
framework_ColorPickerGradient_setBackgroundColor | /**
* Sets the background color.
*
* @param color
* the new background color
*/
public void setBackgroundColor(Color color) {
getState().bgColor = color.getCSS();
} | 3.68 |
hbase_ServerCrashProcedure_getRegionsOnCrashedServer | /** Returns List of Regions on crashed server. */
List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
return env.getMasterServices().getAssignmentManager().getRegionsOnServer(serverName);
} | 3.68 |
rocketmq-connect_AvroData_defaultValueFromConnect | // Convert default values from Connect data format to Avro's format, which is an
// org.codehaus.jackson.JsonNode. The default value is provided as an argument because even
// though you can get a default value from the schema, default values for complex structures need
// to perform the same translation but those defaults will be part of the original top-level
// (complex type) default value, not part of the child schema.
private static JsonNode defaultValueFromConnect(Schema schema, Object value) {
try {
// If this is a logical type, convert it from the convenient Java type to the underlying
// serializeable format
Object defaultVal = toAvroLogical(schema, value);
switch (schema.getFieldType()) {
case INT8:
return JsonNodeFactory.instance.numberNode(((Byte) defaultVal).intValue());
case INT16:
return JsonNodeFactory.instance.numberNode(((Short) defaultVal).intValue());
case INT32:
return JsonNodeFactory.instance.numberNode((Integer) defaultVal);
case INT64:
return JsonNodeFactory.instance.numberNode((Long) defaultVal);
case FLOAT32:
return JsonNodeFactory.instance.numberNode((Float) defaultVal);
case FLOAT64:
return JsonNodeFactory.instance.numberNode((Double) defaultVal);
case BOOLEAN:
return JsonNodeFactory.instance.booleanNode((Boolean) defaultVal);
case STRING:
return JsonNodeFactory.instance.textNode((String) defaultVal);
case BYTES:
if (defaultVal instanceof byte[]) {
return JsonNodeFactory.instance.textNode(new String((byte[]) defaultVal,
StandardCharsets.ISO_8859_1));
} else {
return JsonNodeFactory.instance.textNode(new String(((ByteBuffer) defaultVal).array(),
StandardCharsets.ISO_8859_1));
}
case ARRAY: {
ArrayNode array = JsonNodeFactory.instance.arrayNode();
for (Object elem : (Collection<Object>) defaultVal) {
array.add(defaultValueFromConnect(schema.getValueSchema(), elem));
}
return array;
}
case MAP:
if (schema.getKeySchema().getFieldType() == FieldType.STRING && !schema.getKeySchema().isOptional()) {
ObjectNode node = JsonNodeFactory.instance.objectNode();
for (Map.Entry<String, Object> entry : ((Map<String, Object>) defaultVal).entrySet()) {
JsonNode entryDef = defaultValueFromConnect(schema.getValueSchema(), entry.getValue());
node.put(entry.getKey(), entryDef);
}
return node;
} else {
ArrayNode array = JsonNodeFactory.instance.arrayNode();
for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) defaultVal).entrySet()) {
JsonNode keyDefault = defaultValueFromConnect(schema.getKeySchema(), entry.getKey());
JsonNode valDefault = defaultValueFromConnect(schema.getValueSchema(), entry.getValue());
ArrayNode jsonEntry = JsonNodeFactory.instance.arrayNode();
jsonEntry.add(keyDefault);
jsonEntry.add(valDefault);
array.add(jsonEntry);
}
return array;
}
case STRUCT: {
ObjectNode node = JsonNodeFactory.instance.objectNode();
Struct struct = (Struct) defaultVal;
for (Field field : schema.getFields()) {
JsonNode fieldDef = defaultValueFromConnect(field.getSchema(), struct.get(field));
node.set(field.getName(), fieldDef);
}
return node;
}
default:
throw new ConnectException("Unknown schema type:" + schema.getFieldType());
}
} catch (ClassCastException e) {
throw new ConnectException("Invalid type used for default value of "
+ schema.getFieldType()
+ " field: "
+ schema.getDefaultValue().getClass());
}
} | 3.68 |
hbase_UserProvider_create | /**
* Wraps an underlying {@code UserGroupInformation} instance.
* @param ugi The base Hadoop user
* @return a User wrapping the given ugi, or null if ugi is null
*/
public User create(UserGroupInformation ugi) {
if (ugi == null) {
return null;
}
return new User.SecureHadoopUser(ugi, groupCache);
} | 3.68 |
flink_OperatorChain_close | /**
* This method releases all resources of the record writer output. It stops the output flushing
* thread (if there is one) and releases all buffers currently held by the output serializers.
*
* <p>This method should never fail.
*/
public void close() throws IOException {
closer.close();
} | 3.68 |
hbase_ExceptionUtil_rethrowIfInterrupt | /** Throw InterruptedIOException if t was an interruption, nothing otherwise. */
public static void rethrowIfInterrupt(Throwable t) throws InterruptedIOException {
InterruptedIOException iie = asInterrupt(t);
if (iie != null) {
throw iie;
}
} | 3.68 |
morf_UnionSetOperator_validateOrderBy | /**
* Don't allow sub-select statements to have ORDER BY statements, as this is
* an invalid construct in SQL-92.
*
* @param selectStatement the select statement to be validated.
*/
private void validateOrderBy(SelectStatement selectStatement) throws IllegalArgumentException {
if (!selectStatement.getOrderBys().isEmpty()) {
throw new IllegalArgumentException("Only the parent select statement can contain an order by statement");
}
} | 3.68 |
hbase_TableDescriptorBuilder_getNormalizerTargetRegionCount | /**
* Check if there is the target region count. If so, the normalize plan will be calculated based
* on the target region count.
* @return target region count after normalize done
*/
@Override
public int getNormalizerTargetRegionCount() {
return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
Integer.valueOf(-1));
} | 3.68 |
MagicPlugin_SpellAction_load | /**
* This mechanism never worked properly and is no longer called.
* Actions that need to store data should interact with CastContext.getVariables instead.
*/
@Deprecated
default void load(Mage mage, ConfigurationSection data) { } | 3.68 |
hadoop_TFile_getBlockContainsKey | /**
* If greater is true, returns the beginning location of the block
* containing the key strictly greater than the input key. If greater is
* false, returns the beginning location of the block containing the key
* greater than or equal to the input key.
*
* @param key
* the input key
* @param greater
* boolean flag
* @return the location of the matching block, or the end location if no such block exists
* @throws IOException
*/
Location getBlockContainsKey(RawComparable key, boolean greater)
throws IOException {
if (!isSorted()) {
throw new RuntimeException("Seeking in unsorted TFile");
}
checkTFileDataIndex();
int blkIndex =
(greater) ? tfileIndex.upperBound(key) : tfileIndex.lowerBound(key);
if (blkIndex < 0) return end;
return new Location(blkIndex, 0);
} | 3.68 |
flink_SpillingThread_disposeSortBuffers | /** Releases the memory that is registered for in-memory sorted run generation. */
private void disposeSortBuffers(boolean releaseMemory) {
CircularElement<E> element;
while ((element = this.dispatcher.poll(SortStage.READ)) != null) {
element.getBuffer().dispose();
if (releaseMemory) {
this.memManager.release(element.getMemory());
}
}
} | 3.68 |
flink_QueryableStateConfiguration_getStateServerPortRange | /**
* Returns the port range where the queryable state server can listen. See {@link
* org.apache.flink.configuration.QueryableStateOptions#SERVER_PORT_RANGE
* QueryableStateOptions.SERVER_PORT_RANGE}.
*/
public Iterator<Integer> getStateServerPortRange() {
return qserverPortRange;
} | 3.68 |
hadoop_IOStatisticsStoreImpl_getMaximumReference | /**
* Get a reference to the atomic instance providing the
* value for a specific maximum. This is useful if
* the value is passed around.
* @param key statistic name
* @return the reference
* @throws NullPointerException if there is no entry of that name
*/
@Override
public AtomicLong getMaximumReference(String key) {
return lookup(maximumMap, key);
} | 3.68 |
hbase_RegionCoprocessorHost_postClose | /**
* Invoked after a region is closed
* @param abortRequested true if the server is aborting
*/
public void postClose(final boolean abortRequested) {
try {
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postClose(this, abortRequested);
}
@Override
public void postEnvCall() {
shutdown(this.getEnvironment());
}
});
} catch (IOException e) {
LOG.warn(e.toString(), e);
}
} | 3.68 |
hbase_OrderedInt64_decodeLong | /**
* Read a {@code long} value from the buffer {@code src}.
* @param src the {@link PositionedByteRange} to read the {@code long} from
* @return the {@code long} read from the buffer
*/
public long decodeLong(PositionedByteRange src) {
return OrderedBytes.decodeInt64(src);
} | 3.68 |
framework_NotificationElement_getCaption | /**
* Returns the caption of the Notification element.
*
* @since 8.0
* @return the caption of the Notification element
*/
public String getCaption() {
WebElement popup = findElement(By.className("popupContent"));
WebElement caption = popup.findElement(By.tagName("h1"));
return caption.getText();
} | 3.68 |
hbase_MetricsSource_refreshAgeOfLastShippedOp | /**
* Convenience method to use the last given timestamp to refresh the age of the last edit. Used
* when replication fails and need to keep that metric accurate.
* @param walGroupId id of the group to update
*/
public void refreshAgeOfLastShippedOp(String walGroupId) {
Long lastTimestamp = this.lastShippedTimeStamps.get(walGroupId);
if (lastTimestamp == null) {
this.lastShippedTimeStamps.put(walGroupId, 0L);
lastTimestamp = 0L;
}
if (lastTimestamp > 0) {
setAgeOfLastShippedOp(lastTimestamp, walGroupId);
}
} | 3.68 |
hudi_Triple_compareTo | /**
* <p>
* Compares the triple based on the left element, followed by the middle element, finally the right element. The types
* must be {@code Comparable}.
* </p>
*
* @param other the other triple, not null
* @return negative if this is less, zero if equal, positive if greater
*/
@Override
public int compareTo(final Triple<L, M, R> other) {
checkComparable(this);
checkComparable(other);
Comparable thisLeft = (Comparable) getLeft();
Comparable otherLeft = (Comparable) other.getLeft();
if (thisLeft.compareTo(otherLeft) == 0) {
return Pair.of(getMiddle(), getRight()).compareTo(Pair.of(other.getMiddle(), other.getRight()));
} else {
return thisLeft.compareTo(otherLeft);
}
} | 3.68 |
flink_S3TestCredentials_assumeCredentialsAvailable | /**
* Checks whether credentials are available in the environment variables of this JVM. If not,
* throws an {@link AssumptionViolatedException} which causes JUnit tests to be skipped.
*/
public static void assumeCredentialsAvailable() {
Assume.assumeTrue(
"No S3 credentials available in this test's environment", credentialsAvailable());
} | 3.68 |
hbase_PrivateCellUtil_writeValue | /**
* Writes the value from the given cell to the output stream
* @param out The output stream to which the data has to be written
* @param cell The cell whose contents have to be written
* @param vlength the value length
*/
public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) cell).getValueByteBuffer(),
((ByteBufferExtendedCell) cell).getValuePosition(), vlength);
} else {
out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
}
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_getCurrentWebResponses | /**
* Gets the current number of web responses obtained from Azure Storage.
* @return The number of web responses.
*/
public long getCurrentWebResponses() {
return inMemoryNumberOfWebResponses.get();
} | 3.68 |
morf_InsertStatement_useDirectPath | /**
* If supported by the dialect, hints to the database that an {@code APPEND} query hint should be used in the insert statement.
*
* <p>In general, as with all query plan modification, <strong>do not use this unless you know
* exactly what you are doing</strong>.</p>
*
* <p>These directives are applied in the SQL in the order they are called on {@link InsertStatement}. This usually
* affects their precedence or relative importance, depending on the platform.</p>
*
* @return a new insert statement with the change applied.
*/
public InsertStatement useDirectPath() {
return copyOnWriteOrMutate(
InsertStatementBuilder::useDirectPath,
() -> this.hints.add(DirectPathQueryHint.INSTANCE)
);
} | 3.68 |
hadoop_RouterAuditLogger_logFailure | /**
* Create a readable and parsable audit log string for a failed event.
*
* @param user User who made the service request.
* @param operation Operation requested by the user.
* @param perm Target permissions.
* @param target The target on which the operation is being performed.
* @param description Some additional information as to why the operation failed.
* @param subClusterId SubCluster Id in which operation was performed.
*/
public static void logFailure(String user, String operation, String perm,
String target, String description, SubClusterId subClusterId) {
if (LOG.isInfoEnabled()) {
LOG.info(createFailureLog(user, operation, perm, target, description, null,
subClusterId));
}
} | 3.68 |
rocketmq-connect_Worker_stopConnector | /**
* Stop a connector managed by this worker.
*
* @param connName the connector name.
*/
private void stopConnector(String connName) {
WorkerConnector workerConnector = connectors.get(connName);
log.info("Stopping connector {}", connName);
if (workerConnector == null) {
log.warn("Ignoring stop request for unowned connector {}", connName);
return;
}
workerConnector.shutdown();
} | 3.68 |
flink_GenericDataSinkBase_setInput | /**
* Sets the given operator as the input to this operator.
*
* @param input The operator to use as the input.
*/
public void setInput(Operator<IN> input) {
this.input = checkNotNull(input, "The input may not be null.");
} | 3.68 |
hadoop_YarnServiceConf_getLong | /**
* Get long value for the property. First get from the userConf, if not
* present, get from systemConf.
*
* @param name name of the property
* @param defaultValue default value of the property, if it is not defined in
* userConf and systemConf.
* @param userConf Configuration provided by client in the JSON definition
* @param systemConf The YarnConfiguration in the system.
* @return long value for the property
*/
public static long getLong(String name, long defaultValue,
Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
return userConf.getPropertyLong(name, systemConf.getLong(name, defaultValue));
} | 3.68 |
framework_Potus_getLastName | /**
* @return the lastName
*/
public String getLastName() {
return lastName;
} | 3.68 |
flink_ArrayListConverter_createObjectArrayKind | /** Creates the kind of array for {@link List#toArray(Object[])}. */
private static Object[] createObjectArrayKind(Class<?> elementClazz) {
// e.g. int[] is not a Object[]
if (elementClazz.isPrimitive()) {
return (Object[]) Array.newInstance(primitiveToWrapper(elementClazz), 0);
}
// e.g. int[][] and Integer[] are Object[]
return (Object[]) Array.newInstance(elementClazz, 0);
} | 3.68 |
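The distinction matters because `List#toArray(T[])` needs an array whose component type is a reference type: a primitive component must be widened to its wrapper, while nested primitive arrays such as int[][] already are Object[]. A small illustration:

```java
import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.List;

public class ArrayKindSketch {
    public static void main(String[] args) {
        Object intArray = Array.newInstance(int.class, 0);
        Object integerArray = Array.newInstance(Integer.class, 0);
        Object nestedIntArray = Array.newInstance(int[].class, 0);

        System.out.println(intArray instanceof Object[]);       // false: int[] is not an Object[]
        System.out.println(integerArray instanceof Object[]);   // true:  Integer[] is an Object[]
        System.out.println(nestedIntArray instanceof Object[]); // true:  int[][] is an Object[]

        // This is why toArray needs the wrapper kind for primitive element types.
        List<Integer> values = Arrays.asList(1, 2, 3);
        Integer[] boxed = values.toArray((Integer[]) integerArray);
        System.out.println(Arrays.toString(boxed)); // [1, 2, 3]
    }
}
```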
hbase_StoreFileInfo_getCreatedTimestamp | /** Returns timestamp when this file was created (as returned by filesystem) */
public long getCreatedTimestamp() {
return createdTimestamp;
} | 3.68 |
zxing_HistoryManager_buildHistory | /**
* <p>Builds a text representation of the scanning history. Each scan is encoded on one
* line, terminated by a line break (\r\n). The values in each line are comma-separated,
* and double-quoted. Double-quotes within values are escaped with a sequence of two
* double-quotes. The fields output are:</p>
*
* <ol>
* <li>Raw text</li>
* <li>Display text</li>
* <li>Format (e.g. QR_CODE)</li>
* <li>Unix timestamp (milliseconds since the epoch)</li>
* <li>Formatted version of timestamp</li>
* <li>Supplemental info (e.g. price info for a product barcode)</li>
* </ol>
*/
CharSequence buildHistory() {
StringBuilder historyText = new StringBuilder(1000);
SQLiteOpenHelper helper = new DBHelper(activity);
try (SQLiteDatabase db = helper.getReadableDatabase();
Cursor cursor = db.query(DBHelper.TABLE_NAME,
COLUMNS,
null, null, null, null,
DBHelper.TIMESTAMP_COL + " DESC")) {
DateFormat format = DateFormat.getDateTimeInstance(DateFormat.MEDIUM, DateFormat.MEDIUM);
while (cursor.moveToNext()) {
historyText.append('"').append(massageHistoryField(cursor.getString(0))).append("\",");
historyText.append('"').append(massageHistoryField(cursor.getString(1))).append("\",");
historyText.append('"').append(massageHistoryField(cursor.getString(2))).append("\",");
historyText.append('"').append(massageHistoryField(cursor.getString(3))).append("\",");
// Add timestamp again, formatted
long timestamp = cursor.getLong(3);
historyText.append('"').append(massageHistoryField(format.format(timestamp))).append("\",");
// Above we're preserving the old ordering of columns which had formatted data in position 5
historyText.append('"').append(massageHistoryField(cursor.getString(4))).append("\"\r\n");
}
} catch (SQLException sqle) {
Log.w(TAG, sqle);
}
return historyText;
} | 3.68 |
hbase_WALSplitter_createOutputSinkAndEntryBuffers | /**
* Setup the output sinks and entry buffers ahead of splitting WAL.
*/
private void createOutputSinkAndEntryBuffers() {
PipelineController controller = new PipelineController();
if (this.hfile) {
this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize);
this.outputSink = new BoundedRecoveredHFilesOutputSink(this, controller, this.entryBuffers,
this.numWriterThreads);
} else if (this.splitWriterCreationBounded) {
this.entryBuffers = new BoundedEntryBuffers(controller, this.bufferSize);
this.outputSink = new BoundedRecoveredEditsOutputSink(this, controller, this.entryBuffers,
this.numWriterThreads);
} else {
this.entryBuffers = new EntryBuffers(controller, this.bufferSize);
this.outputSink =
new RecoveredEditsOutputSink(this, controller, this.entryBuffers, this.numWriterThreads);
}
} | 3.68 |
hadoop_AzureBlobFileSystem_checkException | /**
* Given a path and exception, choose which IOException subclass
* to create.
* Returns (without throwing) if and only if the error code is in the list of allowed
* error codes.
* @param path path of operation triggering exception; may be null
* @param exception the exception caught
* @param allowedErrorCodesList varargs list of error codes.
* @throws IOException if the exception error code is not on the allowed list.
*/
@VisibleForTesting
public static void checkException(final Path path,
final AzureBlobFileSystemException exception,
final AzureServiceErrorCode... allowedErrorCodesList) throws IOException {
if (exception instanceof AbfsRestOperationException) {
AbfsRestOperationException ere = (AbfsRestOperationException) exception;
if (ArrayUtils.contains(allowedErrorCodesList, ere.getErrorCode())) {
return;
}
//AbfsRestOperationException.getMessage() contains full error info including path/uri.
String message = ere.getMessage();
switch (ere.getStatusCode()) {
case HttpURLConnection.HTTP_NOT_FOUND:
throw (IOException) new FileNotFoundException(message)
.initCause(exception);
case HTTP_CONFLICT:
throw (IOException) new FileAlreadyExistsException(message)
.initCause(exception);
case HttpURLConnection.HTTP_FORBIDDEN:
case HttpURLConnection.HTTP_UNAUTHORIZED:
throw (IOException) new AccessDeniedException(message)
.initCause(exception);
default:
throw ere;
}
} else if (exception instanceof SASTokenProviderException) {
throw exception;
} else {
if (path == null) {
throw exception;
}
// record info of path
throw new PathIOException(path.toString(), exception);
}
} | 3.68 |
hbase_DisableTableProcedure_setTableStateToDisabling | /**
* Mark the table state as DISABLING
* @param env MasterProcedureEnv
*/
private static void setTableStateToDisabling(final MasterProcedureEnv env,
final TableName tableName) throws IOException {
// Set table disabling flag up in zk.
env.getMasterServices().getTableStateManager().setTableState(tableName,
TableState.State.DISABLING);
LOG.info("Set {} to state={}", tableName, TableState.State.DISABLING);
} | 3.68 |
hbase_RegionServerAccounting_getGlobalMemStoreDataSize | /** Returns the global Memstore data size in the RegionServer */
public long getGlobalMemStoreDataSize() {
return globalMemStoreDataSize.sum();
} | 3.68 |
morf_ViewDeploymentValidator_validateExistingView | /**
*
* @param view View being examined. This is an existing schema view.
* @return boolean true if view is valid.
* @deprecated kept to ensure backwards compatibility.
*/
@Override
@Deprecated
public boolean validateExistingView(View view) {
return true; // the given existing view is okay as is
} | 3.68 |
querydsl_StringExpression_containsIgnoreCase | /**
* Create a {@code this.containsIgnoreCase(str)} expression
*
* <p>Returns true if the given String is contained, compare case insensitively</p>
*
* @param str string
* @return this.containsIgnoreCase(str)
*/
public BooleanExpression containsIgnoreCase(String str) {
return containsIgnoreCase(ConstantImpl.create(str));
} | 3.68 |
flink_SingleOutputStreamOperator_setChainingStrategy | /**
* Sets the {@link ChainingStrategy} for the given operator affecting the way operators will
* possibly be co-located on the same thread for increased performance.
*
* @param strategy The selected {@link ChainingStrategy}
* @return The operator with the modified chaining strategy
*/
@PublicEvolving
private SingleOutputStreamOperator<T> setChainingStrategy(ChainingStrategy strategy) {
if (transformation instanceof PhysicalTransformation) {
((PhysicalTransformation<T>) transformation).setChainingStrategy(strategy);
} else {
throw new UnsupportedOperationException(
"Cannot set chaining strategy on " + transformation);
}
return this;
} | 3.68 |
hadoop_WritableName_setName | /**
* Set the name that a class should be known as to something other than the
* class name.
*
* @param writableClass input writableClass.
* @param name input name.
*/
public static synchronized void setName(Class<?> writableClass, String name) {
CLASS_TO_NAME.put(writableClass, name);
NAME_TO_CLASS.put(name, writableClass);
} | 3.68 |
hadoop_BlockManagerParameters_getPrefetchingStatistics | /**
* @return The prefetching statistics for the stream.
*/
public PrefetchingStatistics getPrefetchingStatistics() {
return prefetchingStatistics;
} | 3.68 |
hudi_HoodieTable_getCompletedSavepointTimeline | /**
* Get only the completed (no-inflights) savepoint timeline.
*/
public HoodieTimeline getCompletedSavepointTimeline() {
return getActiveTimeline().getSavePointTimeline().filterCompletedInstants();
} | 3.68 |
hadoop_AuditContextUpdater_resetCurrentAuditContext | /**
* Remove job/task info from the current audit context.
*/
public void resetCurrentAuditContext() {
currentAuditContext().remove(AuditConstants.PARAM_JOB_ID);
currentAuditContext().remove(CommitConstants.PARAM_TASK_ATTEMPT_ID);
} | 3.68 |
hadoop_StripedBlockChecksumReconstructor_clearBuffers | /**
* Clear all associated buffers.
*/
private void clearBuffers() {
getStripedReader().clearBuffers();
targetBuffer.clear();
} | 3.68 |
framework_ProgressBar_setIndeterminate | /**
* Sets whether or not this progress indicator is indeterminate. In
* indeterminate mode there is an animation indicating that the task is
* running but without providing any information about the current progress.
*
* @param indeterminate
* <code>true</code> to set to indeterminate mode; otherwise
* <code>false</code>
*/
public void setIndeterminate(boolean indeterminate) {
getState().indeterminate = indeterminate;
} | 3.68 |
hibernate-validator_ExecutableHelper_instanceMethodParametersResolveToSameTypes | /**
* Whether the parameters of the two given instance methods resolve to the same types or not. Takes type parameters into account.
*
* @param subTypeMethod a method on a subtype
* @param superTypeMethod a method on a supertype
*
* @return {@code true} if the parameters of the two methods resolve to the same types, {@code false} otherwise.
*/
private boolean instanceMethodParametersResolveToSameTypes(Method subTypeMethod, Method superTypeMethod) {
return instanceMethodParametersResolveToSameTypes( subTypeMethod.getDeclaringClass(), subTypeMethod, superTypeMethod );
} | 3.68 |
framework_MultiSelect_isSelected | /**
* Returns whether the given item is currently selected.
*
* @param item
* the item to check, not null
* @return {@code true} if the item is selected, {@code false} otherwise
*/
public default boolean isSelected(T item) {
return getSelectedItems().contains(item);
} | 3.68 |
hbase_HRegionServer_getReplicationSinkService | /** Returns the object that implements the replication sink executorService. */
public ReplicationSinkService getReplicationSinkService() {
return replicationSinkHandler;
} | 3.68 |
flink_ActiveResourceManager_requestNewWorker | /**
* Allocates a resource using the worker resource specification.
*
* @param workerResourceSpec specifies the size of the resource to be allocated
*/
@VisibleForTesting
public void requestNewWorker(WorkerResourceSpec workerResourceSpec) {
final TaskExecutorProcessSpec taskExecutorProcessSpec =
TaskExecutorProcessUtils.processSpecFromWorkerResourceSpec(
flinkConfig, workerResourceSpec);
final int pendingCount = pendingWorkerCounter.increaseAndGet(workerResourceSpec);
totalWorkerCounter.increaseAndGet(workerResourceSpec);
log.info(
"Requesting new worker with resource spec {}, current pending count: {}.",
workerResourceSpec,
pendingCount);
final CompletableFuture<WorkerType> requestResourceFuture =
resourceManagerDriver.requestResource(taskExecutorProcessSpec);
unallocatedWorkerFutures.put(requestResourceFuture, workerResourceSpec);
FutureUtils.assertNoException(
requestResourceFuture.handle(
(worker, exception) -> {
unallocatedWorkerFutures.remove(requestResourceFuture);
if (exception != null) {
final int count =
pendingWorkerCounter.decreaseAndGet(workerResourceSpec);
totalWorkerCounter.decreaseAndGet(workerResourceSpec);
if (exception instanceof CancellationException) {
log.info(
"Requesting worker with resource spec {} canceled, current pending count: {}",
workerResourceSpec,
count);
} else {
log.warn(
"Failed requesting worker with resource spec {}, current pending count: {}",
workerResourceSpec,
count,
exception);
recordWorkerFailureAndPauseWorkerCreationIfNeeded();
checkResourceDeclarations();
}
} else {
final ResourceID resourceId = worker.getResourceID();
workerNodeMap.put(resourceId, worker);
workerResourceSpecs.put(resourceId, workerResourceSpec);
currentAttemptUnregisteredWorkers.add(resourceId);
scheduleWorkerRegistrationTimeoutCheck(resourceId);
log.info(
"Requested worker {} with resource spec {}.",
resourceId.getStringWithMetadata(),
workerResourceSpec);
}
return null;
}));
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_getHeadroomWeighting | /**
* Compute the weighting based on available headroom. This is proportional to
* the available headroom memory announced by RM, or to 1/N for RMs we have
* not seen yet. If all RMs report zero headroom, we fall back to 1/N again.
*/
private float getHeadroomWeighting(SubClusterId targetId,
AllocationBookkeeper allocationBookkeeper) {
// baseline weight for all RMs
float headroomWeighting =
1 / (float) allocationBookkeeper.getActiveAndEnabledSC().size();
// if we have headroom information for this sub-cluster (and we are safe
// from /0 issues)
if (headroom.containsKey(targetId)
&& allocationBookkeeper.totHeadroomMemory > 0) {
// compute which portion of the RMs that are active/enabled have reported
// their headroom (needed as adjustment factor)
// (note: getActiveAndEnabledSC should never be null/zero)
float ratioHeadroomKnown = allocationBookkeeper.totHeadRoomEnabledRMs
/ (float) allocationBookkeeper.getActiveAndEnabledSC().size();
// headroomWeighting is the ratio of headroom memory in the targetId
// cluster / total memory. The ratioHeadroomKnown factor is applied to
// adjust for missing information and ensure sum of allocated containers
// closely approximate what the user asked (small excess).
headroomWeighting = (headroom.get(targetId).getMemorySize()
/ allocationBookkeeper.totHeadroomMemory) * (ratioHeadroomKnown);
}
return headroomWeighting;
} | 3.68 |
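A standalone sketch of the same weighting arithmetic, with the bookkeeping collapsed into plain parameters (the names here are illustrative, not the actual policy fields):

```java
import java.util.Map;

public class HeadroomWeightingSketch {

    /**
     * @param headroomMb     reported headroom (in MB) per sub-cluster that has announced it
     * @param activeClusters number of active/enabled sub-clusters
     * @param target         sub-cluster to weight
     */
    static float headroomWeighting(Map<String, Long> headroomMb, int activeClusters, String target) {
        float weight = 1f / activeClusters; // baseline: 1/N
        long totalHeadroom = headroomMb.values().stream().mapToLong(Long::longValue).sum();
        if (headroomMb.containsKey(target) && totalHeadroom > 0) {
            // fraction of active clusters whose headroom is known, used as an adjustment factor
            float ratioKnown = headroomMb.size() / (float) activeClusters;
            weight = (headroomMb.get(target) / (float) totalHeadroom) * ratioKnown;
        }
        return weight;
    }

    public static void main(String[] args) {
        Map<String, Long> headroom = Map.of("sc1", 6_000L, "sc2", 2_000L);
        System.out.println(headroomWeighting(headroom, 4, "sc1")); // (6000/8000) * (2/4) = 0.375
        System.out.println(headroomWeighting(headroom, 4, "sc3")); // unknown headroom: 1/4 = 0.25
    }
}
```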
framework_VScrollTable_setRowFocus | /**
* Moves the selection head to a specific row.
*
* @param row
* The row to where the selection head should move
* @return Returns true if focus was moved successfully, else false
*/
public boolean setRowFocus(VScrollTableRow row) {
if (!isSelectable()) {
return false;
}
// Remove previous selection
if (focusedRow != null && focusedRow != row) {
focusedRow.removeStyleName(getStylePrimaryName() + "-focus");
}
if (row != null) {
// Apply focus style to new selection
row.addStyleName(getStylePrimaryName() + "-focus");
/*
* Trying to set focus on already focused row
*/
if (row == focusedRow) {
return false;
}
// Set new focused row
focusedRow = row;
if (hasFocus()) {
ensureRowIsVisible(row);
}
return true;
}
return false;
} | 3.68 |
hudi_HoodieIngestionService_onIngestionCompletes | /**
* A callback method to be invoked after ingestion completes.
* <p>
* For continuous mode, this is invoked once after exiting the ingestion loop.
*/
protected boolean onIngestionCompletes(boolean hasError) {
return true;
} | 3.68 |
flink_AvailabilityProvider_resetUnavailable | /** Judges to reset the current available state as unavailable. */
public void resetUnavailable() {
if (isAvailable()) {
availableFuture = new CompletableFuture<>();
}
} | 3.68 |