name | code_snippet | score |
---|---|---|
zxing_BitMatrix_rotate | /**
 * Modifies this {@code BitMatrix} so that it represents the same matrix rotated by the given number of degrees (0, 90, 180, 270)
*
* @param degrees number of degrees to rotate through counter-clockwise (0, 90, 180, 270)
*/
public void rotate(int degrees) {
switch (degrees % 360) {
case 0:
return;
case 90:
rotate90();
return;
case 180:
rotate180();
return;
case 270:
rotate90();
rotate180();
return;
}
    throw new IllegalArgumentException("degrees must be 0, 90, 180, or 270");
} | 3.68 |
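The dispatch on degrees % 360 above only accepts rotations of 0, 90, 180 or 270 degrees. A minimal usage sketch (not part of the dataset row; it assumes zxing's public BitMatrix(int width, int height) constructor and set accessor):

```java
import com.google.zxing.common.BitMatrix;

public class RotateExample {
    public static void main(String[] args) {
        BitMatrix matrix = new BitMatrix(4, 8); // 4 wide, 8 tall
        matrix.set(0, 1);
        matrix.rotate(90);   // width and height swap: now 8x4
        matrix.rotate(270);  // a further 270 degrees restores the original orientation
    }
}
```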
morf_GraphBasedUpgradeTraversalService_allNodesCompletedNoLock | /**
* @return true if all nodes of the upgrade have been executed
*/
private boolean allNodesCompletedNoLock() {
return graphBasedUpgrade.getNumberOfNodes() == completedNodes.size();
} | 3.68 |
flink_EnvironmentInformation_getGitCommitTimeString | /**
* @return The Instant of the last commit of this code as a String using the Europe/Berlin
* timezone.
*/
public static String getGitCommitTimeString() {
return getVersionsInstance().gitCommitTimeStr;
} | 3.68 |
hadoop_BoundedResourcePool_acquire | /**
* Acquires a resource blocking if necessary until one becomes available.
*/
@Override
public T acquire() {
return this.acquireHelper(true);
} | 3.68 |
AreaShop_RegionGroup_addMember | /**
* Adds a member to a group.
* @param region The region to add to the group (GeneralRegion or a subclass of it)
* @return true if the region was not already added, otherwise false
*/
public boolean addMember(GeneralRegion region) {
if(regions.add(region.getName())) {
setSetting("regions", new ArrayList<>(regions));
saveRequired();
return true;
}
return false;
} | 3.68 |
hbase_FileSystemUtilizationChore_computeSize | /**
* Computes total FileSystem size for the given {@link Region}.
* @param r The region
* @return The size, in bytes, of the Region.
*/
long computeSize(Region r) {
long regionSize = 0L;
for (Store store : r.getStores()) {
regionSize += store.getHFilesSize();
}
if (LOG.isTraceEnabled()) {
LOG.trace("Size of " + r + " is " + regionSize);
}
return regionSize;
} | 3.68 |
framework_AbstractComponent_hasEqualSize | /**
 * Tests whether the given component has the same size as this instance
*
* @param component
* the component for the size comparison
* @return true if the sizes are equal
*/
private boolean hasEqualSize(Component component) {
return hasEqualWidth(component) && hasEqualHeight(component);
} | 3.68 |
dubbo_NetUtils_getLocalAddress | /**
* Find first valid IP from local network card
*
* @return first valid local IP
*/
public static InetAddress getLocalAddress() {
if (LOCAL_ADDRESS != null) {
return LOCAL_ADDRESS;
}
InetAddress localAddress = getLocalAddress0();
LOCAL_ADDRESS = localAddress;
return localAddress;
} | 3.68 |
flink_CoGroupedStreams_with | /**
* Completes the co-group operation with the user function that is executed for windowed
* groups.
*
* <p><b>Note:</b> This is a temporary workaround while the {@link #apply(CoGroupFunction,
* TypeInformation)} method has the wrong return type and hence does not allow one to set an
* operator-specific parallelism
*
* @deprecated This method will be removed once the {@link #apply(CoGroupFunction,
* TypeInformation)} method is fixed in the next major version of Flink (2.0).
*/
@PublicEvolving
@Deprecated
public <T> SingleOutputStreamOperator<T> with(
CoGroupFunction<T1, T2, T> function, TypeInformation<T> resultType) {
return (SingleOutputStreamOperator<T>) apply(function, resultType);
} | 3.68 |
hbase_RestoreSnapshotHelper_cloneHdfsMobRegion | /**
 * Clone the mob region. For the region, create a new region and an HFileLink for each hfile.
*/
private void cloneHdfsMobRegion(final Map<String, SnapshotRegionManifest> regionManifests,
final RegionInfo region) throws IOException {
// clone region info (change embedded tableName with the new one)
Path clonedRegionPath = MobUtils.getMobRegionPath(rootDir, tableDesc.getTableName());
cloneRegion(MobUtils.getMobRegionInfo(tableDesc.getTableName()), clonedRegionPath, region,
regionManifests.get(region.getEncodedName()));
} | 3.68 |
hbase_WAL_init | /**
* Used to initialize the WAL. Usually this is for creating the first writer.
*/
default void init() throws IOException {
} | 3.68 |
AreaShop_AreaShop_debug | /**
 * Sends a debug message to the console.
* @param message The message that should be printed to the console
*/
public static void debug(Object... message) {
if(AreaShop.getInstance().debug) {
info("Debug: " + StringUtils.join(message, " "));
}
} | 3.68 |
morf_Function_upperCase | /**
* Helper method to create an instance of the <code>UPPER</code> SQL function.
* Converts all of the characters in this String to upper case using the rules
* of the default locale.
*
* @param expression the expression to evaluate.
* @return an instance of the upper function.
*/
public static Function upperCase(AliasedField expression) {
return new Function(FunctionType.UPPER, expression);
} | 3.68 |
hbase_MasterRpcServices_hasVisibilityLabelsServiceCoprocessor | /**
* Determines if there is a MasterCoprocessor deployed which implements
* {@link VisibilityLabelsService.Interface}.
*/
boolean hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost cpHost) {
return checkCoprocessorWithService(cpHost.findCoprocessors(MasterCoprocessor.class),
VisibilityLabelsService.Interface.class);
} | 3.68 |
hibernate-validator_AbstractConstraintValidatorManagerImpl_findMatchingValidatorDescriptor | /**
* Runs the validator resolution algorithm.
*
* @param validatedValueType The type of the value to be validated (the type of the member/class the constraint was placed on).
*
* @return The class of a matching validator.
*/
private <A extends Annotation> ConstraintValidatorDescriptor<A> findMatchingValidatorDescriptor(ConstraintDescriptorImpl<A> descriptor, Type validatedValueType) {
Map<Type, ConstraintValidatorDescriptor<A>> availableValidatorDescriptors = TypeHelper.getValidatorTypes(
descriptor.getAnnotationType(),
descriptor.getMatchingConstraintValidatorDescriptors()
);
List<Type> discoveredSuitableTypes = findSuitableValidatorTypes( validatedValueType, availableValidatorDescriptors.keySet() );
resolveAssignableTypes( discoveredSuitableTypes );
if ( discoveredSuitableTypes.size() == 0 ) {
return null;
}
if ( discoveredSuitableTypes.size() > 1 ) {
throw LOG.getMoreThanOneValidatorFoundForTypeException( validatedValueType, discoveredSuitableTypes );
}
Type suitableType = discoveredSuitableTypes.get( 0 );
return availableValidatorDescriptors.get( suitableType );
} | 3.68 |
hudi_HoodieTable_getHoodieView | /**
* Get complete view of the file system for this table with ability to force sync.
*/
public SyncableFileSystemView getHoodieView() {
return getViewManager().getFileSystemView(metaClient);
} | 3.68 |
flink_ApplicationDispatcherBootstrap_finishBootstrapTasks | /**
* Logs final application status and invokes error handler in case of unexpected failures.
* Optionally shuts down the given dispatcherGateway when the application completes (either
* successfully or in case of failure), depending on the corresponding config option.
*/
private CompletableFuture<Acknowledge> finishBootstrapTasks(
final DispatcherGateway dispatcherGateway) {
final CompletableFuture<Acknowledge> shutdownFuture =
applicationCompletionFuture
.handle(
(ignored, t) -> {
if (t == null) {
LOG.info("Application completed SUCCESSFULLY");
return finish(
dispatcherGateway, ApplicationStatus.SUCCEEDED);
}
final Optional<ApplicationStatus> maybeApplicationStatus =
extractApplicationStatus(t);
if (maybeApplicationStatus.isPresent()
&& isCanceledOrFailed(maybeApplicationStatus.get())) {
final ApplicationStatus applicationStatus =
maybeApplicationStatus.get();
LOG.info("Application {}: ", applicationStatus, t);
return finish(dispatcherGateway, applicationStatus);
}
if (t instanceof CancellationException) {
LOG.warn(
"Application has been cancelled because the {} is being stopped.",
ApplicationDispatcherBootstrap.class
.getSimpleName());
return CompletableFuture.completedFuture(Acknowledge.get());
}
LOG.warn("Application failed unexpectedly: ", t);
return FutureUtils.<Acknowledge>completedExceptionally(t);
})
.thenCompose(Function.identity());
FutureUtils.handleUncaughtException(shutdownFuture, (t, e) -> errorHandler.onFatalError(e));
return shutdownFuture;
} | 3.68 |
querydsl_Expressions_enumPath | /**
* Create a new Path expression
*
* @param type type of expression
* @param metadata path metadata
* @param <T> type of expression
* @return path expression
*/
public static <T extends Enum<T>> EnumPath<T> enumPath(Class<? extends T> type, PathMetadata metadata) {
return new EnumPath<T>(type, metadata);
} | 3.68 |
pulsar_PersistentSubscription_resumeAfterFence | /**
* Resume subscription after topic deletion or close failure.
*/
public synchronized void resumeAfterFence() {
// If "fenceFuture" is null, it means that "disconnect" has never been called.
if (fenceFuture != null) {
fenceFuture.whenComplete((ignore, ignoreEx) -> {
synchronized (PersistentSubscription.this) {
try {
if (IS_FENCED_UPDATER.compareAndSet(this, TRUE, FALSE)) {
if (dispatcher != null) {
dispatcher.reset();
}
}
fenceFuture = null;
} catch (Exception ex) {
log.error("[{}] Resume subscription [{}] failure", topicName, subName, ex);
}
}
});
}
} | 3.68 |
flink_CompletedCheckpointStore_getLatestCheckpointId | /** Returns the id of the latest completed checkpoint. */
default long getLatestCheckpointId() {
try {
List<CompletedCheckpoint> allCheckpoints = getAllCheckpoints();
if (allCheckpoints.isEmpty()) {
return 0;
}
return allCheckpoints.get(allCheckpoints.size() - 1).getCheckpointID();
} catch (Throwable throwable) {
LOG.warn("Get the latest completed checkpoints failed", throwable);
return 0;
}
} | 3.68 |
flink_TwoPhaseCommitSinkFunction_beginTransactionInternal | /**
* This method must be the only place to call {@link #beginTransaction()} to ensure that the
* {@link TransactionHolder} is created at the same time.
*/
private TransactionHolder<TXN> beginTransactionInternal() throws Exception {
return new TransactionHolder<>(beginTransaction(), clock.millis());
} | 3.68 |
hadoop_SessionTokenIdentifier_toString | /**
* Add the (sanitized) marshalled credentials to the string value.
* @return a string value for test assertions and debugging.
*/
@Override
public String toString() {
return super.toString()
+ "; " + marshalledCredentials.toString();
} | 3.68 |
flink_CheckpointConfig_setFailOnCheckpointingErrors | /**
 * Sets the expected behaviour for tasks in case they encounter an error while
 * checkpointing. If this is set to true, which is equivalent to setting
 * tolerableCheckpointFailureNumber to zero, the job manager fails the whole job once it
 * receives a declined checkpoint message. If this is set to false, which is equivalent to setting
 * tolerableCheckpointFailureNumber to Integer.MAX_VALUE (i.e. unlimited), the job manager
 * does not fail the whole job no matter how many declined checkpoints it receives.
*
* <p>{@link #setTolerableCheckpointFailureNumber(int)} would always overrule this deprecated
* method if they have conflicts.
*
* @deprecated Use {@link #setTolerableCheckpointFailureNumber(int)}.
*/
@Deprecated
public void setFailOnCheckpointingErrors(boolean failOnCheckpointingErrors) {
if (configuration
.getOptional(ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER)
.isPresent()) {
LOG.warn(
"Since ExecutionCheckpointingOptions.TOLERABLE_FAILURE_NUMBER has been configured as {}, deprecated "
+ "#setFailOnCheckpointingErrors(boolean) method would not take any effect and please use "
+ "#setTolerableCheckpointFailureNumber(int) method to "
+ "determine your expected behaviour when checkpoint errors on task side.",
getTolerableCheckpointFailureNumber());
return;
}
if (failOnCheckpointingErrors) {
setTolerableCheckpointFailureNumber(0);
} else {
setTolerableCheckpointFailureNumber(UNLIMITED_TOLERABLE_FAILURE_NUMBER);
}
} | 3.68 |
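As a hedged illustration of the replacement call recommended in the Javadoc above (a sketch assuming a standard Flink StreamExecutionEnvironment setup; the class name and interval below are not part of the dataset row):

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CheckpointFailureConfigExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(10_000); // checkpoint every 10 seconds
        // Equivalent to the deprecated setFailOnCheckpointingErrors(true):
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(0);
    }
}
```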
streampipes_SwingingDoorTrendingFilterProcessor_checkSdtCompressionParams | /**
 * @throws SpRuntimeException thrown if the following conditions are not satisfied:
* 0 < sdtCompressionDeviation
* 0 <= sdtCompressionMinTimeInterval < sdtCompressionMaxTimeInterval <= Long.MAX_VALUE
*/
private void checkSdtCompressionParams() {
if (sdtCompressionDeviation <= 0) {
throw new SpRuntimeException(
String.format("Compression Deviation should be positive! Actual value: %f. ", sdtCompressionDeviation));
}
if (sdtCompressionMinTimeInterval < 0) {
throw new SpRuntimeException(
String.format("Compression Minimum Time Interval should be >= 0! Actual value: %d. ",
sdtCompressionMinTimeInterval));
}
if (sdtCompressionMaxTimeInterval <= sdtCompressionMinTimeInterval) {
throw new SpRuntimeException(
String.format(
"Compression Minimum Time Interval should be < Compression Maximum Time Interval! "
+ "Actual: Compression Minimum Time Interval(%d), Compression Maximum Time Interval(%d). ",
sdtCompressionMinTimeInterval, sdtCompressionMaxTimeInterval));
}
} | 3.68 |
framework_ConnectorTracker_registerConnector | /**
* Register the given connector.
* <p>
* The lookup method {@link #getConnector(String)} only returns registered
* connectors.
* </p>
*
* @param connector
* The connector to register.
*/
public void registerConnector(ClientConnector connector) {
boolean wasUnregistered = unregisteredConnectors.remove(connector);
String connectorId = connector.getConnectorId();
ClientConnector previouslyRegistered = connectorIdToConnector
.get(connectorId);
if (previouslyRegistered == null) {
connectorIdToConnector.put(connectorId, connector);
uninitializedConnectors.add(connector);
if (fineLogging) {
getLogger().log(Level.FINE, "Registered {0} ({1})",
new Object[] { connector.getClass().getSimpleName(),
connectorId });
}
} else if (previouslyRegistered != connector) {
throw new RuntimeException("A connector with id " + connectorId
+ " is already registered!");
} else if (!wasUnregistered) {
getLogger().log(Level.WARNING,
"An already registered connector was registered again: {0} ({1})",
new Object[] { connector.getClass().getSimpleName(),
connectorId });
}
dirtyConnectors.add(connector);
} | 3.68 |
hmily_HmilyTransactionHolder_registerStarterParticipant | /**
* add participant.
*
* @param hmilyParticipant {@linkplain HmilyParticipant}
*/
public void registerStarterParticipant(final HmilyParticipant hmilyParticipant) {
if (Objects.isNull(hmilyParticipant)) {
return;
}
Optional.ofNullable(getCurrentTransaction())
.ifPresent(c -> c.registerParticipant(hmilyParticipant));
} | 3.68 |
flink_Tuple18_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>
Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>
of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17) {
return new Tuple18<>(
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17);
} | 3.68 |
morf_H2Dialect_decorateTemporaryTableName | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#decorateTemporaryTableName(java.lang.String)
*/
@Override
public String decorateTemporaryTableName(String undecoratedName) {
return TEMPORARY_TABLE_PREFIX + undecoratedName;
} | 3.68 |
framework_VScrollTable_setProperTabIndex | /**
 * Sets the proper tabIndex for scrollBodyPanel (the focusable element in the
* component).
* <p>
* If the component has no explicit tabIndex a zero is given (default
* tabbing order based on dom hierarchy) or -1 if the component does not
 * need to gain focus. The component needs no focus if it has no scrollbars
 * (is not scrollable) and is not selectable. Note that in the future shortcut
* actions may need focus.
* <p>
* For internal use only. May be removed or replaced in the future.
*/
public void setProperTabIndex() {
int storedScrollTop = 0;
int storedScrollLeft = 0;
if (BrowserInfo.get().getOperaVersion() >= 11) {
// Workaround for Opera scroll bug when changing tabIndex (#6222)
storedScrollTop = scrollBodyPanel.getScrollPosition();
storedScrollLeft = scrollBodyPanel.getHorizontalScrollPosition();
}
if (tabIndex == 0 && !isFocusable()) {
scrollBodyPanel.setTabIndex(-1);
} else {
scrollBodyPanel.setTabIndex(tabIndex);
}
if (BrowserInfo.get().getOperaVersion() >= 11) {
// Workaround for Opera scroll bug when changing tabIndex (#6222)
scrollBodyPanel.setScrollPosition(storedScrollTop);
scrollBodyPanel.setHorizontalScrollPosition(storedScrollLeft);
}
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_description | /**
* Set job description.
*
* @param description job description
* @return job configuration builder
*/
public Builder description(final String description) {
if (null != description) {
this.description = description;
}
return this;
} | 3.68 |
starts_Handle_toString | /**
* Returns the textual representation of this handle. The textual
* representation is:
*
* <pre>
* owner '.' name desc ' ' '(' tag ')'
* </pre>
*
* . As this format is unambiguous, it can be parsed if necessary.
*/
@Override
public String toString() {
return owner + '.' + name + desc + " (" + tag + ')';
} | 3.68 |
morf_AbstractSqlDialectTest_testCoalesce | /**
 * Tests that the COALESCE functionality behaves as expected
*/
@Test
public void testCoalesce() {
SelectStatement testStatement = select(coalesce(new NullFieldLiteral(), field("bob"))).from(tableRef("MyTable"));
assertEquals(expectedCoalesce().toLowerCase(), testDialect.convertStatementToSQL(testStatement).toLowerCase());
} | 3.68 |
flink_DataSource_getSplitDataProperties | /**
* Returns the {@link org.apache.flink.api.java.io.SplitDataProperties} for the {@link
* org.apache.flink.core.io.InputSplit}s of this DataSource for configurations.
*
* <p>SplitDataProperties can help to generate more efficient execution plans.
*
* <p><b> IMPORTANT: Incorrect configuration of SplitDataProperties can cause wrong results!
* </b>
*
* @return The SplitDataProperties for the InputSplits of this DataSource.
*/
@PublicEvolving
public SplitDataProperties<OUT> getSplitDataProperties() {
if (this.splitDataProperties == null) {
this.splitDataProperties = new SplitDataProperties<OUT>(this);
}
return this.splitDataProperties;
} | 3.68 |
pulsar_ProducerConfiguration_setSendTimeout | /**
* Set the send timeout <i>(default: 30 seconds)</i>
* <p>
* If a message is not acknowledged by the server before the sendTimeout expires, an error will be reported.
*
* @param sendTimeout
* the send timeout
* @param unit
* the time unit of the {@code sendTimeout}
*/
public ProducerConfiguration setSendTimeout(int sendTimeout, TimeUnit unit) {
conf.setSendTimeoutMs(sendTimeout, unit);
return this;
} | 3.68 |
framework_ApplicationConnection_setCommunicationErrorDelegate | /**
 * Sets the delegate that is called whenever a communication error occurs.
*
* @param delegate
* the delegate.
*/
public void setCommunicationErrorDelegate(
CommunicationErrorHandler delegate) {
communicationErrorDelegate = delegate;
} | 3.68 |
framework_SelectorPredicate_extractPredicates | /**
* Generates a list of predicates from a single predicate string.
*
* @param path
* a comma separated string of predicates
* @return a List of Predicate objects
*/
public static List<SelectorPredicate> extractPredicates(String path) {
List<SelectorPredicate> predicates = new ArrayList<>();
String predicateStr = extractPredicateString(path);
if (null == predicateStr || predicateStr.isEmpty()) {
return predicates;
}
// Extract input strings
List<String> input = readPredicatesFromString(predicateStr);
// Process each predicate into proper predicate descriptor
for (String s : input) {
SelectorPredicate p = new SelectorPredicate();
s = s.trim();
try {
// If we can parse out the predicate as a pure index argument,
// stop processing here.
p.index = Integer.parseInt(s);
predicates.add(p);
continue;
} catch (Exception e) {
p.index = -1;
}
int idx = LocatorUtil.indexOfIgnoringQuoted(s, '=');
if (idx < 0) {
continue;
}
p.name = s.substring(0, idx);
p.value = s.substring(idx + 1);
if (p.value.equals("?")) {
p.wildcard = true;
p.value = null;
} else {
// Only unquote predicate value once we're sure it's a proper
// value...
p.value = unquote(p.value);
}
predicates.add(p);
}
// Move any (and all) index predicates to last place in the list.
for (int i = 0, l = predicates.size(); i < l - 1; ++i) {
if (predicates.get(i).index > -1) {
predicates.add(predicates.remove(i));
--i;
--l;
}
}
return predicates;
} | 3.68 |
hbase_RegionSplitCalculator_add | /**
* Adds an edge to the split calculator
 * @return true if it is included, false if backwards/invalid
*/
public boolean add(R range) {
byte[] start = range.getStartKey();
byte[] end = specialEndKey(range);
// No need to use Arrays.equals because ENDKEY is null
if (end != ENDKEY && Bytes.compareTo(start, end) > 0) {
// don't allow backwards edges
LOG.debug(
"attempted to add backwards edge: " + Bytes.toString(start) + " " + Bytes.toString(end));
return false;
}
splits.add(start);
splits.add(end);
starts.put(start, range);
return true;
} | 3.68 |
flink_RunLengthDecoder_readDictionaryIdData | /** It is used to decode dictionary IDs. */
private void readDictionaryIdData(int total, WritableIntVector c, int rowId) {
int left = total;
while (left > 0) {
if (this.currentCount == 0) {
this.readNextGroup();
}
int n = Math.min(left, this.currentCount);
switch (mode) {
case RLE:
c.setInts(rowId, n, currentValue);
break;
case PACKED:
c.setInts(rowId, n, currentBuffer, currentBufferIdx);
currentBufferIdx += n;
break;
}
rowId += n;
left -= n;
currentCount -= n;
}
} | 3.68 |
flink_BoundedBlockingSubpartition_createWithFileAndMemoryMappedReader | /**
* Creates a BoundedBlockingSubpartition that stores the partition data in a file and memory
* maps that file for reading. Data is eagerly spilled (written to disk) and then mapped into
* memory. The main difference to the {@link #createWithMemoryMappedFile(int, ResultPartition,
* File)} variant is that no I/O is necessary when pages from the memory mapped file are
* evicted.
*/
public static BoundedBlockingSubpartition createWithFileAndMemoryMappedReader(
int index, ResultPartition parent, File tempFile) throws IOException {
final FileChannelMemoryMappedBoundedData bd =
FileChannelMemoryMappedBoundedData.create(tempFile.toPath());
return new BoundedBlockingSubpartition(index, parent, bd, false);
} | 3.68 |
framework_VComboBox_onSuggestionSelected | /**
* Triggered when a suggestion is selected.
*
* @param suggestion
* The suggestion that just got selected.
*/
public void onSuggestionSelected(ComboBoxSuggestion suggestion) {
if (enableDebug) {
debug("VComboBox: onSuggestionSelected(" + suggestion.caption + ": "
+ suggestion.key + ")");
}
// special handling of null selection
if (suggestion.key.isEmpty()) {
onNullSelected();
return;
}
dataReceivedHandler.cancelPendingPostFiltering();
currentSuggestion = suggestion;
String newKey = suggestion.getOptionKey();
String text = suggestion.getReplacementString();
setText(text);
setSelectedItemIcon(suggestion.getIconUri());
if (!newKey.equals(selectedOptionKey)) {
selectedOptionKey = newKey;
connector.sendSelection(selectedOptionKey);
setSelectedCaption(text);
// currentPage = -1; // forget the page
}
suggestionPopup.hide();
} | 3.68 |
framework_ActionsOnInvisibleComponents_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Test to ensure actions are not performed on disabled/invisible components";
} | 3.68 |
flink_ExtractionAwareDeltaFunction_getDelta | /**
 * This method takes the two data points and runs the configured extractor on them. The delta function
* implemented at {@link #getNestedDelta} is then called with the extracted data. In case no
 * extractor is set, the input data gets passed to {@link #getNestedDelta} as-is. The return
* value is just forwarded from {@link #getNestedDelta}.
*
* @param oldDataPoint the older data point as raw data (before extraction).
* @param newDataPoint the new data point as raw data (before extraction).
* @return the delta between the two points.
*/
@SuppressWarnings("unchecked")
@Override
public double getDelta(DATA oldDataPoint, DATA newDataPoint) {
if (converter == null) {
// In case no conversion/extraction is required, we can cast DATA to
// TO
// => Therefore, "unchecked" warning is suppressed for this method.
return getNestedDelta((TO) oldDataPoint, (TO) newDataPoint);
} else {
return getNestedDelta(converter.extract(oldDataPoint), converter.extract(newDataPoint));
}
} | 3.68 |
hbase_BooleanStateStore_set | /**
* Set the flag on/off.
* @param on true if the flag should be on, false otherwise
* @throws IOException if the operation fails
 * @return the previous state
*/
public synchronized boolean set(boolean on) throws IOException {
byte[] state = toByteArray(on);
setState(state);
boolean prevOn = this.on;
this.on = on;
return prevOn;
} | 3.68 |
flink_TimeIntervalJoin_registerCleanUpTimer | /**
* Register a timer for cleaning up rows in a specified time.
*
* @param ctx the context to register timer
* @param rowTime time for the input row
* @param leftRow whether this row comes from the left stream
*/
private void registerCleanUpTimer(Context ctx, long rowTime, boolean leftRow)
throws IOException {
if (leftRow) {
long cleanUpTime =
rowTime + leftRelativeSize + minCleanUpInterval + allowedLateness + 1;
registerTimer(ctx, cleanUpTime);
rightTimerState.update(cleanUpTime);
} else {
long cleanUpTime =
rowTime + rightRelativeSize + minCleanUpInterval + allowedLateness + 1;
registerTimer(ctx, cleanUpTime);
leftTimerState.update(cleanUpTime);
}
} | 3.68 |
flink_IOManager_createBlockChannelWriter | /**
* Creates a block channel writer that writes to the given channel. The writer adds the written
* segment to its return-queue afterwards (to allow for asynchronous implementations).
*
* @param channelID The descriptor for the channel to write to.
* @return A block channel writer that writes to the given channel.
* @throws IOException Thrown, if the channel for the writer could not be opened.
*/
public BlockChannelWriter<MemorySegment> createBlockChannelWriter(ID channelID)
throws IOException {
return createBlockChannelWriter(channelID, new LinkedBlockingQueue<>());
} | 3.68 |
framework_AbstractContainer_addItemSetChangeListener | /**
* Implementation of the corresponding method in
* {@link ItemSetChangeNotifier}, override with the corresponding public
* method and implement the interface to use this.
*
* @see ItemSetChangeNotifier#addListener(Container.ItemSetChangeListener)
*/
protected void addItemSetChangeListener(
Container.ItemSetChangeListener listener) {
if (getItemSetChangeListeners() == null) {
setItemSetChangeListeners(
new LinkedList<Container.ItemSetChangeListener>());
}
getItemSetChangeListeners().add(listener);
} | 3.68 |
hadoop_StageConfig_withDestinationDir | /**
* Set job destination dir.
* @param dir new dir
* @return this
*/
public StageConfig withDestinationDir(final Path dir) {
destinationDir = dir;
return this;
} | 3.68 |
hadoop_ContainerReapContext_setUser | /**
 * Set the user within the context.
*
* @param user the user.
* @return the Builder with the user set.
*/
public Builder setUser(String user) {
this.builderUser = user;
return this;
} | 3.68 |
flink_ResolvedSchema_physical | /** Shortcut for a resolved schema of only physical columns. */
public static ResolvedSchema physical(String[] columnNames, DataType[] columnDataTypes) {
return physical(Arrays.asList(columnNames), Arrays.asList(columnDataTypes));
} | 3.68 |
hbase_ConnectionUtils_getPauseTime | /**
* Calculate pause time. Built on {@link HConstants#RETRY_BACKOFF}.
* @param pause time to pause
* @param tries amount of tries
* @return How long to wait after <code>tries</code> retries
*/
public static long getPauseTime(final long pause, final int tries) {
int ntries = tries;
if (ntries >= HConstants.RETRY_BACKOFF.length) {
ntries = HConstants.RETRY_BACKOFF.length - 1;
}
if (ntries < 0) {
ntries = 0;
}
long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
// 1% possible jitter
long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
return normalPause + jitter;
} | 3.68 |
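To illustrate the arithmetic above with made-up numbers (the real multipliers live in HConstants.RETRY_BACKOFF and are not reproduced here): with pause = 100 ms and a backoff multiplier of 10 for the clamped retry index, the normal pause is 100 * 10 = 1,000 ms, and the jitter adds at most 1% of that, i.e. up to 10 ms, giving a wait of roughly 1,000 to 1,010 ms.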
hadoop_WeightedPolicyInfo_toByteBuffer | /**
 * Converts the policy into a byte array representation wrapped in a
 * {@link ByteBuffer}.
*
* @return byte array representation of this policy configuration.
*
* @throws FederationPolicyInitializationException if a serialization error
* occurs.
*/
public ByteBuffer toByteBuffer()
throws FederationPolicyInitializationException {
if (jsonjaxbContext == null) {
throw new FederationPolicyInitializationException(
"JSONJAXBContext should" + " not be null.");
}
try {
String s = toJSONString();
return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
} catch (JAXBException j) {
throw new FederationPolicyInitializationException(j);
}
} | 3.68 |
hbase_AbstractProcedureScheduler_getPollCalls | // ============================================================================
// TODO: Metrics
// ============================================================================
public long getPollCalls() {
return pollCalls;
} | 3.68 |
flink_OutputFileConfig_getPartPrefix | /** The prefix for the part name. */
public String getPartPrefix() {
return partPrefix;
} | 3.68 |
querydsl_GeometryExpressions_collect | /**
* Return a specified ST_Geometry value from a collection of other geometries.
*
* @param expr1 geometry
* @param expr2 other geometry
* @return geometry collection
*/
public static GeometryExpression<?> collect(Expression<? extends Geometry> expr1, Expression<? extends Geometry> expr2) {
return geometryOperation(SpatialOps.COLLECT2, expr1, expr2);
} | 3.68 |
morf_SqlServerMetaDataProvider_dataTypeFromSqlType | /**
* SqlServerDialect maps CLOB data types to NVARCHAR(MAX) but NVARCHAR sqlTypes are mapped as Strings in the {@link DatabaseMetaDataProvider}.
 * This method uses the column width to determine whether a sqlType == Types.NVARCHAR should be mapped to a String
 * or to a CLOB data type. If the column width is large (> ~1G) it will be mapped to a CLOB data type. Otherwise
 * it will be mapped as a String.
*
* @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#dataTypeFromSqlType(int, java.lang.String, int)
*/
@Override
protected DataType dataTypeFromSqlType(int sqlType, String typeName, int width) {
if (sqlType == Types.NVARCHAR && width > 1<<30) {
return DataType.CLOB;
} else {
return super.dataTypeFromSqlType(sqlType, typeName, width);
}
} | 3.68 |
hbase_FilterListBase_filterRowCells | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc}
*/
@Override
public void filterRowCells(List<Cell> cells) throws IOException {
for (int i = 0, n = filters.size(); i < n; i++) {
filters.get(i).filterRowCells(cells);
}
} | 3.68 |
hmily_SpringBeanUtils_registerBean | /**
* Register bean.
*
* @param type the type
* @param object the object
*/
public void registerBean(final Class<?> type, final Object object) {
if (Objects.nonNull(cfgContext)) {
cfgContext.getBeanFactory().registerSingleton(type.getSimpleName(), object);
}
} | 3.68 |
hudi_HoodieRepairTool_deleteFiles | /**
* Deletes files from table base path.
*
* @param context {@link HoodieEngineContext} instance.
* @param basePath Base path of the table.
* @param relativeFilePaths A {@link List} of relative file paths for deleting.
*/
static boolean deleteFiles(
HoodieEngineContext context, String basePath, List<String> relativeFilePaths) {
SerializableConfiguration conf = context.getHadoopConf();
return context.parallelize(relativeFilePaths)
.mapPartitions(iterator -> {
FileSystem fs = FSUtils.getFs(basePath, conf.get());
List<Boolean> results = new ArrayList<>();
iterator.forEachRemaining(relativeFilePath -> {
boolean success = false;
try {
success = fs.delete(new Path(basePath, relativeFilePath), false);
} catch (IOException e) {
LOG.warn("Failed to delete file " + relativeFilePath);
} finally {
results.add(success);
}
});
return results.iterator();
}, true)
.collectAsList()
.stream().reduce((a, b) -> a && b)
.orElse(true);
} | 3.68 |
framework_BindingValidationStatus_getStatus | /**
* Gets status of the validation.
*
* @return status
*/
public Status getStatus() {
return status;
} | 3.68 |
pulsar_ModularLoadManagerImpl_reapDeadBrokerPreallocations | // For each broker that we have a recent load report, see if they are still alive
private void reapDeadBrokerPreallocations(List<String> aliveBrokers) {
for (String broker : loadData.getBrokerData().keySet()) {
if (!aliveBrokers.contains(broker)) {
if (log.isDebugEnabled()) {
log.debug("Broker {} appears to have stopped; now reclaiming any preallocations", broker);
}
final Iterator<Map.Entry<String, String>> iterator = preallocatedBundleToBroker.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, String> entry = iterator.next();
final String preallocatedBundle = entry.getKey();
final String preallocatedBroker = entry.getValue();
if (broker.equals(preallocatedBroker)) {
if (log.isDebugEnabled()) {
log.debug("Removing old preallocation on dead broker {} for bundle {}",
preallocatedBroker, preallocatedBundle);
}
iterator.remove();
}
}
}
}
} | 3.68 |
hudi_CkpMetadata_bootstrap | /**
 * Initialize the message bus; this cleans all existing messages.
*
* <p>This expects to be called by the driver.
*/
public void bootstrap() throws IOException {
fs.delete(path, true);
fs.mkdirs(path);
} | 3.68 |
hbase_ZKUtil_listChildrenAndWatchForNewChildren | /**
* Lists the children znodes of the specified znode. Also sets a watch on the specified znode
* which will capture a NodeDeleted event on the specified znode as well as NodeChildrenChanged if
* any children of the specified znode are created or deleted. Returns null if the specified node
* does not exist. Otherwise returns a list of children of the specified node. If the node exists
* but it has no children, an empty list will be returned.
* @param zkw zk reference
* @param znode path of node to list and watch children of
* @return list of children of the specified node, an empty list if the node exists but has no
* children, and null if the node does not exist
* @throws KeeperException if unexpected zookeeper exception
*/
public static List<String> listChildrenAndWatchForNewChildren(ZKWatcher zkw, String znode)
throws KeeperException {
try {
return zkw.getRecoverableZooKeeper().getChildren(znode, zkw);
} catch (KeeperException.NoNodeException ke) {
LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " "
+ "because node does not exist (not an error)"));
} catch (KeeperException e) {
LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e);
zkw.keeperException(e);
} catch (InterruptedException e) {
LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e);
zkw.interruptedException(e);
}
return null;
} | 3.68 |
flink_DataType_getLogicalType | /**
* Returns the corresponding logical type.
*
* @return a parameterized instance of {@link LogicalType}
*/
public LogicalType getLogicalType() {
return logicalType;
} | 3.68 |
framework_WebBrowser_isChrome | /**
* Tests whether the user is using Chrome.
*
* @return true if the user is using Chrome, false if the user is not using
* Chrome or if no information on the browser is present
*/
public boolean isChrome() {
if (browserDetails == null) {
return false;
}
return browserDetails.isChrome();
} | 3.68 |
flink_FlinkContainers_restartJobManager | /**
* Restarts JobManager container.
*
* <p>Note that the REST port will be changed because the new JM container will be mapped to
* another random port. Please make sure to get the REST cluster client again after this method
* is invoked.
*/
public void restartJobManager(RunnableWithException afterFailAction) throws Exception {
if (this.haService == null) {
LOG.warn(
"Restarting JobManager without HA service. This might drop all your running jobs");
}
jobManager.stop();
afterFailAction.run();
jobManager.start();
// Recreate client because JobManager REST port might have been changed in new container
waitUntilJobManagerRESTReachable(jobManager);
this.restClusterClient = createClusterClient();
waitUntilAllTaskManagerConnected();
} | 3.68 |
flink_HiveParserSemanticAnalyzer_handleInsertStatementSpecPhase1 | // This is phase1 of supporting specifying schema in insert statement.
// insert into foo(z,y) select a,b from bar;
private void handleInsertStatementSpecPhase1(
HiveParserASTNode ast,
HiveParserQBParseInfo qbp,
HiveParserBaseSemanticAnalyzer.Phase1Ctx ctx1)
throws SemanticException {
HiveParserASTNode tabColName = (HiveParserASTNode) ast.getChild(1);
if (ast.getType() == HiveASTParser.TOK_INSERT_INTO
&& tabColName != null
&& tabColName.getType() == HiveASTParser.TOK_TABCOLNAME) {
// we have "insert into foo(a,b)..."; parser will enforce that 1+ columns are listed if
// TOK_TABCOLNAME is present
List<String> targetColNames = new ArrayList<>();
for (Node col : tabColName.getChildren()) {
assert ((HiveParserASTNode) col).getType() == HiveASTParser.Identifier
: "expected token "
+ HiveASTParser.Identifier
+ " found "
+ ((HiveParserASTNode) col).getType();
targetColNames.add(((HiveParserASTNode) col).getText());
}
String fullTableName =
getUnescapedName(
(HiveParserASTNode) ast.getChild(0).getChild(0),
catalogRegistry.getCurrentCatalog(),
catalogRegistry.getCurrentDatabase());
qbp.setDestSchemaForClause(ctx1.dest, targetColNames);
Set<String> targetColumns = new HashSet<>(targetColNames);
if (targetColNames.size() != targetColumns.size()) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
tabColName,
"Duplicate column name detected in "
+ fullTableName
+ " table schema specification"));
}
CatalogTable targetTable = getCatalogTable(fullTableName, qb);
Set<String> partitionColumns = new HashSet<>(targetTable.getPartitionKeys());
ResolvedSchema resolvedSchema =
((ResolvedCatalogTable) targetTable).getResolvedSchema();
for (String column : resolvedSchema.getColumnNames()) {
// parser only allows foo(a,b), not foo(foo.a, foo.b)
// only consider non-partition col
if (!partitionColumns.contains(column)) {
targetColumns.remove(column);
}
}
// here we need to see if remaining columns are dynamic partition columns
if (!targetColumns.isEmpty()) {
/* We just checked the user specified schema columns among regular table column and found some which are not
'regular'. Now check is they are dynamic partition columns
For dynamic partitioning,
Given "create table multipart(a int, b int) partitioned by (c int, d int);"
for "insert into multipart partition(c='1',d)(d,a) values(2,3);" we expect parse tree to look like this
(TOK_INSERT_INTO
(TOK_TAB
(TOK_TABNAME multipart)
(TOK_PARTSPEC
(TOK_PARTVAL c '1')
(TOK_PARTVAL d)
)
)
(TOK_TABCOLNAME d a)
)*/
List<String> dynamicPartitionColumns = new ArrayList<String>();
if (ast.getChild(0) != null && ast.getChild(0).getType() == HiveASTParser.TOK_TAB) {
HiveParserASTNode tokTab = (HiveParserASTNode) ast.getChild(0);
HiveParserASTNode tokPartSpec =
(HiveParserASTNode)
tokTab.getFirstChildWithType(HiveASTParser.TOK_PARTSPEC);
if (tokPartSpec != null) {
for (Node n : tokPartSpec.getChildren()) {
HiveParserASTNode tokPartVal = null;
if (n instanceof HiveParserASTNode) {
tokPartVal = (HiveParserASTNode) n;
}
if (tokPartVal != null
&& tokPartVal.getType() == HiveASTParser.TOK_PARTVAL
&& tokPartVal.getChildCount() == 1) {
assert tokPartVal.getChild(0).getType() == HiveASTParser.Identifier
: "Expected column name; found tokType="
+ tokPartVal.getType();
dynamicPartitionColumns.add(tokPartVal.getChild(0).getText());
}
}
}
}
for (String colName : dynamicPartitionColumns) {
targetColumns.remove(colName);
}
if (!targetColumns.isEmpty()) {
// Found some columns in user specified schema which are neither regular not
// dynamic partition columns
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
tabColName,
"'"
+ (targetColumns.size() == 1
? targetColumns.iterator().next()
: targetColumns)
+ "' in insert schema specification "
+ (targetColumns.size() == 1 ? "is" : "are")
+ " not found among regular columns of "
+ fullTableName
+ " nor dynamic partition columns."));
}
}
}
} | 3.68 |
framework_CompositeErrorMessage_addErrorMessage | /**
 * Adds an error message to this composite message. Updates the level
* field.
*
* @param error
* the error message to be added. Duplicate errors are ignored.
*/
private void addErrorMessage(ErrorMessage error) {
if (error != null && !getCauses().contains(error)) {
addCause(error);
if (error.getErrorLevel().intValue() > getErrorLevel().intValue()) {
setErrorLevel(error.getErrorLevel());
}
}
} | 3.68 |
flink_WriterProperties_supportsResume | /**
 * @return Whether the {@link BucketWriter} supports appending data to the restored
 *     in-progress file or not.
*/
public boolean supportsResume() {
return supportsResume;
} | 3.68 |
framework_Form_discard | /*
 * Discards local changes and refreshes values from the data source. Don't add
 * a JavaDoc comment here; we use the default one from the interface.
*/
@Override
public void discard() throws Buffered.SourceException {
LinkedList<SourceException> problems = null;
// Try to discard all changes
for (final Object id : propertyIds) {
try {
fields.get(id).discard();
} catch (final Buffered.SourceException e) {
if (problems == null) {
problems = new LinkedList<SourceException>();
}
problems.add(e);
}
}
// No problems occurred
if (problems == null) {
if (currentBufferedSourceException != null) {
currentBufferedSourceException = null;
markAsDirty();
}
return;
}
// Discards problems occurred
final Throwable[] causes = problems
.toArray(new Throwable[problems.size()]);
final Buffered.SourceException e = new Buffered.SourceException(this,
causes);
currentBufferedSourceException = e;
markAsDirty();
throw e;
} | 3.68 |
framework_Embedded_setCodetype | /**
* This attribute specifies the content type of data expected when
* downloading the object specified by classid. This attribute is optional
* but recommended when classid is specified since it allows the user agent
* to avoid loading information for unsupported content types. When absent,
* it defaults to the value of the type attribute.
*
* @param codetype
* the codetype to set.
*/
public void setCodetype(String codetype) {
String oldCodetype = getCodetype();
if (codetype != oldCodetype
|| (codetype != null && !codetype.equals(oldCodetype))) {
getState().codetype = codetype;
}
} | 3.68 |
hbase_HFileBlock_getDummyHeaderForVersion | /**
* Return the appropriate DUMMY_HEADER for the minor version
*/
static private byte[] getDummyHeaderForVersion(boolean usesHBaseChecksum) {
return usesHBaseChecksum ? HConstants.HFILEBLOCK_DUMMY_HEADER : DUMMY_HEADER_NO_CHECKSUM;
} | 3.68 |
framework_Tree_rootItemIds | /**
* Gets the IDs of all Items in the container that don't have a parent.
*
* @see Container.Hierarchical#rootItemIds()
*/
@Override
public Collection<?> rootItemIds() {
return ((Container.Hierarchical) items).rootItemIds();
} | 3.68 |
hbase_HFileArchiver_archiveRecoveredEdits | /**
* Archive recovered edits using existing logic for archiving store files. This is currently only
* relevant when <b>hbase.region.archive.recovered.edits</b> is true, as recovered edits shouldn't
* be kept after replay. In theory, we could use very same method available for archiving store
* files, but supporting WAL dir and store files on different FileSystems added the need for extra
* validation of the passed FileSystem instance and the path where the archiving edits should be
* placed.
* @param conf {@link Configuration} to determine the archive directory.
* @param fs the filesystem used for storing WAL files.
* @param regionInfo {@link RegionInfo} a pseudo region representation for the archiving logic.
 * @param family a pseudo family representation for the archiving logic.
* @param replayedEdits the recovered edits to be archived.
 * @throws IOException if files can't be archived due to some internal error.
*/
public static void archiveRecoveredEdits(Configuration conf, FileSystem fs, RegionInfo regionInfo,
byte[] family, Collection<HStoreFile> replayedEdits) throws IOException {
String workingDir = conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR));
// extra sanity checks for the right FS
Path path = new Path(workingDir);
if (path.isAbsoluteAndSchemeAuthorityNull()) {
// no schema specified on wal dir value, so it's on same FS as StoreFiles
path = new Path(conf.get(HConstants.HBASE_DIR));
}
if (path.toUri().getScheme() != null && !path.toUri().getScheme().equals(fs.getScheme())) {
throw new IOException(
"Wrong file system! Should be " + path.toUri().getScheme() + ", but got " + fs.getScheme());
}
path = HFileArchiveUtil.getStoreArchivePathForRootDir(path, regionInfo, family);
archive(fs, regionInfo, family, replayedEdits, path);
} | 3.68 |
hadoop_SchedulingRequest_placementConstraintExpression | /**
* Set the <code>placementConstraintExpression</code> of the request.
*
* @see SchedulingRequest#setPlacementConstraint(
* PlacementConstraint)
* @param placementConstraintExpression <code>placementConstraints</code> of
* the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder placementConstraintExpression(
PlacementConstraint placementConstraintExpression) {
schedulingRequest
.setPlacementConstraint(placementConstraintExpression);
return this;
} | 3.68 |
pulsar_DispatchRateLimiter_hasMessageDispatchPermit | /**
 * Checks whether a dispatch-rate limit is configured and, if so, whether permits are available.
 *
 * @return true if a dispatch permit is available or no rate limiter is configured
*/
public boolean hasMessageDispatchPermit() {
return (dispatchRateLimiterOnMessage == null || dispatchRateLimiterOnMessage.getAvailablePermits() > 0)
&& (dispatchRateLimiterOnByte == null || dispatchRateLimiterOnByte.getAvailablePermits() > 0);
} | 3.68 |
druid_SQLBinaryOpExpr_addMergedItem | /**
* only for parameterized output
*
* @param item
*/
private void addMergedItem(SQLBinaryOpExpr item) {
if (mergedList == null) {
mergedList = new ArrayList<SQLObject>();
}
mergedList.add(item);
} | 3.68 |
hbase_HttpServer_addDefaultServlets | /**
* Add default servlets.
*/
protected void addDefaultServlets(ContextHandlerCollection contexts, Configuration conf)
throws IOException {
// set up default servlets
addPrivilegedServlet("stacks", "/stacks", StackServlet.class);
addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
// While we don't expect users to have sensitive information in their configuration, they
// might. Give them an option to not expose the service configuration to all users.
if (conf.getBoolean(HTTP_PRIVILEGED_CONF_KEY, HTTP_PRIVILEGED_CONF_DEFAULT)) {
addPrivilegedServlet("conf", "/conf", ConfServlet.class);
} else {
addUnprivilegedServlet("conf", "/conf", ConfServlet.class);
}
final String asyncProfilerHome = ProfileServlet.getAsyncProfilerHome();
if (asyncProfilerHome != null && !asyncProfilerHome.trim().isEmpty()) {
addPrivilegedServlet("prof", "/prof", ProfileServlet.class);
Path tmpDir = Paths.get(ProfileServlet.OUTPUT_DIR);
if (Files.notExists(tmpDir)) {
Files.createDirectories(tmpDir);
}
ServletContextHandler genCtx = new ServletContextHandler(contexts, "/prof-output-hbase");
genCtx.addServlet(ProfileOutputServlet.class, "/*");
genCtx.setResourceBase(tmpDir.toAbsolutePath().toString());
genCtx.setDisplayName("prof-output-hbase");
} else {
addUnprivilegedServlet("prof", "/prof", ProfileServlet.DisabledServlet.class);
LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property "
+ "not specified. Disabling /prof endpoint.");
}
/* register metrics servlets */
String[] enabledServlets = conf.getStrings(METRIC_SERVLETS_CONF_KEY, METRICS_SERVLETS_DEFAULT);
for (String enabledServlet : enabledServlets) {
try {
ServletConfig servletConfig = METRIC_SERVLETS.get(enabledServlet);
if (servletConfig != null) {
Class<?> clz = Class.forName(servletConfig.getClazz());
addPrivilegedServlet(servletConfig.getName(), servletConfig.getPathSpec(),
clz.asSubclass(HttpServlet.class));
}
} catch (Exception e) {
/* shouldn't be fatal, so warn the user about it */
LOG.warn("Couldn't register the servlet " + enabledServlet, e);
}
}
} | 3.68 |
morf_MySql_openSchema | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#openSchema(Connection, String, String)
*/
@Override
public Schema openSchema(Connection connection, String databaseName, String schemaName) {
return new MySqlMetaDataProvider(connection, databaseName);
} | 3.68 |
morf_HumanReadableStatementHelper_generateAddTableString | /**
* Generates a human-readable "Add Table" string.
*
* @param definition the definition of the new table
* @return a string containing the human-readable version of the action
*/
public static String generateAddTableString(final Table definition) {
StringBuilder addTableBuilder = new StringBuilder();
addTableBuilder.append(String.format("Create table %s with %s and %s",
definition.getName(),
generateColumnCountString(definition.columns().size()),
generateIndexCountString(definition.indexes().size()))
);
for (Column column : definition.columns()) {
addTableBuilder.append(generateNewTableColumnString(column));
}
return addTableBuilder.toString();
} | 3.68 |
hudi_ClusteringUtil_isClusteringInstant | /**
* Returns whether the given instant {@code instant} is with clustering operation.
*/
public static boolean isClusteringInstant(HoodieInstant instant, HoodieTimeline timeline) {
if (!instant.getAction().equals(HoodieTimeline.REPLACE_COMMIT_ACTION)) {
return false;
}
try {
return TimelineUtils.getCommitMetadata(instant, timeline).getOperationType().equals(WriteOperationType.CLUSTER);
} catch (IOException e) {
throw new HoodieException("Resolve replace commit metadata error for instant: " + instant, e);
}
} | 3.68 |
flink_ListSerializer_getElementSerializer | /**
* Gets the serializer for the elements of the list.
*
* @return The serializer for the elements of the list
*/
public TypeSerializer<T> getElementSerializer() {
return elementSerializer;
} | 3.68 |
framework_Escalator_getWidgetFromCell | /**
* Returns the widget from a cell node or <code>null</code> if there is no
* widget in the cell
*
* @param cellNode
* The cell node
*/
static Widget getWidgetFromCell(Node cellNode) {
Node possibleWidgetNode = cellNode.getFirstChild();
if (possibleWidgetNode != null
&& possibleWidgetNode.getNodeType() == Node.ELEMENT_NODE) {
@SuppressWarnings("deprecation")
com.google.gwt.user.client.Element castElement = (com.google.gwt.user.client.Element) possibleWidgetNode
.cast();
Widget w = WidgetUtil.findWidget(castElement, null);
// Ensure findWidget did not traverse past the cell element in the
// DOM hierarchy
if (cellNode.isOrHasChild(w.getElement())) {
return w;
}
}
return null;
} | 3.68 |
hbase_ServerManager_isServerDead | /**
* Check if a server is known to be dead. A server can be online, or known to be dead, or unknown
* to this manager (i.e, not online, not known to be dead either; it is simply not tracked by the
* master any more, for example, a very old previous instance).
*/
public synchronized boolean isServerDead(ServerName serverName) {
return serverName == null || deadservers.isDeadServer(serverName);
} | 3.68 |
graphhopper_QueryGraph_clearUnfavoredStatus | /**
* Removes the 'unfavored' status of all virtual edges.
*/
public void clearUnfavoredStatus() {
for (VirtualEdgeIteratorState edge : unfavoredEdges) {
edge.setUnfavored(false);
}
unfavoredEdges.clear();
} | 3.68 |
flink_TimestampedValue_getTimestamp | /** @return The timestamp associated with this stream value in milliseconds. */
public long getTimestamp() {
if (hasTimestamp) {
return timestamp;
} else {
throw new IllegalStateException(
"Record has no timestamp. Is the time characteristic set to 'ProcessingTime', or "
+ "did you forget to call 'DataStream.assignTimestampsAndWatermarks(...)'?");
}
} | 3.68 |
flink_TaskExecutorResourceUtils_generateDefaultSlotResourceProfile | /**
 * This must be consistent with {@link
* org.apache.flink.runtime.resourcemanager.slotmanager.SlotManagerUtils#generateDefaultSlotResourceProfile}.
*/
@VisibleForTesting
public static ResourceProfile generateDefaultSlotResourceProfile(
TaskExecutorResourceSpec taskExecutorResourceSpec, int numberOfSlots) {
final ResourceProfile.Builder resourceProfileBuilder =
ResourceProfile.newBuilder()
.setCpuCores(taskExecutorResourceSpec.getCpuCores().divide(numberOfSlots))
.setTaskHeapMemory(
taskExecutorResourceSpec.getTaskHeapSize().divide(numberOfSlots))
.setTaskOffHeapMemory(
taskExecutorResourceSpec.getTaskOffHeapSize().divide(numberOfSlots))
.setManagedMemory(
taskExecutorResourceSpec
.getManagedMemorySize()
.divide(numberOfSlots))
.setNetworkMemory(
taskExecutorResourceSpec.getNetworkMemSize().divide(numberOfSlots));
taskExecutorResourceSpec
.getExtendedResources()
.forEach(
(name, resource) ->
resourceProfileBuilder.setExtendedResource(
resource.divide(numberOfSlots)));
return resourceProfileBuilder.build();
} | 3.68 |
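As a quick worked example of the division above (illustrative figures only, not Flink defaults): with 4 slots and a TaskExecutorResourceSpec of 4 CPU cores, 2 GB task heap and 1 GB managed memory, each default slot profile receives 1 core, 512 MB task heap and 256 MB managed memory, since every resource component is divided by the slot count.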
hbase_TableRecordReader_setScan | /**
* Sets the scan defining the actual details like columns etc.
* @param scan The scan to set.
*/
public void setScan(Scan scan) {
this.recordReaderImpl.setScan(scan);
} | 3.68 |
pulsar_ManagedLedgerConfig_getDigestType | /**
* @return the digestType
*/
public DigestType getDigestType() {
return digestType;
} | 3.68 |
framework_ServerRpcQueue_toJson | /**
* Returns the current invocations as JSON.
*
* @return the current invocations in a JSON format ready to be sent to the
* server
*/
public JsonArray toJson() {
JsonArray json = Json.createArray();
if (isEmpty()) {
return json;
}
for (MethodInvocation invocation : getAll()) {
String connectorId = invocation.getConnectorId();
if (!connectorExists(connectorId)) {
getLogger().info("Ignoring RPC for removed connector: "
+ connectorId + ": " + invocation);
continue;
}
JsonArray invocationJson = Json.createArray();
invocationJson.set(0, connectorId);
invocationJson.set(1, invocation.getInterfaceName());
invocationJson.set(2, invocation.getMethodName());
JsonArray paramJson = Json.createArray();
Type[] parameterTypes = null;
if (!isLegacyVariableChange(invocation)
&& !isJavascriptRpc(invocation)) {
try {
Type type = new Type(invocation.getInterfaceName(), null);
Method method = type.getMethod(invocation.getMethodName());
parameterTypes = method.getParameterTypes();
} catch (NoDataException e) {
throw new RuntimeException("No type data for " + invocation,
e);
}
}
for (int i = 0; i < invocation.getParameters().length; ++i) {
// TODO non-static encoder?
Type type = null;
if (parameterTypes != null) {
type = parameterTypes[i];
}
Object value = invocation.getParameters()[i];
JsonValue jsonValue = JsonEncoder.encode(value, type,
connection);
paramJson.set(i, jsonValue);
}
invocationJson.set(3, paramJson);
json.set(json.length(), invocationJson);
}
return json;
} | 3.68 |
hadoop_YarnRegistryViewForProviders_getAbsoluteSelfRegistrationPath | /**
* Get the absolute path to where the service has registered itself.
 * This includes the base registry path.
 * Null until the service is registered.
* @return the service registration path.
*/
public String getAbsoluteSelfRegistrationPath() {
if (selfRegistrationPath == null) {
return null;
}
String root = registryOperations.getConfig().getTrimmed(
RegistryConstants.KEY_REGISTRY_ZK_ROOT,
RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
return RegistryPathUtils.join(root, selfRegistrationPath);
} | 3.68 |
rocketmq-connect_AvroDeserializer_deserialize | /**
* deserialize
*
* @param topic
* @param isKey
* @param payload
* @return
*/
@Override
public GenericContainerWithVersion deserialize(String topic, boolean isKey, byte[] payload) {
if (payload == null) {
return null;
}
// get subject name
String subjectName = TopicNameStrategy.subjectName(topic, isKey);
ByteBuffer byteBuffer = ByteBuffer.wrap(payload);
long recordId = byteBuffer.getLong();
GetSchemaResponse schemaResponse = schemaRegistryClient.getSchemaByRecordId(AvroData.NAMESPACE, topic, recordId);
String avroSchemaIdl = schemaResponse.getIdl();
Integer version = Integer.parseInt(String.valueOf(schemaResponse.getVersion()));
AvroSchema avroSchema = new AvroSchema(avroSchemaIdl);
Object result = this.avroDatumReaderFactory.read(byteBuffer, avroSchema.rawSchema(), null);
if (avroSchema.rawSchema().getType().equals(Schema.Type.RECORD)) {
return new GenericContainerWithVersion((GenericContainer) result, version);
} else {
return new GenericContainerWithVersion(
new NonRecordContainer(avroSchema.rawSchema(), result),
version
);
}
} | 3.68 |
flink_ContinuousFileMonitoringFunction_shouldIgnore | /**
* Returns {@code true} if the file is NOT to be processed further. This happens if the
* modification time of the file is smaller than the {@link #globalModificationTime}.
*
* @param filePath the path of the file to check.
* @param modificationTime the modification time of the file.
*/
private boolean shouldIgnore(Path filePath, long modificationTime) {
assert (Thread.holdsLock(checkpointLock));
boolean shouldIgnore = modificationTime <= globalModificationTime;
if (shouldIgnore && LOG.isDebugEnabled()) {
LOG.debug(
"Ignoring "
+ filePath
+ ", with mod time= "
+ modificationTime
+ " and global mod time= "
+ globalModificationTime);
}
return shouldIgnore;
} | 3.68 |
hibernate-validator_ReflectionHelper_unBoxedType | /**
* Returns the primitive type for a boxed type.
*
* @param type the boxed type
*
 * @return the primitive type for an auto-boxed type. In case {@link Void} is
 *         passed (which is considered a primitive type by
 *         {@link Class#isPrimitive()}), {@link Void} will be returned.
 *
 * @throws IllegalArgumentException in case the parameter {@code type} does not
 *         represent a boxed type.
*/
public static Class<?> unBoxedType(Class<?> type) {
Class<?> wrapperType = WRAPPER_TO_PRIMITIVE_TYPES.get( type );
if ( wrapperType == null ) {
throw LOG.getHasToBeABoxedTypeException( type.getClass() );
}
return wrapperType;
} | 3.68 |
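A short illustrative call against the method above, assuming ReflectionHelper is accessible from the caller's package:

Class<?> primitive = ReflectionHelper.unBoxedType(Integer.class); // int.class
Class<?> voidType  = ReflectionHelper.unBoxedType(Void.class);    // Void.class, per the contract above
// unBoxedType(String.class) would throw, since String is not a wrapper type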
hadoop_EditLogInputStream_skipUntil | /**
* Skip edit log operations up to a given transaction ID, or until the
* end of the edit log is reached.
*
* After this function returns, the next call to readOp will return either
* end-of-file (null) or a transaction with a txid equal to or higher than
* the one we asked for.
*
* @param txid The transaction ID to read up until.
* @return Returns true if we found a transaction ID greater than
* or equal to 'txid' in the log.
*/
public boolean skipUntil(long txid) throws IOException {
while (true) {
FSEditLogOp op = readOp();
if (op == null) {
return false;
}
if (op.getTransactionId() >= txid) {
cachedOp = op;
return true;
}
}
} | 3.68 |
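A minimal sketch of positioning a stream with the method above; the stream variable, target txid, and process(...) helper are hypothetical:

long startTxId = 42L;                        // example transaction id
if (stream.skipUntil(startTxId)) {
  FSEditLogOp op;
  while ((op = stream.readOp()) != null) {   // the first op returned has txid >= startTxId
    process(op);                             // placeholder for application logic
  }
} else {
  // the edit log ended before reaching startTxId
}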
framework_InMemoryDataProviderHelpers_getNaturalSortComparator | /**
* Gets the natural order comparator for the type argument, or the natural
* order comparator reversed if the given sorting direction is
* {@link SortDirection#DESCENDING}.
*
* @param sortDirection
* the sort direction to use
* @return the natural comparator, with ordering defined by the given sort
* direction
*/
public static <V extends Comparable<? super V>> Comparator<V> getNaturalSortComparator(
SortDirection sortDirection) {
Comparator<V> comparator = Comparator.naturalOrder();
if (sortDirection == SortDirection.DESCENDING) {
comparator = comparator.reversed();
}
return comparator;
} | 3.68 |
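A small usage example, assuming Vaadin's SortDirection enum and this helper class are on the classpath:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

List<String> names = new ArrayList<>(Arrays.asList("b", "a", "c"));
Comparator<String> cmp =
        InMemoryDataProviderHelpers.getNaturalSortComparator(SortDirection.DESCENDING);
names.sort(cmp);   // -> [c, b, a]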
hudi_AvroOrcUtils_addToVector | /**
* Add an object (of a given ORC type) to the column vector at a given position.
*
* @param type ORC schema of the value Object.
* @param colVector The column vector to store the value Object.
* @param avroSchema Avro schema of the value Object.
* Only used to check logical types for timestamp unit conversion.
* @param value Object to be added to the column vector
* @param vectorPos The position in the vector where value will be stored at.
*/
public static void addToVector(TypeDescription type, ColumnVector colVector, Schema avroSchema, Object value, int vectorPos) {
final int currentVecLength = colVector.isNull.length;
if (vectorPos >= currentVecLength) {
colVector.ensureSize(2 * currentVecLength, true);
}
if (value == null) {
colVector.isNull[vectorPos] = true;
colVector.noNulls = false;
return;
}
if (avroSchema.getType().equals(Schema.Type.UNION)) {
avroSchema = getActualSchemaType(avroSchema);
}
LogicalType logicalType = avroSchema != null ? avroSchema.getLogicalType() : null;
switch (type.getCategory()) {
case BOOLEAN:
LongColumnVector boolVec = (LongColumnVector) colVector;
boolVec.vector[vectorPos] = (boolean) value ? 1 : 0;
break;
case BYTE:
LongColumnVector byteColVec = (LongColumnVector) colVector;
byteColVec.vector[vectorPos] = (byte) value;
break;
case SHORT:
LongColumnVector shortColVec = (LongColumnVector) colVector;
shortColVec.vector[vectorPos] = (short) value;
break;
case INT:
// the Avro logical type could be AvroTypeUtil.LOGICAL_TYPE_TIME_MILLIS, but we will ignore that fact here
// since Orc has no way to represent a time in the way Avro defines it; we will simply preserve the int value
LongColumnVector intColVec = (LongColumnVector) colVector;
intColVec.vector[vectorPos] = (int) value;
break;
case LONG:
// the Avro logical type could be AvroTypeUtil.LOGICAL_TYPE_TIME_MICROS, but we will ignore that fact here
// since Orc has no way to represent a time in the way Avro defines it; we will simply preserve the long value
LongColumnVector longColVec = (LongColumnVector) colVector;
longColVec.vector[vectorPos] = (long) value;
break;
case FLOAT:
DoubleColumnVector floatColVec = (DoubleColumnVector) colVector;
floatColVec.vector[vectorPos] = (float) value;
break;
case DOUBLE:
DoubleColumnVector doubleColVec = (DoubleColumnVector) colVector;
doubleColVec.vector[vectorPos] = (double) value;
break;
case VARCHAR:
case CHAR:
case STRING:
BytesColumnVector bytesColVec = (BytesColumnVector) colVector;
byte[] bytes = null;
if (value instanceof String) {
bytes = getUTF8Bytes((String) value);
} else if (value instanceof Utf8) {
final Utf8 utf8 = (Utf8) value;
bytes = utf8.getBytes();
} else if (value instanceof GenericData.EnumSymbol) {
bytes = getUTF8Bytes(((GenericData.EnumSymbol) value).toString());
} else {
throw new IllegalStateException(String.format(
"Unrecognized type for Avro %s field value, which has type %s, value %s",
type.getCategory().getName(),
value.getClass().getName(),
value.toString()
));
}
if (bytes == null) {
bytesColVec.isNull[vectorPos] = true;
bytesColVec.noNulls = false;
} else {
bytesColVec.setRef(vectorPos, bytes, 0, bytes.length);
}
break;
case DATE:
LongColumnVector dateColVec = (LongColumnVector) colVector;
int daysSinceEpoch;
if (logicalType instanceof LogicalTypes.Date) {
daysSinceEpoch = (int) value;
} else if (value instanceof java.sql.Date) {
daysSinceEpoch = DateWritable.dateToDays((java.sql.Date) value);
} else if (value instanceof Date) {
daysSinceEpoch = DateWritable.millisToDays(((Date) value).getTime());
} else {
throw new IllegalStateException(String.format(
"Unrecognized type for Avro DATE field value, which has type %s, value %s",
value.getClass().getName(),
value.toString()
));
}
dateColVec.vector[vectorPos] = daysSinceEpoch;
break;
case TIMESTAMP:
TimestampColumnVector tsColVec = (TimestampColumnVector) colVector;
long time;
int nanos = 0;
// The unit for Timestamp in ORC is millis, convert timestamp to millis if needed
if (logicalType instanceof LogicalTypes.TimestampMillis) {
time = (long) value;
} else if (logicalType instanceof LogicalTypes.TimestampMicros) {
final long logicalTsValue = (long) value;
time = logicalTsValue / MICROS_PER_MILLI;
nanos = NANOS_PER_MICRO * ((int) (logicalTsValue % MICROS_PER_MILLI));
} else if (value instanceof Timestamp) {
Timestamp tsValue = (Timestamp) value;
time = tsValue.getTime();
nanos = tsValue.getNanos();
} else if (value instanceof java.sql.Date) {
java.sql.Date sqlDateValue = (java.sql.Date) value;
time = sqlDateValue.getTime();
} else if (value instanceof Date) {
Date dateValue = (Date) value;
time = dateValue.getTime();
} else {
throw new IllegalStateException(String.format(
"Unrecognized type for Avro TIMESTAMP field value, which has type %s, value %s",
value.getClass().getName(),
value.toString()
));
}
tsColVec.time[vectorPos] = time;
tsColVec.nanos[vectorPos] = nanos;
break;
case BINARY:
BytesColumnVector binaryColVec = (BytesColumnVector) colVector;
byte[] binaryBytes;
if (value instanceof GenericData.Fixed) {
binaryBytes = ((GenericData.Fixed)value).bytes();
} else if (value instanceof ByteBuffer) {
final ByteBuffer byteBuffer = (ByteBuffer) value;
binaryBytes = toBytes(byteBuffer);
} else if (value instanceof byte[]) {
binaryBytes = (byte[]) value;
} else {
throw new IllegalStateException(String.format(
"Unrecognized type for Avro BINARY field value, which has type %s, value %s",
value.getClass().getName(),
value.toString()
));
}
binaryColVec.setRef(vectorPos, binaryBytes, 0, binaryBytes.length);
break;
case DECIMAL:
DecimalColumnVector decimalColVec = (DecimalColumnVector) colVector;
HiveDecimal decimalValue;
if (value instanceof BigDecimal) {
final BigDecimal decimal = (BigDecimal) value;
decimalValue = HiveDecimal.create(decimal);
} else if (value instanceof ByteBuffer) {
final ByteBuffer byteBuffer = (ByteBuffer) value;
final byte[] decimalBytes = new byte[byteBuffer.remaining()];
byteBuffer.get(decimalBytes);
final BigInteger bigInt = new BigInteger(decimalBytes);
final int scale = type.getScale();
BigDecimal bigDecVal = new BigDecimal(bigInt, scale);
decimalValue = HiveDecimal.create(bigDecVal);
if (decimalValue == null && decimalBytes.length > 0) {
throw new IllegalStateException(
"Unexpected read null HiveDecimal from bytes (base-64 encoded): "
+ Base64.getEncoder().encodeToString(decimalBytes)
);
}
} else if (value instanceof GenericData.Fixed) {
final BigDecimal decimal = new Conversions.DecimalConversion()
.fromFixed((GenericData.Fixed) value, avroSchema, logicalType);
decimalValue = HiveDecimal.create(decimal);
} else {
throw new IllegalStateException(String.format(
"Unexpected type for decimal (%s), cannot convert from Avro value",
value.getClass().getCanonicalName()
));
}
if (decimalValue == null) {
decimalColVec.isNull[vectorPos] = true;
decimalColVec.noNulls = false;
} else {
decimalColVec.set(vectorPos, decimalValue);
}
break;
case LIST:
List<?> list = (List<?>) value;
ListColumnVector listColVec = (ListColumnVector) colVector;
listColVec.offsets[vectorPos] = listColVec.childCount;
listColVec.lengths[vectorPos] = list.size();
TypeDescription listType = type.getChildren().get(0);
for (Object listItem : list) {
addToVector(listType, listColVec.child, avroSchema.getElementType(), listItem, listColVec.childCount++);
}
break;
case MAP:
Map<String, ?> mapValue = (Map<String, ?>) value;
MapColumnVector mapColumnVector = (MapColumnVector) colVector;
mapColumnVector.offsets[vectorPos] = mapColumnVector.childCount;
mapColumnVector.lengths[vectorPos] = mapValue.size();
// keys are always strings
Schema keySchema = Schema.create(Schema.Type.STRING);
for (Map.Entry<String, ?> entry : mapValue.entrySet()) {
addToVector(
type.getChildren().get(0),
mapColumnVector.keys,
keySchema,
entry.getKey(),
mapColumnVector.childCount
);
addToVector(
type.getChildren().get(1),
mapColumnVector.values,
avroSchema.getValueType(),
entry.getValue(),
mapColumnVector.childCount
);
mapColumnVector.childCount++;
}
break;
case STRUCT:
StructColumnVector structColVec = (StructColumnVector) colVector;
GenericData.Record record = (GenericData.Record) value;
for (int i = 0; i < type.getFieldNames().size(); i++) {
String fieldName = type.getFieldNames().get(i);
Object fieldValue = record.get(fieldName);
TypeDescription fieldType = type.getChildren().get(i);
addToVector(fieldType, structColVec.fields[i], avroSchema.getFields().get(i).schema(), fieldValue, vectorPos);
}
break;
case UNION:
UnionColumnVector unionColVec = (UnionColumnVector) colVector;
List<TypeDescription> childTypes = type.getChildren();
boolean added = addUnionValue(unionColVec, childTypes, avroSchema, value, vectorPos);
if (!added) {
throw new IllegalStateException(String.format(
"Failed to add value %s to union with type %s",
value == null ? "null" : value.toString(),
type.toString()
));
}
break;
default:
throw new IllegalArgumentException("Invalid TypeDescription " + type.toString() + ".");
}
} | 3.68 |
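A minimal sketch of feeding a single Avro string value into an ORC row batch via the method above; the struct layout and the value are illustrative assumptions, and package locations may differ depending on the ORC/Hive versions bundled with Hudi:

import org.apache.avro.Schema;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;

TypeDescription orcSchema = TypeDescription.fromString("struct<name:string>");
VectorizedRowBatch batch = orcSchema.createRowBatch();
Schema avroString = Schema.create(Schema.Type.STRING);

// Write "alice" into row 0 of the single string column.
AvroOrcUtils.addToVector(orcSchema.getChildren().get(0), batch.cols[0], avroString, "alice", 0);
batch.size = 1;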
flink_FieldParser_getCharset | /**
* Gets the character set used for this parser.
*
* @return the charset used for this parser.
*/
public Charset getCharset() {
return this.charset;
} | 3.68 |
hudi_HoodieAsyncService_enqueuePendingAsyncServiceInstant | /**
 * Enqueues a new pending table service instant.
* @param instant {@link HoodieInstant} to enqueue.
*/
public void enqueuePendingAsyncServiceInstant(HoodieInstant instant) {
LOG.info("Enqueuing new pending table service instant: " + instant.getTimestamp());
pendingInstants.add(instant);
} | 3.68 |
hadoop_RolePolicies_allowS3Operations | /**
 * From an S3 bucket name, build the statements granting access to the bucket
 * and, optionally, read/write access to the objects in it.
 * @param bucket bucket name.
 * @param write are write permissions required?
 * @return statements granting the requested access.
*/
public static List<Statement> allowS3Operations(String bucket,
boolean write) {
// add the bucket operations for the specific bucket ARN
ArrayList<Statement> statements =
Lists.newArrayList(
statement(true,
bucketToArn(bucket),
S3_GET_BUCKET_LOCATION, S3_BUCKET_ALL_LIST));
// then add the statements for objects in the buckets
if (write) {
statements.add(
statement(true,
bucketObjectsToArn(bucket),
S3_ROOT_RW_OPERATIONS));
} else {
statements.add(
statement(true,
bucketObjectsToArn(bucket),
S3_ROOT_READ_OPERATIONS_LIST));
}
return statements;
} | 3.68 |
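A hedged example of requesting read-only access statements for one bucket; the bucket name is an assumption:

List<Statement> statements = RolePolicies.allowS3Operations("example-bucket", false);
// The returned statements can then be combined into a role policy document
// alongside any other statements the deployment needs.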
hbase_HeterogeneousRegionCountCostFunction_rebuildCache | /**
 * Rebuild the cache mapping each ServerName to its capacity.
*/
private void rebuildCache() {
LOG.debug("Rebuilding cache of capacity for each RS");
this.limitPerRS.clear();
this.totalCapacity = 0;
if (null == this.cluster) {
return;
}
for (int i = 0; i < this.cluster.numServers; i++) {
final ServerName sn = this.cluster.servers[i];
final int capacity = this.findLimitForRS(sn);
LOG.debug(sn.getHostname() + " can hold " + capacity + " regions");
this.totalCapacity += capacity;
}
overallUsage = (double) this.cluster.numRegions / (double) this.totalCapacity;
LOG.info("Cluster can hold " + this.cluster.numRegions + "/" + this.totalCapacity + " regions ("
+ Math.round(overallUsage * 100) + "%)");
if (overallUsage >= 1) {
LOG.warn("Cluster is overused, {}", overallUsage);
}
} | 3.68 |