name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
Activiti_TreeBuilderException_getEncountered | /**
* @return the substring (or description) that has been encountered
*/
public String getEncountered() {
return encountered;
} | 3.68 |
flink_MemorySegment_putShortLittleEndian | /**
* Writes the given short integer value (16 bit, 2 bytes) to the given position in little-endian
* byte order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #putShort(int, short)}. For most cases (such as transient storage in
* memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #putShort(int, short)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The short value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public void putShortLittleEndian(int index, short value) {
if (LITTLE_ENDIAN) {
putShort(index, value);
} else {
putShort(index, Short.reverseBytes(value));
}
} | 3.68 |
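The branch above either writes in native order or byte-swaps with Short.reverseBytes. A minimal standalone JDK sketch (not Flink's MemorySegment) showing the same little-endian layout:

```java
// Standalone sketch: write a short in little-endian order regardless of the platform's native order.
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class LittleEndianShortDemo {
    public static void main(String[] args) {
        short value = 0x1234;
        ByteBuffer buf = ByteBuffer.allocate(2).order(ByteOrder.LITTLE_ENDIAN);
        buf.putShort(0, value);
        // Little-endian layout: least significant byte first -> 34 12
        System.out.printf("%02x %02x%n", buf.get(0), buf.get(1));

        // Equivalent to the else-branch above: on a big-endian platform, reverse the
        // bytes before a native-order write.
        short reversed = Short.reverseBytes(value); // 0x3412
        System.out.printf("reversed: %04x%n", reversed);
    }
}
```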
hadoop_ServiceShutdownHook_unregister | /**
* Unregister the hook.
*/
public synchronized void unregister() {
try {
ShutdownHookManager.get().removeShutdownHook(this);
} catch (IllegalStateException e) {
LOG.info("Failed to unregister shutdown hook: {}", e, e);
}
} | 3.68 |
flink_FailureEnricherUtils_getIncludedFailureEnrichers | /**
* Returns a set of failure enricher names included in the given configuration.
*
* @param configuration the configuration to get the failure enricher names from
* @return failure enricher names
*/
@VisibleForTesting
static Set<String> getIncludedFailureEnrichers(final Configuration configuration) {
final String includedEnrichersString =
configuration.getString(JobManagerOptions.FAILURE_ENRICHERS_LIST, "");
return enricherListPattern
.splitAsStream(includedEnrichersString)
.filter(r -> !r.isEmpty())
.collect(Collectors.toSet());
} | 3.68 |
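The exact enricherListPattern is not shown in the snippet; below is a standalone sketch of the same parsing idea, assuming a comma/semicolon-separated list:

```java
// Standalone sketch of splitting a delimiter-separated config value into a set.
// The delimiter pattern here is an assumption; the real enricherListPattern may differ.
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class EnricherListDemo {
    private static final Pattern LIST_PATTERN = Pattern.compile("[,;]\\s*");

    static Set<String> parseList(String raw) {
        return LIST_PATTERN.splitAsStream(raw == null ? "" : raw)
                .filter(s -> !s.isEmpty())
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        // prints something like [metrics, rest] (set order not guaranteed)
        System.out.println(parseList("metrics, rest;  "));
    }
}
```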
hibernate-validator_MetaDataBuilder_adaptConstraints | /**
* Allows specific sub-classes to customize the retrieved constraints.
*/
protected Set<MetaConstraint<?>> adaptConstraints(ConstrainedElement constrainedElement, Set<MetaConstraint<?>> constraints) {
return constraints;
} | 3.68 |
framework_TableWidthItemRemove_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 13592;
} | 3.68 |
hadoop_TimelineReaderAuthenticationFilterInitializer_initFilter | /**
* Initializes {@link AuthenticationFilter}
* <p>
* Propagates to {@link AuthenticationFilter} configuration all
* YARN configuration properties prefixed with
* {@value
* org.apache.hadoop.yarn.conf.YarnConfiguration#TIMELINE_HTTP_AUTH_PREFIX}.
*
* @param container
* The filter container
* @param conf
* Configuration for run-time parameters
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
setAuthFilterConfig(conf);
container.addGlobalFilter("Timeline Reader Authentication Filter",
AuthenticationFilter.class.getName(),
getFilterConfig());
} | 3.68 |
pulsar_AuthenticationProviderSasl_authRoleFromHttpRequest | /**
* Returns null if authentication has not completed.
* Returns the auth role if authentication has completed and the httpRequest's role token contains the auth role.
*/
public String authRoleFromHttpRequest(HttpServletRequest httpRequest) throws AuthenticationException {
String tokenStr = httpRequest.getHeader(SASL_AUTH_ROLE_TOKEN);
if (tokenStr == null) {
return null;
}
String unSigned = signer.verifyAndExtract(tokenStr);
SaslRoleToken token;
try {
token = SaslRoleToken.parse(unSigned);
if (log.isDebugEnabled()) {
log.debug("server side get role token: {}, session in token:{}, session in request:{}",
token, token.getSession(), httpRequest.getRemoteAddr());
}
} catch (Exception e) {
log.error("token parse failed, with exception: ", e);
return SASL_AUTH_ROLE_TOKEN_EXPIRED;
}
if (!token.isExpired()) {
return token.getUserRole();
} else if (token.isExpired()) {
return SASL_AUTH_ROLE_TOKEN_EXPIRED;
} else {
return null;
}
} | 3.68 |
framework_Component_addStyleNames | /**
* Adds one or more style names to this component by using one or multiple
* parameters.
*
* @param styles
* the style name or style names to be added to the component
* @see #addStyleName(String)
* @see #setStyleName(String)
* @see #removeStyleName(String)
* @since 8.1
*/
public default void addStyleNames(String... styles) {
for (String style : styles) {
addStyleName(style);
}
} | 3.68 |
hbase_QuotaObserverChore_setTableQuotaSnapshot | /**
* Stores the quota state for the given table.
*/
void setTableQuotaSnapshot(TableName table, SpaceQuotaSnapshot snapshot) {
this.tableQuotaSnapshots.put(table, snapshot);
} | 3.68 |
flink_EmptyIterator_next | /**
* Always throws a {@link java.util.NoSuchElementException}.
*
* @see java.util.Iterator#next()
*/
@Override
public E next() {
throw new NoSuchElementException();
} | 3.68 |
dubbo_BasicJsonWriter_writeObject | /**
* Write an object with the specified attributes. Each attribute is
* written according to its value type:
* <ul>
* <li>Map: write the value as a nested object</li>
* <li>List: write the value as a nested array</li>
* <li>Otherwise, write a single value</li>
* </ul>
*
* @param attributes the attributes of the object
*/
public void writeObject(Map<String, Object> attributes) {
writeObject(attributes, true);
} | 3.68 |
zxing_HttpHelper_downloadViaHttp | /**
* @param uri URI to retrieve
* @param type expected text-like MIME type of that content
* @param maxChars approximate maximum characters to read from the source
* @return content as a {@code String}
* @throws IOException if the content can't be retrieved because of a bad URI, network problem, etc.
*/
public static CharSequence downloadViaHttp(String uri, ContentType type, int maxChars) throws IOException {
String contentTypes;
switch (type) {
case HTML:
contentTypes = "application/xhtml+xml,text/html,text/*,*/*";
break;
case JSON:
contentTypes = "application/json,text/*,*/*";
break;
case XML:
contentTypes = "application/xml,text/*,*/*";
break;
default: // Includes TEXT
contentTypes = "text/*,*/*";
}
return downloadViaHttp(uri, contentTypes, maxChars);
} | 3.68 |
flink_DeserializationSchema_deserialize | /**
* Deserializes the byte message.
*
* <p>Can output multiple records through the {@link Collector}. Note that number and size of
* the produced records should be relatively small. Depending on the source implementation
* records can be buffered in memory or collecting records might delay emitting checkpoint
* barrier.
*
* @param message The message, as a byte array.
* @param out The collector to put the resulting messages.
*/
@PublicEvolving
default void deserialize(byte[] message, Collector<T> out) throws IOException {
T deserialize = deserialize(message);
if (deserialize != null) {
out.collect(deserialize);
}
} | 3.68 |
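A generic sketch of the same default-method adapter pattern (not Flink's actual DeserializationSchema interface), turning a single-record deserializer into a collector-style API that skips nulls:

```java
// Generic sketch, not Flink's DeserializationSchema: adapt a single-record method
// to a collector-style API, dropping null results.
import java.io.IOException;
import java.util.function.Consumer;

interface SimpleDeserializer<T> {
    T deserialize(byte[] message) throws IOException;

    default void deserialize(byte[] message, Consumer<T> out) throws IOException {
        T record = deserialize(message);
        if (record != null) {
            out.accept(record);
        }
    }
}
```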
pulsar_PulsarZooKeeperClient_isRecoverableException | /**
* Check whether the given exception is recoverable by retry.
*
* @param exception given exception
* @return true if given exception is recoverable.
*/
public static boolean isRecoverableException(KeeperException exception) {
return isRecoverableException(exception.code().intValue());
} | 3.68 |
hbase_HFileBlock_invalidateNextBlockHeader | /**
* Clear the cached value when its integrity is suspect.
*/
private void invalidateNextBlockHeader() {
prefetchedHeader.set(null);
} | 3.68 |
flink_AllocatedSlot_isUsed | /**
* Returns true if this slot is being used (e.g. a logical slot is allocated from this slot).
*
* @return true if a logical slot is allocated from this slot, otherwise false
*/
public boolean isUsed() {
return payloadReference.get() != null;
} | 3.68 |
flink_AbstractMetricGroup_getMetricIdentifier | /**
* Returns the fully qualified metric name using the configured delimiter for the reporter with
* the given index, for example {@code
* "host-7.taskmanager-2.window_word_count.my-mapper.metricName"}.
*
* @param metricName metric name
* @param filter character filter which is applied to the scope components if not null.
* @param reporterIndex index of the reporter whose delimiter should be used
* @param delimiter delimiter to use
* @return fully qualified metric name
*/
public String getMetricIdentifier(
String metricName, CharacterFilter filter, int reporterIndex, char delimiter) {
Preconditions.checkNotNull(filter);
metricName = filter.filterCharacters(metricName);
if (scopeStrings.length == 0
|| (reporterIndex < 0 || reporterIndex >= scopeStrings.length)) {
return ScopeFormat.concat(filter, delimiter, scopeComponents) + delimiter + metricName;
} else {
if (scopeStrings[reporterIndex] == null) {
scopeStrings[reporterIndex] =
ScopeFormat.concat(filter, delimiter, scopeComponents);
}
return scopeStrings[reporterIndex] + delimiter + metricName;
}
} | 3.68 |
morf_SqlParameter_withWidth | /**
* Support for {@link SqlUtils#parameter(String)}. Creates
* a duplicate with a new width and scale.
*
* @param width The width.
* @param scale The scale.
* @return The copy.
*/
protected SqlParameter withWidth(int width, int scale) {
return new SqlParameter(name, type, width, scale);
} | 3.68 |
querydsl_SQLMergeClause_executeWithKey | /**
* Execute the clause and return the generated key cast to the given type.
* If no rows were created, null is returned, otherwise the key of the first row is returned.
*
* @param <T>
* @param type type of key
* @return generated key
*/
public <T> T executeWithKey(Class<T> type) {
return executeWithKey(type, null);
} | 3.68 |
framework_DesignAttributeHandler_toAttributeName | /**
* Returns the design attribute name corresponding to the given method name.
* For example given a method name <code>setPrimaryStyleName</code> the
* return value would be <code>primary-style-name</code>
*
* @param propertyName
* the property name returned by {@link Introspector}
* @return the design attribute name corresponding to the given method name
*/
private static String toAttributeName(String propertyName) {
propertyName = removeSubsequentUppercase(propertyName);
String[] words = propertyName.split("(?<!^)(?=[A-Z])");
StringBuilder builder = new StringBuilder();
for (String word : words) {
if (builder.length() != 0) {
builder.append('-');
}
builder.append(word.toLowerCase(Locale.ROOT));
}
return builder.toString();
} | 3.68 |
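A standalone sketch of the camelCase-to-attribute-name conversion above, omitting the removeSubsequentUppercase preprocessing step:

```java
// Standalone sketch: split a camelCase property name into lowercase hyphen-separated words.
import java.util.Locale;

public class AttributeNameDemo {
    static String toAttributeName(String propertyName) {
        String[] words = propertyName.split("(?<!^)(?=[A-Z])");
        StringBuilder builder = new StringBuilder();
        for (String word : words) {
            if (builder.length() != 0) {
                builder.append('-');
            }
            builder.append(word.toLowerCase(Locale.ROOT));
        }
        return builder.toString();
    }

    public static void main(String[] args) {
        System.out.println(toAttributeName("primaryStyleName")); // primary-style-name
    }
}
```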
framework_DefaultConnectionStateHandler_isDialogVisible | /**
* Checks if the reconnect dialog is visible to the user.
*
* @return true if the user can see the dialog, false otherwise
*/
protected boolean isDialogVisible() {
return reconnectDialog.isVisible();
} | 3.68 |
framework_CvalChecker_parseJson | /*
* used in tests
*/
static CvalInfo parseJson(String json) {
if (json == null) {
return null;
}
try {
JsonObject o = JsonUtil.parse(json);
return new CvalInfo(o);
} catch (JsonException e) {
return null;
}
} | 3.68 |
framework_VCalendarPanel_renderCalendar | /**
* For internal use only. May be removed or replaced in the future.
*
* Updates the calendar and text field with the selected dates.
*
* @param updateDate
* The value false prevents setting the selected date of the
* calendar based on focusedDate. That can be used when only the
* resolution of the calendar is changed and no date has been
* selected.
*/
public void renderCalendar(boolean updateDate) {
super.setStylePrimaryName(
parent.getStylePrimaryName() + "-calendarpanel");
if (focusedDate == null) {
Date now = new Date();
// focusedDate must have zero hours, mins, secs, millisecs
focusedDate = new FocusedDate(now.getYear(), now.getMonth(),
now.getDate());
displayedMonth = new FocusedDate(now.getYear(), now.getMonth(), 1);
}
if (updateDate && getResolution().getCalendarField() <= Resolution.MONTH
.getCalendarField() && focusChangeListener != null) {
focusChangeListener.focusChanged(new Date(focusedDate.getTime()));
}
final boolean needsMonth = getResolution()
.getCalendarField() > Resolution.YEAR.getCalendarField();
boolean needsBody = getResolution().getCalendarField() >= Resolution.DAY
.getCalendarField();
buildCalendarHeader(needsMonth);
clearCalendarBody(!needsBody);
if (needsBody) {
buildCalendarBody();
}
if (isTimeSelectorNeeded()) {
time = new VTime();
setWidget(2, 0, time);
getFlexCellFormatter().setColSpan(2, 0, 5);
getFlexCellFormatter().setStyleName(2, 0,
parent.getStylePrimaryName() + "-calendarpanel-time");
} else if (time != null) {
remove(time);
}
initialRenderDone = true;
} | 3.68 |
graphhopper_VectorTile_clearDoubleValue | /**
* <code>optional double double_value = 3;</code>
*/
public Builder clearDoubleValue() {
bitField0_ = (bitField0_ & ~0x00000004);
doubleValue_ = 0D;
onChanged();
return this;
} | 3.68 |
hbase_WALEntryStream_readNextEntryAndRecordReaderPosition | /**
* Reads the next entry and records the reader position. The boolean in the returned pair
* indicates whether the current WAL file is still being written to.
*/
private Pair<WALTailingReader.State, Boolean> readNextEntryAndRecordReaderPosition() {
OptionalLong fileLength;
if (logQueue.getQueueSize(walGroupId) > 1) {
// if there are more than one files in queue, although it is possible that we are
// still trying to write the trailer of the file and it is not closed yet, we can
// make sure that we will not write any WAL entries to it any more, so it is safe
// to just let the upper layer try to read the whole file without limit
fileLength = OptionalLong.empty();
} else {
// if there is only one file in queue, check whether it is still being written to
// we must call this before actually reading from the reader, as this method will acquire the
// rollWriteLock. This is very important, as we will enqueue the new WAL file in postLogRoll,
// and before this happens, we could have already finished closing the previous WAL file. If
// we do not acquire the rollWriteLock and return whether the current file is being written
// to, we may finish reading the previous WAL file and start to read the next one, before it
// is enqueued into the logQueue, thus lead to an empty logQueue and make the shipper think
// the queue is already ended and quit. See HBASE-28114 and related issues for more details.
// in the future, if we want to optimize the logic here, for example, do not call this method
// every time, or do not acquire rollWriteLock in the implementation of this method, we need
// to carefully review the optimized implementation
fileLength = walFileLengthProvider.getLogFileSizeIfBeingWritten(currentPath);
}
WALTailingReader.Result readResult = reader.next(fileLength.orElse(-1));
long readerPos = readResult.getEntryEndPos();
Entry readEntry = readResult.getEntry();
if (readResult.getState() == WALTailingReader.State.NORMAL) {
LOG.trace("reading entry: {} ", readEntry);
metrics.incrLogEditsRead();
metrics.incrLogReadInBytes(readerPos - currentPositionOfEntry);
// record current entry and reader position
currentEntry = readResult.getEntry();
this.currentPositionOfReader = readerPos;
} else {
LOG.trace("reading entry failed with: {}", readResult.getState());
// set current entry to null
currentEntry = null;
try {
this.currentPositionOfReader = reader.getPosition();
} catch (IOException e) {
LOG.warn("failed to get current position of reader", e);
if (readResult.getState().resetCompression()) {
return Pair.newPair(WALTailingReader.State.ERROR_AND_RESET_COMPRESSION,
fileLength.isPresent());
}
}
}
return Pair.newPair(readResult.getState(), fileLength.isPresent());
} | 3.68 |
graphhopper_Snap_getSnappedPoint | /**
* Calculates the position of the query point 'snapped' to a close road segment or node. Call
* calcSnappedPoint beforehand; otherwise an IllegalStateException is thrown.
*/
public GHPoint3D getSnappedPoint() {
if (snappedPoint == null)
throw new IllegalStateException("Calculate snapped point before!");
return snappedPoint;
} | 3.68 |
framework_VScrollTable_ensureRowIsVisible | /**
* Ensures that the row is visible
*
* @param row
* The row to ensure is visible
*/
private void ensureRowIsVisible(VScrollTableRow row) {
if (BrowserInfo.get().isTouchDevice()) {
// Skip on touch devices: Android devices with a broken scrollTop may
// get odd scrolling here.
return;
}
/*
* FIXME The next line doesn't always do what expected, because if the
* row is not in the DOM it won't scroll to it.
*/
WidgetUtil.scrollIntoViewVertically(row.getElement());
} | 3.68 |
hbase_TableDescriptorBuilder_compareTo | // Comparable
/**
* Compares the descriptor with another descriptor which is passed as a parameter. This compares
* the content of the two descriptors and not the reference.
* @param other The MTD to compare
* @return 0 if the contents of the descriptors are exactly matching, 1 if there is a mismatch
* in the contents
*/
@Override
public int compareTo(final ModifyableTableDescriptor other) {
return TableDescriptor.COMPARATOR.compare(this, other);
} | 3.68 |
pulsar_ServiceConfigurationUtils_getAppliedAdvertisedAddress | /**
* Get the address of Broker, first try to get it from AdvertisedAddress.
* If it is not set, try to get the address set by advertisedListener.
* If it is still not set, get it through InetAddress.getLocalHost().
* @param configuration
* @param ignoreAdvertisedListener Sometimes the default key of AdvertisedListener cannot be used;
* setting this to true ignores the AdvertisedListener.
* @return
*/
@Deprecated
public static String getAppliedAdvertisedAddress(ServiceConfiguration configuration,
boolean ignoreAdvertisedListener) {
Map<String, AdvertisedListener> result = MultipleListenerValidator
.validateAndAnalysisAdvertisedListener(configuration);
String advertisedAddress = configuration.getAdvertisedAddress();
if (advertisedAddress != null) {
return advertisedAddress;
}
AdvertisedListener advertisedListener = result.get(configuration.getInternalListenerName());
if (advertisedListener != null && !ignoreAdvertisedListener) {
String address = advertisedListener.getBrokerServiceUrl().getHost();
if (address != null) {
return address;
}
}
return getDefaultOrConfiguredAddress(advertisedAddress);
} | 3.68 |
dubbo_FrameworkModel_defaultApplication | /**
* Get or create default application model
* @return
*/
public ApplicationModel defaultApplication() {
ApplicationModel appModel = this.defaultAppModel;
if (appModel == null) {
// check destroyed before acquire inst lock, avoid blocking during destroying
checkDestroyed();
resetDefaultAppModel();
if ((appModel = this.defaultAppModel) == null) {
synchronized (instLock) {
if (this.defaultAppModel == null) {
this.defaultAppModel = newApplication();
}
appModel = this.defaultAppModel;
}
}
}
Assert.notNull(appModel, "Default ApplicationModel is null");
return appModel;
} | 3.68 |
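The method above is a double-checked lazy initialization guarded by instLock. A generic sketch of that pattern (plain JDK, using a volatile field rather than Dubbo's checkDestroyed/resetDefaultAppModel handling):

```java
// Generic sketch, not Dubbo code: optimistic read first, then re-check under a dedicated lock.
public class LazyHolder<T> {
    private final Object instLock = new Object();
    private final java.util.function.Supplier<T> factory;
    private volatile T value;

    public LazyHolder(java.util.function.Supplier<T> factory) {
        this.factory = factory;
    }

    public T get() {
        T result = value;
        if (result == null) {
            synchronized (instLock) {
                if (value == null) {
                    value = factory.get();
                }
                result = value;
            }
        }
        return result;
    }
}
```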
flink_CheckpointStatsStatus_isInProgress | /**
* Returns whether the checkpoint is in progress.
*
* @return <code>true</code> if checkpoint is in progress, <code>false</code> otherwise.
*/
public boolean isInProgress() {
return this == IN_PROGRESS;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedForUpdate | /**
* @return How the dialect should represent a FOR UPDATE.
*/
protected String expectedForUpdate() {
return " FOR UPDATE";
} | 3.68 |
hadoop_MappingRuleResult_createPlacementResult | /**
* Generator method for placement results.
* @param queue The name of the queue in which we shall place the application
* @param allowCreate Flag to indicate if the placement rule is allowed to
* create a queue if possible.
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createPlacementResult(
String queue, boolean allowCreate) {
return new MappingRuleResult(
queue, MappingRuleResultType.PLACE, allowCreate);
} | 3.68 |
hbase_MultiRowRangeFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.MultiRowRangeFilter.Builder builder =
FilterProtos.MultiRowRangeFilter.newBuilder();
for (RowRange range : rangeList) {
if (range != null) {
FilterProtos.RowRange.Builder rangebuilder = FilterProtos.RowRange.newBuilder();
if (range.startRow != null)
rangebuilder.setStartRow(UnsafeByteOperations.unsafeWrap(range.startRow));
rangebuilder.setStartRowInclusive(range.startRowInclusive);
if (range.stopRow != null)
rangebuilder.setStopRow(UnsafeByteOperations.unsafeWrap(range.stopRow));
rangebuilder.setStopRowInclusive(range.stopRowInclusive);
builder.addRowRangeList(rangebuilder.build());
}
}
return builder.build().toByteArray();
} | 3.68 |
flink_HiveParserRowResolver_putWithCheck | /**
* Adds column to RR, checking for duplicate columns. Needed because CBO cannot handle the Hive
* behavior of blindly overwriting old mapping in RR and still somehow working after that.
*
* @return True if mapping was added without duplicates.
*/
public boolean putWithCheck(
String tabAlias, String colAlias, String internalName, ColumnInfo newCI)
throws SemanticException {
ColumnInfo existing = get(tabAlias, colAlias);
// Hive adds the same mapping twice... I wish we could fix stuff like that.
if (existing == null) {
put(tabAlias, colAlias, newCI);
return true;
} else if (existing.isSameColumnForRR(newCI)) {
return true;
}
LOG.warn(
"Found duplicate column alias in RR: "
+ existing.toMappingString(tabAlias, colAlias)
+ " adding "
+ newCI.toMappingString(tabAlias, colAlias));
if (internalName != null) {
existing = get(tabAlias, internalName);
if (existing == null) {
keepAmbiguousInfo(colAlias, tabAlias);
put(tabAlias, internalName, newCI);
return true;
} else if (existing.isSameColumnForRR(newCI)) {
return true;
}
LOG.warn(
"Failed to use internal name after finding a duplicate: "
+ existing.toMappingString(tabAlias, internalName));
}
return false;
} | 3.68 |
pulsar_BaseContext_getPulsarClientBuilder | /**
* Get the pre-configured pulsar client builder.
*
* You can use this builder to set up a client to connect to the Pulsar cluster.
* But you need to close the client properly after using it.
*
* @return the instance of pulsar client builder.
*/
default ClientBuilder getPulsarClientBuilder() {
throw new UnsupportedOperationException("not implemented");
} | 3.68 |
framework_VFilterSelect_afterUpdateClientVariables | /*
* Anything that should be set after the client updates the server.
*/
private void afterUpdateClientVariables() {
// We need this here to be consistent with the all the calls.
// Then set your specific selection type only after
// client.updateVariable() method call.
selectPopupItemWhenResponseIsReceived = Select.NONE;
} | 3.68 |
flink_NetUtils_unresolvedHostAndPortToNormalizedString | /**
* Returns a valid address for Pekko. It returns a String of format 'host:port'. When an IPv6
* address is specified, it normalizes the IPv6 address to avoid complications with the exact
* URL match policy of Pekko.
*
* @param host The hostname, IPv4 or IPv6 address
* @param port The port
* @return host:port where host will be normalized if it is an IPv6 address
*/
public static String unresolvedHostAndPortToNormalizedString(String host, int port) {
Preconditions.checkArgument(isValidHostPort(port), "Port is not within the valid range.");
return unresolvedHostToNormalizedString(host) + ":" + port;
} | 3.68 |
dubbo_ClassUtils_getAllSuperClasses | /**
* Get all super classes from the specified type
*
* @param type the specified type
* @param classFilters the filters for classes
* @return non-null read-only {@link Set}
* @since 2.7.6
*/
public static Set<Class<?>> getAllSuperClasses(Class<?> type, Predicate<Class<?>>... classFilters) {
Set<Class<?>> allSuperClasses = new LinkedHashSet<>();
Class<?> superClass = type.getSuperclass();
while (superClass != null) {
// add current super class
allSuperClasses.add(superClass);
superClass = superClass.getSuperclass();
}
return unmodifiableSet(filterAll(allSuperClasses, classFilters));
} | 3.68 |
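A standalone sketch of the superclass-chain walk above, without Dubbo's filterAll helper or the filter predicates:

```java
// Standalone sketch: collect all superclasses of a type in declaration order.
import java.util.LinkedHashSet;
import java.util.Set;

public class SuperClassesDemo {
    static Set<Class<?>> allSuperClasses(Class<?> type) {
        Set<Class<?>> result = new LinkedHashSet<>();
        for (Class<?> c = type.getSuperclass(); c != null; c = c.getSuperclass()) {
            result.add(c);
        }
        return result;
    }

    public static void main(String[] args) {
        // e.g. [class java.util.AbstractList, class java.util.AbstractCollection, class java.lang.Object]
        System.out.println(allSuperClasses(java.util.ArrayList.class));
    }
}
```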
pulsar_ReaderConfiguration_setCryptoKeyReader | /**
* Sets a {@link CryptoKeyReader}.
*
* @param cryptoKeyReader
* CryptoKeyReader object
*/
public ReaderConfiguration setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) {
Objects.requireNonNull(cryptoKeyReader);
conf.setCryptoKeyReader(cryptoKeyReader);
return this;
} | 3.68 |
hbase_RegionStates_checkReopened | /**
* Check whether the region has been reopened. The meaning of the {@link HRegionLocation} is the
* same with {@link #getRegionsOfTableForReopen(TableName)}.
* <p/>
* For a region which is in {@link State#OPEN} before, if the region state is changed or the open
* seq num is changed, we can confirm that it has been reopened.
* <p/>
* For a region which is in {@link State#OPENING} before, usually it will be in {@link State#OPEN}
* now and we will schedule a MRP to reopen it. But there are several exceptions:
* <ul>
* <li>The region is in state other than {@link State#OPEN} or {@link State#OPENING}.</li>
* <li>The location of the region has been changed</li>
* </ul>
* Of course the region could still be in {@link State#OPENING} state and still on the same
* server, then here we will still return a {@link HRegionLocation} for it, just like
* {@link #getRegionsOfTableForReopen(TableName)}.
* @param oldLoc the previous state/location of this region
* @return null if the region has been reopened, otherwise a new {@link HRegionLocation} which
* means we still need to reopen the region.
* @see #getRegionsOfTableForReopen(TableName)
*/
public HRegionLocation checkReopened(HRegionLocation oldLoc) {
RegionStateNode node = getRegionStateNode(oldLoc.getRegion());
// HBASE-20921
// if the oldLoc's state node does not exist, that means the region is
// merged or split, no need to check it
if (node == null) {
return null;
}
node.lock();
try {
if (oldLoc.getSeqNum() >= 0) {
// in OPEN state before
if (node.isInState(State.OPEN)) {
if (node.getOpenSeqNum() > oldLoc.getSeqNum()) {
// normal case, the region has been reopened
return null;
} else {
// the open seq num does not change, need to reopen again
return new HRegionLocation(node.getRegionInfo(), node.getRegionLocation(),
node.getOpenSeqNum());
}
} else {
// the state has been changed so we can make sure that the region has been reopened(not
// finished maybe, but not a problem).
return null;
}
} else {
// in OPENING state before
if (!node.isInState(State.OPEN, State.OPENING)) {
// not in OPEN or OPENING state, then we can make sure that the region has been
// reopened(not finished maybe, but not a problem)
return null;
} else {
if (!node.getRegionLocation().equals(oldLoc.getServerName())) {
// the region has been moved, so we can make sure that the region has been reopened.
return null;
}
// normal case, we are still in OPENING state, or the reopen has been opened and the state
// is changed to OPEN.
long openSeqNum = node.isInState(State.OPEN) ? node.getOpenSeqNum() : -1;
return new HRegionLocation(node.getRegionInfo(), node.getRegionLocation(), openSeqNum);
}
}
} finally {
node.unlock();
}
} | 3.68 |
flink_GenericDataSourceBase_getUserCodeWrapper | /**
* Gets the class describing the input format.
*
* <p>This method is basically identical to {@link #getFormatWrapper()}.
*
* @return The class describing the input format.
* @see org.apache.flink.api.common.operators.Operator#getUserCodeWrapper()
*/
@Override
public UserCodeWrapper<? extends T> getUserCodeWrapper() {
return this.formatWrapper;
} | 3.68 |
flink_ExecutionConfig_isObjectReuseEnabled | /** Returns whether object reuse has been enabled or disabled. @see #enableObjectReuse() */
public boolean isObjectReuseEnabled() {
return configuration.get(PipelineOptions.OBJECT_REUSE);
} | 3.68 |
hadoop_HAServiceTarget_getZKFCProxy | /**
* @return a proxy to the ZKFC which is associated with this HA service.
* @param conf configuration.
* @param timeoutMs timeout in milliseconds.
* @throws IOException raised on errors performing I/O.
*/
public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
throws IOException {
Configuration confCopy = new Configuration(conf);
// Lower the timeout so we quickly fail to connect
confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
return new ZKFCProtocolClientSideTranslatorPB(
getZKFCAddress(),
confCopy, factory, timeoutMs);
} | 3.68 |
framework_ValueChangeHandler_isScheduled | /**
* Checks whether the value change is scheduled for sending.
*
* @since 8.0
*
* @return {@code true} if value change is scheduled for sending,
* {@code false} otherwise
*/
public boolean isScheduled() {
return scheduled;
} | 3.68 |
hbase_HBaseTestingUtility_getAsyncConnection | /**
* Get an assigned AsyncClusterConnection to the cluster. This method is thread safe.
* @param user assigned user
* @return An AsyncClusterConnection with assigned user.
*/
public AsyncClusterConnection getAsyncConnection(User user) throws IOException {
return ClusterConnectionFactory.createAsyncClusterConnection(conf, null, user);
} | 3.68 |
hadoop_Chain_runReducer | // Run the reducer directly.
@SuppressWarnings("unchecked")
<KEYIN, VALUEIN, KEYOUT, VALUEOUT> void runReducer(
TaskInputOutputContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context)
throws IOException, InterruptedException {
RecordWriter<KEYOUT, VALUEOUT> rw = new ChainRecordWriter<KEYOUT, VALUEOUT>(
context);
Reducer.Context reducerContext = createReduceContext(rw,
(ReduceContext) context, rConf);
reducer.run(reducerContext);
rw.close(context);
} | 3.68 |
flink_ExecutionConfig_disableAutoTypeRegistration | /**
* Control whether Flink is automatically registering all types in the user programs with Kryo.
*
* @deprecated The method is deprecated because it's only used in DataSet API. All Flink DataSet
* APIs are deprecated since Flink 1.18 and will be removed in a future Flink major version.
* You can still build your application in DataSet, but you should move to either the
* DataStream and/or Table API.
* @see <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=158866741">
* FLIP-131: Consolidate the user-facing Dataflow SDKs/APIs (and deprecate the DataSet
* API</a>
*/
@Deprecated
public void disableAutoTypeRegistration() {
setAutoTypeRegistration(false);
} | 3.68 |
hbase_HRegion_interruptRegionOperations | /**
* Interrupt any region operations that have acquired the region lock via
* {@link #startRegionOperation(org.apache.hadoop.hbase.regionserver.Region.Operation)}, or
* {@link #startBulkRegionOperation(boolean)}.
*/
private void interruptRegionOperations() {
for (Map.Entry<Thread, Boolean> entry : regionLockHolders.entrySet()) {
// An entry in this map will have a boolean value indicating if it is currently
// eligible for interrupt; if so, we should interrupt it.
if (entry.getValue().booleanValue()) {
entry.getKey().interrupt();
}
}
} | 3.68 |
framework_Escalator_getInnerWidth | /**
* Gets the escalator's inner width. This is the entire width in pixels,
* without the vertical scrollbar.
*
* @return escalator's inner width
*/
public double getInnerWidth() {
return WidgetUtil
.getRequiredWidthBoundingClientRectDouble(tableWrapper);
} | 3.68 |
framework_AbstractOrderedLayoutConnector_updateCaption | /*
* (non-Javadoc)
*
* @see com.vaadin.client.HasComponentsConnector#updateCaption(com.vaadin
* .client.ComponentConnector)
*/
@Override
public void updateCaption(ComponentConnector connector) {
/*
* Don't directly update captions here to avoid calling e.g.
* updateLayoutHeight() before everything is initialized.
* updateInternalState() will ensure all captions are updated when
* appropriate.
*/
updateInternalState();
} | 3.68 |
hadoop_Server_init | /**
* Initializes the Server.
* <p>
* The initialization steps are:
* <ul>
* <li>It verifies the service home and temp directories exist</li>
* <li>Loads the Server <code>#SERVER#-default.xml</code>
* configuration file from the classpath</li>
* <li>Initializes log4j logging. If the
* <code>#SERVER#-log4j.properties</code> file does not exist in the config
* directory it loads <code>default-log4j.properties</code> from the classpath
* </li>
* <li>Loads the <code>#SERVER#-site.xml</code> file from the server config
* directory and merges it with the default configuration.</li>
* <li>Loads the services</li>
* <li>Initializes the services</li>
* <li>Post-initializes the services</li>
* <li>Sets the server startup status</li>
* </ul>
*
* @throws ServerException thrown if the server could not be initialized.
*/
public void init() throws ServerException {
if (status != Status.UNDEF) {
throw new IllegalStateException("Server already initialized");
}
status = Status.BOOTING;
verifyDir(homeDir);
verifyDir(tempDir);
Properties serverInfo = new Properties();
try {
InputStream is = getResource(name + ".properties");
serverInfo.load(is);
is.close();
} catch (IOException ex) {
throw new RuntimeException("Could not load server information file: " + name + ".properties");
}
initLog();
log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++");
log.info("Server [{}] starting", name);
log.info(" Built information:");
log.info(" Version : {}", serverInfo.getProperty(name + ".version", "undef"));
log.info(" Source Repository : {}", serverInfo.getProperty(name + ".source.repository", "undef"));
log.info(" Source Revision : {}", serverInfo.getProperty(name + ".source.revision", "undef"));
log.info(" Built by : {}", serverInfo.getProperty(name + ".build.username", "undef"));
log.info(" Built timestamp : {}", serverInfo.getProperty(name + ".build.timestamp", "undef"));
log.info(" Runtime information:");
log.info(" Home dir: {}", homeDir);
log.info(" Config dir: {}", (config == null) ? configDir : "-");
log.info(" Log dir: {}", logDir);
log.info(" Temp dir: {}", tempDir);
initConfig();
log.debug("Loading services");
List<Service> list = loadServices();
try {
log.debug("Initializing services");
initServices(list);
log.info("Services initialized");
} catch (ServerException ex) {
log.error("Services initialization failure, destroying initialized services");
destroyServices();
throw ex;
}
Status status = Status.valueOf(getConfig().get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString()));
setStatus(status);
log.info("Server [{}] started!, status [{}]", name, status);
} | 3.68 |
hbase_WALEventTrackerTableAccessor_addWalEventTrackerRows | /**
* Add wal event tracker rows to hbase:waleventtracker table
* @param walEventPayloads List of walevents to process
* @param connection Connection to use.
*/
public static void addWalEventTrackerRows(Queue<WALEventTrackerPayload> walEventPayloads,
final Connection connection) throws Exception {
List<Put> puts = new ArrayList<>(walEventPayloads.size());
for (WALEventTrackerPayload payload : walEventPayloads) {
final byte[] rowKey = getRowKey(payload);
final Put put = new Put(rowKey);
// TODO Do we need to SKIP_WAL ?
put.setPriority(HConstants.NORMAL_QOS);
put
.addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(RS_COLUMN),
Bytes.toBytes(payload.getRsName()))
.addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(WAL_NAME_COLUMN),
Bytes.toBytes(payload.getWalName()))
.addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(TIMESTAMP_COLUMN),
Bytes.toBytes(payload.getTimeStamp()))
.addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(WAL_STATE_COLUMN),
Bytes.toBytes(payload.getState()))
.addColumn(WAL_EVENT_TRACKER_INFO_FAMILY, Bytes.toBytes(WAL_LENGTH_COLUMN),
Bytes.toBytes(payload.getWalLength()));
puts.add(put);
}
doPut(connection, puts);
} | 3.68 |
hadoop_FindOptions_setMaxDepth | /**
* Sets the maximum depth for applying expressions.
*
* @param maxDepth maximum depth
*/
public void setMaxDepth(int maxDepth) {
this.maxDepth = maxDepth;
} | 3.68 |
hibernate-validator_ValueExtractorManager_getMaximallySpecificAndRuntimeContainerElementCompliantValueExtractor | /**
* Used to find the maximally specific and container element compliant value extractor based on the runtime type.
* <p>
* The maximally specific one is chosen among the candidates passed to this method.
* <p>
* Used for cascading validation.
*
* @see ValueExtractorResolver#getMaximallySpecificAndRuntimeContainerElementCompliantValueExtractor(Type,
* TypeVariable, Class, Collection)
* @throws ConstraintDeclarationException if more than 2 maximally specific container-element-compliant value extractors are found
*/
public ValueExtractorDescriptor getMaximallySpecificAndRuntimeContainerElementCompliantValueExtractor(Type declaredType, TypeVariable<?> typeParameter,
Class<?> runtimeType, Collection<ValueExtractorDescriptor> valueExtractorCandidates) {
if ( valueExtractorCandidates.size() == 1 ) {
return valueExtractorCandidates.iterator().next();
}
else if ( !valueExtractorCandidates.isEmpty() ) {
return valueExtractorResolver.getMaximallySpecificAndRuntimeContainerElementCompliantValueExtractor(
declaredType,
typeParameter,
runtimeType,
valueExtractorCandidates
);
}
else {
return valueExtractorResolver.getMaximallySpecificAndRuntimeContainerElementCompliantValueExtractor(
declaredType,
typeParameter,
runtimeType,
registeredValueExtractors.values()
);
}
} | 3.68 |
morf_ExecuteStatement_reverse | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
return schema;
} | 3.68 |
hbase_MasterObserver_preCreateTableAction | /**
* Called before a new table is created by {@link org.apache.hadoop.hbase.master.HMaster}. Called
* as part of create table procedure and it is async to the create RPC call.
* @param ctx the environment to interact with the framework and master
* @param desc the TableDescriptor for the table
* @param regions the initial regions created for the table
*/
default void preCreateTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableDescriptor desc, final RegionInfo[] regions) throws IOException {
} | 3.68 |
pulsar_BrokerInterceptor_messageDispatched | /**
* Intercept after a message is dispatched to consumer.
*
* @param cnx client Connection
* @param consumer Consumer object
* @param ledgerId Ledger ID
* @param entryId Entry ID
* @param headersAndPayload Data
*/
default void messageDispatched(ServerCnx cnx, Consumer consumer, long ledgerId,
long entryId, ByteBuf headersAndPayload) {
} | 3.68 |
flink_PojoTestUtils_assertSerializedAsPojo | /**
* Verifies that instances of the given class fulfill all conditions to be serialized with the
* {@link PojoSerializer}, as documented <a
* href="https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/datastream/fault-tolerance/serialization/types_serialization/#pojos">here</a>.
*
* <p>Note that this check will succeed even if the Pojo is partially serialized with Kryo. If
* this is not desired, use {@link #assertSerializedAsPojoWithoutKryo(Class)} instead.
*
* @param clazz class to analyze
* @param <T> class type
* @throws AssertionError if instances of the class cannot be serialized as a POJO
*/
public static <T> void assertSerializedAsPojo(Class<T> clazz) throws AssertionError {
final TypeInformation<T> typeInformation = TypeInformation.of(clazz);
final TypeSerializer<T> actualSerializer =
typeInformation.createSerializer(new ExecutionConfig());
assertThat(actualSerializer)
.withFailMessage(
"Instances of the class '%s' cannot be serialized as a POJO, but would use a '%s' instead. %n"
+ "Re-run this test with INFO logging enabled and check messages from the '%s' for possible reasons.",
clazz.getSimpleName(),
actualSerializer.getClass().getSimpleName(),
TypeExtractor.class.getCanonicalName())
.isInstanceOf(PojoSerializer.class);
} | 3.68 |
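A hypothetical usage in a test; the WordCount class below is illustrative, not Flink code:

```java
// Hypothetical test usage. A public no-argument constructor plus public
// (or getter/setter-backed) non-final fields make a type eligible for the PojoSerializer.
public class WordCount {
    public String word;
    public long count;

    public WordCount() {}
}

// In a test:
// PojoTestUtils.assertSerializedAsPojo(WordCount.class);
```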
hbase_SimpleRpcServerResponder_purge | /**
* If there were some calls that have not been sent out for a long time, we close the connection.
* @return the time of the purge.
*/
private long purge(long lastPurgeTime) {
long now = EnvironmentEdgeManager.currentTime();
if (now < lastPurgeTime + this.simpleRpcServer.purgeTimeout) {
return lastPurgeTime;
}
ArrayList<SimpleServerRpcConnection> conWithOldCalls = new ArrayList<>();
// get the list of channels from list of keys.
synchronized (writeSelector.keys()) {
for (SelectionKey key : writeSelector.keys()) {
SimpleServerRpcConnection connection = (SimpleServerRpcConnection) key.attachment();
if (connection == null) {
throw new IllegalStateException("Coding error: SelectionKey key without attachment.");
}
if (
connection.lastSentTime > 0
&& now > connection.lastSentTime + this.simpleRpcServer.purgeTimeout
) {
conWithOldCalls.add(connection);
}
}
}
// Seems safer to close the connection outside of the synchronized loop...
for (SimpleServerRpcConnection connection : conWithOldCalls) {
this.simpleRpcServer.closeConnection(connection);
}
return now;
} | 3.68 |
framework_DataCommunicator_setDataProvider | /**
* Sets a new {@code DataProvider} and refreshes all the internal
* structures.
*
* @param dataProvider
* @since 8.1
*/
protected void setDataProvider(DataProvider<T, ?> dataProvider) {
detachDataProviderListener();
dropAllData();
this.dataProvider = dataProvider;
getKeyMapper().setIdentifierGetter(dataProvider::getId);
} | 3.68 |
hbase_AbstractProtobufWALReader_setTrailerIfPresent | /**
* To check whether a trailer is present in a WAL, it seeks to position (fileLength -
* PB_WAL_COMPLETE_MAGIC.size() - Bytes.SIZEOF_INT). It reads the int value to know the size of
* the trailer, and checks whether the trailer is present at the end or not by comparing the last
* PB_WAL_COMPLETE_MAGIC.size() bytes. In case trailer is not present, it returns false;
* otherwise, sets the trailer and sets this.walEditsStopOffset variable up to the point just
* before the trailer.
* <p/>
* The trailer is ignored in case:
* <ul>
* <li>fileLength is 0 or not correct (when file is under recovery, etc).
* <li>the trailer size is negative.
* </ul>
* In case the trailer size > this.trailerWarnSize, it is read after a WARN message.
* @return true if a valid trailer is present
*/
private boolean setTrailerIfPresent(FSDataInputStream stream) throws IOException {
try {
long trailerSizeOffset = this.fileLength - (PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT);
if (trailerSizeOffset <= 0) {
// no trailer possible.
return false;
}
stream.seek(trailerSizeOffset);
// read the int as trailer size.
int trailerSize = stream.readInt();
ByteBuffer buf = ByteBuffer.allocate(PB_WAL_COMPLETE_MAGIC.length);
stream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
if (!Arrays.equals(buf.array(), PB_WAL_COMPLETE_MAGIC)) {
LOG.trace("No trailer found.");
return false;
}
if (trailerSize < 0) {
LOG.warn("Invalid trailer Size " + trailerSize + ", ignoring the trailer");
return false;
} else if (trailerSize > this.trailerWarnSize) {
// continue reading after warning the user.
LOG.warn("Please investigate WALTrailer usage. Trailer size > maximum configured size : "
+ trailerSize + " > " + this.trailerWarnSize);
}
// seek to the position where trailer starts.
long positionOfTrailer = trailerSizeOffset - trailerSize;
stream.seek(positionOfTrailer);
// read the trailer.
buf = ByteBuffer.allocate(trailerSize);// for trailer.
stream.readFully(buf.array(), buf.arrayOffset(), buf.capacity());
trailer = WALTrailer.parseFrom(buf.array());
this.walEditsStopOffset = positionOfTrailer;
return true;
} catch (IOException ioe) {
LOG.warn("Got IOE while reading the trailer. Continuing as if no trailer is present.", ioe);
}
return false;
} | 3.68 |
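A standalone sketch of the trailer layout the reader expects at the end of the file; the magic constant below is illustrative, not HBase's actual PB_WAL_COMPLETE_MAGIC:

```java
// Standalone sketch of reading a length-prefixed trailer laid out as:
// [ ... payload ... ][ trailer bytes ][ 4-byte trailer size ][ magic bytes ]
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;

public class TrailerReaderDemo {
    private static final byte[] MAGIC = {'T', 'R', 'L', '!'}; // illustrative magic value

    static byte[] readTrailer(RandomAccessFile file) throws IOException {
        long sizeOffset = file.length() - (MAGIC.length + Integer.BYTES);
        if (sizeOffset <= 0) {
            return null;                     // file too small to hold a trailer
        }
        file.seek(sizeOffset);
        int trailerSize = file.readInt();
        byte[] magic = new byte[MAGIC.length];
        file.readFully(magic);
        if (!Arrays.equals(magic, MAGIC) || trailerSize < 0 || trailerSize > sizeOffset) {
            return null;                     // no (valid) trailer present
        }
        file.seek(sizeOffset - trailerSize);
        byte[] trailer = new byte[trailerSize];
        file.readFully(trailer);
        return trailer;
    }
}
```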
hadoop_AuxServiceRecord_description | /**
* Description of the service.
*/
public AuxServiceRecord description(String d) {
this.description = d;
return this;
} | 3.68 |
hadoop_XMLUtils_bestEffortSetAttribute | /**
* Set an attribute value on a {@link TransformerFactory}. If the TransformerFactory
* does not support the attribute, the method just returns <code>false</code> and
* logs the issue at debug level.
*
* @param transformerFactory to update
* @param flag that indicates whether to do the update and the flag can be set to
* <code>false</code> if an update fails
* @param name of the attribute to set
* @param value to set on the attribute
*/
static void bestEffortSetAttribute(TransformerFactory transformerFactory, AtomicBoolean flag,
String name, Object value) {
if (flag.get()) {
try {
transformerFactory.setAttribute(name, value);
} catch (Throwable t) {
flag.set(false);
LOG.debug("Issue setting TransformerFactory attribute {}: {}", name, t.toString());
}
}
} | 3.68 |
hadoop_ActiveAuditManagerS3A_beforeExecution | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void beforeExecution(Context.BeforeExecution context,
ExecutionAttributes executionAttributes) {
span.beforeExecution(context, executionAttributes);
} | 3.68 |
hadoop_CachingGetSpaceUsed_setUsed | /**
* Reset the current used data amount. This should be called
* when the cached value is re-computed.
*
* @param usedValue new value that should be the disk usage.
*/
protected void setUsed(long usedValue) {
this.used.set(usedValue);
} | 3.68 |
morf_MySqlDialect_prepareBooleanParameter | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#prepareBooleanParameter(org.alfasoftware.morf.jdbc.NamedParameterPreparedStatement, java.lang.Boolean, org.alfasoftware.morf.sql.element.SqlParameter)
*/
@Override
protected void prepareBooleanParameter(NamedParameterPreparedStatement statement, Boolean boolVal, SqlParameter parameter) throws SQLException {
Integer intValue = boolVal == null ? null : boolVal ? 1 : 0;
super.prepareIntegerParameter(statement, intValue, parameter(parameter.getImpliedName()).type(INTEGER));
} | 3.68 |
framework_VAbstractPopupCalendar_openCalendarPanel | /**
* Opens the calendar panel popup.
*/
public void openCalendarPanel() {
if (!open && !readonly && isEnabled()) {
open = true;
if (getCurrentDate() != null) {
calendar.setDate((Date) getCurrentDate().clone());
} else if (getDefaultDate() != null) {
calendar.setDate(getDefaultDate());
} else {
calendar.setDate(new Date());
}
// clear previous values
popup.setWidth("");
popup.setHeight("");
popup.setPopupPositionAndShow(new PopupPositionCallback());
checkGroupFocus(true);
} else {
getLogger().severe("Cannot reopen popup, it is already open!");
}
} | 3.68 |
pulsar_SSLContextValidatorEngine_ensureCapacity | /**
* Ensure the given ByteBuffer has at least the requested capacity.
* @param existingBuffer ByteBuffer whose capacity to check
* @param newLength required length for the ByteBuffer
* @return the original buffer if it is large enough, otherwise a new buffer with the
* existing content copied in
*/
public static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength) {
if (newLength > existingBuffer.capacity()) {
ByteBuffer newBuffer = ByteBuffer.allocate(newLength);
existingBuffer.flip();
newBuffer.put(existingBuffer);
return newBuffer;
}
return existingBuffer;
} | 3.68 |
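A small usage sketch of the grow-on-demand behaviour; note the helper copies the existing content into the new buffer and the caller continues appending after the copied bytes:

```java
// Usage sketch: grow a buffer before appending more data than it can hold.
import java.nio.ByteBuffer;

public class EnsureCapacityDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.putInt(42);                       // buffer is now full
        buf = ensureCapacity(buf, 8);         // new 8-byte buffer with the 4 bytes copied
        buf.putInt(7);                        // room for four more bytes
        System.out.println(buf.capacity());   // 8
    }

    // Same logic as the snippet above, repeated here so the demo is self-contained.
    static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength) {
        if (newLength > existingBuffer.capacity()) {
            ByteBuffer newBuffer = ByteBuffer.allocate(newLength);
            existingBuffer.flip();
            newBuffer.put(existingBuffer);
            return newBuffer;
        }
        return existingBuffer;
    }
}
```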
hibernate-validator_ExecutableHelper_overrides | /**
* Checks, whether {@code subTypeMethod} overrides {@code superTypeMethod}.
*
* @param subTypeMethod The sub type method (cannot be {@code null}).
* @param superTypeMethod The super type method (cannot be {@code null}).
*
* @return Returns {@code true} if {@code subTypeMethod} overrides {@code superTypeMethod},
* {@code false} otherwise.
*/
public boolean overrides(Method subTypeMethod, Method superTypeMethod) {
Contracts.assertValueNotNull( subTypeMethod, "subTypeMethod" );
Contracts.assertValueNotNull( superTypeMethod, "superTypeMethod" );
if ( subTypeMethod.equals( superTypeMethod ) ) {
return false;
}
if ( !subTypeMethod.getName().equals( superTypeMethod.getName() ) ) {
return false;
}
if ( subTypeMethod.getParameterCount() != superTypeMethod.getParameterCount() ) {
return false;
}
if ( !superTypeMethod.getDeclaringClass().isAssignableFrom( subTypeMethod.getDeclaringClass() ) ) {
return false;
}
if ( Modifier.isStatic( superTypeMethod.getModifiers() ) || Modifier.isStatic(
subTypeMethod.getModifiers()
) ) {
return false;
}
// HV-861 Bridge method should be ignored. Classmates type/member resolution will take care of proper
// override detection without considering bridge methods
if ( subTypeMethod.isBridge() ) {
return false;
}
if ( Modifier.isPrivate( superTypeMethod.getModifiers() ) ) {
return false;
}
if ( !isMethodVisibleTo( superTypeMethod, subTypeMethod ) ) {
return false;
}
return instanceMethodParametersResolveToSameTypes( subTypeMethod, superTypeMethod );
} | 3.68 |
hbase_CompactSplit_registerChildren | /**
* {@inheritDoc}
*/
@Override
public void registerChildren(ConfigurationManager manager) {
// No children to register.
} | 3.68 |
morf_UseParallelDml_getDegreeOfParallelism | /**
* @return the degree of parallelism for this PARALLEL query hint.
*/
public Optional<Integer> getDegreeOfParallelism() {
return degreeOfParallelism;
} | 3.68 |
morf_TruncateStatement_deepCopy | /**
* @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation)
*/
@Override
public Builder<TruncateStatement> deepCopy(DeepCopyTransformation transformer) {
return TempTransitionalBuilderWrapper.wrapper(new TruncateStatement(transformer.deepCopy(table)));
} | 3.68 |
morf_DataValueLookupMetadataRegistry_intern | /**
* Interns the metadata for a single-column record, returning the same {@link DataValueLookupMetadata}
* for all records which contain that one record.
*
* <p>Used when initialising a new {@link DataValueLookupBuilderImpl}.</p>
*
* @param columnName A case-insensitive name for the column.
* @return The interned metadata.
*/
static DataValueLookupMetadata intern(CaseInsensitiveString columnName) {
ImmutableMap<CaseInsensitiveString, DataValueLookupMetadata> old = lookups;
DataValueLookupMetadata result = old.get(columnName);
if (result != null) {
return result;
}
synchronized (SYNC) {
ImmutableMap<CaseInsensitiveString, DataValueLookupMetadata> current = lookups;
if (old != current) {
result = current.get(columnName);
if (result != null) {
return result;
}
}
result = new DataValueLookupMetadata(ImmutableList.of(columnName));
lookups = builderPlusOneEntry(current)
.putAll(current)
.put(columnName, result)
.build();
return result;
}
} | 3.68 |
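A generic sketch of the copy-on-write interning pattern above (plain JDK, no Guava): an optimistic read without locking, then a synchronized re-check and a rebuilt immutable map on a miss:

```java
// Generic sketch, not Morf code: copy-on-write interning with a volatile immutable map.
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class Interner<K, V> {
    private final Object sync = new Object();
    private final java.util.function.Function<K, V> factory;
    private volatile Map<K, V> entries = Collections.emptyMap();

    public Interner(java.util.function.Function<K, V> factory) {
        this.factory = factory;
    }

    public V intern(K key) {
        V existing = entries.get(key);        // lock-free fast path
        if (existing != null) {
            return existing;
        }
        synchronized (sync) {
            existing = entries.get(key);      // re-check: another thread may have added it
            if (existing != null) {
                return existing;
            }
            V created = factory.apply(key);
            Map<K, V> copy = new HashMap<>(entries);
            copy.put(key, created);
            entries = Collections.unmodifiableMap(copy);
            return created;
        }
    }
}
```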
hbase_ByteBuff_touch | /**
* Calling this method in strategic locations where ByteBuffs are referenced may help diagnose
* potential buffer leaks. We pass the buffer itself as a default hint, but one can use
* {@link #touch(Object)} to pass their own hint as well.
*/
@Override
public ByteBuff touch() {
return touch(this);
} | 3.68 |
flink_CsvReader_ignoreComments | /**
* Configures the string that starts comments. By default comments will be treated as invalid
* lines. This function only recognizes comments which start at the beginning of the line!
*
* @param commentPrefix The string that starts the comments.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader ignoreComments(String commentPrefix) {
if (commentPrefix == null || commentPrefix.length() == 0) {
throw new IllegalArgumentException(
"The comment prefix must not be null or an empty string");
}
this.commentPrefix = commentPrefix;
return this;
} | 3.68 |
flink_MiniCluster_checkRestoreModeForChangelogStateBackend | // HACK: temporary hack to make the randomized changelog state backend tests work with forced
// full snapshots. This option should be removed once changelog state backend supports forced
// full snapshots
private void checkRestoreModeForChangelogStateBackend(JobGraph jobGraph) {
final SavepointRestoreSettings savepointRestoreSettings =
jobGraph.getSavepointRestoreSettings();
if (overrideRestoreModeForChangelogStateBackend
&& savepointRestoreSettings.getRestoreMode() == RestoreMode.NO_CLAIM) {
final Configuration conf = new Configuration();
SavepointRestoreSettings.toConfiguration(savepointRestoreSettings, conf);
conf.set(SavepointConfigOptions.RESTORE_MODE, RestoreMode.LEGACY);
jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.fromConfiguration(conf));
}
} | 3.68 |
dubbo_ServiceInvokeRestFilter_writeResult | /**
* Write the return value according to the request's Accept media type.
*
* @param nettyHttpResponse
* @param request
* @param value
* @param returnType
* @throws Exception
*/
public static void writeResult(
NettyHttpResponse nettyHttpResponse, RequestFacade<?> request, URL url, Object value, Class<?> returnType)
throws Exception {
MediaType mediaType = getAcceptMediaType(request, returnType);
writeResult(nettyHttpResponse, url, value, returnType, mediaType);
} | 3.68 |
querydsl_SQLExpressions_select | /**
* Create a new detached SQLQuery instance with the given projection
*
* @param exprs projection
* @return select(exprs)
*/
public static SQLQuery<Tuple> select(Expression<?>... exprs) {
return new SQLQuery<Void>().select(exprs);
} | 3.68 |
flink_AbstractWritableVector_hasDictionary | /** Returns true if this column has a dictionary. */
@Override
public boolean hasDictionary() {
return this.dictionary != null;
} | 3.68 |
framework_VAbstractCalendarPanel_contains | /**
* Checks if subElement is inside the widget DOM hierarchy.
*
* @param w
* the widget to investigate
* @param subElement
* the element to search for
* @return {@code true} if the given widget is a parent of the given
* element, {@code false} otherwise.
*/
protected boolean contains(Widget w, Element subElement) {
if (w == null || w.getElement() == null) {
return false;
}
return w.getElement().isOrHasChild(subElement);
} | 3.68 |
flink_RowUtils_deepHashCodeRow | /**
* Hashes a row with proper (nested) semantics. This method supports all external
* and most internal conversion classes of the table ecosystem.
*/
static int deepHashCodeRow(
RowKind kind,
@Nullable Object[] fieldByPosition,
@Nullable Map<String, Object> fieldByName) {
int result = kind.toByteValue(); // for stable hash across JVM instances
if (fieldByPosition != null) {
// positionByName is not included
result = 31 * result + deepHashCodeInternal(fieldByPosition);
} else {
result = 31 * result + deepHashCodeInternal(fieldByName);
}
return result;
} | 3.68 |
hadoop_EvaluatingStatisticsMap_snapshot | /**
* Take a snapshot.
* @return a map snapshot.
*/
public Map<String, E> snapshot() {
return IOStatisticsBinding.snapshotMap(this, copyFn);
} | 3.68 |
hbase_MultiRowRangeFilter_hasFoundFirstRange | /**
* Returns true if the first matching row range was found.
*/
public boolean hasFoundFirstRange() {
return foundFirstRange;
} | 3.68 |
hadoop_StagingCommitter_listPendingUploadsToAbort | /**
* Get the list of pending uploads for this job attempt, swallowing
* exceptions.
* @param commitContext commit context
* @return a list of pending uploads. If an exception was swallowed,
* then this may not match the actual set of pending operations
* @throws IOException shouldn't be raised, but retained for the compiler
*/
protected ActiveCommit listPendingUploadsToAbort(
CommitContext commitContext) throws IOException {
return listPendingUploads(commitContext, true);
} | 3.68 |
flink_BeamStateRequestHandler_of | /**
* Create a {@link BeamStateRequestHandler}.
*
* @param keyedStateBackend if null, {@link BeamStateRequestHandler} would throw an error when
* receive keyed-state requests.
* @param operatorStateBackend if null, {@link BeamStateRequestHandler} would throw an error
* when receive operator-state requests.
* @param keySerializer key serializer for {@link KeyedStateBackend}, must not be null if {@code
* keyedStatedBackend} is not null.
* @param namespaceSerializer namespace serializer for {@link KeyedStateBackend}, could be null
* when there's no window logic involved.
* @param config state-related configurations
* @return A new {@link BeamStateRequestHandler}
*/
public static BeamStateRequestHandler of(
@Nullable KeyedStateBackend<?> keyedStateBackend,
@Nullable OperatorStateBackend operatorStateBackend,
@Nullable TypeSerializer<?> keySerializer,
@Nullable TypeSerializer<?> namespaceSerializer,
ReadableConfig config) {
BeamStateStore keyedStateStore = BeamStateStore.unsupported();
if (keyedStateBackend != null) {
assert keySerializer != null;
keyedStateStore =
new BeamKeyedStateStore(keyedStateBackend, keySerializer, namespaceSerializer);
}
BeamStateStore operatorStateStore = BeamStateStore.unsupported();
if (operatorStateBackend != null) {
operatorStateStore = new BeamOperatorStateStore(operatorStateBackend);
}
BeamStateHandler<ListState<byte[]>> bagStateBeamStateHandler =
new BeamBagStateHandler(namespaceSerializer);
BeamStateHandler<MapState<ByteArrayWrapper, byte[]>> mapStateBeamStateHandler =
new BeamMapStateHandler(config);
return new BeamStateRequestHandler(
keyedStateStore,
operatorStateStore,
bagStateBeamStateHandler,
mapStateBeamStateHandler);
} | 3.68 |
flink_StickyAllocationAndLocalRecoveryTestJob_getJvmPid | /**
* This code is copied from Stack Overflow.
*
* <p><a
* href="https://stackoverflow.com/questions/35842">https://stackoverflow.com/questions/35842</a>,
* answer <a
* href="https://stackoverflow.com/a/12066696/9193881">https://stackoverflow.com/a/12066696/9193881</a>
*
* <p>Author: <a href="https://stackoverflow.com/users/446591/brad-mace">Brad Mace</a>)
*/
private static int getJvmPid() throws Exception {
java.lang.management.RuntimeMXBean runtime =
java.lang.management.ManagementFactory.getRuntimeMXBean();
java.lang.reflect.Field jvm = runtime.getClass().getDeclaredField("jvm");
jvm.setAccessible(true);
sun.management.VMManagement mgmt = (sun.management.VMManagement) jvm.get(runtime);
java.lang.reflect.Method pidMethod = mgmt.getClass().getDeclaredMethod("getProcessId");
pidMethod.setAccessible(true);
return (int) (Integer) pidMethod.invoke(mgmt);
} | 3.68 |
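The reflective call into sun.management.VMManagement above only works on Java 8 and earlier; the package is strongly encapsulated on newer JDKs. A minimal sketch of the modern alternative, assuming Java 9 or later:

    // Java 9+: the standard library exposes the PID directly, no reflection needed.
    private static long getJvmPidModern() {
        return ProcessHandle.current().pid();
    }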
flink_CheckpointFailureManager_updateStatsAfterCheckpointFailed | /**
 * Updates checkpoint statistics after a checkpoint has failed.
 *
 * @param pendingCheckpointStats the pending checkpoint statistics, or null if none are tracked.
 * @param statsTracker the checkpoint statistics tracker to report the failure to.
 * @param exception the checkpoint exception.
 */
private void updateStatsAfterCheckpointFailed(
@Nullable PendingCheckpointStats pendingCheckpointStats,
CheckpointStatsTracker statsTracker,
CheckpointException exception) {
if (pendingCheckpointStats != null) {
long failureTimestamp = System.currentTimeMillis();
statsTracker.reportFailedCheckpoint(
pendingCheckpointStats.toFailedCheckpoint(failureTimestamp, exception));
} else {
statsTracker.reportFailedCheckpointsWithoutInProgress();
}
} | 3.68 |
flink_LogicalTypeUtils_renameRowFields | /** Renames the fields of the given {@link RowType}. */
public static RowType renameRowFields(RowType rowType, List<String> newFieldNames) {
Preconditions.checkArgument(
rowType.getFieldCount() == newFieldNames.size(),
"Row length and new names must match.");
final List<RowField> newFields =
IntStream.range(0, rowType.getFieldCount())
.mapToObj(
pos -> {
final RowField oldField = rowType.getFields().get(pos);
return new RowField(
newFieldNames.get(pos),
oldField.getType(),
oldField.getDescription().orElse(null));
})
.collect(Collectors.toList());
return new RowType(rowType.isNullable(), newFields);
} | 3.68 |
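A minimal usage sketch, assuming the usual imports from org.apache.flink.table.types.logical and java.util.Arrays (the field names are illustrative):

    // Rename ROW<f0 INT, f1 STRING> positionally to ROW<id INT, name STRING>.
    RowType original = RowType.of(
            new LogicalType[] {new IntType(), new VarCharType(VarCharType.MAX_LENGTH)},
            new String[] {"f0", "f1"});
    RowType renamed = LogicalTypeUtils.renameRowFields(original, Arrays.asList("id", "name"));
    // renamed.getFieldNames() -> [id, name]; field types and nullability are unchanged.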
flink_ExternalServiceDecorator_getExternalServiceName | /** Generates the name of the external REST Service. */
public static String getExternalServiceName(String clusterId) {
return clusterId + Constants.FLINK_REST_SERVICE_SUFFIX;
} | 3.68 |
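A call sketch (the cluster id is illustrative; the suffix comes from Constants.FLINK_REST_SERVICE_SUFFIX, typically "-rest"):

    // e.g. "my-flink-cluster" -> "my-flink-cluster-rest"
    String restServiceName = ExternalServiceDecorator.getExternalServiceName("my-flink-cluster");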
hadoop_MountInterface_fromValue | /** Returns the procedure corresponding to the given value.
 * @param value specifies the procedure index
 * @return the procedure corresponding to the value, or null if the value is out of range.
 */
public static MNTPROC fromValue(int value) {
if (value < 0 || value >= values().length) {
return null;
}
return values()[value];
} | 3.68 |
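A dispatch sketch, assuming the procedure number was read from an incoming MOUNT RPC request (the variable and error handling are illustrative):

    int procNumber = 1; // MNT in the MOUNT protocol; in practice read off the wire
    MountInterface.MNTPROC proc = MountInterface.MNTPROC.fromValue(procNumber);
    if (proc == null) {
        // Out-of-range values map to null rather than throwing.
        throw new UnsupportedOperationException("Unknown MNTPROC value: " + procNumber);
    }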
hudi_RollbackUtils_mergeRollbackStat | /**
* Helper to merge 2 rollback-stats for a given partition.
*
* @param stat1 HoodieRollbackStat
* @param stat2 HoodieRollbackStat
* @return Merged HoodieRollbackStat
*/
static HoodieRollbackStat mergeRollbackStat(HoodieRollbackStat stat1, HoodieRollbackStat stat2) {
checkArgument(stat1.getPartitionPath().equals(stat2.getPartitionPath()));
final List<String> successDeleteFiles = new ArrayList<>();
final List<String> failedDeleteFiles = new ArrayList<>();
final Map<FileStatus, Long> commandBlocksCount = new HashMap<>();
final Map<String, Long> logFilesFromFailedCommit = new HashMap<>();
Option.ofNullable(stat1.getSuccessDeleteFiles()).ifPresent(successDeleteFiles::addAll);
Option.ofNullable(stat2.getSuccessDeleteFiles()).ifPresent(successDeleteFiles::addAll);
Option.ofNullable(stat1.getFailedDeleteFiles()).ifPresent(failedDeleteFiles::addAll);
Option.ofNullable(stat2.getFailedDeleteFiles()).ifPresent(failedDeleteFiles::addAll);
Option.ofNullable(stat1.getCommandBlocksCount()).ifPresent(commandBlocksCount::putAll);
Option.ofNullable(stat2.getCommandBlocksCount()).ifPresent(commandBlocksCount::putAll);
Option.ofNullable(stat1.getLogFilesFromFailedCommit()).ifPresent(logFilesFromFailedCommit::putAll);
Option.ofNullable(stat2.getLogFilesFromFailedCommit()).ifPresent(logFilesFromFailedCommit::putAll);
return new HoodieRollbackStat(stat1.getPartitionPath(), successDeleteFiles, failedDeleteFiles, commandBlocksCount, logFilesFromFailedCommit);
} | 3.68 |
hbase_MiniHBaseCluster_getMasterThreads | /** Returns the list of master threads. */
public List<JVMClusterUtil.MasterThread> getMasterThreads() {
return this.hbaseCluster.getMasters();
} | 3.68 |
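A test-side sketch, assuming a started MiniHBaseCluster referenced by a variable named cluster (the logging is illustrative):

    // Each MasterThread wraps an HMaster instance.
    for (JVMClusterUtil.MasterThread mt : cluster.getMasterThreads()) {
        System.out.println("Master: " + mt.getMaster().getServerName());
    }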
flink_AsynchronousBlockReader_readBlock | /**
* Issues a read request, which will asynchronously fill the given segment with the next block
* in the underlying file channel. Once the read request is fulfilled, the segment will be added
* to this reader's return queue.
*
* @param segment The segment to read the block into.
* @throws IOException Thrown, when the reader encounters an I/O error. Due to the asynchronous
* nature of the reader, the exception thrown here may have been caused by an earlier read
* request.
*/
@Override
public void readBlock(MemorySegment segment) throws IOException {
addRequest(new SegmentReadRequest(this, segment));
} | 3.68 |
hibernate-validator_MessagerAdapter_reportErrors | /**
* Reports the given errors against the underlying {@link Messager} using
* the specified {@link Kind}.
*
 * @param errors A collection of errors to report. May be empty but must not be
* null.
*/
public void reportErrors(Collection<ConstraintCheckIssue> errors) {
for ( ConstraintCheckIssue error : errors ) {
reportError( error );
}
} | 3.68 |
framework_DateField_setTimeZone | /**
* Sets the time zone used by this date field. The time zone is used to
* convert the absolute time in a Date object to a logical time displayed in
 * the selector and to convert the selected time back to a Date object.
*
* If no time zone has been set, the current default time zone returned by
* {@code TimeZone.getDefault()} is used.
*
* @see #getTimeZone()
* @param timeZone
* the time zone to use for time calculations.
*/
public void setTimeZone(TimeZone timeZone) {
this.timeZone = timeZone;
markAsDirty();
} | 3.68 |
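A minimal usage sketch, assuming a Vaadin UI context (the caption is illustrative):

    // Display and interpret the picker's time in UTC, independent of the server default.
    DateField meetingStart = new DateField("Meeting start (UTC)");
    meetingStart.setTimeZone(TimeZone.getTimeZone("UTC"));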
framework_VSlider_setFeedbackValue | /**
* Updates the value shown in the feedback pop-up when the slider is moved.
* The value should match the current value of this widget.
*
* @param value
* the new value to show
*/
public void setFeedbackValue(double value) {
feedback.setText(String.valueOf(value));
} | 3.68 |
hudi_HoodieAvroUtils_addMetadataFields | /**
* Adds the Hoodie metadata fields to the given schema.
*
* @param schema The schema
* @param withOperationField Whether to include the '_hoodie_operation' field
*/
public static Schema addMetadataFields(Schema schema, boolean withOperationField) {
int newFieldsSize = HoodieRecord.HOODIE_META_COLUMNS.size() + (withOperationField ? 1 : 0);
List<Schema.Field> parentFields = new ArrayList<>(schema.getFields().size() + newFieldsSize);
Schema.Field commitTimeField =
new Schema.Field(HoodieRecord.COMMIT_TIME_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field commitSeqnoField =
new Schema.Field(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field recordKeyField =
new Schema.Field(HoodieRecord.RECORD_KEY_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field partitionPathField =
new Schema.Field(HoodieRecord.PARTITION_PATH_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
Schema.Field fileNameField =
new Schema.Field(HoodieRecord.FILENAME_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
parentFields.add(commitTimeField);
parentFields.add(commitSeqnoField);
parentFields.add(recordKeyField);
parentFields.add(partitionPathField);
parentFields.add(fileNameField);
if (withOperationField) {
final Schema.Field operationField =
new Schema.Field(HoodieRecord.OPERATION_METADATA_FIELD, METADATA_FIELD_SCHEMA, "", JsonProperties.NULL_VALUE);
parentFields.add(operationField);
}
for (Schema.Field field : schema.getFields()) {
if (!isMetadataField(field.name())) {
Schema.Field newField = new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal());
for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
newField.addProp(prop.getKey(), prop.getValue());
}
parentFields.add(newField);
}
}
Schema mergedSchema = Schema.createRecord(schema.getName(), schema.getDoc(), schema.getNamespace(), false);
mergedSchema.setFields(parentFields);
return mergedSchema;
} | 3.68 |
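A usage sketch, assuming Avro is on the classpath (the record schema is illustrative):

    Schema userSchema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"Trip\",\"fields\":["
            + "{\"name\":\"uuid\",\"type\":\"string\"},"
            + "{\"name\":\"fare\",\"type\":\"double\"}]}");
    // Prepends the five standard Hoodie meta columns; no '_hoodie_operation' field here.
    Schema withMeta = HoodieAvroUtils.addMetadataFields(userSchema, false);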
framework_VTree_getLastVisibleChildInTree | /**
 * Traverses the tree to the bottom-most visible child.
 *
 * @param root
 *            The root of the tree
 * @return The bottom-most visible child
*/
private TreeNode getLastVisibleChildInTree(TreeNode root) {
if (root.isLeaf() || !root.getState() || root.getChildren().isEmpty()) {
return root;
}
List<TreeNode> children = root.getChildren();
return getLastVisibleChildInTree(children.get(children.size() - 1));
} | 3.68 |