name | code_snippet | score |
---|---|---|
hbase_ByteBufferUtils_toLong | /**
* Reads a long value at the given buffer's offset.
* @param buffer input byte buffer to read
* @param offset input offset where Long is
* @return long value at offset
*/
public static long toLong(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.toLong(buffer, offset);
} | 3.68 |
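A minimal, self-contained sketch of the read semantics using plain `java.nio`: an absolute `ByteBuffer.getLong(int)` reads the value at the given offset without moving the buffer's position, which is the behaviour the helper above exposes (the `ConverterHolder.BEST_CONVERTER` indirection presumably just selects the fastest available converter).

```java
import java.nio.ByteBuffer;

public class ToLongDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(12);
        buffer.putInt(42);            // 4 unrelated leading bytes
        buffer.putLong(123_456_789L); // the long we want to read back

        // Absolute read at offset 4; the buffer's position stays untouched.
        long value = buffer.getLong(4);
        System.out.println(value);    // 123456789
    }
}
```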
morf_Oracle_extractJdbcUrl | /**
*
* @see org.alfasoftware.morf.jdbc.AbstractDatabaseType#extractJdbcUrl(java.lang.String)
*/
@Override
public Optional<JdbcUrlElements> extractJdbcUrl(String jdbcUrl) {
Stack<String> splitURL = splitJdbcUrl(jdbcUrl);
String scheme = splitURL.pop();
if (!scheme.equalsIgnoreCase("oracle")) {
return Optional.empty();
}
splitURL.pop(); // Remove the "mem" or "thin"
splitURL.pop(); // Remove the delimiter
if (!splitURL.pop().equals(":@")) {
throw new IllegalArgumentException("Expected '@' to follow the scheme name in [" + jdbcUrl + "]");
}
JdbcUrlElements.Builder connectionDetails = extractHostAndPort(splitURL);
// Now get the path
String path = extractPath(splitURL);
connectionDetails.withInstanceName(path);
return Optional.of(connectionDetails.build());
} | 3.68 |
framework_AbstractRemoteDataSource_getCachedRange | /**
* Gets the current range of cached rows.
*
* @return the range of currently cached rows
*/
public Range getCachedRange() {
return cached;
} | 3.68 |
pulsar_ResourceGroupService_calculateQuotaForAllResourceGroups | // Periodically calculate the updated quota for all RGs in the background,
// from the reports received from other brokers.
// [Visibility for unit testing.]
protected void calculateQuotaForAllResourceGroups() {
// Calculate the quota for the next window for this RG, based on the observed usage.
final Summary.Timer quotaCalcTimer = rgQuotaCalculationLatency.startTimer();
BytesAndMessagesCount updatedQuota = new BytesAndMessagesCount();
this.resourceGroupsMap.forEach((rgName, resourceGroup) -> {
BytesAndMessagesCount globalUsageStats;
BytesAndMessagesCount localUsageStats;
BytesAndMessagesCount confCounts;
for (ResourceGroupMonitoringClass monClass : ResourceGroupMonitoringClass.values()) {
try {
globalUsageStats = resourceGroup.getGlobalUsageStats(monClass);
localUsageStats = resourceGroup.getLocalUsageStatsFromBrokerReports(monClass);
confCounts = resourceGroup.getConfLimits(monClass);
long[] globUsageBytesArray = new long[] { globalUsageStats.bytes };
updatedQuota.bytes = this.quotaCalculator.computeLocalQuota(
confCounts.bytes,
localUsageStats.bytes,
globUsageBytesArray);
long[] globUsageMessagesArray = new long[] {globalUsageStats.messages };
updatedQuota.messages = this.quotaCalculator.computeLocalQuota(
confCounts.messages,
localUsageStats.messages,
globUsageMessagesArray);
BytesAndMessagesCount oldBMCount = resourceGroup.updateLocalQuota(monClass, updatedQuota);
// Guard against unconfigured quota settings, for which computeLocalQuota will return negative.
if (updatedQuota.messages >= 0) {
rgCalculatedQuotaMessages.labels(rgName, monClass.name()).inc(updatedQuota.messages);
}
if (updatedQuota.bytes >= 0) {
rgCalculatedQuotaBytes.labels(rgName, monClass.name()).inc(updatedQuota.bytes);
}
if (oldBMCount != null) {
long messagesIncrement = updatedQuota.messages - oldBMCount.messages;
long bytesIncrement = updatedQuota.bytes - oldBMCount.bytes;
if (log.isDebugEnabled()) {
log.debug("calculateQuota for RG={} [class {}]: "
+ "updatedlocalBytes={}, updatedlocalMesgs={}; "
+ "old bytes={}, old mesgs={}; incremented bytes by {}, messages by {}",
rgName, monClass, updatedQuota.bytes, updatedQuota.messages,
oldBMCount.bytes, oldBMCount.messages,
bytesIncrement, messagesIncrement);
}
} else {
if (log.isDebugEnabled()) {
log.debug("calculateQuota for RG={} [class {}]: got back null from updateLocalQuota",
rgName, monClass);
}
}
} catch (Throwable t) {
log.error("Got exception={} while calculating new quota for monitoring-class={} of RG={}",
t.getMessage(), monClass, rgName);
}
}
});
double diffTimeSeconds = quotaCalcTimer.observeDuration();
if (log.isDebugEnabled()) {
log.debug("calculateQuotaForAllResourceGroups took {} milliseconds", diffTimeSeconds * 1000);
}
// Check any re-scheduling requirements for next time.
// Use the same period as getResourceUsagePublishIntervalInSecs;
// cancel and re-schedule this task if the period of execution has changed.
ServiceConfiguration config = pulsar.getConfiguration();
long newPeriodInSeconds = config.getResourceUsageTransportPublishIntervalInSecs();
if (newPeriodInSeconds != this.resourceUsagePublishPeriodInSeconds) {
if (this.calculateQuotaPeriodicTask == null) {
log.error("calculateQuotaForAllResourceGroups: Unable to find running task to cancel when "
+ "publish period changed from {} to {} {}",
this.resourceUsagePublishPeriodInSeconds, newPeriodInSeconds, timeUnitScale);
} else {
boolean cancelStatus = this.calculateQuotaPeriodicTask.cancel(true);
log.info("calculateQuotaForAllResourceGroups: Got status={} in cancel of periodic "
+ " when publish period changed from {} to {} {}",
cancelStatus, this.resourceUsagePublishPeriodInSeconds, newPeriodInSeconds, timeUnitScale);
}
this.calculateQuotaPeriodicTask = pulsar.getExecutor().scheduleAtFixedRate(
catchingAndLoggingThrowables(this::calculateQuotaForAllResourceGroups),
newPeriodInSeconds,
newPeriodInSeconds,
timeUnitScale);
this.resourceUsagePublishPeriodInSeconds = newPeriodInSeconds;
maxIntervalForSuppressingReportsMSecs =
this.resourceUsagePublishPeriodInSeconds * MaxUsageReportSuppressRounds;
}
} | 3.68 |
pulsar_ClusterDataImpl_checkPropertiesIfPresent | /**
* Checks the cluster data properties against their validation rules; if any property is illegal,
* an {@link IllegalArgumentException} is thrown.
*
* @throws IllegalArgumentException if an illegal property is present.
*/
public void checkPropertiesIfPresent() throws IllegalArgumentException {
URIPreconditions.checkURIIfPresent(getServiceUrl(),
uri -> Objects.equals(uri.getScheme(), "http"),
"Illegal service url, example: http://pulsar.example.com:8080");
URIPreconditions.checkURIIfPresent(getServiceUrlTls(),
uri -> Objects.equals(uri.getScheme(), "https"),
"Illegal service tls url, example: https://pulsar.example.com:8443");
URIPreconditions.checkURIIfPresent(getBrokerServiceUrl(),
uri -> Objects.equals(uri.getScheme(), "pulsar"),
"Illegal broker service url, example: pulsar://pulsar.example.com:6650");
URIPreconditions.checkURIIfPresent(getBrokerServiceUrlTls(),
uri -> Objects.equals(uri.getScheme(), "pulsar+ssl"),
"Illegal broker service tls url, example: pulsar+ssl://pulsar.example.com:6651");
URIPreconditions.checkURIIfPresent(getProxyServiceUrl(),
uri -> Objects.equals(uri.getScheme(), "pulsar")
|| Objects.equals(uri.getScheme(), "pulsar+ssl"),
"Illegal proxy service url, example: pulsar+ssl://ats-proxy.example.com:4443 "
+ "or pulsar://ats-proxy.example.com:4080");
warnIfUrlIsNotPresent();
} | 3.68 |
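A self-contained sketch of the "check only if present" pattern used above. The contract assumed here (not verified against the Pulsar source) is that `URIPreconditions.checkURIIfPresent` skips null or empty URLs and otherwise requires the predicate to accept the parsed URI.

```java
import java.net.URI;
import java.util.function.Predicate;

public class UriCheckDemo {
    // Hedged re-implementation of the assumed contract: absent URLs are skipped,
    // present URLs must satisfy the scheme predicate or an exception is thrown.
    static void checkUriIfPresent(String url, Predicate<URI> check, String errorMessage) {
        if (url == null || url.isEmpty()) {
            return; // property not present, nothing to validate
        }
        if (!check.test(URI.create(url))) {
            throw new IllegalArgumentException(errorMessage);
        }
    }

    public static void main(String[] args) {
        checkUriIfPresent("pulsar://broker.example.com:6650",
                uri -> "pulsar".equals(uri.getScheme()),
                "Illegal broker service url");   // passes silently
        checkUriIfPresent(null,
                uri -> "https".equals(uri.getScheme()),
                "Illegal service tls url");      // skipped: URL not present
    }
}
```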
hbase_MetricsSource_incrLogEditsFiltered | /** The number of log edits filtered out. */
public void incrLogEditsFiltered() {
incrLogEditsFiltered(1);
} | 3.68 |
hbase_MasterCoprocessorHost_postTruncateRegion | /**
* Invoked after calling the truncate region procedure
* @param regionInfo region being truncated
*/
public void postTruncateRegion(RegionInfo regionInfo) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {
@Override
public void call(MasterObserver observer) {
observer.postTruncateRegion(this, regionInfo);
}
});
} | 3.68 |
pulsar_ResourceUnitRanking_getPreAllocatedBundles | /**
* Get the pre-allocated bundles.
*/
public Set<String> getPreAllocatedBundles() {
return this.preAllocatedBundles;
} | 3.68 |
framework_TreeData_getChildren | /**
* Get the immediate child items for the given item.
*
* @param item
* the item for which to retrieve child items, or null to
* retrieve all root items
* @return an unmodifiable list of child items for the given item
*
* @throws IllegalArgumentException
* if the item does not exist in this structure
*/
public List<T> getChildren(T item) {
if (!contains(item)) {
throw new IllegalArgumentException(
"Item '" + item + "' not in the hierarchy");
}
return Collections
.unmodifiableList(itemToWrapperMap.get(item).getChildren());
} | 3.68 |
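A hedged usage sketch against the Vaadin 8 `TreeData` API; `addItem(parent, item)` with a null parent registers a root item, and the class name `TreeDataDemo` is illustrative only.

```java
import com.vaadin.data.TreeData;

import java.util.List;

public class TreeDataDemo {
    public static void main(String[] args) {
        TreeData<String> treeData = new TreeData<>();
        treeData.addItem(null, "Beverages");        // null parent => root item
        treeData.addItem("Beverages", "Coffee");
        treeData.addItem("Beverages", "Tea");

        List<String> roots = treeData.getChildren(null);          // [Beverages]
        List<String> drinks = treeData.getChildren("Beverages");  // [Coffee, Tea]
        System.out.println(roots + " " + drinks);

        // Asking for an item that was never added triggers the
        // IllegalArgumentException documented above:
        // treeData.getChildren("Snacks");
    }
}
```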
hbase_MetricsHeapMemoryManager_increaseTunerDoNothingCounter | /**
* Increase the counter when the tuner neither expands the memstore global size limit nor the
* blockcache max size.
*/
public void increaseTunerDoNothingCounter() {
source.increaseTunerDoNothingCounter();
} | 3.68 |
AreaShop_GeneralRegion_setDeleted | /**
* Indicate that this region has been deleted.
*/
public void setDeleted() {
deleted = true;
} | 3.68 |
hudi_OptionsResolver_isInsertOperation | /**
* Returns whether the table operation is 'insert'.
*/
public static boolean isInsertOperation(Configuration conf) {
WriteOperationType operationType = WriteOperationType.fromValue(conf.getString(FlinkOptions.OPERATION));
return operationType == WriteOperationType.INSERT;
} | 3.68 |
framework_FileTypeResolver_getMIMETypeToIconMapping | /**
* Gets the internal mime-type to icon mapping.
*
* @return unmodifiable map containing the current mime-type to icon mapping
*/
public static Map<String, Resource> getMIMETypeToIconMapping() {
return Collections.unmodifiableMap(MIME_TO_ICON_MAP);
} | 3.68 |
framework_VFlash_setCodetype | /**
* This attribute specifies the content type of data expected when
* downloading the object specified by classid. This attribute is optional
* but recommended when classid is specified since it allows the user agent
* to avoid loading information for unsupported content types. The default
* value is the value of the type attribute.
*
* @param codetype
* the codetype to set.
*/
public void setCodetype(String codetype) {
if (this.codetype != codetype) {
this.codetype = codetype;
needsRebuild = true;
}
} | 3.68 |
framework_AbstractComponent_getDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Component#getDescription()
*/
@Override
public String getDescription() {
return getState(false).description;
} | 3.68 |
framework_GeneratedPropertyContainer_equals | /**
* Tests if the given object is the same as this object. Two Items
* from the same container with the same ID are equal.
*
* @param obj
* an object to compare with this object
* @return <code>true</code> if the given object is the same as this
* object, <code>false</code> if not
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null
|| !obj.getClass().equals(GeneratedPropertyItem.class)) {
return false;
}
final GeneratedPropertyItem li = (GeneratedPropertyItem) obj;
return getContainer() == li.getContainer()
&& itemId.equals(li.itemId);
} | 3.68 |
framework_VTabsheetBase_setTabCaptionsAsHtml | /**
* Sets whether the caption is rendered as HTML.
* <p>
* The default is false, i.e. render tab captions as plain text
* <p>
* This value is delegated from the TabsheetState.
*
* @since 7.4
* @param tabCaptionsAsHtml
* {@code true} if the captions are rendered as HTML,
* {@code false} if rendered as plain text
*/
public void setTabCaptionsAsHtml(boolean tabCaptionsAsHtml) {
this.tabCaptionsAsHtml = tabCaptionsAsHtml;
} | 3.68 |
pulsar_ProducerStats_getPartitionStats | /**
* @return stats for each partition if the topic is a partitioned topic
*/
default Map<String, ProducerStats> getPartitionStats() {
return Collections.emptyMap();
} | 3.68 |
hadoop_IOStatisticsBinding_aggregateMaximums | /**
* Aggregate two maximum values.
* @param l left
* @param r right
* @return the new maximum.
*/
public static Long aggregateMaximums(Long l, Long r) {
if (l == MIN_UNSET_VALUE) {
return r;
} else if (r == MIN_UNSET_VALUE) {
return l;
} else {
return Math.max(l, r);
}
} | 3.68 |
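A runnable sketch of the sentinel-aware aggregation above; `MIN_UNSET_VALUE = -1` is an assumption standing in for the Hadoop constant referenced by the method.

```java
public class MaxAggregationDemo {
    // Assumed sentinel meaning "no value recorded yet"; stands in for the
    // MIN_UNSET_VALUE constant used by IOStatisticsBinding.aggregateMaximums.
    static final long MIN_UNSET_VALUE = -1L;

    static Long aggregateMaximums(Long l, Long r) {
        if (l == MIN_UNSET_VALUE) {
            return r;                 // left side unset: take the right value
        } else if (r == MIN_UNSET_VALUE) {
            return l;                 // right side unset: take the left value
        }
        return Math.max(l, r);        // both set: plain maximum
    }

    public static void main(String[] args) {
        System.out.println(aggregateMaximums(-1L, 7L));  // 7
        System.out.println(aggregateMaximums(12L, 7L));  // 12
    }
}
```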
flink_HsSelectiveSpillingStrategy_decideActionWithGlobalInfo | // Score the buffer of each subpartition and decide the spill and release action. The lower the
// score, the sooner the buffer is likely to be consumed, so it should be kept in
// memory as long as possible. Select all buffers that need to be spilled according to the score
// from high to low.
@Override
public Decision decideActionWithGlobalInfo(HsSpillingInfoProvider spillingInfoProvider) {
if (spillingInfoProvider.getNumTotalRequestedBuffers()
< spillingInfoProvider.getPoolSize() * spillThreshold) {
// In case situation changed since onMemoryUsageChanged() returns Optional#empty()
return Decision.NO_ACTION;
}
int spillNum = (int) (spillingInfoProvider.getPoolSize() * spillBufferRatio);
TreeMap<Integer, Deque<BufferIndexAndChannel>> subpartitionToBuffers = new TreeMap<>();
for (int channel = 0; channel < spillingInfoProvider.getNumSubpartitions(); channel++) {
subpartitionToBuffers.put(
channel,
spillingInfoProvider.getBuffersInOrder(
channel,
SpillStatus.NOT_SPILL,
// selective spilling strategy does not support multiple consumers.
ConsumeStatusWithId.fromStatusAndConsumerId(
ConsumeStatus.NOT_CONSUMED, HsConsumerId.DEFAULT)));
}
TreeMap<Integer, List<BufferIndexAndChannel>> subpartitionToHighPriorityBuffers =
getBuffersByConsumptionPriorityInOrder(
// selective spilling strategy does not support multiple consumers.
spillingInfoProvider.getNextBufferIndexToConsume(HsConsumerId.DEFAULT),
subpartitionToBuffers,
spillNum);
Decision.Builder builder = Decision.builder();
subpartitionToHighPriorityBuffers.forEach(
(subpartitionId, buffers) -> {
builder.addBufferToSpill(subpartitionId, buffers);
builder.addBufferToRelease(subpartitionId, buffers);
});
return builder.build();
} | 3.68 |
hibernate-validator_ValidationXmlTestHelper_runWithCustomValidationXml | /**
* Executes the given runnable, using the specified file as replacement for
* {@code META-INF/validation.xml}.
*
* @param validationXmlName The file to be used as validation.xml file.
* @param runnable The runnable to execute.
*/
public void runWithCustomValidationXml(final String validationXmlName, Runnable runnable) {
ClassLoader previousContextCl = Thread.currentThread().getContextClassLoader();
try {
Thread.currentThread().setContextClassLoader(
new ClassLoader( previousContextCl ) {
@Override
public InputStream getResourceAsStream(String name) {
if ( "META-INF/validation.xml".equals( name ) ) {
return clazz.getResourceAsStream( validationXmlName );
}
return super.getResourceAsStream( name );
}
}
);
runnable.run();
}
finally {
Thread.currentThread().setContextClassLoader( previousContextCl );
}
} | 3.68 |
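A self-contained sketch of the same context-classloader override trick shown in the helper above: the thread's context class loader is swapped so that requests for `META-INF/validation.xml` are served from replacement content, and the original loader is restored in a finally block. The inline XML string is a placeholder, not a valid validation configuration.

```java
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class ContextClassLoaderOverrideDemo {
    public static void main(String[] args) {
        ClassLoader previous = Thread.currentThread().getContextClassLoader();
        try {
            Thread.currentThread().setContextClassLoader(new ClassLoader(previous) {
                @Override
                public InputStream getResourceAsStream(String name) {
                    if ("META-INF/validation.xml".equals(name)) {
                        // Serve replacement content instead of the real file.
                        return new ByteArrayInputStream(
                                "<validation-config/>".getBytes(StandardCharsets.UTF_8));
                    }
                    return super.getResourceAsStream(name);
                }
            });
            // Any code run here that loads META-INF/validation.xml through the
            // context class loader sees the replacement content.
            InputStream in = Thread.currentThread().getContextClassLoader()
                    .getResourceAsStream("META-INF/validation.xml");
            System.out.println(in != null); // true
        } finally {
            Thread.currentThread().setContextClassLoader(previous);
        }
    }
}
```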
hadoop_HsLogsPage_content | /**
* The content of this page is the aggregated logs block
* @return AggregatedLogsBlock.class
*/
@Override protected Class<? extends SubView> content() {
return AggregatedLogsBlock.class;
} | 3.68 |
hbase_HBaseTestingUtility_enableShortCircuit | /**
* Enable the short circuit read, unless configured differently. Set both HBase and HDFS settings,
* including skipping the hdfs checksum checks.
*/
private void enableShortCircuit() {
if (isReadShortCircuitOn()) {
String curUser = System.getProperty("user.name");
LOG.info("read short circuit is ON for user " + curUser);
// read short circuit, for hdfs
conf.set("dfs.block.local-path-access.user", curUser);
// read short circuit, for hbase
conf.setBoolean("dfs.client.read.shortcircuit", true);
// Skip checking checksum, for the hdfs client and the datanode
conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
} else {
LOG.info("read short circuit is OFF");
}
} | 3.68 |
flink_AvroSchemaConverter_convertToDataType | /**
* Converts an Avro schema string into a nested row structure with deterministic field order and
* data types that are compatible with Flink's Table & SQL API.
*
* @param avroSchemaString Avro schema definition string
* @return data type matching the schema
*/
public static DataType convertToDataType(String avroSchemaString) {
Preconditions.checkNotNull(avroSchemaString, "Avro schema must not be null.");
final Schema schema;
try {
schema = new Schema.Parser().parse(avroSchemaString);
} catch (SchemaParseException e) {
throw new IllegalArgumentException("Could not parse Avro schema string.", e);
}
return convertToDataType(schema);
} | 3.68 |
morf_WhenCondition_getValue | /**
* @return the value
*/
public AliasedField getValue() {
return value;
} | 3.68 |
hbase_BufferChain_getBytes | /**
* Expensive. Makes a new buffer to hold a copy of what is in contained ByteBuffers. This call
* drains this instance; it cannot be used subsequent to the call.
* @return A new byte buffer with the content of all contained ByteBuffers.
*/
byte[] getBytes() {
if (!hasRemaining()) throw new IllegalAccessError();
byte[] bytes = new byte[this.remaining];
int offset = 0;
for (ByteBuffer bb : this.buffers) {
int length = bb.remaining();
bb.get(bytes, offset, length);
offset += length;
}
return bytes;
} | 3.68 |
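A self-contained sketch of the same drain pattern: each relative bulk `get()` consumes the buffer's remaining bytes, which is why the chain above cannot be read a second time after this call.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class DrainBuffersDemo {
    // Copies the remaining bytes of every buffer into one array, consuming them.
    static byte[] drain(ByteBuffer[] buffers, int totalRemaining) {
        byte[] bytes = new byte[totalRemaining];
        int offset = 0;
        for (ByteBuffer bb : buffers) {
            int length = bb.remaining();
            bb.get(bytes, offset, length);
            offset += length;
        }
        return bytes;
    }

    public static void main(String[] args) {
        ByteBuffer[] chain = {
                ByteBuffer.wrap("Hello, ".getBytes(StandardCharsets.UTF_8)),
                ByteBuffer.wrap("HBase".getBytes(StandardCharsets.UTF_8))
        };
        byte[] out = drain(chain, 12);
        System.out.println(new String(out, StandardCharsets.UTF_8)); // Hello, HBase
    }
}
```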
flink_MapView_setMap | /** Replaces the entire view's content with the content of the given {@link Map}. */
public void setMap(Map<K, V> map) {
this.map = map;
} | 3.68 |
framework_ContainerOrderedWrapper_size | /*
* Gets the number of Items in the Container. Don't add a JavaDoc comment
* here, we use the default documentation from implemented interface.
*/
@Override
public int size() {
int newSize = container.size();
assert newSize >= 0;
if (lastKnownSize != -1 && newSize != lastKnownSize
&& !(container instanceof Container.ItemSetChangeNotifier)) {
// Update the internal cache when the size of the container changes
// and the container is incapable of sending ItemSetChangeEvents
updateOrderWrapper();
}
lastKnownSize = newSize;
return newSize;
} | 3.68 |
graphhopper_VectorTile_addGeometry | /**
* <pre>
* Contains a stream of commands and parameters (vertices).
* A detailed description on geometry encoding is located in
* section 4.3 of the specification.
* </pre>
*
* <code>repeated uint32 geometry = 4 [packed = true];</code>
*/
public Builder addGeometry(int value) {
ensureGeometryIsMutable();
geometry_.add(value);
onChanged();
return this;
} | 3.68 |
hadoop_ApplicationFinishEvent_getDiagnostic | /**
* Why the app was aborted
* @return diagnostic message
*/
public String getDiagnostic() {
return diagnostic;
} | 3.68 |
hbase_StorageClusterStatusModel_getName | /** Returns the region server's name */
@XmlAttribute
public String getName() {
return name;
} | 3.68 |
dubbo_DubboShutdownHook_register | /**
* Register the ShutdownHook
*/
public void register() {
if (!ignoreListenShutdownHook && registered.compareAndSet(false, true)) {
try {
Runtime.getRuntime().addShutdownHook(this);
} catch (IllegalStateException e) {
logger.warn(CONFIG_FAILED_SHUTDOWN_HOOK, "", "", "register shutdown hook failed: " + e.getMessage(), e);
} catch (Exception e) {
logger.warn(CONFIG_FAILED_SHUTDOWN_HOOK, "", "", "register shutdown hook failed: " + e.getMessage(), e);
}
}
} | 3.68 |
open-banking-gateway_RequestScopedProvider_registerForFintechSession | /**
* Registers scoped services for the FinTech request.
* @param fintech FinTech to provide services for.
* @param profile ASPSP profile (i.e. FinTS or Xs2a)
* @param session Owning session for current scoped services
* @param bankProtocolId Bank protocol id to scope the services more precisely
* @param encryptionServiceProvider Consent encryption services for the FinTech
* @param futureAuthorizationSessionKey Authorization session key that is going to be used (if the session is not opened yet)
* or current session key if it is already opened
* @param fintechPassword Fintech Datasafe/KeyStore access password
* @return Request scoped services for FinTech
*/
public RequestScoped registerForFintechSession(
Fintech fintech,
BankProfile profile,
ServiceSession session,
Long bankProtocolId,
ConsentAuthorizationEncryptionServiceProvider encryptionServiceProvider,
SecretKeyWithIv futureAuthorizationSessionKey,
Supplier<char[]> fintechPassword
) {
ConsentAccess consentAccess = consentAccessProvider.consentForFintech(fintech, profile.getBank(), session, fintechPassword);
PaymentAccess paymentAccess = paymentAccessProvider.paymentForFintech(fintech, session, fintechPassword);
EncryptionService authorizationSessionEncService = sessionEncryption(encryptionServiceProvider, futureAuthorizationSessionKey);
return doRegister(
profile,
fintechConfig.getConsumers().get(fintech.getGlobalId()),
consentAccess,
paymentAccess,
authorizationSessionEncService,
futureAuthorizationSessionKey,
bankProtocolId);
} | 3.68 |
framework_VaadinSession_getLocale | /**
* Gets the default locale for this session.
*
* By default this is the preferred locale of the user using the session. In
* most cases it is read from the browser defaults.
*
* @return the locale of this session.
*/
public Locale getLocale() {
assert hasLock();
if (locale != null) {
return locale;
}
return Locale.getDefault();
} | 3.68 |
graphhopper_WayToEdgeConverter_convertForViaNode | /**
* Finds the edge IDs associated with the given OSM ways that are adjacent to the given via-node.
* For each way there can be multiple edge IDs and there should be exactly one that is adjacent to the via-node
* for each way. Otherwise we throw {@link OSMRestrictionException}
*/
public NodeResult convertForViaNode(LongArrayList fromWays, int viaNode, LongArrayList toWays) throws OSMRestrictionException {
if (fromWays.isEmpty() || toWays.isEmpty())
throw new IllegalArgumentException("There must be at least one from- and to-way");
if (fromWays.size() > 1 && toWays.size() > 1)
throw new IllegalArgumentException("There can only be multiple from- or to-ways, but not both");
NodeResult result = new NodeResult(fromWays.size(), toWays.size());
for (LongCursor fromWay : fromWays)
edgesByWay.apply(fromWay.value).forEachRemaining(e -> {
if (baseGraph.isAdjacentToNode(e.value, viaNode))
result.fromEdges.add(e.value);
});
if (result.fromEdges.size() < fromWays.size())
throw new OSMRestrictionException("has from member ways that aren't adjacent to the via member node");
else if (result.fromEdges.size() > fromWays.size())
throw new OSMRestrictionException("has from member ways that aren't split at the via member node");
for (LongCursor toWay : toWays)
edgesByWay.apply(toWay.value).forEachRemaining(e -> {
if (baseGraph.isAdjacentToNode(e.value, viaNode))
result.toEdges.add(e.value);
});
if (result.toEdges.size() < toWays.size())
throw new OSMRestrictionException("has to member ways that aren't adjacent to the via member node");
else if (result.toEdges.size() > toWays.size())
throw new OSMRestrictionException("has to member ways that aren't split at the via member node");
return result;
} | 3.68 |
hadoop_ReencryptionHandler_run | /**
* Main loop. It takes at most 1 zone per scan, and executes until the zone
* is completed.
* {@link #reencryptEncryptionZone(long)}.
*/
@Override
public void run() {
LOG.info("Starting up re-encrypt thread with interval={} millisecond.",
interval);
while (true) {
try {
synchronized (this) {
wait(interval);
}
traverser.checkPauseForTesting();
} catch (InterruptedException ie) {
LOG.info("Re-encrypt handler interrupted. Exiting");
Thread.currentThread().interrupt();
return;
}
final Long zoneId;
dir.getFSNamesystem().readLock();
try {
zoneId = getReencryptionStatus().getNextUnprocessedZone();
if (zoneId == null) {
// empty queue.
continue;
}
LOG.info("Executing re-encrypt commands on zone {}. Current zones:{}",
zoneId, getReencryptionStatus());
getReencryptionStatus().markZoneStarted(zoneId);
resetSubmissionTracker(zoneId);
} finally {
dir.getFSNamesystem().readUnlock("reEncryptThread");
}
try {
reencryptEncryptionZone(zoneId);
} catch (RetriableException | SafeModeException re) {
LOG.info("Re-encryption caught exception, will retry", re);
getReencryptionStatus().markZoneForRetry(zoneId);
} catch (IOException ioe) {
LOG.warn("IOException caught when re-encrypting zone {}", zoneId, ioe);
} catch (InterruptedException ie) {
LOG.info("Re-encrypt handler interrupted. Exiting.");
Thread.currentThread().interrupt();
return;
} catch (Throwable t) {
LOG.error("Re-encrypt handler thread exiting. Exception caught when"
+ " re-encrypting zone {}.", zoneId, t);
return;
}
}
} | 3.68 |
framework_Calendar_fireWeekClick | /**
* Fires event when a week was clicked in the calendar.
*
* @param week
* The week that was clicked
* @param year
* The year of the week
*/
protected void fireWeekClick(int week, int year) {
fireEvent(new WeekClick(this, week, year));
} | 3.68 |
flink_LogicalTypeChecks_hasWellDefinedString | /**
* Checks whether the given {@link LogicalType} has a well-defined string representation when
* calling {@link Object#toString()} on the internal data structure. The string representation
* would be similar in SQL or in a programming language.
*
* <p>Note: This method might not be necessary anymore, once we have implemented a utility that
* can convert any internal data structure to a well-defined string representation.
*/
public static boolean hasWellDefinedString(LogicalType logicalType) {
if (logicalType instanceof DistinctType) {
return hasWellDefinedString(((DistinctType) logicalType).getSourceType());
}
switch (logicalType.getTypeRoot()) {
case CHAR:
case VARCHAR:
case BOOLEAN:
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
case FLOAT:
case DOUBLE:
return true;
default:
return false;
}
} | 3.68 |
flink_RocksDBResourceContainer_getDbOptions | /** Gets the RocksDB {@link DBOptions} to be used for RocksDB instances. */
public DBOptions getDbOptions() {
// initial options from common profile
DBOptions opt = createBaseCommonDBOptions();
handlesToClose.add(opt);
// load configurable options on top of pre-defined profile
setDBOptionsFromConfigurableOptions(opt);
// add user-defined options factory, if specified
if (optionsFactory != null) {
opt = optionsFactory.createDBOptions(opt, handlesToClose);
}
// add necessary default options
opt = opt.setCreateIfMissing(true);
// if sharedResources is non-null, use the write buffer manager from it.
if (sharedResources != null) {
opt.setWriteBufferManager(sharedResources.getResourceHandle().getWriteBufferManager());
}
if (enableStatistics) {
Statistics statistics = new Statistics();
opt.setStatistics(statistics);
handlesToClose.add(statistics);
}
return opt;
} | 3.68 |
framework_RadioButtonGroupElement_clear | /**
* Clear operation is not supported for Option Group. This operation has no
* effect on Option Group element.
*/
@Override
public void clear() {
super.clear();
} | 3.68 |
hbase_MiniZooKeeperCluster_getAddress | /** Returns Address for this cluster instance. */
public Address getAddress() {
return Address.fromParts(HOST, getClientPort());
} | 3.68 |
querydsl_Expressions_dateTimeOperation | /**
* Create a new Operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
public static <T extends Comparable<?>> DateTimeOperation<T> dateTimeOperation(Class<? extends T> type,
Operator operator, Expression<?>... args) {
return new DateTimeOperation<T>(type, operator, args);
} | 3.68 |
hbase_ZNodePaths_isMetaZNodePrefix | /** Returns True if meta znode. */
public boolean isMetaZNodePrefix(String znode) {
return znode != null && znode.startsWith(this.metaZNodePrefix);
} | 3.68 |
querydsl_MathExpressions_cos | /**
* Create a {@code cos(num)} expression
*
* <p>Returns the cosine of an angle of num radians.</p>
*
* @param num numeric expression
* @return cos(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> cos(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.COS, num);
} | 3.68 |
hbase_TableDescriptorBuilder_getColumnFamily | /**
* Returns the ColumnFamilyDescriptor for a specific column family with name as specified by the
* parameter column.
* @param column Column family name
* @return Column descriptor for the passed family name, or null if no such family exists.
*/
@Override
public ColumnFamilyDescriptor getColumnFamily(final byte[] column) {
return this.families.get(column);
} | 3.68 |
framework_VFilterSelect_hasNextPage | /**
* Does the Select have more pages?
*
* @return true if a next page exists, else false if the current page is the
* last page
*/
public boolean hasNextPage() {
return pageLength > 0 && totalMatches > (currentPage + 1) * pageLength;
} | 3.68 |
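The same arithmetic as a tiny runnable sketch; `hasNextPage` here is a standalone copy of the expression above, not the widget method itself.

```java
public class PagingDemo {
    // pageLength items per page, currentPage is 0-based; there is a next page
    // as long as more matches remain beyond the end of the current page.
    static boolean hasNextPage(int pageLength, int totalMatches, int currentPage) {
        return pageLength > 0 && totalMatches > (currentPage + 1) * pageLength;
    }

    public static void main(String[] args) {
        System.out.println(hasNextPage(10, 25, 1)); // true: items 20-24 remain
        System.out.println(hasNextPage(10, 25, 2)); // false: page 2 is the last
        System.out.println(hasNextPage(0, 25, 0));  // false: paging disabled
    }
}
```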
hudi_HoodieLogBlock_isCompactedLogBlock | /**
* Compacted blocks are created by log compaction, which merges consecutive blocks together into a
* single large block containing all the changes.
*/
public boolean isCompactedLogBlock() {
return logBlockHeader.containsKey(HeaderMetadataType.COMPACTED_BLOCK_TIMES);
} | 3.68 |
hadoop_NativeTaskOutputFiles_getInputFile | /**
* Return a local reduce input file created earlier
*
* @param mapId a map task id
*/
public Path getInputFile(int mapId) throws IOException {
return lDirAlloc.getLocalPathToRead(
String.format(REDUCE_INPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, Integer.valueOf(mapId)),
conf);
} | 3.68 |
hbase_BalancerClusterState_serverHasTooFewRegions | /**
* Returns true iff a given server has fewer regions than the balanced amount
*/
public boolean serverHasTooFewRegions(int server) {
int minLoad = this.numRegions / numServers;
int numRegions = getNumRegions(server);
return numRegions < minLoad;
} | 3.68 |
hadoop_FederationApplicationHomeSubClusterStoreInputValidator_validate | /**
* Quick validation on the input to check some obvious fail conditions (fail
* fast). Check if the provided {@link DeleteApplicationHomeSubClusterRequest}
* for deleting an application is valid or not.
*
* @param request the {@link DeleteApplicationHomeSubClusterRequest} to
* validate against
* @throws FederationStateStoreInvalidInputException if the request is invalid
*/
public static void validate(DeleteApplicationHomeSubClusterRequest request)
throws FederationStateStoreInvalidInputException {
if (request == null) {
String message = "Missing DeleteApplicationHomeSubCluster Request."
+ " Please try again by specifying"
+ " an ApplicationHomeSubCluster information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate application Id
checkApplicationId(request.getApplicationId());
} | 3.68 |
flink_TaskStatsRequestCoordinator_handleSuccessfulResponse | /**
* Handles the successfully returned tasks stats response by collecting the corresponding
* subtask samples.
*
* @param requestId ID of the request.
* @param executionIds ID of the sampled task.
* @param result Result of stats request returned by an individual task.
* @throws IllegalStateException If unknown request ID and not recently finished or cancelled
* sample.
*/
public void handleSuccessfulResponse(
int requestId, ImmutableSet<ExecutionAttemptID> executionIds, T result) {
synchronized (lock) {
if (isShutDown) {
return;
}
final String ids =
executionIds.stream()
.map(ExecutionAttemptID::toString)
.collect(Collectors.joining(", "));
if (log.isDebugEnabled()) {
log.debug("Collecting stats sample {} of tasks {}", requestId, ids);
}
PendingStatsRequest<T, V> pending = pendingRequests.get(requestId);
if (pending != null) {
pending.collectTaskStats(executionIds, result);
// Publish the sample
if (pending.isComplete()) {
pendingRequests.remove(requestId);
rememberRecentRequestId(requestId);
pending.completePromiseAndDiscard();
}
} else if (recentPendingRequestIds.contains(requestId)) {
if (log.isDebugEnabled()) {
log.debug("Received late stats sample {} of tasks {}", requestId, ids);
}
} else {
if (log.isDebugEnabled()) {
log.debug(String.format("Unknown request ID %d.", requestId));
}
}
}
} | 3.68 |
hbase_ZKMainServer_hasServer | /**
* @param args the arguments to check
* @return True if argument strings have a '-server' in them.
*/
private static boolean hasServer(final String[] args) {
return args.length > 0 && args[0].equals(SERVER_ARG);
} | 3.68 |
hadoop_HdfsFileStatus_isdir | /**
* Set the isDir flag for the entity (default = false).
* @param isdir True if the referent is a directory.
* @return This Builder instance
*/
public Builder isdir(boolean isdir) {
this.isdir = isdir;
return this;
} | 3.68 |
hbase_SnapshotInfo_addStoreFile | /**
* Add the specified store file to the stats
* @param region region encoded Name
* @param family family name
* @param storeFile store file name
* @param filesMap store files map for all snapshots, it may be null
* @return the store file information
*/
FileInfo addStoreFile(final RegionInfo region, final String family,
final SnapshotRegionManifest.StoreFile storeFile, final Map<Path, Integer> filesMap)
throws IOException {
HFileLink link =
HFileLink.build(conf, snapshotTable, region.getEncodedName(), family, storeFile.getName());
boolean isCorrupted = false;
boolean inArchive = false;
long size = -1;
try {
if (fs.exists(link.getArchivePath())) {
inArchive = true;
size = fs.getFileStatus(link.getArchivePath()).getLen();
hfilesArchiveSize.addAndGet(size);
hfilesArchiveCount.incrementAndGet();
// If store file is not shared with other snapshots and tables,
// increase nonSharedHfilesArchiveSize
if (
(filesMap != null) && !isArchivedFileStillReferenced(link.getArchivePath(), filesMap)
) {
nonSharedHfilesArchiveSize.addAndGet(size);
}
} else if (fs.exists(link.getMobPath())) {
inArchive = true;
size = fs.getFileStatus(link.getMobPath()).getLen();
hfilesMobSize.addAndGet(size);
hfilesMobCount.incrementAndGet();
} else {
size = link.getFileStatus(fs).getLen();
hfilesSize.addAndGet(size);
hfilesCount.incrementAndGet();
}
isCorrupted = (storeFile.hasFileSize() && storeFile.getFileSize() != size);
if (isCorrupted) hfilesCorrupted.incrementAndGet();
} catch (FileNotFoundException e) {
hfilesMissing.incrementAndGet();
}
return new FileInfo(inArchive, size, isCorrupted);
} | 3.68 |
flink_AbstractPythonScalarFunctionOperator_createUserDefinedFunctionsProto | /** Gets the proto representation of the Python user-defined functions to be executed. */
@Override
public FlinkFnApi.UserDefinedFunctions createUserDefinedFunctionsProto() {
return ProtoUtils.createUserDefinedFunctionsProto(
getRuntimeContext(),
scalarFunctions,
config.get(PYTHON_METRIC_ENABLED),
config.get(PYTHON_PROFILE_ENABLED));
} | 3.68 |
framework_Window_getWindow | /**
* Get the window from which this event originated.
*
* @return the window
*/
public Window getWindow() {
return (Window) getSource();
} | 3.68 |
flink_AbstractPagedInputView_seekInput | /**
* Sets the internal state of the view such that the next bytes will be read from the given
* memory segment, starting at the given position. The memory segment will provide bytes up to
* the given limit position.
*
* @param segment The segment to read the next bytes from.
* @param positionInSegment The position in the segment to start reading from.
* @param limitInSegment The limit in the segment. When reached, the view will attempt to switch
* to the next segment.
*/
protected void seekInput(MemorySegment segment, int positionInSegment, int limitInSegment) {
this.currentSegment = segment;
this.positionInSegment = positionInSegment;
this.limitInSegment = limitInSegment;
} | 3.68 |
open-banking-gateway_Xs2aConsentInfo_isStartConsentAuthorizationWithPin | /**
* If ASPSP needs startConsentAuthorization with User Password.
*/
public boolean isStartConsentAuthorizationWithPin(Xs2aContext ctx) {
return ctx.aspspProfile().isXs2aStartConsentAuthorizationWithPin();
} | 3.68 |
hmily_HmilySqlParserEngineFactory_newInstance | /**
* New instance hmily sql parser engine.
*
* @return the hmily sql parser engine
*/
public static HmilySqlParserEngine newInstance() {
if (hmilySqlParserEngine == null) {
synchronized (HmilySqlParserEngineFactory.class) {
if (hmilySqlParserEngine == null) {
HmilyConfig config = ConfigEnv.getInstance().getConfig(HmilyConfig.class);
hmilySqlParserEngine = ExtensionLoaderFactory.load(HmilySqlParserEngine.class, config.getSqlParserType());
}
}
}
return hmilySqlParserEngine;
} | 3.68 |
hudi_CompactionUtils_getCompactionPlan | /**
* Util method to fetch both compaction and log compaction plan from requestedInstant.
*/
public static HoodieCompactionPlan getCompactionPlan(HoodieTableMetaClient metaClient, Option<byte[]> planContent) {
CompactionPlanMigrator migrator = new CompactionPlanMigrator(metaClient);
try {
HoodieCompactionPlan compactionPlan = TimelineMetadataUtils.deserializeCompactionPlan(planContent.get());
return migrator.upgradeToLatest(compactionPlan, compactionPlan.getVersion());
} catch (IOException e) {
throw new HoodieException(e);
}
} | 3.68 |
hbase_AssignmentVerificationReport_fillUpDispersion | /**
* Use this to project the dispersion scores
*/
public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot,
FavoredNodesPlan newPlan) {
// Set the table name
this.tableName = tableName;
// Get all the regions for this table
List<RegionInfo> regionInfoList = snapshot.getTableToRegionMap().get(tableName);
// Get the total region num for the current table
this.totalRegions = regionInfoList.size();
FavoredNodesPlan plan = null;
if (newPlan == null) {
plan = snapshot.getExistingAssignmentPlan();
} else {
plan = newPlan;
}
// Get the region to region server mapping
Map<ServerName, Integer> primaryRSToRegionCounterMap = new HashMap<>();
Map<ServerName, Set<ServerName>> primaryToSecTerRSMap = new HashMap<>();
// Check the favored nodes and its locality information
// Also keep tracker of the most loaded and least loaded region servers
for (RegionInfo region : regionInfoList) {
try {
// Get the favored nodes from the assignment plan and verify it.
List<ServerName> favoredNodes = plan.getFavoredNodes(region);
if (
favoredNodes == null
|| favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM
) {
regionsWithoutValidFavoredNodes.add(region);
continue;
}
// Get the primary, secondary and tertiary region server
ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal());
ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal());
ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal());
// Update the primary rs to its region set map
Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS);
if (regionCounter == null) {
regionCounter = Integer.valueOf(0);
}
regionCounter = regionCounter.intValue() + 1;
primaryRSToRegionCounterMap.put(primaryRS, regionCounter);
// Update the primary rs to secondary and tertiary rs map
Set<ServerName> secAndTerSet = primaryToSecTerRSMap.get(primaryRS);
if (secAndTerSet == null) {
secAndTerSet = new HashSet<>();
}
secAndTerSet.add(secondaryRS);
secAndTerSet.add(tertiaryRS);
primaryToSecTerRSMap.put(primaryRS, secAndTerSet);
} catch (Exception e) {
LOG.error("Cannot verify the region assignment for region "
+ ((region == null) ? " null " : region.getRegionNameAsString()) + "because of " + e);
}
}
float dispersionScoreSummary = 0;
float dispersionNumSummary = 0;
// Calculate the secondary score for each primary region server
for (Map.Entry<ServerName, Integer> entry : primaryRSToRegionCounterMap.entrySet()) {
ServerName primaryRS = entry.getKey();
Integer regionsOnPrimary = entry.getValue();
// Process the dispersion number and score
float dispersionScore = 0;
int dispersionNum = 0;
if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) {
dispersionNum = primaryToSecTerRSMap.get(primaryRS).size();
dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2);
}
// Update the max dispersion num
if (dispersionNum > this.maxDispersionNum) {
this.maxDispersionNumServerSet.clear();
this.maxDispersionNumServerSet.add(primaryRS);
this.maxDispersionNum = dispersionNum;
} else if (dispersionNum == this.maxDispersionNum) {
this.maxDispersionNumServerSet.add(primaryRS);
}
// Update the min dispersion score
if (dispersionScore < this.minDispersionScore) {
this.minDispersionScoreServerSet.clear();
this.minDispersionScoreServerSet.add(primaryRS);
this.minDispersionScore = dispersionScore;
} else if (dispersionScore == this.minDispersionScore) {
this.minDispersionScoreServerSet.add(primaryRS);
}
// Update the min dispersion num
if (dispersionNum < this.minDispersionNum) {
this.minDispersionNumServerSet.clear();
this.minDispersionNumServerSet.add(primaryRS);
this.minDispersionNum = dispersionNum;
} else if (dispersionNum == this.minDispersionNum) {
this.minDispersionNumServerSet.add(primaryRS);
}
dispersionScoreSummary += dispersionScore;
dispersionNumSummary += dispersionNum;
}
// Update the avg dispersion score
if (primaryRSToRegionCounterMap.keySet().size() != 0) {
this.avgDispersionScore =
dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size();
this.avgDispersionNum =
dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size();
}
} | 3.68 |
framework_Heartbeat_schedule | /**
* Updates the schedule of the heartbeat to match the set interval. A
* negative interval disables the heartbeat.
*/
public void schedule() {
if (interval > 0) {
getLogger()
.fine("Scheduling heartbeat in " + interval + " seconds");
timer.schedule(interval * 1000);
} else {
getLogger().fine("Disabling heartbeat");
timer.cancel();
}
} | 3.68 |
dubbo_MemorySafeLinkedBlockingQueue_setRejector | /**
* set the rejector.
*
* @param rejector the rejector
*/
public void setRejector(final Rejector<E> rejector) {
this.rejector = rejector;
} | 3.68 |
hadoop_ZookeeperUtils_buildQuorum | /**
* Build a quorum list, injecting a ":defaultPort" ref if needed on
* any entry without one
* @param hostAndPorts list of host and port pairs forming the quorum
* @param defaultPort port to append to any entry that lacks one
* @return comma-separated quorum string
*/
public static String buildQuorum(List<HostAndPort> hostAndPorts, int defaultPort) {
List<String> entries = new ArrayList<String>(hostAndPorts.size());
for (HostAndPort hostAndPort : hostAndPorts) {
entries.add(buildQuorumEntry(hostAndPort, defaultPort));
}
return ServiceUtils.join(entries, ",", false);
} | 3.68 |
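A plain-`String` sketch of the same idea; the real helper works on `HostAndPort` objects via `buildQuorumEntry`, so the simple "contains a colon" port-detection rule used here is an assumption.

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class QuorumDemo {
    // Append ":defaultPort" to any entry that lacks an explicit port.
    static String buildQuorum(List<String> hosts, int defaultPort) {
        return hosts.stream()
                .map(h -> h.contains(":") ? h : h + ":" + defaultPort)
                .collect(Collectors.joining(","));
    }

    public static void main(String[] args) {
        System.out.println(buildQuorum(Arrays.asList("zk1", "zk2:2182", "zk3"), 2181));
        // -> zk1:2181,zk2:2182,zk3:2181
    }
}
```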
hbase_MetaFixer_createRegionInfosForHoles | /**
* Create a new {@link RegionInfo} corresponding to each provided "hole" pair.
*/
private static List<RegionInfo>
createRegionInfosForHoles(final List<Pair<RegionInfo, RegionInfo>> holes) {
final List<RegionInfo> newRegionInfos = holes.stream().map(MetaFixer::getHoleCover)
.filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList());
LOG.debug("Constructed {}/{} RegionInfo descriptors corresponding to identified holes.",
newRegionInfos.size(), holes.size());
return newRegionInfos;
} | 3.68 |
morf_ColumnBean_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return this.toStringHelper();
} | 3.68 |
hadoop_BigDecimalSplitter_split | /**
* Returns a list of BigDecimals one element longer than the list of input splits.
* This represents the boundaries between input splits.
* All splits are open on the top end, except the last one.
*
* So the list [0, 5, 8, 12, 18] would represent splits capturing the intervals:
*
* [0, 5)
* [5, 8)
* [8, 12)
* [12, 18] note the closed interval for the last split.
*/
List<BigDecimal> split(BigDecimal numSplits, BigDecimal minVal, BigDecimal maxVal)
throws SQLException {
List<BigDecimal> splits = new ArrayList<BigDecimal>();
// Use numSplits as a hint. May need an extra task if the size doesn't
// divide cleanly.
BigDecimal splitSize = tryDivide(maxVal.subtract(minVal), (numSplits));
if (splitSize.compareTo(MIN_INCREMENT) < 0) {
splitSize = MIN_INCREMENT;
LOG.warn("Set BigDecimal splitSize to MIN_INCREMENT");
}
BigDecimal curVal = minVal;
while (curVal.compareTo(maxVal) <= 0) {
splits.add(curVal);
curVal = curVal.add(splitSize);
}
if (splits.get(splits.size() - 1).compareTo(maxVal) != 0 || splits.size() == 1) {
// We didn't end on the maxVal. Add that to the end of the list.
splits.add(maxVal);
}
return splits;
} | 3.68 |
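A runnable sketch of the splitting loop using the Javadoc's own example range [0, 18]; an exact split size stands in for the `tryDivide()`/`MIN_INCREMENT` handling of the real class.

```java
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;

public class SplitDemo {
    public static void main(String[] args) {
        BigDecimal minVal = BigDecimal.ZERO;
        BigDecimal maxVal = new BigDecimal("18");
        BigDecimal splitSize = new BigDecimal("4.5"); // (18 - 0) / 4 desired splits

        List<BigDecimal> splits = new ArrayList<>();
        BigDecimal curVal = minVal;
        while (curVal.compareTo(maxVal) <= 0) {
            splits.add(curVal);
            curVal = curVal.add(splitSize);
        }
        if (splits.get(splits.size() - 1).compareTo(maxVal) != 0 || splits.size() == 1) {
            splits.add(maxVal); // make sure the last boundary is maxVal itself
        }
        System.out.println(splits); // [0, 4.5, 9.0, 13.5, 18.0]
    }
}
```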
framework_UIConnector_getThemeUrl | /**
* Internal helper to get the theme URL for a given theme
*
* @since 7.3
* @param theme
* the name of the theme
* @return The URL the theme can be loaded from
*/
private String getThemeUrl(String theme) {
String themeUrl = getConnection()
.translateVaadinUri(ApplicationConstants.VAADIN_PROTOCOL_PREFIX
+ "themes/" + theme + "/styles" + ".css");
// Parameter appended to bypass caches after version upgrade.
themeUrl += "?v=" + Version.getFullVersion();
return themeUrl;
} | 3.68 |
hudi_BufferedRandomAccessFile_getFilePointer | /**
* @return current file position
*/
@Override
public long getFilePointer() {
return this.currentPosition;
} | 3.68 |
druid_PoolUpdater_init | /**
* Create a ScheduledExecutorService to remove unused DataSources.
*/
public void init() {
if (inited) {
return;
}
synchronized (this) {
if (inited) {
return;
}
if (intervalSeconds < 10) {
LOG.warn("CAUTION: Purge interval has been set to " + intervalSeconds
+ ". This value should NOT be too small.");
}
if (intervalSeconds <= 0) {
intervalSeconds = DEFAULT_INTERVAL;
}
executor = Executors.newScheduledThreadPool(1);
executor.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
LOG.debug("Purging the DataSource Pool every " + intervalSeconds + "s.");
try {
removeDataSources();
} catch (Exception e) {
LOG.error("Exception occurred while removing DataSources.", e);
}
}
}, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
}
} | 3.68 |
flink_MapTypeInfo_getValueTypeInfo | /** Gets the type information for the values in the map */
public TypeInformation<V> getValueTypeInfo() {
return valueTypeInfo;
} | 3.68 |
hadoop_ProducerConsumer_shutdown | /**
* Shutdown ProducerConsumer worker thread-pool without waiting for
* completion of any pending work.
*/
public void shutdown() {
if (hasWork()) {
LOG.warn("Shutdown() is called but there is still unprocessed work!");
}
executor.shutdownNow();
} | 3.68 |
hbase_CellCounter_main | /**
* Main entry point.
* @param args The command line parameters.
* @throws Exception When running the job fails.
*/
public static void main(String[] args) throws Exception {
int errCode = ToolRunner.run(HBaseConfiguration.create(), new CellCounter(), args);
System.exit(errCode);
} | 3.68 |
flink_HiveShim_isMaterializedView | /** Checks whether a hive table is a materialized view. */
default boolean isMaterializedView(org.apache.hadoop.hive.ql.metadata.Table table) {
return false;
} | 3.68 |
framework_Embedded_getParameter | /**
* Gets the value of an object parameter. Parameters are optional
* information, and they are passed to the instantiated object. Parameters
* are stored as name-value pairs.
*
* @return the Value of parameter or null if not found.
*/
public String getParameter(String name) {
return getState(false).parameters.get(name);
} | 3.68 |
hadoop_TFile_getValue | /**
* Copy value into user-supplied buffer. User supplied buffer must be
* large enough to hold the whole value (starting from the offset). The
* value part of the key-value pair pointed by the current cursor is not
* cached and can only be examined once. Calling any of the following
* functions more than once without moving the cursor will result in
* exception: {@link #getValue(byte[])}, {@link #getValue(byte[], int)},
* {@link #getValueStream}.
*
* @param buf buf.
* @param offset offset.
* @return the length of the value. Does not require
* isValueLengthKnown() to be true.
* @throws IOException raised on errors performing I/O.
*/
public int getValue(byte[] buf, int offset) throws IOException {
DataInputStream dis = getValueStream();
try {
if (isValueLengthKnown()) {
if ((offset | (buf.length - offset - vlen)) < 0) {
throw new IndexOutOfBoundsException(
"Buffer too small to hold value");
}
dis.readFully(buf, offset, vlen);
return vlen;
}
int nextOffset = offset;
while (nextOffset < buf.length) {
int n = dis.read(buf, nextOffset, buf.length - nextOffset);
if (n < 0) {
break;
}
nextOffset += n;
}
if (dis.read() >= 0) {
// attempt to read one more byte to determine whether we reached
// the
// end or not.
throw new IndexOutOfBoundsException(
"Buffer too small to hold value");
}
return nextOffset - offset;
} finally {
dis.close();
}
} | 3.68 |
hadoop_IsActiveServlet_doGet | /**
* Check whether this instance is the Active one.
* @param req HTTP request
* @param resp HTTP response to write to
*/
@Override
public void doGet(
final HttpServletRequest req, final HttpServletResponse resp)
throws IOException {
// By default requests are persistent. We don't want long-lived connections
// on server side.
resp.addHeader("Connection", "close");
if (!isActive()) {
// Report not SC_OK
resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED,
RESPONSE_NOT_ACTIVE);
return;
}
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(RESPONSE_ACTIVE);
resp.getWriter().flush();
} | 3.68 |
framework_SQLContainer_isAutoCommit | /**
* Returns status of the auto commit mode.
*
* @return true if auto commit mode is enabled
*/
public boolean isAutoCommit() {
return autoCommit;
} | 3.68 |
hadoop_TimedHealthReporterService_serviceStart | /**
* Method used to start the health monitoring.
*/
@Override
public void serviceStart() throws Exception {
if (task == null) {
throw new Exception("Health reporting task hasn't been set!");
}
timer = new Timer("HealthReporterService-Timer", true);
long delay = 0;
if (runBeforeStartup) {
delay = intervalMs;
task.run();
}
timer.scheduleAtFixedRate(task, delay, intervalMs);
super.serviceStart();
} | 3.68 |
flink_Tuple21_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple21<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20>
copy() {
return new Tuple21<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16,
this.f17, this.f18, this.f19, this.f20);
} | 3.68 |
querydsl_ComparableExpression_ltAll | /**
* Create a {@code this < all right} expression
*
* @param right rhs of the comparison
* @return this < all right
*/
public BooleanExpression ltAll(SubQueryExpression<? extends T> right) {
return lt(ExpressionUtils.all(right));
} | 3.68 |
framework_VTooltip_initializeAssistiveTooltips | /**
* Initialize the tooltip overlay for assistive devices.
*
* @since 7.2.4
*/
public void initializeAssistiveTooltips() {
updatePosition(null, true);
setTooltipText(new TooltipInfo(" "));
showTooltip();
hideTooltip();
description.getParent().getElement().getStyle().clearWidth();
} | 3.68 |
hadoop_EntityRowKeyPrefix_getRowKeyPrefix | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.application.
* RowKeyPrefix#getRowKeyPrefix()
*/
public byte[] getRowKeyPrefix() {
return super.getRowKey();
} | 3.68 |
hbase_PermissionStorage_getPermissions | /**
* Reads user permission assignments stored in the <code>l:</code> column family of the first
* table row in <code>_acl_</code>.
* <p>
* See {@link PermissionStorage class documentation} for the key structure used for storage.
* </p>
*/
static ListMultimap<String, UserPermission> getPermissions(Configuration conf, byte[] entryName,
Table t, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException {
if (entryName == null) {
entryName = ACL_GLOBAL_NAME;
}
// for normal user tables, we just read the table row from _acl_
ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
Get get = new Get(entryName);
get.addFamily(ACL_LIST_FAMILY);
Result row = null;
if (t == null) {
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
row = table.get(get);
}
}
} else {
row = t.get(get);
}
if (!row.isEmpty()) {
perms = parsePermissions(entryName, row, cf, cq, user, hasFilterUser);
} else {
LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
+ Bytes.toString(entryName));
}
return perms;
} | 3.68 |
hadoop_AbfsOperationMetrics_getBytesSuccessful | /**
*
* @return bytes successfully transferred.
*/
AtomicLong getBytesSuccessful() {
return bytesSuccessful;
} | 3.68 |
hbase_MobUtils_getMobTableDir | /**
* Gets the table dir of the mob files under the qualified HBase root dir. It's
* {rootDir}/mobdir/data/${namespace}/${tableName}
* @param rootDir The qualified path of HBase root directory.
* @param tableName The name of table.
* @return The table dir of the mob file.
*/
public static Path getMobTableDir(Path rootDir, TableName tableName) {
return CommonFSUtils.getTableDir(getMobHome(rootDir), tableName);
} | 3.68 |
querydsl_StringExpression_coalesce | /**
* Create a {@code coalesce(this, args...)} expression
*
* @param args additional arguments
* @return coalesce
*/
@Override
public StringExpression coalesce(String... args) {
Coalesce<String> coalesce = new Coalesce<String>(getType(), mixin);
for (String arg : args) {
coalesce.add(arg);
}
return coalesce.asString();
} | 3.68 |
hudi_KafkaConnectHdfsProvider_buildCheckpointStr | /**
* Convert map contains max offset of each partition to string.
*
* @param topic Topic name
* @param checkpoint Map with partition as key and max offset as value
* @return Checkpoint string
*/
private static String buildCheckpointStr(final String topic,
final HashMap<Integer, Integer> checkpoint) {
final StringBuilder checkpointStr = new StringBuilder();
checkpointStr.append(topic);
for (int i = 0; i < checkpoint.size(); ++i) {
checkpointStr.append(",").append(i).append(":").append(checkpoint.get(i));
}
return checkpointStr.toString();
} | 3.68 |
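A self-contained sketch reproducing the checkpoint string format. Note the loop indexes partitions by position, so it assumes partitions 0..n-1 are all present in the map; the topic name used here is hypothetical.

```java
import java.util.HashMap;

public class CheckpointStrDemo {
    public static void main(String[] args) {
        HashMap<Integer, Integer> checkpoint = new HashMap<>();
        checkpoint.put(0, 100);
        checkpoint.put(1, 250);
        checkpoint.put(2, 75);

        StringBuilder checkpointStr = new StringBuilder("impressions"); // topic name
        for (int i = 0; i < checkpoint.size(); i++) {
            checkpointStr.append(",").append(i).append(":").append(checkpoint.get(i));
        }
        System.out.println(checkpointStr); // impressions,0:100,1:250,2:75
    }
}
```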
hbase_Encryption_getSecretKeyForSubject | /**
* Resolves a key for the given subject
* @return a key for the given subject
* @throws IOException if the key is not found
*/
public static Key getSecretKeyForSubject(String subject, Configuration conf) throws IOException {
KeyProvider provider = getKeyProvider(conf);
if (provider != null) {
try {
Key[] keys = provider.getKeys(new String[] { subject });
if (keys != null && keys.length > 0) {
return keys[0];
}
} catch (Exception e) {
throw new IOException(e);
}
}
throw new IOException("No key found for subject '" + subject + "'");
} | 3.68 |
hadoop_CommitResponse_newInstance | /**
* Create a Commit Response.
* @return Commit Response.
*/
@Private
@Unstable
public static CommitResponse newInstance() {
return Records.newRecord(CommitResponse.class);
} | 3.68 |
Activiti_JsonConverterUtil_gatherLongPropertyFromJsonNodes | /**
* Loops through a list of {@link JsonNode} instances, and stores the given property with given type in the returned list.
*
* In Java 8, this probably could be done a lot cooler.
*/
public static Set<Long> gatherLongPropertyFromJsonNodes(Iterable<JsonNode> jsonNodes, String propertyName) {
Set<Long> result = new HashSet<Long>(); // Using a Set to filter out doubles
for (JsonNode node : jsonNodes) {
if (node.has(propertyName)) {
Long propertyValue = node.get(propertyName).asLong();
if (propertyValue > 0) { // Just to be safe
result.add(propertyValue);
}
}
}
return result;
} | 3.68 |
hbase_CellComparatorImpl_getCellComparator | /**
* Utility method that makes a guess at the comparator to use based on the passed tableName. Use only
* as a last resort when no comparator is specified.
* @return CellComparator to use going off the {@code tableName} passed.
*/
public static CellComparator getCellComparator(byte[] tableName) {
// FYI, TableName.toBytes does not create an array; just returns existing array pointer.
return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes())
? MetaCellComparator.META_COMPARATOR
: CellComparatorImpl.COMPARATOR;
} | 3.68 |
hbase_HBaseTestingUtility_getHBaseClusterInterface | /**
* Returns the HBaseCluster instance.
* <p>
* Returned object can be any of the subclasses of HBaseCluster, and the tests referring this
* should not assume that the cluster is a mini cluster or a distributed one. If the test only
* works on a mini cluster, then specific method {@link #getMiniHBaseCluster()} can be used
* instead w/o the need to type-cast.
*/
public HBaseCluster getHBaseClusterInterface() {
// implementation note: we should rename this method as #getHBaseCluster(),
// but this would require refactoring 90+ calls.
return hbaseCluster;
} | 3.68 |
pulsar_ConsumerConfiguration_setAckTimeout | /**
* Set the timeout for unacked messages, truncated to the nearest millisecond. The timeout needs to be greater than
* 10 seconds.
*
* @param ackTimeout
* for unacked messages.
* @param timeUnit
* unit in which the timeout is provided.
* @return {@link ConsumerConfiguration}
*/
public ConsumerConfiguration setAckTimeout(long ackTimeout, TimeUnit timeUnit) {
long ackTimeoutMillis = timeUnit.toMillis(ackTimeout);
checkArgument(ackTimeoutMillis >= minAckTimeoutMillis,
"Ack timeout should be greater than " + minAckTimeoutMillis + " ms");
conf.setAckTimeoutMillis(timeUnit.toMillis(ackTimeout));
return this;
} | 3.68 |
hudi_BaseHoodieWriteClient_tryUpgrade | /**
* Upgrades the hoodie table if need be when moving to a new Hudi version.
* This method is called within a lock. Try to avoid double locking from within this method.
* @param metaClient instance of {@link HoodieTableMetaClient} to use.
* @param instantTime instant time of interest if we have one.
*/
protected void tryUpgrade(HoodieTableMetaClient metaClient, Option<String> instantTime) {
UpgradeDowngrade upgradeDowngrade =
new UpgradeDowngrade(metaClient, config, context, upgradeDowngradeHelper);
if (upgradeDowngrade.needsUpgradeOrDowngrade(HoodieTableVersion.current())) {
metaClient = HoodieTableMetaClient.reload(metaClient);
// Ensure no inflight commits by setting EAGER policy and explicitly cleaning all failed commits
List<String> instantsToRollback = tableServiceClient.getInstantsToRollback(metaClient, HoodieFailedWritesCleaningPolicy.EAGER, instantTime);
if (!instantsToRollback.isEmpty()) {
Map<String, Option<HoodiePendingRollbackInfo>> pendingRollbacks = tableServiceClient.getPendingRollbackInfos(metaClient);
instantsToRollback.forEach(entry -> pendingRollbacks.putIfAbsent(entry, Option.empty()));
tableServiceClient.rollbackFailedWrites(pendingRollbacks, true);
}
new UpgradeDowngrade(metaClient, config, context, upgradeDowngradeHelper)
.run(HoodieTableVersion.current(), instantTime.orElse(null));
metaClient.reloadActiveTimeline();
}
} | 3.68 |
hbase_ZKSplitLog_isRescanNode | /**
* Checks if the given path represents a rescan node.
* @param zkw reference to the {@link ZKWatcher} which also contains configuration and constants
* @param path the absolute path, starts with '/'
* @return whether the path represents a rescan node
*/
public static boolean isRescanNode(ZKWatcher zkw, String path) {
String prefix = getRescanNode(zkw);
if (path.length() <= prefix.length()) {
return false;
}
for (int i = 0; i < prefix.length(); i++) {
if (prefix.charAt(i) != path.charAt(i)) {
return false;
}
}
return true;
} | 3.68 |
hbase_KeyValue_getKeyOffset | /** Returns Key offset in backing buffer. */
public int getKeyOffset() {
return this.offset + ROW_OFFSET;
} | 3.68 |
flink_ParallelismProvider_getParallelism | /**
* Returns the parallelism for this instance.
*
* <p>The parallelism denotes how many parallel instances of a source or sink will be spawned
* during the execution.
*
* <p>Enforcing a different parallelism for sources/sinks might mess up the changelog if the
* output/input is not {@link ChangelogMode#insertOnly()}. Therefore, a primary key is required
* by which the output/input will be shuffled after/before records leave/enter the {@link
* ScanRuntimeProvider}/{@link SinkRuntimeProvider} implementation.
*
* @return empty if the connector does not provide a custom parallelism, then the planner will
* decide the number of parallel instances by itself.
*/
default Optional<Integer> getParallelism() {
return Optional.empty();
} | 3.68 |
morf_DatabaseMetaDataProvider_loadView | /**
* Loads a view.
*
* @param viewName Name of the view.
* @return The view metadata.
*/
protected View loadView(AName viewName) {
final RealName realViewName = viewNames.get().get(viewName);
if (realViewName == null) {
throw new IllegalArgumentException("View [" + viewName + "] not found.");
}
return new View() {
@Override
public String getName() {
return realViewName.getRealName();
}
@Override
public boolean knowsSelectStatement() {
return false;
}
@Override
public boolean knowsDependencies() {
return false;
}
@Override
public SelectStatement getSelectStatement() {
throw new UnsupportedOperationException("Cannot return SelectStatement as [" + realViewName.getRealName() + "] has been loaded from the database");
}
@Override
public String[] getDependencies() {
throw new UnsupportedOperationException("Cannot return dependencies as [" + realViewName.getRealName() + "] has been loaded from the database");
}
};
} | 3.68 |
hadoop_OBSCommonUtils_deleteObject | /**
* Delete an object. Increments the {@code OBJECT_DELETE_REQUESTS} and write
* operation statistics.
*
* @param owner the owner OBSFileSystem instance
* @param key key to blob to delete.
* @throws IOException on any failure to delete object
*/
static void deleteObject(final OBSFileSystem owner, final String key)
throws IOException {
blockRootDelete(owner.getBucket(), key);
ObsException lastException = null;
for (int retryTime = 1; retryTime <= MAX_RETRY_TIME; retryTime++) {
try {
owner.getObsClient().deleteObject(owner.getBucket(), key);
owner.getSchemeStatistics().incrementWriteOps(1);
return;
} catch (ObsException e) {
lastException = e;
LOG.warn("Delete path failed with [{}], "
+ "retry time [{}] - request id [{}] - "
+ "error code [{}] - error message [{}]",
e.getResponseCode(), retryTime, e.getErrorRequestId(),
e.getErrorCode(), e.getErrorMessage());
if (retryTime < MAX_RETRY_TIME) {
try {
Thread.sleep(DELAY_TIME);
} catch (InterruptedException ie) {
throw translateException("delete", key, e);
}
}
}
}
throw translateException(
String.format("retry max times [%s] delete failed", MAX_RETRY_TIME),
key, lastException);
} | 3.68 |