name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
hbase_RegionServerObserver_postCreateReplicationEndPoint | /**
* This will be called after the replication endpoint is instantiated.
* @param ctx the environment to interact with the framework and region server.
* @param endpoint - the base endpoint for replication
* @return the endpoint to use during replication.
*/
default ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint) {
return endpoint;
} | 3.68 |
framework_VScrollTable_resizeSortedColumnForSortIndicator | /** For internal use only. May be removed or replaced in the future. */
public void resizeSortedColumnForSortIndicator() {
// Force recalculation of the captionContainer element inside the header
// cell to accommodate the size of the sort arrow.
HeaderCell sortedHeader = tHead.getHeaderCell(sortColumn);
if (sortedHeader != null) {
// Mark header as sorted now. Any earlier marking would lead to
// columns with wrong sizes
sortedHeader.setSorted(true);
tHead.resizeCaptionContainer(sortedHeader);
}
// Also recalculate the width of the captionContainer element in the
// previously sorted header, since this now has more room.
HeaderCell oldSortedHeader = tHead.getHeaderCell(oldSortColumn);
if (oldSortedHeader != null) {
tHead.resizeCaptionContainer(oldSortedHeader);
}
} | 3.68 |
hadoop_AbfsClientThrottlingIntercept_initializeSingleton | /**
* Creates a singleton object of the AbfsClientThrottlingIntercept,
* which is shared across all filesystem instances.
* @param abfsConfiguration configuration set.
* @return singleton object of intercept.
*/
static AbfsClientThrottlingIntercept initializeSingleton(AbfsConfiguration abfsConfiguration) {
if (singleton == null) {
LOCK.lock();
try {
if (singleton == null) {
singleton = new AbfsClientThrottlingIntercept(abfsConfiguration);
LOG.debug("Client-side throttling is enabled for the ABFS file system.");
}
} finally {
LOCK.unlock();
}
}
return singleton;
} | 3.68 |
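The snippet above is a double-checked locking singleton guarded by an explicit `ReentrantLock`. A minimal, self-contained sketch of the same pattern (hypothetical `ThrottlingIntercept` name and a plain `Object` standing in for the ABFS configuration, not the actual Hadoop classes):

```java
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical stand-in for AbfsClientThrottlingIntercept.
public class ThrottlingIntercept {
    private static final ReentrantLock LOCK = new ReentrantLock();
    // volatile matters in this sketch: without it the unsynchronized first read
    // could observe a partially constructed instance.
    private static volatile ThrottlingIntercept singleton;

    private final Object config;

    private ThrottlingIntercept(Object config) {
        this.config = config;
    }

    static ThrottlingIntercept initializeSingleton(Object config) {
        if (singleton == null) {            // first check: cheap, lock-free fast path
            LOCK.lock();
            try {
                if (singleton == null) {    // second check: only one thread constructs
                    singleton = new ThrottlingIntercept(config);
                }
            } finally {
                LOCK.unlock();
            }
        }
        return singleton;
    }

    public static void main(String[] args) {
        ThrottlingIntercept a = initializeSingleton(new Object());
        ThrottlingIntercept b = initializeSingleton(new Object());
        System.out.println("same instance: " + (a == b)); // true
    }
}
```

The first null check keeps the common path lock-free; the second check under the lock ensures only one instance is ever constructed.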
pulsar_BrokersBase_closeAndReCheck | /**
* Close the producer and reader and then re-check whether the operation succeeded.
*
* Re-check
* - Producer: if the close fails, we log an error to notify the user.
* - Consumer: if the close fails, we force-delete the subscription.
*
* @param producer Producer
* @param reader Reader
* @param topic Topic
* @param subscriptionName Subscription name
*/
private CompletableFuture<Void> closeAndReCheck(Producer<String> producer, Reader<String> reader,
Topic topic, String subscriptionName) {
// no matter exception or success, we still need to
// close producer/reader
CompletableFuture<Void> producerFuture = producer.closeAsync();
CompletableFuture<Void> readerFuture = reader.closeAsync();
List<CompletableFuture<Void>> futures = new ArrayList<>(2);
futures.add(producerFuture);
futures.add(readerFuture);
return FutureUtil.waitForAll(Collections.unmodifiableList(futures))
.exceptionally(closeException -> {
if (readerFuture.isCompletedExceptionally()) {
LOG.error("[{}] Close reader fail while heath check.", clientAppId());
Subscription subscription =
topic.getSubscription(subscriptionName);
// re-check subscription after reader close
if (subscription != null) {
LOG.warn("[{}] Force delete subscription {} "
+ "when it still exists after the"
+ " reader is closed.",
clientAppId(), subscription);
subscription.deleteForcefully()
.exceptionally(ex -> {
LOG.error("[{}] Force delete subscription fail"
+ " while health check",
clientAppId(), ex);
return null;
});
}
} else {
// the producer future failed.
LOG.error("[{}] Failed to close the producer during health check.", clientAppId());
}
return null;
});
} | 3.68 |
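For illustration, a compact stand-alone example of the same close-and-recheck idea using plain `CompletableFuture`s (hypothetical futures and log output; the actual Pulsar producer/reader and subscription cleanup are omitted):

```java
import java.util.concurrent.CompletableFuture;

public class CloseAndRecheckSketch {
    public static void main(String[] args) {
        // Hypothetical close futures; in the snippet above they come from
        // producer.closeAsync() and reader.closeAsync().
        CompletableFuture<Void> producerClose = CompletableFuture.completedFuture(null);
        CompletableFuture<Void> readerClose =
                CompletableFuture.failedFuture(new IllegalStateException("reader close failed"));

        CompletableFuture<Void> both = CompletableFuture.allOf(producerClose, readerClose)
                .exceptionally(ex -> {
                    if (readerClose.isCompletedExceptionally()) {
                        // the real code force-deletes the subscription the reader left behind
                        System.err.println("reader close failed, cleaning up: " + ex.getMessage());
                    } else {
                        System.err.println("producer close failed: " + ex.getMessage());
                    }
                    return null; // swallow the failure, as closeAndReCheck() does
                });

        both.join(); // completes normally because exceptionally() recovered
    }
}
```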
framework_Overlay_getOverlayContainer | /**
* Gets the 'overlay container' element.
*
* @return the overlay container element
*/
public com.google.gwt.user.client.Element getOverlayContainer() {
return RootPanel.get().getElement();
} | 3.68 |
hbase_HFileBlock_startWriting | /**
* Starts writing into the block. The previous block's data is discarded.
* @return the stream the user can write their data into
*/
DataOutputStream startWriting(BlockType newBlockType) throws IOException {
if (state == State.BLOCK_READY && startOffset != -1) {
// We had a previous block that was written to a stream at a specific
// offset. Save that offset as the last offset of a block of that type.
prevOffsetByType[blockType.getId()] = startOffset;
}
startOffset = -1;
blockType = newBlockType;
baosInMemory.reset();
baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
state = State.WRITING;
// We will compress it later in finishBlock()
userDataStream = new ByteBufferWriterDataOutputStream(baosInMemory);
if (newBlockType == BlockType.DATA) {
this.dataBlockEncoder.startBlockEncoding(dataBlockEncodingCtx, userDataStream);
}
return userDataStream;
} | 3.68 |
flink_Conditions_haveLeafArgumentTypes | /**
* Tests leaf argument types of a method against the given predicate.
*
* <p>See {@link #haveLeafTypes(DescribedPredicate)} for details.
*/
public static ArchCondition<JavaMethod> haveLeafArgumentTypes(
DescribedPredicate<JavaClass> typePredicate) {
return new ArchCondition<JavaMethod>(
"have leaf argument types" + typePredicate.getDescription()) {
@Override
public void check(JavaMethod method, ConditionEvents events) {
final List<JavaClass> leafArgumentTypes =
method.getParameterTypes().stream()
.flatMap(argumentType -> getLeafTypes(argumentType).stream())
.collect(Collectors.toList());
for (JavaClass leafType : leafArgumentTypes) {
if (!isJavaClass(leafType)) {
continue;
}
if (!typePredicate.test(leafType)) {
final String message =
String.format(
"%s: Argument leaf type %s does not satisfy: %s",
method.getFullName(),
leafType.getName(),
typePredicate.getDescription());
events.add(SimpleConditionEvent.violated(method, message));
}
}
}
};
} | 3.68 |
hadoop_ConnectionContext_isClosed | /**
* Check if the connection is closed.
*
* @return If the connection is closed.
*/
public synchronized boolean isClosed() {
return this.closed;
} | 3.68 |
hbase_HRegion_compact | /**
* We are trying to remove / relax the region read lock for compaction. Let's see what the
* potential race conditions are among the operations (user scan, region split, region close and
* region bulk load):
*   user scan        ---> region read lock
*   region split     --> region close first --> region write lock
*   region close     --> region write lock
*   region bulk load --> region write lock
* Read lock is compatible with read lock ---> no problem with user scan/read.
* Region bulk load does not cause a problem for compaction (no consistency problem, the store lock
* will help the store file accounting). They can run almost concurrently at the region level.
* The only remaining race condition is between region close and compaction, so we will evaluate,
* below, how region close interferes with compaction if compaction does not acquire the region
* read lock.
* Here are the steps for compaction:
*   1. obtain the list of StoreFile's
*   2. create StoreFileScanner's based on the list from #1
*   3. perform the compaction and save the resulting files under a tmp dir
*   4. swap in the compacted files
* #1 is guarded by the store lock. This patch does not change this --> no worse or better.
* For #2, we obtain the smallest read point (for the region) across all the Scanners (for both the
* default compactor and the stripe compactor). The read points are for user scans. The region
* keeps the read points for all currently open user scanners. Compaction needs to know the
* smallest read point so that, during the re-write of the hfiles, it can remove the mvcc points
* for the cells if their mvccs are older than the smallest, since they are not needed anymore.
* This will not conflict with compaction.
* For #3, it can be performed in parallel to other operations.
* For #4, bulk load and compaction don't conflict with each other on the region level (for
* multi-family atomicity).
* Region close and compaction are guarded pretty well by the 'writestate'. In HRegion#doClose(),
* we have:
*   synchronized (writestate) {
*     // Disable compacting and flushing by background threads for this region.
*     canFlush = !writestate.readOnly;
*     writestate.writesEnabled = false;
*     LOG.debug("Closing " + this + ": disabling compactions & flushes");
*     waitForFlushesAndCompactions();
*   }
* waitForFlushesAndCompactions() would wait for writestate.compacting to come down to 0, and in
* HRegion.compact():
*   try {
*     synchronized (writestate) {
*       if (writestate.writesEnabled) {
*         wasStateSet = true;
*         ++writestate.compacting;
*       } else {
*         String msg = "NOT compacting region " + this + ". Writes disabled.";
*         LOG.info(msg);
*         status.abort(msg);
*         return false;
*       }
*     }
* Also in compactor.performCompaction(), we check periodically to see if a system stop is
* requested:
*   if (closeChecker != null && closeChecker.isTimeLimit(store, now)) {
*     progress.cancel();
*     return false;
*   }
*   if (closeChecker != null && closeChecker.isSizeLimit(store, len)) {
*     progress.cancel();
*     return false;
*   }
*/
public boolean compact(CompactionContext compaction, HStore store,
ThroughputController throughputController, User user) throws IOException {
assert compaction != null && compaction.hasSelection();
assert !compaction.getRequest().getFiles().isEmpty();
if (this.closing.get() || this.closed.get()) {
LOG.debug("Skipping compaction on " + this + " because closing/closed");
store.cancelRequestedCompaction(compaction);
return false;
}
if (compaction.getRequest().isAllFiles() && shouldForbidMajorCompaction()) {
LOG.warn("Skipping major compaction on " + this
+ " because this cluster is transiting sync replication state"
+ " from STANDBY to DOWNGRADE_ACTIVE");
store.cancelRequestedCompaction(compaction);
return false;
}
MonitoredTask status = null;
boolean requestNeedsCancellation = true;
try {
byte[] cf = Bytes.toBytes(store.getColumnFamilyName());
if (stores.get(cf) != store) {
LOG.warn("Store " + store.getColumnFamilyName() + " on region " + this
+ " has been re-instantiated, cancel this compaction request. "
+ " It may be caused by the roll back of split transaction");
return false;
}
status = TaskMonitor.get().createStatus("Compacting " + store + " in " + this);
if (this.closed.get()) {
String msg = "Skipping compaction on " + this + " because closed";
LOG.debug(msg);
status.abort(msg);
return false;
}
boolean wasStateSet = false;
try {
synchronized (writestate) {
if (writestate.writesEnabled) {
wasStateSet = true;
writestate.compacting.incrementAndGet();
} else {
String msg = "NOT compacting region " + this + ". Writes disabled.";
LOG.info(msg);
status.abort(msg);
return false;
}
}
LOG.info("Starting compaction of {} in {}{}", store, this,
(compaction.getRequest().isOffPeak() ? " as an off-peak compaction" : ""));
doRegionCompactionPrep();
try {
status.setStatus("Compacting store " + store);
// We no longer need to cancel the request on the way out of this
// method because Store#compact will clean up unconditionally
requestNeedsCancellation = false;
store.compact(compaction, throughputController, user);
} catch (InterruptedIOException iioe) {
String msg = "region " + this + " compaction interrupted";
LOG.info(msg, iioe);
status.abort(msg);
return false;
}
} finally {
if (wasStateSet) {
synchronized (writestate) {
writestate.compacting.decrementAndGet();
if (writestate.compacting.get() <= 0) {
writestate.notifyAll();
}
}
}
}
status.markComplete("Compaction complete");
return true;
} finally {
if (requestNeedsCancellation) store.cancelRequestedCompaction(compaction);
if (status != null) {
LOG.debug("Compaction status journal for {}:\n{}", this.getRegionInfo().getEncodedName(),
status.prettyPrintJournal());
status.cleanup();
}
}
} | 3.68 |
hbase_TableMapReduceUtil_updateMap | /**
* Add entries to <code>packagedClasses</code> corresponding to class files contained in
* <code>jar</code>.
* @param jar The jar whose content to list.
* @param packagedClasses map[class -> jar]
*/
private static void updateMap(String jar, Map<String, String> packagedClasses)
throws IOException {
if (null == jar || jar.isEmpty()) {
return;
}
ZipFile zip = null;
try {
zip = new ZipFile(jar);
for (Enumeration<? extends ZipEntry> iter = zip.entries(); iter.hasMoreElements();) {
ZipEntry entry = iter.nextElement();
if (entry.getName().endsWith("class")) {
packagedClasses.put(entry.getName(), jar);
}
}
} finally {
if (null != zip) zip.close();
}
} | 3.68 |
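Because `ZipFile` implements `Closeable`, the same jar scan can also be written with try-with-resources and the stream API. A sketch under that assumption (standalone class; the jar path in `main` is hypothetical):

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

public class JarClassLister {

    /** Maps every ".class" entry in the given jar to the jar path. */
    static void updateMap(String jar, Map<String, String> packagedClasses) throws IOException {
        if (jar == null || jar.isEmpty()) {
            return;
        }
        try (ZipFile zip = new ZipFile(jar)) { // closed automatically, even on exceptions
            zip.stream()
               .map(ZipEntry::getName)
               .filter(name -> name.endsWith(".class"))
               .forEach(name -> packagedClasses.put(name, jar));
        }
    }

    public static void main(String[] args) throws IOException {
        Map<String, String> classes = new HashMap<>();
        updateMap(args.length > 0 ? args[0] : "example.jar", classes); // hypothetical jar path
        classes.forEach((clazz, jar) -> System.out.println(clazz + " -> " + jar));
    }
}
```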
framework_ContainerHierarchicalWrapper_getType | /*
* Gets the data type of all Properties identified by the given Property ID.
* Don't add a JavaDoc comment here, we use the default documentation from
* implemented interface.
*/
@Override
public Class<?> getType(Object propertyId) {
return container.getType(propertyId);
} | 3.68 |
pulsar_InetAddressUtils_isIPv6Address | /**
* Checks whether the parameter is a valid IPv6 address (including compressed).
*
* @param input the address string to check for validity
* @return true if the input parameter is a valid standard or compressed IPv6 address
*/
public static boolean isIPv6Address(final String input) {
return isIPv6StdAddress(input) || isIPv6HexCompressedAddress(input);
} | 3.68 |
morf_DeleteStatement_getWhereCriterion | /**
* Gets the where criterion.
*
* @return the where criterion
*/
public Criterion getWhereCriterion() {
return whereCriterion;
} | 3.68 |
flink_BlobOutputStream_sendPutHeader | /**
* Constructs and writes the header data for a PUT request to the given output stream.
*
* @param outputStream the output stream to write the PUT header data to
* @param jobId the ID of job the BLOB belongs to (or <tt>null</tt> if job-unrelated)
* @param blobType whether the BLOB should become permanent or transient
* @throws IOException thrown if an I/O error occurs while writing the header data to the output
* stream
*/
private static void sendPutHeader(
OutputStream outputStream, @Nullable JobID jobId, BlobKey.BlobType blobType)
throws IOException {
// Signal type of operation
outputStream.write(PUT_OPERATION);
if (jobId == null) {
outputStream.write(JOB_UNRELATED_CONTENT);
} else {
outputStream.write(JOB_RELATED_CONTENT);
outputStream.write(jobId.getBytes());
}
outputStream.write(blobType.ordinal());
} | 3.68 |
framework_Embedded_getAlternateText | /**
* Gets this component's "alt-text".
*
* @see #setAlternateText(String)
*/
public String getAlternateText() {
return getState(false).altText;
} | 3.68 |
hadoop_NodePlan_setURI | /**
* Sets the DataNodeURI.
*
* @param dataNodeName - String
*/
public void setURI(String dataNodeName) {
this.nodeName = dataNodeName;
} | 3.68 |
hudi_HoodieMetrics_reportMetrics | /**
* Given a commit action, metrics name and value this method reports custom metrics.
*/
public void reportMetrics(String commitAction, String metricName, long value) {
metrics.registerGauge(getMetricsName(commitAction, metricName), value);
} | 3.68 |
hbase_RegionNormalizerWorkQueue_putFirst | /**
* Inserts the specified element at the head of the queue.
* @param e the element to add
*/
public void putFirst(E e) {
if (e == null) {
throw new NullPointerException();
}
putAllFirst(Collections.singleton(e));
} | 3.68 |
hadoop_AuditingIntegration_updateCommonContextOnCommitterExit | /**
* Remove commit info at the end of the task or job.
*/
public static void updateCommonContextOnCommitterExit() {
currentAuditContext().remove(PARAM_JOB_ID);
currentAuditContext().remove(CONTEXT_ATTR_TASK_ATTEMPT_ID);
} | 3.68 |
morf_HumanReadableStatementProducer_addIndex | /** @see org.alfasoftware.morf.upgrade.SchemaEditor#addIndex(java.lang.String, org.alfasoftware.morf.metadata.Index) **/
@Override
public void addIndex(String tableName, Index index) {
consumer.schemaChange(HumanReadableStatementHelper.generateAddIndexString(tableName, index));
} | 3.68 |
pulsar_Transactions_getPendingAckStats | /**
* Get transaction pending ack stats.
*
* @param topic the topic of this transaction pending ack stats
* @param subName the subscription name of this transaction pending ack stats
* @return the stats of transaction pending ack.
*/
default TransactionPendingAckStats getPendingAckStats(String topic, String subName) throws PulsarAdminException {
return getPendingAckStats(topic, subName, false);
} | 3.68 |
flink_StreamProjection_projectTuple16 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>
SingleOutputStreamOperator<
Tuple16<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15>>
projectTuple16() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>
tType =
new TupleTypeInfo<
Tuple16<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple16<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
querydsl_Expressions_numberOperation | /**
* Create a new Operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
public static <T extends Number & Comparable<?>> NumberOperation<T> numberOperation(Class<? extends T> type,
Operator operator, Expression<?>... args) {
return new NumberOperation<T>(type, operator, args);
} | 3.68 |
hbase_BucketAllocator_freeBytes | /**
* How many more bytes can be allocated from the currently claimed blocks of this bucket size
*/
public long freeBytes() {
return freeCount * itemSize;
} | 3.68 |
flink_CopyOnWriteStateMap_incrementalRehash | /** Runs a number of steps for incremental rehashing. */
@SuppressWarnings("unchecked")
private void incrementalRehash() {
StateMapEntry<K, N, S>[] oldMap = primaryTable;
StateMapEntry<K, N, S>[] newMap = incrementalRehashTable;
int oldCapacity = oldMap.length;
int newMask = newMap.length - 1;
int requiredVersion = highestRequiredSnapshotVersion;
int rhIdx = rehashIndex;
int transferred = 0;
// we migrate a certain minimum amount of entries from the old to the new table
while (transferred < MIN_TRANSFERRED_PER_INCREMENTAL_REHASH) {
StateMapEntry<K, N, S> e = oldMap[rhIdx];
while (e != null) {
// copy-on-write check for entry
if (e.entryVersion < requiredVersion) {
e = new StateMapEntry<>(e, stateMapVersion);
}
StateMapEntry<K, N, S> n = e.next;
int pos = e.hash & newMask;
e.next = newMap[pos];
newMap[pos] = e;
e = n;
++transferred;
}
oldMap[rhIdx] = null;
if (++rhIdx == oldCapacity) {
// here, the rehash is complete and we release resources and reset fields
primaryTable = newMap;
incrementalRehashTable = (StateMapEntry<K, N, S>[]) EMPTY_TABLE;
primaryTableSize += incrementalRehashTableSize;
incrementalRehashTableSize = 0;
rehashIndex = 0;
return;
}
}
// sync our local bookkeeping with the official bookkeeping fields
primaryTableSize -= transferred;
incrementalRehashTableSize += transferred;
rehashIndex = rhIdx;
} | 3.68 |
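The idea behind the method above - migrating only a bounded number of buckets per call so that no single operation pays the full rehash cost - can be shown on a much simpler chained hash map. This is a sketch of the technique only (hypothetical `IncrementalRehashMap`); it deliberately omits the snapshot versioning and copy-on-write handling that `CopyOnWriteStateMap` needs:

```java
import java.util.Objects;

/** Minimal chained hash map that migrates only a few buckets per operation. */
public class IncrementalRehashMap<K, V> {
    private static final int MIN_TRANSFERRED_PER_STEP = 4;

    private static final class Entry<K, V> {
        final K key; V value; final int hash; Entry<K, V> next;
        Entry(K key, V value, int hash, Entry<K, V> next) {
            this.key = key; this.value = value; this.hash = hash; this.next = next;
        }
    }

    private Entry<K, V>[] primary = newTable(8);
    private Entry<K, V>[] rehashTarget;   // non-null only while a rehash is in flight
    private int rehashIndex;              // all old buckets below this index are migrated

    @SuppressWarnings("unchecked")
    private static <K, V> Entry<K, V>[] newTable(int size) {
        return (Entry<K, V>[]) new Entry[size];
    }

    /** Called when the map gets too dense; starts (but does not finish) a rehash. */
    public void startRehash() {
        if (rehashTarget == null) {
            rehashTarget = newTable(primary.length * 2);
            rehashIndex = 0;
        }
    }

    public void put(K key, V value) {
        if (rehashTarget != null) {
            incrementalRehash();          // pay a small, bounded amount of rehash work
        }
        int hash = Objects.hashCode(key);
        Entry<K, V>[] table = tableFor(hash);
        int pos = hash & (table.length - 1);
        table[pos] = new Entry<>(key, value, hash, table[pos]);
    }

    public V get(K key) {
        int hash = Objects.hashCode(key);
        Entry<K, V>[] table = tableFor(hash);
        for (Entry<K, V> e = table[hash & (table.length - 1)]; e != null; e = e.next) {
            if (Objects.equals(e.key, key)) {
                return e.value;
            }
        }
        return null;
    }

    /** Buckets already migrated live in the new table; the rest still live in the old one. */
    private Entry<K, V>[] tableFor(int hash) {
        if (rehashTarget == null) {
            return primary;
        }
        return (hash & (primary.length - 1)) < rehashIndex ? rehashTarget : primary;
    }

    /** Migrates at least MIN_TRANSFERRED_PER_STEP entries, mirroring incrementalRehash() above. */
    private void incrementalRehash() {
        int newMask = rehashTarget.length - 1;
        int transferred = 0;
        while (transferred < MIN_TRANSFERRED_PER_STEP && rehashIndex < primary.length) {
            Entry<K, V> e = primary[rehashIndex];
            while (e != null) {
                Entry<K, V> next = e.next;
                int pos = e.hash & newMask;
                e.next = rehashTarget[pos];
                rehashTarget[pos] = e;
                e = next;
                ++transferred;
            }
            primary[rehashIndex++] = null;
        }
        if (rehashIndex == primary.length) {  // rehash complete: swap tables and reset
            primary = rehashTarget;
            rehashTarget = null;
            rehashIndex = 0;
        }
    }

    public static void main(String[] args) {
        IncrementalRehashMap<String, Integer> map = new IncrementalRehashMap<>();
        for (int i = 0; i < 16; i++) {
            map.put("k" + i, i);
        }
        map.startRehash();
        map.put("k16", 16);               // this call also migrates a few buckets
        System.out.println(map.get("k3") + " " + map.get("k16")); // 3 16
    }
}
```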
pulsar_ConcurrentOpenHashMap_forEach | /**
* Iterate over all the entries in the map and apply the processor function to each of them.
* <p>
* <b>Warning: thread safety is not guaranteed.</b>
* @param processor the function to apply to each entry
*/
public void forEach(BiConsumer<? super K, ? super V> processor) {
for (int i = 0; i < sections.length; i++) {
sections[i].forEach(processor);
}
} | 3.68 |
flink_OuterJoinPaddingUtil_padLeft | /**
* Returns a padding result with the given left row.
*
* @param leftRow the left row to pad
* @return the reusable null padding result
*/
public final RowData padLeft(RowData leftRow) {
return joinedRow.replace(leftRow, rightNullPaddingRow);
} | 3.68 |
hadoop_JWTRedirectAuthenticationHandler_validateSignature | /**
* Verify the signature of the JWT token in this method. This method depends
* on the public key that was established during init based upon the
* provisioned public key. Override this method in subclasses in order to
* customize the signature verification behavior.
*
* @param jwtToken the token that contains the signature to be validated
* @return true if the signature verifies successfully; false otherwise
*/
protected boolean validateSignature(SignedJWT jwtToken) {
boolean valid = false;
if (JWSObject.State.SIGNED == jwtToken.getState()) {
LOG.debug("JWT token is in a SIGNED state");
if (jwtToken.getSignature() != null) {
LOG.debug("JWT token signature is not null");
try {
JWSVerifier verifier = new RSASSAVerifier(publicKey);
if (jwtToken.verify(verifier)) {
valid = true;
LOG.debug("JWT token has been successfully verified");
} else {
LOG.warn("JWT signature verification failed.");
}
} catch (JOSEException je) {
LOG.warn("Error while validating signature", je);
}
}
}
return valid;
} | 3.68 |
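A self-contained sketch of the same verification flow, assuming the Nimbus JOSE+JWT library (`com.nimbusds:nimbus-jose-jwt`) is on the classpath; it signs a throwaway token with a locally generated RSA key pair so the example runs without any provisioned key:

```java
import com.nimbusds.jose.JWSAlgorithm;
import com.nimbusds.jose.JWSHeader;
import com.nimbusds.jose.JWSVerifier;
import com.nimbusds.jose.crypto.RSASSASigner;
import com.nimbusds.jose.crypto.RSASSAVerifier;
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.jwt.SignedJWT;

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.interfaces.RSAPublicKey;

public class JwtSignatureCheck {
    public static void main(String[] args) throws Exception {
        // Generate a throwaway RSA key pair; in the handler above the public key
        // is provisioned at init time instead.
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
        gen.initialize(2048);
        KeyPair keyPair = gen.generateKeyPair();

        // Build and sign a token.
        SignedJWT jwt = new SignedJWT(
                new JWSHeader(JWSAlgorithm.RS256),
                new JWTClaimsSet.Builder().subject("alice").build());
        jwt.sign(new RSASSASigner(keyPair.getPrivate()));

        // Verify it the same way validateSignature() does.
        JWSVerifier verifier = new RSASSAVerifier((RSAPublicKey) keyPair.getPublic());
        System.out.println("signature valid: " + jwt.verify(verifier));
    }
}
```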
framework_AbstractListing_deserializeDeclarativeRepresentation | /**
* Deserializes a string to a data item.
* <p>
* The default implementation is able to handle only {@link String} as the item
* type. A {@link ClassCastException} will be thrown if {@code T} is not a
* {@link String}.
*
* @see #serializeDeclarativeRepresentation(Object)
*
* @param item
* string to deserialize
* @throws ClassCastException
* if type {@code T} is not a {@link String}
* @return deserialized item
*/
protected T deserializeDeclarativeRepresentation(String item) {
return (T) item;
} | 3.68 |
hbase_HFileBlock_allocateBufferForUnpacking | /**
* Always allocates a new buffer of the correct size. Copies header bytes from the existing
* buffer. Does not change header fields. Reserve room to keep checksum bytes too.
*/
private ByteBuff allocateBufferForUnpacking() {
int headerSize = headerSize();
int capacityNeeded = headerSize + uncompressedSizeWithoutHeader;
ByteBuff source = bufWithoutChecksum.duplicate();
ByteBuff newBuf = allocator.allocate(capacityNeeded);
// Copy header bytes into newBuf.
source.position(0);
newBuf.put(0, source, 0, headerSize);
// set limit to exclude next block's header
newBuf.limit(capacityNeeded);
return newBuf;
} | 3.68 |
hmily_HmilyLockCacheManager_removeByKey | /**
* remove guava cache by key.
*
* @param lockId guava cache key.
*/
public void removeByKey(final String lockId) {
if (Objects.nonNull(lockId)) {
loadingCache.invalidate(lockId);
}
} | 3.68 |
hadoop_FilterFileSystem_listLocatedStatus | /** List files and its block locations in a directory. */
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws IOException {
return fs.listLocatedStatus(f);
} | 3.68 |
framework_CustomizedSystemMessages_setCommunicationErrorNotificationEnabled | /**
* Enables or disables the notification. If disabled, the set URL (or
* current) is loaded directly.
*
* @param communicationErrorNotificationEnabled
* true = enabled, false = disabled
*/
public void setCommunicationErrorNotificationEnabled(
boolean communicationErrorNotificationEnabled) {
this.communicationErrorNotificationEnabled = communicationErrorNotificationEnabled;
} | 3.68 |
framework_RenderInformation_updateSize | /**
* Update the size of the widget.
*
* @param element the element whose size to update
*
* @return true if the size has changed since last update
*
* @since 7.2
*/
public boolean updateSize(Element element) {
return updateSize(DOM.asOld(element));
} | 3.68 |
hbase_RestoreSnapshotProcedure_updateTableDescriptor | /**
* Update descriptor
* @param env MasterProcedureEnv
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor);
} | 3.68 |
pulsar_MLTransactionSequenceIdGenerator_onManagedLedgerLastLedgerInitialize | // When we don't roll over the ledger, we can initialize the sequenceId from the last-add-confirmed transaction metadata entry
@Override
public CompletableFuture<Void> onManagedLedgerLastLedgerInitialize(String name, LedgerHandle lh) {
CompletableFuture<Void> promise = new CompletableFuture<>();
if (lh.getLastAddConfirmed() >= 0) {
lh.readAsync(lh.getLastAddConfirmed(), lh.getLastAddConfirmed()).whenComplete((entries, ex) -> {
if (ex != null) {
log.error("[{}] Read last entry error.", name, ex);
promise.completeExceptionally(ex);
} else {
if (entries != null) {
try {
LedgerEntry ledgerEntry = entries.getEntry(lh.getLastAddConfirmed());
if (ledgerEntry != null) {
List<TransactionMetadataEntry> transactionLogs =
MLTransactionLogImpl.deserializeEntry(ledgerEntry.getEntryBuffer());
if (!CollectionUtils.isEmpty(transactionLogs)){
TransactionMetadataEntry lastConfirmEntry =
transactionLogs.get(transactionLogs.size() - 1);
this.sequenceId.set(lastConfirmEntry.getMaxLocalTxnId());
}
}
entries.close();
promise.complete(null);
} catch (Exception e) {
entries.close();
log.error("[{}] Failed to recover the tc sequenceId from the last add confirmed entry.",
name, e);
promise.completeExceptionally(e);
}
} else {
promise.complete(null);
}
}
});
} else {
promise.complete(null);
}
return promise;
} | 3.68 |
hadoop_StageConfig_getConf | /**
* Get configuration.
* @return the configuration
*/
public Configuration getConf() {
return conf;
} | 3.68 |
framework_BaseLayoutTestUI_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
// TODO Auto-generated method stub
return null;
} | 3.68 |
hbase_KeyValueTestUtil_containsIgnoreMvccVersion | /**
* Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is
* made without distinguishing MVCC version of the KeyValues
* @return true if KeyValues from kvCollection2 are contained in kvCollection1
*/
public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1,
Collection<? extends Cell> kvCollection2) {
for (Cell kv1 : kvCollection1) {
boolean found = false;
for (Cell kv2 : kvCollection2) {
if (PrivateCellUtil.equalsIgnoreMvccVersion(kv1, kv2)) found = true;
}
if (!found) return false;
}
return true;
} | 3.68 |
hibernate-validator_AnnotationProxy_equals | /**
* Performs an equality check as described in {@link Annotation#equals(Object)}.
*
* @param obj The object to compare
*
* @return Whether the given object is equal to this annotation proxy or not
*
* @see Annotation#equals(Object)
*/
@Override
public boolean equals(Object obj) {
if ( this == obj ) {
return true;
}
if ( obj == null ) {
return false;
}
if ( !descriptor.getType().isInstance( obj ) ) {
return false;
}
Annotation other = descriptor.getType().cast( obj );
Map<String, Object> otherAttributes = getAnnotationAttributes( other );
if ( descriptor.getAttributes().size() != otherAttributes.size() ) {
return false;
}
// compare annotation member values
for ( Entry<String, Object> member : descriptor.getAttributes().entrySet() ) {
Object value = member.getValue();
Object otherValue = otherAttributes.get( member.getKey() );
if ( !areEqual( value, otherValue ) ) {
return false;
}
}
return true;
} | 3.68 |
flink_TopologyGraph_unlink | /**
* Remove the edge from `from` node to `to` node. If there is no edge between them then do
* nothing.
*/
void unlink(ExecNode<?> from, ExecNode<?> to) {
TopologyNode fromNode = getOrCreateTopologyNode(from);
TopologyNode toNode = getOrCreateTopologyNode(to);
fromNode.outputs.remove(toNode);
toNode.inputs.remove(fromNode);
} | 3.68 |
morf_TableNameDecorator_getName | /**
* @see org.alfasoftware.morf.metadata.Table#getName()
*/
@Override
public String getName() {
return name;
} | 3.68 |
flink_LogicalType_is | /**
* Returns whether this type belongs to the given type {@code family} or not.
*
* @param family The family type to check against for equality
*/
public boolean is(LogicalTypeFamily family) {
return typeRoot.getFamilies().contains(family);
} | 3.68 |
flink_SourceCoordinatorContext_unregisterSourceReader | /**
* Unregister a source reader.
*
* @param subtaskId the subtask id of the source reader.
* @param attemptNumber the attempt number of the source reader.
*/
void unregisterSourceReader(int subtaskId, int attemptNumber) {
final Map<Integer, ReaderInfo> attemptReaders = registeredReaders.get(subtaskId);
if (attemptReaders != null) {
attemptReaders.remove(attemptNumber);
if (attemptReaders.isEmpty()) {
registeredReaders.remove(subtaskId);
}
}
} | 3.68 |
pulsar_PulsarConfigurationLoader_create | /**
* Creates PulsarConfiguration and loads it with populated attribute values from provided Properties object.
*
* @param properties The properties to populate the attributes from
* @throws IOException
* @throws IllegalArgumentException
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public static <T extends PulsarConfiguration> T create(Properties properties,
Class<? extends PulsarConfiguration> clazz) throws IOException, IllegalArgumentException {
requireNonNull(properties);
T configuration;
try {
configuration = (T) clazz.getDeclaredConstructor().newInstance();
configuration.setProperties(properties);
update((Map) properties, configuration);
} catch (InstantiationException | IllegalAccessException
| NoSuchMethodException | InvocationTargetException e) {
throw new IllegalArgumentException("Failed to instantiate " + clazz.getName(), e);
}
return configuration;
} | 3.68 |
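The core mechanism - instantiating the configuration class through its no-arg constructor and copying matching keys from a `Properties` object onto its fields - can be sketched with plain reflection. `ServerConfig` and the tiny type-conversion table below are hypothetical; the real `update(...)` helper handles many more types plus validation:

```java
import java.lang.reflect.Field;
import java.util.Properties;

public class ConfigLoaderSketch {

    /** Hypothetical configuration bean. */
    public static class ServerConfig {
        private String clusterName;
        private int brokerPort;
        @Override public String toString() {
            return "ServerConfig{clusterName=" + clusterName + ", brokerPort=" + brokerPort + "}";
        }
    }

    static <T> T create(Properties props, Class<T> clazz) throws Exception {
        T config = clazz.getDeclaredConstructor().newInstance();   // requires a no-arg constructor
        for (Field field : clazz.getDeclaredFields()) {
            String value = props.getProperty(field.getName());
            if (value == null) {
                continue;                                          // keep the field's default
            }
            field.setAccessible(true);
            // Very small type-conversion table; a real loader covers many more types.
            if (field.getType() == int.class) {
                field.setInt(config, Integer.parseInt(value));
            } else {
                field.set(config, value);
            }
        }
        return config;
    }

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("clusterName", "standalone");
        props.setProperty("brokerPort", "6650");
        System.out.println(create(props, ServerConfig.class));
    }
}
```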
streampipes_ProcessingElementBuilder_category | /**
* Assigns a category to the element which later serves to categorize data processors in the UI.
*
* @param epaCategory The {@link org.apache.streampipes.model.DataProcessorType} of the element.
* @return {@link ProcessingElementBuilder}
*/
public ProcessingElementBuilder category(DataProcessorType... epaCategory) {
this.elementDescription.setCategory(Arrays
.stream(epaCategory)
.map(Enum::name)
.collect(Collectors.toList()));
return me();
} | 3.68 |
flink_CheckpointConfig_isForceCheckpointing | /**
* Checks whether checkpointing is forced, despite currently non-checkpointable iteration
* feedback.
*
* @return True, if checkpointing is forced, false otherwise.
* @deprecated This will be removed once iterations properly participate in checkpointing.
*/
@Deprecated
@PublicEvolving
public boolean isForceCheckpointing() {
return configuration.get(ExecutionCheckpointingOptions.FORCE_CHECKPOINTING);
} | 3.68 |
flink_HashJoinOperator_fallbackSMJProcessPartition | /**
* If, when the hash join ends, there still exist partitions which spilled to disk more than three
* times, it means that the keys in these partitions are very skewed, so fall back to the sort
* merge join algorithm to process them.
*/
private void fallbackSMJProcessPartition() throws Exception {
if (!table.getPartitionsPendingForSMJ().isEmpty()) {
// release memory to MemoryManager first that is used to sort merge join operator
table.releaseMemoryCacheForSMJ();
// initialize sort merge join operator
LOG.info("Fallback to sort merge join to process spilled partitions.");
initialSortMergeJoinFunction();
fallbackSMJ = true;
for (BinaryHashPartition p : table.getPartitionsPendingForSMJ()) {
// process build side
RowIterator<BinaryRowData> buildSideIter =
table.getSpilledPartitionBuildSideIter(p);
while (buildSideIter.advanceNext()) {
processSortMergeJoinElement1(buildSideIter.getRow());
}
// process probe side
ProbeIterator probeIter = table.getSpilledPartitionProbeSideIter(p);
BinaryRowData probeNext;
while ((probeNext = probeIter.next()) != null) {
processSortMergeJoinElement2(probeNext);
}
}
// close the HashTable
closeHashTable();
// finish build and probe
sortMergeJoinFunction.endInput(1);
sortMergeJoinFunction.endInput(2);
LOG.info("Finish sort merge join for spilled partitions.");
}
} | 3.68 |
framework_Sort_by | /**
* Start building a Sort order by sorting a provided column.
*
* @param column
* a grid column object reference
* @param direction
* indicator of sort direction - either ascending or descending
* @return a sort instance, typed to the grid data type
*/
public static Sort by(Grid.Column<?, ?> column, SortDirection direction) {
return new Sort(column, direction);
} | 3.68 |
graphhopper_VectorTile_clearExtent | /**
* <pre>
* Although this is an "optional" field it is required by the specification.
* See https://github.com/mapbox/vector-tile-spec/issues/47
* </pre>
*
* <code>optional uint32 extent = 5 [default = 4096];</code>
*/
public Builder clearExtent() {
bitField0_ = (bitField0_ & ~0x00000020);
extent_ = 4096;
onChanged();
return this;
} | 3.68 |
hadoop_HttpFSAuthenticationFilter_getConfiguration | /**
* Returns the hadoop-auth configuration from HttpFSServer's configuration.
* <p>
* It returns all HttpFSServer's configuration properties prefixed with
* <code>hadoop.http.authentication</code>. The
* <code>hadoop.http.authentication</code> prefix is removed from the
* returned property names.
*
* @param configPrefix parameter not used.
* @param filterConfig parameter not used.
*
* @return hadoop-auth configuration read from HttpFSServer's configuration.
*/
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException{
Configuration conf = HttpFSServerWebApp.get().getConfig();
Properties props = HttpServer2.getFilterProperties(conf,
new ArrayList<>(Arrays.asList(CONF_PREFIXES)));
String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: "
+ SIGNATURE_SECRET_FILE);
}
if (!isRandomSecret(filterConfig)) {
try (Reader reader = new InputStreamReader(Files.newInputStream(
Paths.get(signatureSecretFile)), StandardCharsets.UTF_8)) {
StringBuilder secret = new StringBuilder();
int c = reader.read();
while (c > -1) {
secret.append((char) c);
c = reader.read();
}
String secretString = secret.toString();
if (secretString.isEmpty()) {
throw new RuntimeException(
"No secret in HttpFs signature secret file: "
+ signatureSecretFile);
}
props.setProperty(AuthenticationFilter.SIGNATURE_SECRET,
secretString);
} catch (IOException ex) {
throw new RuntimeException("Could not read HttpFS signature "
+ "secret file: " + signatureSecretFile);
}
}
setAuthHandlerClass(props);
String dtkind = WebHdfsConstants.WEBHDFS_TOKEN_KIND.toString();
if (conf.getBoolean(HttpFSServerWebServer.SSL_ENABLED_KEY, false)) {
dtkind = WebHdfsConstants.SWEBHDFS_TOKEN_KIND.toString();
}
props.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
dtkind);
return props;
} | 3.68 |
hadoop_PlacementConstraints_nodeAttribute | /**
* Constructs a target expression on a node attribute. It is satisfied if
* the specified node attribute has one of the specified values.
*
* @param attributeKey the name of the node attribute
* @param attributeValues the set of values that the attribute should take
* values from
* @return the resulting expression on the node attribute
*/
public static TargetExpression nodeAttribute(String attributeKey,
String... attributeValues) {
return new TargetExpression(TargetType.NODE_ATTRIBUTE, attributeKey,
attributeValues);
} | 3.68 |
hbase_HFileSystem_getNoChecksumFs | /**
* Returns the filesystem that is specially set up for doing reads from storage. This object avoids
* doing checksum verifications for reads.
* @return The FileSystem object that can be used to read data from files.
*/
public FileSystem getNoChecksumFs() {
return noChecksumFs;
} | 3.68 |
hadoop_ResourceUsageMetrics_setPhysicalMemoryUsage | /**
* Set the physical memory usage.
*/
public void setPhysicalMemoryUsage(long usage) {
physicalMemoryUsage = usage;
} | 3.68 |
hadoop_AbstractMultipartUploader_checkUploadId | /**
* Utility method to validate uploadIDs.
* @param uploadId Upload ID
* @throws IllegalArgumentException invalid ID
*/
protected void checkUploadId(byte[] uploadId)
throws IllegalArgumentException {
checkArgument(uploadId != null, "null uploadId");
checkArgument(uploadId.length > 0,
"Empty UploadId is not valid");
} | 3.68 |
hbase_SnapshotManifest_getRegionManifestsMap | /**
* Get all the Region Manifests from the snapshot. This is a helper to get a map keyed by the
* region encoded name.
*/
public Map<String, SnapshotRegionManifest> getRegionManifestsMap() {
if (regionManifests == null || regionManifests.isEmpty()) return null;
HashMap<String, SnapshotRegionManifest> regionsMap = new HashMap<>(regionManifests.size());
for (SnapshotRegionManifest manifest : regionManifests) {
String regionName = getRegionNameFromManifest(manifest);
regionsMap.put(regionName, manifest);
}
return regionsMap;
} | 3.68 |
framework_VaadinPortlet_invokeStaticLiferayMethod | /**
* Simplified version of what Liferay PortalClassInvoker did. This is
* used because the API of PortalClassInvoker has changed in Liferay
* 6.2.
*
* This simply uses reflection with Liferay class loader. Parameters are
* Strings to avoid static dependencies and to load all classes with
* Liferay's own class loader. Only static utility methods are
* supported.
*
* This method is for internal use only and may change in future
* versions.
*
* @param className
* name of the Liferay class to call
* @param methodName
* name of the method to call
* @param parameterClassName
* name of the parameter class of the method
* @throws Exception
* @return return value of the invoked method
*/
private Object invokeStaticLiferayMethod(String className,
String methodName, Object argument, String parameterClassName)
throws Exception {
Thread currentThread = Thread.currentThread();
ClassLoader contextClassLoader = currentThread
.getContextClassLoader();
try {
// this should be available across all Liferay versions with no
// problematic static dependencies
ClassLoader portalClassLoader = PortalClassLoaderUtil
.getClassLoader();
// this is in case the class loading triggers code that
// explicitly
// uses current thread class loader
currentThread.setContextClassLoader(portalClassLoader);
Class<?> targetClass = portalClassLoader.loadClass(className);
Class<?> parameterClass = portalClassLoader
.loadClass(parameterClassName);
Method method = targetClass.getMethod(methodName,
parameterClass);
return method.invoke(null, argument);
} catch (InvocationTargetException ite) {
throw (Exception) ite.getCause();
} finally {
currentThread.setContextClassLoader(contextClassLoader);
}
} | 3.68 |
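The general pattern used above - temporarily swapping the thread context class loader, invoking a static method reflectively, and restoring the loader in a `finally` block - can be demonstrated against the JDK's own classes, with no Liferay dependency. The target class and method in `main` are just an example:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class StaticInvokeSketch {

    static Object invokeStatic(ClassLoader loader, String className,
                               String methodName, Object argument,
                               String parameterClassName) throws Exception {
        Thread current = Thread.currentThread();
        ClassLoader previous = current.getContextClassLoader();
        try {
            // Code triggered by the class loading may consult the context class loader.
            current.setContextClassLoader(loader);
            Class<?> target = loader.loadClass(className);
            Class<?> parameterClass = loader.loadClass(parameterClassName);
            Method method = target.getMethod(methodName, parameterClass);
            return method.invoke(null, argument);          // null receiver: static method
        } catch (InvocationTargetException ite) {
            throw (Exception) ite.getCause();              // unwrap the real failure
        } finally {
            current.setContextClassLoader(previous);       // always restore
        }
    }

    public static void main(String[] args) throws Exception {
        // Example: String.valueOf(Object) called reflectively.
        Object result = invokeStatic(StaticInvokeSketch.class.getClassLoader(),
                "java.lang.String", "valueOf", 42, "java.lang.Object");
        System.out.println(result); // 42
    }
}
```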
dubbo_MediaTypeUtil_convertMediaType | /**
* Returns the first match. If the content type is the wildcard, or contentTypes is null or empty,
* the media type is acquired from the target class type.
*
* @param contentTypes
* @return
*/
public static MediaType convertMediaType(Class<?> targetType, String... contentTypes) {
if (contentTypes == null || contentTypes.length == 0) {
return HttpMessageCodecManager.typeSupport(targetType);
}
for (String contentType : contentTypes) {
for (MediaType mediaType : mediaTypes) {
if (contentType != null && contentType.contains(mediaType.value)) {
return mediaType;
}
}
if (contentType != null && contentType.contains(MediaType.ALL_VALUE.value)) {
return HttpMessageCodecManager.typeSupport(targetType);
}
}
throw new UnSupportContentTypeException(Arrays.toString(contentTypes));
} | 3.68 |
framework_ComboBox_getItemStyleGenerator | /**
* Gets the currently used item style generator.
*
* @return the itemStyleGenerator the currently used item style generator,
* or <code>null</code> if no generator is used
* @since 7.5.6
*/
public ItemStyleGenerator getItemStyleGenerator() {
return itemStyleGenerator;
} | 3.68 |
hadoop_FedBalance_setTrashOpt | /**
* Specify the trash behaviour of the source path.
* @param value the trash option.
*/
public Builder setTrashOpt(TrashOption value) {
this.trashOpt = value;
return this;
} | 3.68 |
flink_Pattern_getAfterMatchSkipStrategy | /** @return the pattern's {@link AfterMatchSkipStrategy.SkipStrategy} after match. */
public AfterMatchSkipStrategy getAfterMatchSkipStrategy() {
return afterMatchSkipStrategy;
} | 3.68 |
hadoop_VirtualInputFormat_getSplits | // Number of splits = Number of mappers. Creates fakeSplits to launch
// the required number of mappers
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
Configuration conf = job.getConfiguration();
int numMappers = conf.getInt(CreateFileMapper.NUM_MAPPERS_KEY, -1);
if (numMappers == -1) {
throw new IOException("Number of mappers should be provided as input");
}
List<InputSplit> splits = new ArrayList<InputSplit>(numMappers);
for (int i = 0; i < numMappers; i++) {
splits.add(new VirtualInputSplit());
}
return splits;
} | 3.68 |
flink_ResultInfo_getResultSchema | /** Get the schemas of the results. */
public ResolvedSchema getResultSchema() {
return ResolvedSchema.of(
columnInfos.stream().map(ColumnInfo::toColumn).collect(Collectors.toList()));
} | 3.68 |
hbase_FileChangeWatcher_waitForState | /**
* Blocks until the current state becomes <code>desiredState</code>. Currently only used by tests,
* thus package-private.
* @param desiredState the desired state.
* @throws InterruptedException if the current thread gets interrupted.
*/
synchronized void waitForState(State desiredState) throws InterruptedException {
while (this.state != desiredState) {
this.wait();
}
} | 3.68 |
framework_VaadinPortlet_getRequestType | /**
* @param vaadinRequest
* @return
*
* @deprecated As of 7.0. This is no longer used and only provided for
* backwards compatibility. Each {@link RequestHandler} can
* individually decide whether it wants to handle a request or
* not.
*/
@Deprecated
protected RequestType getRequestType(VaadinPortletRequest vaadinRequest) {
PortletRequest request = vaadinRequest.getPortletRequest();
if (request instanceof RenderRequest) {
return RequestType.RENDER;
} else if (request instanceof ResourceRequest) {
if (ServletPortletHelper.isUIDLRequest(vaadinRequest)) {
return RequestType.UIDL;
} else if (PortletUIInitHandler.isUIInitRequest(vaadinRequest)) {
return RequestType.BROWSER_DETAILS;
} else if (ServletPortletHelper
.isFileUploadRequest(vaadinRequest)) {
return RequestType.FILE_UPLOAD;
} else if (ServletPortletHelper
.isPublishedFileRequest(vaadinRequest)) {
return RequestType.PUBLISHED_FILE;
} else if (ServletPortletHelper.isAppRequest(vaadinRequest)) {
return RequestType.APP;
} else if (ServletPortletHelper.isHeartbeatRequest(vaadinRequest)) {
return RequestType.HEARTBEAT;
} else if (PortletDummyRequestHandler
.isDummyRequest(vaadinRequest)) {
return RequestType.DUMMY;
} else {
return RequestType.STATIC_FILE;
}
} else if (request instanceof ActionRequest) {
return RequestType.ACTION;
} else if (request instanceof EventRequest) {
return RequestType.EVENT;
}
return RequestType.UNKNOWN;
} | 3.68 |
flink_DataStream_getTransformation | /**
* Returns the {@link Transformation} that represents the operation that logically creates this
* {@link DataStream}.
*
* @return The Transformation
*/
@Internal
public Transformation<T> getTransformation() {
return transformation;
} | 3.68 |
flink_TypeInferenceUtil_inferOutputType | /**
* Infers an output type using the given {@link TypeStrategy}. It assumes that input arguments
* have been adapted before if necessary.
*/
public static DataType inferOutputType(
CallContext callContext, TypeStrategy outputTypeStrategy) {
final Optional<DataType> potentialOutputType = outputTypeStrategy.inferType(callContext);
if (!potentialOutputType.isPresent()) {
throw new ValidationException(
"Could not infer an output type for the given arguments.");
}
final DataType outputType = potentialOutputType.get();
if (isUnknown(outputType)) {
throw new ValidationException(
"Could not infer an output type for the given arguments. Untyped NULL received.");
}
return outputType;
} | 3.68 |
flink_RpcEndpoint_validateRunsInMainThread | /**
* Validates that the method call happens in the RPC endpoint's main thread.
*
* <p><b>IMPORTANT:</b> This check only happens when assertions are enabled, such as when
* running tests.
*
* <p>This can be used for additional checks, like
*
* <pre>{@code
* protected void concurrencyCriticalMethod() {
* validateRunsInMainThread();
*
* // some critical stuff
* }
* }</pre>
*/
public void validateRunsInMainThread() {
assert MainThreadValidatorUtil.isRunningInExpectedThread(currentMainThread.get());
} | 3.68 |
framework_VaadinService_writeToHttpSession | /**
* Performs the actual write of the VaadinSession to the underlying HTTP
* session after sanity checks have been performed.
* <p>
* Called by {@link #storeSession(VaadinSession, WrappedSession)}
*
* @since 7.6
* @param wrappedSession
* the underlying HTTP session
* @param session
* the VaadinSession to store
*/
protected void writeToHttpSession(WrappedSession wrappedSession,
VaadinSession session) {
wrappedSession.setAttribute(getSessionAttributeName(), session);
} | 3.68 |
framework_NestedMethodProperty_initialize | /**
* Initializes most of the internal fields based on the top-level bean
* instance and property name (dot-separated string).
*
* @param beanClass
* class of the top-level bean to which the property applies
* @param propertyName
* dot separated nested property name
* @throws IllegalArgumentException
* if the property name is invalid
*/
private void initialize(Class<?> beanClass, String propertyName)
throws IllegalArgumentException {
List<Method> getMethods = new ArrayList<Method>();
String lastSimplePropertyName = propertyName;
Class<?> lastClass = beanClass;
// first top-level property, then go deeper in a loop
Class<?> propertyClass = beanClass;
String[] simplePropertyNames = propertyName.split("\\.");
if (propertyName.endsWith(".") || 0 == simplePropertyNames.length) {
throw new IllegalArgumentException(
"Invalid property name '" + propertyName + "'");
}
for (String simplePropertyName : simplePropertyNames) {
simplePropertyName = simplePropertyName.trim();
if (!simplePropertyName.isEmpty()) {
lastSimplePropertyName = simplePropertyName;
lastClass = propertyClass;
try {
Method getter = MethodProperty.initGetterMethod(
simplePropertyName, propertyClass);
propertyClass = getter.getReturnType();
getMethods.add(getter);
} catch (final NoSuchMethodException e) {
throw new IllegalArgumentException("Bean property '"
+ simplePropertyName + "' not found", e);
}
} else {
throw new IllegalArgumentException(
"Empty or invalid bean property identifier in '"
+ propertyName + "'");
}
}
// In case the get method is found, resolve the type
Method lastGetMethod = getMethods.get(getMethods.size() - 1);
Class<?> type = lastGetMethod.getReturnType();
// Finds the set method
Method setMethod = null;
try {
// Assure that the first letter is upper cased (it is a common
// mistake to write firstName, not FirstName).
lastSimplePropertyName = SharedUtil
.capitalize(lastSimplePropertyName);
setMethod = lastClass.getMethod("set" + lastSimplePropertyName,
new Class[] { type });
} catch (final NoSuchMethodException skipped) {
}
this.type = (Class<? extends T>) convertPrimitiveType(type);
this.propertyName = propertyName;
this.getMethods = getMethods;
this.setMethod = setMethod;
} | 3.68 |
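The getter-chain resolution performed in `initialize` can be illustrated by a small standalone helper that walks a dot-separated path such as `"address.street"` using the `getXxx` naming convention. The `Person`/`Address` beans are hypothetical, and the sketch skips the setter lookup and method caching that the real class performs:

```java
import java.lang.reflect.Method;

public class NestedPropertySketch {

    // Hypothetical beans used only for the demo.
    public static class Address {
        private final String street;
        public Address(String street) { this.street = street; }
        public String getStreet() { return street; }
    }
    public static class Person {
        private final Address address;
        public Person(Address address) { this.address = address; }
        public Address getAddress() { return address; }
    }

    /** Follows "a.b.c" by invoking getA(), then getB(), then getC() on the results. */
    static Object getNestedValue(Object bean, String propertyPath) throws Exception {
        Object current = bean;
        for (String name : propertyPath.split("\\.")) {
            String getterName = "get" + Character.toUpperCase(name.charAt(0)) + name.substring(1);
            Method getter = current.getClass().getMethod(getterName);
            current = getter.invoke(current);
        }
        return current;
    }

    public static void main(String[] args) throws Exception {
        Person person = new Person(new Address("Main Street 1"));
        System.out.println(getNestedValue(person, "address.street")); // Main Street 1
    }
}
```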
morf_DataValueLookupMetadata_getColumnNames | /**
* @return The column names stored, where the array index corresponds
* with the {@link DataValueLookupBuilderImpl} internal array position.
*/
List<CaseInsensitiveString> getColumnNames() {
return keys;
} | 3.68 |
querydsl_BooleanBuilder_orNot | /**
* Create the union of this and the negation of the given predicate
*
* @param right predicate to be negated
* @return the current object
*/
public BooleanBuilder orNot(Predicate right) {
return or(right.not());
} | 3.68 |
flink_FlinkContainersSettings_checkpointPath | /**
* Sets the {@code checkpointPath} and returns a reference to this Builder enabling method
* chaining.
*
* @param checkpointPath The checkpoint path to set.
* @return A reference to this Builder.
*/
public Builder checkpointPath(String checkpointPath) {
this.checkpointPath = checkpointPath;
return setConfigOption(
CheckpointingOptions.CHECKPOINTS_DIRECTORY, toUri(checkpointPath));
} | 3.68 |
hadoop_SchedulerAppReport_getReservedContainers | /**
* Get the list of reserved containers
* @return All of the reserved containers.
*/
public Collection<RMContainer> getReservedContainers() {
return reserved;
} | 3.68 |
hadoop_TokenIdentifier_getTrackingId | /**
* Returns a tracking identifier that can be used to associate usages of a
* token across multiple client sessions.
*
* Currently, this function just returns an MD5 of {{@link #getBytes()}.
*
* @return tracking identifier
*/
public String getTrackingId() {
if (trackingId == null) {
trackingId = DigestUtils.md5Hex(getBytes());
}
return trackingId;
} | 3.68 |
morf_DatabaseSchemaManager_dropTablesIfPresent | /**
* Drop the specified tables from the schema if they are present.
*
* @param tablesToDrop The tables to delete if they are present in the database.
*/
public void dropTablesIfPresent(Set<String> tablesToDrop) {
ProducerCache producerCache = new ProducerCache();
try {
Collection<String> sql = Lists.newLinkedList();
for (String tableName : tablesToDrop) {
Table cachedTable = getTable(producerCache, tableName);
if (cachedTable != null) {
sql.addAll(dropTable(cachedTable));
}
}
executeScript(sql);
} finally {
producerCache.close();
}
} | 3.68 |
flink_DeweyNumber_increase | /**
* Creates a new dewey number from this such that its last digit is increased by the supplied
* number.
*
* @param times how many times to increase the Dewey number
* @return A new dewey number derived from this whose last digit is increased by given number
*/
public DeweyNumber increase(int times) {
int[] newDeweyNumber = Arrays.copyOf(deweyNumber, deweyNumber.length);
newDeweyNumber[deweyNumber.length - 1] += times;
return new DeweyNumber(newDeweyNumber);
} | 3.68 |
framework_VRadioButtonGroup_isHtmlContentAllowed | /**
* Returns whether HTML is allowed in the item captions.
*
* @return {@code true} if the captions are used as HTML, {@code false} if
* used as plain text
*/
public boolean isHtmlContentAllowed() {
return htmlContentAllowed;
} | 3.68 |
flink_FlinkAggregateExpandDistinctAggregatesRule_doRewrite | /**
* Converts all distinct aggregate calls to a given set of arguments.
*
* <p>This method is called several times, one for each set of arguments. Each time it is
* called, it generates a JOIN to a new SELECT DISTINCT relational expression, and modifies the
* set of top-level calls.
*
* @param aggregate Original aggregate
* @param n Ordinal of this in a join. {@code relBuilder} contains the input relational
* expression (either the original aggregate, the output from the previous call to this
* method. {@code n} is 0 if we're converting the first distinct aggregate in a query with
* no non-distinct aggregates)
* @param argList Arguments to the distinct aggregate function
* @param filterArg Argument that filters input to aggregate function, or -1
* @param refs Array of expressions which will be the projected by the result of this rule.
* Those relating to this arg list will be modified @return Relational expression
*/
private void doRewrite(
RelBuilder relBuilder,
Aggregate aggregate,
int n,
List<Integer> argList,
int filterArg,
List<RexInputRef> refs) {
final RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
final List<RelDataTypeField> leftFields;
if (n == 0) {
leftFields = null;
} else {
leftFields = relBuilder.peek().getRowType().getFieldList();
}
// Aggregate(
// child,
// {COUNT(DISTINCT 1), SUM(DISTINCT 1), SUM(2)})
//
// becomes
//
// Aggregate(
// Join(
// child,
// Aggregate(child, < all columns > {}),
// INNER,
// <f2 = f5>))
//
// E.g.
// SELECT deptno, SUM(DISTINCT sal), COUNT(DISTINCT gender), MAX(age)
// FROM Emps
// GROUP BY deptno
//
// becomes
//
// SELECT e.deptno, adsal.sum_sal, adgender.count_gender, e.max_age
// FROM (
// SELECT deptno, MAX(age) as max_age
// FROM Emps GROUP BY deptno) AS e
// JOIN (
// SELECT deptno, COUNT(gender) AS count_gender FROM (
// SELECT DISTINCT deptno, gender FROM Emps) AS dgender
// GROUP BY deptno) AS adgender
// ON e.deptno = adgender.deptno
// JOIN (
// SELECT deptno, SUM(sal) AS sum_sal FROM (
// SELECT DISTINCT deptno, sal FROM Emps) AS dsal
// GROUP BY deptno) AS adsal
// ON e.deptno = adsal.deptno
// GROUP BY e.deptno
//
// Note that if a query contains no non-distinct aggregates, then the
// very first join/group by is omitted. In the example above, if
// MAX(age) is removed, then the sub-select of "e" is not needed, and
// instead the two other group by's are joined to one another.
// Project the columns of the GROUP BY plus the arguments
// to the agg function.
final Map<Integer, Integer> sourceOf = new HashMap<>();
createSelectDistinct(relBuilder, aggregate, argList, filterArg, sourceOf);
// Now compute the aggregate functions on top of the distinct dataset.
// Each distinct agg becomes a non-distinct call to the corresponding
// field from the right; for example,
// "COUNT(DISTINCT e.sal)"
// becomes
// "COUNT(distinct_e.sal)".
final List<AggregateCall> aggCallList = new ArrayList<>();
final List<AggregateCall> aggCalls = aggregate.getAggCallList();
final int groupCount = aggregate.getGroupCount();
int i = groupCount - 1;
for (AggregateCall aggCall : aggCalls) {
++i;
// Ignore agg calls which are not distinct or have the wrong set
// arguments. If we're rewriting aggs whose args are {sal}, we will
// rewrite COUNT(DISTINCT sal) and SUM(DISTINCT sal) but ignore
// COUNT(DISTINCT gender) or SUM(sal).
if (!aggCall.isDistinct()) {
continue;
}
if (!aggCall.getArgList().equals(argList)) {
continue;
}
// Re-map arguments.
final int argCount = aggCall.getArgList().size();
final List<Integer> newArgs = new ArrayList<>(argCount);
for (int j = 0; j < argCount; j++) {
final Integer arg = aggCall.getArgList().get(j);
newArgs.add(sourceOf.get(arg));
}
final int newFilterArg = aggCall.filterArg >= 0 ? sourceOf.get(aggCall.filterArg) : -1;
final AggregateCall newAggCall =
AggregateCall.create(
aggCall.getAggregation(),
false,
aggCall.isApproximate(),
false,
newArgs,
newFilterArg,
null,
RelCollations.EMPTY,
aggCall.getType(),
aggCall.getName());
assert refs.get(i) == null;
if (n == 0) {
refs.set(i, new RexInputRef(groupCount + aggCallList.size(), newAggCall.getType()));
} else {
refs.set(
i,
new RexInputRef(
leftFields.size() + groupCount + aggCallList.size(),
newAggCall.getType()));
}
aggCallList.add(newAggCall);
}
final Map<Integer, Integer> map = new HashMap<>();
for (Integer key : aggregate.getGroupSet()) {
map.put(key, map.size());
}
final ImmutableBitSet newGroupSet = aggregate.getGroupSet().permute(map);
assert newGroupSet.equals(ImmutableBitSet.range(aggregate.getGroupSet().cardinality()));
relBuilder.push(
aggregate.copy(
aggregate.getTraitSet(),
relBuilder.build(),
newGroupSet,
null,
aggCallList));
// If there's no left child yet, no need to create the join
if (n == 0) {
return;
}
// Create the join condition. It is of the form
// 'left.f0 = right.f0 and left.f1 = right.f1 and ...'
// where {f0, f1, ...} are the GROUP BY fields.
final List<RelDataTypeField> distinctFields = relBuilder.peek().getRowType().getFieldList();
final List<RexNode> conditions = com.google.common.collect.Lists.newArrayList();
for (i = 0; i < groupCount; ++i) {
// null values form its own group
// use "is not distinct from" so that the join condition
// allows null values to match.
conditions.add(
rexBuilder.makeCall(
SqlStdOperatorTable.IS_NOT_DISTINCT_FROM,
RexInputRef.of(i, leftFields),
new RexInputRef(
leftFields.size() + i, distinctFields.get(i).getType())));
}
// Join in the new 'select distinct' relation.
relBuilder.join(JoinRelType.INNER, conditions);
} | 3.68 |
framework_VTooltip_setOpenDelay | /**
* Sets the time (in ms) that should elapse after an event triggering
* tooltip showing has occurred (e.g. mouse over) before the tooltip is
* shown. If a tooltip has recently been shown, then
* {@link #getQuickOpenDelay()} is used instead of this.
*
* @param openDelay
* The open delay (in ms)
*/
public void setOpenDelay(int openDelay) {
this.openDelay = openDelay;
} | 3.68 |
framework_Navigator_addProvider | /**
* Registers a view provider (factory).
* <p>
* Providers are called in order of registration until one that can handle
* the requested view name is found.
*
* @param provider
* provider to register, not <code>null</code>
* @throws IllegalArgumentException
* if the provided view provider is <code>null</code>
*/
public void addProvider(ViewProvider provider) {
if (provider == null) {
throw new IllegalArgumentException(
"Cannot add a null view provider");
}
providers.add(provider);
} | 3.68 |
hbase_RegionCoprocessorHost_postMemStoreCompaction | /**
* Invoked after in memory compaction.
*/
public void postMemStoreCompaction(HStore store) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postMemStoreCompaction(this, store);
}
});
} | 3.68 |
pulsar_ClientCnxIdleState_tryMarkIdleAndInitIdleTime | /**
* Try to transform the state of the connection to #{@link State#IDLE}, state should only be
* transformed to #{@link State#IDLE} from state #{@link State#USING}. If the state
* is successfully transformed, "idleMarkTime" will be assigned to current time.
*/
public void tryMarkIdleAndInitIdleTime() {
if (compareAndSetIdleStat(State.USING, State.IDLE)) {
idleMarkTime = System.currentTimeMillis();
}
} | 3.68 |
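A minimal stand-alone sketch of the compare-and-set transition that tryMarkIdleAndInitIdleTime performs, assuming the state is held in an AtomicReference; the class, enum, and field names below are illustrative stand-ins, not Pulsar's actual fields.

import java.util.concurrent.atomic.AtomicReference;

class IdleStateSketch {
    enum State { USING, IDLE, RELEASING, RELEASED }

    private final AtomicReference<State> state = new AtomicReference<>(State.USING);
    private volatile long idleMarkTime;

    void tryMarkIdleAndInitIdleTime() {
        // Only a USING connection may become IDLE; losing the race simply leaves the state untouched.
        if (state.compareAndSet(State.USING, State.IDLE)) {
            idleMarkTime = System.currentTimeMillis();
        }
    }
}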
pulsar_ReaderConfiguration_setSubscriptionRolePrefix | /**
* Set the subscription role prefix for subscription auth. The default prefix is "reader".
*
 * @param subscriptionRolePrefix the prefix used as the subscription role; must not be blank
*/
public ReaderConfiguration setSubscriptionRolePrefix(String subscriptionRolePrefix) {
checkArgument(StringUtils.isNotBlank(subscriptionRolePrefix));
conf.setSubscriptionRolePrefix(subscriptionRolePrefix);
return this;
} | 3.68 |
hadoop_DatanodeAdminProperties_getPort | /**
* Get the port number of the datanode.
* @return the port number of the datanode.
*/
public int getPort() {
return port;
} | 3.68 |
flink_CheckpointProperties_forCheckpoint | /**
* Creates the checkpoint properties for a checkpoint.
*
* <p>Checkpoints may be queued in case too many other checkpoints are currently happening. They
* are garbage collected automatically, except when the owning job terminates in state {@link
* JobStatus#FAILED}. The user is required to configure the clean up behaviour on job
* cancellation.
*
 * @return Checkpoint properties for a checkpoint with the given retention policy.
*/
public static CheckpointProperties forCheckpoint(CheckpointRetentionPolicy policy) {
switch (policy) {
case NEVER_RETAIN_AFTER_TERMINATION:
return CHECKPOINT_NEVER_RETAINED;
case RETAIN_ON_FAILURE:
return CHECKPOINT_RETAINED_ON_FAILURE;
case RETAIN_ON_CANCELLATION:
return CHECKPOINT_RETAINED_ON_CANCELLATION;
default:
throw new IllegalArgumentException("unknown policy: " + policy);
}
} | 3.68 |
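A hedged usage sketch of the factory above: requesting properties for a checkpoint that is retained when the job is cancelled. The import packages are assumed to be org.apache.flink.runtime.checkpoint, where these internal classes usually live.

import org.apache.flink.runtime.checkpoint.CheckpointProperties;
import org.apache.flink.runtime.checkpoint.CheckpointRetentionPolicy;

public class CheckpointPropertiesSketch {
    public static void main(String[] args) {
        CheckpointProperties props =
                CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.RETAIN_ON_CANCELLATION);
        // Passing an unknown policy would raise IllegalArgumentException("unknown policy: ...").
        System.out.println(props);
    }
}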
flink_ArrowFieldWriter_getCount | /** Returns the current count of elements written. */
public int getCount() {
return count;
} | 3.68 |
hadoop_GangliaConf_setSlope | /**
* @param slope the slope to set
*/
void setSlope(GangliaSlope slope) {
this.slope = slope;
} | 3.68 |
hbase_RawBytesTerminated_encode | /**
 * Write {@code val} into {@code dst}, respecting {@code voff} and {@code vlen}.
* @return number of bytes written.
*/
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
return ((RawBytes) wrapped).encode(dst, val, voff, vlen);
} | 3.68 |
hadoop_ProbeStatus_markAsSuccessful | /**
 * Flip the success bit on while the real outcome bit is kept false.
*/
public void markAsSuccessful() {
success = true;
} | 3.68 |
hadoop_CipherSuite_getAlgorithmBlockSize | /**
* @return size of an algorithm block in bytes
*/
public int getAlgorithmBlockSize() {
return algoBlockSize;
} | 3.68 |
hadoop_HttpFSReleaseFilter_getFileSystemAccess | /**
* Returns the {@link FileSystemAccess} service to return the FileSystemAccess filesystem
* instance to.
*
* @return the FileSystemAccess service.
*/
@Override
protected FileSystemAccess getFileSystemAccess() {
return HttpFSServerWebApp.get().get(FileSystemAccess.class);
} | 3.68 |
flink_RestartStrategies_fixedDelayRestart | /**
* Generates a FixedDelayRestartStrategyConfiguration.
*
* @param restartAttempts Number of restart attempts for the FixedDelayRestartStrategy
* @param delayInterval Delay in-between restart attempts for the FixedDelayRestartStrategy
 * @return a FixedDelayRestartStrategyConfiguration with the given restart attempts and delay
*/
public static RestartStrategyConfiguration fixedDelayRestart(
int restartAttempts, Time delayInterval) {
return new FixedDelayRestartStrategyConfiguration(restartAttempts, delayInterval);
} | 3.68 |
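A hedged usage sketch: wiring the fixed-delay strategy into a streaming job. It assumes the classic DataStream API with setRestartStrategy; the import locations reflect the usual Flink packages and should be treated as assumptions for older or newer releases.

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RestartStrategySketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Retry a failed job up to 3 times, waiting 10 seconds between attempts.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.of(10, TimeUnit.SECONDS)));
    }
}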
hudi_HoodieTableMetadataUtil_deleteMetadataPartition | /**
* Deletes the metadata partition from the file system.
*
* @param basePath - base path of the dataset
* @param context - instance of {@link HoodieEngineContext}
* @param partitionType - {@link MetadataPartitionType} of the partition to delete
*/
public static void deleteMetadataPartition(String basePath, HoodieEngineContext context, MetadataPartitionType partitionType) {
HoodieTableMetaClient dataMetaClient = HoodieTableMetaClient.builder().setBasePath(basePath).setConf(context.getHadoopConf().get()).build();
deleteMetadataTablePartition(dataMetaClient, context, partitionType, false);
} | 3.68 |
hadoop_FederationProtocolPBTranslator_getProtoOrBuilder | /**
* Returns an interface to access data stored within this object. The object
* may have been initialized either via a builder or by an existing protobuf
* byte stream.
*
* @return MessageOrBuilder protobuf interface for the requested class.
*/
@SuppressWarnings("unchecked")
public T getProtoOrBuilder() {
if (this.builder != null) {
// Use mutable builder if it exists
return (T) this.builder;
} else if (this.proto != null) {
// Use immutable message source
return (T) this.proto;
} else {
// Construct empty builder
return (T) this.getBuilder();
}
} | 3.68 |
hadoop_AMRMProxyService_initializePipeline | /**
* Initializes the request interceptor pipeline for the specified application.
*
* @param applicationAttemptId attempt id
* @param user user name
* @param amrmToken amrmToken issued by RM
* @param localToken amrmToken issued by AMRMProxy
* @param recoveredDataMap the recovered states for AMRMProxy from NMSS
* @param isRecovery whether this is to recover a previously existing pipeline
 * @param credentials credentials of the application
 */
protected void initializePipeline(ApplicationAttemptId applicationAttemptId,
String user, Token<AMRMTokenIdentifier> amrmToken,
Token<AMRMTokenIdentifier> localToken,
Map<String, byte[]> recoveredDataMap, boolean isRecovery,
Credentials credentials) {
RequestInterceptorChainWrapper chainWrapper = null;
synchronized (applPipelineMap) {
if (applPipelineMap
.containsKey(applicationAttemptId.getApplicationId())) {
LOG.warn("Request to start an already existing appId was received. "
+ " This can happen if an application failed and a new attempt "
+ "was created on this machine. ApplicationId: {}.", applicationAttemptId);
RequestInterceptorChainWrapper chainWrapperBackup =
this.applPipelineMap.get(applicationAttemptId.getApplicationId());
if (chainWrapperBackup != null
&& chainWrapperBackup.getApplicationAttemptId() != null
&& !chainWrapperBackup.getApplicationAttemptId()
.equals(applicationAttemptId)) {
// TODO: revisit in AMRMProxy HA in YARN-6128
// Remove the existing pipeline
LOG.info("Remove the previous pipeline for ApplicationId: {}.", applicationAttemptId);
RequestInterceptorChainWrapper pipeline =
applPipelineMap.remove(applicationAttemptId.getApplicationId());
if (!isRecovery && this.nmContext.getNMStateStore() != null) {
try {
this.nmContext.getNMStateStore()
.removeAMRMProxyAppContext(applicationAttemptId);
} catch (IOException ioe) {
LOG.error("Error removing AMRMProxy application context for {}.",
applicationAttemptId, ioe);
}
}
try {
pipeline.getRootInterceptor().shutdown();
} catch (Throwable ex) {
LOG.warn("Failed to shutdown the request processing pipeline for app: {}.",
applicationAttemptId.getApplicationId(), ex);
}
} else {
return;
}
}
chainWrapper = new RequestInterceptorChainWrapper();
this.applPipelineMap.put(applicationAttemptId.getApplicationId(),
chainWrapper);
}
// We register the pipeline instance in the map first and then initialize it
// later because chain initialization can be expensive, and we would like to
// release the lock as soon as possible to prevent other applications from
// blocking when one application's chain is initializing
LOG.info("Initializing request processing pipeline for application. "
+ " ApplicationId: {} for the user: {}.", applicationAttemptId, user);
try {
RequestInterceptor interceptorChain =
this.createRequestInterceptorChain();
interceptorChain.init(
createApplicationMasterContext(this.nmContext, applicationAttemptId,
user, amrmToken, localToken, credentials, this.registry));
if (isRecovery) {
if (recoveredDataMap == null) {
throw new YarnRuntimeException("null recoveredDataMap received for recover");
}
interceptorChain.recover(recoveredDataMap);
}
chainWrapper.init(interceptorChain, applicationAttemptId);
if (!isRecovery && this.nmContext.getNMStateStore() != null) {
try {
this.nmContext.getNMStateStore().storeAMRMProxyAppContextEntry(
applicationAttemptId, NMSS_USER_KEY, user.getBytes(StandardCharsets.UTF_8));
this.nmContext.getNMStateStore().storeAMRMProxyAppContextEntry(
applicationAttemptId, NMSS_AMRMTOKEN_KEY,
amrmToken.encodeToUrlString().getBytes(StandardCharsets.UTF_8));
} catch (IOException e) {
LOG.error("Error storing AMRMProxy application context entry for {}.",
applicationAttemptId, e);
}
}
} catch (Exception e) {
this.applPipelineMap.remove(applicationAttemptId.getApplicationId());
throw e;
}
} | 3.68 |
hadoop_ConfigurationBasicValidator_validate | /**
 * Handles the base case where the configValue is null: depending on throwIfInvalid it either throws or returns the defaultVal;
 * otherwise it returns null, indicating that the configValue needs to be validated further.
 * @param configValue the configuration value set by the user
 * @return the defaultVal if the configValue is null and not required to be set, null if the configValue is not null
 * @throws InvalidConfigurationValueException if the configValue is null and required to be set
*/
public T validate(final String configValue) throws InvalidConfigurationValueException {
if (configValue == null) {
if (this.throwIfInvalid) {
throw new InvalidConfigurationValueException(this.configKey);
}
return this.defaultVal;
}
return null;
} | 3.68 |
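A hypothetical subclass sketch showing how the base-case contract above is meant to be used: when super.validate(...) returns non-null or throws, the null case is already resolved; a null return means the raw value still needs type-specific validation. The constructor shape and the IntRangeValidatorSketch class are assumptions, not the real hadoop-azure validator hierarchy.

public class IntRangeValidatorSketch extends ConfigurationBasicValidator<Integer> {
    private final String key;
    private final int min;
    private final int max;

    public IntRangeValidatorSketch(String configKey, Integer defaultVal, boolean throwIfInvalid, int min, int max) {
        super(configKey, defaultVal, throwIfInvalid);   // assumed constructor shape
        this.key = configKey;
        this.min = min;
        this.max = max;
    }

    @Override
    public Integer validate(final String configValue) throws InvalidConfigurationValueException {
        Integer baseCase = super.validate(configValue);
        if (baseCase != null || configValue == null) {
            return baseCase;   // the base class already resolved the null case
        }
        int parsed = Integer.parseInt(configValue.trim());
        if (parsed < min || parsed > max) {
            throw new InvalidConfigurationValueException(key);
        }
        return parsed;
    }
}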
rocketmq-connect_ServiceProviderUtil_getPositionManagementService | /**
 * Get the position management service by class name.
 *
 * @param positionManagementServiceClazz fully qualified class name of the {@link PositionManagementService} implementation to load
 * @return the matching {@link PositionManagementService} instance
*/
@NotNull
public static PositionManagementService getPositionManagementService(String positionManagementServiceClazz) {
if (StringUtils.isEmpty(positionManagementServiceClazz)) {
positionManagementServiceClazz = LocalPositionManagementServiceImpl.class.getName();
}
PositionManagementService positionManagementService = null;
ServiceLoader<PositionManagementService> positionManagementServiceServiceLoader = ServiceLoader.load(PositionManagementService.class);
Iterator<PositionManagementService> positionManagementServiceIterator = positionManagementServiceServiceLoader.iterator();
while (positionManagementServiceIterator.hasNext()) {
PositionManagementService currentPositionManagementService = positionManagementServiceIterator.next();
if (currentPositionManagementService.getClass().getName().equals(positionManagementServiceClazz)) {
positionManagementService = currentPositionManagementService;
break;
}
}
if (null == positionManagementService) {
throw new ConnectException("PositionManagementService class " + positionManagementServiceClazz + " not " +
"found");
}
return positionManagementService;
} | 3.68 |
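The lookup above is the standard ServiceLoader pattern: scan the implementations registered under META-INF/services and pick the one whose class name matches, failing loudly otherwise. A generic, self-contained restatement (the helper class and method names are hypothetical):

import java.util.ServiceLoader;

final class ServiceLookupSketch {
    static <T> T loadByClassName(Class<T> spi, String implClassName) {
        for (T candidate : ServiceLoader.load(spi)) {
            if (candidate.getClass().getName().equals(implClassName)) {
                return candidate;
            }
        }
        throw new IllegalStateException(spi.getSimpleName() + " implementation " + implClassName + " not found");
    }
}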
hibernate-validator_ModUtil_calculateMod11Check | /**
 * Calculate the Modulo 11 checksum, assuming that the threshold is {@code Integer.MAX_VALUE}.
*
* @param digits the digits for which to calculate the checksum
*
* @return the result of the mod11 checksum calculation
*/
public static int calculateMod11Check(final List<Integer> digits) {
return calculateMod11Check( digits, Integer.MAX_VALUE );
} | 3.68 |
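A hedged sketch of the weighted Mod 11 computation the overload above delegates to: digits are weighted 2, 3, 4, ... starting from the rightmost digit, the weight wrapping back to 2 after it reaches the threshold (with Integer.MAX_VALUE the wrap never happens), and the check digit is 11 minus the sum modulo 11. This mirrors the conventional Mod 11 scheme and may differ from the library's own code in edge-case handling.

import java.util.Arrays;
import java.util.List;

final class Mod11Sketch {
    static int calculateMod11Check(List<Integer> digits, int threshold) {
        int sum = 0;
        int multiplier = 2;
        for (int i = digits.size() - 1; i >= 0; i--) {
            sum += digits.get(i) * multiplier;
            multiplier = (multiplier >= threshold) ? 2 : multiplier + 1;
        }
        return 11 - (sum % 11);   // callers typically map results of 10 or 11 to a letter or 0
    }

    public static void main(String[] args) {
        // Weights right-to-left: 5*2 + 4*3 + 3*4 + 2*5 + 1*6 = 50, and 11 - (50 % 11) = 5.
        System.out.println(calculateMod11Check(Arrays.asList(1, 2, 3, 4, 5), Integer.MAX_VALUE)); // prints 5
    }
}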
hbase_RowModel_getCells | /** Returns the cells */
public List<CellModel> getCells() {
return cells;
} | 3.68 |