name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
AreaShop_AreaShop_debugTask | /**
* Print debug message for periodic task.
* @param message The message to print
*/
public static void debugTask(Object... message) {
if(AreaShop.getInstance().getConfig().getBoolean("debugTask")) {
AreaShop.debug(StringUtils.join(message, " "));
}
} | 3.68 |
framework_FocusableFlowPanel_addFocusHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasFocusHandlers#addFocusHandler(com.
* google.gwt.event.dom.client.FocusHandler)
*/
@Override
public HandlerRegistration addFocusHandler(FocusHandler handler) {
return addDomHandler(handler, FocusEvent.getType());
} | 3.68 |
querydsl_JTSGeometryExpression_buffer | /**
* Returns a geometric object that represents all Points whose distance from this geometric
* object is less than or equal to distance. Calculations are in the spatial reference system
* of this geometric object. Because of the limitations of linear interpolation, there will
* often be some relatively small error in this distance, but it should be near the resolution
* of the coordinates used.
*
* @param distance distance
* @return buffer
*/
public JTSGeometryExpression<Geometry> buffer(double distance) {
return JTSGeometryExpressions.geometryOperation(SpatialOps.BUFFER, mixin, ConstantImpl.create(distance));
} | 3.68 |
hudi_HoodieIndexID_isFileID | /**
* Is this ID a FileID type?
*
* @return True if this ID is of FileID type
*/
public final boolean isFileID() {
return (getType() == Type.FILE);
} | 3.68 |
framework_VAbstractCalendarPanel_selectDate | /**
* Sets the selection highlight to a given day in the current view
*
* @param date
* A Date representing the day of month to be selected. Must be
* one of the days currently visible.
*
*/
@SuppressWarnings("unchecked")
private void selectDate(Date date) {
if (selectedDay != null) {
selectedDay.removeStyleDependentName(CN_SELECTED);
Roles.getGridcellRole()
.removeAriaSelectedState(selectedDay.getElement());
}
int rowCount = days.getRowCount();
for (int i = 0; i < rowCount; i++) {
int cellCount = days.getCellCount(i);
for (int j = 0; j < cellCount; j++) {
Widget widget = days.getWidget(i, j);
if (widget instanceof VAbstractCalendarPanel.Day) {
Day curday = (Day) widget;
if (curday.getDate().equals(date)) {
curday.addStyleDependentName(CN_SELECTED);
selectedDay = curday;
Roles.getGridcellRole().setAriaSelectedState(
selectedDay.getElement(), SelectedValue.TRUE);
return;
}
}
}
}
} | 3.68 |
flink_PrioritizedDeque_asUnmodifiableCollection | /** Returns an unmodifiable collection view. */
public Collection<T> asUnmodifiableCollection() {
return Collections.unmodifiableCollection(deque);
} | 3.68 |
flink_SorterInputGateway_writeRecord | /** Writes the given record for sorting. */
public void writeRecord(E record) throws IOException, InterruptedException {
if (currentBuffer == null) {
this.currentBuffer = this.dispatcher.take(SortStage.READ);
if (!currentBuffer.getBuffer().isEmpty()) {
throw new IOException("New buffer is not empty.");
}
}
InMemorySorter<E> sorter = currentBuffer.getBuffer();
long occupancyPreWrite = sorter.getOccupancy();
if (!sorter.write(record)) {
long recordSize = sorter.getCapacity() - occupancyPreWrite;
signalSpillingIfNecessary(recordSize);
boolean isLarge = occupancyPreWrite == 0;
if (isLarge) {
// did not fit in a fresh buffer, must be large...
writeLarge(record, sorter);
this.currentBuffer.getBuffer().reset();
} else {
this.dispatcher.send(SortStage.SORT, currentBuffer);
this.currentBuffer = null;
writeRecord(record);
}
} else {
long recordSize = sorter.getOccupancy() - occupancyPreWrite;
signalSpillingIfNecessary(recordSize);
}
} | 3.68 |
hadoop_NMTokenSecretManagerInNM_generateNMToken | /**
* Used by the Distributed Scheduler framework to generate NMTokens.
* @param applicationSubmitter the user submitting the application
* @param container the container for which the token is generated
* @return the generated NMToken
*/
public NMToken generateNMToken(
String applicationSubmitter, Container container) {
this.readLock.lock();
try {
Token token =
createNMToken(container.getId().getApplicationAttemptId(),
container.getNodeId(), applicationSubmitter);
return NMToken.newInstance(container.getNodeId(), token);
} finally {
this.readLock.unlock();
}
} | 3.68 |
framework_FilesystemContainer_getItem | /*
* Gets the specified Item from the filesystem. Don't add a JavaDoc comment
* here, we use the default documentation from implemented interface.
*/
@Override
public Item getItem(Object itemId) {
if (!(itemId instanceof File)) {
return null;
}
return new FileItem((File) itemId);
} | 3.68 |
hbase_Operation_toJSON | /**
* Produces a JSON object sufficient for description of a query in a debugging or logging context.
* @return the produced JSON object, as a string
*/
public String toJSON() throws IOException {
return toJSON(DEFAULT_MAX_COLS);
} | 3.68 |
framework_VAbstractDateFieldCalendar_setTabIndex | /**
* Sets the tabulator index for the calendar panel element that represents
* the entire widget in the browser's focus cycle.
*
* @param tabIndex
* the new tabulator index
*/
public void setTabIndex(int tabIndex) {
calendarPanel.getElement().setTabIndex(tabIndex);
} | 3.68 |
hudi_HoodieTableMetadataUtil_getPartitionLatestFileSlicesIncludingInflight | /**
* Get the latest file slices for a given partition including the inflight ones.
*
* @param metaClient - instance of {@link HoodieTableMetaClient}
* @param fileSystemView - hoodie table file system view, which will be fetched from meta client if not already present
* @param partition - name of the partition whose file groups are to be loaded
* @return the latest file slices for the given partition, including inflight ones, sorted by file id
*/
public static List<FileSlice> getPartitionLatestFileSlicesIncludingInflight(HoodieTableMetaClient metaClient,
Option<HoodieTableFileSystemView> fileSystemView,
String partition) {
HoodieTableFileSystemView fsView = fileSystemView.orElse(getFileSystemView(metaClient));
Stream<FileSlice> fileSliceStream = fsView.fetchLatestFileSlicesIncludingInflight(partition);
return fileSliceStream
.sorted(Comparator.comparing(FileSlice::getFileId))
.collect(Collectors.toList());
} | 3.68 |
flink_ZooKeeperUtils_createCompletedCheckpoints | /**
* Creates a {@link DefaultCompletedCheckpointStore} instance with {@link
* ZooKeeperStateHandleStore}.
*
* @param client The {@link CuratorFramework} ZooKeeper client to use
* @param configuration {@link Configuration} object
* @param maxNumberOfCheckpointsToRetain The maximum number of checkpoints to retain
* @param sharedStateRegistryFactory factory for creating the shared state registry
* @param ioExecutor executor for the I/O-heavy operations of the store
* @param executor to run ZooKeeper callbacks
* @param restoreMode the mode in which the job is being restored
* @return {@link DefaultCompletedCheckpointStore} instance
* @throws Exception if the completed checkpoint store cannot be created
*/
public static CompletedCheckpointStore createCompletedCheckpoints(
CuratorFramework client,
Configuration configuration,
int maxNumberOfCheckpointsToRetain,
SharedStateRegistryFactory sharedStateRegistryFactory,
Executor ioExecutor,
Executor executor,
RestoreMode restoreMode)
throws Exception {
checkNotNull(configuration, "Configuration");
RetrievableStateStorageHelper<CompletedCheckpoint> stateStorage =
createFileSystemStateStorage(configuration, HA_STORAGE_COMPLETED_CHECKPOINT);
final ZooKeeperStateHandleStore<CompletedCheckpoint> completedCheckpointStateHandleStore =
createZooKeeperStateHandleStore(client, getCheckpointsPath(), stateStorage);
Collection<CompletedCheckpoint> completedCheckpoints =
DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints(
completedCheckpointStateHandleStore, ZooKeeperCheckpointStoreUtil.INSTANCE);
final CompletedCheckpointStore zooKeeperCompletedCheckpointStore =
new DefaultCompletedCheckpointStore<>(
maxNumberOfCheckpointsToRetain,
completedCheckpointStateHandleStore,
ZooKeeperCheckpointStoreUtil.INSTANCE,
completedCheckpoints,
sharedStateRegistryFactory.create(
ioExecutor, completedCheckpoints, restoreMode),
executor);
LOG.info(
"Initialized {} in '{}' with {}.",
DefaultCompletedCheckpointStore.class.getSimpleName(),
completedCheckpointStateHandleStore,
getCheckpointsPath());
return zooKeeperCompletedCheckpointStore;
} | 3.68 |
AreaShop_BuyRegion_isSold | /**
* Check if the region is sold.
* @return true if the region is sold, otherwise false
*/
public boolean isSold() {
return getBuyer() != null;
} | 3.68 |
hadoop_AbfsOutputStream_hasActiveBlock | /**
* Predicate to query whether or not there is an active block.
*
* @return true if there is an active block.
*/
private synchronized boolean hasActiveBlock() {
return activeBlock != null;
} | 3.68 |
hbase_AbstractFSWAL_tellListenersAboutPreLogRoll | /**
* Tell listeners about pre log roll.
*/
private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath)
throws IOException {
coprocessorHost.preWALRoll(oldPath, newPath);
if (!this.listeners.isEmpty()) {
for (WALActionsListener i : this.listeners) {
i.preLogRoll(oldPath, newPath);
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_expectedRandomFunction | /**
* @return The expected random function sql.
*/
protected String expectedRandomFunction() {
return "RAND()";
} | 3.68 |
pulsar_BrokerMonitor_makeMessageRow | // Take advantage of repeated labels in message rows.
private static Object[] makeMessageRow(final String firstElement) {
final List<Object> result = new ArrayList<>();
result.add(firstElement);
result.addAll(MESSAGE_FIELDS);
return result.toArray();
} | 3.68 |
framework_Navigator_navigateTo | /**
* Internal method activating a view, setting its parameters and calling
* listeners.
* <p>
* This method also verifies that the user is allowed to perform the
* navigation operation.
*
* @param view
* view to activate
* @param viewName
* (optional) name of the view or null not to change the
* navigation state
* @param parameters
* parameters passed in the navigation state to the view
*/
protected void navigateTo(View view, String viewName, String parameters) {
runAfterLeaveConfirmation(
() -> performNavigateTo(view, viewName, parameters));
} | 3.68 |
hbase_TestingHBaseCluster_create | /**
* Create a {@link TestingHBaseCluster}. You need to call {@link #start()} of the returned
* {@link TestingHBaseCluster} to actually start the mini testing cluster.
*/
static TestingHBaseCluster create(TestingHBaseClusterOption option) {
return new TestingHBaseClusterImpl(option);
} | 3.68 |
flink_NumericColumnSummary_getMean | /**
* Null, NaN, and Infinite values are ignored in this calculation.
*
* @see <a href="https://en.wikipedia.org/wiki/Mean">Arithmetic Mean</a>
*/
public Double getMean() {
return mean;
} | 3.68 |
hbase_CacheConfig_shouldCacheDataOnRead | /**
* Returns whether the DATA blocks of this HFile should be cached on read or not (we always cache
* the meta blocks, the INDEX and BLOOM blocks).
* @return true if blocks should be cached on read, false if not
*/
public boolean shouldCacheDataOnRead() {
return cacheDataOnRead;
} | 3.68 |
flink_ReOpenableHashPartition_restorePartitionBuffers | /**
* This method is called every time a multi-match hash map is opened again for a new probe
* input.
*
* @param ioManager the I/O manager used to read the spilled partition back in
* @param availableMemory the memory segments available for reading the partition buffers
* @throws IOException if reading the spilled partition from disk fails
*/
void restorePartitionBuffers(IOManager ioManager, List<MemorySegment> availableMemory)
throws IOException {
final BulkBlockChannelReader reader =
ioManager.createBulkBlockChannelReader(
this.initialBuildSideChannel,
availableMemory,
this.initialPartitionBuffersCount);
reader.close();
final List<MemorySegment> partitionBuffersFromDisk = reader.getFullSegments();
this.partitionBuffers =
(MemorySegment[])
partitionBuffersFromDisk.toArray(
new MemorySegment[partitionBuffersFromDisk.size()]);
this.overflowSegments = new MemorySegment[2];
this.numOverflowSegments = 0;
this.nextOverflowBucket = 0;
this.isRestored = true;
} | 3.68 |
framework_Tree_fireExpandEvent | /**
* Emits the expand event.
*
* @param itemId
* the item id.
*/
protected void fireExpandEvent(Object itemId) {
fireEvent(new ExpandEvent(this, itemId));
} | 3.68 |
pulsar_ManagedLedgerImpl_hasActiveCursors | /**
* Tells whether the managed ledger has any active cursor registered.
*
* @return true if at least a cursor exists
*/
public boolean hasActiveCursors() {
// Use hasCursors instead of isEmpty because isEmpty does not take into account non-durable cursors
return !activeCursors.isEmpty();
} | 3.68 |
hudi_LSMTimeline_getMinInstantTime | /**
* Parse the minimum instant time from the file name.
*/
public static String getMinInstantTime(String fileName) {
Matcher fileMatcher = ARCHIVE_FILE_PATTERN.matcher(fileName);
if (fileMatcher.matches()) {
return fileMatcher.group(1);
} else {
throw new HoodieException("Unexpected archival file name: " + fileName);
}
} | 3.68 |
hbase_ZKUtil_deleteNodeRecursivelyMultiOrSequential | /**
* Delete the specified node and its children. This traverses the znode tree to list the
* children and then deletes these znodes, including the parent, using the multi-update API or
* sequential deletes depending on the specified configuration.
* <p>
* Sets no watches. Throws all exceptions besides those dealing with deletion of children.
* <p>
* If runSequentialOnMultiFailure is true and the multi call fails with a ZooKeeper exception
* that can be handled by a sequential call, the operations are retried one-by-one
* (sequentially).
* @param zkw zk reference
* @param runSequentialOnMultiFailure if true, retry the operations one-by-one (sequentially)
*          when the multi call fails with a ZooKeeper exception that allows it
* @param pathRoots path(s) of the parent node(s)
* @throws KeeperException.NotEmptyException if a node has children while deleting
* @throws KeeperException if an unexpected ZooKeeper exception occurs or an invalid path is
*           specified
*/
public static void deleteNodeRecursivelyMultiOrSequential(ZKWatcher zkw,
boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException {
if (pathRoots == null || pathRoots.length <= 0) {
LOG.warn("Given path is not valid!");
return;
}
List<ZKUtilOp> ops = new ArrayList<>();
for (String eachRoot : pathRoots) {
// ZooKeeper Watches are one time triggers; When children of parent nodes are deleted
// recursively, must set another watch, get notified of delete node
List<String> children = listChildrenBFSAndWatchThem(zkw, eachRoot);
// Delete the leaves first and eventually get rid of the root
for (int i = children.size() - 1; i >= 0; --i) {
ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i)));
}
try {
if (zkw.getRecoverableZooKeeper().exists(eachRoot, zkw) != null) {
ops.add(ZKUtilOp.deleteNodeFailSilent(eachRoot));
}
} catch (InterruptedException e) {
zkw.interruptedException(e);
}
}
submitBatchedMultiOrSequential(zkw, runSequentialOnMultiFailure, ops);
} | 3.68 |
morf_DataType_hasScale | /**
* @return Whether this DataType has a scale
*/
public boolean hasScale() {
return hasScale;
} | 3.68 |
framework_PushConfiguration_setParameter | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.PushConfiguration#setParameter(java.lang.String,
* java.lang.String)
*/
@Override
public void setParameter(String parameter, String value) {
getState().parameters.put(parameter, value);
} | 3.68 |
AreaShop_SignLinkerManager_enterSignLinkMode | /**
* Let a player enter sign linking mode.
* @param player The player that has to enter sign linking mode
* @param profile The profile to use for the signs (null for default)
*/
public void enterSignLinkMode(Player player, String profile) {
signLinkers.put(player.getUniqueId(), new SignLinker(player, profile));
plugin.message(player, "linksigns-first");
plugin.message(player, "linksigns-next");
if(!eventsRegistered) {
eventsRegistered = true;
plugin.getServer().getPluginManager().registerEvents(this, plugin);
}
} | 3.68 |
flink_UnorderedStreamElementQueue_isEmpty | /**
* True if there are no incomplete elements and all complete elements have been consumed.
*/
boolean isEmpty() {
return incompleteElements.isEmpty() && completedElements.isEmpty();
} | 3.68 |
hadoop_TypedBytesRecordInput_get | /**
* Get a thread-local typed bytes record input for the supplied
* {@link DataInput}.
*
* @param in data input object
* @return typed bytes record input corresponding to the supplied
* {@link DataInput}.
*/
public static TypedBytesRecordInput get(DataInput in) {
return get(TypedBytesInput.get(in));
} | 3.68 |
framework_UIInitHandler_getInitialUidl | /**
* Generates the initial UIDL message that can, e.g., be included in an HTML
* page to avoid a separate round trip just for getting the UIDL.
*
* @param request
* the request that caused the initialization
* @param uI
* the UI for which the UIDL should be generated
* @return a string with the initial UIDL message
* @throws IOException if writing the initial UIDL fails
*/
protected String getInitialUidl(VaadinRequest request, UI uI)
throws IOException {
try (StringWriter writer = new StringWriter()) {
writer.write("{");
VaadinSession session = uI.getSession();
if (session.getConfiguration().isXsrfProtectionEnabled()) {
writer.write(getSecurityKeyUIDL(session));
}
writer.write(getPushIdUIDL(session));
new UidlWriter().write(uI, writer, false);
writer.write("}");
String initialUIDL = writer.toString();
getLogger().log(Level.FINE, "Initial UIDL:" + initialUIDL);
return initialUIDL;
}
} | 3.68 |
hadoop_TimelineDomain_getWriters | /**
* Get the writer (and/or writer group) list string
*
* @return the writer (and/or writer group) list string
*/
@XmlElement(name = "writers")
public String getWriters() {
return writers;
} | 3.68 |
shardingsphere-elasticjob_ShardingService_isNeedSharding | /**
* Judge whether resharding is needed.
*
* @return whether resharding is needed
*/
public boolean isNeedSharding() {
return jobNodeStorage.isJobNodeExisted(ShardingNode.NECESSARY);
} | 3.68 |
pulsar_RangeCache_getRange | /**
*
* @param first
* the first key in the range
* @param last
* the last key in the range (inclusive)
* @return a collection of the values found in cache
*/
public Collection<Value> getRange(Key first, Key last) {
List<Value> values = new ArrayList<>();
// Return the values of the entries found in cache
for (Value value : entries.subMap(first, true, last, true).values()) {
try {
value.retain();
values.add(value);
} catch (Throwable t) {
// Value was already destroyed between get() and retain()
}
}
return values;
} | 3.68 |
framework_CalendarMonthDropHandler_dragEnter | /*
* (non-Javadoc)
*
* @see
* com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragEnter(com
* .vaadin.terminal.gwt.client.ui.dd.VDragEvent)
*/
@Override
public void dragEnter(VDragEvent drag) {
// NOOP, we determine drag acceptance in dragOver
} | 3.68 |
flink_SlotProfile_getPreferredAllocations | /** Returns the desired allocation ids for the slot. */
public Collection<AllocationID> getPreferredAllocations() {
return preferredAllocations;
} | 3.68 |
framework_AbstractDateField_setAssistiveLabel | /**
* Sets the assistive label for a calendar navigation element. This sets the
* {@code aria-label} attribute for the element which is used by screen
* reading software.
*
* @param element
* the element for which to set the label. Not {@code null}.
* @param label
* the assistive label to set
* @since 8.4
*/
public void setAssistiveLabel(AccessibleElement element, String label) {
Objects.requireNonNull(element, "Element cannot be null");
getState().assistiveLabels.put(element, label);
} | 3.68 |
querydsl_QueryBase_distinct | /**
* Set the Query to return distinct results
*
* @return the current object
*/
public Q distinct() {
return queryMixin.distinct();
} | 3.68 |
morf_SqlParameter_parametersFromColumns | /**
* Generates an iterable of parameters from columns.
*
* @param columns table columns.
* @return parameters matching these columns.
*/
public static Iterable<SqlParameter> parametersFromColumns(Iterable<Column> columns) {
return Iterables.transform(columns, new Function<Column, SqlParameter>() {
@Override
public SqlParameter apply(Column column) {
return new SqlParameter(column);
}
});
} | 3.68 |
hadoop_NativeTaskOutputFiles_getOutputFileForWrite | /**
* Create a local map output file name.
*
* @param size the size of the file
*/
public Path getOutputFileForWrite(long size) throws IOException {
String path = String.format(OUTPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.68 |
flink_TwoInputTransformation_setStateKeySelectors | /**
* Sets the {@link KeySelector KeySelectors} that must be used for partitioning keyed state of
* this transformation.
*
* @param stateKeySelector1 The {@code KeySelector} to set for the first input
* @param stateKeySelector2 The {@code KeySelector} to set for the second input
*/
public void setStateKeySelectors(
KeySelector<IN1, ?> stateKeySelector1, KeySelector<IN2, ?> stateKeySelector2) {
this.stateKeySelector1 = stateKeySelector1;
this.stateKeySelector2 = stateKeySelector2;
updateManagedMemoryStateBackendUseCase(
stateKeySelector1 != null || stateKeySelector2 != null);
} | 3.68 |
pulsar_AuthenticationDataProvider_getHttpHeaders | /**
*
* @return a set of all the HTTP header name/value entries, or null if there are none
*/
default Set<Map.Entry<String, String>> getHttpHeaders() throws Exception {
return null;
} | 3.68 |
querydsl_AbstractSQLQuery_startContext | /**
* Called to create and start a new SQL Listener context
*
* @param connection the database connection
* @param metadata the meta data for that context
* @return the newly started context
*/
protected SQLListenerContextImpl startContext(Connection connection, QueryMetadata metadata) {
SQLListenerContextImpl context = new SQLListenerContextImpl(metadata, connection);
if (parentContext != null) {
context.setData(PARENT_CONTEXT, parentContext);
}
listeners.start(context);
return context;
} | 3.68 |
framework_VaadinSession_getCumulativeRequestDuration | /**
* @return The total time spent servicing requests in this session, in
* milliseconds.
*/
public long getCumulativeRequestDuration() {
assert hasLock();
return cumulativeRequestDuration;
} | 3.68 |
pulsar_ManagedLedgerConfig_setMetadataOperationsTimeoutSeconds | /**
* Ledger-Op (Create/Delete) timeout after which callback will be completed with failure.
*
* @param metadataOperationsTimeoutSeconds the timeout in seconds
* @return this {@code ManagedLedgerConfig} instance
*/
public ManagedLedgerConfig setMetadataOperationsTimeoutSeconds(long metadataOperationsTimeoutSeconds) {
this.metadataOperationsTimeoutSeconds = metadataOperationsTimeoutSeconds;
return this;
} | 3.68 |
flink_FlinkCalciteCatalogReader_isLegacySourceOptions | /** Checks whether the {@link CatalogTable} uses legacy connector source options. */
private static boolean isLegacySourceOptions(CatalogSchemaTable schemaTable) {
// normalize option keys
DescriptorProperties properties = new DescriptorProperties(true);
properties.putProperties(
schemaTable.getContextResolvedTable().getResolvedTable().getOptions());
if (properties.containsKey(ConnectorDescriptorValidator.CONNECTOR_TYPE)) {
return true;
} else {
// try to create legacy table source using the options,
// some legacy factories uses the new 'connector' key
try {
// The input table is ResolvedCatalogTable that the
// rowtime/proctime contains {@link TimestampKind}. However, rowtime
// is the concept defined by the WatermarkGenerator and the
// WatermarkGenerator is responsible to convert the rowtime column
// to Long. For source, it only treats the rowtime column as regular
// timestamp. So, we erase the rowtime indicator here. Please take a
// look at the usage of the {@link
// DataTypeUtils#removeTimeAttribute}
ResolvedCatalogTable originTable =
schemaTable.getContextResolvedTable().getResolvedTable();
ResolvedSchema resolvedSchemaWithRemovedTimeAttribute =
TableSchemaUtils.removeTimeAttributeFromResolvedSchema(
originTable.getResolvedSchema());
TableFactoryUtil.findAndCreateTableSource(
schemaTable.getContextResolvedTable().getCatalog().orElse(null),
schemaTable.getContextResolvedTable().getIdentifier(),
new ResolvedCatalogTable(
CatalogTable.of(
Schema.newBuilder()
.fromResolvedSchema(
resolvedSchemaWithRemovedTimeAttribute)
.build(),
originTable.getComment(),
originTable.getPartitionKeys(),
originTable.getOptions()),
resolvedSchemaWithRemovedTimeAttribute),
new Configuration(),
schemaTable.isTemporary());
// success, then we will use the legacy factories
return true;
} catch (Throwable e) {
// fail, then we will use new factories
return false;
}
}
} | 3.68 |
hbase_RegionSplitCalculator_specialEndKey | /**
* SPECIAL CASE wrapper for empty end key
* @return ENDKEY if end key is empty, else normal endkey.
*/
private static <R extends KeyRange> byte[] specialEndKey(R range) {
byte[] end = range.getEndKey();
if (end.length == 0) {
return ENDKEY;
}
return end;
} | 3.68 |
flink_FutureUtils_combineAll | /**
* Creates a future that is complete once multiple other futures completed. The future fails
* (completes exceptionally) once one of the futures in the conjunction fails. Upon successful
* completion, the future returns the collection of the futures' results.
*
* <p>The ConjunctFuture gives access to how many Futures in the conjunction have already
* completed successfully, via {@link ConjunctFuture#getNumFuturesCompleted()}.
*
* @param futures The futures that make up the conjunction. No null entries are allowed.
* @return The ConjunctFuture that completes once all given futures are complete (or one fails).
*/
public static <T> ConjunctFuture<Collection<T>> combineAll(
Collection<? extends CompletableFuture<? extends T>> futures) {
checkNotNull(futures, "futures");
return new ResultConjunctFuture<>(futures);
} | 3.68 |
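A minimal usage sketch of the conjunct future documented above; the futures and result handling are hypothetical, and the usual java.util / java.util.concurrent imports plus Flink's FutureUtils class are assumed to be on the classpath.
// Combine two independent futures and read the collected results once both complete.
CompletableFuture<Integer> f1 = CompletableFuture.completedFuture(1);
CompletableFuture<Integer> f2 = CompletableFuture.completedFuture(2);
FutureUtils.ConjunctFuture<Collection<Integer>> all =
        FutureUtils.combineAll(Arrays.asList(f1, f2));
// getNumFuturesCompleted() reports progress; the result is the collection of the values.
all.thenAccept(results -> System.out.println("all done: " + results)); // e.g. [1, 2]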
hadoop_SocksSocketFactory_setProxy | /**
* Set the proxy of this socket factory as described in the string
* parameter
*
* @param proxyStr the proxy address using the format "host:port"
*/
private void setProxy(String proxyStr) {
String[] strs = proxyStr.split(":", 2);
if (strs.length != 2)
throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
String host = strs[0];
int port = Integer.parseInt(strs[1]);
this.proxy =
new Proxy(Proxy.Type.SOCKS, InetSocketAddress.createUnresolved(host,
port));
} | 3.68 |
flink_ExtractionUtils_isInvokable | /**
* Checks whether a method/constructor can be called with the given argument classes. This
* includes type widening and vararg. {@code null} is a wildcard.
*
* <p>E.g., {@code (int.class, int.class)} matches {@code f(Object...), f(int, int), f(Integer,
* Object)} and so forth.
*/
public static boolean isInvokable(Executable executable, Class<?>... classes) {
final int m = executable.getModifiers();
if (!Modifier.isPublic(m)) {
return false;
}
final int paramCount = executable.getParameterCount();
final int classCount = classes.length;
// check for enough classes for each parameter
if ((!executable.isVarArgs() && classCount != paramCount)
|| (executable.isVarArgs() && classCount < paramCount - 1)) {
return false;
}
int currentClass = 0;
for (int currentParam = 0; currentParam < paramCount; currentParam++) {
final Class<?> param = executable.getParameterTypes()[currentParam];
// last parameter is a vararg that needs to consume remaining classes
if (currentParam == paramCount - 1 && executable.isVarArgs()) {
final Class<?> paramComponent =
executable.getParameterTypes()[currentParam].getComponentType();
// we have more than 1 classes left so the vararg needs to consume them all
if (classCount - currentClass > 1) {
while (currentClass < classCount
&& ExtractionUtils.isAssignable(
classes[currentClass], paramComponent, true)) {
currentClass++;
}
} else if (currentClass < classCount
&& (parameterMatches(classes[currentClass], param)
|| parameterMatches(classes[currentClass], paramComponent))) {
currentClass++;
}
}
// entire parameter matches
else if (parameterMatches(classes[currentClass], param)) {
currentClass++;
}
}
// check if all classes have been consumed
return currentClass == classCount;
} | 3.68 |
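A hedged illustration of the matching rules described in the javadoc above; the Printer class and its overloads are hypothetical and exist only to show which calls are considered invokable with (int.class, int.class).
import java.lang.reflect.Method;

public class Printer {
    public static void print(Object... args) { }
    public static void print(int a, int b) { }

    public static void main(String[] args) throws Exception {
        Method varargs = Printer.class.getMethod("print", Object[].class);
        Method twoInts = Printer.class.getMethod("print", int.class, int.class);
        // Both match (int.class, int.class): ints widen/box towards Object, and the
        // vararg parameter consumes the remaining classes.
        System.out.println(ExtractionUtils.isInvokable(varargs, int.class, int.class)); // expected: true
        System.out.println(ExtractionUtils.isInvokable(twoInts, int.class, int.class)); // expected: true
    }
}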
dubbo_AccessLogData_get | /**
* Return the value stored under the given key.
*
* @param key the key to look up
* @return the value mapped to the key, or null if absent
*/
private Object get(String key) {
return data.get(key);
} | 3.68 |
framework_VPopupCalendar_makeDate | /**
* Creates a date based on the provided date values map. Any values of a
* more precise resolution than day are ignored.
*
* @param dateValues
* a map with date values to convert into a date
* @return the date based on the dateValues map
*/
@SuppressWarnings("deprecation")
public static Date makeDate(Map<DateResolution, Integer> dateValues) {
if (dateValues.get(YEAR) == null) {
return null;
}
Date date = new Date(2000 - 1900, 0, 1);
Integer year = dateValues.get(YEAR);
if (year != null) {
date.setYear(year - 1900);
}
Integer month = dateValues.get(MONTH);
if (month != null) {
date.setMonth(month - 1);
}
Integer day = dateValues.get(DAY);
if (day != null) {
date.setDate(day);
}
return date;
} | 3.68 |
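A small usage sketch built directly from the javadoc above; the concrete values are illustrative, and YEAR/MONTH/DAY correspond to the statically imported DateResolution constants used by the method.
// Build 15 March 2021 from a resolution map; resolutions finer than DAY would be ignored.
Map<DateResolution, Integer> dateValues = new HashMap<>();
dateValues.put(DateResolution.YEAR, 2021);
dateValues.put(DateResolution.MONTH, 3);  // 1-based month, converted to the 0-based Date month
dateValues.put(DateResolution.DAY, 15);
Date date = VPopupCalendar.makeDate(dateValues); // uses the deprecated java.util.Date setters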
hbase_HBaseZKTestingUtility_startMiniZKCluster | /**
* Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set the
* port mentioned is used as the default port for ZooKeeper.
*/
private MiniZooKeeperCluster startMiniZKCluster(File dir, int zooKeeperServerNum,
int[] clientPortList) throws Exception {
if (this.zkCluster != null) {
throw new IOException("Cluster already running at " + dir);
}
this.passedZkCluster = false;
this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
if (defPort > 0) {
// If there is a port in the config file, we use it.
this.zkCluster.setDefaultClientPort(defPort);
}
if (clientPortList != null) {
// Ignore extra client ports
int clientPortListSize = Math.min(clientPortList.length, zooKeeperServerNum);
for (int i = 0; i < clientPortListSize; i++) {
this.zkCluster.addClientPort(clientPortList[i]);
}
}
int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
return this.zkCluster;
} | 3.68 |
framework_BaseLayoutTestUI_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
mainLayout.setMargin(false);
mainLayout.setSpacing(false);
mainLayout.setSizeUndefined();
getLayout().setSizeUndefined();
if (getLayout().getParent() instanceof VerticalLayout) {
((VerticalLayout) getLayout().getParent()).setSizeUndefined();
}
mainLayout.addComponent(l1);
mainLayout.addComponent(l2);
addComponent(mainLayout);
} | 3.68 |
open-banking-gateway_HbciConsentInfo_isWrongPassword | /**
* Checks whether the PSU password that was sent to the ASPSP was wrong.
*/
public boolean isWrongPassword(HbciContext ctx) {
return null != ctx.getWrongAuthCredentials() && ctx.getWrongAuthCredentials();
} | 3.68 |
framework_CellReference_getValue | /**
* Gets the value of the cell.
*
* @return the value of the cell
*/
public Object getValue() {
return getColumn().getValue(getRow());
} | 3.68 |
flink_FileRecordFormat_getCheckpointedPosition | /**
* Optionally returns the current position of the reader. This can be implemented by readers
* that want to speed up recovery from a checkpoint.
*
* <p>The current position of the reader is the position of the next record that will be
* returned in a call to {@link #read()}.
*
* <p>See the {@link FileRecordFormat top-level class comment} (section "Checkpointing") for
* details.
*/
@Nullable
default CheckpointedPosition getCheckpointedPosition() {
return null;
} | 3.68 |
hadoop_IOStatisticsBinding_maybeUpdateMaximum | /**
* Update a maximum value tracked in an atomic long.
* This is thread safe - it uses compareAndSet to ensure
* that Thread T1 whose sample is greater than the current
* value never overwrites an update from thread T2 whose
* sample was also higher - and which completed first.
* @param dest destination for all changes.
* @param sample sample to update.
*/
public static void maybeUpdateMaximum(AtomicLong dest, long sample) {
boolean done;
do {
long current = dest.get();
if (sample > current) {
done = dest.compareAndSet(current, sample);
} else {
done = true;
}
} while (!done);
} | 3.68 |
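A hedged sketch of how several threads can race through the compare-and-set loop above; the metric name and sampling logic are hypothetical.
// Each worker records a sample; the stored maximum only ever grows, even under contention.
AtomicLong maxLatencyMicros = new AtomicLong(Long.MIN_VALUE);
Runnable sampler = () -> {
    long sample = ThreadLocalRandom.current().nextLong(10_000);
    IOStatisticsBinding.maybeUpdateMaximum(maxLatencyMicros, sample);
};
// After running sampler from many threads, maxLatencyMicros.get() holds the largest observed sample.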
MagicPlugin_CompatibilityLib_isLegacy | // This is here as a bit of a hack, MaterialAndData needs to know how to parse materials, but this is used
// by the MaterialSetTest test framework, where we don't actually have a server and can't really
// initialize CompatibilityLib.
// Kind of ugly, but this sidesteps the problem.
public static boolean isLegacy(Material material) {
CompatibilityUtils compatibilityUtils = platform == null ? null : platform.getCompatibilityUtils();
return compatibilityUtils == null ? false : compatibilityUtils.isLegacy(material);
} | 3.68 |
framework_FlyweightRow_setup | /**
* Configure this FlyweightRow for the current use. This method is expected
* to be called multiple times during the Grid's life-cycle.
*
* @param e
* the root element for this row
* @param row
* the row index
* @param columnWidths
* widths for each column on the row
* @see FlyweightRow
*/
public void setup(final TableRowElement e, final int row,
double[] columnWidths) {
element = e;
this.row = row;
this.columnWidths = columnWidths;
} | 3.68 |
hbase_HRegion_mutateRowsWithLocks | /**
* Perform atomic (all or none) mutations within the region.
* @param mutations The list of mutations to perform. <code>mutations</code> can contain
* operations for multiple rows. Caller has to ensure that all rows are
* contained in this region.
* @param rowsToLock Rows to lock. If multiple rows are locked, care should be taken that
*                   <code>rowsToLock</code> is sorted in order to avoid deadlocks.
* @param nonceGroup Optional nonce group of the operation (client Id)
* @param nonce      Optional nonce of the operation (unique random id to ensure "more
*                   idempotence")
*/
@Override
public void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock,
long nonceGroup, long nonce) throws IOException {
batchMutate(new MutationBatchOperation(this, mutations.toArray(new Mutation[mutations.size()]),
true, nonceGroup, nonce) {
@Override
public MiniBatchOperationInProgress<Mutation>
lockRowsAndBuildMiniBatch(List<RowLock> acquiredRowLocks) throws IOException {
RowLock prevRowLock = null;
for (byte[] row : rowsToLock) {
try {
RowLock rowLock = region.getRowLock(row, false, prevRowLock); // write lock
if (rowLock != prevRowLock) {
acquiredRowLocks.add(rowLock);
prevRowLock = rowLock;
}
} catch (IOException ioe) {
LOG.warn("Failed getting lock, row={}, in region {}", Bytes.toStringBinary(row), this,
ioe);
throw ioe;
}
}
return createMiniBatch(size(), size());
}
});
} | 3.68 |
flink_StreamProjection_projectTuple20 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>
SingleOutputStreamOperator<
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>
projectTuple20() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>
tType =
new TupleTypeInfo<
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple20<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
hadoop_BoundedByteArrayOutputStream_size | /**
* Returns the length of the valid data
* currently in the buffer.
*
* @return the length of the valid data.
*/
public int size() {
return currentPointer - startOffset;
} | 3.68 |
framework_Notification_setDescription | /**
* Sets the description part of the notification message.
*
* @param description
* The message description
*/
public void setDescription(String description) {
getState().description = description;
} | 3.68 |
hadoop_NMContainerStatus_newInstance | // Used by tests only
public static NMContainerStatus newInstance(ContainerId containerId,
int version, ContainerState containerState, Resource allocatedResource,
String diagnostics, int containerExitStatus, Priority priority,
long creationTime) {
return newInstance(containerId, version, containerState, allocatedResource,
diagnostics, containerExitStatus, priority, creationTime,
CommonNodeLabelsManager.NO_LABEL, ExecutionType.GUARANTEED, -1);
} | 3.68 |
hadoop_ClusterMetrics_getBlackListedTaskTrackerCount | /**
* Get the number of blacklisted trackers in the cluster.
*
* @return blacklisted tracker count
*/
public int getBlackListedTaskTrackerCount() {
return numBlacklistedTrackers;
} | 3.68 |
hadoop_DirectBufferPool_getBuffer | /**
* Allocate a direct buffer of the specified size, in bytes.
* If a pooled buffer is available, returns that. Otherwise
* allocates a new one.
*
* @param size size.
* @return ByteBuffer.
*/
public ByteBuffer getBuffer(int size) {
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
// no available buffers for this size
return ByteBuffer.allocateDirect(size);
}
WeakReference<ByteBuffer> ref;
while ((ref = list.poll()) != null) {
ByteBuffer b = ref.get();
if (b != null) {
return b;
}
}
return ByteBuffer.allocateDirect(size);
} | 3.68 |
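A brief usage sketch; it assumes the pool's companion returnBuffer(ByteBuffer) method (part of the same Hadoop utility class) is used to recycle the buffer after use.
DirectBufferPool pool = new DirectBufferPool();
ByteBuffer buf = pool.getBuffer(4096); // reuses a pooled 4 KiB buffer if one is available
try {
    // ... fill and drain the direct buffer ...
} finally {
    pool.returnBuffer(buf); // hand it back so later getBuffer(4096) calls can reuse it
}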
flink_ClusterEntrypoint_getRPCPortRange | /**
* Returns the port range for the common {@link RpcService}.
*
* @param configuration to extract the port range from
* @return Port range for the common {@link RpcService}
*/
protected String getRPCPortRange(Configuration configuration) {
if (ZooKeeperUtils.isZooKeeperRecoveryMode(configuration)) {
return configuration.getString(HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE);
} else {
return String.valueOf(configuration.getInteger(JobManagerOptions.PORT));
}
} | 3.68 |
framework_ApplicationConnection_showCommunicationError | /**
* Shows the communication error notification.
*
* @param details
* Optional details.
* @param statusCode
* The status code returned for the request
*
*/
public void showCommunicationError(String details, int statusCode) {
getLogger().severe("Communication error: " + details);
showError(details, configuration.getCommunicationError());
} | 3.68 |
hbase_TableDescriptorBuilder_getNormalizerTargetRegionSize | /**
* Check if the target region size is set. If so, the normalize plan will be calculated based
* on the target region size.
* @return target region size after normalize done
*/
@Override
public long getNormalizerTargetRegionSize() {
long target_region_size =
getOrDefault(NORMALIZER_TARGET_REGION_SIZE_MB_KEY, Long::valueOf, Long.valueOf(-1));
return target_region_size == Long.valueOf(-1)
? getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1))
: target_region_size;
} | 3.68 |
hadoop_BaseTableRW_getTableMutator | /**
* Used to create a type-safe mutator for this table.
*
* @param hbaseConf used to read table name.
* @param conn used to create a table from.
* @return a type safe {@link BufferedMutator} for the entity table.
* @throws IOException if any exception occurs while creating mutator for the
* table.
*/
public TypedBufferedMutator<T> getTableMutator(Configuration hbaseConf,
Connection conn) throws IOException {
TableName tableName = this.getTableName(hbaseConf);
// Plain buffered mutator
BufferedMutator bufferedMutator = conn.getBufferedMutator(tableName);
// Now make this thing type safe.
// This is how service initialization should hang on to this variable, with
// the proper type
TypedBufferedMutator<T> table =
new TypedBufferedMutator<T>(bufferedMutator);
return table;
} | 3.68 |
framework_Escalator_hasSomethingInDom | /**
* Check whether there are any cells in the DOM.
*
* @return <code>true</code> if header, body or footer has any child
* elements
*/
private boolean hasSomethingInDom() {
return headElem.hasChildNodes() || bodyElem.hasChildNodes()
|| footElem.hasChildNodes();
} | 3.68 |
framework_VProgressBar_setIndeterminate | /**
* Sets whether or not this progress indicator is indeterminate. In
* indeterminate mode there is an animation indicating that the task is
* running but without providing any information about the current progress.
*
* @param indeterminate
* {@code true} to set to indeterminate mode, {@code false}
* otherwise
*/
public void setIndeterminate(boolean indeterminate) {
this.indeterminate = indeterminate;
setStyleName(getStylePrimaryName() + "-indeterminate", indeterminate);
} | 3.68 |
hbase_SplitLogWorker_stopTask | /**
* If the worker is doing a task, i.e. splitting a log file, then stop the task. It doesn't exit the
* worker thread.
*/
public void stopTask() {
LOG.info("Sending interrupt to stop the worker thread");
worker.interrupt(); // TODO interrupt often gets swallowed, do what else?
} | 3.68 |
framework_DataCommunicator_getUpdatedData | /**
* Returns the currently set updated data.
*
* @return the set of data that should be updated on the next response
* @since 8.0.6
*/
protected Set<T> getUpdatedData() {
return updatedData;
} | 3.68 |
hbase_ThriftUtilities_putFromThrift | /**
* Creates a {@link Put} (HBase) from a {@link TPut} (Thrift)
* @param in the <code>TPut</code> to convert
* @return converted <code>Put</code>
*/
public static Put putFromThrift(TPut in) {
Put out;
if (in.isSetTimestamp()) {
out = new Put(in.getRow(), in.getTimestamp());
} else {
out = new Put(in.getRow());
}
if (in.isSetDurability()) {
out.setDurability(durabilityFromThrift(in.getDurability()));
}
for (TColumnValue columnValue : in.getColumnValues()) {
try {
if (columnValue.isSetTimestamp()) {
out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(out.getRow())
.setFamily(columnValue.getFamily()).setQualifier(columnValue.getQualifier())
.setTimestamp(columnValue.getTimestamp()).setType(Cell.Type.Put)
.setValue(columnValue.getValue()).build());
} else {
out.add(CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(out.getRow())
.setFamily(columnValue.getFamily()).setQualifier(columnValue.getQualifier())
.setTimestamp(out.getTimestamp()).setType(Cell.Type.Put)
.setValue(columnValue.getValue()).build());
}
} catch (IOException e) {
throw new IllegalArgumentException((e));
}
}
if (in.isSetAttributes()) {
addAttributes(out, in.getAttributes());
}
if (in.getCellVisibility() != null) {
out.setCellVisibility(new CellVisibility(in.getCellVisibility().getExpression()));
}
return out;
} | 3.68 |
hudi_MarkerHandler_createMarker | /**
* Generates a future for an async marker creation request.
*
* The future is added to the marker creation future list and waits for the next batch processing
* of marker creation requests.
*
* @param context Javalin app context
* @param markerDir marker directory path
* @param markerName marker name
* @return the {@code CompletableFuture} instance for the request
*/
public CompletableFuture<String> createMarker(Context context, String markerDir, String markerName, String basePath) {
// Step 1: do early conflict detection if enabled
if (timelineServiceConfig.earlyConflictDetectionEnable) {
try {
synchronized (earlyConflictDetectionLock) {
if (earlyConflictDetectionStrategy == null) {
String strategyClassName = timelineServiceConfig.earlyConflictDetectionStrategy;
if (!ReflectionUtils.isSubClass(strategyClassName, TimelineServerBasedDetectionStrategy.class)) {
LOG.warn("Cannot use " + strategyClassName + " for timeline-server-based markers.");
strategyClassName = "org.apache.hudi.timeline.service.handlers.marker.AsyncTimelineServerBasedDetectionStrategy";
LOG.warn("Falling back to " + strategyClassName);
}
earlyConflictDetectionStrategy =
(TimelineServerBasedDetectionStrategy) ReflectionUtils.loadClass(
strategyClassName, basePath, markerDir, markerName, timelineServiceConfig.checkCommitConflict);
}
// markerDir => $base_path/.hoodie/.temp/$instant_time
// If markerDir is changed, e.g. when moving to the next instant action, we need to refresh this earlyConflictDetectionStrategy.
// For marker creation actions related to a specific instant, we only perform this check/refresh once
// instead of starting the conflict detector for every request
if (!markerDir.equalsIgnoreCase(currentMarkerDir)) {
this.currentMarkerDir = markerDir;
Set<String> actions = CollectionUtils.createSet(COMMIT_ACTION, DELTA_COMMIT_ACTION, REPLACE_COMMIT_ACTION);
Set<HoodieInstant> completedCommits = new HashSet<>(
viewManager.getFileSystemView(basePath)
.getTimeline()
.filterCompletedInstants()
.filter(instant -> actions.contains(instant.getAction()))
.getInstants());
earlyConflictDetectionStrategy.startAsyncDetection(
timelineServiceConfig.asyncConflictDetectorInitialDelayMs,
timelineServiceConfig.asyncConflictDetectorPeriodMs,
markerDir, basePath, timelineServiceConfig.maxAllowableHeartbeatIntervalInMs,
fileSystem, this, completedCommits);
}
}
earlyConflictDetectionStrategy.detectAndResolveConflictIfNecessary();
} catch (HoodieEarlyConflictDetectionException he) {
LOG.warn("Detected the write conflict due to a concurrent writer, "
+ "failing the marker creation as the early conflict detection is enabled", he);
return finishCreateMarkerFuture(context, markerDir, markerName);
} catch (Exception e) {
LOG.warn("Failed to execute early conflict detection." + e.getMessage());
// When early conflict detection fails to execute, we still allow the marker creation
// to continue
return addMarkerCreationRequestForAsyncProcessing(context, markerDir, markerName);
}
}
// Step 2 create marker
return addMarkerCreationRequestForAsyncProcessing(context, markerDir, markerName);
} | 3.68 |
hudi_HoodieCatalogUtil_isEmbeddedMetastore | /**
* Check whether the hive.metastore.uris is empty
*/
public static boolean isEmbeddedMetastore(HiveConf hiveConf) {
return isNullOrWhitespaceOnly(hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS));
} | 3.68 |
framework_Table_isSortAscending | /**
* Is the table currently sorted in ascending order.
*
* @return <code>true</code> if ascending, <code>false</code> if descending.
*/
public boolean isSortAscending() {
return sortAscending;
} | 3.68 |
flink_PipelinedSubpartition_needNotifyPriorityEvent | // It is just called after adding a priority event.
@GuardedBy("buffers")
private boolean needNotifyPriorityEvent() {
assert Thread.holdsLock(buffers);
// if subpartition is blocked then downstream doesn't expect any notifications
return buffers.getNumPriorityElements() == 1 && !isBlocked;
} | 3.68 |
pulsar_Runnables_catchingAndLoggingThrowables | /**
* Wraps a Runnable so that throwables are caught and logged when a Runnable is run.
*
* The main use case for this method is to be used in
* {@link java.util.concurrent.ScheduledExecutorService#scheduleAtFixedRate(Runnable, long, long, TimeUnit)}
* calls to ensure that the scheduled task doesn't get cancelled as a result of an uncaught exception.
*
* @param runnable The runnable to wrap
* @return a wrapped Runnable
*/
public static Runnable catchingAndLoggingThrowables(Runnable runnable) {
return new CatchingAndLoggingRunnable(runnable);
} | 3.68 |
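A short sketch of the scheduling use case mentioned in the javadoc; the executor setup and the flushPendingStats task are hypothetical.
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
// Without the wrapper, one uncaught exception would silently cancel the periodic task.
scheduler.scheduleAtFixedRate(
        Runnables.catchingAndLoggingThrowables(() -> flushPendingStats()),
        0, 30, TimeUnit.SECONDS);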
hbase_HBaseConfiguration_main | /**
* For debugging. Dump configurations to system output as xml format. Master and RS configurations
* can also be dumped using http services. e.g. "curl http://master:16010/dump"
*/
public static void main(String[] args) throws Exception {
HBaseConfiguration.create().writeXml(System.out);
} | 3.68 |
flink_DualInputOperator_setFirstInput | /**
* Sets the first input to the union of the given operators.
*
* @param inputs The operator(s) that form the first input.
* @deprecated This method will be removed in future versions. Use the {@link Union} operator
* instead.
*/
@Deprecated
public void setFirstInput(Operator<IN1>... inputs) {
this.input1 = Operator.createUnionCascade(inputs);
} | 3.68 |
hadoop_WeightedPolicyInfo_setRouterPolicyWeights | /**
* Setter method for Router weights.
*
* @param policyWeights the router weights.
*/
public void setRouterPolicyWeights(
Map<SubClusterIdInfo, Float> policyWeights) {
this.routerPolicyWeights = policyWeights;
} | 3.68 |
flink_TypeExtractor_isRecord | /**
* Determine whether the given class is a valid Java record.
*
* @param clazz class to check
* @return True if the class is a Java record
*/
@PublicEvolving
public static boolean isRecord(Class<?> clazz) {
return clazz.getSuperclass().getName().equals("java.lang.Record")
&& (clazz.getModifiers() & Modifier.FINAL) != 0;
} | 3.68 |
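A hedged example of the record check; Point is a hypothetical record type and requires Java 16+.
// Hypothetical record type used only for illustration.
record Point(int x, int y) { }

boolean rec = TypeExtractor.isRecord(Point.class);    // true: superclass is java.lang.Record and the class is final
boolean plain = TypeExtractor.isRecord(String.class); // false: superclass is java.lang.Object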
hudi_OptionsResolver_isIncrementalQuery | /**
* Returns whether the query is incremental.
*/
public static boolean isIncrementalQuery(Configuration conf) {
return conf.getOptional(FlinkOptions.READ_START_COMMIT).isPresent() || conf.getOptional(FlinkOptions.READ_END_COMMIT).isPresent();
} | 3.68 |
framework_Margins_setMarginBottom | /**
* Sets the height of the bottom margin.
*
* @param marginBottom
* the bottom margin to set (in pixels)
*/
public void setMarginBottom(int marginBottom) {
this.marginBottom = marginBottom;
updateVertical();
} | 3.68 |
flink_FlinkHints_getAllJoinHints | /** Get all join hints. */
public static List<RelHint> getAllJoinHints(List<RelHint> allHints) {
return allHints.stream()
.filter(hint -> JoinStrategy.isJoinStrategy(hint.hintName))
.collect(Collectors.toList());
} | 3.68 |
flink_DataSet_write | /**
* Writes a DataSet using a {@link FileOutputFormat} to a specified location. This method adds a
* data sink to the program.
*
* @param outputFormat The FileOutputFormat to write the DataSet.
* @param filePath The path to the location where the DataSet is written.
* @param writeMode The mode of writing, indicating whether to overwrite existing files.
* @return The DataSink that writes the DataSet.
* @see FileOutputFormat
*/
public DataSink<T> write(
FileOutputFormat<T> outputFormat, String filePath, WriteMode writeMode) {
Preconditions.checkNotNull(filePath, "File path must not be null.");
Preconditions.checkNotNull(writeMode, "Write mode must not be null.");
Preconditions.checkNotNull(outputFormat, "Output format must not be null.");
outputFormat.setOutputFilePath(new Path(filePath));
outputFormat.setWriteMode(writeMode);
return output(outputFormat);
} | 3.68 |
hbase_MasterRpcServices_restoreSnapshot | /**
* Execute Restore/Clone snapshot operation.
* <p>
* If the specified table exists a "Restore" is executed, replacing the table schema and directory
* data with the content of the snapshot. The table must be disabled, or a
* UnsupportedOperationException will be thrown.
* <p>
* If the table doesn't exist a "Clone" is executed, a new table is created using the schema at
* the time of the snapshot, and the content of the snapshot.
* <p>
* The restore/clone operation does not require copying HFiles. Since HFiles are immutable the
* table can point to and use the same files as the original one.
*/
@Override
public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
RestoreSnapshotRequest request) throws ServiceException {
try {
long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
request.getNonce(), request.getRestoreACL(), request.getCustomSFT());
return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
hudi_HoodieInputFormatUtils_getHoodieTimelineForIncrementalQuery | /**
* Get HoodieTimeline for incremental query from Hive map reduce configuration.
*
* @param job the Hive job context
* @param tableName the name of the table being queried
* @param timeline the timeline to filter
* @return the timeline filtered to instants after the last incremental timestamp, limited to the configured number of commits
*/
public static HoodieTimeline getHoodieTimelineForIncrementalQuery(JobContext job, String tableName, HoodieTimeline timeline) {
String lastIncrementalTs = HoodieHiveUtils.readStartCommitTime(job, tableName);
// Total number of commits to return in this batch. Set this to -1 to get all the commits.
Integer maxCommits = HoodieHiveUtils.readMaxCommits(job, tableName);
LOG.info("Last Incremental timestamp was set as " + lastIncrementalTs);
return timeline.findInstantsAfter(lastIncrementalTs, maxCommits);
} | 3.68 |
pulsar_ClientCnxIdleState_isIdle | /**
* @return Whether this connection is in idle.
*/
public boolean isIdle() {
return getIdleStat() == State.IDLE;
} | 3.68 |
hudi_ClusteringOperator_readRecordsForGroupBaseFiles | /**
* Read records from baseFiles and get iterator.
*/
private Iterator<RowData> readRecordsForGroupBaseFiles(List<ClusteringOperation> clusteringOps) {
List<Iterator<RowData>> iteratorsForPartition = clusteringOps.stream().map(clusteringOp -> {
Iterable<IndexedRecord> indexedRecords = () -> {
try {
HoodieFileReaderFactory fileReaderFactory = HoodieFileReaderFactory.getReaderFactory(table.getConfig().getRecordMerger().getRecordType());
HoodieAvroFileReader fileReader = (HoodieAvroFileReader) fileReaderFactory.getFileReader(table.getHadoopConf(), new Path(clusteringOp.getDataFilePath()));
return new CloseableMappingIterator<>(fileReader.getRecordIterator(readerSchema), HoodieRecord::getData);
} catch (IOException e) {
throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
+ " and " + clusteringOp.getDeltaFilePaths(), e);
}
};
return StreamSupport.stream(indexedRecords.spliterator(), false).map(this::transform).iterator();
}).collect(Collectors.toList());
return new ConcatenatingIterator<>(iteratorsForPartition);
} | 3.68 |
framework_RandomLayoutStress_fillLayout | /**
* Add demo components to the given layout.
*
* @param layout the layout to fill
* @param numberOfComponents the number of random components to add
*/
private void fillLayout(Layout layout, int numberOfComponents) {
for (int i = 0; i < numberOfComponents; i++) {
layout.addComponent(getRandomComponent(i));
}
} | 3.68 |
MagicPlugin_Wand_wasInventoryOpen | // Somewhat hacky method to handle inventory close event knowing that this was a wand inventory that just closed.
public boolean wasInventoryOpen() {
return inventoryWasOpen;
} | 3.68 |
hbase_ByteBuffAllocator_putbackBuffer | /**
* Return back a ByteBuffer after its use. Don't read/write the ByteBuffer after the returning.
* @param buf ByteBuffer to return.
*/
protected void putbackBuffer(ByteBuffer buf) {
if (buf.capacity() != bufSize || (reservoirEnabled ^ buf.isDirect())) {
LOG.warn("Trying to put a buffer, not created by this pool! Will be just ignored");
return;
}
buffers.offer(buf);
} | 3.68 |
flink_FileDataIndexCache_getCachedRegionContainsTargetBufferIndex | /**
* Get the cached in-memory region that contains the target buffer.
*
* @param subpartitionId the subpartition that the target buffer belongs to.
* @param bufferIndex the index of target buffer.
* @return If target region is cached in memory, return optional contains target region.
* Otherwise, return {@code Optional#empty()};
*/
private Optional<T> getCachedRegionContainsTargetBufferIndex(
int subpartitionId, int bufferIndex) {
return Optional.ofNullable(
subpartitionFirstBufferIndexRegions
.get(subpartitionId)
.floorEntry(bufferIndex))
.map(Map.Entry::getValue)
.filter(internalRegion -> internalRegion.containBuffer(bufferIndex));
} | 3.68 |
flink_MemorySegment_getLongLittleEndian | /**
* Reads a long integer value (64bit, 8 bytes) from the given position, in little endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getLong(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getLong(int)} is the
* preferable choice.
*
* @param index The position from which the value will be read.
* @return The long value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public long getLongLittleEndian(int index) {
if (LITTLE_ENDIAN) {
return getLong(index);
} else {
return Long.reverseBytes(getLong(index));
}
} | 3.68 |