name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
framework_MultipleUIUploadTest_getFileName | /**
* Returns the fileName.
*
* @return String
*/
public String getFileName() {
return fileName;
} | 3.68 |
hbase_RegionSplitter_convertToByte | /**
* Returns the bytes corresponding to the BigInteger
* @param bigInteger number to convert
* @return corresponding bytes
*/
public byte[] convertToByte(BigInteger bigInteger) {
return convertToByte(bigInteger, rowComparisonLength);
} | 3.68 |
pulsar_ManagedLedgerFactoryImpl_getManagedLedgers | /**
* Helper for getting stats.
*
 * @return a map of the already created managed ledgers, keyed by ledger name
*/
public Map<String, ManagedLedgerImpl> getManagedLedgers() {
// Return a view of already created ledger by filtering futures not yet completed
return Maps.filterValues(Maps.transformValues(ledgers, future -> future.getNow(null)), Predicates.notNull());
} | 3.68 |
framework_VTabsheet_setAssistiveDescription | /**
* Sets the {@code aria-describedby} attribute for this tab element to
* the referenced id. This should be called when this tab receives focus
* and has a tooltip configured.
*
* @param descriptionId
* the unique id of the tooltip element
*/
public void setAssistiveDescription(String descriptionId) {
Roles.getTablistRole().setAriaDescribedbyProperty(getElement(),
Id.of(descriptionId));
} | 3.68 |
framework_AbstractInMemoryContainer_isPropertyFiltered | /**
* Checks if there is a filter that applies to a given property.
*
 * @param propertyId the id of the property to check
* @return true if there is an active filter for the property
*/
protected boolean isPropertyFiltered(Object propertyId) {
if (getFilters().isEmpty() || propertyId == null) {
return false;
}
for (final Filter f : getFilters()) {
if (f.appliesToProperty(propertyId)) {
return true;
}
}
return false;
} | 3.68 |
flink_UpsertTestFileUtil_getNumberOfRecords | /**
* Returns the total number of records written using the {@link UpsertTestSinkWriter} to the
* given File.
*
* @param file The File to read from
* @return the number of records
 * @throws IOException if reading from the file fails
*/
public static int getNumberOfRecords(File file) throws IOException {
checkNotNull(file);
FileInputStream fs = new FileInputStream(file);
BufferedInputStream bis = new BufferedInputStream(fs);
return getNumberOfRecords(bis);
} | 3.68 |
hbase_MasterRpcServices_isProcedureDone | /**
* Checks if the specified procedure is done.
* @return true if the procedure is done, false if the procedure is in the process of completing
* @throws ServiceException if invalid procedure or failed procedure with progress failure reason.
*/
@Override
public IsProcedureDoneResponse isProcedureDone(RpcController controller,
IsProcedureDoneRequest request) throws ServiceException {
try {
server.checkInitialized();
ProcedureDescription desc = request.getProcedure();
MasterProcedureManager mpm =
server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
if (mpm == null) {
throw new ServiceException("The procedure is not registered: " + desc.getSignature());
}
LOG.debug("Checking to see if procedure from request:" + desc.getSignature() + " is done");
IsProcedureDoneResponse.Builder builder = IsProcedureDoneResponse.newBuilder();
boolean done = mpm.isProcedureDone(desc);
builder.setDone(done);
return builder.build();
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
framework_Table_removeFooterClickListener | /**
* Removes a footer click listener.
*
* @param listener
* The listener to remove.
*/
public void removeFooterClickListener(FooterClickListener listener) {
removeListener(TableConstants.FOOTER_CLICK_EVENT_ID,
FooterClickEvent.class, listener);
} | 3.68 |
dubbo_MeshRuleRouter_computeDestination | /**
* Compute Destination Subset
*/
protected String computeDestination(
MeshRuleCache<T> meshRuleCache,
String appName,
DubboDestination dubboDestination,
BitList<Invoker<T>> availableInvokers)
throws RpcException {
String subset = dubboDestination.getSubset();
do {
BitList<Invoker<T>> result = meshRuleCache.getSubsetInvokers(appName, subset);
if (CollectionUtils.isNotEmpty(result)
&& !availableInvokers.clone().and(result).isEmpty()) {
return subset;
}
// fall back
DubboRouteDestination dubboRouteDestination = dubboDestination.getFallback();
if (dubboRouteDestination == null) {
break;
}
dubboDestination = dubboRouteDestination.getDestination();
if (dubboDestination == null) {
break;
}
subset = dubboDestination.getSubset();
} while (true);
return null;
} | 3.68 |
rocketmq-connect_Serdes_Float | /**
* A serde for nullable {@code Float} type.
*/
static public Serde<Float> Float() {
return new FloatSerde();
} | 3.68 |
pulsar_WindowManager_compactWindow | /**
* expires events that fall out of the window every
* EXPIRE_EVENTS_THRESHOLD so that the window does not grow
* too big.
*/
protected void compactWindow() {
if (eventsSinceLastExpiry.incrementAndGet() >= EXPIRE_EVENTS_THRESHOLD) {
scanEvents(false);
}
} | 3.68 |
flink_Tuple4_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
 * arguments implicitly. For example: {@code Tuple4.of(n, x, s, b)} instead of {@code new
 * Tuple4<Integer, Double, String, Boolean>(n, x, s, b)}
*/
public static <T0, T1, T2, T3> Tuple4<T0, T1, T2, T3> of(T0 f0, T1 f1, T2 f2, T3 f3) {
return new Tuple4<>(f0, f1, f2, f3);
} | 3.68 |
pulsar_TripleLongPriorityQueue_peekN1 | /**
* Read the 1st long item in the top tuple in the priority queue.
*
* <p>The tuple will not be extracted
*/
public long peekN1() {
checkArgument(tuplesCount != 0);
return array.readLong(0);
} | 3.68 |
hbase_StoreFileWriter_appendMobMetadata | /**
 * Appends MOB-specific metadata (even if it is empty).
 * @param mobRefSet mapping from original table name to the set of referenced MOB file names
* @throws IOException problem writing to FS
*/
public void appendMobMetadata(SetMultimap<TableName, String> mobRefSet) throws IOException {
writer.appendFileInfo(MOB_FILE_REFS, MobUtils.serializeMobFileRefs(mobRefSet));
} | 3.68 |
framework_ComponentLocator_getElementsByPath | /**
* Locates elements using a String locator (path) which identifies DOM
* elements.
*
* @since 7.2
* @param path
* The String locator which identifies target elements.
* @return The JavaScriptArray of DOM elements identified by {@code path} or
* empty array if elements could not be located.
*/
public JsArray<Element> getElementsByPath(String path) {
JsArray<Element> jsElements = JavaScriptObject.createArray().cast();
for (LocatorStrategy strategy : locatorStrategies) {
if (strategy.validatePath(path)) {
List<Element> elements = strategy.getElementsByPath(path);
if (!elements.isEmpty()) {
for (Element e : elements) {
jsElements.push(e);
}
return jsElements;
}
}
}
return jsElements;
} | 3.68 |
flink_ResolvedSchema_getColumn | /**
* Returns the {@link Column} instance for the given column name.
*
* @param columnName the name of the column
*/
public Optional<Column> getColumn(String columnName) {
return this.columns.stream()
.filter(column -> column.getName().equals(columnName))
.findFirst();
} | 3.68 |
hbase_Increment_add | /**
* Add the specified KeyValue to this operation.
* @param cell individual Cell
 * @return this, for method chaining
 * @throws java.io.IOException if the given Cell cannot be added to this operation
*/
@Override
public Increment add(Cell cell) throws IOException {
super.add(cell);
return this;
} | 3.68 |
open-banking-gateway_PairIdPsuAspspTuple_buildPrvKey | /**
* Creates PSU - ASPSP private key pair entity.
* @param path Datasafe path
* @param em Entity manager to persist to
* @return KeyPair template
*/
public static PsuAspspPrvKey buildPrvKey(String path, EntityManager em) {
PairIdPsuAspspTuple tuple = new PairIdPsuAspspTuple(path);
if (null == tuple.getPairId()) {
throw new IllegalArgumentException("Pair id missing");
}
return PsuAspspPrvKey.builder()
.id(tuple.getPairId())
.psu(em.find(Psu.class, tuple.getPsuId()))
.aspsp(em.find(Bank.class, tuple.getAspspId()))
.build();
} | 3.68 |
hbase_RegionMover_unload | /**
 * Unload regions from the given {@link #hostname} using ack/noAck mode and {@link #maxthreads}. In
 * noAck mode we do not make sure that the region is successfully online on the target region
 * server, hence it is best effort. We do not unload regions to hostnames given in
 * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions
 * to hostnames provided in {@link #designatedFile}.
* @return true if unloading succeeded, false otherwise
*/
public boolean unload() throws InterruptedException, ExecutionException, TimeoutException {
return unloadRegions(false);
} | 3.68 |
morf_OracleDialect_connectionTestStatement | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#connectionTestStatement()
*/
@Override
public String connectionTestStatement() {
return "select 1 from dual";
} | 3.68 |
druid_Base64_byteArrayToBase64 | /**
* Translates the specified byte array into a Base64 string as per Preferences.put(byte[]).
*/
public static String byteArrayToBase64(byte[] a) {
return byteArrayToBase64(a, false);
} | 3.68 |
hudi_HoodieInputFormatUtils_getCommitsForIncrementalQuery | /**
* Get commits for incremental query from Hive map reduce configuration.
*
 * @param job       the Hive map reduce job
 * @param tableName the table name
 * @param timeline  the Hoodie timeline to read the commits from
 * @return the commits to read for the incremental query
*/
public static Option<List<HoodieInstant>> getCommitsForIncrementalQuery(Job job, String tableName, HoodieTimeline timeline) {
return Option.of(getHoodieTimelineForIncrementalQuery(job, tableName, timeline).getInstants());
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_rawBytesUploaded | /**
* Indicate that we just uploaded some data to Azure storage.
* @param numberOfBytes The raw number of bytes uploaded (including overhead).
*/
public void rawBytesUploaded(long numberOfBytes) {
rawBytesUploaded.incr(numberOfBytes);
} | 3.68 |
hudi_OptionsResolver_needsAsyncCompaction | /**
* Returns whether there is need to schedule the async compaction.
*
* @param conf The flink configuration.
*/
public static boolean needsAsyncCompaction(Configuration conf) {
return OptionsResolver.isMorTable(conf)
&& conf.getBoolean(FlinkOptions.COMPACTION_ASYNC_ENABLED);
} | 3.68 |
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_interceptRemainingPath | /**
* The interceptRemainingPath will just return the remainingPath passed in.
*
*/
@Override
public Path interceptRemainingPath(Path remainingPath) {
return remainingPath;
} | 3.68 |
morf_Function_addMonths | /**
* Helper method to create an instance of the "addMonths" SQL function.
*
* @param expression the expression to evaluate
* @param number an expression evaluating to the number of months to add (or if negative, subtract)
* @return an instance of the addMonths function
*/
public static Function addMonths(AliasedField expression, AliasedField number) {
return new Function(FunctionType.ADD_MONTHS, expression, number);
} | 3.68 |
framework_Slot_detachListeners | /**
* Detaches resize listeners from the widget, caption and spacing elements
*/
private void detachListeners() {
if (getWidget() != null && layout.getLayoutManager() != null) {
LayoutManager lm = layout.getLayoutManager();
if (getCaptionElement() != null && captionResizeListener != null) {
lm.removeElementResizeListener(getCaptionElement(),
captionResizeListener);
}
if (widgetResizeListener != null) {
lm.removeElementResizeListener(getWidget().getElement(),
widgetResizeListener);
}
// in many cases, the listener has already been removed by
// setSpacing(false)
if (getSpacingElement() != null && spacingResizeListener != null) {
lm.removeElementResizeListener(getSpacingElement(),
spacingResizeListener);
}
}
} | 3.68 |
morf_SqlUtils_then | /**
* @param value The value returned if the criteria is true
* @return {@link WhenCondition}
*/
public WhenCondition then(long value) {
return then(literal(value));
} | 3.68 |
hbase_MasterObserver_postTruncateRegion | /**
* Called after the truncate region procedure is called.
* @param c The environment to interact with the framework and master
* @param regionInfo The Region being truncated
*/
@SuppressWarnings("unused")
default void postTruncateRegion(final ObserverContext<MasterCoprocessorEnvironment> c,
RegionInfo regionInfo) {
} | 3.68 |
hadoop_ContainerStatus_setContainerSubState | /**
* Add Extra state information of the container (SCHEDULED, LOCALIZING etc.).
* @param subState Extra State Information.
*/
@Private
@Unstable
public void setContainerSubState(ContainerSubState subState) {
throw new UnsupportedOperationException(
"subclass must implement this method");
} | 3.68 |
flink_MailboxProcessor_isMailboxThread | /**
* Check if the current thread is the mailbox thread.
*
* @return only true if called from the mailbox thread.
*/
public boolean isMailboxThread() {
return mailbox.isMailboxThread();
} | 3.68 |
pulsar_ResourceGroupService_getNamespaceResourceGroup | /**
* Return the resource group associated with a namespace.
*
 * @param namespaceName the namespace to look up
 * @return the resource group associated with the namespace, or null if none is configured
*/
public ResourceGroup getNamespaceResourceGroup(NamespaceName namespaceName) {
return this.namespaceToRGsMap.get(namespaceName);
} | 3.68 |
dubbo_Environment_getConfigurationMaps | /**
* Get global configuration as map list
*
 * @return the global configuration as a list of maps
*/
public List<Map<String, String>> getConfigurationMaps() {
if (globalConfigurationMaps == null) {
globalConfigurationMaps = getConfigurationMaps(null, null);
}
return globalConfigurationMaps;
} | 3.68 |
hbase_Procedure_tryRunnable | /**
* Try to set this procedure into RUNNABLE state. Succeeds if all subprocedures/children are done.
* @return True if we were able to move procedure to RUNNABLE state.
*/
synchronized boolean tryRunnable() {
// Don't use isWaiting in the below; it returns true for WAITING and WAITING_TIMEOUT
if (getState() == ProcedureState.WAITING && childrenCountDown()) {
setState(ProcedureState.RUNNABLE);
return true;
} else {
return false;
}
} | 3.68 |
hbase_WALEdit_createBulkLoadEvent | /**
* Create a bulk loader WALEdit
* @param hri The RegionInfo for the region in which we are bulk loading
* @param bulkLoadDescriptor The descriptor for the Bulk Loader
* @return The WALEdit for the BulkLoad
*/
public static WALEdit createBulkLoadEvent(RegionInfo hri,
WALProtos.BulkLoadDescriptor bulkLoadDescriptor) {
KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, BULK_LOAD,
EnvironmentEdgeManager.currentTime(), bulkLoadDescriptor.toByteArray());
return new WALEdit().add(kv, METAFAMILY);
} | 3.68 |
framework_AbstractMedia_isAutoplay | /**
* @return true if the media is set to automatically start playback.
*/
public boolean isAutoplay() {
return getState(false).autoplay;
} | 3.68 |
hbase_HBaseTestingUtility_cleanup | // close hbase admin, close current connection and reset MIN MAX configs for RS.
private void cleanup() throws IOException {
closeConnection();
// unset the configuration for MIN and MAX RS to start
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
} | 3.68 |
framework_BasicEvent_setAllDay | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.event.CalendarEventEditor#setAllDay(boolean)
*/
@Override
public void setAllDay(boolean isAllDay) {
this.isAllDay = isAllDay;
fireEventChange();
} | 3.68 |
hmily_TransactionImpl_doEnList | /**
* Do en list.
*
* @param xaResource the xa resource
* @param flag the flag
* @throws SystemException the system exception
* @throws RollbackException the rollback exception
*/
public void doEnList(final XAResource xaResource, final int flag) throws SystemException, RollbackException {
//xaResource;
if (flag == XAResource.TMJOIN
|| flag == XAResource.TMNOFLAGS) {
            // different XA transaction data needs to be handled here.
enlistResource(xaResource);
} else if (flag == XAResource.TMRESUME) {
            // resume the transaction.
if (delistResourceList != null) {
for (final XAResource resource : delistResourceList) {
this.enlistResource(resource);
}
}
}
delistResourceList = null;
} | 3.68 |
hbase_SingleColumnValueFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) return true;
if (!(o instanceof SingleColumnValueFilter)) return false;
SingleColumnValueFilter other = (SingleColumnValueFilter) o;
return Bytes.equals(this.getFamily(), other.getFamily())
&& Bytes.equals(this.getQualifier(), other.getQualifier()) && this.op.equals(other.op)
&& this.getComparator().areSerializedFieldsEqual(other.getComparator())
&& this.getFilterIfMissing() == other.getFilterIfMissing()
&& this.getLatestVersionOnly() == other.getLatestVersionOnly();
} | 3.68 |
pulsar_FunctionRecord_from | /**
* Creates a builder for a Record from a Function Context.
* The builder is initialized with the output topic from the Context and with the topicName, key, eventTime,
* properties, partitionId, partitionIndex and recordSequence from the Context input Record.
* It doesn't initialize a Message at the moment.
*
* @param context a Function Context
* @param <T> type of Record to build
* @return a Record builder initialised with values from the Function Context
*/
public static <T> FunctionRecord.FunctionRecordBuilder<T> from(Context context, Schema<T> schema) {
if (schema == null) {
throw new IllegalArgumentException("Schema should not be null.");
}
Record<?> currentRecord = context.getCurrentRecord();
FunctionRecordBuilder<T> builder = new FunctionRecordBuilder<T>()
.schema(schema)
.destinationTopic(context.getOutputTopic())
.properties(currentRecord.getProperties());
currentRecord.getTopicName().ifPresent(builder::topicName);
currentRecord.getKey().ifPresent(builder::key);
currentRecord.getEventTime().ifPresent(builder::eventTime);
currentRecord.getPartitionId().ifPresent(builder::partitionId);
currentRecord.getPartitionIndex().ifPresent(builder::partitionIndex);
currentRecord.getRecordSequence().ifPresent(builder::recordSequence);
return builder;
} | 3.68 |
framework_AbstractComponentConnector_shouldHandleLongTap | /**
* Checks whether a long tap needs handling.
*
* @return {@code true} if long tap handling is needed, {@code false}
* otherwise
*/
protected boolean shouldHandleLongTap() {
return BrowserInfo.get().isTouchDevice();
} | 3.68 |
flink_TableStreamOperator_computeMemorySize | /** Compute memory size from memory fraction. */
public long computeMemorySize() {
final Environment environment = getContainingTask().getEnvironment();
return environment
.getMemoryManager()
.computeMemorySize(
getOperatorConfig()
.getManagedMemoryFractionOperatorUseCaseOfSlot(
ManagedMemoryUseCase.OPERATOR,
environment.getTaskManagerInfo().getConfiguration(),
environment.getUserCodeClassLoader().asClassLoader()));
} | 3.68 |
morf_UpgradeTestHelper_jiraIdIsValid | /**
* Checks that a supplied JIRA ID is valid.
*
* @param jiraId the JIRA ID to check
* @return true if the JIRA ID is valid, false otherwise
*/
private boolean jiraIdIsValid(final String jiraId) {
return
jiraId.matches("WEB-\\d+") ||
jiraId.matches("PDT-\\d+");
} | 3.68 |
flink_EvictingWindowSavepointReader_aggregate | /**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid The uid of the operator.
* @param aggregateFunction The aggregate function used to create the window.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
 * @param inputType The type information of the values that are aggregated.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the values that are aggregated.
* @param <ACC> The type of the accumulator (intermediate aggregate state).
* @param <R> The type of the aggregated result.
* @param <OUT> The output type of the reader function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException If savepoint does not contain the specified uid.
*/
public <K, T, ACC, R, OUT> DataStream<OUT> aggregate(
String uid,
AggregateFunction<T, ACC, R> aggregateFunction,
WindowReaderFunction<R, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<T> inputType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator =
WindowReaderOperator.evictingWindow(
new AggregateEvictingWindowReaderFunction<>(
readerFunction, aggregateFunction),
keyType,
windowSerializer,
inputType,
env.getConfig());
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
hbase_JVMClusterUtil_waitForEvent | /**
* Utility method to wait some time for an event to occur, and then return control to the caller.
* @param millis How long to wait, in milliseconds.
* @param action The action that we are waiting for. Will be used in log message if the event does
* not occur.
* @param check A Supplier that will be checked periodically to produce an updated true/false
* result indicating if the expected event has happened or not.
* @throws InterruptedIOException If we are interrupted while waiting for the event.
* @throws RuntimeException If we reach the specified timeout while waiting for the event.
*/
private static void waitForEvent(long millis, String action, Supplier<Boolean> check)
throws InterruptedIOException {
long end = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(millis);
while (true) {
if (check.get()) {
return;
}
if (System.nanoTime() > end) {
String msg = "Master not " + action + " after " + millis + "ms";
Threads.printThreadInfo(System.out, "Thread dump because: " + msg);
throw new RuntimeException(msg);
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
}
} | 3.68 |
hadoop_BufferPool_acquire | /**
* Acquires a {@code ByteBuffer}; blocking if necessary until one becomes available.
* @param blockNumber the id of the block to acquire.
* @return the acquired block's {@code BufferData}.
*/
public synchronized BufferData acquire(int blockNumber) {
BufferData data;
final int maxRetryDelayMs = 600 * 1000;
final int statusUpdateDelayMs = 120 * 1000;
Retryer retryer = new Retryer(10, maxRetryDelayMs, statusUpdateDelayMs);
do {
if (retryer.updateStatus()) {
if (LOG.isDebugEnabled()) {
LOG.debug("waiting to acquire block: {}", blockNumber);
LOG.debug("state = {}", this);
}
releaseReadyBlock(blockNumber);
}
data = tryAcquire(blockNumber);
}
while ((data == null) && retryer.continueRetry());
if (data != null) {
return data;
} else {
String message =
String.format("Wait failed for acquire(%d)", blockNumber);
throw new IllegalStateException(message);
}
} | 3.68 |
framework_TreeTable_isAnimationsEnabled | /**
* @return true if animations are enabled
*/
public boolean isAnimationsEnabled() {
return animationsEnabled;
} | 3.68 |
hadoop_RouterDelegationTokenSecretManager_getMasterKeyByDelegationKey | /**
 * The Router supports obtaining the DelegationKey stored in the Router StateStore
 * according to the given DelegationKey.
*
* @param key Param DelegationKey
 * @return the matching DelegationKey
* @throws YarnException An internal conversion error occurred when getting the Token
* @throws IOException IO exception occurred
*/
public DelegationKey getMasterKeyByDelegationKey(DelegationKey key)
throws YarnException, IOException {
try {
RouterMasterKeyResponse response = federationFacade.getMasterKeyByDelegationKey(key);
RouterMasterKey masterKey = response.getRouterMasterKey();
ByteBuffer keyByteBuf = masterKey.getKeyBytes();
byte[] keyBytes = new byte[keyByteBuf.remaining()];
keyByteBuf.get(keyBytes);
DelegationKey delegationKey =
new DelegationKey(masterKey.getKeyId(), masterKey.getExpiryDate(), keyBytes);
return delegationKey;
} catch (IOException ex) {
throw new IOException(ex);
} catch (YarnException ex) {
throw new YarnException(ex);
}
} | 3.68 |
hbase_ReflectedFunctionCache_notFound | /**
* In order to use computeIfAbsent, we can't store nulls in our cache. So we store a lambda which
* resolves to null. The contract is that getAndCallByName returns null in this case.
*/
private R notFound(I argument) {
return null;
} | 3.68 |
hudi_TimelineServerBasedWriteMarkers_executeCreateMarkerRequest | /**
* Executes marker creation request with specific parameters.
*
* @param paramsMap Parameters to be included in the marker request.
* @param partitionPath Relative partition path.
* @param markerFileName Marker file name.
* @return {@code true} if successful; {@code false} otherwise.
*/
private boolean executeCreateMarkerRequest(Map<String, String> paramsMap, String partitionPath, String markerFileName) {
boolean success;
try {
success = httpRequestClient.executeRequest(
CREATE_MARKER_URL, paramsMap, BOOLEAN_TYPE_REFERENCE, HttpRequestClient.RequestMethod.POST);
} catch (IOException e) {
throw new HoodieRemoteException("Failed to create marker file " + partitionPath + "/" + markerFileName, e);
}
return success;
} | 3.68 |
flink_DefaultLookupCache_newBuilder | /** Creates a builder for the cache. */
public static Builder newBuilder() {
return new Builder();
} | 3.68 |
Activiti_AstRightValue_setValue | /**
* non-lvalues are always readonly, so throw an exception
*/
public final void setValue(
Bindings bindings,
ELContext context,
Object value
) {
throw new ELException(
LocalMessages.get(
"error.value.set.rvalue",
getStructuralId(bindings)
)
);
} | 3.68 |
flink_PlanNode_getOriginalOptimizerNode | /**
* Gets the node from the optimizer DAG for which this plan candidate node was created.
*
* @return The optimizer's DAG node.
*/
public OptimizerNode getOriginalOptimizerNode() {
return this.template;
} | 3.68 |
hbase_MiniZooKeeperCluster_killOneBackupZooKeeperServer | /**
 * Kills one backup ZK server.
* @throws IOException if waiting for the shutdown of a server fails
*/
public void killOneBackupZooKeeperServer() throws IOException, InterruptedException {
if (!started || activeZKServerIndex < 0 || standaloneServerFactoryList.size() <= 1) {
return;
}
int backupZKServerIndex = activeZKServerIndex + 1;
    // Shutdown this backup zk server
NIOServerCnxnFactory standaloneServerFactory =
standaloneServerFactoryList.get(backupZKServerIndex);
int clientPort = clientPortList.get(backupZKServerIndex);
standaloneServerFactory.shutdown();
if (!waitForServerDown(clientPort, connectionTimeout)) {
throw new IOException("Waiting for shutdown of standalone server");
}
zooKeeperServers.get(backupZKServerIndex).getZKDatabase().close();
// remove this backup zk server
standaloneServerFactoryList.remove(backupZKServerIndex);
clientPortList.remove(backupZKServerIndex);
zooKeeperServers.remove(backupZKServerIndex);
LOG.info("Kill one backup ZK servers in the cluster on client port: {}", clientPort);
} | 3.68 |
hbase_MasterObserver_postListNamespaces | /**
* Called after a listNamespaces request has been processed.
* @param ctx the environment to interact with the framework and master
* @param namespaces the list of namespaces about to be returned
* @throws IOException if something went wrong
*/
default void postListNamespaces(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<String> namespaces) throws IOException {
} | 3.68 |
framework_VaadinSession_removeBootstrapListener | /**
* Remove a bootstrap listener that was previously added.
*
* @see #addBootstrapListener(BootstrapListener)
*
* @param listener
* the bootstrap listener to remove
* @deprecated Use a {@link Registration} object returned by
* {@link #addBootstrapListener(BootstrapListener)} to remove a
* listener
*/
@Deprecated
public void removeBootstrapListener(BootstrapListener listener) {
assert hasLock();
eventRouter.removeListener(BootstrapFragmentResponse.class, listener,
BOOTSTRAP_FRAGMENT_METHOD);
eventRouter.removeListener(BootstrapPageResponse.class, listener,
BOOTSTRAP_PAGE_METHOD);
} | 3.68 |
flink_HeapPriorityQueueSnapshotRestoreWrapper_forUpdatedSerializer | /**
* Returns a deep copy of the snapshot, where the serializer is re-registered by the serializer
* snapshot or changed to the given serializer.
*
* @param updatedSerializer updated serializer.
* @param allowFutureMetadataUpdates whether allow metadata to update in the future or not.
 * @return a deep copy of this wrapper with the updated serializer.
*/
public HeapPriorityQueueSnapshotRestoreWrapper<T> forUpdatedSerializer(
@Nonnull TypeSerializer<T> updatedSerializer, boolean allowFutureMetadataUpdates) {
RegisteredPriorityQueueStateBackendMetaInfo<T> updatedMetaInfo =
new RegisteredPriorityQueueStateBackendMetaInfo<>(
metaInfo.getName(), updatedSerializer);
updatedMetaInfo =
allowFutureMetadataUpdates
? updatedMetaInfo.withSerializerUpgradesAllowed()
: updatedMetaInfo;
return new HeapPriorityQueueSnapshotRestoreWrapper<>(
priorityQueue,
updatedMetaInfo,
keyExtractorFunction,
localKeyGroupRange,
totalKeyGroups);
} | 3.68 |
flink_PrioritizedDeque_peekLast | /**
* Returns the last non-priority element or priority element if the former does not exist.
*
* @return the last element or null.
*/
@Nullable
public T peekLast() {
return deque.peekLast();
} | 3.68 |
hudi_CkpMetadata_abortInstant | /**
* Add an aborted checkpoint message.
*/
public void abortInstant(String instant) {
Path path = fullPath(CkpMessage.getFileName(instant, CkpMessage.State.ABORTED));
try {
fs.createNewFile(path);
} catch (IOException e) {
throw new HoodieException("Exception while adding checkpoint abort metadata for instant: " + instant);
}
} | 3.68 |
hmily_HmilyTransaction_registerParticipantList | /**
* Register participant list.
*
* @param hmilyParticipantList the hmily participant list
*/
public void registerParticipantList(final List<HmilyParticipant> hmilyParticipantList) {
hmilyParticipants.addAll(hmilyParticipantList);
} | 3.68 |
framework_Navigator_getViewProvider | /**
* Get view provider that handles the given {@code state}.
*
* @param state
* state string
* @return suitable provider
*/
protected ViewProvider getViewProvider(String state) {
String longestViewName = null;
ViewProvider longestViewNameProvider = null;
for (ViewProvider provider : providers) {
String viewName = provider.getViewName(state);
if (null != viewName && (longestViewName == null
|| viewName.length() > longestViewName.length())) {
longestViewName = viewName;
longestViewNameProvider = provider;
}
}
return longestViewNameProvider;
} | 3.68 |
framework_SQLContainer_getItemUnfiltered | /**
* Bypasses in-memory filtering to return items that are cached in memory.
* <em>NOTE</em>: This does not bypass database-level filtering.
*
* @param itemId
* the id of the item to retrieve.
* @return the item represented by itemId.
*/
public Item getItemUnfiltered(Object itemId) {
if (!cachedItems.containsKey(itemId)) {
for (RowItem item : addedItems) {
if (item.getId().equals(itemId)) {
return item;
}
}
}
return cachedItems.get(itemId);
}
/**
* NOTE! Do not use this method if in any way avoidable. This method doesn't
* (and cannot) use lazy loading, which means that all rows in the database
* will be loaded into memory.
*
* {@inheritDoc} | 3.68 |
flink_JobVertex_getName | /**
* Returns the name of the vertex.
*
* @return The name of the vertex.
*/
public String getName() {
return this.name;
} | 3.68 |
hbase_MobUtils_serializeMobFileRefs | /**
* Serialize a set of referenced mob hfiles
* @param mobRefSet to serialize, may be null
 * @return byte array to e.g. put into store file metadata; will not be null
*/
public static byte[] serializeMobFileRefs(SetMultimap<TableName, String> mobRefSet) {
if (mobRefSet != null && mobRefSet.size() > 0) {
// Here we rely on the fact that '/' and ',' are not allowed in either table names nor hfile
// names for serialization.
//
// exampleTable/filename1,filename2//example:table/filename5//otherTable/filename3,filename4
//
// to approximate the needed capacity we use the fact that there will usually be 1 table name
// and each mob filename is around 105 bytes. we pick an arbitrary number to cover "most"
// single table name lengths
StringBuilder sb = new StringBuilder(100 + mobRefSet.size() * 105);
boolean doubleSlash = false;
for (TableName tableName : mobRefSet.keySet()) {
if (doubleSlash) {
sb.append("//");
} else {
doubleSlash = true;
}
sb.append(tableName).append("/");
boolean comma = false;
for (String refs : mobRefSet.get(tableName)) {
if (comma) {
sb.append(",");
} else {
comma = true;
}
sb.append(refs);
}
}
return Bytes.toBytes(sb.toString());
} else {
return HStoreFile.NULL_VALUE;
}
} | 3.68 |
hbase_ServerManager_areDeadServersInProgress | /**
* Checks if any dead servers are currently in progress.
* @return true if any RS are being processed as dead, false if not
*/
public boolean areDeadServersInProgress() throws IOException {
return master.getProcedures().stream()
.anyMatch(p -> !p.isFinished() && p instanceof ServerCrashProcedure);
} | 3.68 |
graphhopper_LandmarkStorage_setLMSelectionWeighting | /**
* This weighting is used for the selection heuristic and is per default not the weighting specified in the constructor.
* The special weighting leads to a much better distribution of the landmarks and results in better response times.
*/
public void setLMSelectionWeighting(Weighting lmSelectionWeighting) {
this.lmSelectionWeighting = lmSelectionWeighting;
} | 3.68 |
hadoop_MountTableRefresherThread_isSuccess | /**
* @return true if cache was refreshed successfully.
*/
public boolean isSuccess() {
return success;
} | 3.68 |
flink_HandlerRequest_getPathParameter | /**
* Returns the value of the {@link MessagePathParameter} for the given class.
*
* @param parameterClass class of the parameter
* @param <X> the value type that the parameter contains
* @param <PP> type of the path parameter
* @return path parameter value for the given class
* @throws IllegalStateException if no value is defined for the given parameter class
*/
public <X, PP extends MessagePathParameter<X>> X getPathParameter(Class<PP> parameterClass) {
@SuppressWarnings("unchecked")
PP pathParameter = (PP) pathParameters.get(parameterClass);
Preconditions.checkState(
pathParameter != null, "No parameter could be found for the given class.");
return pathParameter.getValue();
} | 3.68 |
hbase_AccessChecker_requireTablePermission | /**
* Authorizes that the current user has any of the given permissions for the given table, column
* family and column qualifier.
* @param user Active user to which authorization checks should be applied
* @param request Request type
* @param tableName Table requested
* @param family Column family param
 * @param qualifier Column qualifier param
 * @param permissions Actions being requested; access is granted if any of them is authorized
* @throws IOException if obtaining the current user fails
* @throws AccessDeniedException if user has no authorization
*/
public void requireTablePermission(User user, String request, TableName tableName, byte[] family,
byte[] qualifier, Action... permissions) throws IOException {
AuthResult result = null;
for (Action permission : permissions) {
if (authManager.authorizeUserTable(user, tableName, permission)) {
result = AuthResult.allow(request, "Table permission granted", user, permission, tableName,
null, null);
result.getParams().setFamily(family).setQualifier(qualifier);
break;
} else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName,
family, qualifier);
result.getParams().setFamily(family).setQualifier(qualifier);
}
}
logResult(result);
if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
}
} | 3.68 |
morf_MorfModule_provideUpgrade | /**
* Singleton provider creating an instance of {@link Upgrade}.
*
* @param connectionResources the connection resources
* @param factory the upgrade path factory
* @param upgradeStatusTableService the service class for managing the status of temporary upgrade tables
* @param viewChangesDeploymentHelper the view deployment helper
* @param viewDeploymentValidator the view deployment validator
 * @param graphBasedUpgradeBuilderFactory the graph based upgrade builder factory
 * @param databaseUpgradePathValidationService the service validating the database upgrade path
* @return the singleton instance of {@link Upgrade}.
*/
@Provides
@Singleton
public Upgrade provideUpgrade(ConnectionResources connectionResources,
UpgradePathFactory factory,
UpgradeStatusTableService upgradeStatusTableService,
ViewChangesDeploymentHelper viewChangesDeploymentHelper,
ViewDeploymentValidator viewDeploymentValidator,
GraphBasedUpgradeBuilderFactory graphBasedUpgradeBuilderFactory,
DatabaseUpgradePathValidationService databaseUpgradePathValidationService) {
return new Upgrade(connectionResources, factory, upgradeStatusTableService, viewChangesDeploymentHelper,
viewDeploymentValidator, graphBasedUpgradeBuilderFactory, databaseUpgradePathValidationService);
} | 3.68 |
MagicPlugin_ExprActiveSpell_acceptChange | // Eclipse detects the parent return type of this function as @NonNull
// which is not correct.
@SuppressWarnings("null")
@Nullable
@Override
public Class<?>[] acceptChange(@Nonnull Changer.ChangeMode mode) {
if (mode != Changer.ChangeMode.SET && mode != Changer.ChangeMode.REMOVE_ALL)
return null;
return new Class<?>[] {String.class};
} | 3.68 |
hudi_Base64CodecUtil_encode | /**
* Encodes all bytes from the specified byte array into String using StandardCharsets.UTF_8.
*
* @param data byte[] source data
* @return base64 encoded data
*/
public static String encode(byte[] data) {
return new String(Base64.getEncoder().encode(data), StandardCharsets.UTF_8);
} | 3.68 |
hbase_HRegion_batchReplay | /**
* @deprecated Since 3.0.0, will be removed in 4.0.0. Now we use
* {@link #replayWALEntry(WALEntry, CellScanner)} for replaying edits at secondary
* replica side.
*/
@Deprecated
OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId) throws IOException {
if (
!RegionReplicaUtil.isDefaultReplica(getRegionInfo())
&& replaySeqId < lastReplayedOpenRegionSeqId
) {
// if it is a secondary replica we should ignore these entries silently
// since they are coming out of order
if (LOG.isTraceEnabled()) {
LOG.trace(getRegionInfo().getEncodedName() + " : " + "Skipping " + mutations.length
+ " mutations with replaySeqId=" + replaySeqId
+ " which is < than lastReplayedOpenRegionSeqId=" + lastReplayedOpenRegionSeqId);
for (MutationReplay mut : mutations) {
LOG.trace(getRegionInfo().getEncodedName() + " : Skipping : " + mut.mutation);
}
}
OperationStatus[] statuses = new OperationStatus[mutations.length];
for (int i = 0; i < statuses.length; i++) {
statuses[i] = OperationStatus.SUCCESS;
}
return statuses;
}
return batchMutate(new ReplayBatchOperation(this, mutations, replaySeqId));
} | 3.68 |
framework_Embedded_getClassId | /**
* This attribute may be used to specify the location of an object's
* implementation via a URI.
*
* @return the classid.
*/
public String getClassId() {
return getState(false).classId;
} | 3.68 |
hadoop_FSStoreOpHandler_getMirrorOp | /**
* Get mirror operation of store Type.
*
* @param storeType storeType.
* @return instance of FSNodeStoreLogOp.
*/
public static FSNodeStoreLogOp getMirrorOp(StoreType storeType) {
return newInstance(mirrorOp.get(storeType));
} | 3.68 |
flink_AbstractHeapVector_reset | /**
* Resets the column to default state. - fills the isNull array with false. - sets noNulls to
* true.
*/
@Override
public void reset() {
if (!noNulls) {
Arrays.fill(isNull, false);
}
noNulls = true;
} | 3.68 |
hbase_Threads_sleepWithoutInterrupt | /**
* Sleeps for the given amount of time even if interrupted. Preserves the interrupt status.
* @param msToWait the amount of time to sleep in milliseconds
*/
public static void sleepWithoutInterrupt(final long msToWait) {
long timeMillis = EnvironmentEdgeManager.currentTime();
long endTime = timeMillis + msToWait;
boolean interrupted = false;
while (timeMillis < endTime) {
try {
Thread.sleep(endTime - timeMillis);
} catch (InterruptedException ex) {
interrupted = true;
}
timeMillis = EnvironmentEdgeManager.currentTime();
}
if (interrupted) {
Thread.currentThread().interrupt();
}
} | 3.68 |
hbase_RegionInfo_isNext | /**
* Returns True if region is next, adjacent but 'after' this one.
* @see #isAdjacent(RegionInfo)
* @see #areAdjacent(RegionInfo, RegionInfo)
*/
default boolean isNext(RegionInfo after) {
return getTable().equals(after.getTable()) && Bytes.equals(getEndKey(), after.getStartKey());
} | 3.68 |
hadoop_DocumentStoreTimelineReaderImpl_applyFilters | // for honoring all filters from {@link TimelineEntityFilters}
private Set<TimelineEntity> applyFilters(TimelineEntityFilters filters,
TimelineDataToRetrieve dataToRetrieve,
List<TimelineEntityDocument> entityDocs) throws IOException {
Set<TimelineEntity> timelineEntities = new HashSet<>();
for (TimelineEntityDocument entityDoc : entityDocs) {
final TimelineEntity timelineEntity = entityDoc.fetchTimelineEntity();
if (DocumentStoreUtils.isFilterNotMatching(filters, timelineEntity)) {
continue;
}
TimelineEntity entityToBeReturned = DocumentStoreUtils
.createEntityToBeReturned(entityDoc, dataToRetrieve);
timelineEntities.add(entityToBeReturned);
}
return timelineEntities;
} | 3.68 |
flink_FlinkCalciteCatalogReader_toPreparingTable | /** Translate this {@link CatalogSchemaTable} into Flink source table. */
private static FlinkPreparingTableBase toPreparingTable(
RelOptSchema relOptSchema,
List<String> names,
RelDataType rowType,
CatalogSchemaTable schemaTable) {
final ResolvedCatalogBaseTable<?> resolvedBaseTable =
schemaTable.getContextResolvedTable().getResolvedTable();
final CatalogBaseTable originTable = resolvedBaseTable.getOrigin();
if (originTable instanceof QueryOperationCatalogView) {
return convertQueryOperationView(
relOptSchema, names, rowType, (QueryOperationCatalogView) originTable);
} else if (originTable instanceof ConnectorCatalogTable) {
ConnectorCatalogTable<?, ?> connectorTable = (ConnectorCatalogTable<?, ?>) originTable;
if ((connectorTable).getTableSource().isPresent()) {
return convertLegacyTableSource(
relOptSchema,
rowType,
schemaTable.getContextResolvedTable().getIdentifier(),
connectorTable,
schemaTable.getStatistic(),
schemaTable.isStreamingMode());
} else {
throw new ValidationException(
"Cannot convert a connector table " + "without source.");
}
} else if (originTable instanceof CatalogView) {
return convertCatalogView(
relOptSchema,
names,
rowType,
schemaTable.getStatistic(),
(CatalogView) originTable);
} else if (originTable instanceof CatalogTable) {
return convertCatalogTable(relOptSchema, names, rowType, schemaTable);
} else {
throw new ValidationException("Unsupported table type: " + originTable);
}
} | 3.68 |
framework_AbstractOrderedLayout_getMargin | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Layout.MarginHandler#getMargin()
*/
@Override
public MarginInfo getMargin() {
return new MarginInfo(getState(false).marginsBitmask);
} | 3.68 |
hbase_Bytes_searchDelimiterIndexInReverse | /**
* Find index of passed delimiter walking from end of buffer backwards.
 * @return Index of delimiter, or -1 if not found
*/
public static int searchDelimiterIndexInReverse(final byte[] b, final int offset,
final int length, final int delimiter) {
if (b == null) {
throw new IllegalArgumentException("Passed buffer is null");
}
int result = -1;
for (int i = (offset + length) - 1; i >= offset; i--) {
if (b[i] == delimiter) {
result = i;
break;
}
}
return result;
} | 3.68 |
pulsar_TransactionUtil_canTransitionTo | /**
 * Check if a status can transition to a new status.
 *
 * @param currentStatus the current status
 * @param newStatus the new status
 * @return true if the current status can transition to the new status.
*/
public static boolean canTransitionTo(TxnStatus currentStatus, TxnStatus newStatus) {
switch (currentStatus) {
case OPEN:
return newStatus != COMMITTED && newStatus != ABORTED;
case COMMITTING:
return newStatus == COMMITTING || newStatus == COMMITTED;
case COMMITTED:
return newStatus == COMMITTED;
case ABORTING:
return newStatus == ABORTING || newStatus == ABORTED;
case ABORTED:
return newStatus == ABORTED;
default:
throw new IllegalArgumentException("Unknown txn status : " + newStatus);
}
} | 3.68 |
hbase_MultiTableSnapshotInputFormatImpl_setInput | /**
* Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of
* restoreDir.
* <p/>
* Sets: {@link #RESTORE_DIRS_KEY}, {@link #SNAPSHOT_TO_SCANS_KEY}
*/
public void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans,
Path restoreDir) throws IOException {
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
setSnapshotToScans(conf, snapshotScans);
Map<String, Path> restoreDirs =
generateSnapshotToRestoreDirMapping(snapshotScans.keySet(), restoreDir);
setSnapshotDirs(conf, restoreDirs);
restoreSnapshots(conf, restoreDirs, fs);
} | 3.68 |
framework_Calendar_addActionHandler | /**
 * Adds an action handler to the calendar that handles events produced by the
* context menu.
*
* <p>
* The {@link Handler#getActions(Object, Object)} parameters depend on what
* view the Calendar is in:
* <ul>
* <li>If the Calendar is in <i>Day or Week View</i> then the target
* parameter will be a {@link CalendarDateRange} with a range of
* half-an-hour. The {@link Handler#getActions(Object, Object)} method will
* be called once per half-hour slot.</li>
* <li>If the Calendar is in <i>Month View</i> then the target parameter
* will be a {@link CalendarDateRange} with a range of one day. The
* {@link Handler#getActions(Object, Object)} will be called once for each
* day.
* </ul>
* The Dates passed into the {@link CalendarDateRange} are in the same
* timezone as the calendar is.
* </p>
*
* <p>
* The {@link Handler#handleAction(Action, Object, Object)} parameters
* depend on what the context menu is called upon:
* <ul>
* <li>If the context menu is called upon an event then the target parameter
* is the event, i.e. instanceof {@link CalendarEvent}</li>
* <li>If the context menu is called upon an empty slot then the target is a
* {@link Date} representing that slot
* </ul>
* </p>
*/
@Override
public void addActionHandler(Handler actionHandler) {
if (actionHandler != null) {
if (actionHandlers == null) {
actionHandlers = new LinkedList<Handler>();
actionMapper = new KeyMapper<Action>();
}
if (!actionHandlers.contains(actionHandler)) {
actionHandlers.add(actionHandler);
markAsDirty();
}
}
} | 3.68 |
graphhopper_EdgeBasedWitnessPathSearcher_initSearch | /**
* Deletes the shortest path tree that has been found so far and initializes a new witness path search for a given
* node to be contracted and source edge key.
*
* @param sourceEdgeKey the key of the original edge incoming to s from which the search starts
* @param sourceNode the neighbor node from which the search starts (s)
* @param centerNode the node to be contracted (x)
*/
public void initSearch(int sourceEdgeKey, int sourceNode, int centerNode, Stats stats) {
this.stats = stats;
stats.numTrees++;
this.sourceNode = sourceNode;
this.centerNode = centerNode;
// set start entry
weights[sourceEdgeKey] = 0;
parents[sourceEdgeKey] = -1;
setAdjNodeAndPathToCenter(sourceEdgeKey, sourceNode, true);
changedEdgeKeys.add(sourceEdgeKey);
dijkstraHeap.insert(0, sourceEdgeKey);
} | 3.68 |
hadoop_Chain_startAllThreads | // start all the threads
void startAllThreads() {
for (Thread thread : threads) {
thread.start();
}
} | 3.68 |
hadoop_ManifestStoreOperations_fromResilientCommit | /**
* Full commit result.
* @param recovered Did recovery take place?
* @param waitTime any time spent waiting for IO capacity.
*/
public static CommitFileResult fromResilientCommit(
final boolean recovered,
final Duration waitTime) {
return new CommitFileResult(recovered, waitTime);
} | 3.68 |
hadoop_RLESparseResourceAllocation_getMinimumCapacityInInterval | /**
* Get the minimum capacity in the specified time range.
*
* @param interval the {@link ReservationInterval} to be searched
* @return minimum resource allocation
*/
public Resource getMinimumCapacityInInterval(ReservationInterval interval) {
Resource minCapacity =
Resource.newInstance(Integer.MAX_VALUE, Integer.MAX_VALUE);
long start = interval.getStartTime();
long end = interval.getEndTime();
NavigableMap<Long, Resource> capacityRange =
getRangeOverlapping(start, end).getCumulative();
if (!capacityRange.isEmpty()) {
for (Map.Entry<Long, Resource> entry : capacityRange.entrySet()) {
if (entry.getValue() != null) {
minCapacity =
Resources.componentwiseMin(minCapacity, entry.getValue());
}
}
}
return minCapacity;
} | 3.68 |
hbase_MetricsSource_getTimestampOfLastShippedOp | /**
 * Get the timestampOfLastShippedOp; if there are multiple groups, return the latest one.
*/
public long getTimestampOfLastShippedOp() {
long lastTimestamp = 0L;
for (long ts : lastShippedTimeStamps.values()) {
if (ts > lastTimestamp) {
lastTimestamp = ts;
}
}
return lastTimestamp;
} | 3.68 |
morf_SqlServerDialect_getFromDummyTable | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getFromDummyTable()
*/
@Override
protected String getFromDummyTable() {
return StringUtils.EMPTY; // SQLServer doesn't have a "DUAL" table like oracle etc.
} | 3.68 |
flink_MemorySegment_processAsByteBuffer | /**
* Supplies a {@link ByteBuffer} that represents this entire segment to the given process
* consumer.
*
* <p>Note: The {@link ByteBuffer} passed into the process consumer is temporary and could
* become invalid after the processing. Thus, the process consumer should not try to keep any
* reference of the {@link ByteBuffer}.
*
* @param processConsumer to accept the segment as {@link ByteBuffer}.
*/
public void processAsByteBuffer(Consumer<ByteBuffer> processConsumer) {
Preconditions.checkNotNull(processConsumer).accept(wrapInternal(0, size));
} | 3.68 |
morf_AbstractSqlDialectTest_expectedHints4 | /**
* @return The expected SQL for the {@link InsertStatement#useDirectPath()} directive.
*/
protected String expectedHints4() {
return "INSERT INTO " + tableName("Foo") + " SELECT a, b FROM " + tableName("Foo_1");
} | 3.68 |
morf_DeleteStatementBuilder_where | /**
* Specifies the where criteria
*
* <blockquote><pre>DeleteStatement.delete([table])
* .where([criteria])
* .build();</pre></blockquote>
*
* @param criterion the criteria to filter the results by
* @return this, for method chaining.
*/
public DeleteStatementBuilder where(Criterion criterion) {
if (criterion == null)
throw new IllegalArgumentException("Criterion was null in where clause");
whereCriterion = criterion;
return this;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithTableAlias | /**
* Tests a select with table aliases.
*/
@Test
public void testSelectWithTableAlias() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(DATE_FIELD).as("aliasDate"))
.from(new TableReference(TEST_TABLE).as("aliasTest"));
String expectedSql = "SELECT stringField, intField, dateField AS aliasDate FROM " + tableName(TEST_TABLE) + " aliasTest";
assertEquals("Select statement with qualified field names", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_RestServerEndpointConfiguration_fromConfiguration | /**
* Creates and returns a new {@link RestServerEndpointConfiguration} from the given {@link
* Configuration}.
*
* @param config configuration from which the REST server endpoint configuration should be
* created from
* @return REST server endpoint configuration
* @throws ConfigurationException if SSL was configured incorrectly
*/
public static RestServerEndpointConfiguration fromConfiguration(Configuration config)
throws ConfigurationException {
Preconditions.checkNotNull(config);
final String restAddress =
Preconditions.checkNotNull(
config.getString(RestOptions.ADDRESS),
"%s must be set",
RestOptions.ADDRESS.key());
final String restBindAddress = config.getString(RestOptions.BIND_ADDRESS);
final String portRangeDefinition = config.getString(RestOptions.BIND_PORT);
final SSLHandlerFactory sslHandlerFactory;
if (SecurityOptions.isRestSSLEnabled(config)) {
try {
sslHandlerFactory = SSLUtils.createRestServerSSLEngineFactory(config);
} catch (Exception e) {
throw new ConfigurationException(
"Failed to initialize SSLEngineFactory for REST server endpoint.", e);
}
} else {
sslHandlerFactory = null;
}
final Path uploadDir =
Paths.get(
config.getString(
WebOptions.UPLOAD_DIR, config.getString(WebOptions.TMP_DIR)),
"flink-web-upload");
final int maxContentLength = config.getInteger(RestOptions.SERVER_MAX_CONTENT_LENGTH);
final Map<String, String> responseHeaders =
Collections.singletonMap(
HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN.toString(),
config.getString(WebOptions.ACCESS_CONTROL_ALLOW_ORIGIN));
return new RestServerEndpointConfiguration(
restAddress,
restBindAddress,
portRangeDefinition,
sslHandlerFactory,
uploadDir,
maxContentLength,
responseHeaders);
} | 3.68 |
hadoop_Result_isDescend | /**
* Should further directories be descended.
 * @return true if further directories should be descended, otherwise false.
*/
public boolean isDescend() {
return this.descend;
} | 3.68 |
hbase_TableHFileArchiveTracker_safeStopTrackingTable | /**
* Stop tracking a table. Ensures that the table doesn't exist, but if it does, it attempts to add
* the table back via {@link #addAndReWatchTable(String)} - its a 'safe' removal.
* @param tableZnode full zookeeper path to the table to be added
* @throws KeeperException if an unexpected zk exception occurs
*/
private void safeStopTrackingTable(String tableZnode) throws KeeperException {
getMonitor().removeTable(ZKUtil.getNodeName(tableZnode));
// if the table exists, then add and rewatch it
if (ZKUtil.checkExists(watcher, tableZnode) >= 0) {
addAndReWatchTable(tableZnode);
}
} | 3.68 |
hmily_ConsistentHashSelector_selectForKey | /**
* Select for key singleton executor.
*
* @param hash the hash
* @return the singleton executor
*/
private SingletonExecutor selectForKey(final long hash) {
SingletonExecutor invoker;
Long key = hash;
if (!virtualInvokers.containsKey(key)) {
SortedMap<Long, SingletonExecutor> tailMap = virtualInvokers.tailMap(key);
if (tailMap.isEmpty()) {
key = virtualInvokers.firstKey();
} else {
key = tailMap.firstKey();
}
}
invoker = virtualInvokers.get(key);
return invoker;
} | 3.68 |