name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
framework_Buffered_getCauses | /**
* Gets all the causes for this exception.
*
* @return throwables that caused this exception
*/
public final Throwable[] getCauses() {
return causes;
} | 3.68 |
hbase_MasterObserver_postUnassign | /**
* Called after the region unassignment has been requested.
* @param ctx the environment to interact with the framework and master
*/
default void postUnassign(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo regionInfo) throws IOException {
} | 3.68 |
hudi_HoodieMetaSyncOperations_getMetastoreSchema | /**
* Get the schema from metastore.
*/
default Map<String, String> getMetastoreSchema(String tableName) {
return Collections.emptyMap();
} | 3.68 |
hbase_StoreFileWriter_withMaxKeyCount | /**
* @param maxKeyCount estimated maximum number of keys we expect to add
* @return this (for chained invocation)
*/
public Builder withMaxKeyCount(long maxKeyCount) {
this.maxKeyCount = maxKeyCount;
return this;
} | 3.68 |
flink_Tuple10_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple10)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple10 tuple = (Tuple10) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
return true;
} | 3.68 |
framework_TouchScrollDelegate_requiresDelegate | /**
* Checks if a delegate for scrolling is required or if the native
* scrolling of the device should be used. By default, relies on
* {@link BrowserInfo#requiresTouchScrollDelegate()}, override to change
* the behavior.
*
* @return true if a Javascript delegate should be used for scrolling,
* false to use the native scrolling of the device
*/
protected boolean requiresDelegate() {
return requiresDelegate;
} | 3.68 |
hadoop_ErrorTranslation_isObjectNotFound | /**
* Does this exception indicate that a reference to an object
* returned a 404? Unknown bucket errors do not match this
* predicate.
* @param e exception.
* @return true if the status code and error code mean that the
* HEAD request returned 404 but the bucket was there.
*/
public static boolean isObjectNotFound(AwsServiceException e) {
return e.statusCode() == SC_404_NOT_FOUND && !isUnknownBucket(e);
} | 3.68 |
hadoop_AzureNativeFileSystemStore_getLinkInFileMetadata | /**
* If the blob with the given key exists and has a link in its metadata to a
* temporary file (see storeEmptyLinkFile), this method returns the key to
* that temporary file. Otherwise, returns null.
*/
@Override
public String getLinkInFileMetadata(String key) throws AzureException {
if (null == storageInteractionLayer) {
final String errMsg = String.format(
"Storage session expected for URI '%s' but does not exist.",
sessionUri);
throw new AssertionError(errMsg);
}
try {
checkContainer(ContainerAccessType.PureRead);
CloudBlobWrapper blob = getBlobReference(key);
blob.downloadAttributes(getInstrumentedContext());
return getLinkAttributeValue(blob);
} catch (Exception e) {
// Caught exception while attempting download. Re-throw as an Azure
// storage exception.
throw new AzureException(e);
}
} | 3.68 |
flink_KeyMap_getLog2TableCapacity | /**
* Gets the base-2 logarithm of the hash table capacity, as returned by {@link
* #getCurrentTableCapacity()}.
*
* @return The base-2 logarithm of the hash table capacity.
*/
public int getLog2TableCapacity() {
return log2size;
} | 3.68 |
hbase_CoprocessorClassLoader_getIfCached | // This method is used in unit test
public static CoprocessorClassLoader getIfCached(final Path path) {
Preconditions.checkNotNull(path, "The jar path is null!");
return classLoadersCache.get(path);
} | 3.68 |
flink_FlinkBushyJoinReorderRule_foundNextLevel | /** Finds possible join plans for the next level based on the plans found in the previous levels. */
private static Map<Set<Integer>, JoinPlan> foundNextLevel(
RelBuilder relBuilder,
List<Map<Set<Integer>, JoinPlan>> foundPlans,
LoptMultiJoin multiJoin) {
Map<Set<Integer>, JoinPlan> currentLevelJoinPlanMap = new LinkedHashMap<>();
int foundPlansLevel = foundPlans.size() - 1;
int joinLeftSideLevel = 0;
int joinRightSideLevel = foundPlansLevel;
while (joinLeftSideLevel <= joinRightSideLevel) {
List<JoinPlan> joinLeftSidePlans =
new ArrayList<>(foundPlans.get(joinLeftSideLevel).values());
int planSize = joinLeftSidePlans.size();
for (int i = 0; i < planSize; i++) {
JoinPlan joinLeftSidePlan = joinLeftSidePlans.get(i);
List<JoinPlan> joinRightSidePlans;
if (joinLeftSideLevel == joinRightSideLevel) {
// If the left side level equals the right side level, we can skip the first
// 'i' plans on the right side, since those pairs have already been considered,
// to reduce the search space.
joinRightSidePlans = new ArrayList<>(joinLeftSidePlans);
if (i > 0) {
joinRightSidePlans.subList(0, i).clear();
}
} else {
joinRightSidePlans =
new ArrayList<>(foundPlans.get(joinRightSideLevel).values());
}
for (JoinPlan joinRightSidePlan : joinRightSidePlans) {
Optional<JoinPlan> newJoinPlan =
buildInnerJoin(
relBuilder, joinLeftSidePlan, joinRightSidePlan, multiJoin);
if (newJoinPlan.isPresent()) {
JoinPlan existingPlanInCurrentLevel =
currentLevelJoinPlanMap.get(newJoinPlan.get().factorIds);
// check if it's the first plan for the factor set, or it's a better plan
// than the existing one due to lower cost.
if (existingPlanInCurrentLevel == null
|| newJoinPlan.get().betterThan(existingPlanInCurrentLevel)) {
currentLevelJoinPlanMap.put(
newJoinPlan.get().factorIds, newJoinPlan.get());
}
}
}
}
joinLeftSideLevel++;
joinRightSideLevel--;
}
return currentLevelJoinPlanMap;
} | 3.68 |
morf_AbstractSqlDialectTest_testAddBigIntegerColumn | /**
* Test adding a big integer column.
*/
@Test
public void testAddBigIntegerColumn() {
testAlterTableColumn(AlterationType.ADD, column("bigIntegerField_new", DataType.BIG_INTEGER).nullable(), expectedAlterTableAddBigIntegerColumnStatement());
} | 3.68 |
dubbo_ReactorServerCalls_oneToMany | /**
* Implements a unary -> stream call as Mono -> Flux
*
* @param request request
* @param responseObserver response StreamObserver
* @param func service implementation
*/
public static <T, R> void oneToMany(
T request, StreamObserver<R> responseObserver, Function<Mono<T>, Flux<R>> func) {
try {
Flux<R> response = func.apply(Mono.just(request));
ServerTripleReactorSubscriber<R> subscriber = response.subscribeWith(new ServerTripleReactorSubscriber<>());
subscriber.subscribe((ServerCallToObserverAdapter<R>) responseObserver);
} catch (Throwable throwable) {
responseObserver.onError(throwable);
}
} | 3.68 |
flink_FunctionContext_getExternalResourceInfos | /** Get the external resource information. */
public Set<ExternalResourceInfo> getExternalResourceInfos(String resourceName) {
if (context == null) {
throw new TableException(
"Calls to FunctionContext.getExternalResourceInfos are not available "
+ "at the current location.");
}
return context.getExternalResourceInfos(resourceName);
} | 3.68 |
framework_VTabsheetBase_isDynamicHeight | /**
* Returns whether the height of the widget is undefined.
*
* @since 7.2
* @return {@code true} if height of the widget is determined by its
* content, {@code false} otherwise
*/
protected boolean isDynamicHeight() {
return getConnectorForWidget(this).isUndefinedHeight();
} | 3.68 |
hadoop_JobACLsManager_isMRAdmin | /**
* Is the calling user an admin for the MapReduce cluster,
* i.e. a member of mapreduce.cluster.administrators?
* @return true, if user is an admin
*/
boolean isMRAdmin(UserGroupInformation callerUGI) {
if (adminAcl.isUserAllowed(callerUGI)) {
return true;
}
return false;
} | 3.68 |
hadoop_SchedulerAppReport_getLiveContainers | /**
* Get the list of live containers
* @return All of the live containers
*/
public Collection<RMContainer> getLiveContainers() {
return live;
} | 3.68 |
hadoop_S3AInMemoryInputStream_ensureCurrentBuffer | /**
* Ensures that a non-empty valid buffer is available for immediate reading.
* It returns true when at least one such buffer is available for reading.
* It returns false on reaching the end of the stream.
*
* @return true if at least one such buffer is available for reading, false otherwise.
*/
@Override
protected boolean ensureCurrentBuffer() throws IOException {
if (isClosed()) {
return false;
}
if (getBlockData().getFileSize() == 0) {
return false;
}
FilePosition filePosition = getFilePosition();
if (filePosition.isValid()) {
// Update current position (lazy seek).
filePosition.setAbsolute(getNextReadPos());
} else {
// Read entire file into buffer.
buffer.clear();
int numBytesRead =
getReader().read(buffer, 0, buffer.capacity());
if (numBytesRead <= 0) {
return false;
}
BufferData data = new BufferData(0, buffer);
filePosition.setData(data, 0, getNextReadPos());
}
return filePosition.buffer().hasRemaining();
} | 3.68 |
hadoop_ResourceRequest_build | /**
* Return generated {@link ResourceRequest} object.
* @return {@link ResourceRequest}
*/
@Public
@Stable
public ResourceRequest build() {
return resourceRequest;
} | 3.68 |
hbase_MetricsREST_incrementSucessfulDeleteRequests | /**
* @param inc How much to add to sucessfulDeleteCount.
*/
public void incrementSucessfulDeleteRequests(final int inc) {
source.incrementSucessfulDeleteRequests(inc);
} | 3.68 |
hmily_HmilyExpressionBuilder_extractAndPredicates | /**
* Extract AND predicates.
*
* @return the OR predicate segment.
*/
public HmilyOrPredicateSegment extractAndPredicates() {
HmilyOrPredicateSegment result = new HmilyOrPredicateSegment();
if (expression instanceof HmilyBinaryOperationExpression) {
String operator = ((HmilyBinaryOperationExpression) expression).getOperator();
Optional<HmilyLogicalOperator> logicalOperator = HmilyLogicalOperator.valueFrom(operator);
if (logicalOperator.isPresent() && HmilyLogicalOperator.OR == logicalOperator.get()) {
HmilyExpressionBuilder leftBuilder = new HmilyExpressionBuilder(((HmilyBinaryOperationExpression) expression).getLeft());
HmilyExpressionBuilder rightBuilder = new HmilyExpressionBuilder(((HmilyBinaryOperationExpression) expression).getRight());
result.getHmilyAndPredicates().addAll(leftBuilder.extractAndPredicates().getHmilyAndPredicates());
result.getHmilyAndPredicates().addAll(rightBuilder.extractAndPredicates().getHmilyAndPredicates());
} else if (logicalOperator.isPresent() && HmilyLogicalOperator.AND == logicalOperator.get()) {
HmilyExpressionBuilder leftBuilder = new HmilyExpressionBuilder(((HmilyBinaryOperationExpression) expression).getLeft());
HmilyExpressionBuilder rightBuilder = new HmilyExpressionBuilder(((HmilyBinaryOperationExpression) expression).getRight());
for (HmilyAndPredicate eachLeft : leftBuilder.extractAndPredicates().getHmilyAndPredicates()) {
for (HmilyAndPredicate eachRight : rightBuilder.extractAndPredicates().getHmilyAndPredicates()) {
result.getHmilyAndPredicates().add(createAndPredicate(eachLeft, eachRight));
}
}
} else {
HmilyAndPredicate andPredicate = new HmilyAndPredicate();
andPredicate.getPredicates().add(expression);
result.getHmilyAndPredicates().add(andPredicate);
}
} else {
HmilyAndPredicate andPredicate = new HmilyAndPredicate();
andPredicate.getPredicates().add(expression);
result.getHmilyAndPredicates().add(andPredicate);
}
return result;
} | 3.68 |
hadoop_DBNameNodeConnector_getConnectorInfo | /**
* Returns info about the connector.
*
* @return String.
*/
@Override
public String getConnectorInfo() {
return "Name Node Connector : " + clusterURI.toString();
} | 3.68 |
hadoop_StageConfig_withIOStatistics | /**
* Set IOStatistics store.
* @param store new store
* @return this
*/
public StageConfig withIOStatistics(final IOStatisticsStore store) {
checkOpen();
iostatistics = store;
return this;
} | 3.68 |
flink_InstantiationUtil_isNonStaticInnerClass | /**
* Checks whether the class is an inner class that is not statically accessible. That is
* especially true for anonymous inner classes.
*
* @param clazz The class to check.
* @return True, if the class is a non-statically accessible inner class.
*/
public static boolean isNonStaticInnerClass(Class<?> clazz) {
return clazz.getEnclosingClass() != null
&& (clazz.getDeclaringClass() == null || !Modifier.isStatic(clazz.getModifiers()));
} | 3.68 |
graphhopper_MaxWidth_create | /**
* Currently allows storing values from 0.1 up to a maximum of 0.1*2⁷ m, plus infinity. If a value lies
* between the maximum and infinity, the maximum value is assumed.
*/
public static DecimalEncodedValue create() {
return new DecimalEncodedValueImpl(KEY, 7, 0, 0.1, false, false, true);
} | 3.68 |
framework_VScrollTable_isNavigationKey | /**
* @param keyCode the key code to check
* @return true if the given keyCode is used by the table for navigation
*/
private boolean isNavigationKey(int keyCode) {
return keyCode == getNavigationUpKey()
|| keyCode == getNavigationLeftKey()
|| keyCode == getNavigationRightKey()
|| keyCode == getNavigationDownKey()
|| keyCode == getNavigationPageUpKey()
|| keyCode == getNavigationPageDownKey()
|| keyCode == getNavigationEndKey()
|| keyCode == getNavigationStartKey();
} | 3.68 |
hibernate-validator_PredefinedScopeBeanMetaDataManager_getAnnotationProcessingOptionsFromNonDefaultProviders | /**
* @return the annotation ignores from the non-annotation-based metadata providers
*/
private static AnnotationProcessingOptions getAnnotationProcessingOptionsFromNonDefaultProviders(List<MetaDataProvider> optionalMetaDataProviders) {
AnnotationProcessingOptions options = new AnnotationProcessingOptionsImpl();
for ( MetaDataProvider metaDataProvider : optionalMetaDataProviders ) {
options.merge( metaDataProvider.getAnnotationProcessingOptions() );
}
return options;
} | 3.68 |
flink_TumblingWindowAssigner_withOffset | /**
* Creates a new {@code TumblingWindowAssigner} {@link WindowAssigner} that assigns elements to
* time windows based on the element timestamp and offset.
*
* <p>For example, if you want to window a stream by hour, but the windows should begin at the
* 15th minute of each hour, you can use {@code of(Time.hours(1), Time.minutes(15))}; you will
* then get time windows that start at 0:15:00, 1:15:00, 2:15:00, etc.
*
* <p>Alternatively, if you live somewhere that does not use UTC±00:00 time, such as China,
* which uses GMT+08:00, and you want a time window with a size of one day that begins at
* 00:00:00 local time, you can use {@code of(Time.days(1), Time.hours(-8))}. The offset
* parameter is {@code Time.hours(-8)} because UTC+08:00 is 8 hours ahead of UTC.
*
* @param offset The offset by which the window start is shifted.
* @return The new window assigner.
*/
public TumblingWindowAssigner withOffset(Duration offset) {
return new TumblingWindowAssigner(size, offset.toMillis(), isEventTime);
} | 3.68 |
flink_BinarySegmentUtils_getByte | /**
* get byte from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static byte getByte(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].get(offset);
} else {
return getByteMultiSegments(segments, offset);
}
} | 3.68 |
flink_CrossOperator_projectTupleX | /**
* Chooses a projectTupleX according to the length of {@link
* org.apache.flink.api.java.operators.CrossOperator.CrossProjection#fieldIndexes}.
*
* @return The projected DataSet.
*/
@SuppressWarnings("unchecked")
public <OUT extends Tuple> ProjectCross<I1, I2, OUT> projectTupleX() {
ProjectCross<I1, I2, OUT> projectionCross = null;
switch (fieldIndexes.length) {
case 1:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple1();
break;
case 2:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple2();
break;
case 3:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple3();
break;
case 4:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple4();
break;
case 5:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple5();
break;
case 6:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple6();
break;
case 7:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple7();
break;
case 8:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple8();
break;
case 9:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple9();
break;
case 10:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple10();
break;
case 11:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple11();
break;
case 12:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple12();
break;
case 13:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple13();
break;
case 14:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple14();
break;
case 15:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple15();
break;
case 16:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple16();
break;
case 17:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple17();
break;
case 18:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple18();
break;
case 19:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple19();
break;
case 20:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple20();
break;
case 21:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple21();
break;
case 22:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple22();
break;
case 23:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple23();
break;
case 24:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple24();
break;
case 25:
projectionCross = (ProjectCross<I1, I2, OUT>) projectTuple25();
break;
default:
throw new IllegalStateException("Excessive arity in tuple.");
}
return projectionCross;
} | 3.68 |
flink_FullCachingLookupProvider_of | /**
* Build a {@link FullCachingLookupProvider} from the specified {@link
* ScanTableSource.ScanRuntimeProvider} and {@link CacheReloadTrigger}.
*/
static FullCachingLookupProvider of(
ScanTableSource.ScanRuntimeProvider scanRuntimeProvider,
CacheReloadTrigger cacheReloadTrigger) {
return new FullCachingLookupProvider() {
@Override
public ScanTableSource.ScanRuntimeProvider getScanRuntimeProvider() {
return scanRuntimeProvider;
}
@Override
public CacheReloadTrigger getCacheReloadTrigger() {
return cacheReloadTrigger;
}
@Override
public LookupFunction createLookupFunction() {
return null;
}
};
} | 3.68 |
hadoop_NativeTaskOutputFiles_getOutputIndexFile | /**
* Return the path to a local map output index file created earlier
*/
public Path getOutputIndexFile() throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, TASKTRACKER_OUTPUT, id);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.68 |
flink_StateBackendLoader_loadFromApplicationOrConfigOrDefaultInternal | /**
* Checks if an application-defined state backend is given, and if not, loads the state backend
* from the configuration, from the parameter 'state.backend', as defined in {@link
* CheckpointingOptions#STATE_BACKEND}. If no state backend is configured, this instantiates the
* default state backend (the {@link HashMapStateBackend}).
*
* <p>If an application-defined state backend is found, and the state backend is a {@link
* ConfigurableStateBackend}, this methods calls {@link
* ConfigurableStateBackend#configure(ReadableConfig, ClassLoader)} on the state backend.
*
* <p>Refer to {@link #loadStateBackendFromConfig(ReadableConfig, ClassLoader, Logger)} for
* details on how the state backend is loaded from the configuration.
*
* @param config The configuration to load the state backend from
* @param classLoader The class loader that should be used to load the state backend
* @param logger Optionally, a logger to log actions to (may be null)
* @return The instantiated state backend.
* @throws DynamicCodeLoadingException Thrown if a state backend factory is configured and the
* factory class was not found or the factory could not be instantiated
* @throws IllegalConfigurationException May be thrown by the StateBackendFactory when creating
* / configuring the state backend in the factory
* @throws IOException May be thrown by the StateBackendFactory when instantiating the state
* backend
*/
private static StateBackend loadFromApplicationOrConfigOrDefaultInternal(
@Nullable StateBackend fromApplication,
Configuration config,
ClassLoader classLoader,
@Nullable Logger logger)
throws IllegalConfigurationException, DynamicCodeLoadingException, IOException {
checkNotNull(config, "config");
checkNotNull(classLoader, "classLoader");
final StateBackend backend;
// (1) the application defined state backend has precedence
if (fromApplication != null) {
// see if this is supposed to pick up additional configuration parameters
if (fromApplication instanceof ConfigurableStateBackend) {
// needs to pick up configuration
if (logger != null) {
logger.info(
"Using job/cluster config to configure application-defined state backend: {}",
fromApplication);
}
backend =
((ConfigurableStateBackend) fromApplication).configure(config, classLoader);
} else {
// keep as is!
backend = fromApplication;
}
if (logger != null) {
logger.info("Using application-defined state backend: {}", backend);
}
} else {
// (2) check if the config defines a state backend
final StateBackend fromConfig = loadStateBackendFromConfig(config, classLoader, logger);
if (fromConfig != null) {
backend = fromConfig;
} else {
// (3) use the default
backend = new HashMapStateBackendFactory().createFromConfig(config, classLoader);
if (logger != null) {
logger.info(
"No state backend has been configured, using default (HashMap) {}",
backend);
}
}
}
return backend;
} | 3.68 |
dubbo_InjvmExporterListener_removeExporterChangeListener | /**
* Removes an ExporterChangeListener for a specific service.
*
* @param listener The ExporterChangeListener to remove.
* @param listenerKey The service key for the service to remove the listener from.
*/
public synchronized void removeExporterChangeListener(ExporterChangeListener listener, String listenerKey) {
Set<ExporterChangeListener> listeners = exporterChangeListeners.get(listenerKey);
if (CollectionUtils.isEmpty(listeners)) {
return;
}
listeners.remove(listener);
if (CollectionUtils.isEmpty(listeners)) {
exporterChangeListeners.remove(listenerKey);
}
} | 3.68 |
flink_HiveParserContext_setTokenRewriteStream | /**
* Set the token rewrite stream being used to parse the current top-level SQL statement. Note
* that this should <b>not</b> be used for other parsing activities; for example, when we
* encounter a reference to a view, we switch to a new stream for parsing the stored view
* definition from the catalog, but we don't clobber the top-level stream in the context.
*
* @param tokenRewriteStream the stream being used
*/
public void setTokenRewriteStream(TokenRewriteStream tokenRewriteStream) {
assert this.tokenRewriteStream == null;
this.tokenRewriteStream = tokenRewriteStream;
} | 3.68 |
hbase_HBaseServerBase_setAbortRequested | /**
* Sets the abort state if not already set.
* @return True if abortRequested set to True successfully, false if an abort is already in
* progress.
*/
protected final boolean setAbortRequested() {
return abortRequested.compareAndSet(false, true);
} | 3.68 |
framework_AbstractOrderedLayoutConnector_getWidget | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.AbstractComponentConnector#getWidget()
*/
@Override
public VAbstractOrderedLayout getWidget() {
return (VAbstractOrderedLayout) super.getWidget();
} | 3.68 |
rocketmq-connect_WorkerSinkTask_removeAndCloseMessageQueue | /**
* Remove and close message queues for the given topic.
*
* @param topic the topic whose message queues should be removed
* @param queues the message queues of the topic to keep; if {@code null}, all queues of the topic are removed
*/
public void removeAndCloseMessageQueue(String topic, Set<MessageQueue> queues) {
Set<MessageQueue> removeMessageQueues;
if (queues == null) {
removeMessageQueues = new HashSet<>();
for (MessageQueue messageQueue : messageQueues) {
if (messageQueue.getTopic().equals(topic)) {
removeMessageQueues.add(messageQueue);
}
}
} else {
// filter not contains in messageQueues
removeMessageQueues = messageQueues.stream().filter(messageQueue -> topic.equals(messageQueue.getTopic()) && !queues.contains(messageQueue)).collect(Collectors.toSet());
}
if (removeMessageQueues == null || removeMessageQueues.isEmpty()) {
return;
}
// clean message queues offset
closeMessageQueues(removeMessageQueues, false);
// remove record partitions
Set<RecordPartition> waitRemoveQueueMetaDatas = new HashSet<>();
for (MessageQueue messageQueue : removeMessageQueues) {
recordPartitions.forEach(key -> {
if (key.getPartition().get(TOPIC).equals(messageQueue.getTopic()) && key.getPartition().get(BROKER_NAME).equals(messageQueue.getBrokerName())
&& Integer.valueOf(String.valueOf(key.getPartition().get(QUEUE_ID))).equals(messageQueue.getQueueId())) {
waitRemoveQueueMetaDatas.add(key);
}
});
}
recordPartitions.removeAll(waitRemoveQueueMetaDatas);
// start remove
messageQueues.removeAll(removeMessageQueues);
} | 3.68 |
flink_AbstractStreamOperator_isUsingCustomRawKeyedState | /**
* Indicates whether or not implementations of this class are writing to the raw keyed state
* streams on snapshots, using {@link #snapshotState(StateSnapshotContext)}. If yes, subclasses
* should override this method to return {@code true}.
*
* <p>Subclasses need to explicitly indicate the use of raw keyed state because, internally, the
* {@link AbstractStreamOperator} may attempt to read from it as well to restore heap-based
* timers and ultimately fail with read errors. Setting this flag to {@code true} lets
* the {@link AbstractStreamOperator} know that the data written in the raw keyed
* states were not written by the timer services, and skips the timer restore attempt.
*
* <p>Please refer to FLINK-19741 for further details.
*
* <p>TODO: this method can be removed once all timers are moved to be managed by state
* backends.
*
* @return flag indicating whether or not this operator is writing to raw keyed state via {@link
* #snapshotState(StateSnapshotContext)}.
*/
@Internal
protected boolean isUsingCustomRawKeyedState() {
return false;
} | 3.68 |
hbase_QuotaSettingsFactory_bypassGlobals | /**
* Set the "bypass global settings" for the specified user
* @param userName the user to throttle
* @param bypassGlobals true if the global settings should be bypassed
* @return the quota settings
*/
public static QuotaSettings bypassGlobals(final String userName, final boolean bypassGlobals) {
return new QuotaGlobalsSettingsBypass(userName, null, null, null, bypassGlobals);
} | 3.68 |
hbase_FileSystemUtilizationChore_getInitialDelay | /**
* Extracts the initial delay for the chore from the configuration.
* @param conf The configuration object.
* @return The configured chore initial delay or the default value.
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(FS_UTILIZATION_CHORE_DELAY_KEY, FS_UTILIZATION_CHORE_DELAY_DEFAULT);
} | 3.68 |
pulsar_ManagedLedgerConfig_getMaxSizePerLedgerMb | /**
* @return the maxSizePerLedgerMb
*/
public int getMaxSizePerLedgerMb() {
return maxSizePerLedgerMb;
} | 3.68 |
flink_PriorityQueueSetFactory_create | /**
* Creates a {@link KeyGroupedInternalPriorityQueue}.
*
* @param stateName unique name for associated with this queue.
* @param byteOrderedElementSerializer a serializer whose format is lexicographically
* ordered in alignment with elementPriorityComparator.
* @param allowFutureMetadataUpdates whether allow metadata to update in the future or not.
* @param <T> type of the stored elements.
* @return the queue with the specified unique name.
*/
default <T extends HeapPriorityQueueElement & PriorityComparable<? super T> & Keyed<?>>
KeyGroupedInternalPriorityQueue<T> create(
@Nonnull String stateName,
@Nonnull TypeSerializer<T> byteOrderedElementSerializer,
boolean allowFutureMetadataUpdates) {
if (allowFutureMetadataUpdates) {
throw new UnsupportedOperationException(
this.getClass().getName()
+ " doesn't support to allow to update future metadata.");
} else {
return create(stateName, byteOrderedElementSerializer);
}
} | 3.68 |
hbase_NoOpIndexBlockEncoder_writeRoot | /**
* Writes this chunk into the given output stream in the root block index format. This format is
* similar to the {@link HFile} version 1 block index format, except that we store the on-disk size of
* the block instead of its uncompressed size.
* @param out the data output stream to write the block index to. Typically a stream writing into
* an {@link HFile} block.
*/
private void writeRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException {
for (int i = 0; i < blockIndexChunk.getNumEntries(); ++i) {
out.writeLong(blockIndexChunk.getBlockOffset(i));
out.writeInt(blockIndexChunk.getOnDiskDataSize(i));
Bytes.writeByteArray(out, blockIndexChunk.getBlockKey(i));
}
} | 3.68 |
framework_VAbstractPopupCalendar_setRangeEnd | /**
* Sets the end range for this component. The end range is inclusive, and it
* depends on the current resolution, what is considered inside the range.
*
* @param rangeEnd
* - the allowed range's end date
*/
public void setRangeEnd(String rangeEnd) {
calendar.setRangeEnd(rangeEnd);
} | 3.68 |
flink_FsJobArchivist_getArchivedJsons | /**
* Reads the given archive file and returns a {@link Collection} of contained {@link
* ArchivedJson}.
*
* @param file archive to extract
* @return collection of archived jsons
* @throws IOException if the file can't be opened, read or doesn't contain valid json
*/
public static Collection<ArchivedJson> getArchivedJsons(Path file) throws IOException {
try (FSDataInputStream input = file.getFileSystem().open(file);
ByteArrayOutputStream output = new ByteArrayOutputStream()) {
IOUtils.copyBytes(input, output);
try {
JsonNode archive = mapper.readTree(output.toByteArray());
Collection<ArchivedJson> archives = new ArrayList<>();
for (JsonNode archivePart : archive.get(ARCHIVE)) {
String path = archivePart.get(PATH).asText();
String json = archivePart.get(JSON).asText();
archives.add(new ArchivedJson(path, json));
}
return archives;
} catch (NullPointerException npe) {
// occurs if the archive is empty or any of the expected fields are not present
throw new IOException(
"Job archive (" + file.getPath() + ") did not conform to expected format.");
}
}
} | 3.68 |
streampipes_AbstractMigrationManager_performUpdate | /**
* Perform the update of the description based on the given requestUrl
*
* @param requestUrl URL that references the description to be updated at the extensions service.
*/
protected void performUpdate(String requestUrl) {
try {
var entityPayload = HttpJsonParser.getContentFromUrl(URI.create(requestUrl));
var updateResult = Operations.verifyAndUpdateElement(entityPayload);
if (!updateResult.isSuccess()) {
LOG.error(
"Updating the pipeline element description failed: {}",
StringUtils.join(
updateResult.getNotifications().stream().map(Notification::toString).toList(),
"\n")
);
}
} catch (IOException | SepaParseException e) {
LOG.error("Updating the pipeline element description failed due to the following exception:\n{}",
StringUtils.join(e.getStackTrace(), "\n")
);
}
} | 3.68 |
hbase_HFilePrettyPrinter_mobFileExists | /**
* Checks whether the referenced mob file exists.
*/
private boolean mobFileExists(FileSystem fs, TableName tn, String mobFileName, String family,
Set<String> foundMobFiles, Set<String> missingMobFiles) throws IOException {
if (foundMobFiles.contains(mobFileName)) {
return true;
}
if (missingMobFiles.contains(mobFileName)) {
return false;
}
String tableName = tn.getNameAsString();
List<Path> locations = mobFileLocations.get(tableName);
if (locations == null) {
locations = new ArrayList<>(2);
locations.add(MobUtils.getMobFamilyPath(getConf(), tn, family));
locations.add(HFileArchiveUtil.getStoreArchivePath(getConf(), tn,
MobUtils.getMobRegionInfo(tn).getEncodedName(), family));
mobFileLocations.put(tn.getNameAsString(), locations);
}
boolean exist = false;
for (Path location : locations) {
Path mobFilePath = new Path(location, mobFileName);
if (fs.exists(mobFilePath)) {
exist = true;
break;
}
}
if (exist) {
evictMobFilesIfNecessary(foundMobFiles, FOUND_MOB_FILES_CACHE_CAPACITY);
foundMobFiles.add(mobFileName);
} else {
evictMobFilesIfNecessary(missingMobFiles, MISSING_MOB_FILES_CACHE_CAPACITY);
missingMobFiles.add(mobFileName);
}
return exist;
} | 3.68 |
framework_DragSourceExtensionConnector_sendDragStartEventToServer | /**
* Initiates a server RPC for the drag start event.
* <p>
* This method is called only if there is a server side drag start event
* handler attached.
*
* @param dragStartEvent
* Client side dragstart event.
*/
protected void sendDragStartEventToServer(NativeEvent dragStartEvent) {
getRpcProxy(DragSourceRpc.class).dragStart();
} | 3.68 |
framework_ComboBoxElement_openPopup | /**
* Open the suggestion popup.
*/
public void openPopup() {
findElement(By.vaadin("#button")).click();
} | 3.68 |
hadoop_TFile_isValueLengthKnown | /**
* Check whether it is safe to call getValueLength().
*
* @return true if the value length is known beforehand. Values less than
* the chunk size will always have their lengths known beforehand.
* Values that are written out as a whole (with advertised
* length up-front) will always have their lengths known in
* read.
*/
public boolean isValueLengthKnown() {
return (vlen >= 0);
} | 3.68 |
flink_DefaultExecutionGraph_jobVertexFinished | /**
* Called whenever a job vertex reaches state FINISHED (completed successfully). Once all job
* vertices are in the FINISHED state, the program is successfully done.
*/
@Override
public void jobVertexFinished() {
assertRunningInJobMasterMainThread();
final int numFinished = ++numFinishedJobVertices;
if (numFinished == numJobVerticesTotal) {
FutureUtils.assertNoException(
waitForAllExecutionsTermination().thenAccept(ignored -> jobFinished()));
}
} | 3.68 |
hbase_RegionServerCoprocessorHost_getRegionServerServices | /**
* @return An instance of RegionServerServices, an object NOT for general user-space Coprocessor
* consumption.
*/
@Override
public RegionServerServices getRegionServerServices() {
return this.regionServerServices;
} | 3.68 |
flink_ChannelStateCheckpointWriter_fail | /**
* The throwable is only used for the specific subtask that triggered the failure. Other subtasks
* fail with {@link CHANNEL_STATE_SHARED_STREAM_EXCEPTION}.
*/
public void fail(JobVertexID jobVertexID, int subtaskIndex, Throwable throwable) {
if (isDone()) {
return;
}
this.throwable = throwable;
ChannelStatePendingResult result =
pendingResults.get(SubtaskID.of(jobVertexID, subtaskIndex));
if (result != null) {
result.fail(throwable);
}
failResultAndCloseStream(
new CheckpointException(CHANNEL_STATE_SHARED_STREAM_EXCEPTION, throwable));
} | 3.68 |
hbase_HFileReaderImpl_getFirstKey | /**
* @return the first key in the file. May be null if file has no entries. Note that this is not
* the first row key, but rather the byte form of the first KeyValue.
*/
@Override
public Optional<Cell> getFirstKey() {
if (dataBlockIndexReader == null) {
throw new BlockIndexNotLoadedException(path);
}
return dataBlockIndexReader.isEmpty()
? Optional.empty()
: Optional.of(dataBlockIndexReader.getRootBlockKey(0));
} | 3.68 |
hadoop_RpcServerException_getRpcErrorCodeProto | /**
* @return the detailed RPC status corresponding to this exception.
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_RPC_SERVER;
} | 3.68 |
hadoop_ErrorTranslation_isUnknownBucket | /**
* Does this exception indicate that the AWS Bucket was unknown.
* @param e exception.
* @return true if the status code and error code mean that the
* remote bucket is unknown.
*/
public static boolean isUnknownBucket(AwsServiceException e) {
return e.statusCode() == SC_404_NOT_FOUND
&& AwsErrorCodes.E_NO_SUCH_BUCKET.equals(e.awsErrorDetails().errorCode());
} | 3.68 |
open-banking-gateway_ExpirableDataConfig_subscribers | /**
* Expirable subscribers to the process results. They stay alive for some time and are removed
* if no message comes in.
*/
@Bean
Map<String, Consumer<InternalProcessResult>> subscribers(@Qualifier(PROTOCOL_CACHE_BUILDER) CacheBuilder builder) {
return builder.build().asMap();
} | 3.68 |
flink_HadoopTupleUnwrappingIterator_set | /**
* Set the Flink iterator to wrap.
*
* @param iterator The Flink iterator to wrap.
*/
@Override
public void set(final Iterator<Tuple2<KEY, VALUE>> iterator) {
this.iterator = iterator;
if (this.hasNext()) {
final Tuple2<KEY, VALUE> tuple = iterator.next();
this.curKey = keySerializer.copy(tuple.f0);
this.firstValue = tuple.f1;
this.atFirst = true;
} else {
this.atFirst = false;
}
} | 3.68 |
hmily_HmilyXaResource_start | /**
* Start.
*
* @param i the XA start flags
* @throws XAException the xa exception
*/
public void start(final int i) throws XAException {
this.start(this.xid, i);
} | 3.68 |
dubbo_ExpiringMap_getExpirationInterval | /**
* get expiration interval
*
* @return the expiration interval, in seconds
*/
public int getExpirationInterval() {
return (int) expirationIntervalMillis / 1000;
} | 3.68 |
hadoop_ResourceUsageMetrics_getHeapUsage | /**
* Get the total heap usage.
*/
public long getHeapUsage() {
return heapUsage;
} | 3.68 |
hadoop_DatanodeLocalInfo_getUptime | /** get uptime */
public long getUptime() {
return this.uptime;
} | 3.68 |
hadoop_DomainRowKey_parseRowKey | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey a rowkey represented as a byte array.
* @return a <cite>DomainRowKey</cite> object.
*/
public static DomainRowKey parseRowKey(byte[] rowKey) {
return new DomainRowKeyConverter().decode(rowKey);
} | 3.68 |
querydsl_DateExpression_min | /**
* Get the minimum value of this expression (aggregation)
*
* @return min(this)
*/
@Override
public DateExpression<T> min() {
if (min == null) {
min = Expressions.dateOperation(getType(), Ops.AggOps.MIN_AGG, mixin);
}
return min;
} | 3.68 |
framework_Result_ok | /**
* Returns a successful result wrapping the given value.
*
* @param <R>
* the result value type
* @param value
* the result value, can be null
* @return a successful result
*/
public static <R> Result<R> ok(R value) {
return new SimpleResult<>(value, null);
} | 3.68 |
hbase_MiniZooKeeperCluster_waitForServerDown | // XXX: From o.a.zk.t.ClientBase. We just dropped the check for ssl/secure.
private static boolean waitForServerDown(int port, long timeout) throws IOException {
long start = EnvironmentEdgeManager.currentTime();
while (true) {
try {
send4LetterWord(HOST, port, "stat", false, (int) timeout);
} catch (IOException | X509Exception.SSLContextException e) {
return true;
}
if (EnvironmentEdgeManager.currentTime() > start + timeout) {
break;
}
try {
Thread.sleep(TIMEOUT);
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException().initCause(e);
}
}
return false;
} | 3.68 |
flink_MapView_keys | /**
* Returns all the keys in the map view.
*
* @return An iterable of all the keys in the map.
* @throws Exception Thrown if the system cannot access the map.
*/
public Iterable<K> keys() throws Exception {
return map.keySet();
} | 3.68 |
framework_SQLContainer_removeContainerFilters | /**
* {@inheritDoc}
*/
public void removeContainerFilters(Object propertyId) {
List<Filter> toRemove = new ArrayList<Filter>();
for (Filter f : filters) {
if (f.appliesToProperty(propertyId)) {
toRemove.add(f);
}
}
filters.removeAll(toRemove);
refresh();
} | 3.68 |
morf_OracleDialect_formatSqlStatement | /**
* SQL*Plus requires SQL statement lines to be less than 2500 characters in length.
* Additionally, PL/SQL statements must be terminated with "/".
*
* @see org.alfasoftware.morf.jdbc.SqlDialect#formatSqlStatement(java.lang.String)
*/
@Override
public String formatSqlStatement(String sqlStatement) {
// format statement ending
StringBuilder builder = new StringBuilder(sqlStatement);
if (sqlStatement.endsWith("END;")) {
builder.append(System.getProperty("line.separator"));
builder.append("/");
} else {
builder.append(";");
}
return splitSqlStatement(builder.toString());
} | 3.68 |
hudi_HoodieBaseFileGroupRecordBuffer_shouldSkip | /**
* Filter a record for downstream processing when:
* 1. A set of pre-specified keys exists.
* 2. The key of the record is not contained in the set.
*/
protected boolean shouldSkip(T record, String keyFieldName, boolean isFullKey, Set<String> keys) {
String recordKey = readerContext.getValue(record, readerSchema, keyFieldName).toString();
// Can not extract the record key, throw.
if (recordKey == null || recordKey.isEmpty()) {
throw new HoodieKeyException("Can not extract the key for a record");
}
// No keys are specified. Cannot skip at all.
if (keys.isEmpty()) {
return false;
}
// When the record key matches with one of the keys or key prefixes, can not skip.
if ((isFullKey && keys.contains(recordKey))
|| (!isFullKey && keys.stream().anyMatch(recordKey::startsWith))) {
return false;
}
// Otherwise, this record is not needed.
return true;
} | 3.68 |
pulsar_ManagedLedgerConfig_getAckQuorumSize | /**
* @return the ackQuorumSize
*/
public int getAckQuorumSize() {
return ackQuorumSize;
} | 3.68 |
zxing_GridSampler_getInstance | /**
* @return the current implementation of GridSampler
*/
public static GridSampler getInstance() {
return gridSampler;
} | 3.68 |
hadoop_AMRMClientAsyncImpl_updateBlacklist | /**
* Update application's blacklist with addition or removal resources.
*
* @param blacklistAdditions list of resources which should be added to the
* application blacklist
* @param blacklistRemovals list of resources which should be removed from the
* application blacklist
*/
public void updateBlacklist(List<String> blacklistAdditions,
List<String> blacklistRemovals) {
client.updateBlacklist(blacklistAdditions, blacklistRemovals);
} | 3.68 |
morf_ColumnBean_getUpperCaseName | /**
* @return the upper case name
*/
@Override
public String getUpperCaseName() {
return upperCaseName.get();
} | 3.68 |
hbase_RequestConverter_buildNoDataRegionAction | /** Returns whether or not the rowMutations has an Increment or Append */
private static boolean buildNoDataRegionAction(final RowMutations rowMutations,
final List<CellScannable> cells, long nonce, final RegionAction.Builder regionActionBuilder,
final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder)
throws IOException {
boolean ret = false;
for (Mutation mutation : rowMutations.getMutations()) {
mutationBuilder.clear();
MutationProto mp;
if (mutation instanceof Increment || mutation instanceof Append) {
mp = ProtobufUtil.toMutationNoData(getMutationType(mutation), mutation, mutationBuilder,
nonce);
ret = true;
} else {
mp = ProtobufUtil.toMutationNoData(getMutationType(mutation), mutation, mutationBuilder);
}
cells.add(mutation);
actionBuilder.clear();
regionActionBuilder.addAction(actionBuilder.setMutation(mp).build());
}
return ret;
} | 3.68 |
hbase_MasterWalManager_getSplittingServersFromWALDir | /**
* Get Servernames which are currently splitting; paths have a '-splitting' suffix.
*/
public Set<ServerName> getSplittingServersFromWALDir() throws IOException {
return getServerNamesFromWALDirPath(
p -> p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT));
} | 3.68 |
framework_LayoutDemo_init | /**
* Initialize Application. Demo components are added to main window.
*/
@Override
public void init() {
final LegacyWindow mainWindow = new LegacyWindow("Layout demo");
setMainWindow(mainWindow);
//
// Create horizontal ordered layout
//
final HorizontalLayout layoutA = new HorizontalLayout();
// Add 4 panels
fillLayout(layoutA, 4);
//
// Create vertical ordered layout
//
final VerticalLayout layoutB = new VerticalLayout();
// Add 4 panels
fillLayout(layoutB, 4);
//
// Create grid layout
//
final GridLayout layoutG = new GridLayout(4, 4);
// Add 16 panels components
fillLayout(layoutG, 16);
//
// Create grid layout
//
final GridLayout layoutG2 = new GridLayout(4, 4);
// Add 4 panels with absolute coordinates (diagonally)
layoutG2.addComponent(getExampleComponent("x=0, y=0"), 0, 0);
layoutG2.addComponent(getExampleComponent("x=1, y=1"), 1, 1);
layoutG2.addComponent(getExampleComponent("x=2, y=2"), 2, 2);
layoutG2.addComponent(getExampleComponent("x=3, y=3"), 3, 3);
// Add 4 pictures with absolute coordinates (diagonally)
layoutG2.addComponent(getExamplePicture("x=3, y=0"), 3, 0);
layoutG2.addComponent(getExamplePicture("x=2, y=1"), 2, 1);
layoutG2.addComponent(getExamplePicture("x=1, y=2"), 1, 2);
layoutG2.addComponent(getExamplePicture("x=0, y=3"), 0, 3);
//
// Create TabSheet
//
final TabSheet tabsheet = new TabSheet();
tabsheet.setCaption(
"Tabsheet, above layouts are added to this component");
tabsheet.addTab(layoutA, "Horizontal ordered layout", null);
tabsheet.addTab(layoutB, "Vertical ordered layout", null);
tabsheet.addTab(layoutG, "First grid layout", null);
tabsheet.addTab(layoutG2, "Second grid layout", null);
//
// Add demo layouts to main window
//
mainWindow.addComponent(new Label(
"<h3>Horizontal ordered layout</h3>Added four components.",
ContentMode.HTML));
mainWindow.addComponent(layoutA);
mainWindow.addComponent(new Label(
"<br /><h3>Vertical ordered layout</h3>Added four components.",
ContentMode.HTML));
mainWindow.addComponent(layoutB);
mainWindow.addComponent(new Label(
"<br /><h3>Grid Layout (4 x 4)</h3>Added 16 components.",
ContentMode.HTML));
mainWindow.addComponent(layoutG);
mainWindow.addComponent(new Label(
"<br /><h3>Grid Layout (4 x 4)</h3>"
+ "Added four panels and four embedded components "
+ "diagonally with absolute coordinates.",
ContentMode.HTML));
mainWindow.addComponent(layoutG2);
mainWindow.addComponent(
new Label("<br /><h3>TabSheet</h3>Added above layouts as tabs.",
ContentMode.HTML));
mainWindow.addComponent(tabsheet);
} | 3.68 |
framework_VFilterSelect_updateSuggestionPopupMinWidth | /**
* Update minimum width for FilterSelect textarea based on input prompt and
* suggestions.
* <p>
* For internal use only. May be removed or replaced in the future.
*/
public void updateSuggestionPopupMinWidth() {
// used only to calculate minimum width
String captions = WidgetUtil.escapeHTML(inputPrompt);
for (FilterSelectSuggestion suggestion : currentSuggestions) {
// Collect captions so we can calculate minimum width for
// textarea
if (!captions.isEmpty()) {
captions += "|";
}
captions += WidgetUtil
.escapeHTML(suggestion.getReplacementString());
}
// Calculate minimum textarea width
suggestionPopupMinWidth = minWidth(captions);
} | 3.68 |
flink_YarnResourceManagerDriver_getYarnStatus | /**
* Converts a Flink application status enum to a YARN application status enum.
*
* @param status The Flink application status.
* @return The corresponding YARN application status.
*/
private FinalApplicationStatus getYarnStatus(ApplicationStatus status) {
if (status == null) {
return FinalApplicationStatus.UNDEFINED;
} else {
switch (status) {
case SUCCEEDED:
return FinalApplicationStatus.SUCCEEDED;
case FAILED:
return FinalApplicationStatus.FAILED;
case CANCELED:
return FinalApplicationStatus.KILLED;
default:
return FinalApplicationStatus.UNDEFINED;
}
}
} | 3.68 |
morf_SchemaChangeSequence_removeIndex | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#removeIndex(java.lang.String, org.alfasoftware.morf.metadata.Index)
*/
@Override
public void removeIndex(String tableName, Index index) {
RemoveIndex removeIndex = new RemoveIndex(tableName, index);
visitor.visit(removeIndex);
schemaAndDataChangeVisitor.visit(removeIndex);
} | 3.68 |
hbase_RowFilter_parseFrom | /**
* Parse a serialized representation of {@link RowFilter}
* @param pbBytes A pb serialized {@link RowFilter} instance
* @return An instance of {@link RowFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.RowFilter proto;
try {
proto = FilterProtos.RowFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator valueCompareOp =
CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new RowFilter(valueCompareOp, valueComparator);
} | 3.68 |
framework_ServerRpcMethodInvocation_findInvocationMethod | /**
* Tries to find the method from the cache or alternatively by invoking
* {@link #doFindInvocationMethod(Class, String, int)} and updating the
* cache.
*
* @param targetType the class to search for the method
* @param methodName the name of the method to find
* @param parameterCount the number of parameters the method takes
* @return the matching method
*/
private Method findInvocationMethod(Class<?> targetType, String methodName,
int parameterCount) {
// TODO currently only using method name and number of parameters as the
// signature
String signature = targetType.getName() + "." + methodName + "("
+ parameterCount;
Method invocationMethod = INVOCATION_METHOD_CACHE.get(signature);
if (invocationMethod == null) {
invocationMethod = doFindInvocationMethod(targetType, methodName,
parameterCount);
if (invocationMethod != null) {
INVOCATION_METHOD_CACHE.put(signature, invocationMethod);
}
}
if (invocationMethod == null) {
throw new IllegalStateException("Can't find method " + methodName
+ " with " + parameterCount + " parameters in "
+ targetType.getName());
}
return invocationMethod;
} | 3.68 |
hbase_OperationStatus_getExceptionMsg | /**
* Returns the exception message associated with this operation status.
*/
public String getExceptionMsg() {
return exceptionMsg;
} | 3.68 |
hmily_PropertyName_isNumericIndex | /**
* Return if the element in the name is indexed and numeric.
*
* @param elementIndex the index of the element
* @return {@code true} if the element is indexed and numeric
*/
public boolean isNumericIndex(final int elementIndex) {
return isIndexed(elementIndex) && isNumeric(getElement(elementIndex));
} | 3.68 |
hadoop_StartupProgressMetrics_register | /**
* Registers StartupProgressMetrics linked to the given StartupProgress.
*
* @param prog StartupProgress to link
*/
public static void register(StartupProgress prog) {
new StartupProgressMetrics(prog);
} | 3.68 |
hudi_HoodieMergedLogRecordScanner_scanByFullKeys | /**
* Provides incremental scanning capability where only provided keys will be looked
* up in the delta-log files, scanned and subsequently materialized into the internal
* cache
*
* @param keys to be looked up
*/
public void scanByFullKeys(List<String> keys) {
// We can skip scanning in case reader is in full-scan mode, in which case all blocks
// are processed upfront (no additional scanning is necessary)
if (forceFullScan) {
return; // no-op
}
List<String> missingKeys = keys.stream()
.filter(key -> !records.containsKey(key))
.collect(Collectors.toList());
if (missingKeys.isEmpty()) {
// All the required records are already fetched, no-op
return;
}
scanInternal(Option.of(KeySpec.fullKeySpec(missingKeys)), false);
} | 3.68 |
dubbo_AsyncRpcResult_getValue | /**
* Notice the return type of {@link #getValue} is the actual type of the RPC method, not {@link AppResponse}
*
* @return the actual return value of the invoked RPC method
*/
@Override
public Object getValue() {
return getAppResponse().getValue();
} | 3.68 |
hadoop_LocalJobOutputFiles_getSpillFileForWrite | /**
* Create a local map spill file name.
*
* @param spillNumber the number
* @param size the size of the file
*/
public Path getSpillFileForWrite(int spillNumber, long size) throws IOException {
String path = String.format(SPILL_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.68 |
hudi_SchedulerConfGenerator_generateAndStoreConfig | /**
* Generate spark scheduling configs and store it to a randomly generated tmp file.
*
* @param deltaSyncWeight Scheduling weight for delta sync
* @param compactionWeight Scheduling weight for compaction
* @param deltaSyncMinShare Minshare for delta sync
* @param compactionMinShare Minshare for compaction
* @param clusteringWeight Scheduling weight for clustering
* @param clusteringMinShare Minshare for clustering
* @return Return the absolute path of the tmp file which stores the spark schedule configs
* @throws IOException Throws an IOException when write configs to file failed
*/
private static String generateAndStoreConfig(Integer deltaSyncWeight, Integer compactionWeight,
Integer deltaSyncMinShare, Integer compactionMinShare, Integer clusteringWeight, Integer clusteringMinShare) throws IOException {
File tempConfigFile = File.createTempFile(UUID.randomUUID().toString(), ".xml");
BufferedWriter bw = new BufferedWriter(new FileWriter(tempConfigFile));
bw.write(generateConfig(deltaSyncWeight, compactionWeight, deltaSyncMinShare, compactionMinShare, clusteringWeight, clusteringMinShare));
bw.close();
// SPARK-35083 introduces remote scheduler pool files, so the file must include scheme since Spark 3.2
String path = HoodieSparkUtils.gteqSpark3_2() ? tempConfigFile.toURI().toString() : tempConfigFile.getAbsolutePath();
LOG.info("Configs written to file " + path);
return path;
} | 3.68 |
hadoop_S3ClientFactory_withMultipartThreshold | /**
* Set the threshold for multipart operations.
* @param value new value
* @return the builder
*/
public S3ClientCreationParameters withMultipartThreshold(
final long value) {
multiPartThreshold = value;
return this;
} | 3.68 |
framework_VerticalLayout_addComponentsAndExpand | /**
* Adds the given components to this layout and sets them as expanded. The
* height of all added child components are set to 100% so that the
* expansion will be effective. The height of this layout is also set to
* 100% if it is currently undefined.
* <p>
* The components are added in the provided order to the end of this layout.
* Any components that are already children of this layout will be moved to
* new positions.
*
* @param components
* the components to set, not <code>null</code>
* @since 8.0
*/
public void addComponentsAndExpand(Component... components) {
addComponents(components);
if (getHeight() < 0) {
setHeight(100, Unit.PERCENTAGE);
}
for (Component child : components) {
child.setHeight(100, Unit.PERCENTAGE);
setExpandRatio(child, 1);
}
} | 3.68 |
hbase_StoreFileInfo_getActiveFileName | /**
* Return the active file name that contains the real data.
* <p>
* For referenced hfile, we will return the name of the reference file as it will be used to
* construct the StoreFileReader. And for linked hfile, we will return the name of the file being
* linked.
*/
public String getActiveFileName() {
if (reference != null || link == null) {
return initialPath.getName();
} else {
return HFileLink.getReferencedHFileName(initialPath.getName());
}
} | 3.68 |
dubbo_HttpMessageCodecManager_typeJudge | /**
* If the content type is null or ALL, the media type is judged by the class type.
*
* @param mediaType
* @param bodyType
* @param httpMessageCodec
* @return
*/
private static boolean typeJudge(MediaType mediaType, Class<?> bodyType, HttpMessageCodec httpMessageCodec) {
return (MediaType.ALL_VALUE.equals(mediaType) || mediaType == null)
&& bodyType != null
&& httpMessageCodec.typeSupport(bodyType);
} | 3.68 |
framework_VDragAndDropWrapper_startDrag | /**
* Starts a drag and drop operation from mousedown or touchstart event if
* required conditions are met.
*
* @param event the mousedown or touchstart event that may start the drag
* @return true if the event was handled as a drag start event
*/
private boolean startDrag(NativeEvent event) {
if (dragStartMode == WRAPPER || dragStartMode == COMPONENT
|| dragStartMode == COMPONENT_OTHER) {
VTransferable transferable = new VTransferable();
transferable.setDragSource(getConnector());
ComponentConnector paintable = Util.findPaintable(client,
Element.as(event.getEventTarget()));
Widget widget = paintable.getWidget();
transferable.setData("component", paintable);
VDragEvent dragEvent = VDragAndDropManager.get()
.startDrag(transferable, event, true);
transferable.setData("mouseDown", MouseEventDetailsBuilder
.buildMouseEventDetails(event).serialize());
if (dragStartMode == WRAPPER) {
dragEvent.createDragImage(getElement(), true);
} else if (dragStartMode == COMPONENT_OTHER
&& getDragImageWidget() != null) {
dragEvent.createDragImage(getDragImageWidget().getElement(),
true);
} else {
dragEvent.createDragImage(widget.getElement(), true);
}
return true;
}
return false;
} | 3.68 |
pulsar_ConsumerConfiguration_getCryptoKeyReader | /**
* @return the CryptoKeyReader
*/
public CryptoKeyReader getCryptoKeyReader() {
return conf.getCryptoKeyReader();
} | 3.68 |
morf_MathsField_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(getLeftField())
.dispatch(getRightField());
} | 3.68 |
open-banking-gateway_DatasafeDataStorage_update | /**
* Updates encrypted database entry
* @param path Path to the entry
* @param data New entry value
*/
@Override
public void update(String path, byte[] data) {
txOper.execute(callback -> {
Optional<T> entry = find.apply(path);
if (entry.isPresent()) {
T toSave = entry.get();
setData.accept(toSave, data);
return null;
}
T newEntry = factory.apply(path);
setData.accept(newEntry, data);
repository.save(newEntry);
return null;
});
} | 3.68 |
flink_PropertiesUtil_getInt | /**
* Get integer from properties. This method throws an exception if the integer is not valid.
*
* @param config Properties
* @param key key in Properties
* @param defaultValue default value if value is not set
* @return default or value of key
*/
public static int getInt(Properties config, String key, int defaultValue) {
String val = config.getProperty(key);
if (val == null) {
return defaultValue;
} else {
try {
return Integer.parseInt(val);
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException(
"Value for configuration key='"
+ key
+ "' is not set correctly. "
+ "Entered value='"
+ val
+ "'. Default value='"
+ defaultValue
+ "'");
}
}
} | 3.68 |
framework_JsonPaintTarget_escapeXML | /**
* Substitutes the XML sensitive characters with predefined XML entities.
*
* @param xml
* the String to be substituted.
* @return A new StringBuilder instance where all occurrences of XML
* sensitive characters are substituted with entities.
*
*/
static StringBuilder escapeXML(StringBuilder xml) {
if (xml == null || xml.length() <= 0) {
return new StringBuilder();
}
final StringBuilder result = new StringBuilder(xml.length() * 2);
for (int i = 0; i < xml.length(); i++) {
final char c = xml.charAt(i);
final String s = toXmlChar(c);
if (s != null) {
result.append(s);
} else {
result.append(c);
}
}
return result;
} | 3.68 |